Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 49
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 88
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 49
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 62
-rw-r--r--  drivers/gpu/drm/drm_edid_modes.h | 4
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 36
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 10
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 27
-rw-r--r--  drivers/gpu/drm/drm_info.c | 27
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 137
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 81
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 572
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 6
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 205
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 75
-rw-r--r--  drivers/gpu/drm/drm_sman.c | 4
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 21
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 7
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 117
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 18
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 20
-rw-r--r--  drivers/gpu/drm/i830/Makefile | 8
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 1560
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.c | 107
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.h | 295
-rw-r--r--  drivers/gpu/drm/i830/i830_irq.c | 186
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 88
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 144
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 466
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 201
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 218
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 486
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 429
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 301
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1906
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 161
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 43
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 137
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 15
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 4
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 94
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 260
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 75
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 65
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fb.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 207
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 255
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ramht.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 369
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_temp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_util.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_util.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv_modes.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 161
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 191
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 291
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c | 199
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 151
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vram.c | 65
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vram.c | 62
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 7
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 34
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/cayman_blit_shaders.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/cayman_blit_shaders.h | 32
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 155
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 132
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 22
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 1294
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 495
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 429
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 155
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 87
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 103
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 257
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/cayman | 619
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 1
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 13
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 14
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 13
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 28
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 5
-rw-r--r--  drivers/gpu/stub/Kconfig | 1
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 4
198 files changed, 10324 insertions(+), 6586 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0902d4460039..b493663c7ba7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -24,6 +24,7 @@ config DRM_KMS_HELPER
depends on DRM
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
+ select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
help
FB and CRTC helpers for KMS drivers.
@@ -73,32 +74,17 @@ source "drivers/gpu/drm/radeon/Kconfig"
config DRM_I810
tristate "Intel I810"
- # BKL usage in order to avoid AB-BA deadlocks, may become BROKEN_ON_SMP
- depends on DRM && AGP && AGP_INTEL && BKL
+ # !PREEMPT because of missing ioctl locking
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
for this driver to work.
-choice
- prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
- depends on DRM && AGP && AGP_INTEL
- optional
-
-config DRM_I830
- tristate "i830 driver"
- # BKL usage in order to avoid AB-BA deadlocks, i830 may get removed
- depends on BKL
- help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM or 865G integrated graphics. If M is selected, the
- module will be called i830. AGP support is required for this driver
- to work. This driver is used by the older X releases X.org 6.7 and
- XFree86 4.3. If unsure, build this and i915 as modules and the X server
- will load the correct one.
-
config DRM_I915
- tristate "i915 driver"
+ tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+ depends on DRM
+ depends on AGP
depends on AGP_INTEL
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -111,16 +97,25 @@ config DRM_I915
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
+ select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
- module will be called i915. AGP support is required for this driver
- to work. This driver is used by the Intel driver in X.org 6.8 and
- XFree86 4.4 and above. If unsure, build this and i830 as modules and
- the X server will load the correct one.
+ Choose this option if you have a system that has "Intel Graphics
+ Media Accelerator" or "HD Graphics" integrated graphics,
+ including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+ G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+ Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+ If M is selected, the module will be called i915. AGP support
+ is required for this driver to work. This driver is used by
+ the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+ replaces the older i830 module that supported a subset of the
+ hardware in older X.org releases.
+
+ Note that the older i810/i815 chipsets require the use of the
+ i810 driver instead, and the Atom z5xx series has an entirely
+ different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
@@ -132,8 +127,6 @@ config DRM_I915_KMS
the driver to bind to PCI devices, which precludes loading things
like intelfb.
-endchoice
-
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && PCI
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 997c43d04909..89cf05a72d1c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o
+ drm_trace_points.o drm_global.o drm_usb.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
@@ -29,7 +29,6 @@ obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 654faa803dcb..872747c5a544 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
uint32_t __user *encoder_id;
struct drm_mode_group *mode_group;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
/*
@@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev,
struct drm_mode_object *obj;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
@@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
uint64_t __user *prop_values;
uint32_t __user *encoder_ptr;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
@@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_encoder *encoder;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
@@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
int ret = 0;
int i;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
struct drm_crtc *crtc;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!req->flags) {
DRM_ERROR("no operation set\n");
return -EINVAL;
@@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_ERROR("mode new framebuffer width not within limits\n");
return -EINVAL;
@@ -1678,7 +1699,7 @@ int drm_mode_addfb(struct drm_device *dev,
mutex_lock(&dev->mode_config.mutex);
- /* TODO check buffer is sufficently large */
+ /* TODO check buffer is sufficiently large */
/* TODO setup destructor callback */
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
@@ -1724,9 +1745,12 @@ int drm_mode_rmfb(struct drm_device *dev,
int ret = 0;
int found = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
- /* TODO check that we realy get a framebuffer back. */
+ /* TODO check that we really get a framebuffer back. */
if (!obj) {
DRM_ERROR("mode invalid framebuffer id\n");
ret = -EINVAL;
@@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
@@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
int num_clips;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
@@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
@@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
@@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
@@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
int ret = 0;
void *blob_ptr;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
@@ -2393,6 +2435,9 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
int ret = -EINVAL;
int i;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
@@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
int size;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
int size;
int ret = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -2694,3 +2745,36 @@ void drm_mode_config_reset(struct drm_device *dev)
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
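
The three wrappers above only dispatch to optional driver hooks (dumb_create, dumb_map_offset, dumb_destroy); the point of the new ioctls is to let generic userspace obtain a scanout-capable buffer without any driver-specific libdrm knowledge. A minimal userspace sketch follows, assuming a KMS device node at /dev/dri/card0 (the path and header locations vary by distribution; nothing here is driver-specific):

#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;
	struct drm_mode_destroy_dumb destroy;
	void *fb;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Ask the driver for a linear 640x480, 32 bpp buffer. */
	memset(&create, 0, sizeof(create));
	create.width = 640;
	create.height = 480;
	create.bpp = 32;
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* Translate the returned GEM handle into a fake mmap offset. */
	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return 1;

	/* Map the buffer and clear it; pitch/size come from the kernel. */
	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, map.offset);
	if (fb != MAP_FAILED)
		memset(fb, 0, create.size);

	memset(&destroy, 0, sizeof(destroy));
	destroy.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
	close(fd);
	return 0;
}

The handle from CREATE_DUMB can also be passed to DRM_IOCTL_MODE_ADDFB to attach the buffer to a CRTC; drivers that return -ENOSYS from these hooks simply do not support dumb buffers (see the DRM_CAP_DUMB_BUFFER query added in drm_ioctl.c below).
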
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 271835a71570..93a112d45c1a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -67,6 +67,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -150,7 +151,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -234,49 +238,6 @@ int drm_lastclose(struct drm_device * dev)
return 0;
}
-/**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes an array of drm_device structures, and attempts to
- * initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_init(struct drm_driver *driver)
-{
- DRM_DEBUG("\n");
- INIT_LIST_HEAD(&driver->device_list);
-
- if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
- return drm_platform_init(driver);
- else
- return drm_pci_init(driver);
-}
-
-EXPORT_SYMBOL(drm_init);
-
-void drm_exit(struct drm_driver *driver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(&driver->pci_driver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
-
- DRM_INFO("Module unloaded\n");
-}
-
-EXPORT_SYMBOL(drm_exit);
-
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a245d17165ae..adc9358c9bec 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -230,24 +230,32 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
+ int ret, retries = 5;
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
+ /* The core i2c driver will automatically retry the transfer if the
+ * adapter reports EAGAIN. However, we find that bit-banging transfers
+ * are susceptible to errors under a heavily loaded machine and
+ * generate spurious NAKs and timeouts. Retrying the transfer
+ * of the individual block a few times seems to overcome this.
+ */
+ do {
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+ ret = i2c_transfer(adapter, msgs, 2);
+ } while (ret != 2 && --retries);
- return -1;
+ return ret == 2 ? 0 : -1;
}
static u8 *
@@ -449,12 +457,11 @@ static void edid_fixup_preferred(struct drm_connector *connector,
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
+ struct drm_display_mode *mode = NULL;
int i;
- struct drm_display_mode *ptr, *mode;
- mode = NULL;
for (i = 0; i < drm_num_dmt_modes; i++) {
- ptr = &drm_dmt_modes[i];
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
@@ -885,7 +892,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
}
static bool
-mode_is_rb(struct drm_display_mode *mode)
+mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
@@ -894,7 +901,8 @@ mode_is_rb(struct drm_display_mode *mode)
}
static bool
-mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_hsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
{
int hsync, hmin, hmax;
@@ -910,7 +918,8 @@ mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
}
static bool
-mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_vsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
{
int vsync, vmin, vmax;
@@ -941,7 +950,7 @@ range_pixel_clock(struct edid *edid, u8 *t)
}
static bool
-mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
struct detailed_timing *timing)
{
u32 max_clock;
@@ -1288,7 +1297,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
/**
* Search EDID for CEA extension block.
*/
-static u8 *drm_find_cea_extension(struct edid *edid)
+u8 *drm_find_cea_extension(struct edid *edid)
{
u8 *edid_ext = NULL;
int i;
@@ -1309,6 +1318,7 @@ static u8 *drm_find_cea_extension(struct edid *edid)
return edid_ext;
}
+EXPORT_SYMBOL(drm_find_cea_extension);
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
@@ -1472,7 +1482,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
{
int i, count, num_modes = 0;
- struct drm_display_mode *mode, *ptr;
+ struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -1482,7 +1492,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
vdisplay = 0;
for (i = 0; i < count; i++) {
- ptr = &drm_dmt_modes[i];
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
* Only when two are valid, they will be used to check
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index 6eb7592e152f..5f2064489fd5 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -32,7 +32,7 @@
* This table is copied from xfree86/modes/xf86EdidModes.c.
* But the mode with Reduced blank feature is deleted.
*/
-static struct drm_display_mode drm_dmt_modes[] = {
+static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
@@ -266,7 +266,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-static struct drm_display_mode edid_est_modes[] = {
+static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1ce9d98..11d7a72c22d9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -342,9 +342,22 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+ bool error = false;
+ int i, ret;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+ ret = drm_crtc_helper_set_config(mode_set);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
+
bool drm_fb_helper_force_kernel_mode(void)
{
- int i = 0;
bool ret, error = false;
struct drm_fb_helper *helper;
@@ -352,12 +365,12 @@ bool drm_fb_helper_force_kernel_mode(void)
return false;
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
- for (i = 0; i < helper->crtc_count; i++) {
- struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
- ret = drm_crtc_helper_set_config(mode_set);
- if (ret)
- error = true;
- }
+ if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+
+ ret = drm_fb_helper_restore_fbdev_mode(helper);
+ if (ret)
+ error = true;
}
return error;
}
@@ -627,6 +640,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
palette[regno] = value;
return 0;
}
@@ -672,7 +690,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
- int i, rc = 0;
+ int i, j, rc = 0;
int start;
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +703,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
transp = cmap->transp;
start = cmap->start;
- for (i = 0; i < cmap->len; i++) {
+ for (j = 0; j < cmap->len; j++) {
u16 hred, hgreen, hblue, htransp = 0xffff;
hred = *red++;
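
drm_fb_helper_restore_fbdev_mode() factors the per-helper loop out of drm_fb_helper_force_kernel_mode() so that a driver can restore the fbdev configuration itself, for instance from its ->lastclose hook once the last userspace master exits. A hedged sketch of such a caller, in kernel C; "my_driver_private" and its "fbdev" member are hypothetical placeholders for the driver's own fb helper state:

static void my_lastclose(struct drm_device *dev)
{
	struct my_driver_private *priv = dev->dev_private;
	bool error;

	mutex_lock(&dev->mode_config.mutex);
	/* Returns true on error, per the helper above. */
	error = drm_fb_helper_restore_fbdev_mode(&priv->fbdev->helper);
	if (error)
		DRM_DEBUG("failed to restore fbdev mode\n");
	mutex_unlock(&dev->mode_config.mutex);
}
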
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ea1c4b019ebf..74e4ff578017 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -101,7 +101,7 @@ drm_gem_init(struct drm_device *dev)
dev->mm_private = mm;
- if (drm_ht_create(&mm->offset_hash, 19)) {
+ if (drm_ht_create(&mm->offset_hash, 12)) {
kfree(mm);
return -ENOMEM;
}
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
/**
* Removes the mapping from handle to filp for this object.
*/
-static int
+int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
@@ -214,6 +214,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
return 0;
}
+EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* Create a handle for this object. This adds a handle reference
@@ -498,11 +499,12 @@ EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
drm_vm_close_locked(vma);
drm_gem_object_unreference(obj);
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index a93d7b4ddaa6..e3a75688f3cd 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -39,27 +39,18 @@
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
- unsigned int i;
+ unsigned int size = 1 << order;
- ht->size = 1 << order;
ht->order = order;
- ht->fill = 0;
ht->table = NULL;
- ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
- if (!ht->use_vmalloc) {
- ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
- }
- if (!ht->table) {
- ht->use_vmalloc = 1;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
- }
+ if (size <= PAGE_SIZE / sizeof(*ht->table))
+ ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+ else
+ ht->table = vzalloc(size*sizeof(*ht->table));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
- for (i=0; i< ht->size; ++i) {
- INIT_HLIST_HEAD(&ht->table[i]);
- }
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
@@ -180,7 +171,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init(list);
- ht->fill--;
return 0;
}
return -EINVAL;
@@ -189,7 +179,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init(&item->head);
- ht->fill--;
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
@@ -197,10 +186,10 @@ EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
- if (ht->use_vmalloc)
- vfree(ht->table);
- else
+ if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
kfree(ht->table);
+ else
+ vfree(ht->table);
ht->table = NULL;
}
}
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index be9a9c07d152..ab1162da70f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -47,30 +47,19 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
-
+ const char *bus_name;
if (!master)
return 0;
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->platform_device->name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s\n",
- dev->driver->platform_device->name);
- }
+ bus_name = dev->driver->bus->get_name(dev);
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+ bus_name,
+ dev_name(dev->dev), master->unique);
} else {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- dev_name(dev->dev));
- }
+ seq_printf(m, "%s %s\n",
+ bus_name, dev_name(dev->dev));
}
-
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 47db4df37a69..904d7e9c8e47 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -96,7 +96,7 @@ int drm_setunique(struct drm_device *dev, void *data,
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
- int domain, bus, slot, func, ret;
+ int ret;
if (master->unique_len || master->unique)
return -EBUSY;
@@ -104,50 +104,12 @@ int drm_setunique(struct drm_device *dev, void *data,
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
- master->unique_len = u->unique_len;
- master->unique_size = u->unique_len + 1;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (copy_from_user(master->unique, u->unique, master->unique_len)) {
- ret = -EFAULT;
- goto err;
- }
-
- master->unique[master->unique_len] = '\0';
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
+ if (!dev->driver->bus->set_unique)
+ return -EINVAL;
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3) {
- ret = -EINVAL;
+ ret = dev->driver->bus->set_unique(dev, master, u);
+ if (ret)
goto err;
- }
-
- domain = bus >> 8;
- bus &= 0xff;
-
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn))) {
- ret = -EINVAL;
- goto err;
- }
return 0;
@@ -159,74 +121,15 @@ err:
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
- int len, ret;
+ int ret;
if (master->unique != NULL)
drm_unset_busid(dev, master);
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- master->unique_len = 10 + strlen(dev->platformdev->name);
- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "platform:%s", dev->platformdev->name);
-
- if (len > master->unique_len) {
- DRM_ERROR("Unique buffer overflowed\n");
- ret = -EINVAL;
- goto err;
- }
-
- dev->devname =
- kmalloc(strlen(dev->platformdev->name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->platformdev->name,
- master->unique);
-
- } else {
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len) {
- DRM_ERROR("buffer overflow");
- ret = -EINVAL;
- goto err;
- } else
- master->unique_len = len;
-
- dev->devname =
- kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
- }
-
+ ret = dev->driver->bus->set_busid(dev, master);
+ if (ret)
+ goto err;
return 0;
-
err:
drm_unset_busid(dev, master);
return ret;
@@ -365,6 +268,28 @@ int drm_getstats(struct drm_device *dev, void *data,
}
/**
+ * Get device/driver capabilities
+ */
+int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_get_cap *req = data;
+
+ req->value = 0;
+ switch (req->capability) {
+ case DRM_CAP_DUMB_BUFFER:
+ if (dev->driver->dumb_create)
+ req->value = 1;
+ break;
+ case DRM_CAP_VBLANK_HIGH_CRTC:
+ req->value = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
* Setversion ioctl.
*
* \param inode device inode.
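
The new DRM_IOCTL_GET_CAP lets userspace probe for features before relying on them, instead of issuing an ioctl and interpreting the failure. A short userspace sketch querying DRM_CAP_DUMB_BUFFER (device path is an example):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int supports_dumb(int fd)
{
	struct drm_get_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.capability = DRM_CAP_DUMB_BUFFER;
	/* Kernels predating this patch lack GET_CAP and return -EINVAL. */
	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap))
		return 0;
	return cap.value != 0;
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;
	printf("dumb buffers: %s\n", supports_dumb(fd) ? "yes" : "no");
	close(fd);
	return 0;
}
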
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3dadfa2a8528..a1f12cb043de 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -74,23 +74,13 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
struct drm_irq_busid *p = data;
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+ if (!dev->driver->bus->irq_by_busid)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
- return -EINVAL;
-
- p->irq = dev->pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
-
- return 0;
+ return dev->driver->bus->irq_by_busid(dev, p);
}
/*
@@ -164,8 +154,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* available. In that case we can't account for this and just
* hope for the best.
*/
- if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
+ }
/* Invalidate all timestamps while vblank irq's are off. */
clear_vblank_timestamps(dev, crtc);
@@ -491,6 +483,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
+ /* Fields of interlaced scanout modes are only halve a frame duration.
+ * Double the dotclock to get halve the frame-/line-/pixelduration.
+ */
+ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+ dotclock *= 2;
+
/* Valid dotclock? */
if (dotclock > 0) {
/* Convert scanline length in pixels and video dot clock to
@@ -603,14 +601,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EAGAIN;
}
- /* Don't know yet how to handle interlaced or
- * double scan modes. Just no-op for now.
- */
- if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
- return -ENOTSUPP;
- }
-
/* Get current scanout position with system timestamp.
* Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
* if single query takes longer than max_error nanoseconds.
@@ -858,10 +848,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
if (rc) {
tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
- smp_wmb();
}
+ smp_mb__before_atomic_inc();
atomic_add(diff, &dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
}
/**
@@ -941,11 +932,34 @@ EXPORT_SYMBOL(drm_vblank_put);
void drm_vblank_off(struct drm_device *dev, int crtc)
{
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
unsigned long irqflags;
+ unsigned int seq;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
+
+ /* Send any queued vblank events, lest the natives grow disquiet */
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+ DRM_DEBUG("Sending premature vblank event on disable: \
+ wanted %d, current %d\n",
+ e->event.sequence, seq);
+
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, e->pipe);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
+ }
+
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
@@ -1011,7 +1025,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
+ int ret = 0;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
@@ -1133,7 +1148,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
{
union drm_wait_vblank *vblwait = data;
int ret = 0;
- unsigned int flags, seq, crtc;
+ unsigned int flags, seq, crtc, high_crtc;
if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
return -EINVAL;
@@ -1142,16 +1157,21 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
return -EINVAL;
if (vblwait->request.type &
- ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK)) {
DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
vblwait->request.type,
- (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK));
return -EINVAL;
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-
+ high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_crtc)
+ crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ else
+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
if (crtc >= dev->num_crtcs)
return -EINVAL;
@@ -1293,15 +1313,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* e.g., due to spurious vblank interrupts. We need to
* ignore those for accounting.
*/
- if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
/* Store new timestamp in ringbuffer. */
vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
- smp_wmb();
/* Increment cooked vblank count. This also atomically commits
* the timestamp computed above.
*/
+ smp_mb__before_atomic_inc();
atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
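
The _DRM_VBLANK_HIGH_CRTC_MASK bits decoded above extend DRM_IOCTL_WAIT_VBLANK beyond the legacy primary/secondary pair: the CRTC index is packed into the request type field. A hedged userspace sketch of the encoding side, assuming headers from a kernel that defines _DRM_VBLANK_HIGH_CRTC_SHIFT/_DRM_VBLANK_HIGH_CRTC_MASK (added alongside this patch):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int wait_vblank_on_crtc(int fd, unsigned int crtc)
{
	union drm_wait_vblank vbl;
	unsigned int type = _DRM_VBLANK_RELATIVE;

	if (crtc > 1)
		/* Encode the CRTC index in the new high-crtc field. */
		type |= (crtc << _DRM_VBLANK_HIGH_CRTC_SHIFT) &
			_DRM_VBLANK_HIGH_CRTC_MASK;
	else if (crtc == 1)
		/* Legacy flag still works for the second CRTC. */
		type |= _DRM_VBLANK_SECONDARY;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = type;
	vbl.request.sequence = 1;	/* one vblank from now */
	return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
}

Userspace can check DRM_CAP_VBLANK_HIGH_CRTC (see drm_getcap above) before using the extended encoding.
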
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c59515ba7e69..5d00b0fc0d91 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
else {
child =
list_entry(mm->unused_nodes.next,
- struct drm_mm_node, free_stack);
- list_del(&child->free_stack);
+ struct drm_mm_node, node_list);
+ list_del(&child->node_list);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
@@ -94,195 +94,242 @@ int drm_mm_pre_get(struct drm_mm *mm)
return ret;
}
++mm->num_unused;
- list_add_tail(&node->free_stack, &mm->unused_nodes);
+ list_add_tail(&node->node_list, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
-static int drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size, int atomic)
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(mm, atomic);
- if (unlikely(child == NULL))
- return -ENOMEM;
-
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
+ return hole_node->start + hole_node->size;
+}
- list_add_tail(&child->node_list, &mm->node_list);
- list_add_tail(&child->free_stack, &mm->free_stack);
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ struct drm_mm_node *next_node =
+ list_entry(hole_node->node_list.next, struct drm_mm_node,
+ node_list);
- return 0;
+ return next_node->start;
}
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size,
- int atomic)
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
{
- struct drm_mm_node *child;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
- child = drm_mm_kmalloc(parent->mm, atomic);
- if (unlikely(child == NULL))
- return NULL;
+ BUG_ON(!hole_node->hole_follows || node->allocated);
- INIT_LIST_HEAD(&child->free_stack);
+ if (alignment)
+ tmp = hole_start % alignment;
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
+ if (!tmp) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
+ } else
+ wasted = alignment - tmp;
- list_add_tail(&child->node_list, &parent->node_list);
- INIT_LIST_HEAD(&child->free_stack);
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
- parent->size -= size;
- parent->start += size;
- return child;
-}
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ BUG_ON(node->start + node->size > hole_end);
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ } else {
+ node->hole_follows = 0;
+ }
+}
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
int atomic)
{
+ struct drm_mm_node *node;
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
- if (alignment)
- tmp = node->start % alignment;
+ drm_mm_insert_helper(hole_node, node, size, alignment);
- if (tmp) {
- align_splitoff =
- drm_mm_split_at_start(node, alignment - tmp, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
+ return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ struct drm_mm_node *hole_node;
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ hole_node = drm_mm_search_free(mm, size, alignment, 0);
+ if (!hole_node)
+ return -ENOSPC;
- return node;
+ drm_mm_insert_helper(hole_node, node, size, alignment);
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_insert_node);
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int atomic)
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
{
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
- unsigned wasted = 0;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+ BUG_ON(!hole_node->hole_follows || node->allocated);
- if (node->start < start)
- wasted += start - node->start;
+ if (hole_start < start)
+ wasted += start - hole_start;
if (alignment)
- tmp = ((node->start + wasted) % alignment);
+ tmp = (hole_start + wasted) % alignment;
if (tmp)
wasted += alignment - tmp;
- if (wasted) {
- align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
+
+ if (!wasted) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
}
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ BUG_ON(node->start + node->size > hole_end);
+ BUG_ON(node->start + node->size > end);
+
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
} else {
- node = drm_mm_split_at_start(node, size, atomic);
+ node->hole_follows = 0;
}
+}
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
+{
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
*/
-
-void drm_mm_put_block(struct drm_mm_node *cur)
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
{
+ struct drm_mm_node *hole_node;
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->node_list;
- struct list_head *root_head = &mm->node_list;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
+ hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+ start, end, 0);
+ if (!hole_node)
+ return -ENOSPC;
- int merged = 0;
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
- BUG_ON(cur->scanned_block || cur->scanned_prev_free
- || cur->scanned_next_free);
+ return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
- if (cur_head->prev != root_head) {
- prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, node_list);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node =
- list_entry(cur_head->next, struct drm_mm_node, node_list);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->node_list);
- list_del(&next_node->free_stack);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->free_stack,
- &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(next_node);
- spin_unlock(&mm->unused_lock);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
- }
- if (!merged) {
- cur->free = 1;
- list_add(&cur->free_stack, &mm->free_stack);
- } else {
- list_del(&cur->node_list);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->free_stack, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(cur);
- spin_unlock(&mm->unused_lock);
- }
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ BUG_ON(node->scanned_block || node->scanned_prev_free
+ || node->scanned_next_free);
+
+ prev_node =
+ list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+ if (node->hole_follows) {
+ BUG_ON(drm_mm_hole_node_start(node)
+ == drm_mm_hole_node_end(node));
+ list_del(&node->hole_stack);
+ } else
+ BUG_ON(drm_mm_hole_node_start(node)
+ != drm_mm_hole_node_end(node));
+
+ if (!prev_node->hole_follows) {
+ prev_node->hole_follows = 1;
+ list_add(&prev_node->hole_stack, &mm->hole_stack);
+ } else
+ list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+ list_del(&node->node_list);
+ node->allocated = 0;
}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+
+ drm_mm_remove_node(node);
+
+ spin_lock(&mm->unused_lock);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&node->node_list, &mm->unused_nodes);
+ ++mm->num_unused;
+ } else
+ kfree(node);
+ spin_unlock(&mm->unused_lock);
+}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
@@ -319,8 +366,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (!check_free_hole(entry->start, entry->start + entry->size,
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ BUG_ON(!entry->hole_follows);
+ if (!check_free_hole(drm_mm_hole_node_start(entry),
+ drm_mm_hole_node_end(entry),
size, alignment))
continue;
@@ -353,12 +402,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- unsigned long adj_start = entry->start < start ?
- start : entry->start;
- unsigned long adj_end = entry->start + entry->size > end ?
- end : entry->start + entry->size;
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+ start : drm_mm_hole_node_start(entry);
+ unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+ end : drm_mm_hole_node_end(entry);
+ BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@@ -376,6 +426,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+ list_replace(&old->node_list, &new->node_list);
+ list_replace(&old->node_list, &new->hole_stack);
+ new->hole_follows = old->hole_follows;
+ new->mm = old->mm;
+ new->start = old->start;
+ new->size = old->size;
+
+ old->allocated = 0;
+ new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
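A possible caller of drm_mm_replace_node(), sketched under the assumption that the old node came from drm_mm_get_block() and is therefore owned, and freed, by the caller; struct my_buffer is the illustrative wrapper from the sketch above:

/* Hedged sketch: transplant an existing allocation into an embedded
 * node. After the call, buf->node owns old's range and any following
 * hole; old is unlinked and, assuming it was kmalloc'ed by one of the
 * drm_mm_get_block() variants, can simply be freed. */
static void my_adopt_allocation(struct my_buffer *buf, struct drm_mm_node *old)
{
	drm_mm_replace_node(old, &buf->node);
	kfree(old);
}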
+
+/**
 * Initialize LRU scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
@@ -393,6 +460,7 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_check_range = 0;
+ mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
@@ -418,6 +486,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
+ mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
@@ -430,70 +499,42 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
- struct list_head *prev_free, *next_free;
- struct drm_mm_node *prev_node, *next_node;
+ struct drm_mm_node *prev_node;
+ unsigned long hole_start, hole_end;
unsigned long adj_start;
unsigned long adj_end;
mm->scanned_blocks++;
- prev_free = next_free = NULL;
-
- BUG_ON(node->free);
+ BUG_ON(node->scanned_block);
node->scanned_block = 1;
- node->free = 1;
-
- if (node->node_list.prev != &mm->node_list) {
- prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
- node_list);
-
- if (prev_node->free) {
- list_del(&prev_node->node_list);
- node->start = prev_node->start;
- node->size += prev_node->size;
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
- prev_node->scanned_prev_free = 1;
-
- prev_free = &prev_node->free_stack;
- }
- }
-
- if (node->node_list.next != &mm->node_list) {
- next_node = list_entry(node->node_list.next, struct drm_mm_node,
- node_list);
-
- if (next_node->free) {
- list_del(&next_node->node_list);
-
- node->size += next_node->size;
-
- next_node->scanned_next_free = 1;
-
- next_free = &next_node->free_stack;
- }
- }
-
- /* The free_stack list is not used for allocated objects, so these two
- * pointers can be abused (as long as no allocations in this memory
- * manager happens). */
- node->free_stack.prev = prev_free;
- node->free_stack.next = next_free;
+ node->scanned_preceeds_hole = prev_node->hole_follows;
+ prev_node->hole_follows = 1;
+ list_del(&node->node_list);
+ node->node_list.prev = &prev_node->node_list;
+ node->node_list.next = &mm->prev_scanned_node->node_list;
+ mm->prev_scanned_node = node;
+ hole_start = drm_mm_hole_node_start(prev_node);
+ hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
- adj_start = node->start < mm->scan_start ?
- mm->scan_start : node->start;
- adj_end = node->start + node->size > mm->scan_end ?
- mm->scan_end : node->start + node->size;
+ adj_start = hole_start < mm->scan_start ?
+ mm->scan_start : hole_start;
+ adj_end = hole_end > mm->scan_end ?
+ mm->scan_end : hole_end;
} else {
- adj_start = node->start;
- adj_end = node->start + node->size;
+ adj_start = hole_start;
+ adj_end = hole_end;
}
	if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
- mm->scan_hit_start = node->start;
- mm->scan_hit_size = node->size;
+ mm->scan_hit_start = hole_start;
+		mm->scan_hit_size = hole_end - hole_start;
return 1;
}
@@ -510,7 +551,7 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
* corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
- * immediatly following drm_mm_search_free with best_match = 0 will then return
+ * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
*
* Returns one if this block should be evicted, zero otherwise. Will always
@@ -519,39 +560,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
- struct drm_mm_node *prev_node, *next_node;
+ struct drm_mm_node *prev_node;
mm->scanned_blocks--;
BUG_ON(!node->scanned_block);
node->scanned_block = 0;
- node->free = 0;
-
- prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
- free_stack);
- next_node = list_entry(node->free_stack.next, struct drm_mm_node,
- free_stack);
- if (prev_node) {
- BUG_ON(!prev_node->scanned_prev_free);
- prev_node->scanned_prev_free = 0;
-
- list_add_tail(&prev_node->node_list, &node->node_list);
-
- node->start = prev_node->start + prev_node->size;
- node->size -= prev_node->size;
- }
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
- if (next_node) {
- BUG_ON(!next_node->scanned_next_free);
- next_node->scanned_next_free = 0;
-
- list_add(&next_node->node_list, &node->node_list);
-
- node->size -= next_node->size;
- }
-
- INIT_LIST_HEAD(&node->free_stack);
+ prev_node->hole_follows = node->scanned_preceeds_hole;
+ INIT_LIST_HEAD(&node->node_list);
+ list_add(&node->node_list, &prev_node->node_list);
	/* Only need to check for containment because start & size for the
* complete resulting free block (not just the desired part) is
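To see the whole protocol at once, here is a hedged sketch of an eviction helper in the style the comments describe. The my_buffer fields lru_link and scan_link, and the my_evict() callback, are assumptions for illustration; drm_mm_scan_remove_block() must be called on every block previously added, in reverse order, whether or not a hole was found.

/* Hedged sketch of the scan protocol. Step 1 feeds blocks in LRU order
 * until a fitting hole appears; step 2 restores every scanned block and
 * evicts only the blocks the scan picked. */
static int my_evict_something(struct drm_mm *mm, struct list_head *lru,
			      unsigned long size, unsigned alignment)
{
	struct my_buffer *buf, *tmp;
	LIST_HEAD(scan_list);
	int found = 0;

	drm_mm_init_scan(mm, size, alignment);

	list_for_each_entry(buf, lru, lru_link) {
		/* list_add() prepends, so walking scan_list later
		 * visits blocks in reverse insertion order. */
		list_add(&buf->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&buf->node)) {
			found = 1;
			break;
		}
	}

	list_for_each_entry_safe(buf, tmp, &scan_list, scan_link) {
		int evict = drm_mm_scan_remove_block(&buf->node);

		list_del(&buf->scan_link);
		if (found && evict)
			my_evict(buf);	/* ends up freeing buf->node */
	}

	return found ? 0 : -ENOSPC;
}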
@@ -568,7 +589,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
int drm_mm_clean(struct drm_mm * mm)
{
- struct list_head *head = &mm->node_list;
+ struct list_head *head = &mm->head_node.node_list;
return (head->next->next == head);
}
@@ -576,38 +597,40 @@ EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
- INIT_LIST_HEAD(&mm->node_list);
- INIT_LIST_HEAD(&mm->free_stack);
+ INIT_LIST_HEAD(&mm->hole_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
- return drm_mm_create_tail_node(mm, start, size, 0);
+ /* Clever trick to avoid a special case in the free hole tracking. */
+ INIT_LIST_HEAD(&mm->head_node.node_list);
+ INIT_LIST_HEAD(&mm->head_node.hole_stack);
+ mm->head_node.hole_follows = 1;
+ mm->head_node.scanned_block = 0;
+ mm->head_node.scanned_prev_free = 0;
+ mm->head_node.scanned_next_free = 0;
+ mm->head_node.mm = mm;
+ mm->head_node.start = start + size;
+ mm->head_node.size = start - mm->head_node.start;
+ list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+ return 0;
}
EXPORT_SYMBOL(drm_mm_init);
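The "clever trick" above relies on the hole helpers, which live in drm_mm.h rather than in this hunk; their assumed shape is roughly the following. With head_node.start = start + size and the deliberately wrapping size, drm_mm_hole_node_start(&mm->head_node) comes out to start again, so an empty manager exposes one ordinary hole covering [start, start + size).

/* Assumed shape of the helpers this file relies on: a node's hole
 * begins where the node ends and runs to the next node's start. For
 * the sentinel head_node, node_list is initially self-linked, so the
 * "next" node is the sentinel itself and the hole spans the full
 * managed range. */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next = list_entry(hole_node->node_list.next,
					      struct drm_mm_node, node_list);
	return next->start;
}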
void drm_mm_takedown(struct drm_mm * mm)
{
- struct list_head *bnode = mm->free_stack.next;
- struct drm_mm_node *entry;
- struct drm_mm_node *next;
+ struct drm_mm_node *entry, *next;
- entry = list_entry(bnode, struct drm_mm_node, free_stack);
-
- if (entry->node_list.next != &mm->node_list ||
- entry->free_stack.next != &mm->free_stack) {
+ if (!list_empty(&mm->head_node.node_list)) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
- list_del(&entry->free_stack);
- list_del(&entry->node_list);
- kfree(entry);
-
spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
- list_del(&entry->free_stack);
+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+ list_del(&entry->node_list);
kfree(entry);
--mm->num_unused;
}
@@ -620,19 +643,37 @@ EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+ unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+ hole_start = drm_mm_hole_node_start(&mm->head_node);
+ hole_end = drm_mm_hole_node_end(&mm->head_node);
+ hole_size = hole_end - hole_start;
+ if (hole_size)
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+
+ drm_mm_for_each_node(entry, mm) {
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
- entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ entry->size);
+ total_used += entry->size;
+
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+ }
}
- printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+ total = total_free + total_used;
+
+ printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
@@ -641,17 +682,34 @@ EXPORT_SYMBOL(drm_mm_debug_table);
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+ hole_start = drm_mm_hole_node_start(&mm->head_node);
+ hole_end = drm_mm_hole_node_end(&mm->head_node);
+ hole_size = hole_end - hole_start;
+ if (hole_size)
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+ total_free += hole_size;
+
+ drm_mm_for_each_node(entry, mm) {
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+ entry->start, entry->start + entry->size,
+ entry->size);
+ total_used += entry->size;
+ if (entry->hole_follows) {
+			hole_start = drm_mm_hole_node_start(entry);
+			hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+ total_free += hole_size;
+ }
}
- seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+ total = total_free + total_used;
+
+ seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 58e65f92c232..25bf87390f53 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -593,7 +593,7 @@ EXPORT_SYMBOL(drm_mode_height);
*
* Return @modes's hsync rate in kHz, rounded to the nearest int.
*/
-int drm_mode_hsync(struct drm_display_mode *mode)
+int drm_mode_hsync(const struct drm_display_mode *mode)
{
unsigned int calc_val;
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(drm_mode_hsync);
* If it is 70.288, it will return 70Hz.
* If it is 59.6, it will return 60Hz.
*/
-int drm_mode_vrefresh(struct drm_display_mode *mode)
+int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
@@ -725,7 +725,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
* a pointer to it. Used to create new instances of established modes.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f5bd9e590c80..e1aee4f6a7c6 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -125,6 +125,176 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
#ifdef CONFIG_PCI
+
+static int drm_get_pci_domain(struct drm_device *dev)
+{
+#ifndef __alpha__
+ /* For historical reasons, drm_get_pci_domain() is busticated
+ * on most archs and has to remain so for userspace interface
+ * < 1.4, except on alpha which was right from the beginning
+ */
+ if (dev->if_version < 0x10004)
+ return 0;
+#endif /* __alpha__ */
+
+ return pci_domain_nr(dev->pdev->bus);
+}
+
+static int drm_pci_get_irq(struct drm_device *dev)
+{
+ return dev->pdev->irq;
+}
+
+static const char *drm_pci_get_name(struct drm_device *dev)
+{
+ struct pci_driver *pdriver = dev->driver->kdriver.pci;
+ return pdriver->name;
+}
+
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret;
+ struct pci_driver *pdriver = dev->driver->kdriver.pci;
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "pci:%04x:%02x:%02x.%d",
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
+ PCI_SLOT(dev->pdev->devfn),
+ PCI_FUNC(dev->pdev->devfn));
+
+ if (len >= master->unique_len) {
+ DRM_ERROR("buffer overflow");
+ ret = -EINVAL;
+ goto err;
+ } else
+ master->unique_len = len;
+
+ dev->devname =
+ kmalloc(strlen(pdriver->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", pdriver->name,
+ master->unique);
+
+ return 0;
+err:
+ return ret;
+}
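A quick worked example of the string this builds (the device address is hypothetical); note that drm_pci_set_unique() below still accepts the legacy "PCI:bus:slot:func" spelling submitted by old userspace:

	char buf[40];

	/* Hypothetical device in domain 0, bus 1, slot 0, function 0: */
	snprintf(buf, sizeof(buf), "pci:%04x:%02x:%02x.%d", 0, 1, 0, 0);
	/* buf now holds "pci:0000:01:00.0" */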
+
+int drm_pci_set_unique(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_unique *u)
+{
+ int domain, bus, slot, func, ret;
+ const char *bus_name;
+
+ master->unique_len = u->unique_len;
+ master->unique_size = u->unique_len + 1;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (!master->unique) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (copy_from_user(master->unique, u->unique, master->unique_len)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ master->unique[master->unique_len] = '\0';
+
+ bus_name = dev->driver->bus->get_name(dev);
+ dev->devname = kmalloc(strlen(bus_name) +
+ strlen(master->unique) + 2, GFP_KERNEL);
+ if (!dev->devname) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", bus_name,
+ master->unique);
+
+ /* Return error if the busid submitted doesn't match the device's actual
+ * busid.
+ */
+ ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ domain = bus >> 8;
+ bus &= 0xff;
+
+ if ((domain != drm_get_pci_domain(dev)) ||
+ (bus != dev->pdev->bus->number) ||
+ (slot != PCI_SLOT(dev->pdev->devfn)) ||
+ (func != PCI_FUNC(dev->pdev->devfn))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+err:
+ return ret;
+}
+
+int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+{
+ if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+ (p->busnum & 0xff) != dev->pdev->bus->number ||
+ p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+ return -EINVAL;
+
+ p->irq = dev->pdev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+ p->irq);
+ return 0;
+}
+
+int drm_pci_agp_init(struct drm_device *dev)
+{
+ if (drm_core_has_AGP(dev)) {
+ if (drm_pci_device_is_agp(dev))
+ dev->agp = drm_agp_init(dev);
+ if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+ && (dev->agp == NULL)) {
+ DRM_ERROR("Cannot initialize the agpgart module.\n");
+ return -EINVAL;
+ }
+ if (drm_core_has_MTRR(dev)) {
+ if (dev->agp)
+ dev->agp->agp_mtrr =
+ mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+ }
+ }
+ return 0;
+}
+
+static struct drm_bus drm_pci_bus = {
+ .bus_type = DRIVER_BUS_PCI,
+ .get_irq = drm_pci_get_irq,
+ .get_name = drm_pci_get_name,
+ .set_busid = drm_pci_set_busid,
+ .set_unique = drm_pci_set_unique,
+ .agp_init = drm_pci_agp_init,
+};
+
/**
* Register.
*
@@ -219,7 +389,7 @@ err_g1:
EXPORT_SYMBOL(drm_get_pci_dev);
/**
- * PCI device initialization. Called via drm_init at module load time,
+ * PCI device initialization. Called directly from modules at load time.
*
* \return zero on success or a negative number on failure.
*
@@ -229,18 +399,24 @@ EXPORT_SYMBOL(drm_get_pci_dev);
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
-int drm_pci_init(struct drm_driver *driver)
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->kdriver.pci = pdriver;
+ driver->bus = &drm_pci_bus;
+
if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
+ return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
+ for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
+ pid = &pdriver->id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
@@ -265,10 +441,27 @@ int drm_pci_init(struct drm_driver *driver)
#else
-int drm_pci_init(struct drm_driver *driver)
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
return -1;
}
#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
/*@}*/
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(pdriver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ }
+ DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 92d1d0fb7b75..7223f06d8e58 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -109,8 +109,60 @@ err_g1:
}
EXPORT_SYMBOL(drm_get_platform_dev);
+static int drm_platform_get_irq(struct drm_device *dev)
+{
+ return platform_get_irq(dev->platformdev, 0);
+}
+
+static const char *drm_platform_get_name(struct drm_device *dev)
+{
+ return dev->platformdev->name;
+}
+
+static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret;
+
+ master->unique_len = 10 + strlen(dev->platformdev->name);
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "platform:%s", dev->platformdev->name);
+
+ if (len > master->unique_len) {
+ DRM_ERROR("Unique buffer overflowed\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev->devname =
+ kmalloc(strlen(dev->platformdev->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", dev->platformdev->name,
+ master->unique);
+ return 0;
+err:
+ return ret;
+}
+
+static struct drm_bus drm_platform_bus = {
+ .bus_type = DRIVER_BUS_PLATFORM,
+ .get_irq = drm_platform_get_irq,
+ .get_name = drm_platform_get_name,
+ .set_busid = drm_platform_set_busid,
+};
+
/**
- * Platform device initialization. Called via drm_init at module load time,
+ * Platform device initialization. Called directly from modules.
*
* \return zero on success or a negative number on failure.
*
@@ -121,7 +173,24 @@ EXPORT_SYMBOL(drm_get_platform_dev);
* after the initialization for driver customization.
*/
-int drm_platform_init(struct drm_driver *driver)
+int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
{
- return drm_get_platform_dev(driver->platform_device, driver);
+ DRM_DEBUG("\n");
+
+ driver->kdriver.platform_device = platform_device;
+ driver->bus = &drm_platform_bus;
+ INIT_LIST_HEAD(&driver->device_list);
+ return drm_get_platform_dev(platform_device, driver);
+}
+EXPORT_SYMBOL(drm_platform_init);
+
+void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ DRM_INFO("Module unloaded\n");
}
+EXPORT_SYMBOL(drm_platform_exit);
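For comparison with the PCI conversion of i810 further down, a hedged sketch of how a platform driver's module glue would use these entry points; the mydrm_* names and the `driver` structure are hypothetical:

static struct platform_device *mydrm_pdev;

static int __init mydrm_init(void)
{
	mydrm_pdev = platform_device_register_simple("mydrm", -1, NULL, 0);
	if (IS_ERR(mydrm_pdev))
		return PTR_ERR(mydrm_pdev);
	return drm_platform_init(&driver, mydrm_pdev);
}

static void __exit mydrm_exit(void)
{
	drm_platform_exit(&driver, mydrm_pdev);
	platform_device_unregister(mydrm_pdev);
}

module_init(mydrm_init);
module_exit(mydrm_exit);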
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
index 463aed9403db..34664587a74e 100644
--- a/drivers/gpu/drm/drm_sman.c
+++ b/drivers/gpu/drm/drm_sman.c
@@ -59,9 +59,7 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
{
int ret = 0;
- sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
- sizeof(*sman->mm),
- GFP_KERNEL);
+ sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
if (!sman->mm) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index d59edc18301f..001273d57f2d 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -269,25 +269,14 @@ int drm_fill_in_dev(struct drm_device *dev,
dev->driver = driver;
- if (drm_core_has_AGP(dev)) {
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- retcode = -EINVAL;
+ if (dev->driver->bus->agp_init) {
+ retcode = dev->driver->bus->agp_init(dev);
+ if (retcode)
goto error_out_unreg;
- }
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr =
- mtrr_add(dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
- }
}
+
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
@@ -425,7 +414,6 @@ int drm_put_minor(struct drm_minor **minor_p)
*
* Cleans up all DRM device, calling drm_lastclose().
*
- * \sa drm_init
*/
void drm_put_dev(struct drm_device *dev)
{
@@ -475,6 +463,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
+ list_del(&dev->driver_item);
if (dev->devname) {
kfree(dev->devname);
dev->devname = NULL;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 85da4c40694c..2eee8e016b38 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -158,8 +158,15 @@ static ssize_t status_show(struct device *device,
{
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
+ int ret;
+
+ ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex);
+ if (ret)
+ return ret;
status = connector->funcs->detect(connector, true);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
new file mode 100644
index 000000000000..206d2300d873
--- /dev/null
+++ b/drivers/gpu/drm/drm_usb.c
@@ -0,0 +1,117 @@
+#include "drmP.h"
+#include <linux/usb.h>
+
+#ifdef CONFIG_USB
+int drm_get_usb_dev(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ struct usb_device *usbdev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ usbdev = interface_to_usbdev(interface);
+ dev->usbdev = usbdev;
+ dev->dev = &usbdev->dev;
+
+ mutex_lock(&drm_global_mutex);
+
+ ret = drm_fill_in_dev(dev, NULL, driver);
+ if (ret) {
+ printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+ goto err_g1;
+ }
+
+ usb_set_intfdata(interface, dev);
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g1;
+
+ ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_g2;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, 0);
+ if (ret)
+ goto err_g3;
+ }
+
+ /* setup the grouping for the legacy output */
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_g3;
+
+ list_add_tail(&dev->driver_item, &driver->device_list);
+
+ mutex_unlock(&drm_global_mutex);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary->index);
+
+ return 0;
+
+err_g3:
+ drm_put_minor(&dev->primary);
+err_g2:
+ drm_put_minor(&dev->control);
+err_g1:
+ kfree(dev);
+ mutex_unlock(&drm_global_mutex);
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_get_usb_dev);
+
+static int drm_usb_get_irq(struct drm_device *dev)
+{
+ return 0;
+}
+
+static const char *drm_usb_get_name(struct drm_device *dev)
+{
+ return "USB";
+}
+
+static int drm_usb_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return 0;
+}
+
+static struct drm_bus drm_usb_bus = {
+ .bus_type = DRIVER_BUS_USB,
+ .get_irq = drm_usb_get_irq,
+ .get_name = drm_usb_get_name,
+ .set_busid = drm_usb_set_busid,
+};
+
+int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
+{
+ int res;
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->kdriver.usb = udriver;
+ driver->bus = &drm_usb_bus;
+
+ res = usb_register(udriver);
+ return res;
+}
+EXPORT_SYMBOL(drm_usb_init);
+
+void drm_usb_exit(struct drm_driver *driver,
+ struct usb_driver *udriver)
+{
+ usb_deregister(udriver);
+}
+EXPORT_SYMBOL(drm_usb_exit);
+#endif
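Since no in-tree user of the USB path appears in this diff, a hedged sketch of how a USB DRM driver would hook into these entry points; all mydrm_* names and the VID/PID are hypothetical:

static const struct usb_device_id mydrm_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
	{ },
};

static int mydrm_usb_probe(struct usb_interface *interface,
			   const struct usb_device_id *id)
{
	return drm_get_usb_dev(interface, id, &driver);
}

static void mydrm_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	drm_put_dev(dev);
}

static struct usb_driver mydrm_usb_driver = {
	.name = "mydrm",
	.probe = mydrm_usb_probe,
	.disconnect = mydrm_usb_disconnect,
	.id_table = mydrm_id_table,
};

static int __init mydrm_init(void)
{
	return drm_usb_init(&driver, &mydrm_usb_driver);
}

static void __exit mydrm_exit(void)
{
	drm_usb_exit(&driver, &mydrm_usb_driver);
}

module_init(mydrm_init);
module_exit(mydrm_exit);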
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ff33e53bbbf8..8f371e8d630f 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -37,7 +37,6 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
@@ -94,7 +93,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
- lock_kernel();
dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
@@ -104,7 +102,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
vma->vm_file = filp;
buf_priv->currently_mapped = I810_BUF_MAPPED;
- unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
@@ -116,7 +113,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = i810_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
.llseek = noop_llseek,
@@ -1242,19 +1239,6 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
return 0;
}
-/*
- * call the drm_ioctl under the big kernel lock because
- * to lock against the i810_mmap_buffers function.
- */
-long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
- lock_kernel();
- ret = drm_ioctl(file, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 88bcd331e7c5..6f98d059f68a 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,18 +57,13 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = i810_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -77,15 +72,24 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i810_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init i810_init(void)
{
+ if (num_possible_cpus() > 1) {
+ pr_err("drm/i810 does not support SMP\n");
+ return -EINVAL;
+ }
driver.num_ioctls = i810_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &i810_pci_driver);
}
static void __exit i810_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &i810_pci_driver);
}
module_init(i810_init);
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile
deleted file mode 100644
index c642ee0b238c..000000000000
--- a/drivers/gpu/drm/i830/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-ccflags-y := -Iinclude/drm
-i830-y := i830_drv.o i830_dma.o i830_irq.o
-
-obj-$(CONFIG_DRM_I830) += i830.o
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
deleted file mode 100644
index ca6f31ff0eec..000000000000
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ /dev/null
@@ -1,1560 +0,0 @@
-/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/smp_lock.h>
-#include <linux/pagemap.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-
-#define I830_BUF_FREE 2
-#define I830_BUF_CLIENT 1
-#define I830_BUF_HARDWARE 0
-
-#define I830_BUF_UNMAPPED 0
-#define I830_BUF_MAPPED 1
-
-static struct drm_buf *i830_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int used;
-
- /* Linear search might not be the best solution */
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
- I830_BUF_CLIENT);
- if (used == I830_BUF_FREE)
- return buf;
- }
- return NULL;
-}
-
-/* This should only be called if the buffer is not sent to the hardware
- * yet, the hardware updates in use for us once its on the ring buffer.
- */
-
-static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int used;
-
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
- if (used != I830_BUF_CLIENT) {
- DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- drm_i830_private_t *dev_priv;
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
-
- lock_kernel();
- dev = priv->minor->dev;
- dev_priv = dev->dev_private;
- buf = dev_priv->mmap_buffer;
- buf_priv = buf->dev_private;
-
- vma->vm_flags |= (VM_IO | VM_DONTCOPY);
- vma->vm_file = filp;
-
- buf_priv->currently_mapped = I830_BUF_MAPPED;
- unlock_kernel();
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-static const struct file_operations i830_buffer_fops = {
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = i830_ioctl,
- .mmap = i830_mmap_buffers,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
-};
-
-static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_private_t *dev_priv = dev->dev_private;
- const struct file_operations *old_fops;
- unsigned long virtual;
- int retcode = 0;
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- old_fops = file_priv->filp->f_op;
- file_priv->filp->f_op = &i830_buffer_fops;
- dev_priv->mmap_buffer = buf;
- virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
- MAP_SHARED, buf->bus_address);
- dev_priv->mmap_buffer = NULL;
- file_priv->filp->f_op = old_fops;
- if (IS_ERR((void *)virtual)) { /* ugh */
- /* Real error */
- DRM_ERROR("mmap error\n");
- retcode = PTR_ERR((void *)virtual);
- buf_priv->virtual = NULL;
- } else {
- buf_priv->virtual = (void __user *)virtual;
- }
- up_write(&current->mm->mmap_sem);
-
- return retcode;
-}
-
-static int i830_unmap_buffer(struct drm_buf *buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int retcode = 0;
-
- if (buf_priv->currently_mapped != I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- retcode = do_munmap(current->mm,
- (unsigned long)buf_priv->virtual,
- (size_t) buf->total);
- up_write(&current->mm->mmap_sem);
-
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- buf_priv->virtual = NULL;
-
- return retcode;
-}
-
-static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d,
- struct drm_file *file_priv)
-{
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
- int retcode = 0;
-
- buf = i830_freelist_get(dev);
- if (!buf) {
- retcode = -ENOMEM;
- DRM_DEBUG("retcode=%d\n", retcode);
- return retcode;
- }
-
- retcode = i830_map_buffer(buf, file_priv);
- if (retcode) {
- i830_freelist_put(dev, buf);
- DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
- return retcode;
- }
- buf->file_priv = file_priv;
- buf_priv = buf->dev_private;
- d->granted = 1;
- d->request_idx = buf->idx;
- d->request_size = buf->total;
- d->virtual = buf_priv->virtual;
-
- return retcode;
-}
-
-static int i830_dma_cleanup(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- if (dev->dev_private) {
- int i;
- drm_i830_private_t *dev_priv =
- (drm_i830_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start)
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- if (dev_priv->hw_status_page) {
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- dev_priv->hw_status_page,
- dev_priv->dma_status_page);
- /* Need to rewrite hardware status page */
- I830_WRITE(0x02080, 0x1ffff000);
- }
-
- kfree(dev->dev_private);
- dev->dev_private = NULL;
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- if (buf_priv->kernel_virtual && buf->total)
- drm_core_ioremapfree(&buf_priv->map, dev);
- }
- }
- return 0;
-}
-
-int i830_wait_ring(struct drm_device *dev, int n, const char *caller)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
- int iters = 0;
- unsigned long end;
- unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-
- end = jiffies + (HZ * 3);
- while (ring->space < n) {
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head != last_head) {
- end = jiffies + (HZ * 3);
- last_head = ring->head;
- }
-
- iters++;
- if (time_before(end, jiffies)) {
- DRM_ERROR("space: %d wanted %d\n", ring->space, n);
- DRM_ERROR("lockup\n");
- goto out_wait_ring;
- }
- udelay(1);
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
- }
-
-out_wait_ring:
- return iters;
-}
-
-static void i830_kernel_lost_context(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
-
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head == ring->tail)
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
-}
-
-static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int my_idx = 36;
- u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
- int i;
-
- if (dma->buf_count > 1019) {
- /* Not enough space in the status page for the freelist */
- return -EINVAL;
- }
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- buf_priv->in_use = hw_status++;
- buf_priv->my_use_idx = my_idx;
- my_idx += 4;
-
- *buf_priv->in_use = I830_BUF_FREE;
-
- buf_priv->map.offset = buf->bus_address;
- buf_priv->map.size = buf->total;
- buf_priv->map.type = _DRM_AGP;
- buf_priv->map.flags = 0;
- buf_priv->map.mtrr = 0;
-
- drm_core_ioremap(&buf_priv->map, dev);
- buf_priv->kernel_virtual = buf_priv->map.handle;
- }
- return 0;
-}
-
-static int i830_dma_initialize(struct drm_device *dev,
- drm_i830_private_t *dev_priv,
- drm_i830_init_t *init)
-{
- struct drm_map_list *r_list;
-
- memset(dev_priv, 0, sizeof(drm_i830_private_t));
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->map->type == _DRM_SHM &&
- r_list->map->flags & _DRM_CONTAINS_LOCK) {
- dev_priv->sarea_map = r_list->map;
- break;
- }
- }
-
- if (!dev_priv->sarea_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find sarea!\n");
- return -EINVAL;
- }
- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
- if (!dev->agp_buffer_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find dma buffer map!\n");
- return -EINVAL;
- }
-
- dev_priv->sarea_priv = (drm_i830_sarea_t *)
- ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
-
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
-
- dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = _DRM_AGP;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->w = init->w;
- dev_priv->h = init->h;
- dev_priv->pitch = init->pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->front_offset = init->front_offset;
-
- dev_priv->front_di1 = init->front_offset | init->pitch_bits;
- dev_priv->back_di1 = init->back_offset | init->pitch_bits;
- dev_priv->zi1 = init->depth_offset | init->pitch_bits;
-
- DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
- DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
- DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
- DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);
-
- dev_priv->cpp = init->cpp;
- /* We are using separate values as placeholders for mechanisms for
- * private backbuffer/depthbuffer usage.
- */
-
- dev_priv->back_pitch = init->back_pitch;
- dev_priv->depth_pitch = init->depth_pitch;
- dev_priv->do_boxes = 0;
- dev_priv->use_mi_batchbuffer_start = 0;
-
- /* Program Hardware Status Page */
- dev_priv->hw_status_page =
- pci_alloc_consistent(dev->pdev, PAGE_SIZE,
- &dev_priv->dma_status_page);
- if (!dev_priv->hw_status_page) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
-
- I830_WRITE(0x02080, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
-
- /* Now we need to init our freelist */
- if (i830_freelist_init(dev, dev_priv) != 0) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Not enough space in the status page for"
- " the freelist\n");
- return -ENOMEM;
- }
- dev->dev_private = (void *)dev_priv;
-
- return 0;
-}
-
-static int i830_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv;
- drm_i830_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case I830_INIT_DMA:
- dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i830_dma_initialize(dev, dev_priv, init);
- break;
- case I830_CLEANUP_DMA:
- retcode = i830_dma_cleanup(dev);
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define ST1_ENABLE (1<<16)
-#define ST1_MASK (0xffff)
-
-/* Most efficient way to verify state for the i830 is as it is
- * emitted. Non-conformant state is silently dropped.
- */
-static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4);
-
- for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD);
- OUT_RING(code[I830_CTXREG_BLENDCOLR]);
- j += 2;
-
- for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD);
- OUT_RING(code[I830_CTXREG_MCSB1]);
- j += 2;
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
- (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) ==
- (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) {
-
- BEGIN_LP_RING(I830_TEX_SETUP_SIZE);
-
- OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */
- OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */
- OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */
- OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */
- OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */
- OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */
-
- for (i = 6; i < I830_TEX_SETUP_SIZE; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
- } else
- printk("rejected packet %x\n", code[0]);
-}
-
-static void i830EmitTexBlendVerified(struct drm_device *dev,
- unsigned int *code, unsigned int num)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (!num)
- return;
-
- BEGIN_LP_RING(num + 1);
-
- for (i = 0; i < num; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitTexPalette(struct drm_device *dev,
- unsigned int *palette, int number, int is_shared)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i;
- RING_LOCALS;
-
- return;
-
- BEGIN_LP_RING(258);
-
- if (is_shared == 1) {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
- MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
- } else {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
- }
- for (i = 0; i < 256; i++)
- OUT_RING(palette[i]);
- OUT_RING(0);
- /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
- */
-}
-
-/* Need to do some additional checking when setting the dest buffer.
- */
-static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);
-
- tmp = code[I830_DESTREG_CBUFADDR];
- if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
- if (((int)outring) & 8) {
- OUT_RING(0);
- OUT_RING(0);
- }
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_COLOR_BACK |
- BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
- BUF_3D_USE_FENCE);
- OUT_RING(tmp);
- OUT_RING(0);
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
- BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
- OUT_RING(dev_priv->zi1);
- OUT_RING(0);
- } else {
- DRM_ERROR("bad di1 %x (allow %x or %x)\n",
- tmp, dev_priv->front_di1, dev_priv->back_di1);
- }
-
- /* invarient:
- */
-
- OUT_RING(GFX_OP_DESTBUFFER_VARS);
- OUT_RING(code[I830_DESTREG_DV1]);
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(code[I830_DESTREG_DR1]);
- OUT_RING(code[I830_DESTREG_DR2]);
- OUT_RING(code[I830_DESTREG_DR3]);
- OUT_RING(code[I830_DESTREG_DR4]);
-
- /* Need to verify this */
- tmp = code[I830_DESTREG_SENABLE];
- if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
- OUT_RING(tmp);
- } else {
- DRM_ERROR("bad scissor enable\n");
- OUT_RING(0);
- }
-
- OUT_RING(GFX_OP_SCISSOR_RECT);
- OUT_RING(code[I830_DESTREG_SR1]);
- OUT_RING(code[I830_DESTREG_SR2]);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- BEGIN_LP_RING(2);
- OUT_RING(GFX_OP_STIPPLE);
- OUT_RING(code[1]);
- ADVANCE_LP_RING();
-}
-
-static void i830EmitState(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
-
- DRM_DEBUG("%s %x\n", __func__, dirty);
-
- if (dirty & I830_UPLOAD_BUFFERS) {
- i830EmitDestVerified(dev, sarea_priv->BufferState);
- sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
- }
-
- if (dirty & I830_UPLOAD_CTX) {
- i830EmitContextVerified(dev, sarea_priv->ContextState);
- sarea_priv->dirty &= ~I830_UPLOAD_CTX;
- }
-
- if (dirty & I830_UPLOAD_TEX0) {
- i830EmitTexVerified(dev, sarea_priv->TexState[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
- }
-
- if (dirty & I830_UPLOAD_TEX1) {
- i830EmitTexVerified(dev, sarea_priv->TexState[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND0) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0],
- sarea_priv->TexBlendStateWordsUsed[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND1) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1],
- sarea_priv->TexBlendStateWordsUsed[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
- }
-
- if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
- } else {
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
- }
-
- /* 1.3:
- */
-#if 0
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
- }
-#endif
- }
-
- /* 1.3:
- */
- if (dirty & I830_UPLOAD_STIPPLE) {
- i830EmitStippleVerified(dev, sarea_priv->StippleState);
- sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
- }
-
- if (dirty & I830_UPLOAD_TEX2) {
- i830EmitTexVerified(dev, sarea_priv->TexState2);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
- }
-
- if (dirty & I830_UPLOAD_TEX3) {
- i830EmitTexVerified(dev, sarea_priv->TexState3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND2) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState2,
- sarea_priv->TexBlendStateWordsUsed2);
-
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND3) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState3,
- sarea_priv->TexBlendStateWordsUsed3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
- }
-}
-
-/* ================================================================
- * Performance monitoring functions
- */
-
-static void i830_fill_box(struct drm_device *dev,
- int x, int y, int w, int h, int r, int g, int b)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- u32 color;
- unsigned int BR13, CMD;
- RING_LOCALS;
-
- BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24);
- CMD = XY_COLOR_BLT_CMD;
- x += dev_priv->sarea_priv->boxes[0].x1;
- y += dev_priv->sarea_priv->boxes[0].y1;
-
- if (dev_priv->cpp == 4) {
- BR13 |= (1 << 25);
- CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
- color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
- } else {
- color = (((r & 0xf8) << 8) |
- ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
- }
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((y << 16) | x);
- OUT_RING(((y + h) << 16) | (x + w));
-
- if (dev_priv->current_page == 1)
- OUT_RING(dev_priv->front_offset);
- else
- OUT_RING(dev_priv->back_offset);
-
- OUT_RING(color);
- ADVANCE_LP_RING();
-}
-
-static void i830_cp_performance_boxes(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- /* Purple box for page flipping
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP)
- i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255);
-
- /* Red box if we have to wait for idle at any point
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT)
- i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0);
-
- /* Blue box: lost context?
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT)
- i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255);
-
- /* Yellow box for texture swaps
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD)
- i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0);
-
- /* Green box if hardware never idles (as far as we can tell)
- */
- if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
- i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);
-
- /* Draw bars indicating number of buffers allocated
- * (not a great measure, easily confused)
- */
- if (dev_priv->dma_used) {
- int bar = dev_priv->dma_used / 10240;
- if (bar > 100)
- bar = 100;
- if (bar < 1)
- bar = 1;
- i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
- dev_priv->dma_used = 0;
- }
-
- dev_priv->sarea_priv->perf_boxes = 0;
-}
-
-static void i830_dma_dispatch_clear(struct drm_device *dev, int flags,
- unsigned int clear_color,
- unsigned int clear_zval,
- unsigned int clear_depthmask)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int BR13, CMD, D_CMD;
- RING_LOCALS;
-
- if (dev_priv->current_page == 1) {
- unsigned int tmp = flags;
-
- flags &= ~(I830_FRONT | I830_BACK);
- if (tmp & I830_FRONT)
- flags |= I830_BACK;
- if (tmp & I830_BACK)
- flags |= I830_FRONT;
- }
-
- i830_kernel_lost_context(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- case 4:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
- CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
- XY_COLOR_BLT_WRITE_RGB);
- D_CMD = XY_COLOR_BLT_CMD;
- if (clear_depthmask & 0x00ffffff)
- D_CMD |= XY_COLOR_BLT_WRITE_RGB;
- if (clear_depthmask & 0xff000000)
- D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
- break;
- default:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- if (flags & I830_FRONT) {
- DRM_DEBUG("clear front\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->front_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_BACK) {
- DRM_DEBUG("clear back\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->back_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_DEPTH) {
- DRM_DEBUG("clear depth\n");
- BEGIN_LP_RING(6);
- OUT_RING(D_CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->depth_offset);
- OUT_RING(clear_zval);
- ADVANCE_LP_RING();
- }
- }
-}
-
-static void i830_dma_dispatch_swap(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int CMD, BR13;
- RING_LOCALS;
-
- DRM_DEBUG("swapbuffers\n");
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes)
- i830_cp_performance_boxes(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- case 4:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
- CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
- XY_SRC_COPY_BLT_WRITE_RGB);
- break;
- default:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
- pbox->x1, pbox->y1, pbox->x2, pbox->y2);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->front_offset);
- else
- OUT_RING(dev_priv->back_offset);
-
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING(BR13 & 0xffff);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->back_offset);
- else
- OUT_RING(dev_priv->front_offset);
-
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_dispatch_flip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->current_page,
- dev_priv->sarea_priv->pf_current_page);
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes) {
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
- i830_cp_performance_boxes(dev);
- }
-
- BEGIN_LP_RING(2);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
- } else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
- }
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static void i830_dma_dispatch_vertex(struct drm_device *dev,
- struct drm_buf *buf, int discard, int used)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- struct drm_clip_rect *box = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- int i = 0, u;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- if (discard) {
- u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
- if (u != I830_BUF_CLIENT)
- DRM_DEBUG("xxxx 2\n");
- }
-
- if (used > 4 * 1023)
- used = 0;
-
- if (sarea_priv->dirty)
- i830EmitState(dev);
-
- DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
- address, used, nbox);
-
- dev_priv->counter++;
- DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("i830_dma_dispatch\n");
- DRM_DEBUG("start : %lx\n", start);
- DRM_DEBUG("used : %d\n", used);
- DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
- u32 *vp = buf_priv->kernel_virtual;
-
- vp[0] = (GFX_OP_PRIMITIVE |
- sarea_priv->vertex_prim | ((used / 4) - 2));
-
- if (dev_priv->use_mi_batchbuffer_start) {
- vp[used / 4] = MI_BATCH_BUFFER_END;
- used += 4;
- }
-
- if (used & 4) {
- vp[used / 4] = 0;
- used += 4;
- }
-
- i830_unmap_buffer(buf);
- }
-
- if (used) {
- do {
- if (i < nbox) {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR1]);
- OUT_RING(box[i].x1 | (box[i].y1 << 16));
- OUT_RING(box[i].x2 | (box[i].y2 << 16));
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR4]);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- if (dev_priv->use_mi_batchbuffer_start) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(start | MI_BATCH_NON_SECURE);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(start | MI_BATCH_NON_SECURE);
- OUT_RING(start + used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- } while (++i < nbox);
- }
-
- if (discard) {
- dev_priv->counter++;
-
- (void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(20);
- OUT_RING(dev_priv->counter);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(buf_priv->my_use_idx);
- OUT_RING(I830_BUF_FREE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_quiescent(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(4);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-}
-
-static int i830_flush_queue(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- int i, ret = 0;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
- I830_BUF_FREE);
-
- if (used == I830_BUF_HARDWARE)
- DRM_DEBUG("reclaimed from HARDWARE\n");
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("still on client\n");
- }
-
- return ret;
-}
-
-/* Must be called with the lock held */
-static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- if (!dev->dev_private)
- return;
- if (!dma->buflist)
- return;
-
- i830_flush_queue(dev);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf->file_priv == file_priv && buf_priv) {
- int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_FREE);
-
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("reclaimed from client\n");
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- }
- }
-}
-
-static int i830_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_flush_queue(dev);
- return 0;
-}
-
-static int i830_dma_vertex(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
- drm_i830_vertex_t *vertex = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
- vertex->idx, vertex->used, vertex->discard);
-
- if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
- return -EINVAL;
-
- i830_dma_dispatch_vertex(dev,
- dma->buflist[vertex->idx],
- vertex->discard, vertex->used);
-
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return 0;
-}
-
-static int i830_clear_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_clear_t *clear = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* GH: Someone's doing nasty things... */
- if (!dev->dev_private)
- return -EINVAL;
-
- i830_dma_dispatch_clear(dev, clear->flags,
- clear->clear_color,
- clear->clear_depth, clear->clear_depthmask);
- return 0;
-}
-
-static int i830_swap_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("i830_swap_bufs\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_dma_dispatch_swap(dev);
- return 0;
-}
-
-/* Not sure why this isn't set all the time. */
-static void i830_do_init_pageflip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- dev_priv->page_flipping = 1;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static int i830_do_cleanup_pageflip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- if (dev_priv->current_page != 0)
- i830_dma_dispatch_flip(dev);
-
- dev_priv->page_flipping = 0;
- return 0;
-}
-
-static int i830_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv->page_flipping)
- i830_do_init_pageflip(dev);
-
- i830_dma_dispatch_flip(dev);
- return 0;
-}
-
-static int i830_getage(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- sarea_priv->last_dispatch = (int)hw_status[5];
- return 0;
-}
-
-static int i830_getbuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int retcode = 0;
- drm_i830_dma_t *d = data;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- DRM_DEBUG("getbuf\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- d->granted = 0;
-
- retcode = i830_dma_get_buffer(dev, d, file_priv);
-
- DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
- task_pid_nr(current), retcode, d->granted);
-
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return retcode;
-}
-
-static int i830_copybuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Never copy - 2.4.x doesn't need it */
- return 0;
-}
-
-static int i830_docopy(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return 0;
-}
-
-static int i830_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_getparam_t *param = data;
- int value;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_PARAM_IRQ_ACTIVE:
- value = dev->irq_enabled;
- break;
- default:
- return -EINVAL;
- }
-
- if (copy_to_user(param->value, &value, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int i830_setparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_setparam_t *param = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
- dev_priv->use_mi_batchbuffer_start = param->value;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-int i830_driver_load(struct drm_device *dev, unsigned long flags)
-{
- /* i830 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- return 0;
-}
-
-void i830_driver_lastclose(struct drm_device *dev)
-{
- i830_dma_cleanup(dev);
-}
-
-void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- if (dev->dev_private) {
- drm_i830_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping)
- i830_do_cleanup_pageflip(dev);
- }
-}
-
-void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv)
-{
- i830_reclaim_buffers(dev, file_priv);
-}
-
-int i830_driver_dma_quiescent(struct drm_device *dev)
-{
- i830_dma_quiescent(dev);
- return 0;
-}
-
-/*
- * Call drm_ioctl under the big kernel lock to serialize against
- * the i830_mmap_buffers function.
- */
-long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
- lock_kernel();
- ret = drm_ioctl(file, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
-struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
-};
-
-int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
-
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always returned to indicate that every i8xx is AGP.
- */
-int i830_driver_device_is_agp(struct drm_device *dev)
-{
- return 1;
-}
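
The vertex-dispatch, flush and reclaim paths above all pass buffer ownership through a single cmpxchg()'d status word that cycles between I830_BUF_CLIENT, I830_BUF_HARDWARE and I830_BUF_FREE. A minimal userspace sketch of that handoff, using C11 atomics in place of the kernel's cmpxchg() (the state values here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdio.h>

enum { BUF_FREE = 0, BUF_CLIENT = 1, BUF_HARDWARE = 2 };

static _Atomic int in_use = BUF_CLIENT;

int main(void)
{
        /* Dispatch: the client hands the buffer to the hardware,
         * as in i830_dma_dispatch_vertex(). */
        int expected = BUF_CLIENT;
        if (!atomic_compare_exchange_strong(&in_use, &expected, BUF_HARDWARE))
                printf("buffer was not owned by the client\n");

        /* Flush: reclaim only buffers the hardware has finished with,
         * as in i830_flush_queue(). */
        expected = BUF_HARDWARE;
        if (atomic_compare_exchange_strong(&in_use, &expected, BUF_FREE))
                printf("reclaimed from hardware\n");
        return 0;
}

Compare-and-swap keeps reclaim safe even when it races with a client still holding the buffer: a failed exchange simply means the word was in some other state, which is exactly what the DRM_DEBUG branches above report.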
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
deleted file mode 100644
index f655ab7977da..000000000000
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/* i830_drv.c -- I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- * Keith Whitwell <keith@tungstengraphics.com>
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-
-#include "drm_pciids.h"
-
-static struct pci_device_id pciidlist[] = {
- i830_PCI_IDS
-};
-
-static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
- DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE
-#if USE_IRQS
- | DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ
-#endif
- ,
- .dev_priv_size = sizeof(drm_i830_buf_priv_t),
- .load = i830_driver_load,
- .lastclose = i830_driver_lastclose,
- .preclose = i830_driver_preclose,
- .device_is_agp = i830_driver_device_is_agp,
- .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
- .dma_quiescent = i830_driver_dma_quiescent,
-#if USE_IRQS
- .irq_preinstall = i830_driver_irq_preinstall,
- .irq_postinstall = i830_driver_irq_postinstall,
- .irq_uninstall = i830_driver_irq_uninstall,
- .irq_handler = i830_driver_irq_handler,
-#endif
- .ioctls = i830_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = i830_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static int __init i830_init(void)
-{
- driver.num_ioctls = i830_max_ioctl;
- return drm_init(&driver);
-}
-
-static void __exit i830_exit(void)
-{
- drm_exit(&driver);
-}
-
-module_init(i830_init);
-module_exit(i830_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
deleted file mode 100644
index 0df1c720560b..000000000000
--- a/drivers/gpu/drm/i830/i830_drv.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- *
- */
-
-#ifndef _I830_DRV_H_
-#define _I830_DRV_H_
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "VA Linux Systems Inc."
-
-#define DRIVER_NAME "i830"
-#define DRIVER_DESC "Intel 830M"
-#define DRIVER_DATE "20021108"
-
-/* Interface history:
- *
- * 1.1: Original.
- * 1.2: ?
- * 1.3: New irq emit/wait ioctls.
- * New pageflip ioctl.
- * New getparam ioctl.
- * State for texunits 3&4 in sarea.
- * New (alternative) layout for texture state.
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 3
-#define DRIVER_PATCHLEVEL 2
-
-/* The driver will work either way: IRQs save CPU time when waiting for
- * the card, but are subject to subtle interactions between the BIOS,
- * hardware and the driver.
- */
-/* XXX: Add vblank support? */
-#define USE_IRQS 0
-
-typedef struct drm_i830_buf_priv {
- u32 *in_use;
- int my_use_idx;
- int currently_mapped;
- void __user *virtual;
- void *kernel_virtual;
- drm_local_map_t map;
-} drm_i830_buf_priv_t;
-
-typedef struct _drm_i830_ring_buffer {
- int tail_mask;
- unsigned long Start;
- unsigned long End;
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
-} drm_i830_ring_buffer_t;
-
-typedef struct drm_i830_private {
- struct drm_local_map *sarea_map;
- struct drm_local_map *mmio_map;
-
- drm_i830_sarea_t *sarea_priv;
- drm_i830_ring_buffer_t ring;
-
- void *hw_status_page;
- unsigned long counter;
-
- dma_addr_t dma_status_page;
-
- struct drm_buf *mmap_buffer;
-
- u32 front_di1, back_di1, zi1;
-
- int back_offset;
- int depth_offset;
- int front_offset;
- int w, h;
- int pitch;
- int back_pitch;
- int depth_pitch;
- unsigned int cpp;
-
- int do_boxes;
- int dma_used;
-
- int current_page;
- int page_flipping;
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- atomic_t irq_emitted;
-
- int use_mi_batchbuffer_start;
-
-} drm_i830_private_t;
-
-long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i830_ioctls[];
-extern int i830_max_ioctl;
-
-/* i830_irq.c */
-extern int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i830_driver_irq_preinstall(struct drm_device *dev);
-extern void i830_driver_irq_postinstall(struct drm_device *dev);
-extern void i830_driver_irq_uninstall(struct drm_device *dev);
-extern int i830_driver_load(struct drm_device *, unsigned long flags);
-extern void i830_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
-extern void i830_driver_lastclose(struct drm_device *dev);
-extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
-extern int i830_driver_dma_quiescent(struct drm_device *dev);
-extern int i830_driver_device_is_agp(struct drm_device *dev);
-
-#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
-#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
-#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
-#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
-
-#define I830_VERBOSE 0
-
-#define RING_LOCALS unsigned int outring, ringmask, outcount; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I830_VERBOSE) \
- printk("BEGIN_LP_RING(%d)\n", (n)); \
- if (dev_priv->ring.space < n*4) \
- i830_wait_ring(dev, n*4, __func__); \
- outcount = 0; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
-} while (0)
-
-#define OUT_RING(n) do { \
- if (I830_VERBOSE) \
- printk(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outcount++; \
- outring += 4; \
- outring &= ringmask; \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- if (I830_VERBOSE) \
- printk("ADVANCE_LP_RING %x\n", outring); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I830_WRITE(LP_RING + RING_TAIL, outring); \
-} while (0)
-
-extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
-
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD (7<<23)
-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
-
-#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
-#define LOAD_TEXTURE_MAP0 (1<<11)
-
-#define INST_PARSER_CLIENT 0x00000000
-#define INST_OP_FLUSH 0x02000000
-#define INST_FLUSH_MAP_CACHE 0x00000001
-
-#define BB1_START_ADDR_MASK (~0x7)
-#define BB1_PROTECTED (1<<0)
-#define BB1_UNPROTECTED (0<<0)
-#define BB2_END_ADDR_MASK (~0x7)
-
-#define I830REG_HWSTAM 0x02098
-#define I830REG_INT_IDENTITY_R 0x020a4
-#define I830REG_INT_MASK_R 0x020a8
-#define I830REG_INT_ENABLE_R 0x020a0
-
-#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
-
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR (0x1<<1)
-#define SC_ENABLE_MASK (0x1<<0)
-#define SC_ENABLE (0x1<<0)
-
-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK (0xffff<<16)
-#define SCI_XMIN_MASK (0xffff<<0)
-#define SCI_YMAX_MASK (0xffff<<16)
-#define SCI_XMAX_MASK (0xffff<<0)
-
-#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
-
-#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-
-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-#define ASYNC_FLIP (1<<22)
-
-#define CMD_3D (0x3<<29)
-#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
-#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
-
-#define BR00_BITBLT_CLIENT 0x40000000
-#define BR00_OP_COLOR_BLT 0x10000000
-#define BR00_OP_SRC_COPY_BLT 0x10C00000
-#define BR13_SOLID_PATTERN 0x80000000
-
-#define BUF_3D_ID_COLOR_BACK (0x3<<24)
-#define BUF_3D_ID_DEPTH (0x7<<24)
-#define BUF_3D_USE_FENCE (1<<23)
-#define BUF_3D_PITCH(x) (((x)/4)<<2)
-
-#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
-#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
-#define MAP_PALETTE_BOTH (1<<11)
-
-#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
-#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
-#define XY_COLOR_BLT_WRITE_RGB (1<<20)
-
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
-#define MI_BATCH_NON_SECURE (1)
-
-#define MI_WAIT_FOR_EVENT ((0x3<<23))
-#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-
-#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
-
-#endif
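
The BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() macros above implement a classic power-of-two ring: each dword advances the cursor four bytes, wrapping through tail_mask, and the cached space count is only debited once the batch is advanced. A standalone sketch of that arithmetic, assuming a power-of-two size and with no MMIO (the real ADVANCE_LP_RING also writes the tail register):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64u                 /* bytes; must be a power of two */
#define RING_MASK (RING_SIZE - 1)     /* plays the role of tail_mask */

static uint32_t ring[RING_SIZE / 4];
static unsigned int tail, space = RING_SIZE - 8;
static unsigned int outring, outcount;

static void begin_lp_ring(void)
{
        /* the real macro first waits until space >= n * 4 */
        outcount = 0;
        outring = tail;
}

static void out_ring(uint32_t dword)
{
        ring[outring / 4] = dword;
        outcount++;
        outring = (outring + 4) & RING_MASK;
}

static void advance_lp_ring(void)
{
        tail = outring;
        space -= outcount * 4;
        /* the real macro then posts tail to LP_RING + RING_TAIL */
}

int main(void)
{
        begin_lp_ring();
        out_ring(7u << 23);     /* e.g. CMD_REPORT_HEAD */
        out_ring(0);
        advance_lp_ring();
        printf("tail=%u space=%u\n", tail, space);
        return 0;
}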
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
deleted file mode 100644
index d1a6b95d631d..000000000000
--- a/drivers/gpu/drm/i830/i830_irq.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* i830_irq.c -- IRQ support for the I830 -*- linux-c -*-
- *
- * Copyright 2002 Tungsten Graphics, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Keith Whitwell <keith@tungstengraphics.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/delay.h>
-
-irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u16 temp;
-
- temp = I830_READ16(I830REG_INT_IDENTITY_R);
- DRM_DEBUG("%x\n", temp);
-
- if (!(temp & 2))
- return IRQ_NONE;
-
- I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
-
- atomic_inc(&dev_priv->irq_received);
- wake_up_interruptible(&dev_priv->irq_queue);
-
- return IRQ_HANDLED;
-}
-
-static int i830_emit_irq(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s\n", __func__);
-
- atomic_inc(&dev_priv->irq_emitted);
-
- BEGIN_LP_RING(2);
- OUT_RING(0);
- OUT_RING(GFX_OP_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- return atomic_read(&dev_priv->irq_emitted);
-}
-
-static int i830_wait_irq(struct drm_device *dev, int irq_nr)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- DECLARE_WAITQUEUE(entry, current);
- unsigned long end = jiffies + HZ * 3;
- int ret = 0;
-
- DRM_DEBUG("%s\n", __func__);
-
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- return 0;
-
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
-
- add_wait_queue(&dev_priv->irq_queue, &entry);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- break;
- if ((signed)(end - jiffies) <= 0) {
- DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
- I830_READ16(I830REG_INT_IDENTITY_R),
- I830_READ16(I830REG_INT_MASK_R),
- I830_READ16(I830REG_INT_ENABLE_R),
- I830_READ16(I830REG_HWSTAM));
-
- ret = -EBUSY; /* Lockup? Missed irq? */
- break;
- }
- schedule_timeout(HZ * 3);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
-
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev_priv->irq_queue, &entry);
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_emit_t *emit = data;
- int result;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- result = i830_emit_irq(dev);
-
- if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_wait_t *irqwait = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- return i830_wait_irq(dev, irqwait->irq_seq);
-}
-
-/* drm_dma.h hooks */
-void i830_driver_irq_preinstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_HWSTAM, 0xffff);
- I830_WRITE16(I830REG_INT_MASK_R, 0x0);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
- atomic_set(&dev_priv->irq_received, 0);
- atomic_set(&dev_priv->irq_emitted, 0);
- init_waitqueue_head(&dev_priv->irq_queue);
-}
-
-void i830_driver_irq_postinstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
-}
-
-void i830_driver_irq_uninstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- if (!dev_priv)
- return;
-
- I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
-}
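
The emit/wait pair above is a sequence-number handshake: i830_emit_irq() hands out the next number and queues GFX_OP_USER_INTERRUPT, the interrupt handler bumps irq_received and wakes the waitqueue, and i830_wait_irq() sleeps until the received count catches up. A minimal pthreads model of the same protocol (the kernel waitqueue becomes a condition variable; the 3 second timeout is omitted):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t irq_queue = PTHREAD_COND_INITIALIZER;
static int irq_emitted, irq_received;

/* i830_emit_irq(): hand out the next sequence number. */
static int emit_irq(void)
{
        pthread_mutex_lock(&lock);
        int seq = ++irq_emitted;
        pthread_mutex_unlock(&lock);
        return seq;
}

/* i830_driver_irq_handler(): one interrupt arrives and wakes waiters. */
static void *irq_handler(void *arg)
{
        (void)arg;
        usleep(1000);                   /* hardware latency stand-in */
        pthread_mutex_lock(&lock);
        irq_received++;
        pthread_cond_broadcast(&irq_queue);
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* i830_wait_irq(): sleep until the received count reaches seq. */
static void wait_irq(int seq)
{
        pthread_mutex_lock(&lock);
        while (irq_received < seq)
                pthread_cond_wait(&irq_queue, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;
        int seq = emit_irq();

        pthread_create(&t, NULL, irq_handler, NULL);
        wait_irq(seq);
        pthread_join(t, NULL);
        printf("irq %d completed\n", seq);
        return 0;
}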
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c5502..87c8e29465e3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -326,21 +326,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct intel_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
- const char *pipe = crtc->pipe ? "B" : "A";
- const char *plane = crtc->plane ? "B" : "A";
+ const char pipe = pipe_name(crtc->pipe);
+ const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
spin_lock_irqsave(&dev->event_lock, flags);
work = crtc->unpin_work;
if (work == NULL) {
- seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+ seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
if (!work->pending) {
- seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+ seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
- seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+ seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
if (work->enable_stall_check)
@@ -458,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
+ int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -471,10 +471,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(IIR));
seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
- seq_printf(m, "Pipe A stat: %08x\n",
- I915_READ(PIPEASTAT));
- seq_printf(m, "Pipe B stat: %08x\n",
- I915_READ(PIPEBSTAT));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat: %08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
} else {
seq_printf(m, "North Display Interrupt enable: %08x\n",
I915_READ(DEIER));
@@ -544,11 +544,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- volatile u32 *hws;
+ const volatile u32 __iomem *hws;
int i;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- hws = (volatile u32 *)ring->status_page.page_addr;
+ hws = (volatile u32 __iomem *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -615,7 +615,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
- u8 *virt = ring->virtual_start;
+ const u8 __iomem *virt = ring->virtual_start;
uint32_t off;
for (off = 0; off < ring->size; off += 4) {
@@ -805,15 +805,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
- if (error->ringbuffer) {
- struct drm_i915_error_object *obj = error->ringbuffer;
-
- seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
- offset += 4;
+ for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
+ if (error->ringbuffer[i]) {
+ struct drm_i915_error_object *obj = error->ringbuffer[i];
+ seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ seq_printf(m, "%08x : %08x\n",
+ offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
}
}
}
@@ -862,33 +867,58 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rpstat;
+ u32 rpupei, rpcurup, rpprevup;
+ u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
/* RPSTAT1 is in the GT power well */
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
+
+ rpstat = I915_READ(GEN6_RPSTAT1);
+ rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
+ rpcurup = I915_READ(GEN6_RP_CUR_UP);
+ rpprevup = I915_READ(GEN6_RP_PREV_UP);
+ rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
+ rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
+ rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
- seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+ seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & 0xff00) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
+ seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
+ GEN6_CAGF_SHIFT) * 50);
+ seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+ GEN6_CURICONT_MASK);
+ seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+ GEN6_CURIAVG_MASK);
+ seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+ GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * 100);
+ max_freq * 50);
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * 100);
+ max_freq * 50);
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * 100);
+ max_freq * 50);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
} else {
seq_printf(m, "no P-state info available\n");
}
@@ -1259,7 +1289,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
- {"i915_capabilities", i915_capabilities, 0, 0},
+ {"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766f2081..12876f2795d2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -43,6 +43,17 @@
#include <linux/slab.h>
#include <acpi/video.h>
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 addr;
+
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+}
+
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
@@ -60,16 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+ ring->status_page.page_addr =
+ (void __force __iomem *)dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
- 0xf0;
+ i915_write_hws_pga(dev);
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
@@ -216,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
if (ring->status_page.gfx_addr != 0)
intel_ring_setup_status_page(ring);
else
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
@@ -771,6 +779,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_INFO(dev)->gen >= 4;
break;
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -859,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
" G33 hw status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr = dev_priv->hws_map.handle;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ ring->status_page.page_addr =
+ (void __force __iomem *)dev_priv->hws_map.handle;
+ memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -1895,6 +1907,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+ /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
mmio_bar = IS_GEN2(dev) ? 1 : 0;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
@@ -2002,9 +2025,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
- dev_priv->trace_irq_seqno = 0;
- ret = drm_vblank_init(dev, I915_NUM_PIPE);
+ if (IS_MOBILE(dev) || !IS_GEN2(dev))
+ dev_priv->num_pipe = 2;
+ else
+ dev_priv->num_pipe = 1;
+
+ ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
goto out_gem_unload;
@@ -2180,7 +2207,7 @@ void i915_driver_lastclose(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_fb_helper_restore();
+ intel_fb_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0ad533f06af9..c34a8dd31d02 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,9 +43,15 @@ module_param_named(modeset, i915_modeset, int, 0400);
unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+int i915_panel_ignore_lid = 0;
+module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
+unsigned int i915_semaphores = 1;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
unsigned int i915_enable_rc6 = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
@@ -55,7 +61,10 @@ module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
unsigned int i915_panel_use_ssc = 1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-bool i915_try_reset = true;
+int i915_vbt_sdvo_panel_type = -1;
+module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+
+static bool i915_try_reset = true;
module_param_named(reset, i915_try_reset, bool, 0600);
static struct drm_driver driver;
@@ -254,7 +263,7 @@ void intel_detect_pch (struct drm_device *dev)
}
}
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -270,12 +279,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo < 20 && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -703,6 +722,9 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
@@ -719,14 +741,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = i915_pci_probe,
- .remove = i915_pci_remove,
- .driver.pm = &i915_pm_ops,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -735,6 +749,14 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i915_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = i915_pci_probe,
+ .remove = i915_pci_remove,
+ .driver.pm = &i915_pm_ops,
+};
+
static int __init i915_init(void)
{
if (!intel_agp_enabled) {
@@ -768,12 +790,12 @@ static int __init i915_init(void)
if (!(driver.driver_features & DRIVER_MODESET))
driver.get_vblank_timestamp = NULL;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &i915_pci_driver);
}
module_init(i915_init);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 65dfe81d0035..1c1b27c97e5c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,17 +49,22 @@
enum pipe {
PIPE_A = 0,
PIPE_B,
+ PIPE_C,
+ I915_MAX_PIPES
};
+#define pipe_name(p) ((p) + 'A')
enum plane {
PLANE_A = 0,
PLANE_B,
+ PLANE_C,
};
-
-#define I915_NUM_PIPE 2
+#define plane_name(p) ((p) + 'A')
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+
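
pipe_name() and plane_name() added here turn an index into its letter by offsetting into ASCII, which is why the earlier i915_debugfs.c hunks switch their format strings from %s to %c. A tiny standalone check of the idiom:

#include <stdio.h>

#define pipe_name(p) ((p) + 'A')        /* same idiom as the hunk above */

enum pipe { PIPE_A = 0, PIPE_B, PIPE_C };

int main(void)
{
        printf("pipe %c\n", pipe_name(PIPE_C));  /* prints "pipe C" */
        return 0;
}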
/* Interface history:
*
* 1.1: Original.
@@ -75,10 +80,7 @@ enum plane {
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
-#define WATCH_EXEC 0
-#define WATCH_RELOC 0
#define WATCH_LISTS 0
-#define WATCH_PWRITE 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
@@ -111,6 +113,7 @@ struct intel_opregion {
struct opregion_swsci *swsci;
struct opregion_asle *asle;
void *vbt;
+ u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
@@ -144,8 +147,7 @@ struct intel_display_error_state;
struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
- u32 pipeastat;
- u32 pipebstat;
+ u32 pipestat[I915_MAX_PIPES];
u32 ipeir;
u32 ipehr;
u32 instdone;
@@ -172,7 +174,7 @@ struct drm_i915_error_state {
int page_count;
u32 gtt_offset;
u32 *pages[0];
- } *ringbuffer, *batchbuffer[I915_NUM_RINGS];
+ } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
u32 name;
@@ -200,9 +202,7 @@ struct drm_i915_display_funcs {
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
- void (*update_wm)(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size);
+ void (*update_wm)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -274,7 +274,6 @@ typedef struct drm_i915_private {
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
- dma_addr_t dma_status_page;
uint32_t counter;
drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
@@ -289,7 +288,6 @@ typedef struct drm_i915_private {
int page_flipping;
atomic_t irq_received;
- u32 trace_irq_seqno;
/* protects the irq masks */
spinlock_t irq_lock;
@@ -324,8 +322,6 @@ typedef struct drm_i915_private {
int cfb_plane;
int cfb_y;
- int irq_enabled;
-
struct intel_opregion opregion;
/* overlay */
@@ -615,6 +611,12 @@ typedef struct drm_i915_private {
struct delayed_work retire_work;
/**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ /**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
@@ -628,7 +630,7 @@ typedef struct drm_i915_private {
* Flag if the hardware appears to be wedged.
*
* This is set when attempts to idle the device timeout.
- * It prevents command submission from occuring and makes
+ * It prevents command submission from occurring and makes
* every pending request fail
*/
atomic_t wedged;
@@ -652,6 +654,7 @@ typedef struct drm_i915_private {
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
+ int panel_t3, panel_t12;
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -698,6 +701,8 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+
+ struct drm_property *broadcast_rgb_property;
} drm_i915_private_t;
struct drm_i915_gem_object {
@@ -955,9 +960,12 @@ enum intel_chip_family {
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
+extern int i915_panel_ignore_lid;
extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern unsigned int i915_panel_use_ssc;
+extern int i915_vbt_sdvo_panel_type;
extern unsigned int i915_enable_rc6;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
@@ -997,8 +1005,6 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-extern void i915_enable_interrupt (struct drm_device *dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -1050,7 +1056,6 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
-int i915_gem_check_is_wedged(struct drm_device *dev);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -1093,8 +1098,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
@@ -1109,12 +1113,18 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool interruptible);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1125,16 +1135,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
}
static inline u32
-i915_gem_next_request_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
return ring->outstanding_lazy_request = dev_priv->next_seqno;
}
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined,
- bool interruptible);
+ struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
void i915_gem_retire_requests(struct drm_device *dev);
@@ -1143,8 +1151,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
@@ -1153,14 +1160,11 @@ void i915_gem_do_init(struct drm_device *dev,
unsigned long end);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring);
-int __must_check i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno,
- bool interruptible,
- struct intel_ring_buffer *ring);
+int __must_check i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_request *request);
+int __must_check i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1177,6 +1181,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1309,7 +1316,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
#define __i915_read(x, y) \
static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = read##y(dev_priv->regs + reg); \
- trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
__i915_read(8, b)
@@ -1320,7 +1327,7 @@ __i915_read(64, q)
#define __i915_write(x, y) \
static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
- trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+ trace_i915_reg_rw(true, reg, val, sizeof(val)); \
write##y(val, dev_priv->regs + reg); \
}
__i915_write(8, b)
@@ -1353,62 +1360,29 @@ __i915_write(64, q)
* must be set to prevent GT core from power down and stale values being
* returned.
*/
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
if (dev_priv->info->gen >= 6) {
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
val = I915_READ(reg);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
} else
val = I915_READ(reg);
return val;
}
-static inline void
-i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+ u32 reg, u32 val)
{
- /* Trace down the write operation before the real write */
- trace_i915_reg_rw('W', reg, val, len);
- switch (len) {
- case 8:
- writeq(val, dev_priv->regs + reg);
- break;
- case 4:
- writel(val, dev_priv->regs + reg);
- break;
- case 2:
- writew(val, dev_priv->regs + reg);
- break;
- case 1:
- writeb(val, dev_priv->regs + reg);
- break;
- }
+ if (dev_priv->info->gen >= 6)
+ __gen6_gt_wait_for_fifo(dev_priv);
+ I915_WRITE(reg, val);
}
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
- (LP_RING(dev_priv)->status_page.page_addr))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_GEM_HWS_INDEX 0x20
-#define I915_BREADCRUMB_INDEX 0x21
-
#endif
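
The i915_gt_read()/i915_gt_write() helpers above bracket every gen6 GT access: reads hold a force-wake reference so the power well cannot drop mid-access, and writes first poll GT_FIFO_FREE_ENTRIES until at least 20 slots are free, giving up after 500 iterations. A sketch of the write-side throttle under those assumptions; read_free_entries() is a hypothetical stand-in for the MMIO read:

#include <stdio.h>

/* Hypothetical stand-in for I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES). */
static unsigned int read_free_entries(void)
{
        static unsigned int entries = 5;
        return entries += 10;   /* pretend the FIFO drains over time */
}

/* Mirrors __gen6_gt_wait_for_fifo(): poll until >= 20 free entries,
 * bounded at 500 iterations so a wedged GT cannot hang the CPU. */
static void wait_for_fifo(void)
{
        int loop = 500;
        unsigned int fifo = read_free_entries();

        while (fifo < 20 && loop--)
                fifo = read_free_entries();  /* the real code udelay(10)s */
}

int main(void)
{
        wait_for_fifo();
        printf("FIFO has room; safe to post the write\n");
        return 0;
}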
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4f74c7c6fb..7ce3f353af33 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size;
}
-int
-i915_gem_check_is_wedged(struct drm_device *dev)
+static int
+i915_gem_wait_for_error(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct completion *x = &dev_priv->error_completion;
@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
if (ret)
return ret;
- /* Success, we reset the GPU! */
- if (!atomic_read(&dev_priv->mm.wedged))
- return 0;
-
- /* GPU is hung, bump the completion count to account for
- * the token we just consumed so that we never hit zero and
- * end up waiting upon a subsequent completion event that
- * will never happen.
- */
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- return -EIO;
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ /* GPU is hung, bump the completion count to account for
+ * the token we just consumed so that we never hit zero and
+ * end up waiting upon a subsequent completion event that
+ * will never happen.
+ */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_check_is_wedged(dev);
+ ret = i915_gem_wait_for_error(dev);
if (ret)
return ret;
@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
if (ret)
return ret;
- if (atomic_read(&dev_priv->mm.wedged)) {
- mutex_unlock(&dev->struct_mutex);
- return -EAGAIN;
- }
-
WARN_ON(i915_verify_lists(dev));
return 0;
}
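
The rewritten i915_gem_wait_for_error() above consumes one token from error_completion and, if the GPU is still wedged, credits it straight back under the wait-queue lock, so the count never sticks at zero and strands a later waiter on a completion that will never be signalled. A user-space model of that token re-add trick, assuming nothing beyond pthreads:

#include <pthread.h>
#include <stdbool.h>

struct error_completion {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    unsigned int    done;    /* tokens posted by the reset handler */
};

static void wait_for_error(struct error_completion *x, bool still_wedged)
{
    pthread_mutex_lock(&x->lock);
    while (x->done == 0)
        pthread_cond_wait(&x->cond, &x->lock);
    x->done--;               /* consume our token */
    if (still_wedged)
        x->done++;           /* hung: put it back for the next waiter */
    pthread_mutex_unlock(&x->lock);
}
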
@@ -193,22 +185,20 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
- * Creates a new mm object and returns a handle to it.
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int
+i915_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ uint32_t *handle_p)
{
- struct drm_i915_gem_create *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
- args->size = roundup(args->size, PAGE_SIZE);
+ size = roundup(size, PAGE_SIZE);
/* Allocate the new object */
- obj = i915_gem_alloc_object(dev, args->size);
+ obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return -ENOMEM;
@@ -224,10 +214,41 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
- args->handle = handle;
+ *handle_p = handle;
return 0;
}
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+ args->size = args->pitch * args->height;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
+int i915_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_create *args = data;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
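
The dumb-create path above derives the buffer geometry from width, height and bpp: bytes per pixel rounded up, row pitch aligned to 64 bytes, total size as pitch times height (i915_gem_create() then rounds that up to a page). A standalone check of the arithmetic; the 1366x768 at 24bpp numbers are picked purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint32_t width = 1366, height = 768, bpp = 24;

    uint32_t cpp   = (bpp + 7) / 8;            /* bytes per pixel, rounded up: 3 */
    uint32_t pitch = ALIGN(width * cpp, 64);   /* 4098 -> 4160 bytes per row */
    uint64_t size  = (uint64_t)pitch * height; /* 3194880 bytes before page rounding */

    printf("pitch=%u size=%llu\n", pitch, (unsigned long long)size);
    return 0;
}
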
@@ -514,7 +535,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -526,6 +547,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ trace_i915_gem_object_pread(obj, args->offset, args->size);
+
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
args->size);
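
The repeated change from "obj == NULL" to "&obj->base == NULL" in this file is deliberate: obj comes from to_intel_bo(), a container_of() on the pointer returned by drm_gem_object_lookup(), so re-taking the embedded member's address undoes the offset and tests exactly what the lookup returned. With the current layout base happens to be the first field, so the old check worked too; the new one stays correct if that ever changes. A sketch with stand-in types, leaning on the same formally out-of-bounds pointer arithmetic the kernel idiom relies on:

#include <assert.h>
#include <stddef.h>

struct drm_gem_object { int dummy; };
struct intel_bo { long pad; struct drm_gem_object base; };  /* base NOT first, on purpose */

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
    struct drm_gem_object *looked_up = NULL;  /* failed lookup */
    struct intel_bo *obj = container_of(looked_up, struct intel_bo, base);

    assert(obj != NULL);          /* shifted by offsetof(): misleading */
    assert(&obj->base == NULL);   /* the offset cancels: correct test */
    return 0;
}
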
@@ -955,7 +978,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -967,6 +990,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@@ -1049,7 +1074,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -1092,7 +1117,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -1121,7 +1146,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
- loff_t offset;
unsigned long addr;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1136,8 +1160,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -E2BIG;
}
- offset = args->offset;
-
down_write(&current->mm->mmap_sem);
addr = do_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1182,9 +1204,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
- /* Now bind it into the GTT if needed */
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto out;
+
+ trace_i915_gem_object_fault(obj, page_offset, true, write);
+ /* Now bind it into the GTT if needed */
if (!obj->map_and_fenceable) {
ret = i915_gem_object_unbind(obj);
if (ret)
@@ -1203,7 +1229,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
else
- ret = i915_gem_object_get_fence(obj, NULL, true);
+ ret = i915_gem_object_get_fence(obj, NULL);
if (ret)
goto unlock;
@@ -1219,12 +1245,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
mutex_unlock(&dev->struct_mutex);
-
+out:
switch (ret) {
+ case -EIO:
case -EAGAIN:
+ /* Give the error handler a chance to run and move the
+ * objects off the GPU active list. Next time we service the
+ * fault, we should be able to transition the page into the
+ * GTT without touching the GPU (and so avoid further
+ * EIO/EAGAIN). If the GPU is wedged, then there is no issue
+ * with coherency, just lost writes.
+ */
set_need_resched();
case 0:
case -ERESTARTSYS:
+ case -EINTR:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
@@ -1321,9 +1356,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->fault_mappable)
return;
- unmap_mapping_range(obj->base.dev->dev_mapping,
- (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
- obj->base.size, 1);
+ if (obj->base.dev->dev_mapping)
+ unmap_mapping_range(obj->base.dev->dev_mapping,
+ (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+ obj->base.size, 1);
obj->fault_mappable = false;
}
@@ -1398,7 +1434,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
* Return the required GTT alignment for an object, only taking into account
* unfenced tiled surface requirements.
*/
-static uint32_t
+uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
@@ -1425,27 +1461,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
return tile_height * obj->stride * 2;
}
-/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
- * @dev: DRM device
- * @data: GTT mapping ioctl data
- * @file: GEM object info
- *
- * Simply returns the fake offset to userspace so it can mmap it.
- * The mmap call will end up in drm_gem_mmap(), which will set things
- * up so we can get faults in the handler above.
- *
- * The fault handler will take care of binding the object into the GTT
- * (since it may have been evicted to make room for something), allocating
- * a fence register, and mapping the appropriate aperture address into
- * userspace.
- */
int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_mmap_gtt(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_mmap_gtt *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1456,8 +1478,8 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -1479,7 +1501,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
goto out;
}
- args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out:
drm_gem_object_unreference(&obj->base);
@@ -1488,6 +1510,34 @@ unlock:
return ret;
}
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_mmap_gtt *args = data;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
+
+
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
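
The split above leaves the ioctl as a thin wrapper: it only hands a fake offset back to userspace, and the real mapping happens when that offset is passed to mmap() on the DRM fd, routing faults into i915_gem_fault(). A sketch of the userspace side, assuming libdrm's uapi headers supply the ioctl number and argument struct:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

void *map_through_gtt(int drm_fd, uint32_t handle, size_t size)
{
    struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

    if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
        return MAP_FAILED;

    /* arg.offset is a token into the GTT mmap space, not a CPU address */
    return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                drm_fd, arg.offset);
}
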
@@ -1669,9 +1719,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
}
static void
-i915_gem_process_flushing_list(struct drm_device *dev,
- uint32_t flush_domains,
- struct intel_ring_buffer *ring)
+i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+ uint32_t flush_domains)
{
struct drm_i915_gem_object *obj, *next;
@@ -1684,7 +1733,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
obj->base.write_domain = 0;
list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_active(obj, ring,
- i915_gem_next_request_seqno(dev, ring));
+ i915_gem_next_request_seqno(ring));
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@@ -1694,27 +1743,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
int
-i915_add_request(struct drm_device *dev,
+i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring)
+ struct drm_i915_gem_request *request)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *file_priv = NULL;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
int was_empty;
int ret;
BUG_ON(request == NULL);
- if (file != NULL)
- file_priv = file->driver_priv;
-
ret = ring->add_request(ring, &seqno);
if (ret)
return ret;
- ring->outstanding_lazy_request = false;
+ trace_i915_gem_request_add(ring, seqno);
request->seqno = seqno;
request->ring = ring;
@@ -1722,7 +1766,9 @@ i915_add_request(struct drm_device *dev,
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
- if (file_priv) {
+ if (file) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
spin_lock(&file_priv->mm.lock);
request->file_priv = file_priv;
list_add_tail(&request->client_list,
@@ -1730,6 +1776,8 @@ i915_add_request(struct drm_device *dev,
spin_unlock(&file_priv->mm.lock);
}
+ ring->outstanding_lazy_request = false;
+
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1749,8 +1797,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
return;
spin_lock(&file_priv->mm.lock);
- list_del(&request->client_list);
- request->file_priv = NULL;
+ if (request->file_priv) {
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
spin_unlock(&file_priv->mm.lock);
}
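
The hunk above closes a race: the retire path and the file-close path may both try to unlink a request from its client list, so the link field is re-tested under the spinlock that protects it, making the unlink idempotent. The same shape in a self-contained pthread model:

#include <pthread.h>
#include <stddef.h>

struct file_priv { pthread_mutex_t lock; };
struct request {
    struct file_priv *file_priv;
    struct request *next, *prev;   /* stand-in for list_head client_list */
};

static void list_del(struct request *rq)
{
    rq->prev->next = rq->next;
    rq->next->prev = rq->prev;
}

static void remove_from_client(struct request *rq)
{
    struct file_priv *fp = rq->file_priv;

    if (!fp)
        return;                    /* unlocked fast path: already detached */

    pthread_mutex_lock(&fp->lock);
    if (rq->file_priv) {           /* re-check: we may have lost the race */
        list_del(rq);
        rq->file_priv = NULL;
    }
    pthread_mutex_unlock(&fp->lock);
}
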
@@ -1846,18 +1896,15 @@ void i915_gem_reset(struct drm_device *dev)
* This function clears the request list as sequence numbers are passed.
*/
static void
-i915_gem_retire_requests_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
int i;
- if (!ring->status_page.page_addr ||
- list_empty(&ring->request_list))
+ if (list_empty(&ring->request_list))
return;
- WARN_ON(i915_verify_lists(dev));
+ WARN_ON(i915_verify_lists(ring->dev));
seqno = ring->get_seqno(ring);
@@ -1875,7 +1922,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
if (!i915_seqno_passed(seqno, request->seqno))
break;
- trace_i915_gem_request_retire(dev, request->seqno);
+ trace_i915_gem_request_retire(ring, request->seqno);
list_del(&request->list);
i915_gem_request_remove_from_client(request);
@@ -1901,13 +1948,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
i915_gem_object_move_to_inactive(obj);
}
- if (unlikely (dev_priv->trace_irq_seqno &&
- i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+ if (unlikely(ring->trace_irq_seqno &&
+ i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
ring->irq_put(ring);
- dev_priv->trace_irq_seqno = 0;
+ ring->trace_irq_seqno = 0;
}
- WARN_ON(i915_verify_lists(dev));
+ WARN_ON(i915_verify_lists(ring->dev));
}
void
@@ -1931,7 +1978,7 @@ i915_gem_retire_requests(struct drm_device *dev)
}
for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
+ i915_gem_retire_requests_ring(&dev_priv->ring[i]);
}
static void
@@ -1965,11 +2012,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
struct drm_i915_gem_request *request;
int ret;
- ret = i915_gem_flush_ring(dev, ring, 0,
- I915_GEM_GPU_DOMAINS);
+ ret = i915_gem_flush_ring(ring,
+ 0, I915_GEM_GPU_DOMAINS);
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (ret || request == NULL ||
- i915_add_request(dev, NULL, request, ring))
+ i915_add_request(ring, NULL, request))
kfree(request);
}
@@ -1982,18 +2029,32 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
- bool interruptible, struct intel_ring_buffer *ring)
+i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
int ret = 0;
BUG_ON(seqno == 0);
- if (atomic_read(&dev_priv->mm.wedged))
- return -EAGAIN;
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ struct completion *x = &dev_priv->error_completion;
+ bool recovery_complete;
+ unsigned long flags;
+
+ /* Give the error handler a chance to run. */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ recovery_complete = x->done > 0;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+
+ return recovery_complete ? -EIO : -EAGAIN;
+ }
if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
@@ -2002,7 +2063,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (request == NULL)
return -ENOMEM;
- ret = i915_add_request(dev, NULL, request, ring);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -2012,22 +2073,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
}
if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(ring->dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
- i915_driver_irq_preinstall(dev);
- i915_driver_irq_postinstall(dev);
+ i915_driver_irq_preinstall(ring->dev);
+ i915_driver_irq_postinstall(ring->dev);
}
- trace_i915_gem_request_wait_begin(dev, seqno);
+ trace_i915_gem_request_wait_begin(ring, seqno);
ring->waiting_seqno = seqno;
if (ring->irq_get(ring)) {
- if (interruptible)
+ if (dev_priv->mm.interruptible)
ret = wait_event_interruptible(ring->irq_queue,
i915_seqno_passed(ring->get_seqno(ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
@@ -2043,7 +2104,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
ret = -EBUSY;
ring->waiting_seqno = 0;
- trace_i915_gem_request_wait_end(dev, seqno);
+ trace_i915_gem_request_wait_end(ring, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
@@ -2059,31 +2120,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
* a separate wait queue to handle that.
*/
if (ret == 0)
- i915_gem_retire_requests_ring(dev, ring);
+ i915_gem_retire_requests_ring(ring);
return ret;
}
/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno,
- struct intel_ring_buffer *ring)
-{
- return i915_do_wait_request(dev, seqno, 1, ring);
-}
-
-/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool interruptible)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
int ret;
/* This function only exists to support waiting for existing rendering,
@@ -2095,10 +2143,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
* it.
*/
if (obj->active) {
- ret = i915_do_wait_request(dev,
- obj->last_rendering_seqno,
- interruptible,
- obj->ring);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
}
@@ -2148,6 +2193,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret == -ERESTARTSYS)
return ret;
+ trace_i915_gem_object_unbind(obj);
+
i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj);
@@ -2163,29 +2210,32 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
- trace_i915_gem_object_unbind(obj);
-
return ret;
}
int
-i915_gem_flush_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
int ret;
+ if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
+ return 0;
+
+ trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
+
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;
- i915_gem_process_flushing_list(dev, flush_domains, ring);
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ i915_gem_process_flushing_list(ring, flush_domains);
+
return 0;
}
-static int i915_ring_idle(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
@@ -2193,15 +2243,13 @@ static int i915_ring_idle(struct drm_device *dev,
return 0;
if (!list_empty(&ring->gpu_write_list)) {
- ret = i915_gem_flush_ring(dev, ring,
+ ret = i915_gem_flush_ring(ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
- return i915_wait_request(dev,
- i915_gem_next_request_seqno(dev, ring),
- ring);
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
int
@@ -2218,7 +2266,7 @@ i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+ ret = i915_ring_idle(&dev_priv->ring[i]);
if (ret)
return ret;
}
@@ -2402,15 +2450,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined,
- bool interruptible)
+ struct intel_ring_buffer *pipelined)
{
int ret;
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->base.dev,
- obj->last_fenced_ring,
+ ret = i915_gem_flush_ring(obj->last_fenced_ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2422,10 +2468,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
- ret = i915_do_wait_request(obj->base.dev,
- obj->last_fenced_seqno,
- interruptible,
- obj->last_fenced_ring);
+ ret = i915_wait_request(obj->last_fenced_ring,
+ obj->last_fenced_seqno);
if (ret)
return ret;
}
@@ -2451,7 +2495,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
if (obj->tiling_mode)
i915_gem_release_mmap(obj);
- ret = i915_gem_object_flush_fence(obj, NULL, true);
+ ret = i915_gem_object_flush_fence(obj, NULL);
if (ret)
return ret;
@@ -2528,8 +2572,7 @@ i915_find_fence_reg(struct drm_device *dev,
*/
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined,
- bool interruptible)
+ struct intel_ring_buffer *pipelined)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2544,17 +2587,30 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
- if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
- pipelined = NULL;
+ if (obj->tiling_changed) {
+ ret = i915_gem_object_flush_fence(obj, pipelined);
+ if (ret)
+ return ret;
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+
+ if (pipelined) {
+ reg->setup_seqno =
+ i915_gem_next_request_seqno(pipelined);
+ obj->last_fenced_seqno = reg->setup_seqno;
+ obj->last_fenced_ring = pipelined;
+ }
+
+ goto update;
+ }
if (!pipelined) {
if (reg->setup_seqno) {
if (!ring_passed_seqno(obj->last_fenced_ring,
reg->setup_seqno)) {
- ret = i915_do_wait_request(obj->base.dev,
- reg->setup_seqno,
- interruptible,
- obj->last_fenced_ring);
+ ret = i915_wait_request(obj->last_fenced_ring,
+ reg->setup_seqno);
if (ret)
return ret;
}
@@ -2563,36 +2619,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
}
} else if (obj->last_fenced_ring &&
obj->last_fenced_ring != pipelined) {
- ret = i915_gem_object_flush_fence(obj,
- pipelined,
- interruptible);
+ ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
- } else if (obj->tiling_changed) {
- if (obj->fenced_gpu_access) {
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
- 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- obj->fenced_gpu_access = false;
- }
- }
-
- if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
- pipelined = NULL;
- BUG_ON(!pipelined && reg->setup_seqno);
-
- if (obj->tiling_changed) {
- if (pipelined) {
- reg->setup_seqno =
- i915_gem_next_request_seqno(dev, pipelined);
- obj->last_fenced_seqno = reg->setup_seqno;
- obj->last_fenced_ring = pipelined;
- }
- goto update;
}
return 0;
@@ -2602,7 +2631,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (reg == NULL)
return -ENOSPC;
- ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+ ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
@@ -2614,9 +2643,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (old->tiling_mode)
i915_gem_release_mmap(old);
- ret = i915_gem_object_flush_fence(old,
- pipelined,
- interruptible);
+ ret = i915_gem_object_flush_fence(old, pipelined);
if (ret) {
drm_gem_object_unreference(&old->base);
return ret;
@@ -2628,7 +2655,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
old->fence_reg = I915_FENCE_REG_NONE;
old->last_fenced_ring = pipelined;
old->last_fenced_seqno =
- pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
drm_gem_object_unreference(&old->base);
} else if (obj->last_fenced_seqno == 0)
@@ -2640,7 +2667,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
obj->last_fenced_ring = pipelined;
reg->setup_seqno =
- pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
obj->last_fenced_seqno = reg->setup_seqno;
update:
@@ -2837,7 +2864,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->map_and_fenceable = mappable && fenceable;
- trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+ trace_i915_gem_object_bind(obj, map_and_fenceable);
return 0;
}
@@ -2860,13 +2887,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
-
if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;
/* Queue the GPU write cache flushing we need. */
- return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+ return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2933,12 +2958,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->gtt_space == NULL)
return -EINVAL;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
if (obj->pending_gpu_write || write) {
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
@@ -2988,7 +3016,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
/* Currently, we are always called from an non-interruptible context. */
if (pipelined != obj->ring) {
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
@@ -3006,8 +3034,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
}
int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
{
int ret;
@@ -3015,13 +3042,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
- 0, obj->base.write_domain);
+ ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
if (ret)
return ret;
}
- return i915_gem_object_wait_rendering(obj, interruptible);
+ return i915_gem_object_wait_rendering(obj);
}
/**
@@ -3036,11 +3062,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return 0;
+
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@@ -3138,7 +3167,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@@ -3209,6 +3238,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
u32 seqno = 0;
int ret;
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EIO;
+
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
@@ -3324,7 +3356,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3375,7 +3407,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3412,7 +3444,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3430,7 +3462,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* flush earlier is beneficial.
*/
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(dev, obj->ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
} else if (obj->ring->outstanding_lazy_request ==
obj->last_rendering_seqno) {
@@ -3441,9 +3473,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*/
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request)
- ret = i915_add_request(dev,
- NULL, request,
- obj->ring);
+ ret = i915_add_request(obj->ring, NULL, request);
else
ret = -ENOMEM;
}
@@ -3453,7 +3483,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* are actually unmasked, and our working set ends up being
* larger than required.
*/
- i915_gem_retire_requests_ring(dev, obj->ring);
+ i915_gem_retire_requests_ring(obj->ring);
args->busy = obj->active;
}
@@ -3492,7 +3522,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3574,6 +3604,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
return;
}
+ trace_i915_gem_object_destroy(obj);
+
if (obj->base.map_list.map)
i915_gem_free_mmap_offset(obj);
@@ -3590,8 +3622,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
- trace_i915_gem_object_destroy(obj);
-
while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
@@ -3837,6 +3867,8 @@ i915_gem_load(struct drm_device *dev)
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
+ dev_priv->mm.interruptible = true;
+
dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 29d014c48ca2..8da1899bd24f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev)
}
#endif /* WATCH_INACTIVE */
-
-#if WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
- uint32_t bias, uint32_t mark)
-{
- uint32_t *mem = kmap_atomic(page, KM_USER0);
- int i;
- for (i = start; i < end; i += 4)
- DRM_INFO("%08x: %08x%s\n",
- (int) (bias + i), mem[i / 4],
- (bias + i == mark) ? " ********" : "");
- kunmap_atomic(mem, KM_USER0);
- /* give syslog time to catch up */
- msleep(1);
-}
-
-void
-i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
- const char *where, uint32_t mark)
-{
- int page;
-
- DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
- for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
- int page_len, chunk, chunk_len;
-
- page_len = len - page * PAGE_SIZE;
- if (page_len > PAGE_SIZE)
- page_len = PAGE_SIZE;
-
- for (chunk = 0; chunk < page_len; chunk += 128) {
- chunk_len = page_len - chunk;
- if (chunk_len > 128)
- chunk_len = 128;
- i915_gem_dump_page(obj->pages[page],
- chunk, chunk + chunk_len,
- obj->gtt_offset +
- page * PAGE_SIZE,
- mark);
- }
- }
-}
-#endif
-
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3d39005540aa..da05a2692a75 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -30,6 +30,7 @@
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
+#include "i915_trace.h"
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
@@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
return 0;
}
+ trace_i915_gem_evict(dev, min_size, alignment, mappable);
+
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
@@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
if (lists_empty)
return -ENOSPC;
+ trace_i915_gem_evict_everything(dev, purgeable_only);
+
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e825f2..20a4cc5b818f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,6 +37,7 @@ struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
+ uint32_t flips;
};
/*
@@ -190,6 +191,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
i915_gem_release_mmap(obj);
+ if (obj->base.pending_write_domain)
+ cd->flips |= atomic_read(&obj->pending_flip);
+
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
@@ -282,21 +286,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_offset = to_intel_bo(target_obj)->gtt_offset;
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@@ -365,16 +354,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
- /* and points to somewhere within the target object. */
- if (unlikely(reloc->delta >= target_obj->size)) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->delta,
- (int) target_obj->size);
- return ret;
- }
-
reloc->delta += target_offset;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
@@ -388,6 +367,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
+ /* We can't wait for rendering with pagefaults disabled */
+ if (obj->active && in_atomic())
+ return -EFAULT;
+
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
return ret;
@@ -461,15 +444,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- int ret;
-
+ int ret = 0;
+
+ /* This is the fast path and we cannot handle a pagefault whilst
+ * holding the struct mutex lest the user pass in the relocations
+ * contained within a mmaped bo. In such a case the page fault
+ * handler would call i915_gem_fault() and we would try to
+ * acquire the struct mutex again. Obviously this is bad and so
+ * lockdep complains vehemently.
+ */
+ pagefault_disable();
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
- return ret;
+ break;
}
+ pagefault_enable();
- return 0;
+ return ret;
}
static int
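
The fast path above pairs with the "obj->active && in_atomic()" bail-out added earlier in this file: pagefault_disable() guarantees a faulting user access returns -EFAULT instead of recursing into i915_gem_fault() while struct_mutex is held, and that -EFAULT is what sends the caller to the slow path which drops the lock. Kernel-style pseudocode of the fast/slow split; a shape sketch under those assumptions, not a compilable excerpt from the driver:

static int relocate_fast_then_slow(struct drm_device *dev,
                                   void __user *user_relocs,
                                   void *buf, size_t len)
{
    int ret;

    mutex_lock(&dev->struct_mutex);
    pagefault_disable();           /* faults now fail instead of recursing */
    ret = __copy_from_user_inatomic(buf, user_relocs, len) ? -EFAULT : 0;
    pagefault_enable();
    mutex_unlock(&dev->struct_mutex);

    if (ret == -EFAULT) {
        /* slow path: no lock held, so faulting the pages in is safe */
        if (copy_from_user(buf, user_relocs, len))
            return -EFAULT;
        ret = 0;
    }
    return ret;
}
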
@@ -575,7 +567,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (has_fenced_gpu_access) {
if (need_fence) {
- ret = i915_gem_object_get_fence(obj, ring, 1);
+ ret = i915_gem_object_get_fence(obj, ring);
if (ret)
break;
} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -690,11 +682,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj;
-
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
@@ -749,8 +739,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i)) {
- ret = i915_gem_flush_ring(dev,
- &dev_priv->ring[i],
+ ret = i915_gem_flush_ring(&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
@@ -772,9 +761,9 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
if (from == NULL || to == from)
return 0;
- /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
- return i915_gem_object_wait_rendering(obj, true);
+ /* XXX gpu semaphores are implicated in various hard hangs on SNB */
+ if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+ return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
@@ -789,7 +778,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
if (request == NULL)
return -ENOMEM;
- ret = i915_add_request(obj->base.dev, NULL, request, from);
+ ret = i915_add_request(from, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -803,6 +792,39 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
}
static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
+{
+ u32 plane, flip_mask;
+ int ret;
+
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
+
+ return 0;
+}
+
+
+static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
@@ -810,19 +832,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct change_domains cd;
int ret;
- cd.invalidate_domains = 0;
- cd.flush_domains = 0;
- cd.flush_rings = 0;
+ memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- cd.invalidate_domains,
- cd.flush_domains);
-#endif
ret = i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
@@ -831,6 +845,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
return ret;
}
+ if (cd.flips) {
+ ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+ if (ret)
+ return ret;
+ }
+
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_sync_rings(obj, ring);
if (ret)
@@ -877,47 +897,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}
-static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
- struct list_head *objects)
-{
- struct drm_i915_gem_object *obj;
- int flips;
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
- flips = 0;
- list_for_each_entry(obj, objects, exec_list) {
- if (obj->base.write_domain)
- flips |= atomic_read(&obj->pending_flip);
- }
- if (flips) {
- int plane, flip_mask, ret;
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- }
- }
-
- return 0;
-}
-
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct intel_ring_buffer *ring,
@@ -926,6 +905,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
+ u32 old_read = obj->base.read_domains;
+ u32 old_write = obj->base.write_domain;
+
+
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
@@ -939,9 +922,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
intel_mark_busy(ring->dev, obj);
}
- trace_i915_gem_object_change_domain(obj,
- obj->base.read_domains,
- obj->base.write_domain);
+ trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
@@ -963,14 +944,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 4)
invalidate |= I915_GEM_DOMAIN_SAMPLER;
if (ring->flush(ring, invalidate, 0)) {
- i915_gem_next_request_seqno(dev, ring);
+ i915_gem_next_request_seqno(ring);
return;
}
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL || i915_add_request(dev, file, request, ring)) {
- i915_gem_next_request_seqno(dev, ring);
+ if (request == NULL || i915_add_request(ring, file, request)) {
+ i915_gem_next_request_seqno(ring);
kfree(request);
}
}
@@ -1000,10 +981,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@@ -1113,7 +1090,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
@@ -1170,11 +1147,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
- if (ret)
- goto err;
-
- seqno = i915_gem_next_request_seqno(dev, ring);
+ seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU can not handle its semaphore value wrapping,
@@ -1189,6 +1162,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ trace_i915_gem_ring_dispatch(ring, seqno);
+
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@@ -1245,11 +1220,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
@@ -1330,17 +1300,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
if (args->buffer_count < 1) {
DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (exec2_list == NULL)
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+ args->buffer_count);
if (exec2_list == NULL) {
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
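
The allocation change above is the usual pattern for large, short-lived arrays: try a cheap, physically contiguous kmalloc but tell the allocator not to retry hard or warn on failure, then fall back to the vmalloc-backed drm_malloc_ab(). A kernel-style sketch of the generic form; the free path has to cope with whichever allocator succeeded (drm_free_large() does):

static void *alloc_large_array(size_t nmemb, size_t size)
{
    void *p;

    if (size && nmemb > (size_t)-1 / size)
        return NULL;                              /* overflow check */

    p = kmalloc(nmemb * size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
    if (p == NULL)
        p = vmalloc(nmemb * size);                /* what drm_malloc_ab does */
    return p;
}
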
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..281ad3d6115d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -284,14 +284,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- int ret;
-
- ret = i915_gem_check_is_wedged(dev);
- if (ret)
- return ret;
+ int ret = 0;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL)
+ if (&obj->base == NULL)
return -ENOENT;
if (!i915_tiling_ok(dev,
@@ -349,14 +345,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
- obj->tiling_changed = true;
- obj->tiling_mode = args->tiling_mode;
- obj->stride = args->stride;
+ /* Rebind if we need a change of alignment */
+ if (!obj->map_and_fenceable) {
+ u32 unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(obj);
+ if (obj->gtt_offset & (unfenced_alignment - 1))
+ ret = i915_gem_object_unbind(obj);
+ }
+
+ if (ret == 0) {
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
+ }
}
+ /* we have to maintain this existing ABI... */
+ args->stride = obj->stride;
+ args->tiling_mode = obj->tiling_mode;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
/**
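
The rebind logic above only unbinds when the object's current GTT offset violates the alignment the new tiling mode demands; for a power-of-two alignment that is a mask of the low bits. A standalone check, with arbitrary example offsets:

#include <assert.h>
#include <stdint.h>

static int needs_rebind(uint64_t gtt_offset, uint32_t unfenced_alignment)
{
    /* a power-of-two alignment is violated iff any low bit is set */
    return (gtt_offset & (unfenced_alignment - 1)) != 0;
}

int main(void)
{
    assert(!needs_rebind(0x100000, 0x100000)); /* 1MiB offset, 1MiB alignment */
    assert( needs_rebind(0x180000, 0x100000)); /* misaligned by 512KiB */
    return 0;
}
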
@@ -371,7 +380,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL)
+ if (&obj->base == NULL)
return -ENOENT;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..188b497e5076 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,21 +85,11 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline u32
-i915_pipestat(int pipe)
-{
- if (pipe == 0)
- return PIPEASTAT;
- if (pipe == 1)
- return PIPEBSTAT;
- BUG();
-}
-
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != mask) {
- u32 reg = i915_pipestat(pipe);
+ u32 reg = PIPESTAT(pipe);
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
@@ -112,7 +102,7 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != 0) {
- u32 reg = i915_pipestat(pipe);
+ u32 reg = PIPESTAT(pipe);
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
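
PIPEASTAT/PIPEBSTAT and friends collapse into parameterized macros throughout this series because the pipe register banks sit a fixed stride apart (pipe B 0x1000 above pipe A on these parts). The shape of those macros is sketched below; the 0x70024/0x71024 offsets follow i915_reg.h of this era but treat them as illustrative rather than authoritative:

#define _PIPEASTAT        0x70024
#define _PIPEBSTAT        0x71024
/* interpolate between the pipe A and pipe B addresses */
#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))
#define PIPESTAT(pipe)    _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
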
@@ -171,12 +161,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
- high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
- low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+ high_frame = PIPEFRAME(pipe);
+ low_frame = PIPEFRAMEPIXEL(pipe);
/*
* High & low register fields aren't synchronized, so make sure
@@ -197,11 +187,11 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
+ int reg = PIPE_FRMCOUNT_GM45(pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
@@ -219,7 +209,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
@@ -316,6 +306,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
@@ -365,7 +357,7 @@ static void notify_ring(struct drm_device *dev,
return;
seqno = ring->get_seqno(ring);
- trace_i915_gem_request_complete(dev, seqno);
+ trace_i915_gem_request_complete(ring, seqno);
ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
@@ -417,6 +409,7 @@ static void pch_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 pch_iir;
+ int pipe;
pch_iir = I915_READ(SDEIIR);
@@ -437,13 +430,11 @@ static void pch_irq_handler(struct drm_device *dev)
if (pch_iir & SDE_POISON)
DRM_ERROR("PCH poison interrupt\n");
- if (pch_iir & SDE_FDI_MASK) {
- u32 fdia, fdib;
-
- fdia = I915_READ(FDI_RXA_IIR);
- fdib = I915_READ(FDI_RXB_IIR);
- DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
- }
+ if (pch_iir & SDE_FDI_MASK)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
@@ -648,9 +639,14 @@ static void
i915_error_state_free(struct drm_device *dev,
struct drm_i915_error_state *error)
{
- i915_error_object_free(error->batchbuffer[0]);
- i915_error_object_free(error->batchbuffer[1]);
- i915_error_object_free(error->ringbuffer);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
+ i915_error_object_free(error->batchbuffer[i]);
+
+ for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
+ i915_error_object_free(error->ringbuffer[i]);
+
kfree(error->active_bo);
kfree(error->overlay);
kfree(error);
@@ -765,7 +761,7 @@ static void i915_capture_error_state(struct drm_device *dev)
struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
unsigned long flags;
- int i;
+ int i, pipe;
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
@@ -773,19 +769,21 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error)
return;
+ /* Account for pipe specific data like PIPE*STAT */
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
- DRM_DEBUG_DRIVER("generating error event\n");
+ DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+ dev->primary->index);
error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
- error->pipeastat = I915_READ(PIPEASTAT);
- error->pipebstat = I915_READ(PIPEBSTAT);
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
error->instpm = I915_READ(INSTPM);
error->error = 0;
if (INTEL_INFO(dev)->gen >= 6) {
@@ -824,15 +822,16 @@ static void i915_capture_error_state(struct drm_device *dev)
}
i915_gem_record_fences(dev, error);
- /* Record the active batchbuffers */
- for (i = 0; i < I915_NUM_RINGS; i++)
+ /* Record the active batch and ring buffers */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
error->batchbuffer[i] =
i915_error_first_batchbuffer(dev_priv,
&dev_priv->ring[i]);
- /* Record the ringbuffer */
- error->ringbuffer = i915_error_object_create(dev_priv,
- dev_priv->ring[RCS].obj);
+ error->ringbuffer[i] =
+ i915_error_object_create(dev_priv,
+ dev_priv->ring[i].obj);
+ }
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
@@ -905,6 +904,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
+ int pipe;
if (!eir)
return;
@@ -953,14 +953,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
- u32 pipea_stats = I915_READ(PIPEASTAT);
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
-
- printk(KERN_ERR "memory refresh error\n");
- printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
- pipea_stats);
- printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
- pipeb_stats);
+ printk(KERN_ERR "memory refresh error:\n");
+ for_each_pipe(pipe)
+ printk(KERN_ERR "pipe %c stat: 0x%08x\n",
+ pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
@@ -1074,10 +1070,10 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
- int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+ int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else {
- int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
+ int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
@@ -1097,12 +1093,13 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv;
u32 iir, new_iir;
- u32 pipea_stats, pipeb_stats;
+ u32 pipe_stats[I915_MAX_PIPES];
u32 vblank_status;
int vblank = 0;
unsigned long irqflags;
int irq_received;
- int ret = IRQ_NONE;
+ int ret = IRQ_NONE, pipe;
+ bool blc_event = false;
atomic_inc(&dev_priv->irq_received);
@@ -1125,27 +1122,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- pipea_stats = I915_READ(PIPEASTAT);
- pipeb_stats = I915_READ(PIPEBSTAT);
-
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
- /*
- * Clear the PIPE(A|B)STAT regs before the IIR
- */
- if (pipea_stats & 0x8000ffff) {
- if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe a underrun\n");
- I915_WRITE(PIPEASTAT, pipea_stats);
- irq_received = 1;
- }
-
- if (pipeb_stats & 0x8000ffff) {
- if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe b underrun\n");
- I915_WRITE(PIPEBSTAT, pipeb_stats);
- irq_received = 1;
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
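
for_each_pipe(), used throughout this interrupt rework, is at this point just a counted loop, with pipe_name() turning the index into a letter for log messages. Likely definitions are sketched below; I915_MAX_PIPES of 2 is an assumption matching the two-pipe hardware this code handles (later kernels bound the loop by the device's real pipe count):

#define I915_MAX_PIPES   2
#define for_each_pipe(p) for ((p) = 0; (p) < I915_MAX_PIPES; (p)++)
#define pipe_name(p)     ((p) + 'A')   /* 0 -> 'A', 1 -> 'B' */
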
@@ -1196,27 +1189,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip_plane(dev, 1);
}
- if (pipea_stats & vblank_status &&
- drm_handle_vblank(dev, 0)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, 0);
- intel_finish_page_flip(dev, 0);
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & vblank_status &&
+ drm_handle_vblank(dev, pipe)) {
+ vblank++;
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
}
- }
- if (pipeb_stats & vblank_status &&
- drm_handle_vblank(dev, 1)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, 1);
- intel_finish_page_flip(dev, 1);
- }
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
}
- if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
- (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
- (iir & I915_ASLE_INTERRUPT))
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
/* With MSI, interrupts are only generated when iir
@@ -1266,16 +1254,6 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->counter;
}
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
- if (dev_priv->trace_irq_seqno == 0 &&
- ring->irq_get(ring))
- dev_priv->trace_irq_seqno = seqno;
-}
-
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1375,7 +1353,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ /* maintain vblank delivery even in deep C-states */
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
return 0;
}
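[editor's note] The INSTPM write uses the hardware's masked-register convention: the high 16 bits select which of the low 16 bits the write may change. Writing only INSTPM_AGPBUSY_DIS << 16 therefore clears the disable bit (keeping AGPBUSY#-based interrupt delivery alive in deep C-states), while the disable path in i915_disable_vblank below sets mask and bit together. A sketch of the convention (helper names are invented for illustration):

    /* Masked-register write idiom; valid for bits in the low 16 bits. */
    static inline u32 masked_bit_set(u32 bit)   { return (bit << 16) | bit; }
    static inline u32 masked_bit_clear(u32 bit) { return (bit << 16); }

    /* keep delivery in C3: I915_WRITE(INSTPM, masked_bit_clear(INSTPM_AGPBUSY_DIS)); */
    /* back to default:     I915_WRITE(INSTPM, masked_bit_set(INSTPM_AGPBUSY_DIS));   */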
@@ -1388,6 +1371,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM,
+ INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+
if (HAS_PCH_SPLIT(dev))
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
@@ -1398,16 +1385,6 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void i915_enable_interrupt (struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_PCH_SPLIT(dev))
- intel_opregion_enable_asle(dev);
- dev_priv->irq_enabled = 1;
-}
-
-
/* Set the vblank monitor pipe
*/
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -1644,14 +1621,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
POSTING_READ(GTIER);
if (HAS_PCH_CPT(dev)) {
- hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
- SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
+ hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+ SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
} else {
- hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
- hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
- I915_WRITE(FDI_RXA_IMR, 0);
- I915_WRITE(FDI_RXB_IMR, 0);
+ hotplug_mask = (SDE_CRT_HOTPLUG |
+ SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG |
+ SDE_PORTD_HOTPLUG |
+ SDE_AUX_MASK);
}
dev_priv->pch_irq_mask = ~hotplug_mask;
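[editor's note] SDEIMR follows the usual masked-interrupt convention (a set bit masks the source), so the driver stores the complement of the hotplug bits it wants delivered. The rest of the PCH bring-up, elided from this hunk, follows the common ack-unmask-enable sequence; sketched here from the surrounding code, not quoted from the patch:

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));      /* ack anything stale */
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); /* 1 = masked */
    I915_WRITE(SDEIER, hotplug_mask);           /* enable the sources */
    POSTING_READ(SDEIER);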
@@ -1674,6 +1653,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
atomic_set(&dev_priv->irq_received, 0);
@@ -1691,8 +1671,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
}
I915_WRITE(HWSTAM, 0xeffe);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
POSTING_READ(IER);
@@ -1804,6 +1784,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
if (!dev_priv)
return;
@@ -1821,12 +1802,13 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
}
I915_WRITE(HWSTAM, 0xffffffff);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
- I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe),
+ I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15d94c63918c..f39ac3a0fa93 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -405,9 +405,12 @@
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
#define INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
-#define FW_BLC2 0x020dc
+#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define FW_BLC_SELF_EN_MASK (1<<31)
#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
@@ -706,9 +709,9 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
-#define DPLL_A 0x06014
-#define DPLL_B 0x06018
-#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
+#define _DPLL_A 0x06014
+#define _DPLL_B 0x06018
+#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
@@ -779,7 +782,7 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define DPLL_A_MD 0x0601c /* 965+ only */
+#define _DPLL_A_MD 0x0601c /* 965+ only */
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@@ -816,14 +819,14 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-#define DPLL_B_MD 0x06020 /* 965+ only */
-#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
-#define FPA0 0x06040
-#define FPA1 0x06044
-#define FPB0 0x06048
-#define FPB1 0x0604c
-#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
-#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
+#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+#define _FPA0 0x06040
+#define _FPA1 0x06044
+#define _FPB0 0x06048
+#define _FPB1 0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
@@ -962,8 +965,9 @@
* Palette regs
*/
-#define PALETTE_A 0x0a000
-#define PALETTE_B 0x0a800
+#define _PALETTE_A 0x0a000
+#define _PALETTE_B 0x0a800
+#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
/* MCH MMIO space */
@@ -1267,32 +1271,32 @@
*/
/* Pipe A timing regs */
-#define HTOTAL_A 0x60000
-#define HBLANK_A 0x60004
-#define HSYNC_A 0x60008
-#define VTOTAL_A 0x6000c
-#define VBLANK_A 0x60010
-#define VSYNC_A 0x60014
-#define PIPEASRC 0x6001c
-#define BCLRPAT_A 0x60020
+#define _HTOTAL_A 0x60000
+#define _HBLANK_A 0x60004
+#define _HSYNC_A 0x60008
+#define _VTOTAL_A 0x6000c
+#define _VBLANK_A 0x60010
+#define _VSYNC_A 0x60014
+#define _PIPEASRC 0x6001c
+#define _BCLRPAT_A 0x60020
/* Pipe B timing regs */
-#define HTOTAL_B 0x61000
-#define HBLANK_B 0x61004
-#define HSYNC_B 0x61008
-#define VTOTAL_B 0x6100c
-#define VBLANK_B 0x61010
-#define VSYNC_B 0x61014
-#define PIPEBSRC 0x6101c
-#define BCLRPAT_B 0x61020
-
-#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
+#define _HTOTAL_B 0x61000
+#define _HBLANK_B 0x61004
+#define _HSYNC_B 0x61008
+#define _VTOTAL_B 0x6100c
+#define _VBLANK_B 0x61010
+#define _VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+
+#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
/* VGA port control */
#define ADPA 0x61100
@@ -1386,6 +1390,7 @@
#define SDVO_ENCODING_HDMI (0x2 << 10)
/** Required for HDMI operation */
#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define SDVO_COLOR_RANGE_16_235 (1 << 8)
#define SDVO_BORDER_ENABLE (1 << 7)
#define SDVO_AUDIO_ENABLE (1 << 6)
/** New with 965, default is to be set */
@@ -1441,8 +1446,13 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
+#define LVDS_PIPE_MASK (1 << 30)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY (1 << 21)
+#define LVDS_HSYNC_POLARITY (1 << 20)
+
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
@@ -1476,6 +1486,9 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
+#define LVDS_PIPE_ENABLED(V, P) \
+ (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
+
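[editor's note] LVDS_PIPE_ENABLED() folds the two questions "is the port on?" and "is it driving this pipe?" into one compare: it masks out everything but the pipe-select and port-enable bits and checks them against the expected pattern for pipe P. Parallel DP_PIPE_ENABLED, HDMI_PIPE_ENABLED and ADPA_PIPE_ENABLED helpers appear further down in this patch. A usage sketch (the call site is illustrative, not from the patch):

    /* Does the LVDS output currently drive 'pipe' (0 or 1)? */
    u32 val = I915_READ(LVDS);
    if (LVDS_PIPE_ENABLED(val, pipe))
            /* port enabled and selecting this pipe */;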
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
@@ -2064,6 +2077,10 @@
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
+#define DP_PIPE_MASK (1 << 30)
+
+#define DP_PIPE_ENABLED(V, P) \
+ (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -2206,8 +2223,8 @@
* which is after the LUTs, so we want the bytes for our color format.
* For our current usage, this is always 3, one byte for R, G and B.
*/
-#define PIPEA_GMCH_DATA_M 0x70050
-#define PIPEB_GMCH_DATA_M 0x71050
+#define _PIPEA_GMCH_DATA_M 0x70050
+#define _PIPEB_GMCH_DATA_M 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
@@ -2215,8 +2232,8 @@
#define PIPE_GMCH_DATA_M_MASK (0xffffff)
-#define PIPEA_GMCH_DATA_N 0x70054
-#define PIPEB_GMCH_DATA_N 0x71054
+#define _PIPEA_GMCH_DATA_N 0x70054
+#define _PIPEB_GMCH_DATA_N 0x71054
#define PIPE_GMCH_DATA_N_MASK (0xffffff)
/*
@@ -2230,20 +2247,25 @@
* Attributes and VB-ID.
*/
-#define PIPEA_DP_LINK_M 0x70060
-#define PIPEB_DP_LINK_M 0x71060
+#define _PIPEA_DP_LINK_M 0x70060
+#define _PIPEB_DP_LINK_M 0x71060
#define PIPEA_DP_LINK_M_MASK (0xffffff)
-#define PIPEA_DP_LINK_N 0x70064
-#define PIPEB_DP_LINK_N 0x71064
+#define _PIPEA_DP_LINK_N 0x70064
+#define _PIPEB_DP_LINK_N 0x71064
#define PIPEA_DP_LINK_N_MASK (0xffffff)
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
/* Display & cursor control */
/* Pipe A */
-#define PIPEADSL 0x70000
+#define _PIPEADSL 0x70000
#define DSL_LINEMASK 0x00000fff
-#define PIPEACONF 0x70008
+#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -2269,7 +2291,7 @@
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define PIPEASTAT 0x70024
+#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
@@ -2305,10 +2327,12 @@
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
-#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
-#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
+#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
+#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
+#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2470,20 +2494,21 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
-#define PIPEAFRAMEHIGH 0x70040
+#define _PIPEAFRAMEHIGH 0x70040
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
-#define PIPEAFRAMEPIXEL 0x70044
+#define _PIPEAFRAMEPIXEL 0x70044
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
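[editor's note] The algorithm the comment at the top of this hunk spells out -- re-read the high half until it is stable across the low-half read -- is how the driver builds a consistent frame count from the split registers. A sketch using the new per-pipe accessors (variable names are illustrative):

    u32 high1, high2, low, frame;
    do {
            high1 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
            low   = I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK;
            high2 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
    } while (high1 != high2);
    frame = (high1 << 8) | (low >> PIPE_FRAME_LOW_SHIFT);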
/* GM45+ just has to be different */
-#define PIPEA_FRMCOUNT_GM45 0x70040
-#define PIPEA_FLIPCOUNT_GM45 0x70044
+#define _PIPEA_FRMCOUNT_GM45 0x70040
+#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
/* Cursor A & B regs */
-#define CURACNTR 0x70080
+#define _CURACNTR 0x70080
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
@@ -2504,23 +2529,23 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define CURABASE 0x70084
-#define CURAPOS 0x70088
+#define _CURABASE 0x70084
+#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
-#define CURBCNTR 0x700c0
-#define CURBBASE 0x700c4
-#define CURBPOS 0x700c8
+#define _CURBCNTR 0x700c0
+#define _CURBBASE 0x700c4
+#define _CURBPOS 0x700c8
-#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
-#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
-#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
/* Display A control */
-#define DSPACNTR 0x70180
+#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -2534,9 +2559,10 @@
#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+#define DISPPLANE_SEL_PIPE_SHIFT 24
+#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE_A 0
-#define DISPPLANE_SEL_PIPE_B (1<<24)
+#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
#define DISPPLANE_SRC_KEY_DISABLE 0
#define DISPPLANE_LINE_DOUBLE (1<<20)
@@ -2545,20 +2571,20 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
-#define DSPAADDR 0x70184
-#define DSPASTRIDE 0x70188
-#define DSPAPOS 0x7018C /* reserved */
-#define DSPASIZE 0x70190
-#define DSPASURF 0x7019C /* 965+ only */
-#define DSPATILEOFF 0x701A4 /* 965+ only */
-
-#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
-#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
-#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
-#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
-#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
-#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
-#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
+#define _DSPAADDR 0x70184
+#define _DSPASTRIDE 0x70188
+#define _DSPAPOS 0x7018C /* reserved */
+#define _DSPASIZE 0x70190
+#define _DSPASURF 0x7019C /* 965+ only */
+#define _DSPATILEOFF 0x701A4 /* 965+ only */
+
+#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
/* VBIOS flags */
#define SWF00 0x71410
@@ -2576,27 +2602,27 @@
#define SWF32 0x7241c
/* Pipe B */
-#define PIPEBDSL 0x71000
-#define PIPEBCONF 0x71008
-#define PIPEBSTAT 0x71024
-#define PIPEBFRAMEHIGH 0x71040
-#define PIPEBFRAMEPIXEL 0x71044
-#define PIPEB_FRMCOUNT_GM45 0x71040
-#define PIPEB_FLIPCOUNT_GM45 0x71044
+#define _PIPEBDSL 0x71000
+#define _PIPEBCONF 0x71008
+#define _PIPEBSTAT 0x71024
+#define _PIPEBFRAMEHIGH 0x71040
+#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEB_FRMCOUNT_GM45 0x71040
+#define _PIPEB_FLIPCOUNT_GM45 0x71044
/* Display B control */
-#define DSPBCNTR 0x71180
+#define _DSPBCNTR 0x71180
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define DSPBADDR 0x71184
-#define DSPBSTRIDE 0x71188
-#define DSPBPOS 0x7118C
-#define DSPBSIZE 0x71190
-#define DSPBSURF 0x7119C
-#define DSPBTILEOFF 0x711A4
+#define _DSPBADDR 0x71184
+#define _DSPBSTRIDE 0x71188
+#define _DSPBPOS 0x7118C
+#define _DSPBSIZE 0x71190
+#define _DSPBSURF 0x7119C
+#define _DSPBTILEOFF 0x711A4
/* VBIOS regs */
#define VGACNTRL 0x71400
@@ -2650,68 +2676,80 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-#define PIPEA_DATA_M1 0x60030
+#define _PIPEA_DATA_M1 0x60030
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
-#define PIPEA_DATA_N1 0x60034
+#define _PIPEA_DATA_N1 0x60034
#define PIPE_DATA_N1_OFFSET 0
-#define PIPEA_DATA_M2 0x60038
+#define _PIPEA_DATA_M2 0x60038
#define PIPE_DATA_M2_OFFSET 0
-#define PIPEA_DATA_N2 0x6003c
+#define _PIPEA_DATA_N2 0x6003c
#define PIPE_DATA_N2_OFFSET 0
-#define PIPEA_LINK_M1 0x60040
+#define _PIPEA_LINK_M1 0x60040
#define PIPE_LINK_M1_OFFSET 0
-#define PIPEA_LINK_N1 0x60044
+#define _PIPEA_LINK_N1 0x60044
#define PIPE_LINK_N1_OFFSET 0
-#define PIPEA_LINK_M2 0x60048
+#define _PIPEA_LINK_M2 0x60048
#define PIPE_LINK_M2_OFFSET 0
-#define PIPEA_LINK_N2 0x6004c
+#define _PIPEA_LINK_N2 0x6004c
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are the same, starting at 0x61000 */
-#define PIPEB_DATA_M1 0x61030
-#define PIPEB_DATA_N1 0x61034
+#define _PIPEB_DATA_M1 0x61030
+#define _PIPEB_DATA_N1 0x61034
-#define PIPEB_DATA_M2 0x61038
-#define PIPEB_DATA_N2 0x6103c
+#define _PIPEB_DATA_M2 0x61038
+#define _PIPEB_DATA_N2 0x6103c
-#define PIPEB_LINK_M1 0x61040
-#define PIPEB_LINK_N1 0x61044
+#define _PIPEB_LINK_M1 0x61040
+#define _PIPEB_LINK_N1 0x61044
-#define PIPEB_LINK_M2 0x61048
-#define PIPEB_LINK_N2 0x6104c
+#define _PIPEB_LINK_M2 0x61048
+#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
-#define PFA_CTL_1 0x68080
-#define PFB_CTL_1 0x68880
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1 0x68080
+#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
#define PF_FILTER_EDGE_ENHANCE (2<<23)
#define PF_FILTER_EDGE_SOFTEN (3<<23)
-#define PFA_WIN_SZ 0x68074
-#define PFB_WIN_SZ 0x68874
-#define PFA_WIN_POS 0x68070
-#define PFB_WIN_POS 0x68870
+#define _PFA_WIN_SZ 0x68074
+#define _PFB_WIN_SZ 0x68874
+#define _PFA_WIN_POS 0x68070
+#define _PFB_WIN_POS 0x68870
+#define _PFA_VSCALE 0x68084
+#define _PFB_VSCALE 0x68884
+#define _PFA_HSCALE 0x68090
+#define _PFB_HSCALE 0x68890
+
+#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
/* legacy palette */
-#define LGC_PALETTE_A 0x4a000
-#define LGC_PALETTE_B 0x4a800
+#define _LGC_PALETTE_A 0x4a000
+#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
@@ -2877,17 +2915,17 @@
#define PCH_GMBUS4 0xc5110
#define PCH_GMBUS5 0xc5120
-#define PCH_DPLL_A 0xc6014
-#define PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
-#define PCH_FPA0 0xc6040
+#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
-#define PCH_FPA1 0xc6044
-#define PCH_FPB0 0xc6048
-#define PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
-#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB0 0xc6048
+#define _PCH_FPB1 0xc604c
+#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -2906,6 +2944,7 @@
#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
#define DREF_SSC4_DOWNSPREAD (0<<6)
#define DREF_SSC4_CENTERSPREAD (1<<6)
#define DREF_SSC1_DISABLE (0<<1)
@@ -2938,60 +2977,69 @@
/* transcoder */
-#define TRANS_HTOTAL_A 0xe0000
+#define _TRANS_HTOTAL_A 0xe0000
#define TRANS_HTOTAL_SHIFT 16
#define TRANS_HACTIVE_SHIFT 0
-#define TRANS_HBLANK_A 0xe0004
+#define _TRANS_HBLANK_A 0xe0004
#define TRANS_HBLANK_END_SHIFT 16
#define TRANS_HBLANK_START_SHIFT 0
-#define TRANS_HSYNC_A 0xe0008
+#define _TRANS_HSYNC_A 0xe0008
#define TRANS_HSYNC_END_SHIFT 16
#define TRANS_HSYNC_START_SHIFT 0
-#define TRANS_VTOTAL_A 0xe000c
+#define _TRANS_VTOTAL_A 0xe000c
#define TRANS_VTOTAL_SHIFT 16
#define TRANS_VACTIVE_SHIFT 0
-#define TRANS_VBLANK_A 0xe0010
+#define _TRANS_VBLANK_A 0xe0010
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
-#define TRANS_VSYNC_A 0xe0014
+#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
-#define TRANSA_DATA_M1 0xe0030
-#define TRANSA_DATA_N1 0xe0034
-#define TRANSA_DATA_M2 0xe0038
-#define TRANSA_DATA_N2 0xe003c
-#define TRANSA_DP_LINK_M1 0xe0040
-#define TRANSA_DP_LINK_N1 0xe0044
-#define TRANSA_DP_LINK_M2 0xe0048
-#define TRANSA_DP_LINK_N2 0xe004c
-
-#define TRANS_HTOTAL_B 0xe1000
-#define TRANS_HBLANK_B 0xe1004
-#define TRANS_HSYNC_B 0xe1008
-#define TRANS_VTOTAL_B 0xe100c
-#define TRANS_VBLANK_B 0xe1010
-#define TRANS_VSYNC_B 0xe1014
-
-#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
-#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
-#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
-#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
-#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
-#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
-
-#define TRANSB_DATA_M1 0xe1030
-#define TRANSB_DATA_N1 0xe1034
-#define TRANSB_DATA_M2 0xe1038
-#define TRANSB_DATA_N2 0xe103c
-#define TRANSB_DP_LINK_M1 0xe1040
-#define TRANSB_DP_LINK_N1 0xe1044
-#define TRANSB_DP_LINK_M2 0xe1048
-#define TRANSB_DP_LINK_N2 0xe104c
-
-#define TRANSACONF 0xf0008
-#define TRANSBCONF 0xf1008
-#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
+#define _TRANSA_DATA_M1 0xe0030
+#define _TRANSA_DATA_N1 0xe0034
+#define _TRANSA_DATA_M2 0xe0038
+#define _TRANSA_DATA_N2 0xe003c
+#define _TRANSA_DP_LINK_M1 0xe0040
+#define _TRANSA_DP_LINK_N1 0xe0044
+#define _TRANSA_DP_LINK_M2 0xe0048
+#define _TRANSA_DP_LINK_N2 0xe004c
+
+#define _TRANS_HTOTAL_B 0xe1000
+#define _TRANS_HBLANK_B 0xe1004
+#define _TRANS_HSYNC_B 0xe1008
+#define _TRANS_VTOTAL_B 0xe100c
+#define _TRANS_VBLANK_B 0xe1010
+#define _TRANS_VSYNC_B 0xe1014
+
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
+
+#define _TRANSB_DATA_M1 0xe1030
+#define _TRANSB_DATA_N1 0xe1034
+#define _TRANSB_DATA_M2 0xe1038
+#define _TRANSB_DATA_N2 0xe103c
+#define _TRANSB_DP_LINK_M1 0xe1040
+#define _TRANSB_DP_LINK_N1 0xe1044
+#define _TRANSB_DP_LINK_M2 0xe1048
+#define _TRANSB_DP_LINK_N2 0xe104c
+
+#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
+#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
+#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
+#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
+#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
+#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
+#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
+#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
+
+#define _TRANSACONF 0xf0008
+#define _TRANSBCONF 0xf1008
+#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
@@ -3009,18 +3057,19 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
-#define FDI_RXA_CHICKEN 0xc200c
-#define FDI_RXB_CHICKEN 0xc2010
-#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
-#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
+#define _FDI_RXA_CHICKEN 0xc200c
+#define _FDI_RXB_CHICKEN 0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
/* CPU: FDI_TX */
-#define FDI_TXA_CTL 0x60100
-#define FDI_TXB_CTL 0x61100
-#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
+#define _FDI_TXA_CTL 0x60100
+#define _FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -3060,9 +3109,9 @@
#define FDI_SCRAMBLING_DISABLE (1<<7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
-#define FDI_RXA_CTL 0xf000c
-#define FDI_RXB_CTL 0xf100c
-#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
+#define _FDI_RXA_CTL 0xf000c
+#define _FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
/* train, dp width same as FDI_TX */
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
@@ -3087,15 +3136,15 @@
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
-#define FDI_RXA_MISC 0xf0010
-#define FDI_RXB_MISC 0xf1010
-#define FDI_RXA_TUSIZE1 0xf0030
-#define FDI_RXA_TUSIZE2 0xf0038
-#define FDI_RXB_TUSIZE1 0xf1030
-#define FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
-#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
-#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define _FDI_RXA_TUSIZE1 0xf0030
+#define _FDI_RXA_TUSIZE2 0xf0038
+#define _FDI_RXB_TUSIZE1 0xf1030
+#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -3110,12 +3159,12 @@
#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
-#define FDI_RXA_IIR 0xf0014
-#define FDI_RXA_IMR 0xf0018
-#define FDI_RXB_IIR 0xf1014
-#define FDI_RXB_IMR 0xf1018
-#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
-#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
+#define _FDI_RXA_IIR 0xf0014
+#define _FDI_RXA_IMR 0xf0018
+#define _FDI_RXB_IIR 0xf1014
+#define _FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
@@ -3145,11 +3194,15 @@
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+#define ADPA_PIPE_ENABLED(V, P) \
+ (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
+
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER_A (0)
#define TRANSCODER_B (1 << 30)
+#define TRANSCODER_MASK (1 << 30)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3165,6 +3218,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
+#define HDMI_PIPE_ENABLED(V, P) \
+ (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
+
/* PCH SDVOB multiplex with HDMIB */
#define PCH_SDVOB HDMIB
@@ -3240,6 +3296,7 @@
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_NONE (3<<29)
#define TRANS_DP_PORT_SEL_MASK (3<<29)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_ENH_FRAMING (1<<18)
@@ -3271,6 +3328,8 @@
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define GT_FIFO_FREE_ENTRIES 0x120008
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -3288,15 +3347,28 @@
#define GEN6_RP_DOWN_TIMEOUT 0xA010
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
+#define GEN6_CAGF_SHIFT 8
+#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
#define GEN6_RP_MEDIA_IS_GFX (1<<8)
#define GEN6_RP_ENABLE (1<<7)
-#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
-#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
+#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
+#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
+#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_CUR_UP_EI 0xA050
+#define GEN6_CURICONT_MASK 0xffffff
+#define GEN6_RP_CUR_UP 0xA054
+#define GEN6_CURBSYTAVG_MASK 0xffffff
+#define GEN6_RP_PREV_UP 0xA058
+#define GEN6_RP_CUR_DOWN_EI 0xA05C
+#define GEN6_CURIAVG_MASK 0xffffff
+#define GEN6_RP_CUR_DOWN 0xA060
+#define GEN6_RP_PREV_DOWN 0xA064
#define GEN6_RP_UP_EI 0xA068
#define GEN6_RP_DOWN_EI 0xA06C
#define GEN6_RP_IDLE_HYSTERSIS 0xA070
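[editor's note] The new GEN6_CAGF fields expose the current actual graphics frequency in RPSTAT1. Decoding is a mask-and-shift; the 50 MHz scaling below is an assumption carried over from how GEN6_FREQUENCY(x) requests are encoded, so treat this as a sketch rather than driver code:

    u32 rpstat = I915_READ(GEN6_RPSTAT1);
    u32 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
    u32 cur_freq_mhz = cagf * 50;   /* assumed 50 MHz units */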
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0521ecf26017..da474153a0a2 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,11 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (HAS_PCH_SPLIT(dev)) {
- dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
- } else {
- dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
- }
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
}
@@ -46,7 +45,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
@@ -54,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
return;
if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
@@ -68,7 +67,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
@@ -76,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
return;
if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
@@ -241,12 +240,12 @@ static void i915_save_modeset_reg(struct drm_device *dev)
return;
/* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(CURAPOS);
- dev_priv->saveCURABASE = I915_READ(CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+ dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
@@ -256,118 +255,118 @@ static void i915_save_modeset_reg(struct drm_device *dev)
}
/* Pipe & plane A info */
- dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
- dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+ dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
- dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
- dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
+ dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
- dev_priv->saveFPA0 = I915_READ(FPA0);
- dev_priv->saveFPA1 = I915_READ(FPA1);
- dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ dev_priv->saveFPA0 = I915_READ(_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
- dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
- dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
- dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
- dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
- dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
- dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+ dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+ dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
- dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
- dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
- dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
-
- dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
- dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
-
- dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
- dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
- dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
-
- dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
- dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
- dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
- dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
- dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
- dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
- dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
- }
-
- dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
- dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
- dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+ dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPASURF = I915_READ(DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+ dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
- dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
+ dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
- dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+ dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
- dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
- dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
+ dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
- dev_priv->saveFPB0 = I915_READ(FPB0);
- dev_priv->saveFPB1 = I915_READ(FPB1);
- dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ dev_priv->saveFPB0 = I915_READ(_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
- dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
- dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
- dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
- dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
- dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
- dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+ dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+ dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
- dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
- dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
- dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
-
- dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
- dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
-
- dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
- dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
- dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
-
- dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
- dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
- dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
- dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
- dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
- dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
- dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
- }
-
- dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
- dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+ dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+ dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
- dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+ dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
@@ -426,19 +425,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = PCH_DPLL_A;
- dpll_b_reg = PCH_DPLL_B;
- fpa0_reg = PCH_FPA0;
- fpb0_reg = PCH_FPB0;
- fpa1_reg = PCH_FPA1;
- fpb1_reg = PCH_FPB1;
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
} else {
- dpll_a_reg = DPLL_A;
- dpll_b_reg = DPLL_B;
- fpa0_reg = FPA0;
- fpb0_reg = FPB0;
- fpa1_reg = FPA1;
- fpb1_reg = FPB1;
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
}
if (HAS_PCH_SPLIT(dev)) {
@@ -461,60 +460,60 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- POSTING_READ(DPLL_A_MD);
+ I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
- I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
- I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
- I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
- I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
- I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
- I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
- I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
- I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+ I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
- I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
- I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
- I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
- I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
- I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
- I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
- I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
}
/* Restore plane info */
- I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+ I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
- I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
- I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+ I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
@@ -530,68 +529,68 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- POSTING_READ(DPLL_B_MD);
+ I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
- I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
- I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
- I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
- I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
- I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
- I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
- I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
- I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+ I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
- I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
- I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
- I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
- I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
- I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
- I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
- I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
}
/* Restore plane info */
- I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+ I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
- I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
- I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
- I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+ I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
@@ -653,14 +652,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveDP_B = I915_READ(DP_B);
dev_priv->saveDP_C = I915_READ(DP_C);
dev_priv->saveDP_D = I915_READ(DP_D);
- dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
- dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
- dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
- dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
- dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
- dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
- dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
- dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
+ dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
}
/* FIXME: save TV & SDVO state */
@@ -699,14 +698,14 @@ void i915_restore_display(struct drm_device *dev)
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
- I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
- I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
- I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
- I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
- I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
- I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
- I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
/* This is only meaningful in non-KMS mode */
@@ -808,8 +807,8 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
dev_priv->saveGTIMR = I915_READ(GTIMR);
- dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
- dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+ dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
dev_priv->saveMCHBAR_RENDER_STANDBY =
I915_READ(RSTDBYCTL);
} else {
@@ -857,11 +856,11 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
I915_WRITE(GTIMR, dev_priv->saveGTIMR);
- I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
- I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
} else {
- I915_WRITE (IER, dev_priv->saveIER);
- I915_WRITE (IMR, dev_priv->saveIMR);
+ I915_WRITE(IER, dev_priv->saveIER);
+ I915_WRITE(IMR, dev_priv->saveIMR);
}
/* Clock gating state */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 7f0fc3ed61aa..d623fefbfaca 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -7,6 +7,7 @@
#include <drm/drmP.h>
#include "i915_drv.h"
+#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -16,9 +17,7 @@
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
-
TP_PROTO(struct drm_i915_gem_object *obj),
-
TP_ARGS(obj),
TP_STRUCT__entry(
@@ -35,33 +34,51 @@ TRACE_EVENT(i915_gem_object_create,
);
TRACE_EVENT(i915_gem_object_bind,
-
- TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
-
- TP_ARGS(obj, gtt_offset, mappable),
+ TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
+ TP_ARGS(obj, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, gtt_offset)
+ __field(u32, offset)
+ __field(u32, size)
__field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->gtt_offset = gtt_offset;
+ __entry->offset = obj->gtt_space->start;
+ __entry->size = obj->gtt_space->size;
__entry->mappable = mappable;
),
- TP_printk("obj=%p, gtt_offset=%08x%s",
- __entry->obj, __entry->gtt_offset,
+ TP_printk("obj=%p, offset=%08x size=%x%s",
+ __entry->obj, __entry->offset, __entry->size,
__entry->mappable ? ", mappable" : "")
);
-TRACE_EVENT(i915_gem_object_change_domain,
+TRACE_EVENT(i915_gem_object_unbind,
+ TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, size)
+ ),
- TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->offset = obj->gtt_space->start;
+ __entry->size = obj->gtt_space->size;
+ ),
- TP_ARGS(obj, old_read_domains, old_write_domain),
+ TP_printk("obj=%p, offset=%08x size=%x",
+ __entry->obj, __entry->offset, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
+ TP_ARGS(obj, old_read, old_write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
@@ -71,177 +88,264 @@ TRACE_EVENT(i915_gem_object_change_domain,
TP_fast_assign(
__entry->obj = obj;
- __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
- __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
+ __entry->read_domains = obj->base.read_domains | (old_read << 16);
+ __entry->write_domain = obj->base.write_domain | (old_write << 16);
),
- TP_printk("obj=%p, read=%04x, write=%04x",
+ TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
__entry->obj,
- __entry->read_domains, __entry->write_domain)
+ __entry->read_domains >> 16,
+ __entry->read_domains & 0xffff,
+ __entry->write_domain >> 16,
+ __entry->write_domain & 0xffff)
);
-DECLARE_EVENT_CLASS(i915_gem_object,
+TRACE_EVENT(i915_gem_object_pwrite,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_ARGS(obj, offset, len),
- TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, len)
+ ),
- TP_ARGS(obj),
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->offset = offset;
+ __entry->len = len;
+ ),
+
+ TP_printk("obj=%p, offset=%u, len=%u",
+ __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_pread,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
+ __entry->offset = offset;
+ __entry->len = len;
),
- TP_printk("obj=%p", __entry->obj)
+ TP_printk("obj=%p, offset=%u, len=%u",
+ __entry->obj, __entry->offset, __entry->len)
);
-DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+TRACE_EVENT(i915_gem_object_fault,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+ TP_ARGS(obj, index, gtt, write),
+
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, index)
+ __field(bool, gtt)
+ __field(bool, write)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->index = index;
+ __entry->gtt = gtt;
+ __entry->write = write;
+ ),
+ TP_printk("obj=%p, %s index=%u %s",
+ __entry->obj,
+ __entry->gtt ? "GTT" : "CPU",
+ __entry->index,
+ __entry->write ? ", writable" : "")
+);
+
+DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj),
- TP_ARGS(obj)
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
);
-DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+ TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj)
+);
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
TP_PROTO(struct drm_i915_gem_object *obj),
-
TP_ARGS(obj)
);
-DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+TRACE_EVENT(i915_gem_evict,
+ TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
+ TP_ARGS(dev, size, align, mappable),
- TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, size)
+ __field(u32, align)
+ __field(bool, mappable)
+ ),
- TP_ARGS(obj)
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->size = size;
+ __entry->align = align;
+ __entry->mappable = mappable;
+ ),
+
+ TP_printk("dev=%d, size=%d, align=%d %s",
+ __entry->dev, __entry->size, __entry->align,
+ __entry->mappable ? ", mappable" : "")
);
-/* batch tracing */
+TRACE_EVENT(i915_gem_evict_everything,
+ TP_PROTO(struct drm_device *dev, bool purgeable),
+ TP_ARGS(dev, purgeable),
-TRACE_EVENT(i915_gem_request_submit,
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(bool, purgeable)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->purgeable = purgeable;
+ ),
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_printk("dev=%d%s",
+ __entry->dev,
+ __entry->purgeable ? ", purgeable only" : "")
+);
- TP_ARGS(dev, seqno),
+TRACE_EVENT(i915_gem_ring_dispatch,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
__entry->seqno = seqno;
- i915_trace_irq_get(dev, seqno);
+ i915_trace_irq_get(ring, seqno);
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_flush,
-
- TP_PROTO(struct drm_device *dev, u32 seqno,
- u32 flush_domains, u32 invalidate_domains),
-
- TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+TRACE_EVENT(i915_gem_ring_flush,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+ TP_ARGS(ring, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, seqno)
- __field(u32, flush_domains)
- __field(u32, invalidate_domains)
+ __field(u32, ring)
+ __field(u32, invalidate)
+ __field(u32, flush)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- __entry->flush_domains = flush_domains;
- __entry->invalidate_domains = invalidate_domains;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
+ __entry->invalidate = invalidate;
+ __entry->flush = flush;
),
- TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
- __entry->dev, __entry->seqno,
- __entry->flush_domains, __entry->invalidate_domains)
+ TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
+ __entry->dev, __entry->ring,
+ __entry->invalidate, __entry->flush)
);
DECLARE_EVENT_CLASS(i915_gem_request,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno),
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
__entry->seqno = seqno;
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
+);
- TP_ARGS(dev, seqno)
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DECLARE_EVENT_CLASS(i915_ring,
-
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev),
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
),
- TP_printk("dev=%u", __entry->dev)
+ TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev)
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev)
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring)
);
TRACE_EVENT(i915_flip_request,
@@ -281,26 +385,29 @@ TRACE_EVENT(i915_flip_complete,
);
TRACE_EVENT(i915_reg_rw,
- TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+ TP_PROTO(bool write, u32 reg, u64 val, int len),
- TP_ARGS(cmd, reg, val, len),
+ TP_ARGS(write, reg, val, len),
TP_STRUCT__entry(
- __field(int, cmd)
- __field(uint32_t, reg)
- __field(uint64_t, val)
- __field(int, len)
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
),
TP_fast_assign(
- __entry->cmd = cmd;
+ __entry->val = (u64)val;
__entry->reg = reg;
- __entry->val = (uint64_t)val;
+ __entry->write = write;
__entry->len = len;
),
- TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
- __entry->cmd, __entry->reg, __entry->val, __entry->len)
+ TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
);
#endif /* _I915_TRACE_H_ */
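All of the reworked events above key on the ring rather than the device, so the generated trace_<name>() stubs change signature to match. A minimal caller-side sketch (the wrapper function is hypothetical; the real call sites are in the GEM and ring-buffer code):

	/* DEFINE_EVENT() above generates a trace_i915_gem_request_add() stub
	 * that compiles to a disabled branch when the event is off. */
	static void example_emit_request(struct intel_ring_buffer *ring, u32 seqno)
	{
		trace_i915_gem_request_add(ring, seqno);
	}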
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0b44956c336b..fb5b4d426ae0 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -226,29 +226,49 @@ static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct bdb_sdvo_lvds_options *sdvo_lvds_options;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
+ int index;
- sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
- if (!sdvo_lvds_options)
- return;
+ index = i915_vbt_sdvo_panel_type;
+ if (index == -1) {
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ index = sdvo_lvds_options->panel_type;
+ }
dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
if (!dvo_timing)
return;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
-
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(panel_fixed_mode,
- dvo_timing + sdvo_lvds_options->panel_type);
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
- return;
+ DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+ bool alternate)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ return alternate ? 66 : 48;
+ case 3:
+ case 4:
+ return alternate ? 100 : 96;
+ default:
+ return alternate ? 100 : 120;
+ }
}
static void
@@ -263,13 +283,8 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->int_tv_support = general->int_tv_support;
dev_priv->int_crt_support = general->int_crt_support;
dev_priv->lvds_use_ssc = general->enable_ssc;
-
- if (IS_I85X(dev))
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
- else if (IS_GEN5(dev) || IS_GEN6(dev))
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
- else
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+ dev_priv->lvds_ssc_freq =
+ intel_bios_ssc_frequency(dev, general->ssc_freq);
}
}
@@ -553,6 +568,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
/* LFP panel data */
@@ -565,7 +582,11 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* general features */
dev_priv->int_tv_support = 1;
dev_priv->int_crt_support = 1;
- dev_priv->lvds_use_ssc = 0;
+
+ /* Default to using SSC */
+ dev_priv->lvds_use_ssc = 1;
+ dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
/* eDP data */
dev_priv->edp.bpp = 18;
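For reference, the mapping intel_bios_ssc_frequency() above encodes, where "alternate" is the VBT ssc_freq bit (a summary of the function's switch, values in MHz):

	/* gen 2:    48 default,  66 alternate
	 * gen 3/4:  96 default, 100 alternate
	 * gen 5+:  120 default, 100 alternate
	 * e.g. intel_bios_ssc_frequency(dev, true) on a gen 4 part returns 100.
	 */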
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8a77ff4a7237..d03fc05b39c0 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -129,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
u32 adpa, dpll_md;
u32 adpa_reg;
- if (intel_crtc->pipe == 0)
- dpll_md_reg = DPLL_A_MD;
- else
- dpll_md_reg = DPLL_B_MD;
+ dpll_md_reg = DPLL_MD(intel_crtc->pipe);
if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
@@ -160,17 +157,16 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= PORT_TRANS_A_SEL_CPT;
else
adpa |= ADPA_PIPE_A_SELECT;
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_A, 0);
} else {
if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_B_SEL_CPT;
else
adpa |= ADPA_PIPE_B_SELECT;
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_B, 0);
}
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+
I915_WRITE(adpa_reg, adpa);
}
@@ -273,21 +269,6 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
-static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
-{
- u8 buf;
- struct i2c_msg msgs[] = {
- {
- .addr = 0xA0,
- .flags = 0,
- .len = 1,
- .buf = &buf,
- },
- };
- /* DDC monitor detect: Does it ACK a write to 0xA0? */
- return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
-}
-
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
@@ -297,11 +278,6 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
if (crt->base.type != INTEL_OUTPUT_ANALOG)
return false;
- if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
- DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
- return true;
- }
-
if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
struct edid *edid;
bool is_digital = false;
@@ -353,21 +329,12 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
DRM_DEBUG_KMS("starting load-detect on CRT\n");
- if (pipe == 0) {
- bclrpat_reg = BCLRPAT_A;
- vtotal_reg = VTOTAL_A;
- vblank_reg = VBLANK_A;
- vsync_reg = VSYNC_A;
- pipeconf_reg = PIPEACONF;
- pipe_dsl_reg = PIPEADSL;
- } else {
- bclrpat_reg = BCLRPAT_B;
- vtotal_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipeconf_reg = PIPEBCONF;
- pipe_dsl_reg = PIPEBDSL;
- }
+ bclrpat_reg = BCLRPAT(pipe);
+ vtotal_reg = VTOTAL(pipe);
+ vblank_reg = VBLANK(pipe);
+ vsync_reg = VSYNC(pipe);
+ pipeconf_reg = PIPECONF(pipe);
+ pipe_dsl_reg = PIPEDSL(pipe);
save_bclrpat = I915_READ(bclrpat_reg);
save_vtotal = I915_READ(vtotal_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b006536b3d2..e522c702b04e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -989,7 +989,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+ int pipestat_reg = PIPESTAT(pipe);
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
@@ -1058,6 +1058,616 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
}
}
+static const char *state_string(bool enabled)
+{
+ return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+
+/* For ILK+ */
+static void assert_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PCH PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
+#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ WARN(cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* ILK FDI PLL is always enabled */
+ if (dev_priv->info->gen == 5)
+ return;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int pp_reg, lvds_reg;
+ u32 val;
+ enum pipe panel_pipe = PIPE_A;
+ bool locked = true;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ pp_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ pp_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ val = I915_READ(pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+ panel_pipe = PIPE_B;
+
+ WARN(panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
+
+static void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ WARN(cur_state != state,
+ "pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+static void assert_plane_enabled(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ int reg;
+ u32 val;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ WARN(!(val & DISPLAY_PLANE_ENABLE),
+ "plane %c assertion failure, should be active but is disabled\n",
+ plane_name(plane));
+}
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg, i;
+ u32 val;
+ int cur_pipe;
+
+ /* Planes are fixed to pipes on ILK+ */
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ return;
+
+ /* Need to check both planes against the pipe */
+ for (i = 0; i < 2; i++) {
+ reg = DSPCNTR(i);
+ val = I915_READ(reg);
+ cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+ WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+ "plane %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(i), pipe_name(pipe));
+ }
+}
+
+static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ bool enabled;
+
+ val = I915_READ(PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+ bool enabled;
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ enabled = !!(val & TRANS_ENABLE);
+ WARN(enabled,
+ "transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ WARN(DP_PIPE_ENABLED(val, pipe),
+ "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ WARN(HDMI_PIPE_ENABLED(val, pipe),
+ "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ WARN(ADPA_PIPE_ENABLED(val, pipe),
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ WARN(LVDS_PIPE_ENABLED(val, pipe),
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
+}
+
+/**
+ * intel_enable_pll - enable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
+ * make sure the PLL reg is writable first though, since the panel write
+ * protect mechanism may be enabled.
+ *
+ * Note! This is for pre-ILK only.
+ */
+static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* No really, not for ILK+ */
+ BUG_ON(dev_priv->info->gen >= 5);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
+ assert_panel_unlocked(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+}
+
+/**
+ * intel_disable_pll - disable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe, making sure the pipe is off first.
+ *
+ * Note! This is for pre-ILK only.
+ */
+static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* Don't disable pipe A or the pipe A PLL if the quirk forces pipe A on */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+/**
+ * intel_enable_pch_pll - enable PCH PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* PCH refclock must be enabled first */
+ assert_pch_refclk_enabled(dev_priv);
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(200);
+}
+
+static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* Make sure transcoder isn't still depending on us */
+ assert_transcoder_disabled(dev_priv, pipe);
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(200);
+}
+
+static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* Make sure PCH DPLL is enabled */
+ assert_pch_pll_enabled(dev_priv, pipe);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, pipe);
+ assert_fdi_rx_enabled(dev_priv, pipe);
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ /*
+ * Make the BPC in the transcoder consistent with
+ * that in the pipeconf reg.
+ */
+ val &= ~PIPE_BPC_MASK;
+ val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ I915_WRITE(reg, val | TRANS_ENABLE);
+ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("failed to enable transcoder %d\n", pipe);
+}
+
+static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* FDI relies on the transcoder */
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+
+ /* Ports must be off as well */
+ assert_pch_ports_disabled(dev_priv, pipe);
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(reg, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("failed to disable transcoder\n");
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to enable
+ * @pch_port: on ILK+, whether this pipe drives a PCH port
+ *
+ * Enable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe is actually running (i.e. first vblank) before
+ * returning.
+ */
+static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool pch_port)
+{
+ int reg;
+ u32 val;
+
+ /*
+ * A pipe without a PLL won't actually be able to drive bits from
+ * a plane. On ILK+ the pipe PLLs are integrated, so we don't
+ * need the check.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv->dev))
+ assert_pll_enabled(dev_priv, pipe);
+ else {
+ if (pch_port) {
+ /* if driving the PCH, we need FDI enabled */
+ assert_fdi_rx_pll_enabled(dev_priv, pipe);
+ assert_fdi_tx_pll_enabled(dev_priv, pipe);
+ }
+ /* FIXME: assert CPU port conditions for SNB+ */
+ }
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ if (val & PIPECONF_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | PIPECONF_ENABLE);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to disable
+ *
+ * Disable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+static void intel_disable_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /*
+ * Make sure planes won't keep trying to pump pixels to us,
+ * or we might hang the display.
+ */
+ assert_planes_disabled(dev_priv, pipe);
+
+ /* Don't disable pipe A or the pipe A PLL if the quirk forces pipe A on */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ if ((val & PIPECONF_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_enable_plane - enable a display plane on a given pipe
+ * @dev_priv: i915 private structure
+ * @plane: plane to enable
+ * @pipe: pipe being fed
+ *
+ * Enable @plane on @pipe, making sure that @pipe is running first.
+ */
+static void intel_enable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* If the pipe isn't enabled, we can't pump pixels and may hang */
+ assert_pipe_enabled(dev_priv, pipe);
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ if (val & DISPLAY_PLANE_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ u32 reg = DSPADDR(plane);
+ I915_WRITE(reg, I915_READ(reg));
+}
+
+/**
+ * intel_disable_plane - disable a display plane
+ * @dev_priv: i915 private structure
+ * @plane: plane to disable
+ * @pipe: pipe consuming the data
+ *
+ * Disable @plane; should be an independent operation.
+ */
+static void intel_disable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+static void disable_pch_dp(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ if (DP_PIPE_ENABLED(val, pipe))
+ I915_WRITE(reg, val & ~DP_PORT_EN);
+}
+
+static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ if (HDMI_PIPE_ENABLED(val, pipe))
+ I915_WRITE(reg, val & ~PORT_ENABLE);
+}
+
+/* Disable any ports connected to this transcoder */
+static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 reg, val;
+
+ val = I915_READ(PCH_PP_CONTROL);
+ I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
+
+ disable_pch_dp(dev_priv, pipe, PCH_DP_B);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_C);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ if (ADPA_PIPE_ENABLED(val, pipe))
+ I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ if (LVDS_PIPE_ENABLED(val, pipe)) {
+ I915_WRITE(reg, val & ~LVDS_PORT_EN);
+ POSTING_READ(reg);
+ udelay(100);
+ }
+
+ disable_pch_hdmi(dev_priv, pipe, HDMIB);
+ disable_pch_hdmi(dev_priv, pipe, HDMIC);
+ disable_pch_hdmi(dev_priv, pipe, HDMID);
+}
+
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1163,7 +1773,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
return;
I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
- POSTING_READ(DPFC_CONTROL);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
@@ -1219,7 +1828,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1839,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1255,7 +1864,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
return;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
- POSTING_READ(ILK_DPFC_CONTROL);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
@@ -1390,7 +1998,7 @@ static void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled) {
+ if (tmp_crtc->enabled && tmp_crtc->fb) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -1461,6 +2069,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 alignment;
int ret;
@@ -1485,9 +2094,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
BUG();
}
+ dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
- return ret;
+ goto err_interruptible;
ret = i915_gem_object_set_to_display_plane(obj, pipelined);
if (ret)
@@ -1499,15 +2109,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* a fence as the cost is not that onerous.
*/
if (obj->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence(obj, pipelined, false);
+ ret = i915_gem_object_get_fence(obj, pipelined);
if (ret)
goto err_unpin;
}
+ dev_priv->mm.interruptible = true;
return 0;
err_unpin:
i915_gem_object_unpin(obj);
+err_interruptible:
+ dev_priv->mm.interruptible = true;
return ret;
}
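The mm.interruptible toggling above sets the contract for display-critical paths: GEM waits issued while the flag is false sleep uninterruptibly, so a modeset cannot be aborted halfway by a signal, and the flag must be restored on every exit path. A condensed sketch of the pattern (do_gpu_work() is an illustrative stand-in for the pin/flush/fence calls above):

	dev_priv->mm.interruptible = false;	/* enter the non-interruptible phase */
	ret = do_gpu_work(obj);
	dev_priv->mm.interruptible = true;	/* restored on success and error alike */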
@@ -1630,19 +2243,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
*/
- ret = i915_gem_object_flush_gpu(obj, false);
- if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ ret = i915_gem_object_flush_gpu(obj);
+ (void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -1753,8 +2366,13 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
u32 reg, temp, tries;
+ /* FDI needs bits from pipe & plane first */
+ assert_pipe_enabled(dev_priv, pipe);
+ assert_plane_enabled(dev_priv, plane);
+
 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
@@ -1784,7 +2402,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
udelay(150);
 /* Ironlake workaround, enable clock pointer after FDI enable */
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
+ }
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
@@ -1834,7 +2456,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
}
-static const int const snb_b_fdi_train_param [] = {
+static const int snb_b_fdi_train_param [] = {
FDI_LINK_TRAIN_400MV_0DB_SNB_B,
FDI_LINK_TRAIN_400MV_6DB_SNB_B,
FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -2003,12 +2625,60 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc)
}
}
-static void intel_flush_display_plane(struct drm_device *dev,
- int plane)
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg = DSPADDR(plane);
- I915_WRITE(reg, I915_READ(reg));
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_EN));
+ }
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
}
/*
@@ -2045,60 +2715,46 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
atomic_read(&obj->pending_flip) == 0);
}
-static void ironlake_crtc_enable(struct drm_crtc *crtc)
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- u32 reg, temp;
-
- if (intel_crtc->active)
- return;
-
- intel_crtc->active = true;
- intel_update_watermarks(dev);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0)
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- }
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
- ironlake_fdi_enable(crtc);
+ /*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
- /* Enable panel fitting for LVDS */
- if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
- PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
- dev_priv->pch_pf_pos);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
- dev_priv->pch_pf_size);
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ return false;
+ continue;
+ }
}
- /* Enable CPU pipe */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if ((temp & PIPECONF_ENABLE) == 0) {
- I915_WRITE(reg, temp | PIPECONF_ENABLE);
- POSTING_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
+ return true;
+}
- /* configure and enable CPU plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
- }
+/*
+ * Enable PCH resources required for PCH ports:
+ * - PCH PLLs
+ * - FDI training & RX/TX
+ * - update transcoder timings
+ * - DP transcoding bits
+ * - transcoder
+ */
+static void ironlake_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
@@ -2106,14 +2762,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
else
ironlake_fdi_link_train(crtc);
- /* enable PCH DPLL */
- reg = PCH_DPLL(pipe);
- temp = I915_READ(reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
- POSTING_READ(reg);
- udelay(200);
- }
+ intel_enable_pch_pll(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* Be sure PCH DPLL SEL is set */
@@ -2125,7 +2774,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_DPLL_SEL, temp);
}
- /* set transcoder timing */
+ /* set transcoder timing, panel must allow it */
+ assert_panel_unlocked(dev_priv, pipe);
I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
@@ -2172,18 +2822,55 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
}
- /* enable PCH transcoder */
- reg = TRANSCONF(pipe);
- temp = I915_READ(reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- temp &= ~PIPE_BPC_MASK;
- temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
- I915_WRITE(reg, temp | TRANS_ENABLE);
- if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
- DRM_ERROR("failed to enable transcoder %d\n", pipe);
+ intel_enable_transcoder(dev_priv, pipe);
+}
+
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 temp;
+ bool is_pch_port;
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0)
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ }
+
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_enable(crtc);
+ else
+ ironlake_fdi_disable(crtc);
+
+ /* Enable panel fitting for LVDS */
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ ironlake_pch_enable(crtc);
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
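Condensed, the restructured enable path for a PCH-driven pipe now reads as below; each helper carries the WARN-backed preconditions added earlier in this patch (an ordering sketch, not literal code):

	ironlake_fdi_enable(crtc);			/* FDI PLLs and initial RX/TX setup */
	intel_enable_pipe(dev_priv, pipe, true);	/* asserts the FDI PLLs when PCH-bound */
	intel_enable_plane(dev_priv, plane, pipe);	/* asserts the pipe is running */
	ironlake_pch_enable(crtc);			/* PCH PLL, FDI training, transcoder */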
@@ -2206,116 +2893,58 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
drm_vblank_off(dev, pipe);
intel_crtc_update_cursor(crtc, false);
- /* Disable display plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if (temp & DISPLAY_PLANE_ENABLE) {
- I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
- }
+ intel_disable_plane(dev_priv, plane, pipe);
if (dev_priv->cfb_plane == plane &&
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- /* disable cpu pipe, disable after all planes disabled */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if (temp & PIPECONF_ENABLE) {
- I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
- POSTING_READ(reg);
- /* wait for cpu pipe off, pipe state */
- intel_wait_for_pipe_off(dev, intel_crtc->pipe);
- }
+ intel_disable_pipe(dev_priv, pipe);
/* Disable PF */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
-
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
- POSTING_READ(reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
-
- POSTING_READ(reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp);
-
- POSTING_READ(reg);
- udelay(100);
+ ironlake_fdi_disable(crtc);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if (temp & LVDS_PORT_EN) {
- I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
- POSTING_READ(PCH_LVDS);
- udelay(100);
- }
- }
+ /* This is a horrible layering violation; we should be doing this in
+ * the connector/encoder ->prepare instead, but we don't always have
+ * enough information there about the config to know whether it will
+ * actually be necessary or just cause undesired flicker.
+ */
+ intel_disable_pch_ports(dev_priv, pipe);
- /* disable PCH transcoder */
- reg = TRANSCONF(plane);
- temp = I915_READ(reg);
- if (temp & TRANS_ENABLE) {
- I915_WRITE(reg, temp & ~TRANS_ENABLE);
- /* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
- DRM_ERROR("failed to disable transcoder\n");
- }
+ intel_disable_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ temp |= TRANS_DP_PORT_SEL_NONE;
I915_WRITE(reg, temp);
/* disable DPLL_SEL */
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0)
- temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
- else
+ switch (pipe) {
+ case 0:
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ break;
+ case 1:
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ break;
+ case 2:
+ /* FIXME: manage transcoder PLLs? */
+ temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
+ break;
+ default:
+ BUG(); /* wtf */
+ }
I915_WRITE(PCH_DPLL_SEL, temp);
}
/* disable PCH DPLL */
- reg = PCH_DPLL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
+ intel_disable_pch_pll(dev_priv, pipe);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
@@ -2372,9 +3001,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- (void) intel_overlay_switch_off(intel_crtc->overlay, false);
+ dev_priv->mm.interruptible = false;
+ (void) intel_overlay_switch_off(intel_crtc->overlay);
+ dev_priv->mm.interruptible = true;
mutex_unlock(&dev->struct_mutex);
}
@@ -2390,7 +3022,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 reg, temp;
if (intel_crtc->active)
return;
@@ -2398,42 +3029,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
- /* Enable the DPLL */
- reg = DPLL(pipe);
- temp = I915_READ(reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(reg, temp);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
- udelay(150);
-
- I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
- udelay(150);
-
- I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
- udelay(150);
- }
-
- /* Enable the pipe */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if ((temp & PIPECONF_ENABLE) == 0)
- I915_WRITE(reg, temp | PIPECONF_ENABLE);
-
- /* Enable the plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
- }
+ intel_enable_pll(dev_priv, pipe);
+ intel_enable_pipe(dev_priv, pipe, false);
+ intel_enable_plane(dev_priv, plane, pipe);
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
@@ -2450,7 +3048,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 reg, temp;
if (!intel_crtc->active)
return;
@@ -2465,45 +3062,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- /* Disable display plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if (temp & DISPLAY_PLANE_ENABLE) {
- I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- intel_flush_display_plane(dev, plane);
-
- /* Wait for vblank for the disable to take effect */
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
- }
-
- /* Don't disable pipe A or pipe A PLLs if needed */
- if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
- goto done;
-
- /* Next, disable display pipes */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if (temp & PIPECONF_ENABLE) {
- I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
-
- /* Wait for the pipe to turn off */
- POSTING_READ(reg);
- intel_wait_for_pipe_off(dev, pipe);
- }
-
- reg = DPLL(pipe);
- temp = I915_READ(reg);
- if (temp & DPLL_VCO_ENABLE) {
- I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
-
- /* Wait for the clocks to turn off. */
- POSTING_READ(reg);
- udelay(150);
- }
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
+ intel_disable_pll(dev_priv, pipe);
-done:
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
@@ -2565,7 +3127,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
break;
default:
- DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+ DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
break;
}
}
@@ -2762,77 +3324,77 @@ struct intel_watermark_params {
};
/* Pineview has different values for various configs */
-static struct intel_watermark_params pineview_display_wm = {
+static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params pineview_display_hplloff_wm = {
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_HPLLOFF_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params pineview_cursor_wm = {
+static const struct intel_watermark_params pineview_cursor_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params pineview_cursor_hplloff_wm = {
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params g4x_wm_info = {
+static const struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
G4X_MAX_WM,
G4X_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params g4x_cursor_wm_info = {
+static const struct intel_watermark_params g4x_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params i965_cursor_wm_info = {
+static const struct intel_watermark_params i965_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
I915_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params i945_wm_info = {
+static const struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
-static struct intel_watermark_params i915_wm_info = {
+static const struct intel_watermark_params i915_wm_info = {
I915_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
-static struct intel_watermark_params i855_wm_info = {
+static const struct intel_watermark_params i855_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
-static struct intel_watermark_params i830_wm_info = {
+static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
@@ -2840,31 +3402,28 @@ static struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
-static struct intel_watermark_params ironlake_display_wm_info = {
+static const struct intel_watermark_params ironlake_display_wm_info = {
ILK_DISPLAY_FIFO,
ILK_DISPLAY_MAXWM,
ILK_DISPLAY_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params ironlake_cursor_wm_info = {
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
ILK_CURSOR_FIFO,
ILK_CURSOR_MAXWM,
ILK_CURSOR_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params ironlake_display_srwm_info = {
+static const struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
ILK_DISPLAY_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params ironlake_cursor_srwm_info = {
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_CURSOR_SR_FIFO,
ILK_CURSOR_MAX_SRWM,
ILK_CURSOR_DFT_SRWM,
@@ -2872,31 +3431,28 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_FIFO_LINE_SIZE
};
-static struct intel_watermark_params sandybridge_display_wm_info = {
+static const struct intel_watermark_params sandybridge_display_wm_info = {
SNB_DISPLAY_FIFO,
SNB_DISPLAY_MAXWM,
SNB_DISPLAY_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params sandybridge_cursor_wm_info = {
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
SNB_CURSOR_FIFO,
SNB_CURSOR_MAXWM,
SNB_CURSOR_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params sandybridge_display_srwm_info = {
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
SNB_DISPLAY_SR_FIFO,
SNB_DISPLAY_MAX_SRWM,
SNB_DISPLAY_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
SNB_CURSOR_SR_FIFO,
SNB_CURSOR_MAX_SRWM,
SNB_CURSOR_DFT_SRWM,
@@ -2924,7 +3480,8 @@ static struct intel_watermark_params sandybridge_cursor_srwm_info = {
* will occur, and a display engine hang could result.
*/
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
- struct intel_watermark_params *wm,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
int pixel_size,
unsigned long latency_ns)
{
@@ -2942,7 +3499,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
- wm_size = wm->fifo_size - (entries_required + wm->guard_size);
+ wm_size = fifo_size - (entries_required + wm->guard_size);
DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
@@ -3115,15 +3672,28 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static void pineview_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int unused,
- int pixel_size)
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled && crtc->fb) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
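
single_enabled_crtc() deliberately returns NULL for both zero and two-or-more active CRTCs, which lets the callers below treat "exactly one pipe scanning out" as the only configuration where self-refresh is safe. A standalone model of the same scan, with a hypothetical struct standing in for drm_crtc and the mode_config list:

	struct fake_crtc { int enabled; void *fb; };

	/* Return the sole active entry, or NULL if none or more than one. */
	static struct fake_crtc *single_enabled(struct fake_crtc *v, int n)
	{
		struct fake_crtc *found = NULL;
		int i;

		for (i = 0; i < n; i++) {
			if (v[i].enabled && v[i].fb) {
				if (found)
					return NULL; /* second active CRTC */
				found = &v[i];
			}
		}
		return found;
	}
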
+
+static void pineview_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
- int sr_clock;
latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
@@ -3133,11 +3703,14 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
return;
}
- if (!planea_clock || !planeb_clock) {
- sr_clock = planea_clock ? planea_clock : planeb_clock;
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
/* Display SR */
- wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
@@ -3146,7 +3719,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
@@ -3154,7 +3728,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
- wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
@@ -3162,7 +3737,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
- wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
@@ -3180,125 +3756,229 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
}
}
-static void g4x_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int total_size, cacheline_size;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
- struct intel_watermark_params planea_params, planeb_params;
- unsigned long line_time_us;
- int sr_clock, sr_entries = 0, entries_required;
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
- /* Create copies of the base settings for each pipe */
- planea_params = planeb_params = g4x_wm_info;
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
+ return false;
+ }
- /* Grab a couple of global values before we overwrite them */
- total_size = planea_params.fifo_size;
- cacheline_size = planea_params.cacheline_size;
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
- planea_wm = entries_required + planea_params.guard_size;
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
- entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
- planeb_wm = entries_required + planeb_params.guard_size;
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
- cursora_wm = cursorb_wm = 16;
- cursor_sr = 32;
+ return true;
+}
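
g4x_compute_wm0() mixes the two estimation methods: the plane watermark uses the "small buffer" model (bytes fetched during the latency window) while the cursor uses the "large buffer" model (whole scanlines fetched during that window), and both grow by a TLB-miss allowance when a scanline no longer fits the FIFO. A worked example with hypothetical numbers:

	#include <stdio.h>

	int main(void)
	{
		int clock = 148500, htotal = 2200; /* hypothetical mode: kHz, pixels */
		int pixel_size = 4, latency_ns = 5000;

		/* small buffer: bytes consumed while waiting for memory */
		int small = ((clock * pixel_size / 1000) * latency_ns) / 1000; /* 2970 */

		/* large buffer: scanlines consumed, then bytes for a 64-wide cursor */
		int line_time_us = (htotal * 1000) / clock;                 /* 14us */
		int line_count = (latency_ns / line_time_us + 1000) / 1000; /* 1 line */
		int cursor = line_count * 64 * pixel_size;                  /* 256 */

		printf("plane bytes %d, cursor bytes %d\n", small, cursor);
		return 0;
	}
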
- DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
- /* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
- /* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
- sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
-
- entries_required = (((sr_latency_ns / line_time_us) +
- 1000) / 1000) * pixel_size * 64;
- entries_required = DIV_ROUND_UP(entries_required,
- g4x_cursor_wm_info.cacheline_size);
- cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
-
- if (cursor_sr > g4x_cursor_wm_info.max_wm)
- cursor_sr = g4x_cursor_wm_info.max_wm;
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", sr_entries, cursor_sr);
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- } else {
- /* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
}
- DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
- planea_wm, planeb_wm, sr_entries);
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
- planea_wm &= 0x3f;
- planeb_wm &= 0x3f;
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
- I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
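
The mask convention here: bit 0 for pipe A, bit 1 for pipe B, so is_power_of_2() is true exactly when one pipe contributed a valid watermark, and ffs(mask) - 1 recovers that pipe's index for the self-refresh computation. A quick standalone check — the classic power-of-two test and userspace ffs() from <strings.h> stand in for the kernel helpers:

	#include <assert.h>
	#include <strings.h>

	#define single_plane_enabled(mask) \
		((mask) != 0 && ((mask) & ((mask) - 1)) == 0)

	int main(void)
	{
		unsigned int enabled = 0;

		enabled |= 1; /* pipe A produced a valid wm0 */
		assert(single_plane_enabled(enabled) && ffs(enabled) - 1 == 0);

		enabled |= 2; /* pipe B too: two bits set, SR must stay off */
		assert(!single_plane_enabled(enabled));
		return 0;
	}
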
+static void g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, 0,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ else
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
- I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
- I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static void i965_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long line_time_us;
- int sr_clock, sr_entries, srwm = 1;
+ struct drm_crtc *crtc;
+ int srwm = 1;
int cursor_sr = 16;
/* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ line_time_us = ((htotal * 1000) / clock);
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
- sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
- srwm = I965_FIFO_SIZE - sr_entries;
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * 64;
- sr_entries = DIV_ROUND_UP(sr_entries,
+ entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
- (sr_entries + i965_cursor_wm_info.guard_size);
+ (entries + i965_cursor_wm_info.guard_size);
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3319,46 +3999,56 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
- (8 << 0));
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static void i9xx_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
- int total_size, cacheline_size, cwm, srwm = 1;
+ int cwm, srwm = 1;
+ int fifo_size;
int planea_wm, planeb_wm;
- struct intel_watermark_params planea_params, planeb_params;
- unsigned long line_time_us;
- int sr_clock, sr_entries = 0;
+ struct drm_crtc *crtc, *enabled = NULL;
- /* Create copies of the base settings for each pipe */
- if (IS_CRESTLINE(dev) || IS_I945GM(dev))
- planea_params = planeb_params = i945_wm_info;
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
else if (!IS_GEN2(dev))
- planea_params = planeb_params = i915_wm_info;
+ wm_info = &i915_wm_info;
else
- planea_params = planeb_params = i855_wm_info;
-
- /* Grab a couple of global values before we overwrite them */
- total_size = planea_params.fifo_size;
- cacheline_size = planea_params.cacheline_size;
-
- /* Update per-plane FIFO sizes */
- planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (crtc->enabled && crtc->fb) {
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (crtc->enabled && crtc->fb) {
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
- planea_wm = intel_calculate_wm(planea_clock, &planea_params,
- pixel_size, latency_ns);
- planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
- pixel_size, latency_ns);
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
/*
@@ -3366,39 +4056,39 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
*/
cwm = 2;
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && sr_hdisplay &&
- (!planea_clock || !planeb_clock)) {
+ if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ line_time_us = (htotal * 1000) / clock;
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
- sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
- srwm = total_size - sr_entries;
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else if (IS_I915GM(dev)) {
- /* 915M has a smaller SRWM field */
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
- I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
- }
- } else {
- /* Turn off self refresh if both pipes are enabled */
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
- } else if (IS_I915GM(dev)) {
- I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
- }
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3413,19 +4103,36 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
}
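
Worth highlighting in review: the rewritten i9xx path always kills self-refresh before touching FW_BLC, and only re-enables it at the very end when a single CRTC survived the scan. A compilable model of that ordering, with stub helpers in place of the FW_BLC_SELF/INSTPM register writes:

	#include <stdio.h>

	static void self_refresh_disable(void) { puts("SR off"); }
	static void program_watermarks(void)   { puts("FW_BLC updated"); }
	static void self_refresh_enable(void)  { puts("SR on"); }

	/* SR is never left running while the FIFO watermarks change. */
	static void update_wm_safely(int active_crtcs)
	{
		self_refresh_disable();
		program_watermarks();
		if (active_crtcs == 1)
			self_refresh_enable();
	}

	int main(void)
	{
		update_wm_safely(1);
		return 0;
	}
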
-static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
- int unused2, int unused3, int pixel_size)
+static void i830_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
int planea_wm;
- i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
- planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
- pixel_size, latency_ns);
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
@@ -3534,15 +4241,15 @@ static bool ironlake_check_srwm(struct drm_device *dev, int level,
/*
* Compute watermark values of WM[1-3],
*/
-static bool ironlake_compute_srwm(struct drm_device *dev, int level,
- int hdisplay, int htotal,
- int pixel_size, int clock, int latency_ns,
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *fbc_wm, int *display_wm, int *cursor_wm)
{
-
+ struct drm_crtc *crtc;
unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
int line_count, line_size;
int small, large;
int entries;
@@ -3552,6 +4259,12 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
return false;
}
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
@@ -3579,14 +4292,11 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
display, cursor);
}
-static void ironlake_update_wm(struct drm_device *dev,
- int planea_clock, int planeb_clock,
- int hdisplay, int htotal,
- int pixel_size)
+static void ironlake_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int fbc_wm, plane_wm, cursor_wm, enabled;
- int clock;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
enabled = 0;
if (ironlake_compute_wm0(dev, 0,
@@ -3600,7 +4310,7 @@ static void ironlake_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 1;
}
if (ironlake_compute_wm0(dev, 1,
@@ -3614,7 +4324,7 @@ static void ironlake_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 2;
}
/*
@@ -3625,14 +4335,13 @@ static void ironlake_update_wm(struct drm_device *dev,
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (enabled != 1)
+ if (!single_plane_enabled(enabled))
return;
-
- clock = planea_clock ? planea_clock : planeb_clock;
+ enabled = ffs(enabled) - 1;
/* WM1 */
- if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
- clock, ILK_READ_WM1_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3646,8 +4355,8 @@ static void ironlake_update_wm(struct drm_device *dev,
cursor_wm);
/* WM2 */
- if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
- clock, ILK_READ_WM2_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3666,15 +4375,12 @@ static void ironlake_update_wm(struct drm_device *dev,
*/
}
-static void sandybridge_update_wm(struct drm_device *dev,
- int planea_clock, int planeb_clock,
- int hdisplay, int htotal,
- int pixel_size)
+static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- int fbc_wm, plane_wm, cursor_wm, enabled;
- int clock;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
enabled = 0;
if (ironlake_compute_wm0(dev, 0,
@@ -3686,7 +4392,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 1;
}
if (ironlake_compute_wm0(dev, 1,
@@ -3698,7 +4404,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 2;
}
/*
@@ -3715,14 +4421,13 @@ static void sandybridge_update_wm(struct drm_device *dev,
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (enabled != 1)
+ if (!single_plane_enabled(enabled))
return;
-
- clock = planea_clock ? planea_clock : planeb_clock;
+ enabled = ffs(enabled) - 1;
/* WM1 */
- if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
- clock, SNB_READ_WM1_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3736,9 +4441,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
cursor_wm);
/* WM2 */
- if (!ironlake_compute_srwm(dev, 2,
- hdisplay, htotal, pixel_size,
- clock, SNB_READ_WM2_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3752,9 +4456,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
cursor_wm);
/* WM3 */
- if (!ironlake_compute_srwm(dev, 3,
- hdisplay, htotal, pixel_size,
- clock, SNB_READ_WM3_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3803,44 +4506,9 @@ static void sandybridge_update_wm(struct drm_device *dev,
static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- int sr_hdisplay = 0;
- unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
- int enabled = 0, pixel_size = 0;
- int sr_htotal = 0;
-
- if (!dev_priv->display.update_wm)
- return;
-
- /* Get the clock config from both planes */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->active) {
- enabled++;
- if (intel_crtc->plane == 0) {
- DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
- planea_clock = crtc->mode.clock;
- } else {
- DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
- planeb_clock = crtc->mode.clock;
- }
- sr_hdisplay = crtc->mode.hdisplay;
- sr_clock = crtc->mode.clock;
- sr_htotal = crtc->mode.htotal;
- if (crtc->fb)
- pixel_size = crtc->fb->bits_per_pixel / 8;
- else
- pixel_size = 4; /* by default */
- }
- }
- if (enabled <= 0)
- return;
-
- dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
- sr_hdisplay, sr_htotal, pixel_size);
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
}
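
After this hunk the per-pipe clock/hdisplay/htotal bookkeeping is gone from the core path: intel_update_watermarks() is a plain vtable dispatch, and each platform hook gathers its own CRTC state. A minimal model of the shape:

	struct display_funcs {
		void (*update_wm)(void *dev); /* NULL when unsupported */
	};

	static void update_watermarks(const struct display_funcs *f, void *dev)
	{
		if (f->update_wm)
			f->update_wm(dev);
	}
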
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -3872,6 +4540,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int ret;
struct fdi_m_n m_n = {0};
u32 reg, temp;
+ u32 lvds_sync = 0;
int target_clock;
drm_vblank_pre_modeset(dev, pipe);
@@ -4243,9 +4912,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
- dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPECONF_ENABLE;
- dpll |= DPLL_VCO_ENABLE;
+ if (!HAS_PCH_SPLIT(dev))
+ dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
@@ -4271,10 +4939,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* enable transcoder DPLL */
if (HAS_PCH_CPT(dev)) {
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0)
+ switch (pipe) {
+ case 0:
temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
- else
+ break;
+ case 1:
temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
+ break;
+ case 2:
+ /* FIXME: manage transcoder PLLs? */
+ temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
+ break;
+ default:
+ BUG();
+ }
I915_WRITE(PCH_DPLL_SEL, temp);
POSTING_READ(PCH_DPLL_SEL);
@@ -4324,6 +5002,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
else
temp &= ~LVDS_ENABLE_DITHER;
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ lvds_sync |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ lvds_sync |= LVDS_VSYNC_POLARITY;
+ if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
+ != lvds_sync) {
+ char flags[2] = "-+";
+ DRM_INFO("Changing LVDS panel from "
+ "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
+ flags[!(temp & LVDS_HSYNC_POLARITY)],
+ flags[!(temp & LVDS_VSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ temp |= lvds_sync;
+ }
I915_WRITE(reg, temp);
}
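
The char flags[2] = "-+" trick above indexes with a boolean: a set polarity bit means active-low, so !(reg & bit) selects '+' when the bit is clear. A self-contained demonstration, using a made-up bit position rather than the real LVDS_HSYNC_POLARITY value:

	#include <stdio.h>

	#define HSYNC_POLARITY (1u << 20) /* hypothetical bit, illustrative only */

	int main(void)
	{
		unsigned int temp = HSYNC_POLARITY; /* currently -hsync */
		unsigned int lvds_sync = 0;         /* target is +hsync */
		char flags[2] = "-+";

		/* prints "Changing from -hsync to +hsync" */
		printf("Changing from %chsync to %chsync\n",
		       flags[!(temp & HSYNC_POLARITY)],
		       flags[!(lvds_sync & HSYNC_POLARITY)]);
		return 0;
	}
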
@@ -4341,17 +5035,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else if (HAS_PCH_SPLIT(dev)) {
/* For non-DP output, clear any trans DP clock recovery setting.*/
- if (pipe == 0) {
- I915_WRITE(TRANSA_DATA_M1, 0);
- I915_WRITE(TRANSA_DATA_N1, 0);
- I915_WRITE(TRANSA_DP_LINK_M1, 0);
- I915_WRITE(TRANSA_DP_LINK_N1, 0);
- } else {
- I915_WRITE(TRANSB_DATA_M1, 0);
- I915_WRITE(TRANSB_DATA_N1, 0);
- I915_WRITE(TRANSB_DP_LINK_M1, 0);
- I915_WRITE(TRANSB_DP_LINK_N1, 0);
- }
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
@@ -4454,6 +5141,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
+ if (!HAS_PCH_SPLIT(dev))
+ intel_enable_pipe(dev_priv, pipe, false);
intel_wait_for_vblank(dev, pipe);
@@ -4464,6 +5153,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+ if (!HAS_PCH_SPLIT(dev))
+ intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -4480,7 +5172,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
+ int palreg = PALETTE(intel_crtc->pipe);
int i;
/* The clocks have to be on to load the palette. */
@@ -4489,8 +5181,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
- palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
- LGC_PALETTE_B;
+ palreg = LGC_PALETTE(intel_crtc->pipe);
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
@@ -4511,12 +5202,12 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
if (intel_crtc->cursor_visible == visible)
return;
- cntl = I915_READ(CURACNTR);
+ cntl = I915_READ(_CURACNTR);
if (visible) {
/* On these chipsets we can only modify the base whilst
* the cursor is disabled.
*/
- I915_WRITE(CURABASE, base);
+ I915_WRITE(_CURABASE, base);
cntl &= ~(CURSOR_FORMAT_MASK);
/* XXX width must be 64, stride 256 => 0x00 << 28 */
@@ -4525,7 +5216,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
CURSOR_FORMAT_ARGB;
} else
cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- I915_WRITE(CURACNTR, cntl);
+ I915_WRITE(_CURACNTR, cntl);
intel_crtc->cursor_visible = visible;
}
@@ -4539,7 +5230,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
bool visible = base != 0;
if (intel_crtc->cursor_visible != visible) {
- uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+ uint32_t cntl = I915_READ(CURCNTR(pipe));
if (base) {
cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
@@ -4548,12 +5239,12 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
- I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+ I915_WRITE(CURCNTR(pipe), cntl);
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
- I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+ I915_WRITE(CURBASE(pipe), base);
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -4603,7 +5294,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (!visible && !intel_crtc->cursor_visible)
return;
- I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
+ I915_WRITE(CURPOS(pipe), pos);
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
@@ -4643,7 +5334,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
}
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
- if (!obj)
+ if (&obj->base == NULL)
return -ENOENT;
if (obj->base.size < width * height * 4) {
@@ -4909,14 +5600,14 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ u32 dpll = I915_READ(DPLL(pipe));
u32 fp;
intel_clock_t clock;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = I915_READ(FP0(pipe));
else
- fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = I915_READ(FP1(pipe));
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev)) {
@@ -4998,10 +5689,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_display_mode *mode;
- int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ int htot = I915_READ(HTOTAL(pipe));
+ int hsync = I915_READ(HSYNC(pipe));
+ int vtot = I915_READ(VTOTAL(pipe));
+ int vsync = I915_READ(VSYNC(pipe));
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -5088,7 +5779,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
- POSTING_READ(dpll_reg);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
@@ -5110,7 +5800,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_reg = DPLL(pipe);
int dpll = I915_READ(dpll_reg);
if (HAS_PCH_SPLIT(dev))
@@ -5132,7 +5822,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
- dpll = I915_READ(dpll_reg);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
@@ -5158,7 +5847,6 @@ static void intel_idle_update(struct work_struct *work)
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- int enabled = 0;
if (!i915_powersave)
return;
@@ -5172,16 +5860,11 @@ static void intel_idle_update(struct work_struct *work)
if (!crtc->fb)
continue;
- enabled++;
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->busy)
intel_decrease_pllclock(crtc);
}
- if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) {
- DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- }
mutex_unlock(&dev->struct_mutex);
}
@@ -5206,17 +5889,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy) {
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- u32 fw_blc_self;
-
- DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
- fw_blc_self = I915_READ(FW_BLC_SELF);
- fw_blc_self &= ~FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
- }
+ if (!dev_priv->busy)
dev_priv->busy = true;
- } else
+ else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
@@ -5228,14 +5903,6 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
intel_fb = to_intel_framebuffer(crtc->fb);
if (intel_fb->obj == obj) {
if (!intel_crtc->busy) {
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- u32 fw_blc_self;
-
- DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
- fw_blc_self = I915_READ(FW_BLC_SELF);
- fw_blc_self &= ~FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
- }
/* Non-busy -> busy, upclock */
intel_increase_pllclock(crtc);
intel_crtc->busy = true;
@@ -5513,7 +6180,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
*/
pf = 0;
- pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
break;
@@ -5523,8 +6190,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(fb->pitch | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
- pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
+ pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
break;
}
@@ -5551,36 +6218,6 @@ cleanup_work:
return ret;
}
-static void intel_crtc_reset(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- /* Reset flags back to the 'unknown' status so that they
- * will be correctly set on the initial modeset.
- */
- intel_crtc->dpms_mode = -1;
-}
-
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
- .dpms = intel_crtc_dpms,
- .mode_fixup = intel_crtc_mode_fixup,
- .mode_set = intel_crtc_mode_set,
- .mode_set_base = intel_pipe_set_base,
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_disable,
-};
-
-static const struct drm_crtc_funcs intel_crtc_funcs = {
- .reset = intel_crtc_reset,
- .cursor_set = intel_crtc_cursor_set,
- .cursor_move = intel_crtc_cursor_move,
- .gamma_set = intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = intel_crtc_destroy,
- .page_flip = intel_crtc_page_flip,
-};
-
static void intel_sanitize_modesetting(struct drm_device *dev,
int pipe, int plane)
{
@@ -5613,24 +6250,46 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
pipe = !pipe;
/* Disable the plane and wait for it to stop reading from the pipe. */
- I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
+}
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
- return;
+ /* Reset flags back to the 'unknown' status so that they
+ * will be correctly set on the initial modeset.
+ */
+ intel_crtc->dpms_mode = -1;
- /* Switch off the pipe. */
- reg = PIPECONF(pipe);
- val = I915_READ(reg);
- if (val & PIPECONF_ENABLE) {
- I915_WRITE(reg, val & ~PIPECONF_ENABLE);
- intel_wait_for_pipe_off(dev, pipe);
- }
+ /* We need to fix up any BIOS configuration that conflicts with
+ * our expectations.
+ */
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .dpms = intel_crtc_dpms,
+ .mode_fixup = intel_crtc_mode_fixup,
+ .mode_set = intel_crtc_mode_set,
+ .mode_set_base = intel_pipe_set_base,
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_disable,
+};
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .reset = intel_crtc_reset,
+ .cursor_set = intel_crtc_cursor_set,
+ .cursor_move = intel_crtc_cursor_move,
+ .gamma_set = intel_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
+};
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -5680,8 +6339,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
-
- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -5918,7 +6575,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
int ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
- if (!obj)
+ if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
@@ -6203,7 +6860,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6240,18 +6897,18 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
18 << 24 |
6 << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_USE_NORMAL_FREQ |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_MAX |
- GEN6_RP_DOWN_BUSY_MIN);
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
@@ -6280,7 +6937,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
max_freq = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100);
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
/* In units of 100MHz */
@@ -6301,12 +6958,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
}
void intel_enable_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
/*
* Disable clock gating reported to work incorrectly according to the
@@ -6416,12 +7074,10 @@ void intel_enable_clock_gating(struct drm_device *dev)
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
- I915_WRITE(DSPACNTR,
- I915_READ(DSPACNTR) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- I915_WRITE(DSPBCNTR,
- I915_READ(DSPBCNTR) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
+ for_each_pipe(pipe)
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
}
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -6496,7 +7152,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
POSTING_READ(RSTDBYCTL);
}
- ironlake_disable_rc6(dev);
+ ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)
@@ -6776,10 +7432,6 @@ void intel_modeset_init(struct drm_device *dev)
}
dev->mode_config.fb_base = dev->agp->base;
- if (IS_MOBILE(dev) || !IS_GEN2(dev))
- dev_priv->num_pipe = 2;
- else
- dev_priv->num_pipe = 1;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 51cb4e36997f..cb8578b7e443 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -49,6 +49,7 @@ struct intel_dp {
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int force_audio;
+ uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
@@ -212,7 +213,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- /* only refuse the mode on non eDP since we have seen some wierd eDP panels
+ /* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
if (!is_edp(intel_dp) &&
(intel_dp_link_required(connector->dev, intel_dp, mode->clock)
@@ -685,6 +686,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4, bpp = 24;
struct intel_dp_m_n m_n;
+ int pipe = intel_crtc->pipe;
/*
* Find the lane count in the intel_encoder private
@@ -715,39 +717,19 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
- if (intel_crtc->pipe == 0) {
- I915_WRITE(TRANSA_DATA_M1,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n);
- I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m);
- I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n);
- } else {
- I915_WRITE(TRANSB_DATA_M1,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n);
- I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m);
- I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n);
- }
+ I915_WRITE(TRANSDATA_M1(pipe),
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
+ I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
} else {
- if (intel_crtc->pipe == 0) {
- I915_WRITE(PIPEA_GMCH_DATA_M,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(PIPEA_GMCH_DATA_N,
- m_n.gmch_n);
- I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
- I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
- } else {
- I915_WRITE(PIPEB_GMCH_DATA_M,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(PIPEB_GMCH_DATA_N,
- m_n.gmch_n);
- I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
- I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
- }
+ I915_WRITE(PIPE_GMCH_DATA_M(pipe),
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+ I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
@@ -760,8 +742,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_dp->DP = (DP_VOLTAGE_0_4 |
- DP_PRE_EMPHASIS_0);
+ intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -813,6 +795,40 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ /*
+ * If the panel wasn't on, make sure there's not a currently
+ * active PP sequence before enabling AUX VDD.
+ */
+ if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
+ msleep(dev_priv->panel_t3);
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp |= EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+}
+
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ msleep(dev_priv->panel_t12);
+}
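
These two helpers are meant to bracket AUX transactions while the panel is otherwise off; the DPCD read in the eDP init hunk later in this patch shows the intended calling pattern:

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
				       intel_dp->dpcd,
				       sizeof(intel_dp->dpcd));
	ironlake_edp_panel_vdd_off(intel_dp);
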
+
/* Returns true if the panel was already on when called */
static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
{
@@ -834,11 +850,6 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- /* Ouch. We need to wait here for some panels, like Dell e6510
- * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
- */
- msleep(300);
-
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
@@ -875,11 +886,6 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
-
- /* Ouch. We need to wait here for some panels, like Dell e6510
- * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
- */
- msleep(300);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -945,7 +951,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_off(dev);
if (!is_pch_edp(intel_dp))
ironlake_edp_pll_on(encoder);
else
@@ -959,10 +965,15 @@ static void intel_dp_commit(struct drm_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_vdd_on(intel_dp);
+
intel_dp_start_link_train(intel_dp);
- if (is_edp(intel_dp))
+ if (is_edp(intel_dp)) {
ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp);
+ }
intel_dp_complete_link_train(intel_dp);
@@ -988,9 +999,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
ironlake_edp_pll_off(encoder);
} else {
if (is_edp(intel_dp))
- ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_on(intel_dp);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
+ if (is_edp(intel_dp)) {
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp);
+ }
intel_dp_complete_link_train(intel_dp);
}
if (is_edp(intel_dp))
@@ -1508,9 +1523,13 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
- /* Can't disconnect eDP */
- if (is_edp(intel_dp))
- return connector_status_connected;
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ status = intel_panel_detect(intel_dp->base.base.dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_dp,
@@ -1662,6 +1681,7 @@ intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
@@ -1690,6 +1710,14 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_dp->color_range)
+ return 0;
+
+ intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
return -EINVAL;
done:
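
The val == !!intel_dp->color_range guard normalizes the stored bitfield to 0/1 before comparing it with the property value, so the early return fires no matter which bit encodes the range. A sketch of the idiom with an arbitrary non-zero bit:

	#include <assert.h>

	int main(void)
	{
		unsigned int color_range = 0x100; /* any non-zero range bit */
		unsigned long val = 1;            /* property value: 0 or 1 */

		/* !! collapses the bitfield to 0/1 for the comparison */
		assert(val == !!color_range);
		return 0;
	}
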
@@ -1809,6 +1837,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_dp->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
}
+
+ intel_attach_broadcast_rgb_property(connector);
}
void
@@ -1826,6 +1856,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (!intel_dp)
return;
+ intel_dp->output_reg = output_reg;
+ intel_dp->dpms_mode = -1;
+
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dp);
@@ -1865,10 +1898,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- intel_dp->output_reg = output_reg;
- intel_dp->has_audio = false;
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
-
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
@@ -1906,21 +1935,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
int ret;
- bool was_on;
+ u32 pp_on, pp_div;
- was_on = ironlake_edp_panel_on(intel_dp);
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ /* Get T3 & T12 values (note: VESA not bspec terminology) */
+ dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
+ dev_priv->panel_t3 /= 10; /* t3 in 100us units */
+ dev_priv->panel_t12 = pp_div & 0xf;
+ dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
+
+ ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
+ ironlake_edp_panel_vdd_off(intel_dp);
if (ret == sizeof(intel_dp->dpcd)) {
if (intel_dp->dpcd[0] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
- DRM_ERROR("failed to retrieve link info\n");
+ /* if this fails, presume the device is a ghost */
+ DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ intel_dp_destroy(&intel_connector->base);
+ return;
}
- if (!was_on)
- ironlake_edp_panel_off(dev);
}
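
The T3/T12 extraction above converts hardware units to milliseconds so the values can feed msleep() directly: bits 28:16 of PCH_PP_ON_DELAYS are in 100us steps (divide by 10), and the low nibble of PCH_PP_DIVISOR is in 100ms steps (multiply by 100). A worked decode with hypothetical register values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pp_on  = 0x03e80000; /* hypothetical PCH_PP_ON_DELAYS */
		unsigned int pp_div = 0x00000003; /* hypothetical PCH_PP_DIVISOR */

		int t3  = ((pp_on & 0x1fff0000) >> 16) / 10; /* 1000 * 100us = 100ms */
		int t12 = (pp_div & 0xf) * 100;              /* 3 * 100ms = 300ms */

		printf("T3 = %d ms, T12 = %d ms\n", t3, t12);
		return 0;
	}
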
intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2c431049963c..1d20712d527f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -39,7 +39,7 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && !in_dbg_master()) msleep(W); \
+ if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
} \
ret__; \
})
@@ -217,6 +217,13 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
return dev_priv->pipe_to_crtc_mapping[pipe];
}
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->plane_to_crtc_mapping[plane];
+}
+
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
@@ -230,6 +237,8 @@ struct intel_unpin_work {
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
+extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
@@ -260,6 +269,7 @@ extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern void intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -321,12 +331,12 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay,
- bool interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_fb_output_poll_changed(struct drm_device *dev);
+extern void intel_fb_restore_mode(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ea373283c93b..6eda1b51c636 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -178,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_reg = DPLL(pipe);
switch (dvo_reg) {
case DVOA:
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 512782728e51..ec49bae73382 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -264,3 +264,13 @@ void intel_fb_output_poll_changed(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
+
+void intel_fb_restore_mode(struct drm_device *dev)
+{
+ int ret;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
+ if (ret)
+ DRM_DEBUG("failed to restore crtc mode\n");
+}
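
intel_fb_restore_mode() exists so the driver can put the fbdev configuration back on screen when the last DRM master exits; elsewhere in this series it is wired into the lastclose path. A simplified sketch of that call site (the legacy UMS branch is elided):

void i915_driver_lastclose(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Closing the last master (e.g. X) restores the
                 * fbdev console mode. */
                intel_fb_restore_mode(dev);
                return;
        }

        /* ... legacy (UMS) teardown elided ... */
}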
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c635c9e357b9..f289b8642976 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -41,6 +41,7 @@ struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
+ uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
int force_audio;
@@ -124,6 +125,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+ sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -278,6 +280,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
ret = drm_connector_property_set_value(connector, property, val);
@@ -305,6 +308,14 @@ intel_hdmi_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_hdmi->color_range)
+ return 0;
+
+ intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
return -EINVAL;
done:
@@ -363,6 +374,8 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
}
+
+ intel_attach_broadcast_rgb_property(connector);
}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
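
The "Broadcast RGB" handling above follows a small convention: the enum index (0 = "Full", 1 = "Limited 16:235") is compared against !!color_range so that re-setting the current value is a no-op. A hypothetical helper restating that mapping (in the patch this logic is inlined in intel_hdmi_set_property()):

/* Returns true when the range changed and a modeset is needed. */
static bool example_update_color_range(struct intel_hdmi *intel_hdmi,
                                       uint64_t val)
{
        /* val is the enum index: 0 selects full range, 1 selects the
         * CE limited 16:235 range. */
        if (val == !!intel_hdmi->color_range)
                return false;   /* unchanged, nothing to do */

        intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
        return true;
}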
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 58040f68ed7a..d3b903bce7c5 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -259,7 +259,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
goto timeout;
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- return 0;
+ goto clear_err;
val = I915_READ(GMBUS3 + reg_offset);
do {
@@ -287,7 +287,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
goto timeout;
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- return 0;
+ goto clear_err;
val = loop = 0;
do {
@@ -302,14 +302,31 @@ gmbus_xfer(struct i2c_adapter *adapter,
if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
goto timeout;
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- return 0;
+ goto clear_err;
}
- return num;
+ goto done;
+
+clear_err:
+ /* Toggle the Software Clear Interrupt bit. This has the effect
+ * of resetting the GMBUS controller and so clearing the
+ * BUS_ERROR raised by the slave's NAK.
+ */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ I915_WRITE(GMBUS1 + reg_offset, 0);
+
+done:
+ /* Mark the GMBUS interface as disabled. We will re-enable it at the
+ * start of the next xfer; until then, let it sleep.
+ */
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+ return i;
timeout:
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
bus->reg0 & 0xff, bus->adapter.name);
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
if (!bus->force_bit)
@@ -384,7 +401,8 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = i | GMBUS_RATE_100KHZ;
/* XXX force bit banging until GMBUS is fully debugged */
- bus->force_bit = intel_gpio_create(dev_priv, i);
+ if (IS_GEN2(dev))
+ bus->force_bit = intel_gpio_create(dev_priv, i);
}
intel_i2c_reset(dev_priv->dev);
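
The new clear_err/done exits give gmbus_xfer() a single recovery-and-park sequence. Condensed into a standalone sketch (the register names are the real GMBUS ones; the helper itself is illustrative):

static void example_gmbus_recover(struct drm_i915_private *dev_priv,
                                  int reg_offset)
{
        /* Pulse the Software Clear Interrupt bit: this resets the
         * GMBUS controller and clears the BUS_ERROR latched when a
         * slave NAKs. */
        I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
        I915_WRITE(GMBUS1 + reg_offset, 0);

        /* Park the interface; the next transfer re-enables it. */
        I915_WRITE(GMBUS0 + reg_offset, 0);
}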
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bcdba7bd5cfa..a562bd2648c7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -231,6 +231,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ int pipe;
/* Should never happen!! */
if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@@ -277,8 +278,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(BCLRPAT(pipe), 0);
switch (intel_lvds->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
@@ -472,15 +473,13 @@ static enum drm_connector_status
intel_lvds_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- enum drm_connector_status status = connector_status_connected;
+ enum drm_connector_status status;
- /* ACPI lid methods were generally unreliable in this generation, so
- * don't even bother.
- */
- if (IS_GEN2(dev) || IS_GEN3(dev))
- return connector_status_connected;
+ status = intel_panel_detect(dev);
+ if (status != connector_status_unknown)
+ return status;
- return status;
+ return connector_status_connected;
}
/**
@@ -496,7 +495,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return drm_add_edid_modes(connector, intel_lvds->edid);
mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
- if (mode == 0)
+ if (mode == NULL)
return 0;
drm_mode_probed_add(connector, mode);
@@ -830,25 +829,6 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return false;
}
-static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u8 buf = 0;
- struct i2c_msg msgs[] = {
- {
- .addr = 0xA0,
- .flags = 0,
- .len = 1,
- .buf = &buf,
- },
- };
- struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
- /* XXX this only appears to work when using GMBUS */
- if (intel_gmbus_is_forced_bit(i2c))
- return true;
- return i2c_transfer(i2c, msgs, 1) == 1;
-}
-
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -889,11 +869,6 @@ bool intel_lvds_init(struct drm_device *dev)
}
}
- if (!intel_lvds_ddc_probe(dev, pin)) {
- DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
- return false;
- }
-
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
if (!intel_lvds) {
return false;
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index f70b7cf32bff..9034dd8f33c7 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -80,3 +80,33 @@ int intel_ddc_get_modes(struct drm_connector *connector,
return ret;
}
+
+static const char *broadcast_rgb_names[] = {
+ "Full",
+ "Limited 16:235",
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+ drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_connector_attach_property(connector, prop, 0);
+}
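
Because the property object is cached in dev_priv, it is allocated once and then shared: every connector that calls this helper attaches the same drm_property rather than creating a duplicate. A hypothetical caller, mirroring what the HDMI and SDVO hunks in this patch do:

static void example_connector_init(struct drm_connector *connector)
{
        /* The first caller allocates "Broadcast RGB" and stores it in
         * dev_priv; every later caller only attaches it. The initial
         * value 0 selects "Full" range. */
        intel_attach_broadcast_rgb_property(connector);
}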
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 64fd64443ca6..d2c710422908 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -39,6 +39,8 @@
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x400
@@ -489,6 +491,8 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->header = base;
opregion->vbt = base + OPREGION_VBT_OFFSET;
+ opregion->lid_state = base + ACPI_CLID;
+
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3fbb98b948d6..a670c006982e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
struct drm_i915_gem_request *request,
- bool interruptible,
void (*tail)(struct intel_overlay *))
{
struct drm_device *dev = overlay->dev;
@@ -221,16 +220,14 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
kfree(request);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_do_wait_request(dev,
- overlay->last_flip_req, true,
- LP_RING(dev_priv));
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@@ -256,7 +253,7 @@ i830_activate_pipe_a(struct drm_device *dev)
return 0;
/* most i8xx have pipe a forced on, so don't trust dpms mode */
- if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
+ if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
return 0;
crtc_funcs = crtc->base.helper_private;
@@ -322,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+ ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
if (pipe_a_quirk)
i830_deactivate_pipe_a(dev);
@@ -364,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -401,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
}
/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay,
- bool interruptible)
+static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -437,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay,
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
ADVANCE_LP_RING();
- return intel_overlay_do_wait_request(overlay, request, interruptible,
+ return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
* We have to be careful not to repeat work forever and to make forward progress. */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- bool interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -453,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, LP_RING(dev_priv));
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@@ -499,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- ret = intel_overlay_do_wait_request(overlay, request, true,
+ ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
@@ -868,8 +862,7 @@ out_unpin:
return ret;
}
-int intel_overlay_switch_off(struct intel_overlay *overlay,
- bool interruptible)
+int intel_overlay_switch_off(struct intel_overlay *overlay)
{
struct overlay_registers *regs;
struct drm_device *dev = overlay->dev;
@@ -878,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
- ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+ ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
return ret;
@@ -893,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
regs->OCMD = 0;
intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_off(overlay, interruptible);
+ ret = intel_overlay_off(overlay);
if (ret != 0)
return ret;
@@ -1135,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
- ret = intel_overlay_switch_off(overlay, true);
+ ret = intel_overlay_switch_off(overlay);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@@ -1157,7 +1150,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle));
- if (!new_bo) {
+ if (&new_bo->base == NULL) {
ret = -ENOENT;
goto out_free;
}
@@ -1171,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = intel_overlay_recover_from_interrupt(overlay, true);
+ ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
goto out_unlock;
if (overlay->crtc != crtc) {
struct drm_display_mode *mode = &crtc->base.mode;
- ret = intel_overlay_switch_off(overlay, true);
+ ret = intel_overlay_switch_off(overlay);
if (ret != 0)
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..a06ff07a4d3b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -208,7 +208,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
- val >>= 1;
}
}
@@ -235,11 +234,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
if (is_backlight_combination_mode(dev)){
u32 max = intel_panel_get_max_backlight(dev);
- u8 lpbc;
+ u8 lbpc;
- lpbc = level * 0xfe / max + 1;
- level /= lpbc;
- pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+ pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
}
tmp = I915_READ(BLC_PWM_CTL);
@@ -281,3 +280,28 @@ void intel_panel_setup_backlight(struct drm_device *dev)
dev_priv->backlight_level = intel_panel_get_backlight(dev);
dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+#if 0
+ struct drm_i915_private *dev_priv = dev->dev_private;
+#endif
+
+ if (i915_panel_ignore_lid)
+ return i915_panel_ignore_lid > 0 ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+ /* The OpRegion lid state on the HP 2540p is wrong at boot; this
+ * appears to be either a BIOS or a Linux ACPI fault. */
+#if 0
+ /* Assume that the BIOS does not lie through the OpRegion... */
+ if (dev_priv->opregion.lid_state)
+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+#endif
+
+ return connector_status_unknown;
+}
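
i915_panel_ignore_lid (a module parameter declared elsewhere in the driver) acts as a tri-state override for the detection above. Restated as a standalone sketch:

static enum drm_connector_status
example_lid_override(int panel_ignore_lid)
{
        /* > 0: force connected, < 0: force disconnected,
         * == 0: no override, fall back to autodetection. */
        if (panel_ignore_lid)
                return panel_ignore_lid > 0 ?
                        connector_status_connected :
                        connector_status_disconnected;

        return connector_status_unknown;
}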
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 445f27efe677..e9e6f71418a4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -62,77 +62,63 @@ render_ring_flush(struct intel_ring_buffer *ring,
u32 flush_domains)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
int ret;
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
- invalidate_domains, flush_domains);
-#endif
-
- trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
- invalidate_domains, flush_domains);
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
- if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (INTEL_INFO(dev)->gen < 4) {
/*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
- * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
*/
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- if (INTEL_INFO(dev)->gen < 4) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
- cmd |= MI_EXE_FLUSH;
-
- if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
- (IS_G4X(dev) || IS_GEN5(dev)))
- cmd |= MI_INVALIDATE_ISP;
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
-#if WATCH_EXEC
- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- }
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
@@ -580,9 +566,6 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
{
int ret;
- if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
- return 0;
-
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
@@ -612,7 +595,6 @@ ring_add_request(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
- DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
*result = seqno;
return 0;
}
@@ -715,11 +697,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
-
if (IS_I830(dev) || IS_845G(dev)) {
ret = intel_ring_begin(ring, 4);
if (ret)
@@ -894,6 +873,10 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_wait_ring_buffer(ring, ring->size - 8);
+ if (ret)
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
I915_WRITE_CTL(ring, 0);
drm_core_ioremapfree(&ring->map, ring->dev);
@@ -950,13 +933,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return 0;
}
- trace_i915_ring_wait_begin (dev);
+ trace_i915_ring_wait_begin(ring);
end = jiffies + 3 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
if (ring->space >= n) {
- trace_i915_ring_wait_end(dev);
+ trace_i915_ring_wait_end(ring);
return 0;
}
@@ -970,16 +953,20 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
} while (!time_after(jiffies, end));
- trace_i915_ring_wait_end (dev);
+ trace_i915_ring_wait_end(ring);
return -EBUSY;
}
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
+ if (unlikely(atomic_read(&dev_priv->mm.wedged)))
+ return -EIO;
+
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
@@ -1064,9 +1051,6 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
uint32_t cmd;
int ret;
- if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
-
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
@@ -1238,9 +1222,6 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
uint32_t cmd;
int ret;
- if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
- return 0;
-
ret = blt_ring_begin(ring, 4);
if (ret)
return ret;
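
After the de-indentation, render_ring_flush() always emits a flush whose command word is assembled from the domain masks. The construction, restated as a pure function for clarity (the MI_* and domain constants are the real ones from i915_reg.h):

static u32 example_render_flush_cmd(struct drm_device *dev,
                                    u32 invalidate_domains,
                                    u32 flush_domains)
{
        u32 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;

        /* Flush the render cache only when a render domain is named. */
        if ((invalidate_domains | flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;

        /* Pre-965 only: on 965+ the sampler cache is always flushed
         * and this bit is reserved. */
        if (INTEL_INFO(dev)->gen < 4 &&
            (invalidate_domains & I915_GEM_DOMAIN_SAMPLER))
                cmd |= MI_READ_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;

        if ((invalidate_domains & I915_GEM_DOMAIN_COMMAND) &&
            (IS_G4X(dev) || IS_GEN5(dev)))
                cmd |= MI_INVALIDATE_ISP;

        return cmd;
}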
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde85a636..f23cc5f037a6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
struct drm_i915_gem_object *obj;
};
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
@@ -43,7 +44,7 @@ struct intel_ring_buffer {
RING_BLT = 0x4,
} id;
u32 mmio_base;
- void *virtual_start;
+ void __iomem *virtual_start;
struct drm_device *dev;
struct drm_i915_gem_object *obj;
@@ -58,6 +59,7 @@ struct intel_ring_buffer {
u32 irq_refcount;
u32 irq_mask;
u32 irq_seqno; /* last seqno seen at irq time */
+ u32 trace_irq_seqno;
u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
@@ -141,6 +143,26 @@ intel_read_status_page(struct intel_ring_buffer *ring,
return ioread32(ring->status_page.page_addr + reg);
}
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_GEM_HWS_INDEX 0x20
+#define I915_BREADCRUMB_INDEX 0x21
+
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
@@ -166,6 +188,12 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+{
+ if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+ ring->trace_irq_seqno = seqno;
+}
+
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
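
Consuming the status-page layout documented above is a single indexed read; intel_read_status_page() performs the ioread32 against the mapped page. A hedged usage sketch, assuming an initialised LP ring:

static u32 example_read_breadcrumb(drm_i915_private_t *dev_priv)
{
        /* Dword 0x21 (I915_BREADCRUMB_INDEX) holds the breadcrumb the
         * DRI1 path stores via MI_STORE_DATA_INDEX. */
        return READ_BREADCRUMB(dev_priv);
}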
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7c50cdce84f0..4324f33212d6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -93,6 +93,12 @@ struct intel_sdvo {
uint16_t attached_output;
/**
+ * This is used to select the color range of RGB outputs in HDMI mode.
+ * It is only valid when using TMDS encoding and 8 bits per color mode.
+ */
+ uint32_t color_range;
+
+ /**
* This is set if we're going to treat the device as TV-out.
*
* While we have these nice friendly flags for output types that ought
@@ -585,6 +591,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i
{
struct intel_sdvo_get_trained_inputs_response response;
+ BUILD_BUG_ON(sizeof(response) != 1);
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
&response, sizeof(response)))
return false;
@@ -632,6 +639,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo
{
struct intel_sdvo_pixel_clock_range clocks;
+ BUILD_BUG_ON(sizeof(clocks) != 4);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
&clocks, sizeof(clocks)))
@@ -699,6 +707,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
&dtd->part1, sizeof(dtd->part1)) &&
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
@@ -796,6 +806,7 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
struct intel_sdvo_encode encode;
+ BUILD_BUG_ON(sizeof(encode) != 2);
return intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPP_ENCODE,
&encode, sizeof(encode));
@@ -1051,6 +1062,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
sdvox = 0;
+ if (intel_sdvo->is_hdmi)
+ sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1162,6 +1175,7 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
+ BUILD_BUG_ON(sizeof(*caps) != 8);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_DEVICE_CAPS,
caps, sizeof(*caps)))
@@ -1268,33 +1282,9 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
- int caps = 0;
-
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
- caps++;
-
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
- caps++;
-
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
- caps++;
-
- return (caps > 1);
+ /* Is there more than one type of output? */
+ int caps = intel_sdvo->caps.output_flags & 0xf;
+ return caps & (caps - 1); /* nonzero iff more than one bit is set */
}
static struct edid *
@@ -1482,7 +1472,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* Note! This is in reply order (see loop in get_tv_modes).
* XXX: all 60Hz refresh?
*/
-struct drm_display_mode sdvo_tv_modes[] = {
+static const struct drm_display_mode sdvo_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
416, 0, 200, 201, 232, 233, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -1713,6 +1703,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint16_t temp_value;
uint8_t cmd;
int ret;
@@ -1742,6 +1733,14 @@ intel_sdvo_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_sdvo->color_range)
+ return 0;
+
+ intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
@@ -2046,6 +2045,9 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
drm_connector_attach_property(&connector->base.base,
connector->force_audio_property, 0);
}
+
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ intel_attach_broadcast_rgb_property(&connector->base.base);
}
static bool
@@ -2268,6 +2270,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_set_target_output(intel_sdvo, type))
return false;
+ BUILD_BUG_ON(sizeof(format) != 6);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
&format, sizeof(format)))
@@ -2474,6 +2477,8 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
uint16_t response;
} enhancements;
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
+
enhancements.response = 0;
intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
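
The multifunction check earlier in this file leans on a standard bit trick: x & (x - 1) clears the lowest set bit, so a nonzero result means at least two bits were set. A standalone illustration:

/* more_than_one(0x0) == false   (no outputs)
 * more_than_one(0x4) == false   (exactly one output type)
 * more_than_one(0x5) == true    (two output types)
 */
static bool more_than_one(u16 x)
{
        return (x & (x - 1)) != 0;
}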
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index a386b022e538..4f4e23bc2d16 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -230,7 +230,7 @@ struct intel_sdvo_set_target_input_args {
} __attribute__((packed));
/**
- * Takes a struct intel_sdvo_output_flags of which outputs are targetted by
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
* Affected commands inclue SET_OUTPUT_TIMINGS_PART[12],
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index fe4a53a50b83..6b22c1dcc015 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1006,6 +1006,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
const struct video_levels *video_levels;
const struct color_conversion *color_conversion;
bool burst_ena;
+ int pipe = intel_crtc->pipe;
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
@@ -1149,14 +1150,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
{
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = PIPECONF(pipe);
+ int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
+ int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
@@ -1380,7 +1378,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (type < 0)
return connector_status_disconnected;
+ intel_tv->type = type;
intel_tv_find_better_format(connector);
+
return connector_status_connected;
}
@@ -1672,8 +1672,7 @@ intel_tv_init(struct drm_device *dev)
*
* More recent chipsets favour HDMI rather than integrated S-Video.
*/
- connector->polled =
- DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
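
Both the intel_tv.c hunk above and the intel_dvo.c hunk earlier replace hand-rolled `pipe == 0 ? REG_A : REG_B` selections with indexed macros. Those are built on a stride helper along these lines (reproduced from i915_reg.h in this series for illustration):

/* Instance B of a per-pipe register sits at a fixed stride from
 * instance A, so the address is linear in the pipe/plane index. */
#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)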
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 08868ac3048a..5ccb65deb83c 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -426,7 +426,7 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
* Bootstrap the driver for AGP DMA.
*
* \todo
- * Investigate whether there is any benifit to storing the WARP microcode in
+ * Investigate whether there is any benefit to storing the WARP microcode in
* AGP memory. If not, the microcode may as well always be put in PCI
* memory.
*
@@ -703,7 +703,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
- const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
+ const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
int err;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 0aaf5f67a436..42d31874edf2 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -75,10 +75,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -88,15 +84,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver mga_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init mga_init(void)
{
driver.num_ioctls = mga_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &mga_pci_driver);
}
static void __exit mga_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &mga_pci_driver);
}
module_init(mga_init);
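
The MGA hunk is one instance of a tree-wide conversion in this series: the pci_driver no longer nests inside drm_driver, and bus registration moves to drm_pci_init()/drm_pci_exit(). The resulting boilerplate, sketched for a hypothetical driver:

static struct pci_device_id example_pciidlist[] = {
        /* ... supported device IDs ... */
        { 0, 0, 0 }
};

static struct drm_driver example_driver = {
        .name = "example",
        /* ... fops, ioctls, feature flags ... */
};

static struct pci_driver example_pci_driver = {
        .name = "example",
        .id_table = example_pciidlist,
};

static int __init example_init(void)
{
        return drm_pci_init(&example_driver, &example_pci_driver);
}

static void __exit example_exit(void)
{
        drm_pci_exit(&example_driver, &example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);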
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index d3a9c6e02477..00a55dfdba82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -88,18 +88,20 @@ static const struct backlight_ops nv50_bl_ops = {
.update_status = nv50_set_intensity,
};
-static int nouveau_nv40_backlight_init(struct drm_device *dev)
+static int nouveau_nv40_backlight_init(struct drm_connector *connector)
{
- struct backlight_properties props;
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
struct backlight_device *bd;
if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
return 0;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 31;
- bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+ bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
&nv40_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
@@ -111,18 +113,20 @@ static int nouveau_nv40_backlight_init(struct drm_device *dev)
return 0;
}
-static int nouveau_nv50_backlight_init(struct drm_device *dev)
+static int nouveau_nv50_backlight_init(struct drm_connector *connector)
{
- struct backlight_properties props;
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
struct backlight_device *bd;
if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
return 0;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = 1025;
- bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+ bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
&nv50_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
@@ -133,8 +137,9 @@ static int nouveau_nv50_backlight_init(struct drm_device *dev)
return 0;
}
-int nouveau_backlight_init(struct drm_device *dev)
+int nouveau_backlight_init(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
#ifdef CONFIG_ACPI
@@ -147,9 +152,9 @@ int nouveau_backlight_init(struct drm_device *dev)
switch (dev_priv->card_type) {
case NV_40:
- return nouveau_nv40_backlight_init(dev);
+ return nouveau_nv40_backlight_init(connector);
case NV_50:
- return nouveau_nv50_backlight_init(dev);
+ return nouveau_nv50_backlight_init(connector);
default:
break;
}
@@ -157,8 +162,9 @@ int nouveau_backlight_init(struct drm_device *dev)
return 0;
}
-void nouveau_backlight_exit(struct drm_device *dev)
+void nouveau_backlight_exit(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (dev_priv->backlight) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 6bdab891c64e..90aef64b76f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -269,7 +269,7 @@ struct init_tbl_entry {
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
-static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
+static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
#define MACRO_INDEX_SIZE 2
#define MACRO_SIZE 8
@@ -282,7 +282,7 @@ static void still_alive(void)
{
#if 0
sync();
- msleep(2);
+ mdelay(2);
#endif
}
@@ -1904,7 +1904,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
BIOSLOG(bios, "0x%04X: "
"Condition not met, sleeping for 20ms\n",
offset);
- msleep(20);
+ mdelay(20);
}
}
@@ -1938,7 +1938,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
offset, time);
- msleep(time);
+ mdelay(time);
return 3;
}
@@ -2011,6 +2011,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
}
static int
+init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_JUMP opcode: 0x5C ('\')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): offset (in bios)
+ *
+ * Continue execution of init table from 'offset'
+ */
+
+ uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
+
+ if (!iexec->execute)
+ return 3;
+
+ BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
+ return jmp_offset - offset;
+}
+
+static int
init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2962,7 +2983,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (time < 1000)
udelay(time);
else
- msleep((time + 900) / 1000);
+ mdelay((time + 900) / 1000);
return 3;
}
@@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
{ "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
+ { "INIT_JUMP" , 0x5C, init_jump },
{ "INIT_I2C_IF" , 0x5E, init_i2c_if },
{ "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
{ "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
@@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = {
#define MAX_TABLE_OPS 1000
static int
-parse_init_table(struct nvbios *bios, unsigned int offset,
- struct init_exec *iexec)
+parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
* Parses all commands in an init table.
@@ -3856,7 +3877,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
if (script == LVDS_PANEL_OFF) {
/* off-on delay in ms */
- msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+ mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7]));
}
#ifdef __powerpc__
/* Powerbook specific quirks */
@@ -5950,6 +5971,11 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
}
}
+static const u8 hpd_gpio[16] = {
+ 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+};
+
static void
parse_dcb_connector_table(struct nvbios *bios)
{
@@ -5986,23 +6012,9 @@ parse_dcb_connector_table(struct nvbios *bios)
cte->type = (cte->entry & 0x000000ff) >> 0;
cte->index2 = (cte->entry & 0x00000f00) >> 8;
- switch (cte->entry & 0x00033000) {
- case 0x00001000:
- cte->gpio_tag = 0x07;
- break;
- case 0x00002000:
- cte->gpio_tag = 0x08;
- break;
- case 0x00010000:
- cte->gpio_tag = 0x51;
- break;
- case 0x00020000:
- cte->gpio_tag = 0x52;
- break;
- default:
- cte->gpio_tag = 0xff;
- break;
- }
+
+ cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
+ cte->gpio_tag = hpd_gpio[cte->gpio_tag];
if (cte->type == 0xff)
continue;
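
The switch collapses into a table because the hotplug field is one-hot: after shifting the entry down by 12, ffs() yields 0 when no bit is set and the 1-based index of the lowest set bit otherwise, which indexes straight into hpd_gpio[] (entry 0 being the 0xff "no tag" value). Restated for illustration:

static u8 example_decode_hpd_tag(u32 entry)
{
        static const u8 hpd_gpio[16] = {
                0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
                0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
        };

        /* ffs() == 0 (no bit set) selects the 0xff placeholder. */
        return hpd_gpio[ffs((entry & 0x07033000) >> 12)];
}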
@@ -6342,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
}
}
+ /* XFX GT-240X-YA
+ *
+ * So many things are wrong here that we replace the entire encoder table.
+ */
+ if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
+ if (idx == 0) {
+ *conn = 0x02001300; /* VGA, connector 1 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 1) {
+ *conn = 0x01010312; /* DVI, connector 0 */
+ *conf = 0x00020030;
+ } else
+ if (idx == 2) {
+ *conn = 0x01010310; /* VGA, connector 0 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 3) {
+ *conn = 0x02022362; /* HDMI, connector 2 */
+ *conf = 0x00020010;
+ } else {
+ *conn = 0x0000000e; /* EOL */
+ *conf = 0x00000000;
+ }
+ }
+
return true;
}
@@ -6702,11 +6740,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = { true, false };
- mutex_lock(&bios->lock);
+ spin_lock_bh(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
- mutex_unlock(&bios->lock);
+ spin_unlock_bh(&bios->lock);
}
static bool NVInitVBIOS(struct drm_device *dev)
@@ -6715,7 +6753,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->vbios;
memset(bios, 0, sizeof(struct nvbios));
- mutex_init(&bios->lock);
+ spin_lock_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))
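
The lock conversion explains the msleep() to mdelay() substitutions earlier in this file: once bios->lock is a spinlock taken with spin_lock_bh(), the init-table parser runs in atomic context and must not sleep, so every scripted delay has to busy-wait. A minimal sketch of the constraint:

static void example_run_table_atomic(struct nvbios *bios)
{
        spin_lock_bh(&bios->lock);
        /* Nothing reached from here may sleep: scripted waits must
         * use mdelay()/udelay(), never msleep(). */
        mdelay(20);
        spin_unlock_bh(&bios->lock);
}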
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 50a648e01c49..8a54fa7edf5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -251,7 +251,7 @@ struct nvbios {
uint8_t digital_min_front_porch;
bool fp_no_ddc;
- struct mutex lock;
+ spinlock_t lock;
uint8_t data[NV_PROM_SIZE];
unsigned int length;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d38a4d9f9b0b..2ad49cbf7c8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,13 +49,16 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
- nouveau_vm_put(&nvbo->vma);
+ if (nvbo->vma.node) {
+ nouveau_vm_unmap(&nvbo->vma);
+ nouveau_vm_put(&nvbo->vma);
+ }
kfree(nvbo);
}
static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
- int *page_shift)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
+ int *align, int *size, int *page_shift)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
@@ -80,7 +83,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
}
} else {
if (likely(dev_priv->chan_vm)) {
- if (*size > 256 * 1024)
+ if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
*page_shift = dev_priv->chan_vm->lpg_shift;
else
*page_shift = dev_priv->chan_vm->spg_shift;
@@ -98,8 +101,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, bool no_vm, bool mappable,
- struct nouveau_bo **pnvbo)
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
@@ -110,16 +112,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
return -ENOMEM;
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
- nvbo->mappable = mappable;
- nvbo->no_vm = no_vm;
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
+ nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
align >>= PAGE_SHIFT;
- if (!nvbo->no_vm && dev_priv->chan_vm) {
+ if (dev_priv->chan_vm) {
ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
NV_MEM_ACCESS_RW, &nvbo->vma);
if (ret) {
@@ -141,11 +141,8 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
-
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
*pnvbo = nvbo;
return 0;
}
@@ -315,11 +312,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
-
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
return 0;
}
@@ -382,7 +376,8 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
case NOUVEAU_GART_AGP:
return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
- case NOUVEAU_GART_SGDMA:
+ case NOUVEAU_GART_PDMA:
+ case NOUVEAU_GART_HW:
return nouveau_sgdma_init_ttm(dev);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
@@ -428,7 +423,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
- man->func = &ttm_bo_manager_func;
+ if (dev_priv->card_type >= NV_50)
+ man->func = &nouveau_gart_manager;
+ else
+ man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -436,7 +434,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
- case NOUVEAU_GART_SGDMA:
+ case NOUVEAU_GART_PDMA:
+ case NOUVEAU_GART_HW:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
@@ -498,45 +497,22 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
}
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
- struct nouveau_channel *chan, struct ttm_mem_reg *mem)
-{
- struct nouveau_bo *nvbo = nouveau_bo(bo);
-
- if (nvbo->no_vm) {
- if (mem->mem_type == TTM_PL_TT)
- return NvDmaGART;
- return NvDmaVRAM;
- }
-
- if (mem->mem_type == TTM_PL_TT)
- return chan->gart_handle;
- return chan->vram_handle;
-}
-
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- u64 src_offset = old_mem->start << PAGE_SHIFT;
- u64 dst_offset = new_mem->start << PAGE_SHIFT;
u32 page_count = new_mem->num_pages;
+ u64 src_offset, dst_offset;
int ret;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
-
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
page_count = new_mem->num_pages;
while (page_count) {
@@ -571,33 +547,18 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset, dst_offset;
int ret;
- src_offset = old_mem->start << PAGE_SHIFT;
- dst_offset = new_mem->start << PAGE_SHIFT;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
-
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
-
- ret = RING_SPACE(chan, 3);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
while (length) {
u32 amount, stride, height;
@@ -678,6 +639,15 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return 0;
}
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+ struct nouveau_channel *chan, struct ttm_mem_reg *mem)
+{
+ if (mem->mem_type == TTM_PL_TT)
+ return chan->gart_handle;
+ return chan->vram_handle;
+}
+
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
@@ -731,15 +701,43 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_channel *chan;
int ret;
chan = nvbo->channel;
- if (!chan || nvbo->no_vm) {
+ if (!chan) {
chan = dev_priv->channel;
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
}
+ /* Create a temporary VMA for the old memory; it is cleaned up
+ * after TTM destroys the ttm_mem_reg.
+ */
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_mem *node = old_mem->mm_node;
+ if (!node->tmp_vma.node) {
+ u32 page_shift = nvbo->vma.node->type;
+ if (old_mem->mem_type == TTM_PL_TT)
+ page_shift = nvbo->vma.vm->spg_shift;
+
+ ret = nouveau_vm_get(chan->vm,
+ old_mem->num_pages << PAGE_SHIFT,
+ page_shift, NV_MEM_ACCESS_RO,
+ &node->tmp_vma);
+ if (ret)
+ goto out;
+ }
+
+ if (old_mem->mem_type == TTM_PL_VRAM)
+ nouveau_vm_map(&node->tmp_vma, node);
+ else {
+ nouveau_vm_map_sg(&node->tmp_vma, 0,
+ old_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
+ }
+
if (dev_priv->card_type < NV_50)
ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
else
@@ -753,6 +751,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
no_wait_gpu, new_mem);
}
+out:
if (chan == dev_priv->channel)
mutex_unlock(&chan->mutex);
return ret;
@@ -763,6 +762,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
@@ -782,7 +782,23 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_mem *node = tmp_mem.mm_node;
+ struct nouveau_vma *vma = &nvbo->vma;
+ if (vma->node->type != vma->vm->spg_shift)
+ vma = &node->tmp_vma;
+ nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
+
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ nouveau_vm_unmap(&nvbo->vma);
+ }
+
if (ret)
goto out;
@@ -825,6 +841,36 @@ out:
return ret;
}
+static void
+nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *node = new_mem->mm_node;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
+
+ if (dev_priv->card_type < NV_50)
+ return;
+
+ switch (new_mem->mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_vm_map(vma, node);
+ break;
+ case TTM_PL_TT:
+ if (vma->node->type != vm->spg_shift) {
+ nouveau_vm_unmap(vma);
+ vma = &node->tmp_vma;
+ }
+ nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ break;
+ default:
+ nouveau_vm_unmap(&nvbo->vma);
+ break;
+ }
+}
+
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct nouveau_tile_reg **new_tile)
@@ -832,19 +878,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- uint64_t offset;
+ u64 offset = new_mem->start << PAGE_SHIFT;
- if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
- /* Nothing to do. */
- *new_tile = NULL;
+ *new_tile = NULL;
+ if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
- }
-
- offset = new_mem->start << PAGE_SHIFT;
- if (dev_priv->chan_vm) {
- nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
- } else if (dev_priv->card_type >= NV_10) {
+ if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@@ -861,11 +901,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
- if (dev_priv->card_type >= NV_10 &&
- dev_priv->card_type < NV_50) {
- nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
- *old_tile = new_tile;
- }
+ nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
+ *old_tile = new_tile;
}
static int
@@ -879,9 +916,11 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_tile_reg *new_tile = NULL;
int ret = 0;
- ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
- if (ret)
- return ret;
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+ if (ret)
+ return ret;
+ }
/* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
@@ -912,10 +951,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
- if (ret)
- nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
- else
- nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+ if (dev_priv->card_type < NV_50) {
+ if (ret)
+ nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+ else
+ nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+ }
return ret;
}
@@ -956,7 +997,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break;
case TTM_PL_VRAM:
{
- struct nouveau_vram *vram = mem->mm_node;
+ struct nouveau_mem *node = mem->mm_node;
u8 page_shift;
if (!dev_priv->bar1_vm) {
@@ -967,23 +1008,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
}
if (dev_priv->card_type == NV_C0)
- page_shift = vram->page_shift;
+ page_shift = node->page_shift;
else
page_shift = 12;
ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
page_shift, NV_MEM_ACCESS_RW,
- &vram->bar_vma);
+ &node->bar_vma);
if (ret)
return ret;
- nouveau_vm_map(&vram->bar_vma, vram);
+ nouveau_vm_map(&node->bar_vma, node);
if (ret) {
- nouveau_vm_put(&vram->bar_vma);
+ nouveau_vm_put(&node->bar_vma);
return ret;
}
- mem->bus.offset = vram->bar_vma.offset;
+ mem->bus.offset = node->bar_vma.offset;
if (dev_priv->card_type == NV_50) /*XXX*/
mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1);
@@ -1000,16 +1041,16 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct nouveau_vram *vram = mem->mm_node;
+ struct nouveau_mem *node = mem->mm_node;
if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
return;
- if (!vram->bar_vma.node)
+ if (!node->bar_vma.node)
return;
- nouveau_vm_unmap(&vram->bar_vma);
- nouveau_vm_put(&vram->bar_vma);
+ nouveau_vm_unmap(&node->bar_vma);
+ nouveau_vm_put(&node->bar_vma);
}
static int
@@ -1059,6 +1100,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
+ .move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
.sync_obj_signaled = __nouveau_fence_signalled,
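The new .move_notify hook above lets nouveau remap a buffer's VMA whenever TTM migrates it. A minimal sketch of the same dispatch shape, with hypothetical my_vm_* helpers standing in for the nouveau_vm_* calls:

/* Illustrative only: react to TTM telling us the buffer's backing store
 * changed, as nouveau_bo_move_ntfy() does above. my_vm_map(),
 * my_vm_map_sg() and my_vm_unmap() are placeholders, not real symbols. */
static void my_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:		/* now backed by VRAM */
		my_vm_map(bo, new_mem);
		break;
	case TTM_PL_TT:			/* backed by a scatter/gather list */
		my_vm_map_sg(bo, new_mem);
		break;
	default:			/* plain system pages: drop mapping */
		my_vm_unmap(bo);
		break;
	}
}

struct ttm_bo_driver my_bo_driver = {
	.move_notify = my_move_notify,	/* wired in exactly as above */
	/* ... */
};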
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 3960d66d7aba..4cea35c57d15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,7 +35,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *pb = chan->pushbuf_bo;
struct nouveau_gpuobj *pushbuf = NULL;
- int ret;
+ int ret = 0;
if (dev_priv->card_type >= NV_50) {
if (dev_priv->card_type < NV_C0) {
@@ -90,8 +90,7 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
else
location = TTM_PL_FLAG_TT;
- ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
- true, &pushbuf);
+ ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
if (ret) {
NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
return NULL;
@@ -201,7 +200,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
/* disable the fifo caches */
pfifo->reassign(dev, false);
- /* Construct inital RAMFC for new channel */
+ /* Construct initial RAMFC for new channel */
ret = pfifo->create_context(chan);
if (ret) {
nouveau_channel_put(&chan);
@@ -279,7 +278,7 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
return;
}
- /* noone wants the channel anymore */
+ /* no one wants the channel anymore */
NV_DEBUG(dev, "freeing channel %d\n", chan->id);
nouveau_debugfs_channel_fini(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 390d82c3c4b0..7ae151109a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -116,6 +116,10 @@ nouveau_connector_destroy(struct drm_connector *connector)
nouveau_connector_hotplug, connector);
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ nouveau_backlight_exit(connector);
+
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -894,6 +898,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
drm_sysfs_connector_add(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ nouveau_backlight_init(connector);
+
dcb->drm = connector;
return dcb->drm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 505c6bfb4d75..764c15d537ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,6 +32,7 @@
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
+#include "nv50_display.h"
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -61,18 +62,59 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
-nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
- struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
+nouveau_framebuffer_init(struct drm_device *dev,
+ struct nouveau_framebuffer *nv_fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct nouveau_bo *nvbo)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = &nv_fb->base;
int ret;
- ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret) {
return ret;
}
- drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
- nouveau_fb->nvbo = nvbo;
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ nv_fb->nvbo = nvbo;
+
+ if (dev_priv->card_type >= NV_50) {
+ u32 tile_flags = nouveau_bo_tile_layout(nvbo);
+ if (tile_flags == 0x7a00 ||
+ tile_flags == 0xfe00)
+ nv_fb->r_dma = NvEvoFB32;
+ else
+ if (tile_flags == 0x7000)
+ nv_fb->r_dma = NvEvoFB16;
+ else
+ nv_fb->r_dma = NvEvoVRAM_LP;
+
+ switch (fb->depth) {
+ case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
+ case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
+ case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+ case 24:
+ case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
+ case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+ default:
+ NV_ERROR(dev, "unknown depth %d\n", fb->depth);
+ return -EINVAL;
+ }
+
+ if (dev_priv->chipset == 0x50)
+ nv_fb->r_format |= (tile_flags << 8);
+
+ if (!tile_flags)
+ nv_fb->r_pitch = 0x00100000 | fb->pitch;
+ else {
+ u32 mode = nvbo->tile_mode;
+ if (dev_priv->card_type >= NV_C0)
+ mode >>= 4;
+ nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+ }
+ }
+
return 0;
}
@@ -182,6 +224,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
unsigned long flags;
int ret;
@@ -201,9 +244,12 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- OUT_RING(chan, 0);
- FIRE_RING(chan);
+ if (dev_priv->card_type < NV_C0)
+ BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ else
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0500, 1);
+ OUT_RING (chan, 0);
+ FIRE_RING (chan);
ret = nouveau_fence_new(chan, pfence, true);
if (ret)
@@ -244,7 +290,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
- { { }, s->event, nouveau_crtc(crtc)->index,
+ { { }, event, nouveau_crtc(crtc)->index,
fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
new_bo->bo.offset };
@@ -255,6 +301,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
mutex_lock(&chan->mutex);
/* Emit a page flip */
+ if (dev_priv->card_type >= NV_50) {
+ ret = nv50_display_flip_next(crtc, fb, chan);
+ if (ret) {
+ nouveau_channel_put(&chan);
+ goto fail_unreserve;
+ }
+ }
+
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
nouveau_channel_put(&chan);
if (ret)
@@ -305,7 +359,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
}
list_del(&s->head);
- *ps = *s;
+ if (ps)
+ *ps = *s;
kfree(s);
spin_unlock_irqrestore(&dev->event_lock, flags);
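To make the r_pitch encoding in nouveau_framebuffer_init() above concrete, a small self-contained example; the framebuffer geometry is assumed:

#include <stdio.h>
#include <stdint.h>

/* EVO pitch encoding from the hunk above: linear surfaces set bit 20 and
 * keep the byte pitch, tiled surfaces store pitch/4 in bits 4+ with the
 * tile mode in the low bits (pre-shifted down by 4 on NV_C0). */
static uint32_t evo_pitch(uint32_t pitch, uint32_t tile_flags,
			  uint32_t tile_mode, int is_nvc0)
{
	if (!tile_flags)
		return 0x00100000 | pitch;
	if (is_nvc0)
		tile_mode >>= 4;
	return ((pitch / 4) << 4) | tile_mode;
}

int main(void)
{
	/* assumed 1280x1024 32bpp scanout: pitch = 1280 * 4 = 5120 bytes */
	printf("linear: 0x%08x\n", evo_pitch(5120, 0x0000, 0, 0));
	printf("tiled:  0x%08x\n", evo_pitch(5120, 0x7000, 4, 0));
	return 0;
}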
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bfaaaea..568caedd7216 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+ &chan->m2mf_ntfy);
if (ret)
return ret;
@@ -96,13 +97,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
OUT_RING(chan, 0);
/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
- ret = RING_SPACE(chan, 4);
+ ret = RING_SPACE(chan, 6);
if (ret)
return ret;
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
- OUT_RING(chan, NvM2MF);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
- OUT_RING(chan, NvNotify0);
+ OUT_RING (chan, NvM2MF);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
+ OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, chan->vram_handle);
+ OUT_RING (chan, chan->gart_handle);
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index c36f1763feaa..23d4edf992b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -61,8 +61,6 @@ enum {
NvM2MF = 0x80000001,
NvDmaFB = 0x80000002,
NvDmaTT = 0x80000003,
- NvDmaVRAM = 0x80000004,
- NvDmaGART = 0x80000005,
NvNotify0 = 0x80000006,
Nv2D = 0x80000007,
NvCtxSurf2D = 0x80000008,
@@ -73,12 +71,15 @@ enum {
NvImageBlit = 0x8000000d,
NvSw = 0x8000000e,
NvSema = 0x8000000f,
+ NvEvoSema0 = 0x80000010,
+ NvEvoSema1 = 0x80000011,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
NvEvoFB16 = 0x01000001,
NvEvoFB32 = 0x01000002,
- NvEvoVRAM_LP = 0x01000003
+ NvEvoVRAM_LP = 0x01000003,
+ NvEvoSync = 0xcafe0000
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 38d599554bce..7beb82a0315d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -175,7 +175,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct bit_displayport_encoder_table_entry *dpse;
struct bit_displayport_encoder_table *dpe;
int ret, i, dpe_headerlen, vs = 0, pre = 0;
uint8_t request[2];
@@ -183,7 +182,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
if (!dpe)
return false;
- dpse = (void *)((char *)dpe + dpe_headerlen);
ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index f658a04eecf9..155ebdcbf06f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -408,14 +408,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = nouveau_pci_probe,
- .remove = nouveau_pci_remove,
- .suspend = nouveau_pci_suspend,
- .resume = nouveau_pci_resume
- },
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
@@ -432,6 +424,15 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver nouveau_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = nouveau_pci_probe,
+ .remove = nouveau_pci_remove,
+ .suspend = nouveau_pci_suspend,
+ .resume = nouveau_pci_resume
+};
+
static int __init nouveau_init(void)
{
driver.num_ioctls = nouveau_max_ioctl;
@@ -449,7 +450,7 @@ static int __init nouveau_init(void)
return 0;
nouveau_register_dsm_handler();
- return drm_init(&driver);
+ return drm_pci_init(&driver, &nouveau_pci_driver);
}
static void __exit nouveau_exit(void)
@@ -457,7 +458,7 @@ static void __exit nouveau_exit(void)
if (!nouveau_modeset)
return;
- drm_exit(&driver);
+ drm_pci_exit(&driver, &nouveau_pci_driver);
nouveau_unregister_dsm_handler();
}
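The hunk above is part of the tree-wide move away from embedding a pci_driver inside drm_driver; the resulting registration pattern, sketched with placeholder mydrv_* names (the drm_pci_init()/drm_pci_exit() calls are the ones the patch itself uses):

/* Minimal sketch of the standalone-pci_driver registration pattern;
 * everything prefixed mydrv_ is a placeholder, not a real symbol. */
static struct pci_driver mydrv_pci_driver = {
	.name     = "mydrv",
	.id_table = mydrv_pciidlist,
	.probe    = mydrv_pci_probe,
	.remove   = mydrv_pci_remove,
};

static int __init mydrv_init(void)
{
	return drm_pci_init(&mydrv_drm_driver, &mydrv_pci_driver);
}

static void __exit mydrv_exit(void)
{
	drm_pci_exit(&mydrv_drm_driver, &mydrv_pci_driver);
}

module_init(mydrv_init);
module_exit(mydrv_exit);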
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fcacc3d2..a76514a209b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -57,7 +57,7 @@ struct nouveau_fpriv {
#include "nouveau_util.h"
struct nouveau_grctx;
-struct nouveau_vram;
+struct nouveau_mem;
#include "nouveau_vm.h"
#define MAX_NUM_DCB_ENTRIES 16
@@ -65,13 +65,16 @@ struct nouveau_vram;
#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15
-struct nouveau_vram {
+struct nouveau_mem {
struct drm_device *dev;
struct nouveau_vma bar_vma;
+ struct nouveau_vma tmp_vma;
u8 page_shift;
+ struct drm_mm_node *tag;
struct list_head regions;
+ dma_addr_t *pages;
u32 memtype;
u64 offset;
u64 size;
@@ -90,6 +93,7 @@ struct nouveau_tile_reg {
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
+ u32 valid_domains;
u32 placements[3];
u32 busy_placements[3];
struct ttm_bo_kmap_obj kmap;
@@ -104,8 +108,6 @@ struct nouveau_bo {
struct nouveau_channel *channel;
struct nouveau_vma vma;
- bool mappable;
- bool no_vm;
uint32_t tile_mode;
uint32_t tile_flags;
@@ -214,7 +216,7 @@ struct nouveau_channel {
/* mapping of the fifo itself */
struct drm_local_map *map;
- /* mapping of the regs controling the fifo */
+ /* mapping of the regs controlling the fifo */
void __iomem *user;
uint32_t user_get;
uint32_t user_put;
@@ -387,6 +389,7 @@ struct nouveau_pgraph_engine {
};
struct nouveau_display_engine {
+ void *priv;
int (*early_init)(struct drm_device *);
void (*late_takedown)(struct drm_device *);
int (*create)(struct drm_device *);
@@ -509,8 +512,8 @@ struct nouveau_crypt_engine {
struct nouveau_vram_engine {
int (*init)(struct drm_device *);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
- u32 type, struct nouveau_vram **);
- void (*put)(struct drm_device *, struct nouveau_vram **);
+ u32 type, struct nouveau_mem **);
+ void (*put)(struct drm_device *, struct nouveau_mem **);
bool (*flags_valid)(struct drm_device *, u32 tile_flags);
};
@@ -652,8 +655,6 @@ struct drm_nouveau_private {
/* interrupt handling */
void (*irq_handler[32])(struct drm_device *);
bool msi_enabled;
- struct workqueue_struct *wq;
- struct work_struct irq_work;
struct list_head vbl_waiting;
@@ -681,6 +682,9 @@ struct drm_nouveau_private {
/* For PFIFO and PGRAPH. */
spinlock_t context_switch_lock;
+ /* VM/PRAMIN flush, legacy PRAMIN aperture */
+ spinlock_t vm_lock;
+
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_ramht *ramht;
struct nouveau_gpuobj *ramfc;
@@ -691,15 +695,22 @@ struct drm_nouveau_private {
struct {
enum {
NOUVEAU_GART_NONE = 0,
- NOUVEAU_GART_AGP,
- NOUVEAU_GART_SGDMA
+ NOUVEAU_GART_AGP, /* AGP */
+ NOUVEAU_GART_PDMA, /* paged dma object */
+ NOUVEAU_GART_HW /* on-chip gart/vm */
} type;
uint64_t aper_base;
uint64_t aper_size;
uint64_t aper_free;
+ struct ttm_backend_func *func;
+
+ struct {
+ struct page *page;
+ dma_addr_t addr;
+ } dummy;
+
struct nouveau_gpuobj *sg_ctxdma;
- struct nouveau_vma vma;
} gart_info;
/* nv10-nv40 tiling regions */
@@ -740,14 +751,6 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
- struct nouveau_channel *evo;
- u32 evo_alloc;
- struct {
- struct dcb_entry *dcb;
- u16 script;
- u32 pclk;
- } evo_irq;
-
struct {
struct dentry *channel_root;
} debugfs;
@@ -847,12 +850,14 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
struct nouveau_tile_reg *tile,
struct nouveau_fence *fence);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
- int cout, uint32_t *offset);
+ int count, uint32_t start, uint32_t end,
+ uint32_t *offset);
extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
struct drm_file *);
@@ -997,15 +1002,15 @@ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector
/* nouveau_backlight.c */
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
-extern int nouveau_backlight_init(struct drm_device *);
-extern void nouveau_backlight_exit(struct drm_device *);
+extern int nouveau_backlight_init(struct drm_connector *);
+extern void nouveau_backlight_exit(struct drm_connector *);
#else
-static inline int nouveau_backlight_init(struct drm_device *dev)
+static inline int nouveau_backlight_init(struct drm_connector *dev)
{
return 0;
}
-static inline void nouveau_backlight_exit(struct drm_device *dev) { }
+static inline void nouveau_backlight_exit(struct drm_connector *dev) { }
#endif
/* nouveau_bios.c */
@@ -1075,7 +1080,7 @@ extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
-extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *);
+extern void nv50_fb_vm_trap(struct drm_device *, int display);
/* nvc0_fb.c */
extern int nvc0_fb_init(struct drm_device *);
@@ -1188,7 +1193,7 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
-extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern void nv84_graph_tlb_flush(struct drm_device *dev);
extern struct nouveau_enum nv50_data_error_names[];
/* nvc0_graph.c */
@@ -1294,7 +1299,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
int size, int align, uint32_t flags,
uint32_t tile_mode, uint32_t tile_flags,
- bool no_vm, bool mappable, struct nouveau_bo **);
+ struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1355,9 +1360,9 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
- int size, int align, uint32_t flags,
+ int size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
- bool no_vm, bool mappable, struct nouveau_bo **);
+ struct nouveau_bo **);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index d432134b71e0..a3a88ad00f86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,6 +30,9 @@
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
+ u32 r_dma;
+ u32 r_format;
+ u32 r_pitch;
};
static inline struct nouveau_framebuffer *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 60769d2f9a66..39aee6d4daf8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info)
OUT_RING (chan, 0);
}
- nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+ nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
FIRE_RING(chan);
mutex_unlock(&chan->mutex);
ret = -EBUSY;
for (i = 0; i < 100000; i++) {
- if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+ if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
ret = 0;
break;
}
@@ -296,8 +296,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nvbo);
+ ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(dev, "failed to allocate framebuffer\n");
goto out;
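The /4 added above matters because nouveau_bo_rd32()/nouveau_bo_wr32() take a 32-bit word index while chan->m2mf_ntfy is a byte offset. As standalone arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* m2mf_ntfy is a byte offset (0xfe0 after the nouveau_dma.c change
	 * in this series); the bo accessors index 32-bit words, and word 3
	 * of the notifier block holds the status dword being polled. */
	uint32_t m2mf_ntfy = 0xfe0;
	uint32_t word_idx  = m2mf_ntfy / 4 + 3;	/* 0x3f8 + 3 = 0x3fb */

	printf("status word index = 0x%x\n", word_idx);
	return 0;
}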
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 221b8462ea37..4b9f4493c9f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -27,13 +27,15 @@
#include "drmP.h"
#include "drm.h"
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
- nouveau_private(dev)->card_type < NV_C0)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
struct nouveau_fence {
struct nouveau_channel *channel;
@@ -230,7 +232,8 @@ int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
unsigned long timeout = jiffies + (3 * DRM_HZ);
- unsigned long sleep_time = jiffies + 1;
+ unsigned long sleep_time = NSEC_PER_MSEC / 1000;
+ ktime_t t;
int ret = 0;
while (1) {
@@ -244,8 +247,13 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
__set_current_state(intr ? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE);
- if (lazy && time_after_eq(jiffies, sleep_time))
- schedule_timeout(1);
+ if (lazy) {
+ t = ktime_set(0, sleep_time);
+ schedule_hrtimeout(&t, HRTIMER_MODE_REL);
+ sleep_time *= 2;
+ if (sleep_time > NSEC_PER_MSEC)
+ sleep_time = NSEC_PER_MSEC;
+ }
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -259,11 +267,12 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
}
static struct nouveau_semaphore *
-alloc_semaphore(struct drm_device *dev)
+semaphore_alloc(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_semaphore *sema;
- int ret;
+ int size = (dev_priv->chipset < 0x84) ? 4 : 16;
+ int ret, i;
if (!USE_SEMA(dev))
return NULL;
@@ -277,9 +286,9 @@ alloc_semaphore(struct drm_device *dev)
goto fail;
spin_lock(&dev_priv->fence.lock);
- sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+ sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
if (sema->mem)
- sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
+ sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
spin_unlock(&dev_priv->fence.lock);
if (!sema->mem)
@@ -287,7 +296,8 @@ alloc_semaphore(struct drm_device *dev)
kref_init(&sema->ref);
sema->dev = dev;
- nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+ for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
+ nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
return sema;
fail:
@@ -296,7 +306,7 @@ fail:
}
static void
-free_semaphore(struct kref *ref)
+semaphore_free(struct kref *ref)
{
struct nouveau_semaphore *sema =
container_of(ref, struct nouveau_semaphore, ref);
@@ -318,61 +328,107 @@ semaphore_work(void *priv, bool signalled)
if (unlikely(!signalled))
nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
- kref_put(&sema->ref, free_semaphore);
+ kref_put(&sema->ref, semaphore_free);
}
static int
-emit_semaphore(struct nouveau_channel *chan, int method,
- struct nouveau_semaphore *sema)
+semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
- struct nouveau_fence *fence;
- bool smart = (dev_priv->card_type >= NV_50);
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_fence *fence = NULL;
int ret;
- ret = RING_SPACE(chan, smart ? 8 : 4);
+ if (dev_priv->chipset < 0x84) {
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sema->mem->start);
+ OUT_RING (chan, 1);
+ } else
+ if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 1); /* ACQUIRE_EQ */
+ } else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
+ }
+
+ /* Delay semaphore destruction until its work is done */
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret)
return ret;
- if (smart) {
- BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
- OUT_RING(chan, NvSema);
- }
- BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
- OUT_RING(chan, sema->mem->start);
-
- if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
- /*
- * NV50 tries to be too smart and context-switch
- * between semaphores instead of doing a "first come,
- * first served" strategy like previous cards
- * do.
- *
- * That's bad because the ACQUIRE latency can get as
- * large as the PFIFO context time slice in the
- * typical DRI2 case where you have several
- * outstanding semaphores at the same moment.
- *
- * If we're going to ACQUIRE, force the card to
- * context switch before, just in case the matching
- * RELEASE is already scheduled to be executed in
- * another channel.
- */
- BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
- OUT_RING(chan, 0);
- }
+ kref_get(&sema->ref);
+ nouveau_fence_work(fence, semaphore_work, sema);
+ nouveau_fence_unref(&fence);
+ return 0;
+}
+
+static int
+semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ if (dev_priv->chipset < 0x84) {
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+ OUT_RING (chan, sema->mem->start);
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, 1);
+ } else
+ if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 2); /* RELEASE */
+ } else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
- BEGIN_RING(chan, NvSubSw, method, 1);
- OUT_RING(chan, 1);
-
- if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
- /*
- * Force the card to context switch, there may be
- * another channel waiting for the semaphore we just
- * released.
- */
- BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
- OUT_RING(chan, 0);
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0x1002); /* RELEASE */
}
/* Delay semaphore destruction until its work is done */
@@ -383,7 +439,6 @@ emit_semaphore(struct nouveau_channel *chan, int method,
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
-
return 0;
}
@@ -400,7 +455,7 @@ nouveau_fence_sync(struct nouveau_fence *fence,
nouveau_fence_signalled(fence)))
goto out;
- sema = alloc_semaphore(dev);
+ sema = semaphore_alloc(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
@@ -418,17 +473,17 @@ nouveau_fence_sync(struct nouveau_fence *fence,
}
/* Make wchan wait until it gets signalled */
- ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+ ret = semaphore_acquire(wchan, sema);
if (ret)
goto out_unlock;
/* Signal the semaphore from chan */
- ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
+ ret = semaphore_release(chan, sema);
out_unlock:
mutex_unlock(&chan->mutex);
out_unref:
- kref_put(&sema->ref, free_semaphore);
+ kref_put(&sema->ref, semaphore_free);
out:
if (chan)
nouveau_channel_put_unlocked(&chan);
@@ -449,22 +504,23 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
struct nouveau_gpuobj *obj = NULL;
int ret;
+ if (dev_priv->card_type >= NV_C0)
+ goto out_initialised;
+
/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
/* we leave subchannel empty for nvc0 */
- if (dev_priv->card_type < NV_C0) {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
- }
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
/* Create a DMA object for the shared cross-channel sync area. */
- if (USE_SEMA(dev)) {
+ if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@@ -484,14 +540,20 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING(chan, NvSema);
+ } else {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram_handle); /* whole VM */
}
FIRE_RING(chan);
+out_initialised:
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
-
return 0;
}
@@ -519,12 +581,13 @@ int
nouveau_fence_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
int ret;
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
- 0, 0, false, true, &dev_priv->fence.bo);
+ ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, &dev_priv->fence.bo);
if (ret)
return ret;
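The __nouveau_fence_wait() change earlier in this file replaces a fixed one-jiffy sleep with an hrtimer-based exponential backoff. A self-contained illustration of the resulting sleep schedule (1 us, doubling each round, clamped to 1 ms):

#include <stdio.h>

#define NSEC_PER_MSEC 1000000L

int main(void)
{
	long sleep_ns = NSEC_PER_MSEC / 1000;	/* 1 us, as in the patch */
	int i;

	for (i = 0; i < 12; i++) {
		printf("iteration %2d: sleep %7ld ns\n", i, sleep_ns);
		sleep_ns *= 2;			/* back off exponentially */
		if (sleep_ns > NSEC_PER_MSEC)
			sleep_ns = NSEC_PER_MSEC;	/* clamp at 1 ms */
	}
	return 0;
}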
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 506c508b7eda..b52e46018245 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -61,26 +61,43 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
- int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, bool no_vm, bool mappable,
- struct nouveau_bo **pnvbo)
+ int size, int align, uint32_t domain, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
+ u32 flags = 0;
int ret;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ flags |= TTM_PL_FLAG_VRAM;
+ if (domain & NOUVEAU_GEM_DOMAIN_GART)
+ flags |= TTM_PL_FLAG_TT;
+ if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
+ flags |= TTM_PL_FLAG_SYSTEM;
+
ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
- tile_flags, no_vm, mappable, pnvbo);
+ tile_flags, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
+ /* we restrict allowed domains on nv50+ to only the types
+ * that were requested at creation time. not possible on
+ * earlier chips without busting the ABI.
+ */
+ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ if (dev_priv->card_type >= NV_50)
+ nvbo->valid_domains &= domain;
+
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
nouveau_bo_ref(NULL, pnvbo);
return -ENOMEM;
}
- nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
+ nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
nvbo->gem->driver_private = nvbo;
return 0;
}
@@ -97,7 +114,7 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->offset = nvbo->bo.offset;
- rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
+ rep->map_handle = nvbo->bo.addr_space_offset;
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
return 0;
@@ -111,19 +128,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
struct nouveau_channel *chan = NULL;
- uint32_t flags = 0;
int ret = 0;
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
- if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
- flags |= TTM_PL_FLAG_VRAM;
- if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
- flags |= TTM_PL_FLAG_TT;
- if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
- flags |= TTM_PL_FLAG_SYSTEM;
-
if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
@@ -135,10 +144,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
return PTR_ERR(chan);
}
- ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
- req->info.tile_mode, req->info.tile_flags, false,
- (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
- &nvbo);
+ ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
+ req->info.domain, req->info.tile_mode,
+ req->info.tile_flags, &nvbo);
if (chan)
nouveau_channel_put(&chan);
if (ret)
@@ -161,7 +169,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
{
struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo;
- uint32_t domains = valid_domains &
+ uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
uint32_t pref_flags = 0, valid_flags = 0;
@@ -592,7 +600,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (push[i].bo_index >= req->nr_buffers) {
NV_ERROR(dev, "push %d buffer not in list\n", i);
ret = -EINVAL;
- goto out;
+ goto out_prevalid;
}
bo[push[i].bo_index].read_domains |= (1 << 31);
@@ -604,7 +612,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (ret) {
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "validate: %d\n", ret);
- goto out;
+ goto out_prevalid;
}
/* Apply any relocations that are required */
@@ -697,6 +705,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
nouveau_fence_unref(&fence);
+
+out_prevalid:
kfree(bo);
kfree(push);
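nouveau_gem_ioctl_new() above now passes the userspace domain mask straight into nouveau_gem_new(), which owns the domain-to-placement translation. That mapping, isolated (it mirrors the hunk above and assumes the usual DRM/TTM headers):

/* NOUVEAU_GEM_DOMAIN_* request bits -> TTM placement flags, as done in
 * nouveau_gem_new() above; with no usable domain (or an explicit CPU
 * domain) the object falls back to system memory. */
static uint32_t domain_to_placement(uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || (domain & NOUVEAU_GEM_DOMAIN_CPU))
		flags |= TTM_PL_FLAG_SYSTEM;
	return flags;
}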
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 26347b7cd872..5045f8b921d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -152,7 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- nouveau_bo_unpin(dev_priv->vga_ram);
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -393,11 +392,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
int ret, dma_bits;
- if (dev_priv->card_type >= NV_50 &&
- pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
- dma_bits = 40;
- else
- dma_bits = 32;
+ dma_bits = 32;
+ if (dev_priv->card_type >= NV_50) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+ dma_bits = 40;
+ } else
+ if (drm_pci_device_is_pcie(dev) &&
+ dev_priv->chipset > 0x40 &&
+ dev_priv->chipset != 0x45) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
+ dma_bits = 39;
+ }
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
@@ -419,14 +424,32 @@ nouveau_mem_vram_init(struct drm_device *dev)
}
/* reserve space at end of VRAM for PRAMIN */
- if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
- dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
- dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
- else
- if (dev_priv->card_type >= NV_40)
- dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
- else
- dev_priv->ramin_rsvd_vram = (512 * 1024);
+ if (dev_priv->card_type >= NV_50) {
+ dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
+ } else
+ if (dev_priv->card_type >= NV_40) {
+ u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
+ u32 rsvd;
+
+ /* estimate grctx size, the magics come from nv40_grctx.c */
+ if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
+ else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
+ else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
+ else rsvd = 0x4a40 * vs;
+ rsvd += 16 * 1024;
+ rsvd *= dev_priv->engine.fifo.channels;
+
+ /* pciegart table */
+ if (drm_pci_device_is_pcie(dev))
+ rsvd += 512 * 1024;
+
+ /* object storage */
+ rsvd += 512 * 1024;
+
+ dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
+ } else {
+ dev_priv->ramin_rsvd_vram = 512 * 1024;
+ }
ret = dev_priv->engine.vram.init(dev);
if (ret)
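A worked instance of the NV40-family PRAMIN reservation estimate above, runnable in user space; the shader-unit and channel counts are assumptions, and the 0x6aa0 magic is the chipset-0x40 value quoted from nv40_grctx.c:

#include <stdio.h>

int main(void)
{
	unsigned vs = 2, channels = 32;		/* hypothetical NV40 config */
	unsigned long rsvd = 0x6aa0 * vs;	/* grctx estimate, chipset 0x40 */

	rsvd += 16 * 1024;			/* per-channel slack */
	rsvd *= channels;
	rsvd += 512 * 1024;			/* pciegart table */
	rsvd += 512 * 1024;			/* object storage */
	rsvd  = (rsvd + 4095) & ~4095UL;	/* round_up(rsvd, 4096) */

	printf("ramin_rsvd_vram = %lu KiB\n", rsvd >> 10);
	return 0;
}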
@@ -455,13 +478,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
- 0, 0, true, true, &dev_priv->vga_ram);
- if (ret == 0)
- ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_WARN(dev, "failed to reserve VGA memory\n");
- nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, &dev_priv->vga_ram);
+ if (ret == 0)
+ ret = nouveau_bo_pin(dev_priv->vga_ram,
+ TTM_PL_FLAG_VRAM);
+
+ if (ret) {
+ NV_WARN(dev, "failed to reserve VGA memory\n");
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ }
}
dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
@@ -480,7 +507,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_NONE;
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
+ if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -525,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
u8 tRC; /* Byte 9 */
u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+ u8 magic_number = 0; /* Yeah... sorry */
u8 *mem = NULL, *entry;
int i, recordlen, entries;
@@ -569,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev)
if (!memtimings->timing)
return;
+ /* Get "some number" from the timing reg for NV_40
+ * Used in calculations later */
+ if(dev_priv->card_type == NV_40) {
+ magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24;
+ }
+
entry = mem + mem[1];
for (i = 0; i < entries; i++, entry += recordlen) {
struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
@@ -608,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev)
/* XXX: I don't trust the -1's and +1's... they must come
* from somewhere! */
- timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+ timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
tUNK_18 << 16 |
- (tUNK_1 + tUNK_19 + 1) << 8 |
- (tUNK_2 - 1));
+ (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
+ if (dev_priv->chipset == 0xa8) {
+ timing->reg_100224 |= (tUNK_2 - 1);
+ } else {
+ timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
+ }
timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
- if(recordlen > 19) {
- timing->reg_100228 += (tUNK_19 - 1) << 24;
- }/* I cannot back-up this else-statement right now
- else {
- timing->reg_100228 += tUNK_12 << 24;
- }*/
-
- /* XXX: reg_10022c */
- timing->reg_10022c = tUNK_2 - 1;
-
- timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
- tUNK_13 << 8 | tUNK_13);
-
- /* XXX: +6? */
- timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
- timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
-
- /* XXX; reg_100238, reg_10023c
- * reg: 0x00??????
- * reg_10023c:
- * 0 for pre-NV50 cards
- * 0x????0202 for NV50+ cards (empirical evidence) */
- if(dev_priv->card_type >= NV_50) {
+ if (dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) {
+ timing->reg_100228 |= (tUNK_19 - 1) << 24;
+ }
+
+ if (dev_priv->card_type == NV_40) {
+ /* NV40: don't know what the rest of the regs are..
+ * And don't need to know either */
+ timing->reg_100228 |= 0x20200000 | magic_number << 24;
+ } else if (dev_priv->card_type >= NV_50) {
+ /* XXX: reg_10022c */
+ timing->reg_10022c = tUNK_2 - 1;
+
+ timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+ tUNK_13 << 8 | tUNK_13);
+
+ timing->reg_100234 = (tRAS << 24 | tRC);
+ timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
+
+ if (dev_priv->chipset < 0xa3) {
+ timing->reg_100234 |= (tUNK_2 + 2) << 8;
+ } else {
+ /* XXX: +6? */
+ timing->reg_100234 |= (tUNK_19 + 6) << 8;
+ }
+
+ /* XXX; reg_100238, reg_10023c
+ * reg_100238: 0x00??????
+ * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */
timing->reg_10023c = 0x202;
+ if (dev_priv->chipset < 0xa3) {
+ timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
+ } else {
+ /* currently unknown
+ * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+ }
}
NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
@@ -648,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
timing->reg_100238, timing->reg_10023c);
}
- memtimings->nr_timing = entries;
+ memtimings->nr_timing = entries;
memtimings->supported = true;
}
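As a concrete instance of the reg_100224 packing above (non-0xa8 branch), with made-up timing-table bytes; magic_number comes from bits 27:24 of 0x100228 on NV40 and is zero elsewhere:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed timing-table bytes, for illustration only */
	uint8_t tUNK_0 = 2, tUNK_1 = 3, tUNK_2 = 8, tUNK_18 = 1, tUNK_19 = 2;
	uint8_t magic_number = 1;	/* NV40: (0x100228 & 0x0f000000) >> 24 */
	uint32_t reg_100224;

	reg_100224  = (uint32_t)(tUNK_0 + tUNK_19 + 1 + magic_number) << 24;
	reg_100224 |= (uint32_t)tUNK_18 << 16;
	reg_100224 |= (uint32_t)(tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
	reg_100224 |= (uint32_t)(tUNK_2 + 2 - magic_number);

	printf("reg_100224 = 0x%08x\n", reg_100224);	/* 0x06010709 here */
	return 0;
}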
@@ -666,13 +715,14 @@ nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_mm *mm;
- u32 b_size;
+ u64 size, block, rsvd;
int ret;
- p_size = (p_size << PAGE_SHIFT) >> 12;
- b_size = dev_priv->vram_rblock_size >> 12;
+ rsvd = (256 * 1024); /* vga memory */
+ size = (p_size << PAGE_SHIFT) - rsvd;
+ block = dev_priv->vram_rblock_size;
- ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+ ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
if (ret)
return ret;
@@ -700,9 +750,15 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = dev_priv->dev;
- vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+
+ vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
static int
@@ -715,7 +771,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vram *node;
+ struct nouveau_mem *node;
u32 size_nc = 0;
int ret;
@@ -724,9 +780,11 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
- (nvbo->tile_flags >> 8) & 0xff, &node);
- if (ret)
- return ret;
+ (nvbo->tile_flags >> 8) & 0x3ff, &node);
+ if (ret) {
+ mem->mm_node = NULL;
+ return (ret == -ENOSPC) ? 0 : ret;
+ }
node->page_shift = 12;
if (nvbo->vma.node)
@@ -769,3 +827,84 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
nouveau_vram_manager_del,
nouveau_vram_manager_debug
};
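nouveau_vram_manager_new() above adopts TTM's soft-failure convention: on -ENOSPC it leaves mem->mm_node NULL and returns 0, so ttm_bo_mem_space() moves on to the buffer's next placement instead of failing hard. The shape of that pattern, with a hypothetical backend allocator:

/* Sketch of the TTM memory-manager soft-failure convention used above;
 * my_alloc_range() and struct my_node are placeholders. */
static int my_manager_new(struct ttm_mem_type_manager *man,
			  struct ttm_buffer_object *bo,
			  struct ttm_placement *placement,
			  struct ttm_mem_reg *mem)
{
	struct my_node *node;
	int ret;

	ret = my_alloc_range(man, mem->num_pages, &node);
	if (ret) {
		mem->mm_node = NULL;		/* nothing was allocated */
		return ret == -ENOSPC ? 0 : ret; /* ENOSPC: try next placement */
	}

	mem->mm_node = node;
	return 0;
}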
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+ return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+ return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct nouveau_mem *node = mem->mm_node;
+
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+ mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
+ struct nouveau_mem *node;
+ int ret;
+
+ if (unlikely((mem->num_pages << PAGE_SHIFT) >=
+ dev_priv->gart_info.aper_size))
+ return -ENOMEM;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ /* This node must be for evicting large-paged VRAM
+ * to system memory. Due to an nv50 limitation of
+ * not being able to mix large/small pages within
+ * the same PDE, we need to create a temporary
+ * small-paged VMA for the eviction.
+ */
+ if (vma->node->type != vm->spg_shift) {
+ ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
+ vm->spg_shift, NV_MEM_ACCESS_RW,
+ &node->tmp_vma);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+ }
+
+ node->page_shift = nvbo->vma.node->type;
+ mem->mm_node = node;
+ mem->start = 0;
+ return 0;
+}
+
+void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+ nouveau_gart_manager_init,
+ nouveau_gart_manager_fini,
+ nouveau_gart_manager_new,
+ nouveau_gart_manager_del,
+ nouveau_gart_manager_debug
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 8844b50c3e54..7609756b6faf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
return 0;
}
- return -ENOMEM;
+ return -ENOSPC;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 798eaf39691c..1f7483aae9a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
- u32 memtype, struct nouveau_vram **);
-void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+ u32 memtype, struct nouveau_mem **);
+void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
int nvc0_vram_init(struct drm_device *);
int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nouveau_vram **);
+ u32 memtype, struct nouveau_mem **);
bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d604b820..5b39718ae1f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -35,20 +35,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_bo *ntfy = NULL;
- uint32_t flags;
+ uint32_t flags, ttmpl;
int ret;
- if (nouveau_vram_notify)
- flags = TTM_PL_FLAG_VRAM;
- else
- flags = TTM_PL_FLAG_TT;
+ if (nouveau_vram_notify) {
+ flags = NOUVEAU_GEM_DOMAIN_VRAM;
+ ttmpl = TTM_PL_FLAG_VRAM;
+ } else {
+ flags = NOUVEAU_GEM_DOMAIN_GART;
+ ttmpl = TTM_PL_FLAG_TT;
+ }
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
- 0, 0x0000, false, true, &ntfy);
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
- ret = nouveau_bo_pin(ntfy, flags);
+ ret = nouveau_bo_pin(ntfy, ttmpl);
if (ret)
goto out_err;
@@ -96,27 +98,35 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
- int size, uint32_t *b_offset)
+ int size, uint32_t start, uint32_t end,
+ uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
uint32_t offset;
int target, ret;
- mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+ mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+ start, end, 0);
if (mem)
- mem = drm_mm_get_block(mem, size, 0);
+ mem = drm_mm_get_block_range(mem, size, 0, start, end);
if (!mem) {
NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
return -ENOMEM;
}
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
- target = NV_MEM_TARGET_VRAM;
- else
- target = NV_MEM_TARGET_GART;
- offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ if (dev_priv->card_type < NV_50) {
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ target = NV_MEM_TARGET_VRAM;
+ else
+ target = NV_MEM_TARGET_GART;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ } else {
+ target = NV_MEM_TARGET_VM;
+ offset = chan->notifier_bo->vma.offset;
+ }
offset += mem->start;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
@@ -177,7 +187,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
if (IS_ERR(chan))
return PTR_ERR(chan);
- ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+ ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+ &na->offset);
nouveau_channel_put(&chan);
return ret;
}
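The allocator above moves to the range-restricted drm_mm helpers so callers can pin a notifier into a fixed window (nouveau_dma_init() now requests [0xfe0, 0x1000) for the m2mf notifier). The two-step search/claim idiom of this drm_mm generation, annotated:

/* drm_mm allocation restricted to [start, end): first search for a
 * candidate hole, then claim a block out of it; either step can come
 * back NULL, so both funnel into the single failure check. */
struct drm_mm_node *mem;

mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
				  start, end, 0);
if (mem)
	mem = drm_mm_get_block_range(mem, size, 0, start, end);
if (!mem)
	return -ENOMEM;	/* window full */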
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 30b6544467ca..67a16e01ffa6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -36,6 +36,7 @@
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
+#include "nv50_display.h"
struct nouveau_gpuobj_method {
struct list_head head;
@@ -490,16 +491,22 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
}
if (target == NV_MEM_TARGET_GART) {
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
- target = NV_MEM_TARGET_PCI_NOSNOOP;
- base += dev_priv->gart_info.aper_base;
- } else
- if (base != 0) {
- base = nouveau_sgdma_get_physical(dev, base);
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
+ if (base == 0) {
+ nouveau_gpuobj_ref(gart, pobj);
+ return 0;
+ }
+
+ base = nouveau_sgdma_get_physical(dev, base);
target = NV_MEM_TARGET_PCI;
} else {
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
- return 0;
+ base += dev_priv->gart_info.aper_base;
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
+ target = NV_MEM_TARGET_PCI_NOSNOOP;
+ else
+ target = NV_MEM_TARGET_PCI;
}
}
@@ -776,7 +783,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret;
+ int ret, i;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
@@ -841,6 +848,25 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
+
+ /* dma objects for display sync channel semaphore blocks */
+ for (i = 0; i < 2; i++) {
+ struct nouveau_gpuobj *sem = NULL;
+ struct nv50_display_crtc *dispc =
+ &nv50_display(dev)->crtc[i];
+ u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &sem);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
+ nouveau_gpuobj_ref(NULL, &sem);
+ if (ret)
+ return ret;
+ }
}
/* VRAM ctxdma */
@@ -909,7 +935,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
}
@@ -1013,19 +1039,20 @@ nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_device *dev = gpuobj->dev;
+ unsigned long flags;
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
u64 ptr = gpuobj->vinst + offset;
u32 base = ptr >> 16;
u32 val;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
if (dev_priv->ramin_base != base) {
dev_priv->ramin_base = base;
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
}
val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
return val;
}
@@ -1037,18 +1064,19 @@ nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_device *dev = gpuobj->dev;
+ unsigned long flags;
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
u64 ptr = gpuobj->vinst + offset;
u32 base = ptr >> 16;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
if (dev_priv->ramin_base != base) {
dev_priv->ramin_base = base;
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
}
nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index ac62a1b8c4fc..670e3cb697ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev)
case 0x13:
case 0x15:
perflvl->fanspeed = entry[55];
- perflvl->voltage = entry[56];
+ perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
perflvl->core = ROM32(entry[1]) * 10;
perflvl->memory = ROM32(entry[5]) * 20;
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index bef3e6910418..a24a81f5a89e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -114,7 +114,9 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (gpuobj->cinst << 10) | chan->id;
+ ctx = (gpuobj->cinst << 10) |
+ (chan->id << 28) |
+ chan->id; /* HASH_TAG */
} else {
ctx = (gpuobj->cinst >> 4) |
((gpuobj->engine <<
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 9a250eb53098..4bce801bc588 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
struct drm_device *dev;
dma_addr_t *pages;
+ bool *ttm_alloced;
unsigned nr_pages;
u64 offset;
@@ -20,7 +21,8 @@ struct nouveau_sgdma_be {
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
- struct page **pages, struct page *dummy_read_page)
+ struct page **pages, struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -34,15 +36,26 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
if (!nvbe->pages)
return -ENOMEM;
+ nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+ if (!nvbe->ttm_alloced)
+ return -ENOMEM;
+
nvbe->nr_pages = 0;
while (num_pages--) {
- nvbe->pages[nvbe->nr_pages] =
- pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+ if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+ nvbe->pages[nvbe->nr_pages] =
+ dma_addrs[nvbe->nr_pages];
+ nvbe->ttm_alloced[nvbe->nr_pages] = true;
+ } else {
+ nvbe->pages[nvbe->nr_pages] =
+ pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev,
- nvbe->pages[nvbe->nr_pages])) {
- be->func->clear(be);
- return -EFAULT;
+ if (pci_dma_mapping_error(dev->pdev,
+ nvbe->pages[nvbe->nr_pages])) {
+ be->func->clear(be);
+ return -EFAULT;
+ }
+ nvbe->ttm_alloced[nvbe->nr_pages] = false;
}
nvbe->nr_pages++;
@@ -65,17 +78,36 @@ nouveau_sgdma_clear(struct ttm_backend *be)
be->func->unbind(be);
while (nvbe->nr_pages--) {
- pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+ if (!nvbe->ttm_alloced[nvbe->nr_pages])
+ pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
kfree(nvbe->pages);
+ kfree(nvbe->ttm_alloced);
nvbe->pages = NULL;
+ nvbe->ttm_alloced = NULL;
nvbe->nr_pages = 0;
}
}
+static void
+nouveau_sgdma_destroy(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+
+ if (be) {
+ NV_DEBUG(nvbe->dev, "\n");
+
+ if (nvbe) {
+ if (nvbe->pages)
+ be->func->clear(be);
+ kfree(nvbe);
+ }
+ }
+}
+
static int
-nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -102,7 +134,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
static int
-nouveau_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -125,59 +157,245 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
return 0;
}
+static struct ttm_backend_func nv04_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv04_sgdma_bind,
+ .unbind = nv04_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
+nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+ struct drm_device *dev = nvbe->dev;
+
+ nv_wr32(dev, 0x100810, 0x00000022);
+ if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
+ NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
+ nv_rd32(dev, 0x100810));
+ nv_wr32(dev, 0x100810, 0x00000000);
+}
+
+static int
+nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ dma_addr_t *list = nvbe->pages;
+ u32 pte = mem->start << 2;
+ u32 cnt = nvbe->nr_pages;
- if (be) {
- NV_DEBUG(nvbe->dev, "\n");
+ nvbe->offset = mem->start << PAGE_SHIFT;
- if (nvbe) {
- if (nvbe->pages)
- be->func->clear(be);
- kfree(nvbe);
+ while (cnt--) {
+ nv_wo32(pgt, pte, (*list++ >> 7) | 1);
+ pte += 4;
+ }
+
+ nv41_sgdma_flush(nvbe);
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv41_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ u32 pte = (nvbe->offset >> 12) << 2;
+ u32 cnt = nvbe->nr_pages;
+
+ while (cnt--) {
+ nv_wo32(pgt, pte, 0x00000000);
+ pte += 4;
+ }
+
+ nv41_sgdma_flush(nvbe);
+ nvbe->bound = false;
+ return 0;
+}
+
+static struct ttm_backend_func nv41_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv41_sgdma_bind,
+ .unbind = nv41_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
+static void
+nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+ struct drm_device *dev = nvbe->dev;
+
+ nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+ nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
+ if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
+ NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
+ nv_rd32(dev, 0x100808));
+ nv_wr32(dev, 0x100808, 0x00000000);
+}
+
+static void
+nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
+{
+ struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+ dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
+ u32 pte, tmp[4];
+
+ pte = base >> 2;
+ base &= ~0x0000000f;
+
+ tmp[0] = nv_ro32(pgt, base + 0x0);
+ tmp[1] = nv_ro32(pgt, base + 0x4);
+ tmp[2] = nv_ro32(pgt, base + 0x8);
+ tmp[3] = nv_ro32(pgt, base + 0xc);
+ while (cnt--) {
+ u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
+ switch (pte++ & 0x3) {
+ case 0:
+ tmp[0] &= ~0x07ffffff;
+ tmp[0] |= addr;
+ break;
+ case 1:
+ tmp[0] &= ~0xf8000000;
+ tmp[0] |= addr << 27;
+ tmp[1] &= ~0x003fffff;
+ tmp[1] |= addr >> 5;
+ break;
+ case 2:
+ tmp[1] &= ~0xffc00000;
+ tmp[1] |= addr << 22;
+ tmp[2] &= ~0x0001ffff;
+ tmp[2] |= addr >> 10;
+ break;
+ case 3:
+ tmp[2] &= ~0xfffe0000;
+ tmp[2] |= addr << 17;
+ tmp[3] &= ~0x00000fff;
+ tmp[3] |= addr >> 15;
+ break;
}
}
+
+ tmp[3] |= 0x40000000;
+
+ nv_wo32(pgt, base + 0x0, tmp[0]);
+ nv_wo32(pgt, base + 0x4, tmp[1]);
+ nv_wo32(pgt, base + 0x8, tmp[2]);
+ nv_wo32(pgt, base + 0xc, tmp[3]);
}
static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ dma_addr_t *list = nvbe->pages;
+ u32 pte = mem->start << 2, tmp[4];
+ u32 cnt = nvbe->nr_pages;
+ int i;
nvbe->offset = mem->start << PAGE_SHIFT;
- nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
- nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+ if (pte & 0x0000000c) {
+ u32 max = 4 - ((pte >> 2) & 0x3);
+ u32 part = (cnt > max) ? max : cnt;
+ nv44_sgdma_fill(pgt, list, pte, part);
+ pte += (part << 2);
+ list += part;
+ cnt -= part;
+ }
+
+ while (cnt >= 4) {
+ for (i = 0; i < 4; i++)
+ tmp[i] = *list++ >> 12;
+ nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
+ nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
+ nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
+ nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
+ pte += 0x10;
+ cnt -= 4;
+ }
+
+ if (cnt)
+ nv44_sgdma_fill(pgt, list, pte, cnt);
+
+ nv44_sgdma_flush(nvbe);
nvbe->bound = true;
return 0;
}
static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ u32 pte = (nvbe->offset >> 12) << 2;
+ u32 cnt = nvbe->nr_pages;
+
+ if (pte & 0x0000000c) {
+ u32 max = 4 - ((pte >> 2) & 0x3);
+ u32 part = (cnt > max) ? max : cnt;
+ nv44_sgdma_fill(pgt, NULL, pte, part);
+ pte += (part << 2);
+ cnt -= part;
+ }
- if (!nvbe->bound)
- return 0;
+ while (cnt >= 4) {
+ nv_wo32(pgt, pte + 0x0, 0x00000000);
+ nv_wo32(pgt, pte + 0x4, 0x00000000);
+ nv_wo32(pgt, pte + 0x8, 0x00000000);
+ nv_wo32(pgt, pte + 0xc, 0x00000000);
+ pte += 0x10;
+ cnt -= 4;
+ }
+
+ if (cnt)
+ nv44_sgdma_fill(pgt, NULL, pte, cnt);
- nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
- nvbe->nr_pages << PAGE_SHIFT);
+ nv44_sgdma_flush(nvbe);
nvbe->bound = false;
return 0;
}
-static struct ttm_backend_func nouveau_sgdma_backend = {
+static struct ttm_backend_func nv44_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
- .bind = nouveau_sgdma_bind,
- .unbind = nouveau_sgdma_unbind,
+ .bind = nv44_sgdma_bind,
+ .unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_mem *node = mem->mm_node;
+ /* noop: bound in move_notify() */
+ node->pages = nvbe->pages;
+ nvbe->pages = (dma_addr_t *)node;
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
+ /* noop: unbound in move_notify() */
+ nvbe->pages = node->pages;
+ node->pages = NULL;
+ nvbe->bound = false;
+ return 0;
+}
+
static struct ttm_backend_func nv50_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
@@ -198,10 +416,7 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
nvbe->dev = dev;
- if (dev_priv->card_type < NV_50)
- nvbe->backend.func = &nouveau_sgdma_backend;
- else
- nvbe->backend.func = &nv50_sgdma_backend;
+ nvbe->backend.func = dev_priv->gart_info.func;
return &nvbe->backend;
}
@@ -210,21 +425,64 @@ nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
- uint32_t aper_size, obj_size;
- int i, ret;
+ u32 aper_size, align;
+ int ret;
- if (dev_priv->card_type < NV_50) {
- if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
- aper_size = 64 * 1024 * 1024;
- else
- aper_size = 512 * 1024 * 1024;
+ if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
+ aper_size = 512 * 1024 * 1024;
+ else
+ aper_size = 64 * 1024 * 1024;
+
+ /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
+ * christmas. The cards before it have them, the cards after
+ * it have them, why is NV44 so unloved?
+ */
+ dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
+ if (!dev_priv->gart_info.dummy.page)
+ return -ENOMEM;
+
+ dev_priv->gart_info.dummy.addr =
+ pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
+ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
+ NV_ERROR(dev, "error mapping dummy page\n");
+ __free_page(dev_priv->gart_info.dummy.page);
+ dev_priv->gart_info.dummy.page = NULL;
+ return -ENOMEM;
+ }
+
+ if (dev_priv->card_type >= NV_50) {
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.type = NOUVEAU_GART_HW;
+ dev_priv->gart_info.func = &nv50_sgdma_backend;
+ } else
+ if (drm_pci_device_is_pcie(dev) &&
+ dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
+ if (nv44_graph_class(dev)) {
+ dev_priv->gart_info.func = &nv44_sgdma_backend;
+ align = 512 * 1024;
+ } else {
+ dev_priv->gart_info.func = &nv41_sgdma_backend;
+ align = 16;
+ }
- obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
- obj_size += 8; /* ctxdma header */
+ ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
- ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.type = NOUVEAU_GART_HW;
+ } else {
+ ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
return ret;
@@ -236,25 +494,14 @@ nouveau_sgdma_init(struct drm_device *dev)
(0 << 14) /* RW */ |
(2 << 16) /* PCI */);
nv_wo32(gpuobj, 4, aper_size - 1);
- for (i = 2; i < 2 + (aper_size >> 12); i++)
- nv_wo32(gpuobj, i * 4, 0x00000000);
dev_priv->gart_info.sg_ctxdma = gpuobj;
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
- } else
- if (dev_priv->chan_vm) {
- ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
- 12, NV_MEM_ACCESS_RW,
- &dev_priv->gart_info.vma);
- if (ret)
- return ret;
-
- dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
- dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
+ dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
+ dev_priv->gart_info.func = &nv04_sgdma_backend;
}
- dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
return 0;
}
@@ -264,7 +511,13 @@ nouveau_sgdma_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
- nouveau_vm_put(&dev_priv->gart_info.vma);
+
+ if (dev_priv->gart_info.dummy.page) {
+ pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ __free_page(dev_priv->gart_info.dummy.page);
+ dev_priv->gart_info.dummy.page = NULL;
+ }
}
uint32_t
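Most of the new sgdma code above is the nv44 page-table layout: the GART packs four 27-bit page addresses into four 32-bit words, with a valid flag (0x40000000) in the last word, so nv44_sgdma_bind writes aligned groups of four directly and falls back to the read-modify-write in nv44_sgdma_fill for the unaligned head and tail of a mapping. The standalone demo below reproduces the packing arithmetic of the aligned fast path; the page numbers are invented:

/* Standalone demo (cc demo.c) of the 4x27-bit PTE packing used by
 * nv44_sgdma_bind above. */
#include <stdio.h>
#include <stdint.h>

static void pack4(uint32_t w[4], const uint32_t a[4])
{
        /* Four 27-bit entries packed low-bits-first into 128 bits,
         * plus the valid flag in the last word, exactly as the fast
         * path writes them. */
        w[0] = a[0] >> 0  | a[1] << 27;
        w[1] = a[1] >> 5  | a[2] << 22;
        w[2] = a[2] >> 10 | a[3] << 17;
        w[3] = a[3] >> 15 | 0x40000000;
}

int main(void)
{
        uint32_t addr[4] = { 0x0000001, 0x0abcdef, 0x7ffffff, 0x0000100 };
        uint32_t w[4], back;
        int i;

        pack4(w, addr);
        for (i = 0; i < 4; i++)
                printf("word[%d] = 0x%08x\n", i, w[i]);

        /* Recover entry 1 to show the split across words 0 and 1. */
        back = (w[0] >> 27) | ((w[1] & 0x003fffff) << 5);
        printf("entry1 = 0x%07x (expect 0x%07x)\n", back, addr[1]);
        return 0;
}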
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a54fc431fe98..a30adec5beaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv50_graph_destroy_context;
engine->graph.load_context = nv50_graph_load_context;
engine->graph.unload_context = nv50_graph_unload_context;
- if (dev_priv->chipset != 0x86)
+ if (dev_priv->chipset == 0x50 ||
+ dev_priv->chipset == 0xac)
engine->graph.tlb_flush = nv50_graph_tlb_flush;
- else {
- /* from what i can see nvidia do this on every
- * pre-NVA3 board except NVAC, but, we've only
- * ever seen problems on NV86
- */
- engine->graph.tlb_flush = nv86_graph_tlb_flush;
- }
+ else
+ engine->graph.tlb_flush = nv84_graph_tlb_flush;
engine->fifo.channels = 128;
engine->fifo.init = nv50_fifo_init;
engine->fifo.takedown = nv50_fifo_takedown;
@@ -544,7 +540,6 @@ static int
nouveau_card_init_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj = NULL;
int ret;
ret = nouveau_channel_alloc(dev, &dev_priv->channel,
@@ -552,41 +547,8 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
return ret;
- /* no dma objects on fermi... */
- if (dev_priv->card_type >= NV_C0)
- goto out_done;
-
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vram_size,
- NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
- &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->gart_info.aper_size,
- NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
- &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- if (ret)
- goto out_err;
-
-out_done:
mutex_unlock(&dev_priv->channel->mutex);
return 0;
-
-out_err:
- nouveau_channel_put(&dev_priv->channel);
- return ret;
}
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
@@ -646,6 +608,7 @@ nouveau_card_init(struct drm_device *dev)
spin_lock_init(&dev_priv->channels.lock);
spin_lock_init(&dev_priv->tile.lock);
spin_lock_init(&dev_priv->context_switch_lock);
+ spin_lock_init(&dev_priv->vm_lock);
/* Make the CRTCs and I2C buses accessible */
ret = engine->display.early_init(dev);
@@ -738,10 +701,6 @@ nouveau_card_init(struct drm_device *dev)
goto out_fence;
}
- ret = nouveau_backlight_init(dev);
- if (ret)
- NV_ERROR(dev, "Error %d registering backlight\n", ret);
-
nouveau_fbcon_init(dev);
drm_kms_helper_poll_init(dev);
return 0;
@@ -793,8 +752,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
- nouveau_backlight_exit(dev);
-
if (!engine->graph.accel_blocked) {
nouveau_fence_fini(dev);
nouveau_channel_put_unlocked(&dev_priv->channel);
@@ -929,12 +886,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
- dev_priv->wq = create_workqueue("nouveau");
- if (!dev_priv->wq) {
- ret = -EINVAL;
- goto err_priv;
- }
-
/* resource 0 is mmio regs */
/* resource 1 is linear FB */
/* resource 2 is RAMIN (mmio regs + 0x1000000) */
@@ -947,7 +898,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_ERROR(dev, "Unable to initialize the mmio mapping. "
"Please report your setup to " DRIVER_EMAIL "\n");
ret = -EINVAL;
- goto err_wq;
+ goto err_priv;
}
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
@@ -1009,7 +960,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_mmio;
- /* Map PRAMIN BAR, or on older cards, the aperture withing BAR0 */
+ /* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
if (dev_priv->card_type >= NV_40) {
int ramin_bar = 2;
if (pci_resource_len(dev->pdev, ramin_bar) == 0)
@@ -1054,8 +1005,6 @@ err_ramin:
iounmap(dev_priv->ramin);
err_mmio:
iounmap(dev_priv->mmio);
-err_wq:
- destroy_workqueue(dev_priv->wq);
err_priv:
kfree(dev_priv);
dev->dev_private = NULL;
@@ -1103,9 +1052,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = dev->pci_device;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (drm_device_is_agp(dev))
+ if (drm_pci_device_is_agp(dev))
getparam->value = NV_AGP;
- else if (drm_device_is_pcie(dev))
+ else if (drm_pci_device_is_pcie(dev))
getparam->value = NV_PCIE;
else
getparam->value = NV_PCI;
@@ -1126,7 +1075,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = (dev_priv->card_type < NV_50);
+ getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 8d9968e1cba8..649b0413b09f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -239,11 +239,9 @@ static bool
probe_monitoring_device(struct nouveau_i2c_chan *i2c,
struct i2c_board_info *info)
{
- char modalias[16] = "i2c:";
struct i2c_client *client;
- strlcat(modalias, info->type, sizeof(modalias));
- request_module(modalias);
+ request_module("%s%s", I2C_MODULE_PREFIX, info->type);
client = i2c_new_device(&i2c->adapter, info);
if (!client)
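request_module() accepts a printf-style format, so the driver can let it assemble the "i2c:<type>" modalias instead of concatenating into a 16-byte stack buffer. A standalone illustration of what the two spellings produce (I2C_MODULE_PREFIX expands to "i2c:" in mainline headers; the chip name is an example, and strncat stands in for the kernel's strlcat):

/* Standalone illustration (cc demo.c). */
#include <stdio.h>
#include <string.h>

#define I2C_MODULE_PREFIX "i2c:"

int main(void)
{
        const char *type = "w83781d";
        char modalias[16] = I2C_MODULE_PREFIX;

        /* old: manual concatenation into a small fixed buffer */
        strncat(modalias, type, sizeof(modalias) - strlen(modalias) - 1);
        printf("old: %s\n", modalias);

        /* new: request_module() formats "i2c:w83781d" itself */
        printf("new: " I2C_MODULE_PREFIX "%s\n", type);
        return 0;
}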
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
index fbe0fb13bc1e..e51b51503baa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.c
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -47,18 +47,27 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
printk(" (unknown bits 0x%08x)", value);
}
-void
-nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *en, u32 value)
{
while (en->name) {
- if (value == en->value) {
- printk("%s", en->name);
- return;
- }
-
+ if (en->value == value)
+ return en;
en++;
}
+ return NULL;
+}
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+ en = nouveau_enum_find(en, value);
+ if (en) {
+ printk("%s", en->name);
+ return;
+ }
+
printk("(unknown enum 0x%08x)", value);
}
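Splitting the table walk out of nouveau_enum_print() gives callers a way to map a raw register value to its table entry and, together with the data pointer added to struct nouveau_enum in the header below, to attach per-entry handler data. A standalone sketch of the lookup; printf stands in for printk and the table contents are made up:

/* Standalone sketch (cc demo.c) of the lookup/print split above. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct nouveau_enum {
        uint32_t value;
        const char *name;
        void *data;
};

static const struct nouveau_enum *
nouveau_enum_find(const struct nouveau_enum *en, uint32_t value)
{
        while (en->name) {
                if (en->value == value)
                        return en;
                en++;
        }
        return NULL;
}

static const struct nouveau_enum errors[] = {
        { 0x01, "PAGE_NOT_PRESENT", NULL },
        { 0x02, "ILLEGAL_ADDRESS",  NULL },
        { 0, NULL, NULL }
};

int main(void)
{
        const struct nouveau_enum *en = nouveau_enum_find(errors, 0x02);

        if (en)
                printf("%s\n", en->name);
        else
                printf("(unknown enum 0x%08x)\n", 0x02u);
        return 0;
}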
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
index d9ceaea26f4b..b97719fbb739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.h
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -36,10 +36,14 @@ struct nouveau_bitfield {
struct nouveau_enum {
u32 value;
const char *name;
+ void *data;
};
void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
void nouveau_enum_print(const struct nouveau_enum *, u32 value);
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *, u32 value);
+
int nouveau_ratelimit(void);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 97d82aedf86b..0059e6f58a8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -28,7 +28,7 @@
#include "nouveau_vm.h"
void
-nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
struct nouveau_vm *vm = vma->vm;
struct nouveau_mm_node *r;
@@ -40,7 +40,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
u32 max = 1 << (vm->pgt_bits - bits);
u32 end, len;
- list_for_each_entry(r, &vram->regions, rl_entry) {
+ delta = 0;
+ list_for_each_entry(r, &node->regions, rl_entry) {
u64 phys = (u64)r->offset << 12;
u32 num = r->length >> bits;
@@ -52,7 +53,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
end = max;
len = end - pte;
- vm->map(vma, pgt, vram, pte, len, phys);
+ vm->map(vma, pgt, node, pte, len, phys, delta);
num -= len;
pte += len;
@@ -60,6 +61,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
pde++;
pte = 0;
}
+
+ delta += (u64)len << vma->node->type;
}
}
@@ -67,14 +70,14 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
}
void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
- nouveau_vm_map_at(vma, 0, vram);
+ nouveau_vm_map_at(vma, 0, node);
}
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- dma_addr_t *list)
+ struct nouveau_mem *mem, dma_addr_t *list)
{
struct nouveau_vm *vm = vma->vm;
int big = vma->node->type != vm->spg_shift;
@@ -94,7 +97,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
end = max;
len = end - pte;
- vm->map_sg(vma, pgt, pte, list, len);
+ vm->map_sg(vma, pgt, mem, pte, len, list);
num -= len;
pte += len;
@@ -311,18 +314,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
vm->spg_shift = 12;
vm->lpg_shift = 17;
pgt_bits = 27;
-
- /* Should be 4096 everywhere, this is a hack that's
- * currently necessary to avoid an elusive bug that
- * causes corruption when mixing small/large pages
- */
- if (length < (1ULL << 40))
- block = 4096;
- else {
- block = (1 << pgt_bits);
- if (length < block)
- block = length;
- }
+ block = 4096;
} else {
kfree(vm);
return -ENOSYS;
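The new delta bookkeeping in nouveau_vm_map_at() tells the per-chipset map() callback how far into the buffer each chunk of PTEs lies, now that a single mapping can be split across memory regions and page-table boundaries. The standalone simulation below walks two invented regions through an 8-entry page table and prints the (pde, pte, len, delta) tuples that would be handed to vm->map(); the geometry is made up for illustration:

/* Standalone simulation (cc demo.c) of the delta bookkeeping above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const unsigned page_shift = 12;  /* vma->node->type */
        const unsigned max = 8;          /* PTEs per page table */
        uint32_t region_pages[] = { 5, 6 };
        uint32_t pte = 6, pde = 0;       /* start near the end of table 0 */
        uint64_t delta = 0;
        unsigned i;

        for (i = 0; i < 2; i++) {
                uint32_t num = region_pages[i];

                while (num) {
                        uint32_t end = pte + num;
                        uint32_t len;

                        if (end >= max)
                                end = max;
                        len = end - pte;

                        /* stands in for vm->map(..., pte, len, phys, delta) */
                        printf("map pde=%u pte=%u len=%u delta=0x%llx\n",
                               pde, pte, len, (unsigned long long)delta);

                        num -= len;
                        pte += len;
                        if (end >= max) {
                                pde++;
                                pte = 0;
                        }
                        delta += (uint64_t)len << page_shift;
                }
        }
        return 0;
}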
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index e1193515771b..2e06b55cfdc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -67,9 +67,10 @@ struct nouveau_vm {
void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt,
+ u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void (*flush)(struct nouveau_vm *);
};
@@ -82,20 +83,20 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *);
-void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
-void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- dma_addr_t *);
+ struct nouveau_mem *, dma_addr_t *);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);
@@ -104,9 +105,9 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_flush(struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 297505eb98d5..748b9d9c2949 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -164,7 +164,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode,
nv_crtc->index);
- if (nv_crtc->last_dpms == mode) /* Don't do unnecesary mode changes. */
+ if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
return;
nv_crtc->last_dpms = mode;
@@ -677,7 +677,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
NVBlankScreen(dev, nv_crtc->index, true);
- /* Some more preperation. */
+ /* Some more preparation. */
NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
if (dev_priv->card_type == NV_40) {
uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
@@ -1031,7 +1031,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index c82db37d9f41..12098bf839c4 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
int head = nv_encoder->restore.head;
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
- struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
- if (native_mode)
- call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
- native_mode->clock);
- else
- NV_ERROR(dev, "Not restoring LVDS without native mode\n");
+ struct nouveau_connector *connector =
+ nouveau_encoder_connector_get(nv_encoder);
+
+ if (connector && connector->native_mode)
+ call_lvds_script(dev, nv_encoder->dcb, head,
+ LVDS_PANEL_ON,
+ connector->native_mode->clock);
} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
int clock = nouveau_hw_pllvals_to_clk
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f89d104698df..db465a3ee1b2 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -379,6 +379,15 @@ out:
return handled;
}
+static const char *nv_dma_state_err(u32 state)
+{
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
+}
+
void
nv04_fifo_isr(struct drm_device *dev)
{
@@ -460,9 +469,10 @@ nv04_fifo_isr(struct drm_device *dev)
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x Push 0x%08x\n",
+ "State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
dma_put, ib_get, ib_put, state,
+ nv_dma_state_err(state),
push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
@@ -476,8 +486,9 @@ nv04_fifo_isr(struct drm_device *dev)
}
} else {
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x Push 0x%08x\n",
- chid, dma_get, dma_put, state, push);
+ "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
if (dma_get != dma_put)
nv_wr32(dev, 0x003244, dma_put);
@@ -505,7 +516,7 @@ nv04_fifo_isr(struct drm_device *dev)
if (dev_priv->card_type == NV_50) {
if (status & 0x00000010) {
- nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+ nv50_fb_vm_trap(dev, nouveau_ratelimit());
status &= ~0x00000010;
nv_wr32(dev, 0x002100, 0x00000010);
}
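nv_dma_state_err() folds bits 31:29 of the CACHE1 DMA_STATE register into a name, so the PFIFO_DMA_PUSHER message now says why the pusher stopped instead of only dumping the raw word. A standalone demo of the decode; the register value is illustrative:

/* Standalone demo (cc demo.c) of the DMA_STATE decode added above. */
#include <stdio.h>
#include <stdint.h>

static const char *nv_dma_state_err(uint32_t state)
{
        static const char * const desc[] = {
                "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD",
                "RET_SUBR_INACTIVE", "INVALID_CMD",
                "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
        };
        return desc[(state >> 29) & 0x7];
}

int main(void)
{
        uint32_t state = 0x80000000;    /* illustrative register value */

        printf("state 0x%08x (err: %s)\n", state, nv_dma_state_err(state));
        return 0;
}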
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 28119fd19d03..3900cebba560 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -197,10 +197,12 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
- struct drm_display_mode *mode, *tv_mode;
+ const struct drm_display_mode *tv_mode;
int n = 0;
for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+ struct drm_display_mode *mode;
+
mode = drm_mode_duplicate(encoder->dev, tv_mode);
mode->clock = tv_norm->tv_enc_mode.vrefresh *
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index 6bf03840f9eb..622e72221682 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -112,7 +112,7 @@ extern struct nv17_tv_norm_params {
} nv17_tv_norms[NUM_TV_NORMS];
#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
-extern struct drm_display_mode nv17_tv_modes[];
+extern const struct drm_display_mode nv17_tv_modes[];
static inline int interpolate(int y0, int y1, int y2, int x)
{
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index 9d3893c50a41..4d1d29f60307 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -438,7 +438,7 @@ void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
/* Timings similar to the ones the blob sets */
-struct drm_display_mode nv17_tv_modes[] = {
+const struct drm_display_mode nv17_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index f3d9c0505f7b..f0ac2a768c67 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -24,6 +24,53 @@ nv40_fb_set_tile_region(struct drm_device *dev, int i)
}
}
+static void
+nv40_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+ nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
+ nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x100820, 0x00000000);
+}
+
+static void
+nv44_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+ u32 vinst;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+ /* calculate vram address of this PRAMIN block, object
+ * must be allocated on 512KiB alignment, and not exceed
+ * a total size of 512KiB for this to work correctly
+ */
+ vinst = nv_rd32(dev, 0x10020c);
+ vinst -= ((gart->pinst >> 19) + 1) << 19;
+
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
+
+ nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
+ nv_wr32(dev, 0x100850, 0x00008000);
+ nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
+ nv_wr32(dev, 0x100820, 0x00000000);
+ nv_wr32(dev, 0x10082c, 0x00000001);
+ nv_wr32(dev, 0x100800, vinst | 0x00000010);
+}
+
int
nv40_fb_init(struct drm_device *dev)
{
@@ -32,12 +79,12 @@ nv40_fb_init(struct drm_device *dev)
uint32_t tmp;
int i;
- /* This is strictly a NV4x register (don't know about NV5x). */
- /* The blob sets these to all kinds of values, and they mess up our setup. */
- /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
- /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
- /* Any idea what this is? */
- nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
+ if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ if (nv44_graph_class(dev))
+ nv44_fb_init_gart(dev);
+ else
+ nv40_fb_init_gart(dev);
+ }
switch (dev_priv->chipset) {
case 0x40:
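The vinst calculation in nv44_fb_init_gart() relies on PRAMIN sitting at the top of VRAM: given a 512KiB-aligned PRAMIN offset, the VRAM address of the block is the end of memory minus the end of that block. A standalone demo of the arithmetic; the VRAM size (which the driver reads from 0x10020c) and pinst are invented values:

/* Standalone demo (cc demo.c) of the GART table address math above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t vram_size = 0x10000000; /* stands in for nv_rd32(0x10020c) */
        uint32_t pinst = 0x00080000;     /* 512KiB-aligned PRAMIN offset */
        uint32_t vinst;

        vinst = vram_size - (((pinst >> 19) + 1) << 19);
        printf("pinst=0x%08x -> vinst=0x%08x\n", pinst, vinst);
        return 0;
}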
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 18d30c2c1aa6..fceb44c0ec74 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -181,7 +181,7 @@ nv40_graph_load_context(struct nouveau_channel *chan)
NV40_PGRAPH_CTXCTL_CUR_LOADED);
/* 0x32E0 records the instance address of the active FIFO's PGRAPH
* context. If at any time this doesn't match 0x40032C, you will
- * recieve PGRAPH_INTR_CONTEXT_SWITCH
+ * receive PGRAPH_INTR_CONTEXT_SWITCH
*/
nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 9023c4dbb449..a19ccaa025b3 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -65,7 +65,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int index = nv_crtc->index, ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -135,8 +135,7 @@ static int
nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@@ -186,8 +185,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
struct nouveau_connector *nv_connector =
nouveau_crtc_connector_get(nv_crtc);
struct drm_device *dev = nv_crtc->base.dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_display_mode *native_mode = NULL;
struct drm_display_mode *mode = &nv_crtc->base.mode;
uint32_t outX, outY, horiz, vert;
@@ -445,6 +443,39 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
+static int
+nv50_crtc_wait_complete(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
+ u64 start;
+ int ret;
+
+ ret = RING_SPACE(evo, 6);
+ if (ret)
+ return ret;
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x80000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+
+ nv_wo32(disp->ntfy, 0x000, 0x00000000);
+ FIRE_RING (evo);
+
+ start = ptimer->read(dev);
+ do {
+ if (nv_ro32(disp->ntfy, 0x000))
+ return 0;
+ } while (ptimer->read(dev) - start < 2000000000ULL);
+
+ return -EBUSY;
+}
+
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
@@ -453,6 +484,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ nv50_display_flip_stop(crtc);
drm_vblank_pre_modeset(dev, nv_crtc->index);
nv50_crtc_blank(nv_crtc, true);
}
@@ -461,24 +493,14 @@ static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
drm_vblank_post_modeset(dev, nv_crtc->index);
-
- ret = RING_SPACE(evo, 2);
- if (ret) {
- NV_ERROR(dev, "no space while committing crtc\n");
- return;
- }
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
+ nv50_crtc_wait_complete(crtc);
+ nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static bool
@@ -491,15 +513,15 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
static int
nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *passed_fb,
- int x, int y, bool update, bool atomic)
+ int x, int y, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- int ret, format;
+ int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -525,28 +547,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
}
- switch (drm_fb->depth) {
- case 8:
- format = NV50_EVO_CRTC_FB_DEPTH_8;
- break;
- case 15:
- format = NV50_EVO_CRTC_FB_DEPTH_15;
- break;
- case 16:
- format = NV50_EVO_CRTC_FB_DEPTH_16;
- break;
- case 24:
- case 32:
- format = NV50_EVO_CRTC_FB_DEPTH_24;
- break;
- case 30:
- format = NV50_EVO_CRTC_FB_DEPTH_30;
- break;
- default:
- NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
- return -EINVAL;
- }
-
nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
@@ -556,14 +556,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- if (nv_crtc->fb.tile_flags == 0x7a00 ||
- nv_crtc->fb.tile_flags == 0xfe00)
- OUT_RING(evo, NvEvoFB32);
- else
- if (nv_crtc->fb.tile_flags == 0x7000)
- OUT_RING(evo, NvEvoFB16);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
+ OUT_RING (evo, fb->r_dma);
}
ret = RING_SPACE(evo, 12);
@@ -571,45 +564,26 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
- OUT_RING(evo, nv_crtc->fb.offset >> 8);
- OUT_RING(evo, 0);
- OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
- if (!nv_crtc->fb.tile_flags) {
- OUT_RING(evo, drm_fb->pitch | (1 << 20));
- } else {
- u32 tile_mode = fb->nvbo->tile_mode;
- if (dev_priv->card_type >= NV_C0)
- tile_mode >>= 4;
- OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
- }
- if (dev_priv->chipset == 0x50)
- OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
- else
- OUT_RING(evo, format);
+ OUT_RING (evo, nv_crtc->fb.offset >> 8);
+ OUT_RING (evo, 0);
+ OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
+ OUT_RING (evo, fb->r_pitch);
+ OUT_RING (evo, fb->r_format);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
- OUT_RING(evo, fb->base.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
+ OUT_RING (evo, fb->base.depth == 8 ?
+ NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
+ OUT_RING (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
- OUT_RING(evo, (y << 16) | x);
+ OUT_RING (evo, (y << 16) | x);
if (nv_crtc->lut.depth != fb->base.depth) {
nv_crtc->lut.depth = fb->base.depth;
nv50_crtc_lut_load(crtc);
}
- if (update) {
- ret = RING_SPACE(evo, 2);
- if (ret)
- return ret;
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- }
-
return 0;
}
@@ -619,8 +593,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_connector *nv_connector = NULL;
uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
@@ -700,14 +673,25 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false);
+ return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
static int
nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false);
+ int ret;
+
+ nv50_display_flip_stop(crtc);
+ ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+ if (ret)
+ return ret;
+
+ ret = nv50_crtc_wait_complete(crtc);
+ if (ret)
+ return ret;
+
+ return nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static int
@@ -715,7 +699,14 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
- return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true);
+ int ret;
+
+ nv50_display_flip_stop(crtc);
+ ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
+ if (ret)
+ return ret;
+
+ return nv50_crtc_wait_complete(crtc);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -758,7 +749,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
nv_crtc->lut.depth = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->lut.nvbo);
+ 0, 0x0000, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -784,7 +775,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
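nv50_crtc_wait_complete() above replaces the old fire-and-forget UPDATE with an explicit handshake: it kicks the EVO channel with notification enabled, then polls the notifier object for up to two seconds on the PTIMER clock. The standalone sketch below shows the same poll-with-deadline shape; a plain variable stands in for the notifier object and CLOCK_MONOTONIC for the PTIMER engine:

/* Standalone sketch (cc demo.c); the timed-out call spins for the
 * full two seconds before returning. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static volatile uint32_t ntfy;          /* stands in for disp->ntfy */

static uint64_t read_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static int wait_complete(int hw_completes)
{
        uint64_t start = read_ns();

        ntfy = 0;                       /* clear notifier, then kick EVO */
        if (hw_completes)
                ntfy = 1;               /* stands in for the GPU writing it */

        do {
                if (ntfy)               /* nv_ro32(disp->ntfy, 0x000) */
                        return 0;
        } while (read_ns() - start < 2000000000ULL);

        return -1;                      /* -EBUSY in the driver */
}

int main(void)
{
        printf("completed: %d\n", wait_complete(1));
        printf("timed out: %d\n", wait_complete(0));
        return 0;
}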
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 1b9ce3021aa3..9752c35bb84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -36,9 +36,9 @@
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@@ -71,9 +71,9 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 875414b09ade..808f3ec8f827 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -41,8 +41,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
if (!nv_encoder->crtc)
@@ -216,8 +215,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
uint32_t mode_ctl = 0, mode_ctl2 = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7cc94ed9ed95..75a376cc342a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -24,6 +24,7 @@
*
*/
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
#include "nv50_display.h"
#include "nouveau_crtc.h"
#include "nouveau_encoder.h"
@@ -34,6 +35,7 @@
#include "drm_crtc_helper.h"
static void nv50_display_isr(struct drm_device *);
+static void nv50_display_bh(unsigned long);
static inline int
nv50_sor_nr(struct drm_device *dev)
@@ -172,16 +174,16 @@ nv50_display_init(struct drm_device *dev)
ret = nv50_evo_init(dev);
if (ret)
return ret;
- evo = dev_priv->evo;
+ evo = nv50_display(dev)->master;
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
- ret = RING_SPACE(evo, 11);
+ ret = RING_SPACE(evo, 15);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
+ OUT_RING(evo, NvEvoSync);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
@@ -190,6 +192,11 @@ nv50_display_init(struct drm_device *dev)
OUT_RING(evo, 0);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
OUT_RING(evo, 0);
+ /* required to make display sync channels not hate life */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
FIRE_RING(evo);
if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
@@ -201,6 +208,8 @@ nv50_display_init(struct drm_device *dev)
static int nv50_display_disable(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
int ret, i;
@@ -212,12 +221,12 @@ static int nv50_display_disable(struct drm_device *dev)
nv50_crtc_blank(crtc, true);
}
- ret = RING_SPACE(dev_priv->evo, 2);
+ ret = RING_SPACE(evo, 2);
if (ret == 0) {
- BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(dev_priv->evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
}
- FIRE_RING(dev_priv->evo);
+ FIRE_RING(evo);
/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
* cleaning up?
@@ -267,10 +276,16 @@ int nv50_display_create(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_connector *connector, *ct;
+ struct nv50_display *priv;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->engine.display.priv = priv;
+
/* init basic kernel modesetting */
drm_mode_config_init(dev);
@@ -330,7 +345,7 @@ int nv50_display_create(struct drm_device *dev)
}
}
- INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
ret = nv50_display_init(dev);
@@ -345,12 +360,131 @@ int nv50_display_create(struct drm_device *dev)
void
nv50_display_destroy(struct drm_device *dev)
{
+ struct nv50_display *disp = nv50_display(dev);
+
NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
nv50_display_disable(dev);
nouveau_irq_unregister(dev, 26);
+ kfree(disp);
+}
+
+void
+nv50_display_flip_stop(struct drm_crtc *crtc)
+{
+ struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+ struct nouveau_channel *evo = dispc->sync;
+ int ret;
+
+ ret = RING_SPACE(evo, 8);
+ if (ret) {
+ WARN_ON(1);
+ return;
+ }
+
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0094, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x00c0, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0x00000000);
+ FIRE_RING (evo);
+}
+
+int
+nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+ struct nouveau_channel *evo = dispc->sync;
+ int ret;
+
+ ret = RING_SPACE(evo, 24);
+ if (unlikely(ret))
+ return ret;
+
+ /* synchronise with the rendering channel, if necessary */
+ if (likely(chan)) {
+ u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
+
+ ret = RING_SPACE(chan, 10);
+ if (ret) {
+ WIND_RING(evo);
+ return ret;
+ }
+
+ if (dev_priv->chipset < 0xc0) {
+ BEGIN_RING(chan, NvSubSw, 0x0060, 2);
+ OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
+ OUT_RING (chan, dispc->sem.offset);
+ BEGIN_RING(chan, NvSubSw, 0x006c, 1);
+ OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ BEGIN_RING(chan, NvSubSw, 0x0064, 2);
+ OUT_RING (chan, dispc->sem.offset ^ 0x10);
+ OUT_RING (chan, 0x74b1e000);
+ BEGIN_RING(chan, NvSubSw, 0x0060, 1);
+ if (dev_priv->chipset < 0x84)
+ OUT_RING (chan, NvSema);
+ else
+ OUT_RING (chan, chan->vram_handle);
+ } else {
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (chan, 0x1002);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x1001);
+ }
+ FIRE_RING (chan);
+ } else {
+ nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
+ 0xf00d0000 | dispc->sem.value);
+ }
+
+ /* queue the flip on the crtc's "display sync" channel */
+ BEGIN_RING(evo, 0, 0x0100, 1);
+ OUT_RING (evo, 0xfffe0000);
+ BEGIN_RING(evo, 0, 0x0084, 5);
+ OUT_RING (evo, chan ? 0x00000100 : 0x00000010);
+ OUT_RING (evo, dispc->sem.offset);
+ OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (evo, 0x74b1e000);
+ OUT_RING (evo, NvEvoSync);
+ BEGIN_RING(evo, 0, 0x00a0, 2);
+ OUT_RING (evo, 0x00000000);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x00c0, 1);
+ OUT_RING (evo, nv_fb->r_dma);
+ BEGIN_RING(evo, 0, 0x0110, 2);
+ OUT_RING (evo, 0x00000000);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0800, 5);
+ OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+ OUT_RING (evo, 0);
+ OUT_RING (evo, (fb->height << 16) | fb->width);
+ OUT_RING (evo, nv_fb->r_pitch);
+ OUT_RING (evo, nv_fb->r_format);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0x00000000);
+ FIRE_RING (evo);
+
+ dispc->sem.offset ^= 0x10;
+ dispc->sem.value++;
+ return 0;
}
static u16
@@ -466,11 +600,12 @@ static void
nv50_display_unk10_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
@@ -541,7 +676,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
if (dcb->type == type && (dcb->or & (1 << or))) {
nouveau_bios_run_display_table(dev, dcb, 0, -1);
- dev_priv->evo_irq.dcb = dcb;
+ disp->irq.dcb = dcb;
goto ack;
}
}
@@ -587,15 +722,16 @@ static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc;
+ struct nv50_display *disp = nv50_display(dev);
+ u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dcb = dev_priv->evo_irq.dcb;
+ dcb = disp->irq.dcb;
if (dcb) {
nouveau_bios_run_display_table(dev, dcb, 0, -2);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
}
/* CRTC clock change requested? */
@@ -692,9 +828,9 @@ nv50_display_unk20_handler(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
}
- dev_priv->evo_irq.dcb = dcb;
- dev_priv->evo_irq.pclk = pclk;
- dev_priv->evo_irq.script = script;
+ disp->irq.dcb = dcb;
+ disp->irq.pclk = pclk;
+ disp->irq.script = script;
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
@@ -735,13 +871,13 @@ nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_entry *dcb = dev_priv->evo_irq.dcb;
- u16 script = dev_priv->evo_irq.script;
- u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk;
+ struct nv50_display *disp = nv50_display(dev);
+ struct dcb_entry *dcb = disp->irq.dcb;
+ u16 script = disp->irq.script;
+ u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
if (!dcb)
goto ack;
@@ -754,12 +890,10 @@ ack:
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}
-void
-nv50_display_irq_handler_bh(struct work_struct *work)
+static void
+nv50_display_bh(unsigned long data)
{
- struct drm_nouveau_private *dev_priv =
- container_of(work, struct drm_nouveau_private, irq_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = (struct drm_device *)data;
for (;;) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
@@ -807,7 +941,7 @@ nv50_display_error_handler(struct drm_device *dev)
static void
nv50_display_isr(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
uint32_t delayed = 0;
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
@@ -835,8 +969,7 @@ nv50_display_isr(struct drm_device *dev)
NV50_PDISPLAY_INTR_1_CLK_UNK40));
if (clock) {
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
- if (!work_pending(&dev_priv->irq_work))
- queue_work(dev_priv->wq, &dev_priv->irq_work);
+ tasklet_schedule(&disp->tasklet);
delayed |= clock;
intr1 &= ~clock;
}
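The page flipping added above rides on a pair of semaphore slots per CRTC, 16 bytes apart: each flip publishes a fresh token (0xf00d0000 | value) for the display channel to acquire, leaves the completion token 0x74b1e000 in the other slot, then toggles sem.offset by 0x10 and bumps sem.value so tokens are never reused. The standalone sketch below only models that slot/value rotation, not the full acquire/release exchange between the render channel and EVO:

/* Standalone sketch (cc demo.c) of the flip semaphore bookkeeping in
 * nv50_display_flip_next above. */
#include <stdio.h>
#include <stdint.h>

struct sem { uint32_t offset; uint16_t value; };

static void queue_flip(struct sem *s, uint32_t slot[2])
{
        /* "frame ready" token the display engine will acquire */
        slot[s->offset / 0x10] = 0xf00d0000 | s->value;
        /* token left behind once the previous flip completed */
        slot[(s->offset ^ 0x10) / 0x10] = 0x74b1e000;

        s->offset ^= 0x10;      /* alternate between the two slots */
        s->value++;             /* never reuse a token value */
}

int main(void)
{
        struct sem s = { 0, 0 };
        uint32_t slot[2] = { 0, 0 };
        int i;

        for (i = 0; i < 3; i++) {
                queue_flip(&s, slot);
                printf("flip %d: slot0=0x%08x slot1=0x%08x next=%u\n",
                       i, slot[0], slot[1], s.value);
        }
        return 0;
}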
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index f0e30b78ef6b..c2da503a22aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,7 +35,36 @@
#include "nouveau_crtc.h"
#include "nv50_evo.h"
-void nv50_display_irq_handler_bh(struct work_struct *work);
+struct nv50_display_crtc {
+ struct nouveau_channel *sync;
+ struct {
+ struct nouveau_bo *bo;
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
+struct nv50_display {
+ struct nouveau_channel *master;
+ struct nouveau_gpuobj *ntfy;
+
+ struct nv50_display_crtc crtc[2];
+
+ struct tasklet_struct tasklet;
+ struct {
+ struct dcb_entry *dcb;
+ u16 script;
+ u32 pclk;
+ } irq;
+};
+
+static inline struct nv50_display *
+nv50_display(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return dev_priv->engine.display.priv;
+}
+
int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
@@ -44,4 +73,15 @@ void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+ struct nouveau_channel *chan);
+void nv50_display_flip_stop(struct drm_crtc *);
+
+int nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
+ u64 size);
+int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
+ u64 base, u64 size, struct nouveau_gpuobj **);
+
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 0ea090f4244a..c8e83c1a4de8 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -27,20 +27,17 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
+#include "nv50_display.h"
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
- struct drm_nouveau_private *dev_priv;
struct nouveau_channel *evo = *pevo;
if (!evo)
return;
*pevo = NULL;
- dev_priv = evo->dev->dev_private;
- dev_priv->evo_alloc &= ~(1 << evo->id);
-
nouveau_gpuobj_channel_takedown(evo);
nouveau_bo_unmap(evo->pushbuf_bo);
nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -51,42 +48,61 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
kfree(evo);
}
+void
+nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
+{
+ struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+ u32 flags5;
+
+ if (dev_priv->chipset < 0xc0) {
+ /* not supported on 0x50, specified in format mthd */
+ if (dev_priv->chipset == 0x50)
+ memtype = 0;
+ flags5 = 0x00010000;
+ } else {
+ if (memtype & 0x80000000)
+ flags5 = 0x00000000; /* large pages */
+ else
+ flags5 = 0x00020000;
+ }
+
+ nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
+ NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
+ nv_wo32(obj, 0x14, flags5);
+ dev_priv->engine.instmem.flush(obj->dev);
+}
+
int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
- u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
- u32 flags5)
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
+ u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
- struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
- struct drm_device *dev = evo->dev;
+ struct nv50_display *disp = nv50_display(evo->dev);
struct nouveau_gpuobj *obj = NULL;
int ret;
- ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+ ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
if (ret)
return ret;
obj->engine = NVOBJ_ENGINE_DISPLAY;
- nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
- nv_wo32(obj, 4, limit);
- nv_wo32(obj, 8, offset);
- nv_wo32(obj, 12, 0x00000000);
- nv_wo32(obj, 16, 0x00000000);
- nv_wo32(obj, 20, flags5);
- dev_priv->engine.instmem.flush(dev);
+ nv50_evo_dmaobj_init(obj, memtype, base, size);
- ret = nouveau_ramht_insert(evo, name, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret) {
- return ret;
- }
+ ret = nouveau_ramht_insert(evo, handle, obj);
+ if (ret)
+ goto out;
- return 0;
+ if (pobj)
+ nouveau_gpuobj_ref(obj, pobj);
+out:
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
}
static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+nv50_evo_channel_new(struct drm_device *dev, int chid,
+ struct nouveau_channel **pevo)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo;
int ret;
@@ -95,25 +111,13 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
return -ENOMEM;
*pevo = evo;
- for (evo->id = 0; evo->id < 5; evo->id++) {
- if (dev_priv->evo_alloc & (1 << evo->id))
- continue;
-
- dev_priv->evo_alloc |= (1 << evo->id);
- break;
- }
-
- if (evo->id == 5) {
- kfree(evo);
- return -ENODEV;
- }
-
+ evo->id = chid;
evo->dev = dev;
evo->user_get = 4;
evo->user_put = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
- false, true, &evo->pushbuf_bo);
+ &evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
if (ret) {
@@ -138,8 +142,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
}
/* bind primary evo channel's ramht to the channel */
- if (dev_priv->evo && evo != dev_priv->evo)
- nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+ if (disp->master && evo != disp->master)
+ nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
return 0;
}
@@ -182,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
evo->dma.max = (4096/4) - 2;
+ evo->dma.max &= ~7;
evo->dma.put = 0;
evo->dma.cur = evo->dma.put;
evo->dma.free = evo->dma.max - evo->dma.cur;
@@ -212,21 +217,39 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
}
}
+static void
+nv50_evo_destroy(struct drm_device *dev)
+{
+ struct nv50_display *disp = nv50_display(dev);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (disp->crtc[i].sem.bo) {
+ nouveau_bo_unmap(disp->crtc[i].sem.bo);
+ nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
+ }
+ nv50_evo_channel_del(&disp->crtc[i].sync);
+ }
+ nouveau_gpuobj_ref(NULL, &disp->ntfy);
+ nv50_evo_channel_del(&disp->master);
+}
+
static int
nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
struct nouveau_gpuobj *ramht = NULL;
struct nouveau_channel *evo;
- int ret;
+ int ret, i, j;
/* create primary evo channel, the one we use for modesetting
 * purposes
*/
- ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+ ret = nv50_evo_channel_new(dev, 0, &disp->master);
if (ret)
return ret;
- evo = dev_priv->evo;
+ evo = disp->master;
/* setup object management on it, any other evo channel will
* use this also as there's no per-channel support on the
@@ -236,109 +259,167 @@ nv50_evo_create(struct drm_device *dev)
NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
if (ret) {
NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
nouveau_gpuobj_ref(NULL, &ramht);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ if (ret)
+ goto err;
+
+ /* not sure exactly what this is..
+ *
+ * the first dword of the structure is used by nvidia to wait on
+ * full completion of an EVO "update" command.
+ *
+ * method 0x8c on the master evo channel will fill a lot more of
+ * this structure with some undefined info
+ */
+ ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
+ disp->ntfy->vinst, disp->ntfy->size, NULL);
+ if (ret)
+ goto err;
/* create some default objects for the scanout memtypes we support */
- if (dev_priv->card_type >= NV_C0) {
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
- 0, 0xffffffff, 0x00000000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size, 0x00020000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
- 0, dev_priv->vram_size, 0x00000000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
- } else {
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
- 0, 0xffffffff, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
- 0, 0xffffffff, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ /* create "display sync" channels and other structures we need
+ * to implement page flipping
+ */
+ for (i = 0; i < 2; i++) {
+ struct nv50_display_crtc *dispc = &disp->crtc[i];
+ u64 offset;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
+ if (ret)
+ goto err;
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, &dispc->sem.bo);
+ if (!ret) {
+ offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(dispc->sem.bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &dispc->sem.bo);
}
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
- 0, dev_priv->vram_size, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
+ offset, 4096, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ?
+ 0x7a00 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ?
+ 0x7000 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ for (j = 0; j < 4096; j += 4)
+ nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
+ dispc->sem.offset = 0;
}
return 0;
+
+err:
+ nv50_evo_destroy(dev);
+ return ret;
}
int
nv50_evo_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int ret;
+ struct nv50_display *disp = nv50_display(dev);
+ int ret, i;
- if (!dev_priv->evo) {
+ if (!disp->master) {
ret = nv50_evo_create(dev);
if (ret)
return ret;
}
- return nv50_evo_channel_init(dev_priv->evo);
+ ret = nv50_evo_channel_init(disp->master);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 2; i++) {
+ ret = nv50_evo_channel_init(disp->crtc[i].sync);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
void
nv50_evo_fini(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
+ int i;
- if (dev_priv->evo) {
- nv50_evo_channel_fini(dev_priv->evo);
- nv50_evo_channel_del(&dev_priv->evo);
+ for (i = 0; i < 2; i++) {
+ if (disp->crtc[i].sync)
+ nv50_evo_channel_fini(disp->crtc[i].sync);
}
+
+ if (disp->master)
+ nv50_evo_channel_fini(disp->master);
+
+ nv50_evo_destroy(dev);
}
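The restructuring in this file funnels every failure path in nv50_evo_create() through a single goto err label into nv50_evo_destroy(), which must therefore tolerate partially constructed state (each member is checked before it is torn down). A minimal sketch of the idiom, with stubbed-out hypothetical helpers standing in for the real allocation steps:

struct drm_device;      /* opaque here; the real type is the DRM core's */

static int alloc_a(struct drm_device *dev) { return 0; }   /* stub */
static int alloc_b(struct drm_device *dev) { return 0; }   /* stub */
static void teardown(struct drm_device *dev) { }           /* NULL-safe in the real code */

static int example_create(struct drm_device *dev)
{
        int ret;

        ret = alloc_a(dev);
        if (ret)
                goto err;

        ret = alloc_b(dev);
        if (ret)
                goto err;

        return 0;

err:
        teardown(dev);  /* frees only what was actually built */
        return ret;
}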
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aa4f0d3cea8e..3860ca62cb19 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -27,12 +27,6 @@
#ifndef __NV50_EVO_H__
#define __NV50_EVO_H__
-int nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
- u32 tile_flags, u32 magic_flags,
- u32 offset, u32 limit);
-
#define NV50_EVO_UPDATE 0x00000080
#define NV50_EVO_UNK84 0x00000084
#define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -119,5 +113,7 @@ int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
/* Both of these are needed, otherwise nothing happens. */
#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
+#define NV50_EVO_CRTC_UNK900 0x00000900
+#define NV50_EVO_CRTC_UNK904 0x00000904
#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index 50290dea0ac4..bdd2afe29205 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -8,31 +8,61 @@ struct nv50_fb_priv {
dma_addr_t r100c08;
};
+static void
+nv50_fb_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nv50_fb_priv *priv = pfb->priv;
+
+ if (drm_mm_initialized(&pfb->tag_heap))
+ drm_mm_takedown(&pfb->tag_heap);
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ kfree(priv);
+ pfb->priv = NULL;
+}
+
static int
nv50_fb_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nv50_fb_priv *priv;
+ u32 tagmem;
+ int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ pfb->priv = priv;
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c08_page) {
- kfree(priv);
+ nv50_fb_destroy(dev);
return -ENOMEM;
}
priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
- __free_page(priv->r100c08_page);
- kfree(priv);
+ nv50_fb_destroy(dev);
return -EFAULT;
}
- dev_priv->engine.fb.priv = priv;
+ tagmem = nv_rd32(dev, 0x100320);
+ NV_DEBUG(dev, "%d tags available\n", tagmem);
+ ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
+ if (ret) {
+ nv50_fb_destroy(dev);
+ return ret;
+ }
+
return 0;
}
@@ -81,26 +111,112 @@ nv50_fb_init(struct drm_device *dev)
void
nv50_fb_takedown(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fb_priv *priv;
+ nv50_fb_destroy(dev);
+}
- priv = dev_priv->engine.fb.priv;
- if (!priv)
- return;
- dev_priv->engine.fb.priv = NULL;
+static struct nouveau_enum vm_dispatch_subclients[] = {
+ { 0x00000000, "GRCTX", NULL },
+ { 0x00000001, "NOTIFY", NULL },
+ { 0x00000002, "QUERY", NULL },
+ { 0x00000003, "COND", NULL },
+ { 0x00000004, "M2M_IN", NULL },
+ { 0x00000005, "M2M_OUT", NULL },
+ { 0x00000006, "M2M_NOTIFY", NULL },
+ {}
+};
- pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- kfree(priv);
-}
+static struct nouveau_enum vm_ccache_subclients[] = {
+ { 0x00000000, "CB", NULL },
+ { 0x00000001, "TIC", NULL },
+ { 0x00000002, "TSC", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_prop_subclients[] = {
+ { 0x00000000, "RT0", NULL },
+ { 0x00000001, "RT1", NULL },
+ { 0x00000002, "RT2", NULL },
+ { 0x00000003, "RT3", NULL },
+ { 0x00000004, "RT4", NULL },
+ { 0x00000005, "RT5", NULL },
+ { 0x00000006, "RT6", NULL },
+ { 0x00000007, "RT7", NULL },
+ { 0x00000008, "ZETA", NULL },
+ { 0x00000009, "LOCAL", NULL },
+ { 0x0000000a, "GLOBAL", NULL },
+ { 0x0000000b, "STACK", NULL },
+ { 0x0000000c, "DST2D", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_pfifo_subclients[] = {
+ { 0x00000000, "PUSHBUF", NULL },
+ { 0x00000001, "SEMAPHORE", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_bar_subclients[] = {
+ { 0x00000000, "FB", NULL },
+ { 0x00000001, "IN", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_client[] = {
+ { 0x00000000, "STRMOUT", NULL },
+ { 0x00000003, "DISPATCH", vm_dispatch_subclients },
+ { 0x00000004, "PFIFO_WRITE", NULL },
+ { 0x00000005, "CCACHE", vm_ccache_subclients },
+ { 0x00000006, "PPPP", NULL },
+ { 0x00000007, "CLIPID", NULL },
+ { 0x00000008, "PFIFO_READ", NULL },
+ { 0x00000009, "VFETCH", NULL },
+ { 0x0000000a, "TEXTURE", NULL },
+ { 0x0000000b, "PROP", vm_prop_subclients },
+ { 0x0000000c, "PVP", NULL },
+ { 0x0000000d, "PBSP", NULL },
+ { 0x0000000e, "PCRYPT", NULL },
+ { 0x0000000f, "PCOUNTER", NULL },
+ { 0x00000011, "PDAEMON", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_engine[] = {
+ { 0x00000000, "PGRAPH", NULL },
+ { 0x00000001, "PVP", NULL },
+ { 0x00000004, "PEEPHOLE", NULL },
+ { 0x00000005, "PFIFO", vm_pfifo_subclients },
+ { 0x00000006, "BAR", vm_bar_subclients },
+ { 0x00000008, "PPPP", NULL },
+ { 0x00000009, "PBSP", NULL },
+ { 0x0000000a, "PCRYPT", NULL },
+ { 0x0000000b, "PCOUNTER", NULL },
+ { 0x0000000c, "SEMAPHORE_BG", NULL },
+ { 0x0000000d, "PCOPY", NULL },
+ { 0x0000000e, "PDAEMON", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_fault[] = {
+ { 0x00000000, "PT_NOT_PRESENT", NULL },
+ { 0x00000001, "PT_TOO_SHORT", NULL },
+ { 0x00000002, "PAGE_NOT_PRESENT", NULL },
+ { 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
+ { 0x00000004, "PAGE_READ_ONLY", NULL },
+ { 0x00000006, "NULL_DMAOBJ", NULL },
+ { 0x00000007, "WRONG_MEMTYPE", NULL },
+ { 0x0000000b, "VRAM_LIMIT", NULL },
+ { 0x0000000f, "DMAOBJ_LIMIT", NULL },
+ {}
+};
void
-nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
+nv50_fb_vm_trap(struct drm_device *dev, int display)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const struct nouveau_enum *en, *cl;
unsigned long flags;
u32 trap[6], idx, chinst;
+ u8 st0, st1, st2, st3;
int i, ch;
idx = nv_rd32(dev, 0x100c90);
@@ -117,8 +233,8 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
if (!display)
return;
+ /* lookup channel id */
chinst = (trap[2] << 16) | trap[1];
-
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
@@ -131,9 +247,48 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
- "channel %d (0x%08x)\n",
- name, (trap[5] & 0x100 ? "read" : "write"),
- trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
- trap[0], ch, chinst);
+ /* decode status bits into something more useful */
+ if (dev_priv->chipset < 0xa3 ||
+ dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+ st0 = (trap[0] & 0x0000000f) >> 0;
+ st1 = (trap[0] & 0x000000f0) >> 4;
+ st2 = (trap[0] & 0x00000f00) >> 8;
+ st3 = (trap[0] & 0x0000f000) >> 12;
+ } else {
+ st0 = (trap[0] & 0x000000ff) >> 0;
+ st1 = (trap[0] & 0x0000ff00) >> 8;
+ st2 = (trap[0] & 0x00ff0000) >> 16;
+ st3 = (trap[0] & 0xff000000) >> 24;
+ }
+
+ NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
+ (trap[5] & 0x00000100) ? "read" : "write",
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);
+
+ en = nouveau_enum_find(vm_engine, st0);
+ if (en)
+ printk("%s/", en->name);
+ else
+ printk("%02x/", st0);
+
+ cl = nouveau_enum_find(vm_client, st2);
+ if (cl)
+ printk("%s/", cl->name);
+ else
+ printk("%02x/", st2);
+
+ if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
+ else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
+ else cl = NULL;
+ if (cl)
+ printk("%s", cl->name);
+ else
+ printk("%02x", st3);
+
+ printk(" reason: ");
+ en = nouveau_enum_find(vm_fault, st1);
+ if (en)
+ printk("%s\n", en->name);
+ else
+ printk("0x%08x\n", st1);
}
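The trap decoder above walks sentinel-terminated tables whose entries are {value, name, optional sub-table} — hence the NULL third field added to the older tables later in this series. A standalone plain-C approximation of the lookup; the real helper is nouveau_enum_find() in the driver, and only two vm_fault entries are reproduced here:

#include <stddef.h>
#include <stdio.h>

struct demo_enum {
        unsigned int value;
        const char *name;
        const void *data;       /* optional sub-table, as in vm_client[] */
};

static const struct demo_enum demo_fault[] = {
        { 0x00000000, "PT_NOT_PRESENT", NULL },
        { 0x00000002, "PAGE_NOT_PRESENT", NULL },
        {}                      /* name == NULL terminates the table */
};

static const struct demo_enum *
demo_enum_find(const struct demo_enum *en, unsigned int value)
{
        while (en->name) {
                if (en->value == value)
                        return en;
                en++;
        }
        return NULL;
}

int main(void)
{
        const struct demo_enum *en = demo_enum_find(demo_fault, 0x2);

        printf("st1=0x2 -> %s\n", en ? en->name : "unknown");
        return 0;
}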
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 8dd04c5dac67..c34a074f7ea1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -149,6 +149,7 @@ nv50_fifo_init_regs(struct drm_device *dev)
nv_wr32(dev, 0x3204, 0);
nv_wr32(dev, 0x3210, 0);
nv_wr32(dev, 0x3270, 0);
+ nv_wr32(dev, 0x2044, 0x01003fff);
/* Enable dummy channels setup by nv50_instmem.c */
nv50_fifo_channel_enable(dev, 0);
@@ -273,7 +274,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->cinst >> 4));
- nv_wo32(ramfc, 0x44, 0x2101ffff);
+ nv_wo32(ramfc, 0x44, 0x01003fff);
nv_wo32(ramfc, 0x60, 0x7fffffff);
nv_wo32(ramfc, 0x40, 0x00000000);
nv_wo32(ramfc, 0x7c, 0x30000001);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 6b149c0cc06d..d4f4206dad7e 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -137,6 +137,7 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
struct nv50_gpio_priv *priv = pgpio->priv;
struct nv50_gpio_handler *gpioh, *tmp;
struct dcb_gpio_entry *gpio;
+ LIST_HEAD(tofree);
unsigned long flags;
gpio = nouveau_bios_gpio_entry(dev, tag);
@@ -149,10 +150,14 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
gpioh->handler != handler ||
gpioh->data != data)
continue;
- list_del(&gpioh->head);
- kfree(gpioh);
+ list_move(&gpioh->head, &tofree);
}
spin_unlock_irqrestore(&priv->lock, flags);
+
+ list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
+ flush_work_sync(&gpioh->work);
+ kfree(gpioh);
+ }
}
bool
@@ -205,7 +210,6 @@ nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv;
int ret;
if (!pgpio->priv) {
@@ -213,7 +217,6 @@ nv50_gpio_init(struct drm_device *dev)
if (ret)
return ret;
}
- priv = pgpio->priv;
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -293,7 +296,7 @@ nv50_gpio_isr(struct drm_device *dev)
continue;
gpioh->inhibit = true;
- queue_work(dev_priv->wq, &gpioh->work);
+ schedule_work(&gpioh->work);
}
spin_unlock(&priv->lock);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 37e21d2be95b..b02a5b1e7d37 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -95,13 +95,41 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
}
static void
-nv50_graph_init_regs(struct drm_device *dev)
+nv50_graph_init_zcull(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
NV_DEBUG(dev, "\n");
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
- (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
- nv_wr32(dev, 0x402ca8, 0x800);
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ nv_wr32(dev, 0x402ca8, 0x00000800);
+ break;
+ case 0xa0:
+ default:
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ if (dev_priv->chipset == 0xa0 ||
+ dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac) {
+ nv_wr32(dev, 0x402ca8, 0x00000802);
+ } else {
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ nv_wr32(dev, 0x402ca8, 0x00000002);
+ }
+
+ break;
+ }
+
+ /* zero out zcull regions */
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
+ }
}
static int
@@ -136,6 +164,7 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
}
kfree(cp);
+ nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
@@ -151,7 +180,7 @@ nv50_graph_init(struct drm_device *dev)
nv50_graph_init_reset(dev);
nv50_graph_init_regs__nv(dev);
- nv50_graph_init_regs(dev);
+ nv50_graph_init_zcull(dev);
ret = nv50_graph_init_ctxctl(dev);
if (ret)
@@ -409,12 +438,7 @@ static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
- struct nouveau_page_flip_state s;
-
- if (!nouveau_finish_page_flip(chan, &s)) {
- /* XXX - Do something here */
- }
-
+ nouveau_finish_page_flip(chan, NULL);
return 0;
}
@@ -479,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev)
}
void
-nv86_graph_tlb_flush(struct drm_device *dev)
+nv84_graph_tlb_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -526,11 +550,11 @@ nv86_graph_tlb_flush(struct drm_device *dev)
static struct nouveau_enum nv50_mp_exec_error_names[] =
{
- { 3, "STACK_UNDERFLOW" },
- { 4, "QUADON_ACTIVE" },
- { 8, "TIMEOUT" },
- { 0x10, "INVALID_OPCODE" },
- { 0x40, "BREAKPOINT" },
+ { 3, "STACK_UNDERFLOW", NULL },
+ { 4, "QUADON_ACTIVE", NULL },
+ { 8, "TIMEOUT", NULL },
+ { 0x10, "INVALID_OPCODE", NULL },
+ { 0x40, "BREAKPOINT", NULL },
{}
};
@@ -558,47 +582,47 @@ static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
/* There must be a *lot* of these. Will take some time to gather them up. */
struct nouveau_enum nv50_data_error_names[] = {
- { 0x00000003, "INVALID_QUERY_OR_TEXTURE" },
- { 0x00000004, "INVALID_VALUE" },
- { 0x00000005, "INVALID_ENUM" },
- { 0x00000008, "INVALID_OBJECT" },
- { 0x00000009, "READ_ONLY_OBJECT" },
- { 0x0000000a, "SUPERVISOR_OBJECT" },
- { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" },
- { 0x0000000c, "INVALID_BITFIELD" },
- { 0x0000000d, "BEGIN_END_ACTIVE" },
- { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" },
- { 0x0000000f, "VIEWPORT_ID_NEEDS_GP" },
- { 0x00000010, "RT_DOUBLE_BIND" },
- { 0x00000011, "RT_TYPES_MISMATCH" },
- { 0x00000012, "RT_LINEAR_WITH_ZETA" },
- { 0x00000015, "FP_TOO_FEW_REGS" },
- { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" },
- { 0x00000017, "RT_LINEAR_WITH_MSAA" },
- { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" },
- { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" },
- { 0x0000001a, "RT_INVALID_ALIGNMENT" },
- { 0x0000001b, "SAMPLER_OVER_LIMIT" },
- { 0x0000001c, "TEXTURE_OVER_LIMIT" },
- { 0x0000001e, "GP_TOO_MANY_OUTPUTS" },
- { 0x0000001f, "RT_BPP128_WITH_MS8" },
- { 0x00000021, "Z_OUT_OF_BOUNDS" },
- { 0x00000023, "XY_OUT_OF_BOUNDS" },
- { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" },
- { 0x00000028, "CP_NO_REG_SPACE_STRIPED" },
- { 0x00000029, "CP_NO_REG_SPACE_PACKED" },
- { 0x0000002a, "CP_NOT_ENOUGH_WARPS" },
- { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" },
- { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" },
- { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" },
- { 0x0000002e, "CP_NO_BLOCKDIM_LATCH" },
- { 0x00000031, "ENG2D_FORMAT_MISMATCH" },
- { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" },
- { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" },
- { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" },
- { 0x00000046, "LAYER_ID_NEEDS_GP" },
- { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" },
- { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" },
+ { 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
+ { 0x00000004, "INVALID_VALUE", NULL },
+ { 0x00000005, "INVALID_ENUM", NULL },
+ { 0x00000008, "INVALID_OBJECT", NULL },
+ { 0x00000009, "READ_ONLY_OBJECT", NULL },
+ { 0x0000000a, "SUPERVISOR_OBJECT", NULL },
+ { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
+ { 0x0000000c, "INVALID_BITFIELD", NULL },
+ { 0x0000000d, "BEGIN_END_ACTIVE", NULL },
+ { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
+ { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
+ { 0x00000010, "RT_DOUBLE_BIND", NULL },
+ { 0x00000011, "RT_TYPES_MISMATCH", NULL },
+ { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
+ { 0x00000015, "FP_TOO_FEW_REGS", NULL },
+ { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
+ { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
+ { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
+ { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
+ { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
+ { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
+ { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
+ { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
+ { 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
+ { 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
+ { 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+ { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
+ { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
+ { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
+ { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
+ { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
+ { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
+ { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
+ { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
+ { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
+ { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
+ { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
+ { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
+ { 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
+ { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
+ { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
{}
};
@@ -678,7 +702,6 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
tps++;
switch (type) {
case 6: /* texture error... unknown for now */
- nv50_fb_vm_trap(dev, display, name);
if (display) {
NV_ERROR(dev, "magic set %d:\n", i);
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
@@ -701,7 +724,6 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
- nv50_fb_vm_trap(dev, display, name);
/* 2d engine destination */
if (ustatus & 0x00000010) {
if (display) {
@@ -912,10 +934,10 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
printk("\n");
NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
- nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
- nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
- nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
- nv_rd32(dev, 0x40581c));
+ nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
+ nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
+ nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
+ nv_rd32(dev, 0x40501c));
}
@@ -1044,6 +1066,7 @@ nv50_graph_isr(struct drm_device *dev)
NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
"class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, inst, subc, class, mthd, data);
+ nv50_fb_vm_trap(dev, 1);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c

index ea0041810ae3..4f95a1e5822e 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -56,7 +56,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
nouveau_gpuobj_ref(NULL, &chan->ramfc);
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
kfree(chan);
@@ -259,7 +259,7 @@ nv50_instmem_takedown(struct drm_device *dev)
nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
- if (dev_priv->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&dev_priv->ramin_heap))
drm_mm_takedown(&dev_priv->ramin_heap);
dev_priv->engine.instmem.priv = NULL;
@@ -300,7 +300,7 @@ nv50_instmem_resume(struct drm_device *dev)
}
struct nv50_gpuobj_node {
- struct nouveau_vram *vram;
+ struct nouveau_mem *vram;
struct nouveau_vma chan_vma;
u32 align;
};
@@ -403,16 +403,26 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
void
nv50_instmem_flush(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x00330c, 0x00000001);
if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
void
nv84_instmem_flush(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x070000, 0x00000001);
if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index b4a5ecb199f9..c25c59386420 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -41,8 +41,7 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
if (!nv_encoder->crtc)
@@ -184,8 +183,7 @@ static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 459ff08241e5..6c2694490741 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -31,7 +31,6 @@ void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2])
{
- struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
u64 phys = 0xdeadcafe00000000ULL;
u32 coverage = 0;
@@ -58,10 +57,9 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
}
static inline u64
-nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u64 phys, u32 memtype, u32 target)
+nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
- struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+ struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
phys |= 1; /* present */
phys |= (u64)memtype << 40;
@@ -85,12 +83,13 @@ nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
+ u32 comp = (mem->memtype & 0x180) >> 7;
u32 block;
int i;
- phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+ phys = nv50_vm_addr(vma, phys, mem->memtype, 0);
pte <<= 3;
cnt <<= 3;
@@ -107,6 +106,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
phys += block << (vma->node->type - 3);
cnt -= block;
+ if (comp) {
+ u32 tag = mem->tag->start + ((delta >> 16) * comp);
+ offset_h |= (tag << 17);
+ delta += block << (vma->node->type - 3);
+ }
while (block) {
nv_wo32(pgt, pte + 0, offset_l);
@@ -119,11 +123,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u32 pte, dma_addr_t *list, u32 cnt)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte <<= 3;
while (cnt--) {
- u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+ u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
@@ -169,7 +173,12 @@ nv50_vm_flush(struct nouveau_vm *vm)
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 58e98ad36347..ffbc3d8cf5be 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -48,42 +48,49 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
}
void
-nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *this;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
- vram = *pvram;
- *pvram = NULL;
- if (unlikely(vram == NULL))
+ mem = *pmem;
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
return;
mutex_lock(&mm->mutex);
- while (!list_empty(&vram->regions)) {
- this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ while (!list_empty(&mem->regions)) {
+ this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
list_del(&this->rl_entry);
nouveau_mm_put(mm, this);
}
+
+ if (mem->tag) {
+ drm_mm_put_block(mem->tag);
+ mem->tag = NULL;
+ }
mutex_unlock(&mm->mutex);
- kfree(vram);
+ kfree(mem);
}
int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
- u32 type, struct nouveau_vram **pvram)
+ u32 memtype, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
+ int comp = (memtype & 0x300) >> 8;
+ int type = (memtype & 0x07f);
int ret;
if (!types[type])
@@ -92,32 +99,46 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
align >>= 12;
size_nc >>= 12;
- vram = kzalloc(sizeof(*vram), GFP_KERNEL);
- if (!vram)
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
- INIT_LIST_HEAD(&vram->regions);
- vram->dev = dev_priv->dev;
- vram->memtype = type;
- vram->size = size;
-
mutex_lock(&mm->mutex);
+ if (comp) {
+ if (align == 16) {
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int n = (size >> 4) * comp;
+
+ mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
+ if (mem->tag)
+ mem->tag = drm_mm_get_block(mem->tag, n, 0);
+ }
+
+ if (unlikely(!mem->tag))
+ comp = 0;
+ }
+
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (comp << 7) | type;
+ mem->size = size;
+
do {
ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &vram);
+ nv50_vram_del(dev, &mem);
return ret;
}
- list_add_tail(&r->rl_entry, &vram->regions);
+ list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
- r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
- vram->offset = (u64)r->offset << 12;
- *pvram = vram;
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
return 0;
}
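The memtype plumbing above splits the incoming value into a 2-bit compression field (bits 9:8) and a 7-bit storage type (bits 6:0), then re-packs compression at bit 7 in mem->memtype so nv50_vm_map() can recover it with (memtype & 0x180) >> 7. A small worked example of that arithmetic, using an arbitrary illustrative input value:

#include <stdio.h>

int main(void)
{
        unsigned int memtype = 0x17a;                   /* illustrative input */
        unsigned int comp = (memtype & 0x300) >> 8;     /* compression mode */
        unsigned int type = (memtype & 0x07f);          /* storage type */
        unsigned int packed = (comp << 7) | type;       /* what mem->memtype holds */

        printf("comp=%u type=0x%02x packed=0x%03x unpacked_comp=%u\n",
               comp, type, packed, (packed & 0x180) >> 7);
        return 0;
}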
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index ec18ae1c3886..fabc7fd30b1d 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -136,5 +136,5 @@ nv84_crypt_isr(struct drm_device *dev)
nv_wr32(dev, 0x102130, stat);
nv_wr32(dev, 0x10200c, 0x10);
- nv50_fb_vm_trap(dev, show, "PCRYPT");
+ nv50_fb_vm_trap(dev, show);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index e6f92c541dba..2886f2726a9e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -116,7 +116,7 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
/* allocate vram for control regs, map into polling area */
ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
- 0, 0, true, true, &fifoch->user);
+ 0, 0, &fifoch->user);
if (ret)
goto error;
@@ -418,6 +418,12 @@ nvc0_fifo_isr(struct drm_device *dev)
{
u32 stat = nv_rd32(dev, 0x002100);
+ if (stat & 0x00000100) {
+ NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+ nv_wr32(dev, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
if (stat & 0x10000000) {
u32 units = nv_rd32(dev, 0x00259c);
u32 u = units;
@@ -446,10 +452,15 @@ nvc0_fifo_isr(struct drm_device *dev)
stat &= ~0x20000000;
}
+ if (stat & 0x40000000) {
+ NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+ nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
if (stat) {
NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
nv_wr32(dev, 0x002100, stat);
+ nv_wr32(dev, 0x002140, 0);
}
-
- nv_wr32(dev, 0x2140, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index eb18a7e89f5b..3de9b721d8db 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -299,6 +299,14 @@ nvc0_graph_takedown(struct drm_device *dev)
}
static int
+nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ nouveau_finish_page_flip(chan, NULL);
+ return 0;
+}
+
+static int
nvc0_graph_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -395,6 +403,7 @@ nvc0_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 25, nvc0_runk140_isr);
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+ NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
return 0;
@@ -640,7 +649,6 @@ nvc0_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nvc0_graph_priv *priv;
int ret;
dev_priv->engine.graph.accel_blocked = true;
@@ -665,7 +673,6 @@ nvc0_graph_init(struct drm_device *dev)
if (ret)
return ret;
}
- priv = pgraph->priv;
nvc0_graph_init_obj418880(dev);
nvc0_graph_init_regs(dev);
@@ -730,9 +737,12 @@ nvc0_graph_isr(struct drm_device *dev)
u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
if (stat & 0x00000010) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
nv_wr32(dev, 0x400100, 0x00000010);
stat &= ~0x00000010;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index c09091749054..82357d2df1f4 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -67,7 +67,7 @@ nvc0_channel_del(struct nouveau_channel **pchan)
return;
nouveau_vm_ref(NULL, &chan->vm, NULL);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
kfree(chan);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index e4e83c2caf5b..a179e6c55afb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
u32 next = 1 << (vma->node->type - 8);
@@ -75,11 +75,11 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u32 pte, dma_addr_t *list, u32 cnt)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte <<= 3;
while (cnt--) {
- u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+ u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
@@ -104,20 +104,27 @@ nvc0_vm_flush(struct nouveau_vm *vm)
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
- u32 r100c80, engine;
+ unsigned long flags;
+ u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
pinstmem->flush(vm->dev);
- if (vm == dev_priv->chan_vm)
- engine = 1;
- else
- engine = 5;
-
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
list_for_each_entry(vpgd, &vm->pgd_list, head) {
- r100c80 = nv_rd32(dev, 0x100c80);
+ /* looks like maybe a "free flush slots" counter, the
+ * faster you write to 0x100cbc the more it decreases
+ */
+ if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
+ NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
+ nv_rd32(dev, 0x100c80), engine);
+ }
nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
- if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
- NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+ /* wait for flush to be queued? */
+ if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
+ NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
+ nv_rd32(dev, 0x100c80), engine);
+ }
}
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index 858eda5dedd1..67c6ec6f34ea 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -26,64 +26,78 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+/* 0 = unsupported
+ * 1 = non-compressed
+ * 3 = compressed
+ */
+static const u8 types[256] = {
+ 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+ 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
+ 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
+};
+
bool
nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
{
- switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
- case 0x0000:
- case 0xfe00:
- case 0xdb00:
- case 0x1100:
- return true;
- default:
- break;
- }
-
- return false;
+ u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+ return likely((types[memtype] == 1));
}
int
nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
- u32 type, struct nouveau_vram **pvram)
+ u32 type, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
int ret;
size >>= 12;
align >>= 12;
ncmin >>= 12;
- vram = kzalloc(sizeof(*vram), GFP_KERNEL);
- if (!vram)
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
- INIT_LIST_HEAD(&vram->regions);
- vram->dev = dev_priv->dev;
- vram->memtype = type;
- vram->size = size;
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (type & 0xff);
+ mem->size = size;
mutex_lock(&mm->mutex);
do {
ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &vram);
+ nv50_vram_del(dev, &mem);
return ret;
}
- list_add_tail(&r->rl_entry, &vram->regions);
+ list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
- r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
- vram->offset = (u64)r->offset << 12;
- *pvram = vram;
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
return 0;
}
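nvc0_vram_flags_valid() above becomes table-driven: the memtype byte sits in tile_flags bits 15:8, and only non-compressed types (table value 1) are accepted for scanout. A condensed standalone illustration — DEMO_TILE_LAYOUT_MASK stands in for NOUVEAU_GEM_TILE_LAYOUT_MASK (assumed to be 0x0000ff00), and only a handful of table entries are reproduced from the hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_TILE_LAYOUT_MASK 0x0000ff00u

static const uint8_t demo_types[256] = {
        [0x00] = 1, [0x02] = 3, [0x11] = 1, [0xdb] = 3, [0xfe] = 1,
};

static bool demo_flags_valid(uint32_t tile_flags)
{
        uint8_t memtype = (tile_flags & DEMO_TILE_LAYOUT_MASK) >> 8;

        return demo_types[memtype] == 1;  /* scanout wants non-compressed */
}

int main(void)
{
        printf("0xfe00 -> %d\n", demo_flags_valid(0xfe00));  /* 1 */
        printf("0xdb00 -> %d\n", demo_flags_valid(0xdb00));  /* 0: compressed */
        return 0;
}

Note that 0xdb00, accepted by the old switch statement, now fails the check because its table entry marks it compressed.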
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 18c3c71e41b1..b9e8efd2b754 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -71,10 +71,7 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -89,16 +86,21 @@ int r128_driver_load(struct drm_device *dev, unsigned long flags)
return drm_vblank_init(dev, 1);
}
+static struct pci_driver r128_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init r128_init(void)
{
driver.num_ioctls = r128_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &r128_pci_driver);
}
static void __exit r128_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &r128_pci_driver);
}
module_init(r128_init);
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 1c02d23f6fcc..9746fee59f56 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,6 +1,7 @@
config DRM_RADEON_KMS
bool "Enable modesetting on radeon by default - NEW DRIVER"
depends on DRM_RADEON
+ select BACKLIGHT_CLASS_DEVICE
help
Choose this option if you want kernel modesetting enabled by default.
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index e47eecfc2df4..3896ef811102 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -36,6 +36,9 @@ $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -50,7 +53,7 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
-$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h
+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
@@ -66,7 +69,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o
+ radeon_trace_points.o ni.o cayman_blit_shaders.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 258fa5e7a2d9..7bd745689097 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -32,6 +32,7 @@
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
+#include "radeon.h"
#define ATOM_COND_ABOVE 0
#define ATOM_COND_ABOVEOREQUAL 1
@@ -101,7 +102,9 @@ static void debug_print_spaces(int n)
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
uint32_t index, uint32_t data)
{
+ struct radeon_device *rdev = ctx->card->dev->dev_private;
uint32_t temp = 0xCDCDCDCD;
+
while (1)
switch (CU8(base)) {
case ATOM_IIO_NOP:
@@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
- (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+ if (rdev->family == CHIP_RV515)
+ (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
@@ -131,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
case ATOM_IIO_MOVE_INDEX:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((index >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -141,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
case ATOM_IIO_MOVE_DATA:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((data >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -151,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
case ATOM_IIO_MOVE_ATTR:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((ctx->
io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
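All three ATOM_IIO_MOVE_* fixes above correct the same slip: the opcode bytes encode field width (base + 1), source shift (base + 2) and destination shift (base + 3), and the old code cleared the destination field using the source shift byte. A standalone sketch of the corrected move; the byte layout is inferred from the hunks, so treat it as illustrative rather than a spec:

#include <stdint.h>
#include <stdio.h>

static uint32_t iio_move(uint32_t temp, uint32_t src, unsigned width,
                         unsigned src_shift, unsigned dst_shift)
{
        uint32_t mask = 0xFFFFFFFFu >> (32 - width);

        temp &= ~(mask << dst_shift);   /* the fix: clear at dst, not src */
        temp |= ((src >> src_shift) & mask) << dst_shift;
        return temp;
}

int main(void)
{
        /* move the 4-bit field at bit 8 of src into bit 16 of temp */
        printf("0x%08x\n", iio_move(0, 0x00000500u, 4, 8, 16));
        return 0;                       /* prints 0x00050000 */
}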
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 04b269d14a59..7fd88497b930 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -738,13 +738,13 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
{
#if ATOM_BIG_ENDIAN
UCHAR ucReserved1:1;
- UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F)
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
UCHAR ucReserved:3;
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
#else
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
UCHAR ucReserved:3;
- UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F)
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
UCHAR ucReserved1:1;
#endif
}ATOM_DIG_ENCODER_CONFIG_V3;
@@ -785,13 +785,13 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
{
#if ATOM_BIG_ENDIAN
UCHAR ucReserved1:1;
- UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F)
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
UCHAR ucReserved:2;
UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version
#else
UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version
UCHAR ucReserved:2;
- UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F)
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
UCHAR ucReserved1:1;
#endif
}ATOM_DIG_ENCODER_CONFIG_V4;
@@ -2126,7 +2126,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
// Structures used in FirmwareInfoTable
/****************************************************************************/
-// usBIOSCapability Defintion:
+// usBIOSCapability Definition:
// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted;
// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
@@ -3341,7 +3341,7 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO
/****************************************************************************/
// Structure used in AnalogTV_InfoTable (Top level)
/****************************************************************************/
-//ucTVBootUpDefaultStd definiton:
+//ucTVBootUpDefaultStd definition:
//ATOM_TV_NTSC 1
//ATOM_TV_NTSCJ 2
@@ -3816,7 +3816,7 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
UCHAR Reserved [6]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
-//Related definitions, all records are differnt but they have a commond header
+//Related definitions, all records are different but they have a common header
typedef struct _ATOM_COMMON_RECORD_HEADER
{
UCHAR ucRecordType; //An enum to indicate the record type
@@ -4365,14 +4365,14 @@ ucUMAChannelNumber: System memory channel numbers.
ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
-sAvail_SCLK[5]: Arrays to provide availabe list of SLCK and corresponding voltage, order from low to high
+sAvail_SCLK[5]: Arrays to provide available list of SCLK and corresponding voltage, order from low to high
ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
-usPCIEClkSSPercentage: PCIE Clock Spred Spectrum Percentage in unit 0.01%; 100 mean 1%.
-usPCIEClkSSType: PCIE Clock Spred Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 mean 1%.
+usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
@@ -4555,7 +4555,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
-//Byte aligned defintion for BIOS usage
+//Byte aligned definition for BIOS usage
#define ATOM_S0_CRT1_MONOb0 0x01
#define ATOM_S0_CRT1_COLORb0 0x02
#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
@@ -4621,7 +4621,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
-//Byte aligned defintion for BIOS usage
+//Byte aligned definition for BIOS usage
#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01
@@ -4671,7 +4671,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
-//Byte aligned defintion for BIOS usage
+//Byte aligned definition for BIOS usage
#define ATOM_S3_CRT1_ACTIVEb0 0x01
#define ATOM_S3_LCD1_ACTIVEb0 0x02
#define ATOM_S3_TV1_ACTIVEb0 0x04
@@ -4707,7 +4707,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
#define ATOM_S4_LCD1_REFRESH_SHIFT 8
-//Byte aligned defintion for BIOS usage
+//Byte aligned definition for BIOS usage
#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
@@ -4786,7 +4786,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
-//Byte aligned defintion for BIOS usage
+//Byte aligned definition for BIOS usage
#define ATOM_S6_DEVICE_CHANGEb0 0x01
#define ATOM_S6_SCALER_CHANGEb0 0x02
#define ATOM_S6_LID_CHANGEb0 0x04
@@ -5027,7 +5027,7 @@ typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
typedef struct _MEMORY_CLEAN_UP_PARAMETERS
{
- USHORT usMemoryStart; //in 8Kb boundry, offset from memory base address
+ USHORT usMemoryStart; //in 8Kb boundary, offset from memory base address
USHORT usMemorySize; //8Kb blocks aligned
}MEMORY_CLEAN_UP_PARAMETERS;
#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
@@ -6855,7 +6855,7 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
/**************************************************************************/
-// Following definitions are for compatiblity issue in different SW components.
+// Following definitions are for compatibility issue in different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
#define Object_Info Object_Header
#define AdjustARB_SEQ MC_InitParameter
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a4e5e53e0a62..529a3a704731 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -61,8 +61,8 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
} else if (a2 > a1) {
- args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
- args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
}
break;
case RMX_FULL:
@@ -531,6 +531,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+ if (rdev->family < CHIP_RV770)
+ pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
} else {
pll->flags |= RADEON_PLL_LEGACY;
@@ -559,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
if (ss->refdiv) {
- pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
if (ASIC_IS_AVIVO(rdev))
@@ -957,7 +959,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
- if (ASIC_IS_AVIVO(rdev))
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ /* TV seems to prefer the legacy algo on some boards */
+ radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
+ else if (ASIC_IS_AVIVO(rdev))
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
else
@@ -1005,6 +1011,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+ u32 tmp;
int r;
/* no fb bound */
@@ -1026,7 +1033,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1133,9 +1140,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+ /* pageflip setup */
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1163,6 +1179,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
+ u32 tmp;
int r;
/* no fb bound */
@@ -1181,7 +1198,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
}
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1290,9 +1307,18 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+ /* pageflip setup */
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
new file mode 100644
index 000000000000..e148ab04b80b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
+const u32 cayman_default_state[] =
+{
+ /* XXX fill in additional blit state */
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+
+ 0xc0026900,
+ 0x000000d9,
+ 0x00000000, /* CP_RINGID */
+ 0x00000000, /* CP_VMID */
+};
+
+const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
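The dwords above follow the PM4 type-3 packet layout used throughout the radeon drivers (the PACKET3() macro in the r600/evergreen headers); a sketch of the header encoding, with the macro name here being hypothetical:

/* type-3 header: bits 31:30 = 3, bits 29:16 = body dwords - 1,
 * bits 15:8 = opcode. 0xc0026900 therefore decodes as opcode 0x69
 * (SET_CONTEXT_REG) followed by three dwords: register offset 0x316
 * (0x28000 + 0x316 * 4 = 0x28C58, VGT_VERTEX_REUSE_BLOCK_CNTL) and
 * two data dwords. */
#define PM4_TYPE3(op, n) ((3u << 30) | (((n) & 0x3fff) << 16) | (((op) & 0xff) << 8))

/* sanity check: PM4_TYPE3(0x69, 2) == 0xc0026900 */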
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
new file mode 100644
index 000000000000..33b75e5d0fa4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CAYMAN_BLIT_SHADERS_H
+#define CAYMAN_BLIT_SHADERS_H
+
+extern const u32 cayman_default_state[];
+
+extern const u32 cayman_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d270b3ff896b..c20eac3379e6 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -43,17 +43,6 @@ static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
- u32 tmp;
-
- /* make sure flip is at vb rather than hb */
- tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
- tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
- WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
-
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
-
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
@@ -131,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev)
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
- if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
- if (voltage->voltage != rdev->pm.current_vddc) {
- radeon_atom_set_voltage(rdev, voltage->voltage);
+ if (voltage->type == VOLTAGE_SW) {
+ if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
- DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+ }
+ if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+ radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ rdev->pm.current_vddci = voltage->vddci;
+ DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
}
}
}
@@ -359,7 +353,7 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp = 0;
+ u32 tmp;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -369,64 +363,63 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
* first display controller
* 0 - first half of lb (3840 * 2)
* 1 - first 3/4 of lb (5760 * 2)
- * 2 - whole lb (7680 * 2)
+ * 2 - whole lb (7680 * 2), other crtc must be disabled
* 3 - first 1/4 of lb (1920 * 2)
* second display controller
* 4 - second half of lb (3840 * 2)
* 5 - second 3/4 of lb (5760 * 2)
- * 6 - whole lb (7680 * 2)
+ * 6 - whole lb (7680 * 2), other crtc must be disabled
* 7 - last 1/4 of lb (1920 * 2)
*/
- if (mode && other_mode) {
- if (mode->hdisplay > other_mode->hdisplay) {
- if (mode->hdisplay > 2560)
- tmp = 1; /* 3/4 */
- else
- tmp = 0; /* 1/2 */
- } else if (other_mode->hdisplay > mode->hdisplay) {
- if (other_mode->hdisplay > 2560)
- tmp = 3; /* 1/4 */
- else
- tmp = 0; /* 1/2 */
- } else
+ /* this can get tricky if we have two large displays on a paired group
+ * of crtcs. Ideally for multiple large displays we'd assign them to
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (radeon_crtc->base.enabled && mode) {
+ if (other_mode)
tmp = 0; /* 1/2 */
- } else if (mode)
- tmp = 2; /* whole */
- else if (other_mode)
- tmp = 3; /* 1/4 */
+ else
+ tmp = 2; /* whole */
+ } else
+ tmp = 0;
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
- switch (tmp) {
- case 0:
- case 4:
- default:
- if (ASIC_IS_DCE5(rdev))
- return 4096 * 2;
- else
- return 3840 * 2;
- case 1:
- case 5:
- if (ASIC_IS_DCE5(rdev))
- return 6144 * 2;
- else
- return 5760 * 2;
- case 2:
- case 6:
- if (ASIC_IS_DCE5(rdev))
- return 8192 * 2;
- else
- return 7680 * 2;
- case 3:
- case 7:
- if (ASIC_IS_DCE5(rdev))
- return 2048 * 2;
- else
- return 1920 * 2;
+ if (radeon_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+ case 4:
+ default:
+ if (ASIC_IS_DCE5(rdev))
+ return 4096 * 2;
+ else
+ return 3840 * 2;
+ case 1:
+ case 5:
+ if (ASIC_IS_DCE5(rdev))
+ return 6144 * 2;
+ else
+ return 5760 * 2;
+ case 2:
+ case 6:
+ if (ASIC_IS_DCE5(rdev))
+ return 8192 * 2;
+ else
+ return 7680 * 2;
+ case 3:
+ case 7:
+ if (ASIC_IS_DCE5(rdev))
+ return 2048 * 2;
+ else
+ return 1920 * 2;
+ }
}
+
+ /* controller not enabled, so no lb used */
+ return 0;
}
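The rewritten policy reduces to a small decision table; a simplified standalone restatement (a sketch only, not driver code):

/* Line buffer allocation after this change, in lb lines:
 * a disabled crtc gets nothing, an enabled crtc with an active
 * partner gets half the shared buffer, and an enabled crtc whose
 * partner is disabled gets the whole buffer. */
static u32 lb_lines(bool enabled, bool partner_active, bool dce5)
{
	if (!enabled)
		return 0;
	if (partner_active)
		return (dce5 ? 4096 : 3840) * 2;	/* half of the shared lb */
	return (dce5 ? 8192 : 7680) * 2;		/* whole lb */
}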
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
@@ -804,7 +797,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
}
}
-static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
@@ -869,9 +862,15 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
- WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
- WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
- WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ if (rdev->flags & RADEON_IS_IGP) {
+ WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
+ } else {
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ }
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -957,7 +956,7 @@ void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
-static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
@@ -1011,7 +1010,7 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa
WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
-static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
@@ -1108,7 +1107,7 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
-static void evergreen_mc_program(struct radeon_device *rdev)
+void evergreen_mc_program(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 tmp;
@@ -2194,7 +2193,6 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
}
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
@@ -2577,7 +2575,7 @@ void evergreen_irq_disable(struct radeon_device *rdev)
evergreen_disable_interrupt_state(rdev);
}
-static void evergreen_irq_suspend(struct radeon_device *rdev)
+void evergreen_irq_suspend(struct radeon_device *rdev)
{
evergreen_irq_disable(rdev);
r600_rlc_stop(rdev);
@@ -2588,7 +2586,7 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
u32 wptr, tmp;
if (rdev->wb.enabled)
- wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
else
wptr = RREG32(IH_RB_WPTR);
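This wptr change matters on big-endian hosts: the GPU fills the writeback buffer with little-endian dwords, so the old raw array load was only correct on LE machines. A minimal sketch of the pattern, with a hypothetical helper name:

static inline u32 radeon_wb_read32(const __le32 *wb, unsigned int dw)
{
	return le32_to_cpu(wb[dw]);	/* byte-swaps on BE, no-op on LE */
}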
@@ -2900,7 +2898,7 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
}
- r = btc_mc_load_microcode(rdev);
+ r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
@@ -2931,11 +2929,6 @@ static int evergreen_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
- /* XXX: ontario has problems blitting to gart at the moment */
- if (rdev->family == CHIP_PALM) {
- rdev->asic->copy = NULL;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
- }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -2982,13 +2975,13 @@ int evergreen_resume(struct radeon_device *rdev)
r = evergreen_startup(rdev);
if (r) {
- DRM_ERROR("r600 startup failed on resume\n");
+ DRM_ERROR("evergreen startup failed on resume\n");
return r;
}
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
@@ -3048,9 +3041,6 @@ int evergreen_init(struct radeon_device *rdev)
{
int r;
- r = radeon_dummy_page_init(rdev);
- if (r)
- return r;
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
@@ -3062,7 +3052,7 @@ int evergreen_init(struct radeon_device *rdev)
}
/* Must be an ATOMBIOS */
if (!rdev->is_atom_bios) {
- dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+ dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
return -EINVAL;
}
r = radeon_atombios_init(rdev);
@@ -3162,7 +3152,6 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
- radeon_dummy_page_fini(rdev);
}
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2adfb03f479b..ba06a69c6de8 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -579,7 +579,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size += evergreen_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
@@ -623,7 +623,7 @@ done:
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
- rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -631,7 +631,7 @@ void evergreen_blit_fini(struct radeon_device *rdev)
{
int r;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
if (rdev->r600_blit.shader_obj == NULL)
return;
/* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 345a75a03c96..23d36417158d 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -29,6 +29,7 @@
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
+#include "cayman_reg_safe.h"
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -292,33 +293,28 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (wait_reg_mem.type != PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* waiting for value to be equal */
if ((wait_reg_mem_info & 0x7) != 0x3) {
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* jump over the NOP */
@@ -336,8 +332,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -362,12 +357,10 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("unknown crtc reloc\n");
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
-out:
- return r;
+ return 0;
}
static int evergreen_packet0_check(struct radeon_cs_parser *p,
@@ -425,21 +418,31 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
- u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+ u32 last_reg;
u32 m, i, tmp, *ib;
int r;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+ else
+ last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
i = (reg >> 7);
if (i > last_reg) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
m = 1 << ((reg >> 2) & 31);
- if (!(evergreen_reg_safe_bm[i] & m))
- return 0;
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (!(cayman_reg_safe_bm[i] & m))
+ return 0;
+ } else {
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return 0;
+ }
ib = p->ib->ptr;
switch (reg) {
- /* force following reg to 0 in an attemp to disable out buffer
+ /* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
* security check on it (Jerome)
*/
@@ -468,12 +471,42 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
case SQ_VSTMP_RING_ITEMSIZE:
case VGT_TF_RING_SIZE:
/* get value to populate the IB don't remove */
- tmp =radeon_get_ib_value(p, idx);
- ib[idx] = 0;
+ /*tmp =radeon_get_ib_value(p, idx);
+ ib[idx] = 0;*/
+ break;
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_HSTMP_RING_BASE:
+ case SQ_LSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
+ case CAYMAN_DB_EQAA:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ break;
+ case CAYMAN_DB_DEPTH_INFO:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ break;
case DB_Z_INFO:
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -559,9 +592,23 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
track->cb_shader_mask = radeon_get_ib_value(p, idx);
break;
case PA_SC_AA_CONFIG:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
track->nsamples = 1 << tmp;
break;
+ case CAYMAN_PA_SC_AA_CONFIG:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
+ track->nsamples = 1 << tmp;
+ break;
case CB_COLOR0_VIEW:
case CB_COLOR1_VIEW:
case CB_COLOR2_VIEW:
@@ -942,6 +989,37 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
idx_value = radeon_get_ib_value(p, idx);
switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ }
+ break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
DRM_ERROR("bad CONTEXT_CONTROL\n");
@@ -956,6 +1034,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
+ case CAYMAN_PACKET3_DEALLOC_STATE:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ return -EINVAL;
+ }
+ break;
case PACKET3_INDEX_BASE:
if (pkt->count != 1) {
DRM_ERROR("bad INDEX_BASE\n");
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index eb4acf4528ff..94533849927e 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -221,6 +221,11 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+
+#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
+#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
+#define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664
+
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
@@ -755,13 +760,21 @@
#define SQ_CONST_MEM_BASE 0x8df8
+#define SQ_ESGS_RING_BASE 0x8c40
#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_BASE 0x8c48
#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_BASE 0x8c50
#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_BASE 0x8c58
#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_BASE 0x8c60
#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_BASE 0x8c68
#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_LSTMP_RING_BASE 0x8e10
#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_BASE 0x8e18
#define SQ_HSTMP_RING_SIZE 0x8e1c
#define VGT_TF_RING_SIZE 0x8988
@@ -1093,5 +1106,14 @@
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+/* cayman 3D regs */
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
+#define CAYMAN_DB_EQAA 0x28804
+#define CAYMAN_DB_DEPTH_INFO 0x2803C
+#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
+#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
+#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
+/* cayman packet3 addition */
+#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
#endif
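Example use of the new Cayman MSAA fields, mirroring the CS checker hunk earlier in this change (helper name hypothetical):

static unsigned int cayman_nsamples(u32 pa_sc_aa_config)
{
	/* the field is log2 of the sample count: 0x2 selects 4x MSAA */
	return 1u << ((pa_sc_aa_config >> CAYMAN_MSAA_NUM_SAMPLES_SHIFT) &
		      CAYMAN_MSAA_NUM_SAMPLES_MASK);
}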
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 5e0bef80ad7f..7aade20f63a8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -31,12 +31,25 @@
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
+#include "cayman_blit_shaders.h"
+
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+extern void evergreen_mc_program(struct radeon_device *rdev);
+extern void evergreen_irq_suspend(struct radeon_device *rdev);
+extern int evergreen_mc_init(struct radeon_device *rdev);
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024
+#define CAYMAN_PFP_UCODE_SIZE 2176
+#define CAYMAN_PM4_UCODE_SIZE 2176
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define CAYMAN_MC_UCODE_SIZE 6037
+
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
@@ -48,6 +61,10 @@ MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
#define BTC_IO_MC_REGS_SIZE 29
@@ -147,12 +164,44 @@ static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
{0x0000009f, 0x00916a00}
};
-int btc_mc_load_microcode(struct radeon_device *rdev)
+static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00976b00}
+};
+
+int ni_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
u32 mem_type, running, blackout = 0;
u32 *io_mc_regs;
- int i;
+ int i, ucode_size, regs_size;
if (!rdev->mc_fw)
return -EINVAL;
@@ -160,13 +209,24 @@ int btc_mc_load_microcode(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_BARTS:
io_mc_regs = (u32 *)&barts_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
break;
case CHIP_TURKS:
io_mc_regs = (u32 *)&turks_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
break;
case CHIP_CAICOS:
default:
io_mc_regs = (u32 *)&caicos_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_CAYMAN:
+ io_mc_regs = (u32 *)&cayman_io_mc_regs;
+ ucode_size = CAYMAN_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
break;
}
@@ -184,13 +244,13 @@ int btc_mc_load_microcode(struct radeon_device *rdev)
WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
/* load mc io regs */
- for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
+ for (i = 0; i < regs_size; i++) {
WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
/* load the MC ucode */
fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
+ for (i = 0; i < ucode_size; i++)
WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
/* put the engine back into the active state */
@@ -231,23 +291,38 @@ int ni_init_microcode(struct radeon_device *rdev)
case CHIP_BARTS:
chip_name = "BARTS";
rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
break;
case CHIP_TURKS:
chip_name = "TURKS";
rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
break;
case CHIP_CAICOS:
chip_name = "CAICOS";
rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_CAYMAN:
+ chip_name = "CAYMAN";
+ rlc_chip_name = "CAYMAN";
+ pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+ me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
+ mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
break;
default: BUG();
}
- pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
- me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
- rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
- mc_req_size = BTC_MC_UCODE_SIZE * 4;
-
DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
@@ -314,3 +389,1204 @@ out:
return err;
}
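The hunk elides the body between the pfp snprintf and the out: label. For orientation only, the conventional radeon request/size-check pattern for each blob looks like the sketch below; this is not the verbatim elided code, and pdev is assumed to be the platform device handle the other radeon microcode loaders use:

snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
if (err)
	goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
	printk(KERN_ERR "ni_cp: Bogus length %zu in firmware \"%s\"\n",
	       rdev->pfp_fw->size, fw_name);
	err = -EINVAL;
	goto out;
}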
+/*
+ * Core functions
+ */
+static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
+ u32 num_backends_per_asic,
+ u32 *backend_disable_mask_per_asic,
+ u32 num_shader_engines)
+{
+ u32 backend_map = 0;
+ u32 enabled_backends_mask = 0;
+ u32 enabled_backends_count = 0;
+ u32 num_backends_per_se;
+ u32 cur_pipe;
+ u32 swizzle_pipe[CAYMAN_MAX_PIPES];
+ u32 cur_backend = 0;
+ u32 i;
+ bool force_no_swizzle;
+
+ /* force legal values */
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
+ num_tile_pipes = rdev->config.cayman.max_tile_pipes;
+ if (num_shader_engines < 1)
+ num_shader_engines = 1;
+ if (num_shader_engines > rdev->config.cayman.max_shader_engines)
+ num_shader_engines = rdev->config.cayman.max_shader_engines;
+ if (num_backends_per_asic > num_shader_engines)
+ num_backends_per_asic = num_shader_engines;
+ if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
+ num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
+
+ /* make sure we have the same number of backends per se */
+ num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
+ /* set up the number of backends per se */
+ num_backends_per_se = num_backends_per_asic / num_shader_engines;
+ if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
+ num_backends_per_se = rdev->config.cayman.max_backends_per_se;
+ num_backends_per_asic = num_backends_per_se * num_shader_engines;
+ }
+
+ /* create enable mask and count for enabled backends */
+ for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
+ if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends_per_asic)
+ break;
+ }
+
+ /* force the backends mask to match the current number of backends */
+ if (enabled_backends_count != num_backends_per_asic) {
+ u32 this_backend_enabled;
+ u32 shader_engine;
+ u32 backend_per_se;
+
+ enabled_backends_mask = 0;
+ enabled_backends_count = 0;
+ *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
+ for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
+ /* calc the current se */
+ shader_engine = i / rdev->config.cayman.max_backends_per_se;
+ /* calc the backend per se */
+ backend_per_se = i % rdev->config.cayman.max_backends_per_se;
+ /* default to not enabled */
+ this_backend_enabled = 0;
+ if ((shader_engine < num_shader_engines) &&
+ (backend_per_se < num_backends_per_se))
+ this_backend_enabled = 1;
+ if (this_backend_enabled) {
+ enabled_backends_mask |= (1 << i);
+ *backend_disable_mask_per_asic &= ~(1 << i);
+ ++enabled_backends_count;
+ }
+ }
+ }
+
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ force_no_swizzle = true;
+ break;
+ default:
+ force_no_swizzle = false;
+ break;
+ }
+ if (force_no_swizzle) {
+ bool last_backend_enabled = false;
+
+ force_no_swizzle = false;
+ for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
+ if (((enabled_backends_mask >> i) & 1) == 1) {
+ if (last_backend_enabled)
+ force_no_swizzle = true;
+ last_backend_enabled = true;
+ } else
+ last_backend_enabled = false;
+ }
+ }
+
+ switch (num_tile_pipes) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ DRM_ERROR("odd number of pipes!\n");
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 4:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ swizzle_pipe[3] = 3;
+ }
+ break;
+ case 6:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 5;
+ }
+ break;
+ case 8:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ swizzle_pipe[7] = 7;
+ }
+ break;
+ }
+
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
+
+ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+ cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
+ }
+
+ return backend_map;
+}
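The map returned above packs one 4-bit render backend id per tile pipe; a hypothetical decode helper makes the layout explicit:

/* bits [4*pipe + 3 : 4*pipe] of backend_map select the render
 * backend that services tile pipe 'pipe' */
static u32 backend_for_pipe(u32 backend_map, u32 pipe)
{
	return (backend_map >> (pipe * 4)) & 0xf;
}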
+
+static void cayman_program_channel_remap(struct radeon_device *rdev)
+{
+ u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ default:
+ /* default mapping */
+ mc_shared_chremap = 0x00fac688;
+ break;
+ }
+
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ default:
+ //tcp_chan_steer_lo = 0x54763210
+ tcp_chan_steer_lo = 0x76543210;
+ tcp_chan_steer_hi = 0x0000ba98;
+ break;
+ }
+
+ WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+ WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+ WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
+static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
+ u32 disable_mask_per_se,
+ u32 max_disable_mask_per_se,
+ u32 num_shader_engines)
+{
+ u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
+ u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
+
+ if (num_shader_engines == 1)
+ return disable_mask_per_asic;
+ else if (num_shader_engines == 2)
+ return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
+ else
+ return 0xffffffff;
+}
+
+static void cayman_gpu_init(struct radeon_device *rdev)
+{
+ u32 cc_rb_backend_disable = 0;
+ u32 cc_gc_shader_pipe_config;
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 gb_backend_map;
+ u32 cgts_tcc_disable;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 gc_user_shader_pipe_config;
+ u32 gc_user_rb_backend_disable;
+ u32 cgts_user_tcc_disable;
+ u32 cgts_sm_ctrl_reg;
+ u32 hdp_host_path_cntl;
+ u32 tmp;
+ int i, j;
+
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ default:
+ rdev->config.cayman.max_shader_engines = 2;
+ rdev->config.cayman.max_pipes_per_simd = 4;
+ rdev->config.cayman.max_tile_pipes = 8;
+ rdev->config.cayman.max_simds_per_se = 12;
+ rdev->config.cayman.max_backends_per_se = 4;
+ rdev->config.cayman.max_texture_channel_caches = 8;
+ rdev->config.cayman.max_gprs = 256;
+ rdev->config.cayman.max_threads = 256;
+ rdev->config.cayman.max_gs_threads = 32;
+ rdev->config.cayman.max_stack_entries = 512;
+ rdev->config.cayman.sx_num_of_sets = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sq_num_cf_insts = 2;
+
+ rdev->config.cayman.sc_prim_fifo_size = 0x100;
+ rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+ cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE);
+ gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
+ gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
+ cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
+
+ rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
+ tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+ rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
+ rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
+ tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
+ rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
+ tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
+ rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
+ tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
+ rdev->config.cayman.backend_disable_mask_per_asic =
+ cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
+ rdev->config.cayman.num_shader_engines);
+ rdev->config.cayman.backend_map =
+ cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
+ rdev->config.cayman.num_backends_per_se *
+ rdev->config.cayman.num_shader_engines,
+ &rdev->config.cayman.backend_disable_mask_per_asic,
+ rdev->config.cayman.num_shader_engines);
+ tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
+ rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
+ tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
+ rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+ if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
+ rdev->config.cayman.mem_max_burst_length_bytes = 512;
+ tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+ rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (rdev->config.cayman.mem_row_size_in_kb > 4)
+ rdev->config.cayman.mem_row_size_in_kb = 4;
+ /* XXX use MC settings? */
+ rdev->config.cayman.shader_engine_tile_size = 32;
+ rdev->config.cayman.num_gpus = 1;
+ rdev->config.cayman.multi_gpu_tile_size = 64;
+
+ //gb_addr_config = 0x02011003
+#if 0
+ gb_addr_config = RREG32(GB_ADDR_CONFIG);
+#else
+ gb_addr_config = 0;
+ switch (rdev->config.cayman.num_tile_pipes) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_PIPES(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_PIPES(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_PIPES(2);
+ break;
+ case 8:
+ gb_addr_config |= NUM_PIPES(3);
+ break;
+ }
+
+ tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
+ gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
+ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
+ tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
+ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
+ switch (rdev->config.cayman.num_gpus) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_GPUS(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_GPUS(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_GPUS(2);
+ break;
+ }
+ switch (rdev->config.cayman.multi_gpu_tile_size) {
+ case 16:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
+ break;
+ case 32:
+ default:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
+ break;
+ case 64:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+ break;
+ case 128:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
+ break;
+ }
+ switch (rdev->config.cayman.mem_row_size_in_kb) {
+ case 1:
+ default:
+ gb_addr_config |= ROW_SIZE(0);
+ break;
+ case 2:
+ gb_addr_config |= ROW_SIZE(1);
+ break;
+ case 4:
+ gb_addr_config |= ROW_SIZE(2);
+ break;
+ }
+#endif
+
+ tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+ rdev->config.cayman.num_tile_pipes = (1 << tmp);
+ tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+ rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+ tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
+ rdev->config.cayman.num_shader_engines = tmp + 1;
+ tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
+ rdev->config.cayman.num_gpus = tmp + 1;
+ tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
+ rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
+ tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
+ rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
+
+ //gb_backend_map = 0x76541032;
+#if 0
+ gb_backend_map = RREG32(GB_BACKEND_MAP);
+#else
+ gb_backend_map =
+ cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
+ rdev->config.cayman.num_backends_per_se *
+ rdev->config.cayman.num_shader_engines,
+ &rdev->config.cayman.backend_disable_mask_per_asic,
+ rdev->config.cayman.num_shader_engines);
+#endif
+ /* setup tiling info dword. gb_addr_config is not adequate since it does
+ * not have bank info, so create a custom tiling dword.
+ * bits 3:0 num_pipes
+ * bits 7:4 num_banks
+ * bits 11:8 group_size
+ * bits 15:12 row_size
+ */
+ rdev->config.cayman.tile_config = 0;
+ switch (rdev->config.cayman.num_tile_pipes) {
+ case 1:
+ default:
+ rdev->config.cayman.tile_config |= (0 << 0);
+ break;
+ case 2:
+ rdev->config.cayman.tile_config |= (1 << 0);
+ break;
+ case 4:
+ rdev->config.cayman.tile_config |= (2 << 0);
+ break;
+ case 8:
+ rdev->config.cayman.tile_config |= (3 << 0);
+ break;
+ }
+ rdev->config.cayman.tile_config |=
+ ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ rdev->config.cayman.tile_config |=
+ (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+ rdev->config.cayman.tile_config |=
+ ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
+
+ WREG32(GB_BACKEND_MAP, gb_backend_map);
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+ cayman_program_channel_remap(rdev);
+
+ /* primary versions */
+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+ WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+ WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
+
+ /* user versions */
+ WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
+ WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+
+ /* reprogram the shader complex */
+ cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
+ for (i = 0; i < 16; i++)
+ WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
+ WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
+
+ /* need to be explicitly zero-ed */
+ WREG32(VGT_OFFCHIP_LDS_BASE, 0);
+ WREG32(SQ_LSTMP_RING_BASE, 0);
+ WREG32(SQ_HSTMP_RING_BASE, 0);
+ WREG32(SQ_ESTMP_RING_BASE, 0);
+ WREG32(SQ_GSTMP_RING_BASE, 0);
+ WREG32(SQ_VSTMP_RING_BASE, 0);
+ WREG32(SQ_PSTMP_RING_BASE, 0);
+
+ WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
+
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
+ WREG32(SQ_CONFIG, (VC_ENABLE |
+ EXPORT_SRC_C |
+ GFX_PRIO(0) |
+ CS1_PRIO(0) |
+ CS2_PRIO(1)));
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+ AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+}
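The custom tiling dword assembled near the end of cayman_gpu_init unpacks as follows; the decoder is a sketch, and note the pipes field is log2-encoded while the rest are raw register fields:

static void decode_tile_config(u32 tc)
{
	u32 num_pipes = 1u << (tc & 0xf);	/* bits 3:0, log2-encoded */
	u32 num_banks = (tc >> 4) & 0xf;	/* bits 7:4, NOOFBANK field */
	u32 group_size = (tc >> 8) & 0xf;	/* bits 11:8, pipe interleave field */
	u32 row_size = (tc >> 12) & 0xf;	/* bits 15:12, ROW_SIZE field */

	(void)num_pipes; (void)num_banks; (void)group_size; (void)row_size;
}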
+
+/*
+ * GART
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
+int cayman_pcie_gart_enable(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ /* setup context0 */
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT0_CNTL2, 0);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ /* disable context1-7 */
+ WREG32(VM_CONTEXT1_CNTL2, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+
+ cayman_pcie_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void cayman_pcie_gart_disable(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable all tables */
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
+}
+
+void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+ cayman_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
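The VM_CONTEXT0 start/end registers programmed in cayman_pcie_gart_enable take 4 KiB page indices; a worked example with illustrative numbers (gtt_end is inclusive in the radeon MC setup):

/* e.g. a 512 MiB GTT at offset 0:
 *   start page = 0x00000000 >> 12 = 0x00000
 *   end page   = 0x1fffffff >> 12 = 0x1ffff */
static u32 gart_page_index(u64 gpu_addr)
{
	return (u32)(gpu_addr >> 12);
}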
+
+/*
+ * CP.
+ */
+static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ WREG32(CP_ME_CNTL, 0);
+ else {
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+ WREG32(SCRATCH_UMSK, 0);
+ }
+}
+
+static int cayman_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ cayman_cp_enable(rdev, false);
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
+static int cayman_cp_start(struct radeon_device *rdev)
+{
+ int r, i;
+
+ r = radeon_ring_lock(rdev, 7);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
+
+ cayman_cp_enable(rdev, true);
+
+ r = radeon_ring_lock(rdev, cayman_default_size + 19);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ /* setup clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (i = 0; i < cayman_default_size; i++)
+ radeon_ring_write(rdev, cayman_default_state[i]);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ /* set clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+
+ /* Clear consts */
+ radeon_ring_write(rdev, 0xc0036f00);
+ radeon_ring_write(rdev, 0x00000bc4);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+
+ radeon_ring_write(rdev, 0xc0026900);
+ radeon_ring_write(rdev, 0x00000316);
+ radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(rdev, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+ radeon_ring_unlock_commit(rdev);
+
+ /* XXX init other rings */
+
+ return 0;
+}
+
+static void cayman_cp_fini(struct radeon_device *rdev)
+{
+ cayman_cp_enable(rdev, false);
+ radeon_ring_fini(rdev);
+}
+
+int cayman_cp_resume(struct radeon_device *rdev)
+{
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ WREG32(CP_DEBUG, (1 << 27));
+
+ /* ring 0 - compute and gfx */
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB0_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB0_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+ if (rdev->wb.enabled)
+ WREG32(SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
+ mdelay(1);
+ WREG32(CP_RB0_CNTL, tmp);
+
+ WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
+
+ rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ rdev->cp.wptr = RREG32(CP_RB0_WPTR);
+
+ /* ring1 - compute only */
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB1_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB1_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB1_CNTL, tmp);
+
+ WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
+
+ rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
+ rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
+
+ /* ring2 - compute only */
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB2_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB2_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB2_CNTL, tmp);
+
+ WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
+
+ rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
+ rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
+
+ /* start the rings */
+ cayman_cp_start(rdev);
+ rdev->cp.ready = true;
+ rdev->cp1.ready = true;
+ rdev->cp2.ready = true;
+ /* this only tests cp0 */
+ r = radeon_ring_test(rdev);
+ if (r) {
+ rdev->cp.ready = false;
+ rdev->cp1.ready = false;
+ rdev->cp2.ready = false;
+ return r;
+ }
+
+ return 0;
+}
+
+bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+{
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status_se0, grbm_status_se1;
+ struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
+ int r;
+
+ srbm_status = RREG32(SRBM_STATUS);
+ grbm_status = RREG32(GRBM_STATUS);
+ grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+ if (!(grbm_status & GUI_ACTIVE)) {
+ r100_gpu_lockup_update(lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ /* XXX deal with CP0,1,2 */
+ rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+}
+
+static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 grbm_reset = 0;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset \n");
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_resume(rdev, &save);
+ return 0;
+}
+
+int cayman_asic_reset(struct radeon_device *rdev)
+{
+ return cayman_gpu_soft_reset(rdev);
+}
+
+static int cayman_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ r = ni_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+
+ evergreen_mc_program(rdev);
+ r = cayman_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ cayman_gpu_init(rdev);
+
+#if 0
+ r = cayman_blit_init(rdev);
+ if (r) {
+ cayman_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+#endif
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ evergreen_irq_set(rdev);
+
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ if (r)
+ return r;
+ r = cayman_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = cayman_cp_resume(rdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+int cayman_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Do not reset the GPU before posting; on rv770-class hardware,
+ * unlike r5xx, posting performs the tasks needed to bring the GPU
+ * back into good shape.
+ */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+
+ r = cayman_startup(rdev);
+ if (r) {
+ DRM_ERROR("cayman startup failed on resume\n");
+ return r;
+ }
+
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+int cayman_suspend(struct radeon_device *rdev)
+{
+ /* int r; */
+
+ /* FIXME: we should wait for ring to be empty */
+ cayman_cp_enable(rdev, false);
+ rdev->cp.ready = false;
+ evergreen_irq_suspend(rdev);
+ radeon_wb_disable(rdev);
+ cayman_pcie_gart_disable(rdev);
+
+#if 0
+ /* unpin shaders bo */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+#endif
+ return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call ASIC-specific functions. This should also allow us
+ * to remove a bunch of callback functions like vram_info.
+ */
+int cayman_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* This doesn't do much */
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+
+ /* Post card if necessary */
+ if (!radeon_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* initialize memory controller */
+ r = evergreen_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
+ rdev->accel_working = true;
+ r = cayman_startup(rdev);
+ if (r) {
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ cayman_cp_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ cayman_pcie_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
+
+ /* Don't start up if the MC ucode is missing.
+ * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+ */
+ if (!rdev->mc_fw) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void cayman_fini(struct radeon_device *rdev)
+{
+ /* cayman_blit_fini(rdev); */
+ cayman_cp_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ cayman_pcie_gart_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
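For reference on the ring-buffer setup in cayman_cp_resume() above: CP_RB0_CNTL packs the ring and fetch-block sizes as log2 values. A minimal standalone sketch of that encoding, not part of the patch; log2_ceil() is a local stand-in for the kernel's drm_order(), the 1M ring size comes from cayman_init(), and the field layout follows the RB_BUFSZ/RB_BLKSZ definitions in nid.h below.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's drm_order(): smallest n with (1 << n) >= size. */
static unsigned log2_ceil(uint32_t size)
{
    unsigned n = 0;
    while ((1u << n) < size)
        n++;
    return n;
}

int main(void)
{
    uint32_t ring_size = 1024 * 1024; /* 1M ring, as allocated in cayman_init() */
    uint32_t page_size = 4096;        /* RADEON_GPU_PAGE_SIZE */

    uint32_t rb_bufsz = log2_ceil(ring_size / 8); /* ring size in 8-byte units */
    uint32_t rb_blksz = log2_ceil(page_size / 8);
    uint32_t tmp = (rb_blksz << 8) | rb_bufsz; /* RB_BLKSZ(x) = (x) << 8, RB_BUFSZ(x) = (x) << 0 */

    printf("CP_RB0_CNTL = 0x%08x (bufsz=%u, blksz=%u)\n", tmp, rb_bufsz, rb_blksz);
    return 0; /* prints 0x00000911 for a 1M ring with 4K pages */
}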
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index f7b445390e02..0f9a08b53fbd 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -24,7 +24,101 @@
#ifndef NI_H
#define NI_H
+#define CAYMAN_MAX_SH_GPRS 256
+#define CAYMAN_MAX_TEMP_GPRS 16
+#define CAYMAN_MAX_SH_THREADS 256
+#define CAYMAN_MAX_SH_STACK_ENTRIES 4096
+#define CAYMAN_MAX_FRC_EOV_CNT 16384
+#define CAYMAN_MAX_BACKENDS 8
+#define CAYMAN_MAX_BACKENDS_MASK 0xFF
+#define CAYMAN_MAX_BACKENDS_PER_SE_MASK 0xF
+#define CAYMAN_MAX_SIMDS 16
+#define CAYMAN_MAX_SIMDS_MASK 0xFFFF
+#define CAYMAN_MAX_SIMDS_PER_SE_MASK 0xFFF
+#define CAYMAN_MAX_PIPES 8
+#define CAYMAN_MAX_PIPES_MASK 0xFF
+#define CAYMAN_MAX_LDS_NUM 0xFFFF
+#define CAYMAN_MAX_TCC 16
+#define CAYMAN_MAX_TCC_MASK 0xFF
+
+#define DMIF_ADDR_CONFIG 0xBD4
+#define SRBM_STATUS 0x0E50
+
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 18)
+/* CONTEXT1_IDENTITY_ACCESS_MODE
+ * 0 physical = logical
+ * 1 logical via context1 page table
+ * 2 inside identity aperture use translation, outside physical = logical
+ * 3 inside identity aperture physical = logical, outside use translation
+ */
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
+#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_CNTL2 0x1430
+#define VM_CONTEXT1_CNTL2 0x1434
+#define VM_INVALIDATE_REQUEST 0x1478
+#define VM_INVALIDATE_RESPONSE 0x147c
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
+
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_MX_L1_TLB_CNTL 0x2064
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
+
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
#define MC_SEQ_SUP_CNTL 0x28c8
#define RUN_MASK (1 << 0)
#define MC_SEQ_SUP_PGM 0x28cc
@@ -37,5 +131,406 @@
#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_ADDR_CONFIG 0x2F48
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define RING2_RQ_PENDING (1 << 4)
+#define SRBM_RQ_PENDING (1 << 5)
+#define RING1_RQ_PENDING (1 << 6)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GDS_DMA_RQ_PENDING (1 << 9)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define GDS_BUSY (1 << 15)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define IA_BUSY_NO_DMA (1 << 18)
+#define IA_BUSY (1 << 19)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_VGT_BUSY (1 << 23)
+#define SE_PA_BUSY (1 << 24)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_GDS (1 << 4)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VGT (1 << 14)
+#define SOFT_RESET_IA (1 << 15)
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_RB2_RPTR 0x86f8
+#define CP_RB1_RPTR 0x86fc
+#define CP_RB0_RPTR 0x8700
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_MEQ_THRESHOLDS 0x8764
+#define MEQ1_START(x) ((x) << 0)
+#define MEQ2_START(x) ((x) << 8)
+#define CP_PERFMON_CNTL 0x87FC
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_QD_PIPES_SHIFT 8
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0xFFFF0000
+#define INACTIVE_SIMDS_SHIFT 16
+
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_TF_RING_SIZE 0x8988
+#define VGT_OFFCHIP_LDS_BASE 0x89b4
+
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define GFX_PRIO(x) ((x) << 2)
+#define CS1_PRIO(x) ((x) << 4)
+#define CS2_PRIO(x) ((x) << 6)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_BASE 0x8c50
+#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_BASE 0x8c58
+#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_BASE 0x8c60
+#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_BASE 0x8c68
+#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+#define SQ_LSTMP_RING_BASE 0x8e10
+#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_BASE 0x8e18
+#define SQ_HSTMP_RING_SIZE 0x8e1c
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define DYN_GPR_ENABLE (1 << 8)
+#define SQ_CONST_MEM_BASE 0x8df8
+
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define CRC_SIMD_ID_WADDR_DISABLE (1 << 8)
+
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_TCC_DISABLE 0x914C
+#define TCC_DISABLE_MASK 0xFFFF0000
+#define TCC_DISABLE_SHIFT 16
+#define CGTS_SM_CTRL_REG 0x915C
+#define OVERRIDE (1 << 21)
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+
+#define TCP_CHAN_STEER_LO 0x960c
+#define TCP_CHAN_STEER_HI 0x9610
+
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x00000007
+#define NUM_PIPES_SHIFT 0
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
+#define PIPE_INTERLEAVE_SIZE_SHIFT 4
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define NUM_SHADER_ENGINES_MASK 0x00003000
+#define NUM_SHADER_ENGINES_SHIFT 12
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
+#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
+#define NUM_GPUS(x) ((x) << 20)
+#define NUM_GPUS_MASK 0x00700000
+#define NUM_GPUS_SHIFT 20
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
+#define MULTI_GPU_TILE_SIZE_SHIFT 24
+#define ROW_SIZE(x) ((x) << 28)
+#define ROW_SIZE_MASK 0x30000000
+#define ROW_SIZE_SHIFT 28
+#define NUM_LOWER_PIPES(x) ((x) << 30)
+#define NUM_LOWER_PIPES_MASK 0x40000000
+#define NUM_LOWER_PIPES_SHIFT 30
+#define GB_BACKEND_MAP 0x98FC
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+#define BACKEND_DISABLE_MASK 0x00FF0000
+#define BACKEND_DISABLE_SHIFT 16
+
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define CP_RB0_BASE 0xC100
+#define CP_RB0_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB0_RPTR_ADDR 0xC10C
+#define CP_RB0_RPTR_ADDR_HI 0xC110
+#define CP_RB0_WPTR 0xC114
+#define CP_RB1_BASE 0xC180
+#define CP_RB1_CNTL 0xC184
+#define CP_RB1_RPTR_ADDR 0xC188
+#define CP_RB1_RPTR_ADDR_HI 0xC18C
+#define CP_RB1_WPTR 0xC190
+#define CP_RB2_BASE 0xC194
+#define CP_RB2_CNTL 0xC198
+#define CP_RB2_RPTR_ADDR 0xC19C
+#define CP_RB2_RPTR_ADDR_HI 0xC1A0
+#define CP_RB2_WPTR 0xC1A4
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_ME_RAM_DATA 0xC160
+#define CP_DEBUG 0xC1FC
+
+/*
+ * PM4
+ */
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DEALLOC_STATE 0x14
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_OFFSET 0x29
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDEX 0x2B
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_WRITE_DATA 0x37
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
+# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
+# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
+# define PACKET3_FULL_CACHE_ENA (1 << 20)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_ACTION_ENA (1 << 27)
+# define PACKET3_SX_ACTION_ENA (1 << 28)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
+#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
+#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_ALU_CONST 0x6A
+/* alu const buffers only; no reg file */
+#define PACKET3_SET_BOOL_CONST 0x6B
+#define PACKET3_SET_BOOL_CONST_START 0x0003a500
+#define PACKET3_SET_BOOL_CONST_END 0x0003a518
+#define PACKET3_SET_LOOP_CONST 0x6C
+#define PACKET3_SET_LOOP_CONST_START 0x0003a200
+#define PACKET3_SET_LOOP_CONST_END 0x0003a500
+#define PACKET3_SET_RESOURCE 0x6D
+#define PACKET3_SET_RESOURCE_START 0x00030000
+#define PACKET3_SET_RESOURCE_END 0x00038000
+#define PACKET3_SET_SAMPLER 0x6E
+#define PACKET3_SET_SAMPLER_START 0x0003c000
+#define PACKET3_SET_SAMPLER_END 0x0003c600
+#define PACKET3_SET_CTL_CONST 0x6F
+#define PACKET3_SET_CTL_CONST_START 0x0003cff0
+#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
+#define PACKET3_SET_RESOURCE_OFFSET 0x70
+#define PACKET3_SET_ALU_CONST_VS 0x71
+#define PACKET3_SET_ALU_CONST_DI 0x72
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_APPEND_CNT 0x75
+
#endif
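To make the PM4 header layout above concrete, here is a small standalone sketch, not part of the patch; the macros are re-created locally and mirror the PACKET3 and CP_PACKET_GET_* definitions in this header.

#include <stdint.h>
#include <stdio.h>

#define PACKET_TYPE3             3
#define PACKET3(op, n)           ((PACKET_TYPE3 << 30) | (((op) & 0xFF) << 8) | (((n) & 0x3FFF) << 16))
#define CP_PACKET_GET_TYPE(h)    (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h)   (((h) >> 16) & 0x3FFF)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)

int main(void)
{
    /* Header emitted by cayman_cp_start(): ME_INITIALIZE (0x44), count 5, six payload dwords. */
    uint32_t h = PACKET3(0x44, 5);
    printf("encoded: 0x%08x\n", h); /* 0xc0054400 */

    /* Decode the raw 0xc0026f00 header used for SQ_VTX_BASE_VTX_LOC in cayman_cp_start(): */
    uint32_t raw = 0xc0026f00;
    printf("type=%u opcode=0x%02x count=%u\n",
           CP_PACKET_GET_TYPE(raw), CP_PACKET3_GET_OPCODE(raw), CP_PACKET_GET_COUNT(raw));
    /* type=3, opcode=0x6f (SET_CTL_CONST), count=2, so three payload dwords follow */
    return 0;
}

Decoding headers this way mirrors what the CS parser does when validating command streams.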
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 56deae5bf02e..f2204cb1ccdf 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -70,23 +70,6 @@ MODULE_FIRMWARE(FIRMWARE_R520);
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
- u32 tmp;
-
- /* make sure flip is at vb rather than hb */
- tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
- tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
- /* make sure pending bit is asserted */
- tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
- WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
-
- /* set pageflip to happen as late as possible in the vblank interval.
- * same field for crtc1/2
- */
- tmp = RREG32(RADEON_CRTC_GEN_CNTL);
- tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
- WREG32(RADEON_CRTC_GEN_CNTL, tmp);
-
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
@@ -1041,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
return r;
}
rdev->cp.ready = true;
- rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -1059,7 +1042,7 @@ void r100_cp_fini(struct radeon_device *rdev)
void r100_cp_disable(struct radeon_device *rdev)
{
/* Disable ring */
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -1222,14 +1205,12 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (waitreloc.reg != RADEON_WAIT_UNTIL ||
waitreloc.count != 0) {
DRM_ERROR("vline wait had illegal wait until segment\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
DRM_ERROR("vline wait had illegal wait until\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* jump over the NOP */
@@ -1247,8 +1228,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -1270,14 +1250,13 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("unknown crtc reloc\n");
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
ib[h_idx] = header;
ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
}
-out:
- return r;
+
+ return 0;
}
/**
@@ -2329,7 +2308,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
/* FIXME we don't use the second aperture yet when we could use it */
if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
if (rdev->flags & RADEON_IS_IGP) {
uint32_t tom;
@@ -3490,7 +3468,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
- track->aaresolve = true;
+ track->aaresolve = false;
track->aa.robj = NULL;
}
@@ -3639,7 +3617,7 @@ int r100_ib_test(struct radeon_device *rdev)
if (i < rdev->usec_timeout) {
DRM_INFO("ib test succeeded in %u usecs\n", i);
} else {
- DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
@@ -3659,13 +3637,13 @@ int r100_ib_init(struct radeon_device *rdev)
r = radeon_ib_pool_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failled testing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
@@ -3801,8 +3779,6 @@ static int r100_startup(struct radeon_device *rdev)
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
- /* Initialize GPU configuration (# pipes, ...) */
-// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
@@ -3823,12 +3799,12 @@ static int r100_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 069efa8c8ecf..55a7f190027e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -437,7 +437,7 @@ int r300_asic_reset(struct radeon_device *rdev)
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* resetting the CP seems to be problematic sometimes it end up
- * hard locking the computer, but it's necessary for successfull
+ * hard locking the computer, but it's necessary for successful
* reset more test & playing is needed on R3XX/R4XX to find a
* reliable (if any solution)
*/
@@ -1401,12 +1401,12 @@ static int r300_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index f0bce399c9f3..00c0d2ba22d3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -608,7 +608,7 @@
* My guess is that there are two bits for each zbias primitive
* (FILL, LINE, POINT).
* One to enable depth test and one for depth write.
- * Yet this doesnt explain why depth writes work ...
+ * Yet this doesn't explain why depth writes work ...
*/
#define R300_RE_OCCLUSION_CNTL 0x42B4
# define R300_OCCLUSION_ON (1<<1)
@@ -817,7 +817,7 @@
# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
-/* NOTE: NEAREST doesnt seem to exist.
+/* NOTE: NEAREST doesn't seem to exist.
* Im not seting MAG_FILTER_MASK and (3 << 11) on for all
* anisotropy modes because that would void selected mag filter
*/
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 0b59ed7c7d2c..417fab81812f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -260,13 +260,13 @@ static int r420_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r420_cp_errata_init(rdev);
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 2ce80d976568..3081d07f8de5 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -193,12 +193,12 @@ static int r520_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index de88624d5f87..6f27593901c7 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -47,6 +47,7 @@
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
+#define CAYMAN_RLC_UCODE_SIZE 1024
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -586,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev)
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
if (voltage->voltage != rdev->pm.current_vddc) {
- radeon_atom_set_voltage(rdev, voltage->voltage);
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
}
@@ -1255,7 +1256,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP) {
@@ -1937,7 +1937,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
void r600_cp_stop(struct radeon_device *rdev)
{
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
}
@@ -2464,7 +2464,7 @@ int r600_resume(struct radeon_device *rdev)
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
@@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev)
{
int r;
- r = radeon_dummy_page_init(rdev);
- if (r)
- return r;
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
@@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
- radeon_dummy_page_fini(rdev);
}
@@ -2740,7 +2736,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+ r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->ih.ring_obj);
@@ -2821,13 +2817,20 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_HB_CNTL, 0);
WREG32(RLC_HB_RPTR, 0);
WREG32(RLC_HB_WPTR, 0);
- WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
- WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ if (rdev->family <= CHIP_CAICOS) {
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ }
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_CEDAR) {
+ if (rdev->family >= CHIP_CAYMAN) {
+ for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_CEDAR) {
for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -3228,7 +3231,7 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
u32 wptr, tmp;
if (rdev->wb.enabled)
- wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
else
wptr = RREG32(IH_RB_WPTR);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index b5443fe1c1d1..846fae576399 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon.h"
#include "radeon_reg.h"
+#include "radeon_asic.h"
#include "atom.h"
#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 41f7aafc97c4..9aa74c3f8cb6 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -512,7 +512,7 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
@@ -558,7 +558,7 @@ done:
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
- rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -566,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev)
{
int r;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
if (rdev->r600_blit.shader_obj == NULL)
return;
/* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 153095fba62f..fd18be9871ab 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -71,75 +71,167 @@ struct r600_cs_track {
u64 db_bo_mc;
};
+#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc }
+#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc }
+#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 }
+#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc }
+#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 }
+#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc }
+#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc }
+
+struct gpu_formats {
+ unsigned blockwidth;
+ unsigned blockheight;
+ unsigned blocksize;
+ unsigned valid_color;
+};
+
+static const struct gpu_formats color_formats_table[] = {
+ /* 8 bit */
+ FMT_8_BIT(V_038004_COLOR_8, 1),
+ FMT_8_BIT(V_038004_COLOR_4_4, 1),
+ FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
+ FMT_8_BIT(V_038004_FMT_1, 0),
+
+ /* 16-bit */
+ FMT_16_BIT(V_038004_COLOR_16, 1),
+ FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
+ FMT_16_BIT(V_038004_COLOR_8_8, 1),
+ FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
+ FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
+ FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
+
+ /* 24-bit */
+ FMT_24_BIT(V_038004_FMT_8_8_8),
+
+ /* 32-bit */
+ FMT_32_BIT(V_038004_COLOR_32, 1),
+ FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
+ FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
+ FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
+ FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
+
+ /* 48-bit */
+ FMT_48_BIT(V_038004_FMT_16_16_16),
+ FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
+
+ /* 64-bit */
+ FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
+
+ FMT_96_BIT(V_038004_FMT_32_32_32),
+ FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
+
+ /* 128-bit */
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
+
+ [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
+ [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
+
+ /* block compressed formats */
+ [V_038004_FMT_BC1] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC2] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC5] = { 4, 4, 16, 0},
+
+};
+
+static inline bool fmt_is_valid_color(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (color_formats_table[format].valid_color)
+ return true;
+
+ return false;
+}
+
+static inline bool fmt_is_valid_texture(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (color_formats_table[format].blockwidth > 0)
+ return true;
+
+ return false;
+}
+
+static inline int fmt_get_blocksize(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ return color_formats_table[format].blocksize;
+}
+
+static inline int fmt_get_nblocksx(u32 format, u32 w)
+{
+ unsigned bw;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bw = color_formats_table[format].blockwidth;
+ if (bw == 0)
+ return 0;
+
+ return (w + bw - 1) / bw;
+}
+
+static inline int fmt_get_nblocksy(u32 format, u32 h)
+{
+ unsigned bh;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bh = color_formats_table[format].blockheight;
+ if (bh == 0)
+ return 0;
+
+ return (h + bh - 1) / bh;
+}
+
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
{
- switch (format) {
- case V_038004_COLOR_8:
- case V_038004_COLOR_4_4:
- case V_038004_COLOR_3_3_2:
- case V_038004_FMT_1:
- *bpe = 1;
- break;
- case V_038004_COLOR_16:
- case V_038004_COLOR_16_FLOAT:
- case V_038004_COLOR_8_8:
- case V_038004_COLOR_5_6_5:
- case V_038004_COLOR_6_5_5:
- case V_038004_COLOR_1_5_5_5:
- case V_038004_COLOR_4_4_4_4:
- case V_038004_COLOR_5_5_5_1:
- *bpe = 2;
- break;
- case V_038004_FMT_8_8_8:
- *bpe = 3;
- break;
- case V_038004_COLOR_32:
- case V_038004_COLOR_32_FLOAT:
- case V_038004_COLOR_16_16:
- case V_038004_COLOR_16_16_FLOAT:
- case V_038004_COLOR_8_24:
- case V_038004_COLOR_8_24_FLOAT:
- case V_038004_COLOR_24_8:
- case V_038004_COLOR_24_8_FLOAT:
- case V_038004_COLOR_10_11_11:
- case V_038004_COLOR_10_11_11_FLOAT:
- case V_038004_COLOR_11_11_10:
- case V_038004_COLOR_11_11_10_FLOAT:
- case V_038004_COLOR_2_10_10_10:
- case V_038004_COLOR_8_8_8_8:
- case V_038004_COLOR_10_10_10_2:
- case V_038004_FMT_5_9_9_9_SHAREDEXP:
- case V_038004_FMT_32_AS_8:
- case V_038004_FMT_32_AS_8_8:
- *bpe = 4;
- break;
- case V_038004_COLOR_X24_8_32_FLOAT:
- case V_038004_COLOR_32_32:
- case V_038004_COLOR_32_32_FLOAT:
- case V_038004_COLOR_16_16_16_16:
- case V_038004_COLOR_16_16_16_16_FLOAT:
- *bpe = 8;
- break;
- case V_038004_FMT_16_16_16:
- case V_038004_FMT_16_16_16_FLOAT:
- *bpe = 6;
- break;
- case V_038004_FMT_32_32_32:
- case V_038004_FMT_32_32_32_FLOAT:
- *bpe = 12;
- break;
- case V_038004_COLOR_32_32_32_32:
- case V_038004_COLOR_32_32_32_32_FLOAT:
- *bpe = 16;
- break;
- case V_038004_FMT_GB_GR:
- case V_038004_FMT_BG_RG:
- case V_038004_COLOR_INVALID:
- default:
- *bpe = 16;
- return -EINVAL;
- }
+ unsigned res;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ goto fail;
+
+ res = color_formats_table[format].blocksize;
+ if (res == 0)
+ goto fail;
+
+ *bpe = res;
return 0;
+
+fail:
+ *bpe = 16;
+ return -EINVAL;
}
struct array_mode_checker {
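The table-driven helpers above replace the old bytes-per-element switch with per-format block dimensions, which is what lets compressed formats through the checker. A standalone sketch of the arithmetic, not part of the patch, using the V_038004_FMT_BC1 table entry (4x4 texel blocks, 8 bytes per block) as the assumed input:

#include <stdio.h>

/* Round-up block count, as fmt_get_nblocksx()/fmt_get_nblocksy() compute it. */
static unsigned nblocks(unsigned texels, unsigned block_dim)
{
    return (texels + block_dim - 1) / block_dim;
}

int main(void)
{
    unsigned w = 256, h = 250;              /* 250 is deliberately not a multiple of 4 */
    unsigned bw = 4, bh = 4, blocksize = 8; /* V_038004_FMT_BC1 entry in color_formats_table */

    unsigned bytes = nblocks(w, bw) * nblocks(h, bh) * blocksize;
    printf("BC1 %ux%u: %u x %u blocks = %u bytes\n",
           w, h, nblocks(w, bw), nblocks(h, bh), bytes);
    /* 64 x 63 blocks = 32256 bytes; a flat bytes-per-pixel value cannot express this */
    return 0;
}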
@@ -148,7 +240,7 @@ struct array_mode_checker {
u32 nbanks;
u32 npipes;
u32 nsamples;
- u32 bpe;
+ u32 blocksize;
};
/* returns alignment in pixels for pitch/height/depth and bytes for base */
@@ -162,7 +254,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
u32 tile_height = 8;
u32 macro_tile_width = values->nbanks;
u32 macro_tile_height = values->npipes;
- u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
+ u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
switch (values->array_mode) {
@@ -174,7 +266,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
*base_align = 1;
break;
case ARRAY_LINEAR_ALIGNED:
- *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
+ *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
*height_align = tile_height;
*depth_align = 1;
*base_align = values->group_size;
@@ -182,7 +274,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
case ARRAY_1D_TILED_THIN1:
*pitch_align = max((u32)tile_width,
(u32)(values->group_size /
- (tile_height * values->bpe * values->nsamples)));
+ (tile_height * values->blocksize * values->nsamples)));
*height_align = tile_height;
*depth_align = 1;
*base_align = values->group_size;
@@ -190,12 +282,12 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
case ARRAY_2D_TILED_THIN1:
*pitch_align = max((u32)macro_tile_width,
(u32)(((values->group_size / tile_height) /
- (values->bpe * values->nsamples)) *
+ (values->blocksize * values->nsamples)) *
values->nbanks)) * tile_width;
*height_align = macro_tile_height * tile_height;
*depth_align = 1;
*base_align = max(macro_tile_bytes,
- (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
+ (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
break;
default:
return -EINVAL;
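For the tiled branches above, a standalone sketch of the ARRAY_1D_TILED_THIN1 pitch computation, not part of the patch. The 256-byte group size and 4-byte blocksize are assumed for illustration (the driver reads the real values from the tiling configuration), while the 8x8 tile dimensions match the constants in r600_get_array_mode_alignment().

#include <stdio.h>

static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

int main(void)
{
    unsigned group_size = 256;                /* assumed typical value */
    unsigned tile_width = 8, tile_height = 8; /* as in r600_get_array_mode_alignment() */
    unsigned blocksize = 4;                   /* e.g. an 8_8_8_8 color buffer */
    unsigned nsamples = 1;

    unsigned pitch_align = max_u(tile_width,
                                 group_size / (tile_height * blocksize * nsamples));
    printf("pitch_align=%u px, height_align=%u px, base_align=%u bytes\n",
           pitch_align, tile_height, group_size); /* 8, 8, 256 */
    return 0;
}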
@@ -234,21 +326,22 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 bpe = 0, slice_tile_max, size, tmp;
+ u32 slice_tile_max, size, tmp;
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
volatile u32 *ib = p->ib->ptr;
unsigned array_mode;
-
+ u32 format;
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
return -EINVAL;
}
size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
- if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
+ format = G_0280A0_FORMAT(track->cb_color_info[i]);
+ if (!fmt_is_valid_color(format)) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
- __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
+ __func__, __LINE__, format,
i, track->cb_color_info[i]);
return -EINVAL;
}
@@ -267,7 +360,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.bpe = bpe;
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -311,7 +404,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
/* check offset */
- tmp = height * pitch * bpe;
+ tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format);
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
@@ -436,7 +529,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.bpe = bpe;
+ array_check.blocksize = bpe;
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
@@ -687,33 +780,28 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (wait_reg_mem.type != PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* waiting for value to be equal */
if ((wait_reg_mem_info & 0x7) != 0x3) {
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* jump over the NOP */
@@ -732,8 +820,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -756,14 +843,13 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("unknown crtc reloc\n");
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
ib[h_idx] = header;
ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
}
-out:
- return r;
+
+ return 0;
}
static int r600_packet0_check(struct radeon_cs_parser *p,
@@ -835,7 +921,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return 0;
ib = p->ib->ptr;
switch (reg) {
- /* force following reg to 0 in an attemp to disable out buffer
+ /* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
* security check on it (Jerome)
*/
@@ -1113,39 +1199,61 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return 0;
}
-static inline unsigned minify(unsigned size, unsigned levels)
+static inline unsigned mip_minify(unsigned size, unsigned level)
{
- size = size >> levels;
- if (size < 1)
- size = 1;
- return size;
+ unsigned val;
+
+ val = max(1U, size >> level);
+ if (level > 0)
+ val = roundup_pow_of_two(val);
+ return val;
}
-static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
- unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
- unsigned pitch_align,
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+ unsigned w0, unsigned h0, unsigned d0, unsigned format,
+ unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
- unsigned offset, i, level, face;
- unsigned width, height, depth, rowstride, size;
-
- w0 = minify(w0, 0);
- h0 = minify(h0, 0);
- d0 = minify(d0, 0);
+ unsigned offset, i, level;
+ unsigned width, height, depth, size;
+ unsigned blocksize;
+ unsigned nbx, nby;
+ unsigned nlevels = llevel - blevel + 1;
+
+ *l0_size = -1;
+ blocksize = fmt_get_blocksize(format);
+
+ w0 = mip_minify(w0, 0);
+ h0 = mip_minify(h0, 0);
+ d0 = mip_minify(d0, 0);
for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
- width = minify(w0, i);
- height = minify(h0, i);
- depth = minify(d0, i);
- for(face = 0; face < nfaces; face++) {
- rowstride = ALIGN((width * bpe), pitch_align);
- size = height * rowstride * depth;
- offset += size;
- offset = (offset + 0x1f) & ~0x1f;
- }
+ width = mip_minify(w0, i);
+ nbx = fmt_get_nblocksx(format, width);
+
+ nbx = round_up(nbx, block_align);
+
+ height = mip_minify(h0, i);
+ nby = fmt_get_nblocksy(format, height);
+ nby = round_up(nby, height_align);
+
+ depth = mip_minify(d0, i);
+
+ size = nbx * nby * blocksize;
+ if (nfaces)
+ size *= nfaces;
+ else
+ size *= depth;
+
+ if (i == 0)
+ *l0_size = size;
+
+ if (i == 0 || i == 1)
+ offset = round_up(offset, base_align);
+
+ offset += size;
}
- *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
*mipmap_size = offset;
- if (!nlevels)
+ if (llevel == 0)
*mipmap_size = *l0_size;
if (!blevel)
*mipmap_size -= *l0_size;
@@ -1169,11 +1277,13 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
u32 tiling_flags)
{
struct r600_cs_track *track = p->track;
- u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
- u32 word0, word1, l0_size, mipmap_size;
+ u32 nfaces, llevel, blevel, w0, h0, d0;
+ u32 word0, word1, l0_size, mipmap_size, word2, word3;
u32 height_align, pitch, pitch_align, depth_align;
+ u32 array, barray, larray;
u64 base_align;
struct array_mode_checker array_check;
+ u32 format;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
@@ -1199,19 +1309,25 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
case V_038000_SQ_TEX_DIM_3D:
break;
case V_038000_SQ_TEX_DIM_CUBEMAP:
- nfaces = 6;
+ if (p->family >= CHIP_RV770)
+ nfaces = 8;
+ else
+ nfaces = 6;
break;
case V_038000_SQ_TEX_DIM_1D_ARRAY:
case V_038000_SQ_TEX_DIM_2D_ARRAY:
+ array = 1;
+ break;
case V_038000_SQ_TEX_DIM_2D_MSAA:
case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
default:
dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
- if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
+ format = G_038004_DATA_FORMAT(word1);
+ if (!fmt_is_valid_texture(format)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
- __func__, __LINE__, G_038004_DATA_FORMAT(word1));
+ __func__, __LINE__, format);
return -EINVAL;
}
@@ -1222,7 +1338,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = 1;
- array_check.bpe = bpe;
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1248,25 +1364,34 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
return -EINVAL;
}
+ word2 = radeon_get_ib_value(p, idx + 2) << 8;
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+
word0 = radeon_get_ib_value(p, idx + 4);
word1 = radeon_get_ib_value(p, idx + 5);
blevel = G_038010_BASE_LEVEL(word0);
- nlevels = G_038014_LAST_LEVEL(word1);
- r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
- (pitch_align * bpe),
+ llevel = G_038014_LAST_LEVEL(word1);
+ if (array == 1) {
+ barray = G_038014_BASE_ARRAY(word1);
+ larray = G_038014_LAST_ARRAY(word1);
+
+ nfaces = larray - barray + 1;
+ }
+ r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
+ pitch_align, height_align, base_align,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
- word0 = radeon_get_ib_value(p, idx + 2) << 8;
- if ((l0_size + word0) > radeon_bo_size(texture)) {
+ if ((l0_size + word2) > radeon_bo_size(texture)) {
dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
+ w0, h0, format, word2, l0_size, radeon_bo_size(texture));
+ dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
- word0 = radeon_get_ib_value(p, idx + 3) << 8;
- if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+ if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
+ w0, h0, format, blevel, llevel, word3, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
}
@@ -1289,6 +1414,38 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
idx_value = radeon_get_ib_value(p, idx);
switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ }
+ break;
+
case PACKET3_START_3D_CMDBUF:
if (p->family >= CHIP_RV770 || pkt->count) {
DRM_ERROR("bad START_3D\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e6a58ed48dcf..f5ac7e788d81 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
/*
@@ -333,7 +334,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
- /* it's unknown what these bits do excatly, but it's indeed quite usefull for debugging */
+ /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 04bac0bbd3ec..b2b944bcd05a 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1304,6 +1304,11 @@
#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
#define V_038004_FMT_32_32_32 0x0000002F
#define V_038004_FMT_32_32_32_FLOAT 0x00000030
+#define V_038004_FMT_BC1 0x00000031
+#define V_038004_FMT_BC2 0x00000032
+#define V_038004_FMT_BC3 0x00000033
+#define V_038004_FMT_BC4 0x00000034
+#define V_038004_FMT_BC5 0x00000035
#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
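BC1 through BC5 are block-compressed formats storing 4x4 texel blocks: BC1 and BC4 pack a block into 64 bits (8 bytes), while BC2, BC3 and BC5 use 128 bits (16 bytes). A lookup in the spirit of the checker's new fmt_get_blocksize() helper, sketched against the format codes above but otherwise not the kernel implementation:

    #include <stdio.h>

    #define FMT_BC1 0x31
    #define FMT_BC2 0x32
    #define FMT_BC3 0x33
    #define FMT_BC4 0x34
    #define FMT_BC5 0x35

    /* bytes per 4x4 texel block, 0 for formats not handled here */
    static unsigned bc_blocksize(unsigned fmt)
    {
            switch (fmt) {
            case FMT_BC1:
            case FMT_BC4:
                    return 8;       /* 64 bits per block */
            case FMT_BC2:
            case FMT_BC3:
            case FMT_BC5:
                    return 16;      /* 128 bits per block */
            default:
                    return 0;
            }
    }

    int main(void)
    {
            unsigned fmt;

            for (fmt = FMT_BC1; fmt <= FMT_BC5; fmt++)
                    printf("format 0x%02x: %u bytes/block\n",
                           fmt, bc_blocksize(fmt));
            return 0;
    }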
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 56c48b67ef3d..ba643b576054 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
@@ -258,8 +258,9 @@ struct radeon_bo {
int surface_reg;
/* Constant after initialization */
struct radeon_device *rdev;
- struct drm_gem_object *gobj;
+ struct drm_gem_object gem_base;
};
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
struct radeon_bo_list {
struct ttm_validate_buffer tv;
@@ -288,6 +289,15 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
/*
* GART structures, functions & helpers
@@ -319,6 +329,7 @@ struct radeon_gart {
union radeon_gart_table table;
struct page **pages;
dma_addr_t *pages_addr;
+ bool *ttm_alloced;
bool ready;
};
@@ -331,7 +342,8 @@ void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist);
+ int pages, struct page **pagelist,
+ dma_addr_t *dma_addr);
/*
@@ -345,7 +357,6 @@ struct radeon_mc {
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
- u64 active_vram_size;
u64 gtt_size;
u64 gtt_start;
u64 gtt_end;
@@ -652,6 +663,8 @@ struct radeon_wb {
#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define RADEON_WB_CP1_RPTR_OFFSET 1280
+#define RADEON_WB_CP2_RPTR_OFFSET 1536
#define R600_WB_IH_WPTR_OFFSET 2048
#define R600_WB_EVENT_OFFSET 3072
@@ -666,11 +679,11 @@ struct radeon_wb {
* @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
* @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
* @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
- * @sclk: GPU clock Mhz (core bandwith depends of this clock)
+ * @sclk: GPU clock MHz (core bandwidth depends on this clock)
* @needed_bandwidth: current bandwidth needs
*
* It keeps track of various data needed to take powermanagement decision.
- * Bandwith need is used to determine minimun clock of the GPU and memory.
+ * Bandwidth need is used to determine the minimum clock of the GPU and memory.
* Equation between gpu/memory clock and available bandwidth is hw dependent
* (type of memory, bus size, efficiency, ...)
*/
@@ -754,7 +767,9 @@ struct radeon_voltage {
u8 vddci_id; /* index into vddci voltage table */
bool vddci_enabled;
/* r6xx+ sw */
- u32 voltage;
+ u16 voltage;
+ /* evergreen+ vddci */
+ u16 vddci;
};
/* clock mode flags */
@@ -822,10 +837,12 @@ struct radeon_pm {
int default_power_state_index;
u32 current_sclk;
u32 current_mclk;
- u32 current_vddc;
+ u16 current_vddc;
+ u16 current_vddci;
u32 default_sclk;
u32 default_mclk;
- u32 default_vddc;
+ u16 default_vddc;
+ u16 default_vddci;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;
@@ -1038,12 +1055,52 @@ struct evergreen_asic {
struct r100_gpu_lockup lockup;
};
+struct cayman_asic {
+ unsigned max_shader_engines;
+ unsigned max_pipes_per_simd;
+ unsigned max_tile_pipes;
+ unsigned max_simds_per_se;
+ unsigned max_backends_per_se;
+ unsigned max_texture_channel_caches;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_gs_threads;
+ unsigned max_stack_entries;
+ unsigned sx_num_of_sets;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned max_hw_contexts;
+ unsigned sq_num_cf_insts;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_size;
+
+ unsigned num_shader_engines;
+ unsigned num_shader_pipes_per_simd;
+ unsigned num_tile_pipes;
+ unsigned num_simds_per_se;
+ unsigned num_backends_per_se;
+ unsigned backend_disable_mask_per_asic;
+ unsigned backend_map;
+ unsigned num_texture_channel_caches;
+ unsigned mem_max_burst_length_bytes;
+ unsigned mem_row_size_in_kb;
+ unsigned shader_engine_tile_size;
+ unsigned num_gpus;
+ unsigned multi_gpu_tile_size;
+
+ unsigned tile_config;
+ struct r100_gpu_lockup lockup;
+};
+
union radeon_asic_config {
struct r300_asic r300;
struct r100_asic r100;
struct r600_asic r600;
struct rv770_asic rv770;
struct evergreen_asic evergreen;
+ struct cayman_asic cayman;
};
/*
@@ -1134,6 +1191,9 @@ struct radeon_device {
struct radeon_mman mman;
struct radeon_fence_driver fence_drv;
struct radeon_cp cp;
+ /* cayman compute rings */
+ struct radeon_cp cp1;
+ struct radeon_cp cp2;
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;
@@ -1186,19 +1246,6 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
-/* evergreen blit */
-int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void evergreen_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
-
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
@@ -1448,63 +1495,17 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
-/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern bool r600_card_posted(struct radeon_device *rdev);
-extern void r600_cp_stop(struct radeon_device *rdev);
-extern int r600_cp_start(struct radeon_device *rdev);
-extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_cp_resume(struct radeon_device *rdev);
-extern void r600_cp_fini(struct radeon_device *rdev);
-extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
-extern int r600_pcie_gart_init(struct radeon_device *rdev);
-extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
-extern int r600_ib_test(struct radeon_device *rdev);
-extern int r600_ring_test(struct radeon_device *rdev);
-extern void r600_scratch_init(struct radeon_device *rdev);
-extern int r600_blit_init(struct radeon_device *rdev);
-extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_asic_reset(struct radeon_device *rdev);
-/* r600 irq */
-extern int r600_irq_init(struct radeon_device *rdev);
-extern void r600_irq_fini(struct radeon_device *rdev);
-extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_irq_set(struct radeon_device *rdev);
-extern void r600_irq_suspend(struct radeon_device *rdev);
-extern void r600_disable_interrupts(struct radeon_device *rdev);
-extern void r600_rlc_stop(struct radeon_device *rdev);
-/* r600 audio */
-extern int r600_audio_init(struct radeon_device *rdev);
-extern int r600_audio_tmds_index(struct drm_encoder *encoder);
-extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-extern int r600_audio_channels(struct radeon_device *rdev);
-extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
-extern int r600_audio_rate(struct radeon_device *rdev);
-extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
-extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
-extern void r600_audio_schedule_polling(struct radeon_device *rdev);
-extern void r600_audio_enable_polling(struct drm_encoder *encoder);
-extern void r600_audio_disable_polling(struct drm_encoder *encoder);
-extern void r600_audio_fini(struct radeon_device *rdev);
-extern void r600_hdmi_init(struct drm_encoder *encoder);
+/*
+ * r600 functions used by radeon_encoder.c
+ */
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
-
-extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern void r700_cp_stop(struct radeon_device *rdev);
-extern void r700_cp_fini(struct radeon_device *rdev);
-extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
-extern int evergreen_irq_set(struct radeon_device *rdev);
-extern int evergreen_blit_init(struct radeon_device *rdev);
-extern void evergreen_blit_fini(struct radeon_device *rdev);
extern int ni_init_microcode(struct radeon_device *rdev);
-extern int btc_mc_load_microcode(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
@@ -1513,14 +1514,6 @@ extern int radeon_acpi_init(struct radeon_device *rdev);
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
#endif
-/* evergreen */
-struct evergreen_mc_save {
- u32 vga_control[6];
- u32 vga_render_control;
- u32 vga_hdp_control;
- u32 crtc_control[6];
-};
-
#include "radeon_object.h"
#endif
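struct radeon_bo now embeds its drm_gem_object instead of pointing at one, and gem_to_radeon_bo() recovers the containing bo with container_of(), so the old gobj->driver_private indirection goes away. A minimal sketch of the embed-and-container_of pattern, with an illustrative struct layout rather than the real one:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct gem_object {
            unsigned long size;
    };

    struct bo {
            int placement;
            struct gem_object gem_base;     /* embedded, not a pointer */
    };

    #define gem_to_bo(g) container_of((g), struct bo, gem_base)

    int main(void)
    {
            struct bo b = { .placement = 4, .gem_base = { .size = 4096 } };
            struct gem_object *gobj = &b.gem_base;

            /* recover the containing bo without any driver_private field */
            printf("placement %d, size %lu\n",
                   gem_to_bo(gobj)->placement, gobj->size);
            return 0;
    }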
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e75d63b8e21d..ca576191d058 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
- if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
+ if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
@@ -834,6 +834,9 @@ static struct radeon_asic sumo_asic = {
.pm_finish = &evergreen_pm_finish,
.pm_init_profile = &rs780_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
};
static struct radeon_asic btc_asic = {
@@ -882,6 +885,52 @@ static struct radeon_asic btc_asic = {
.post_page_flip = &evergreen_post_page_flip,
};
+static struct radeon_asic cayman_asic = {
+ .init = &cayman_init,
+ .fini = &cayman_fini,
+ .suspend = &cayman_suspend,
+ .resume = &cayman_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &cayman_gpu_is_lockup,
+ .asic_reset = &cayman_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = NULL,
+ .copy_dma = NULL,
+ .copy = NULL,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+};
+
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
@@ -974,6 +1023,9 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_CAICOS:
rdev->asic = &btc_asic;
break;
+ case CHIP_CAYMAN:
+ rdev->asic = &cayman_asic;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
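Each family is driven through a table of function pointers, and cayman mostly reuses the evergreen callbacks while leaving the copy hooks NULL until an accelerated blit path exists. A sketch of that ops-table dispatch with invented callbacks, not the radeon_asic structs themselves:

    #include <stdio.h>

    struct device;

    struct asic_ops {
            int (*ring_test)(struct device *dev);
            int (*copy)(struct device *dev); /* NULL = not wired up yet */
    };

    static int common_ring_test(struct device *dev)
    {
            (void)dev;
            puts("ring test ok");
            return 0;
    }

    static const struct asic_ops new_chip_ops = {
            .ring_test = common_ring_test,
            .copy = NULL,   /* fall back until a blit path exists */
    };

    static int do_copy(const struct asic_ops *ops, struct device *dev)
    {
            if (!ops->copy) {
                    puts("no accelerated copy, using fallback");
                    return 0;
            }
            return ops->copy(dev);
    }

    int main(void)
    {
            new_chip_ops.ring_test(NULL);
            do_copy(&new_chip_ops, NULL);
            return 0;
    }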
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c59bd98a2029..3d7a0d7c6a9a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -57,8 +57,6 @@ int r100_init(struct radeon_device *rdev);
void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
bool r100_gpu_is_lockup(struct radeon_device *rdev);
int r100_asic_reset(struct radeon_device *rdev);
@@ -164,8 +162,6 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,7 +204,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
-
/*
* rs600.
*/
@@ -270,8 +265,6 @@ void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
-uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
@@ -307,14 +300,13 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-int r600_irq_process(struct radeon_device *rdev);
-int r600_irq_set(struct radeon_device *rdev);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+int r600_ib_test(struct radeon_device *rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
@@ -333,6 +325,50 @@ extern void rs780_pm_init_profile(struct radeon_device *rdev);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+bool r600_card_posted(struct radeon_device *rdev);
+void r600_cp_stop(struct radeon_device *rdev);
+int r600_cp_start(struct radeon_device *rdev);
+void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_cp_resume(struct radeon_device *rdev);
+void r600_cp_fini(struct radeon_device *rdev);
+int r600_count_pipe_bits(uint32_t val);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+int r600_pcie_gart_init(struct radeon_device *rdev);
+void r600_scratch_init(struct radeon_device *rdev);
+int r600_blit_init(struct radeon_device *rdev);
+void r600_blit_fini(struct radeon_device *rdev);
+int r600_init_microcode(struct radeon_device *rdev);
+/* r600 irq */
+int r600_irq_process(struct radeon_device *rdev);
+int r600_irq_init(struct radeon_device *rdev);
+void r600_irq_fini(struct radeon_device *rdev);
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_irq_set(struct radeon_device *rdev);
+void r600_irq_suspend(struct radeon_device *rdev);
+void r600_disable_interrupts(struct radeon_device *rdev);
+void r600_rlc_stop(struct radeon_device *rdev);
+/* r600 audio */
+int r600_audio_init(struct radeon_device *rdev);
+int r600_audio_tmds_index(struct drm_encoder *encoder);
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+int r600_audio_channels(struct radeon_device *rdev);
+int r600_audio_bits_per_sample(struct radeon_device *rdev);
+int r600_audio_rate(struct radeon_device *rdev);
+uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+uint8_t r600_audio_category_code(struct radeon_device *rdev);
+void r600_audio_schedule_polling(struct radeon_device *rdev);
+void r600_audio_enable_polling(struct drm_encoder *encoder);
+void r600_audio_disable_polling(struct drm_encoder *encoder);
+void r600_audio_fini(struct radeon_device *rdev);
+void r600_hdmi_init(struct drm_encoder *encoder);
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+/* r600 blit */
+int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void r600_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
/*
* rv770,rv730,rv710,rv740
@@ -341,12 +377,21 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
-extern void rv770_pm_misc(struct radeon_device *rdev);
-extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void rv770_pm_misc(struct radeon_device *rdev);
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+void r700_cp_stop(struct radeon_device *rdev);
+void r700_cp_fini(struct radeon_device *rdev);
/*
* evergreen
*/
+struct evergreen_mc_save {
+ u32 vga_control[6];
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 crtc_control[6];
+};
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -374,5 +419,25 @@ extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_blit_init(struct radeon_device *rdev);
+void evergreen_blit_fini(struct radeon_device *rdev);
+/* evergreen blit */
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
+
+/*
+ * cayman
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int cayman_init(struct radeon_device *rdev);
+void cayman_fini(struct radeon_device *rdev);
+int cayman_suspend(struct radeon_device *rdev);
+int cayman_resume(struct radeon_device *rdev);
+bool cayman_gpu_is_lockup(struct radeon_device *rdev);
+int cayman_asic_reset(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 02d5c415f499..f116516bfef7 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -675,7 +675,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_ENCODER_CAP_RECORD *cap_record;
u16 caps = 0;
- while (record->ucRecordType > 0 &&
+ while (record->ucRecordSize > 0 &&
+ record->ucRecordType > 0 &&
record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
switch (record->ucRecordType) {
case ATOM_ENCODER_CAP_RECORD_TYPE:
@@ -720,7 +721,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
- while (record->ucRecordType > 0 &&
+ while (record->ucRecordSize > 0 &&
+ record->ucRecordType > 0 &&
record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
switch (record->ucRecordType) {
case ATOM_I2C_RECORD_TYPE:
@@ -782,10 +784,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_HPD_INT_RECORD *hpd_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
- while (record->ucRecordType > 0
- && record->
- ucRecordType <=
- ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ while (record->ucRecordSize > 0 &&
+ record->ucRecordType > 0 &&
+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
switch (record->ucRecordType) {
case ATOM_I2C_RECORD_TYPE:
i2c_record =
@@ -1598,9 +1599,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
fake_edid_record->ucFakeEDIDLength);
- if (drm_edid_is_valid(edid))
+ if (drm_edid_is_valid(edid)) {
rdev->mode_info.bios_hardcoded_edid = edid;
- else
+ rdev->mode_info.bios_hardcoded_edid_size = edid_size;
+ } else
kfree(edid);
}
}
@@ -2175,24 +2177,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
}
}
-static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
u8 frev, crev;
u16 data_offset;
union firmware_info *firmware_info;
- u16 vddc = 0;
+
+ *vddc = 0;
+ *vddci = 0;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
- vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ if ((frev == 2) && (crev >= 2))
+ *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
}
-
- return vddc;
}
static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
@@ -2202,7 +2207,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
int j;
u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
- u16 vddc = radeon_atombios_get_default_vddc(rdev);
+ u16 vddc, vddci;
+
+ radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
rdev->pm.power_state[state_index].misc = misc;
rdev->pm.power_state[state_index].misc2 = misc2;
@@ -2243,6 +2250,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+ rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
} else {
/* patch the table values with the default slck/mclk from firmware info */
for (j = 0; j < mode_index; j++) {
@@ -2285,6 +2293,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
le16_to_cpu(clock_info->evergreen.usVDDC);
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+ le16_to_cpu(clock_info->evergreen.usVDDCI);
} else {
sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2576,25 +2586,25 @@ union set_voltage {
struct _SET_VOLTAGE_PARAMETERS_V2 v2;
};
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev, volt_index = level;
+ u8 frev, crev, volt_index = voltage_level;
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
switch (crev) {
case 1:
- args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v1.ucVoltageType = voltage_type;
args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
args.v1.ucVoltageIndex = volt_index;
break;
case 2:
- args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v2.ucVoltageType = voltage_type;
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
- args.v2.usVoltageLevel = cpu_to_le16(level);
+ args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index c558685cc637..10191d9372d8 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index cf7c8d5b4ec2..8caf546c8e92 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
- int edid_info;
+ int edid_info, size;
struct edid *edid;
unsigned char *raw;
edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
@@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
return false;
raw = rdev->bios + edid_info;
- edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
+ size = EDID_LENGTH * (raw[0x7e] + 1);
+ edid = kmalloc(size, GFP_KERNEL);
if (edid == NULL)
return false;
- memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
+ memcpy((unsigned char *)edid, raw, size);
if (!drm_edid_is_valid(edid)) {
kfree(edid);
@@ -468,6 +469,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
}
rdev->mode_info.bios_hardcoded_edid = edid;
+ rdev->mode_info.bios_hardcoded_edid_size = size;
return true;
}
@@ -475,8 +477,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
struct edid *
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
{
- if (rdev->mode_info.bios_hardcoded_edid)
- return rdev->mode_info.bios_hardcoded_edid;
+ struct edid *edid;
+
+ if (rdev->mode_info.bios_hardcoded_edid) {
+ edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
+ if (edid) {
+ memcpy((unsigned char *)edid,
+ (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
+ rdev->mode_info.bios_hardcoded_edid_size);
+ return edid;
+ }
+ }
return NULL;
}
@@ -2068,6 +2079,19 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
&hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 22b7e3dc0eca..5f45fa12bb8b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,10 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
+extern void
+radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector);
+
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -629,6 +633,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
radeon_vga_detect(struct drm_connector *connector, bool force)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
@@ -679,6 +685,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+
+ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+ * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+ * by other means, assume the CRT is connected and use that EDID.
+ */
+ if ((!rdev->is_atom_bios) &&
+ (ret == connector_status_disconnected) &&
+ rdev->mode_info.bios_hardcoded_edid_size) {
+ ret = connector_status_connected;
+ }
+
radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
@@ -790,6 +807,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
static enum drm_connector_status
radeon_dvi_detect(struct drm_connector *connector, bool force)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
struct drm_encoder_helper_funcs *encoder_funcs;
@@ -829,8 +848,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
* you don't really know what's connected to which port as both are digital.
*/
if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
@@ -895,6 +912,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
}
+ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+ * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+ * by other means, assume the DFP is connected and use that EDID. In most
+ * cases the DVI port is actually a virtual KVM port connected to the service
+ * processor.
+ */
+ if ((!rdev->is_atom_bios) &&
+ (ret == connector_status_disconnected) &&
+ rdev->mode_info.bios_hardcoded_edid_size) {
+ radeon_connector->use_digital = true;
+ ret = connector_status_connected;
+ }
+
out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
@@ -972,7 +1002,16 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else
+ else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
+ if (ASIC_IS_DCE3(rdev)) {
+ /* HDMI 1.3+ supports max clock of 340 Mhz */
+ if (mode->clock > 340000)
+ return MODE_CLOCK_HIGH;
+ else
+ return MODE_OK;
+ } else
+ return MODE_CLOCK_HIGH;
+ } else
return MODE_CLOCK_HIGH;
}
return MODE_OK;
@@ -1160,7 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
if (!radeon_connector->router_bus)
- goto failed;
+ DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
@@ -1169,7 +1208,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1187,7 +1226,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1210,7 +1249,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
@@ -1251,7 +1290,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
@@ -1290,10 +1329,10 @@ radeon_add_atom_connector(struct drm_device *dev,
else
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
- goto failed;
+ DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
@@ -1342,7 +1381,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
@@ -1418,7 +1457,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1436,7 +1475,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1454,7 +1493,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -1499,7 +1538,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
@@ -1517,9 +1556,15 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
- return;
+ if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ struct drm_encoder *drm_encoder;
-failed:
- drm_connector_cleanup(connector);
- kfree(connector);
+ list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder;
+
+ radeon_encoder = to_radeon_encoder(drm_encoder);
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
+ radeon_legacy_backlight_init(radeon_encoder, connector);
+ }
+ }
}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index eb6b9eed7349..75867792a4e2 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -244,7 +244,7 @@ void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
u32 agp_base_lo = agp_base & 0xffffffff;
u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff;
- /* R6xx/R7xx must be aligned to a 4MB boundry */
+ /* R6xx/R7xx must be aligned to a 4MB boundary */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
@@ -2113,9 +2113,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
break;
}
- if (drm_device_is_agp(dev))
+ if (drm_pci_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
- else if (drm_device_is_pcie(dev))
+ else if (drm_pci_device_is_pcie(dev))
dev_priv->flags |= RADEON_IS_PCIE;
else
dev_priv->flags |= RADEON_IS_PCI;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 35b5eb8fbe2a..8c1916941871 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -75,7 +75,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return -ENOENT;
}
p->relocs_ptr[i] = &p->relocs[i];
- p->relocs[i].robj = p->relocs[i].gobj->driver_private;
+ p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.wdomain = r->write_domain;
p->relocs[i].lobj.rdomain = r->read_domains;
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 017ac54920fb..bdf2fa1189ae 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -226,7 +226,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
y += crtc->y;
DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
- /* avivo cursor image can't end on 128 pixel boundry or
+ /* avivo cursor image can't end on 128 pixel boundary or
* go past the end of the frame if both crtcs are enabled
*/
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4954e2d6ffa2..890217e678d3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -85,6 +85,7 @@ static const char radeon_family_name[][16] = {
"BARTS",
"TURKS",
"CAICOS",
+ "CAYMAN",
"LAST",
};
@@ -184,7 +185,7 @@ int radeon_wb_init(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -261,7 +262,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* Note: GTT start, end, size should be initialized before calling this
* function on AGP platform.
*
- * Note: We don't explictly enforce VRAM start to be aligned on VRAM size,
+ * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
* this shouldn't be a problem as we are using the PCI aperture as a reference.
* Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
* not IGP.
@@ -860,7 +861,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
- robj = rfb->obj->driver_private;
+ robj = gem_to_radeon_bo(rfb->obj);
/* don't unpin kernel fb objects */
if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0e657095de7c..bdbab5c43bdc 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -371,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
INIT_WORK(&work->work, radeon_unpin_work_func);
@@ -391,7 +391,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
/* pin the new buffer */
obj = new_radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, rbo);
@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+ for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
uint32_t ref_div;
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
@@ -1492,7 +1492,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
*
* \return Flags, or'ed together as follows:
*
- * DRM_SCANOUTPOS_VALID = Query successfull.
+ * DRM_SCANOUTPOS_VALID = Query successful.
* DRM_SCANOUTPOS_INVBL = Inside vblank.
* DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
* this flag means that returned position may be offset by a constant but
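Walking the post dividers from largest to smallest makes the legacy PLL search prefer the highest VCO frequency that still reaches the target, which is generally the more robust operating point. A toy sketch of the search direction; the clock limits are made up:

    #include <stdio.h>

    int main(void)
    {
            unsigned target_khz = 108000;           /* wanted pixel clock */
            unsigned vco_min = 200000, vco_max = 1400000;
            unsigned post_div, best = 0;

            for (post_div = 12; post_div >= 1; post_div--) {
                    unsigned vco = target_khz * post_div;

                    if (vco < vco_min || vco > vco_max)
                            continue;
                    best = post_div;        /* first hit is the largest */
                    break;
            }
            printf("post_div %u, vco %u kHz\n", best, target_khz * best);
            return 0;
    }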
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 275b26a708d6..63d2de8771dc 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -49,9 +49,10 @@
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
* 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
* 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
+ * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,16 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
void radeon_debugfs_cleanup(struct drm_minor *minor);
@@ -228,11 +239,6 @@ static struct drm_driver driver_old = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -322,6 +328,9 @@ static struct drm_driver kms_driver = {
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
.dma_ioctl = radeon_dma_ioctl_kms,
+ .dumb_create = radeon_mode_dumb_create,
+ .dumb_map_offset = radeon_mode_dumb_mmap,
+ .dumb_destroy = radeon_mode_dumb_destroy,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -336,15 +345,6 @@ static struct drm_driver kms_driver = {
#endif
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = radeon_pci_probe,
- .remove = radeon_pci_remove,
- .suspend = radeon_pci_suspend,
- .resume = radeon_pci_resume,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -354,15 +354,32 @@ static struct drm_driver kms_driver = {
};
static struct drm_driver *driver;
+static struct pci_driver *pdriver;
+
+static struct pci_driver radeon_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
+static struct pci_driver radeon_kms_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = radeon_pci_probe,
+ .remove = radeon_pci_remove,
+ .suspend = radeon_pci_suspend,
+ .resume = radeon_pci_resume,
+};
static int __init radeon_init(void)
{
driver = &driver_old;
+ pdriver = &radeon_pci_driver;
driver->num_ioctls = radeon_max_ioctl;
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && radeon_modeset == -1) {
DRM_INFO("VGACON disable radeon kernel modesetting.\n");
driver = &driver_old;
+ pdriver = &radeon_pci_driver;
driver->driver_features &= ~DRIVER_MODESET;
radeon_modeset = 0;
}
@@ -380,18 +397,19 @@ static int __init radeon_init(void)
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
+ pdriver = &radeon_kms_pci_driver;
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
radeon_register_atpx_handler();
}
/* if the vga console setting is enabled still
* let modprobe override it */
- return drm_init(driver);
+ return drm_pci_init(driver, pdriver);
}
static void __exit radeon_exit(void)
{
- drm_exit(driver);
+ drm_pci_exit(driver, pdriver);
radeon_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 5cba46b9779a..a1b59ca96d01 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -271,7 +271,7 @@ typedef struct drm_radeon_private {
int have_z_offset;
- /* starting from here on, data is preserved accross an open */
+ /* starting from here on, data is preserved across an open */
uint32_t flags; /* see radeon_chip_flags */
resource_size_t fb_aper_offset;
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 1ca55eb09ad3..6f1d9e563e77 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -84,6 +84,7 @@ enum radeon_family {
CHIP_BARTS,
CHIP_TURKS,
CHIP_CAICOS,
+ CHIP_CAYMAN,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 66324b5bb5ba..0b7b486c97e8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -64,7 +64,7 @@ static struct fb_ops radeonfb_ops = {
};
-static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
int aligned = width;
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
@@ -90,7 +90,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
- struct radeon_bo *rbo = gobj->driver_private;
+ struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
int ret;
ret = radeon_bo_reserve(rbo, false);
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
+ int height = mode_cmd->height;
/* need to align pitch with crtc limits */
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd->pitch * mode_cmd->height;
+ if (rdev->family >= CHIP_R600)
+ height = ALIGN(mode_cmd->height, 8);
+ size = mode_cmd->pitch * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
@@ -128,7 +131,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size);
return -ENOMEM;
}
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
@@ -202,7 +205,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
mode_cmd.depth = sizes->surface_depth;
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
@@ -403,14 +406,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev)
struct radeon_bo *robj;
int size = 0;
- robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+ robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
size += radeon_bo_size(robj);
return size;
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
- if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+ if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
}
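
The driver_private conversions above rely on the accessor this series introduces alongside the embedded GEM object: with struct drm_gem_object embedded in struct radeon_bo as gem_base, the wrapper is recovered with container_of() instead of an opaque pointer. Roughly (the definition is not shown in this hunk, so treat this as a sketch):

	static inline struct radeon_bo *gem_to_radeon_bo(struct drm_gem_object *gobj)
	{
		return container_of(gobj, struct radeon_bo, gem_base);
	}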
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 171b0b2e3a64..bbcd1dd7bac0 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -60,8 +60,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emited = true;
- list_del(&fence->list);
- list_add_tail(&fence->list, &rdev->fence_drv.emited);
+ list_move_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
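
list_move_tail() is not new behavior, just the list.h helper that collapses the delete/add pair into one call. From include/linux/list.h (paraphrased):

	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del(list->prev, list->next);
		list_add_tail(list, head);
	}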
@@ -80,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- seq = rdev->wb.wb[scratch_index/4];
+ seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
if (seq != rdev->fence_drv.last_seq) {
@@ -121,8 +120,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
i = n;
do {
n = i->prev;
- list_del(i);
- list_add_tail(i, &rdev->fence_drv.signaled);
+ list_move_tail(i, &rdev->fence_drv.signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
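
The le32_to_cpu() added to the writeback read matters on big-endian hosts: the GPU stores sequence numbers little-endian in the writeback page, so a raw load is only correct on little-endian CPUs. A sketch of the idiom (the helper name is illustrative):

	static u32 wb_read_dword(const __le32 *wb, unsigned dw)
	{
		/* GPU writes little-endian; convert to host byte order */
		return le32_to_cpu(wb[dw]);
	}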
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 65016117d95f..8a955bbdb608 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+ r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->gart.table.vram.robj);
if (r) {
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
- pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (!rdev->gart.ttm_alloced[p])
+ pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
}
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist)
+ int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
unsigned t;
unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- /* we need to support large memory configurations */
- /* assume that unbind have already been call on the range */
- rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+ /* On the TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+ * is requested. */
+ if (dma_addr[i] != DMA_ERROR_CODE) {
+ rdev->gart.ttm_alloced[p] = true;
+ rdev->gart.pages_addr[p] = dma_addr[i];
+ } else {
+ /* we need to support large memory configurations */
+ /* assume that unbind has already been called on the range */
+ rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
- /* FIXME: failed to map page (return -ENOMEM?) */
- radeon_gart_unbind(rdev, offset, pages);
- return -ENOMEM;
+ if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+ /* FIXME: failed to map page (return -ENOMEM?) */
+ radeon_gart_unbind(rdev, offset, pages);
+ return -ENOMEM;
+ }
}
rdev->gart.pages[p] = pagelist[i];
page_base = rdev->gart.pages_addr[p];
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
+ rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+ rdev->gart.num_cpu_pages, GFP_KERNEL);
+ if (rdev->gart.ttm_alloced == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
/* set GART entry to point to the dummy page by default */
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,10 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.ready = false;
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
+ kfree(rdev->gart.ttm_alloced);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
+ rdev->gart.ttm_alloced = NULL;
+
+ radeon_dummy_page_fini(rdev);
}
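
The new ttm_alloced[] array encodes an ownership rule: only addresses that the GART code itself obtained via pci_map_page() may be passed to pci_unmap_page(); addresses handed over by TTM's DMA allocator (flagged by dma_addr[i] != DMA_ERROR_CODE at bind time) are left for TTM to release. Condensed into one hypothetical helper:

	static void gart_unbind_one(struct radeon_device *rdev, unsigned p)
	{
		if (!rdev->gart.ttm_alloced[p])
			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		rdev->gart.pages[p] = NULL;
		rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
	}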
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index df95eb83dac6..aa1ca2dea42f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -32,21 +32,18 @@
int radeon_gem_object_init(struct drm_gem_object *obj)
{
- /* we do nothings here */
+ BUG();
+
return 0;
}
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
- struct radeon_bo *robj = gobj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(gobj);
- gobj->driver_private = NULL;
if (robj) {
radeon_bo_unref(&robj);
}
-
- drm_gem_object_release(gobj);
- kfree(gobj);
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -54,36 +51,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
bool discardable, bool kernel,
struct drm_gem_object **obj)
{
- struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
*obj = NULL;
- gobj = drm_gem_object_alloc(rdev->ddev, size);
- if (!gobj) {
- return -ENOMEM;
- }
/* At least align on page size */
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
- drm_gem_object_unreference_unlocked(gobj);
return r;
}
- gobj->driver_private = robj;
- *obj = gobj;
+ *obj = &robj->gem_base;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_add_tail(&robj->list, &rdev->gem.objects);
+ mutex_unlock(&rdev->gem.mutex);
+
return 0;
}
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
- struct radeon_bo *robj = obj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
r = radeon_bo_reserve(robj, false);
@@ -96,7 +91,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
- struct radeon_bo *robj = obj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
r = radeon_bo_reserve(robj, false);
@@ -114,7 +109,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
int r;
/* FIXME: reimplement */
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
/* work out where to validate the buffer to */
domain = wdomain;
if (!domain) {
@@ -156,9 +151,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
+ struct ttm_mem_type_manager *man;
+
+ man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size;
- args->vram_visible = rdev->mc.real_vram_size;
+ args->vram_visible = (u64)man->size << PAGE_SHIFT;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
@@ -228,7 +226,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
@@ -236,23 +234,31 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
-int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
{
- struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(dev, filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
- args->addr_ptr = radeon_bo_mmap_offset(robj);
+ robj = gem_to_radeon_bo(gobj);
+ *offset_p = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_mmap *args = data;
+
+ return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+}
+
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -266,7 +272,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
@@ -296,7 +302,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
@@ -317,7 +323,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
drm_gem_object_unreference_unlocked(gobj);
return r;
@@ -335,7 +341,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
goto out;
@@ -345,3 +351,40 @@ out:
drm_gem_object_unreference_unlocked(gobj);
return r;
}
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+ int r;
+
+ args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->size = args->pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
+
+ r = radeon_gem_object_create(rdev, args->size, 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_device,
+ &gobj);
+ if (r)
+ return r;
+
+ r = drm_gem_handle_create(file_priv, gobj, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(gobj);
+ if (r) {
+ return r;
+ }
+ args->handle = handle;
+ return 0;
+}
+
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
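
From userspace these three hooks are reached through the generic dumb-buffer ioctls, giving a KMS client a scanout buffer without any radeon-specific API. A minimal sketch using libdrm (error handling and the handle leak on mmap failure are glossed over):

	#include <stdint.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <xf86drm.h>
	#include <drm/drm.h>

	int create_scanout(int fd, uint32_t w, uint32_t h, void **map)
	{
		struct drm_mode_create_dumb creq;
		struct drm_mode_map_dumb mreq;

		memset(&creq, 0, sizeof(creq));
		creq.width = w;
		creq.height = h;
		creq.bpp = 32;
		if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
			return -1;		/* -> radeon_mode_dumb_create */

		memset(&mreq, 0, sizeof(mreq));
		mreq.handle = creq.handle;
		if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
			return -1;		/* -> radeon_mode_dumb_mmap */

		*map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, mreq.offset);
		return *map == MAP_FAILED ? -1 : (int)creq.handle;
	}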
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index ded2a45bc95c..983cbac75af0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
*val = in_buf[0];
DRM_DEBUG("val = 0x%02x\n", *val);
} else {
- DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+ DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
addr, *val);
}
}
@@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
out_buf[1] = val;
if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
- DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+ DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
addr, val);
}
@@ -1096,6 +1096,9 @@ void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
if (!radeon_connector->router.ddc_valid)
return;
+ if (!radeon_connector->router_bus)
+ return;
+
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
@@ -1121,6 +1124,9 @@ void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
if (!radeon_connector->router.cd_valid)
return;
+ if (!radeon_connector->router_bus)
+ return;
+
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8387d32caaa7..bd58af658581 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -58,9 +58,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)rdev;
/* update BUS flag */
- if (drm_device_is_agp(dev)) {
+ if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
- } else if (drm_device_is_pcie(dev)) {
+ } else if (drm_pci_device_is_pcie(dev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -169,7 +169,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
value = rdev->accel_working;
break;
case RADEON_INFO_TILING_CONFIG:
- if (rdev->family >= CHIP_CEDAR)
+ if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.tile_config;
+ else if (rdev->family >= CHIP_CEDAR)
value = rdev->config.evergreen.tile_config;
else if (rdev->family >= CHIP_RV770)
value = rdev->config.rv770.tile_config;
@@ -205,6 +207,36 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
/* return clock value in KHz */
value = rdev->clock.spll.reference_freq * 10;
break;
+ case RADEON_INFO_NUM_BACKENDS:
+ if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.max_backends_per_se *
+ rdev->config.cayman.max_shader_engines;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.max_backends;
+ else if (rdev->family >= CHIP_RV770)
+ value = rdev->config.rv770.max_backends;
+ else if (rdev->family >= CHIP_R600)
+ value = rdev->config.r600.max_backends;
+ else {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_NUM_TILE_PIPES:
+ if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.max_tile_pipes;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.max_tile_pipes;
+ else if (rdev->family >= CHIP_RV770)
+ value = rdev->config.rv770.max_tile_pipes;
+ else if (rdev->family >= CHIP_R600)
+ value = rdev->config.r600.max_tile_pipes;
+ else {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_FUSION_GART_WORKING:
+ value = 1;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
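
The new request values are consumed from userspace via DRM_RADEON_INFO; drm_radeon_info.value carries a pointer to the uint32_t the kernel fills in. A sketch, assuming the matching radeon_drm.h update that defines RADEON_INFO_NUM_TILE_PIPES:

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <drm/radeon_drm.h>

	static int radeon_num_tile_pipes(int fd, uint32_t *out)
	{
		struct drm_radeon_info info;

		memset(&info, 0, sizeof(info));
		info.request = RADEON_INFO_NUM_TILE_PIPES;
		info.value = (uintptr_t)out;
		/* fails with -EINVAL on pre-R600 parts, per the cascade above */
		return drmCommandWriteRead(fd, DRM_RADEON_INFO,
					   &info, sizeof(info));
	}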
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cf0638c3b7c7..41a5d48e657b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -415,7 +415,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -443,7 +443,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
(target_fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
-
+ crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev))
crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
@@ -502,6 +502,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
gen_cntl_val = RREG32(gen_cntl_reg);
gen_cntl_val &= ~(0xf << 8);
gen_cntl_val |= (format << 8);
+ gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
WREG32(gen_cntl_reg, gen_cntl_val);
crtc_offset = (u32)base;
@@ -520,7 +521,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -888,7 +889,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
if (rdev->flags & RADEON_IS_MOBILITY) {
- /* A temporal workaround for the occational blanking on certain laptop panels.
+ /* A temporary workaround for the occasional blanking on certain laptop panels.
This appears to be related to the PLL divider registers (failing to lock?).
It occurs even when all dividers are the same with their old settings.
In this case we really don't need to fiddle with PLL registers.
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 59f834ba283d..2f46e0c8df53 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -28,6 +28,10 @@
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
+#include <linux/backlight.h>
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
{
@@ -39,7 +43,7 @@ static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
radeon_encoder->active_device = 0;
}
-static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -47,15 +51,23 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
int panel_pwr_delay = 2000;
bool is_mac = false;
+ uint8_t backlight_level;
DRM_DEBUG_KMS("\n");
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
panel_pwr_delay = lvds->panel_pwr_delay;
+ if (lvds->bl_dev)
+ backlight_level = lvds->backlight_level;
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
panel_pwr_delay = lvds->panel_pwr_delay;
+ if (lvds->bl_dev)
+ backlight_level = lvds->backlight_level;
}
}
@@ -82,11 +94,13 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
- lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
- lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
+ lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS |
+ RADEON_LVDS_BL_MOD_LEVEL_MASK);
+ lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN |
+ RADEON_LVDS_DIGON | RADEON_LVDS_BLON |
+ (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
if (is_mac)
lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
- lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
break;
@@ -95,7 +109,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_OFF:
pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
- lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
if (is_mac) {
lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
@@ -119,6 +132,25 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
}
+static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DRM_DEBUG("\n");
+
+ if (radeon_encoder->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ lvds->dpms_mode = mode;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ lvds->dpms_mode = mode;
+ }
+ }
+
+ radeon_legacy_lvds_update(encoder, mode);
+}
+
static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
@@ -237,9 +269,222 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.disable = radeon_legacy_encoder_disable,
};
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+#define MAX_RADEON_LEVEL 0xFF
+
+struct radeon_backlight_privdata {
+ struct radeon_encoder *encoder;
+ uint8_t negative;
+};
+
+static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ uint8_t level;
+
+ /* Convert brightness to hardware level */
+ if (bd->props.brightness < 0)
+ level = 0;
+ else if (bd->props.brightness > MAX_RADEON_LEVEL)
+ level = MAX_RADEON_LEVEL;
+ else
+ level = bd->props.brightness;
+
+ if (pdata->negative)
+ level = MAX_RADEON_LEVEL - level;
+
+ return level;
+}
+
+static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int dpms_mode = DRM_MODE_DPMS_ON;
+
+ if (radeon_encoder->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ dpms_mode = lvds->dpms_mode;
+ lvds->backlight_level = radeon_legacy_lvds_level(bd);
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ dpms_mode = lvds->dpms_mode;
+ lvds->backlight_level = radeon_legacy_lvds_level(bd);
+ }
+ }
+
+ if (bd->props.brightness > 0)
+ radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
+ else
+ radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF);
+
+ return 0;
+}
+
+static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint8_t backlight_level;
+
+ backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+ RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+ return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level;
+}
+
+static const struct backlight_ops radeon_backlight_ops = {
+ .get_brightness = radeon_legacy_backlight_get_brightness,
+ .update_status = radeon_legacy_backlight_update_status,
+};
+
+void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ struct radeon_backlight_privdata *pdata;
+ uint8_t backlight_level;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+ if (!pmac_has_backlight_type("ati") &&
+ !pmac_has_backlight_type("mnca"))
+ return;
+#endif
+
+ pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+ if (!pdata) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto error;
+ }
+
+ props.max_brightness = MAX_RADEON_LEVEL;
+ props.type = BACKLIGHT_RAW;
+ bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+ pdata, &radeon_backlight_ops, &props);
+ if (IS_ERR(bd)) {
+ DRM_ERROR("Backlight registration failed\n");
+ goto error;
+ }
+
+ pdata->encoder = radeon_encoder;
+
+ backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+ RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+ /* First, try to detect backlight level sense based on the assumption
+ * that firmware set it up at full brightness
+ */
+ if (backlight_level == 0)
+ pdata->negative = true;
+ else if (backlight_level == 0xff)
+ pdata->negative = false;
+ else {
+ /* XXX hack... maybe some day we can figure out in what direction
+ * backlight should work on a given panel?
+ */
+ pdata->negative = (rdev->family != CHIP_RV200 &&
+ rdev->family != CHIP_RV250 &&
+ rdev->family != CHIP_RV280 &&
+ rdev->family != CHIP_RV350);
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+ pdata->negative = (pdata->negative ||
+ of_machine_is_compatible("PowerBook4,3") ||
+ of_machine_is_compatible("PowerBook6,3") ||
+ of_machine_is_compatible("PowerBook6,5"));
+#endif
+ }
+
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ lvds->bl_dev = bd;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ lvds->bl_dev = bd;
+ }
+
+ bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ DRM_INFO("radeon legacy LVDS backlight initialized\n");
+
+ return;
+
+error:
+ kfree(pdata);
+ return;
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd = NULL;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ bd = lvds->bl_dev;
+ lvds->bl_dev = NULL;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ bd = lvds->bl_dev;
+ lvds->bl_dev = NULL;
+ }
+
+ if (bd) {
+ struct radeon_backlight_privdata *pdata;
+
+ pdata = bl_get_data(bd);
+ backlight_device_unregister(bd);
+ kfree(pdata);
+
+ DRM_INFO("radeon legacy LVDS backlight unloaded\n");
+ }
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_legacy_backlight_init(struct radeon_encoder *encoder,
+				   struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+
+static void radeon_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->enc_priv) {
+ radeon_legacy_backlight_exit(radeon_encoder);
+ kfree(radeon_encoder->enc_priv);
+ }
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
- .destroy = radeon_enc_destroy,
+ .destroy = radeon_lvds_enc_destroy,
};
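
Registering through the backlight class means no radeon-specific userspace is needed: the device shows up under /sys/class/backlight/ and the standard brightness attribute drives radeon_legacy_backlight_update_status(). A sketch (the exact sysfs path follows from the "radeon_bl" name and is an assumption):

	#include <stdio.h>

	/* level is 0..255, matching MAX_RADEON_LEVEL above */
	static int set_radeon_backlight(int level)
	{
		FILE *f = fopen("/sys/class/backlight/radeon_bl/brightness", "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", level);
		return fclose(f);
	}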
static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index a670caaee29e..9c57538231d5 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -239,6 +239,7 @@ struct radeon_mode_info {
struct drm_property *underscan_vborder_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
+ int bios_hardcoded_edid_size;
/* pointer to fbdev info structure */
struct radeon_fbdev *rfbdev;
@@ -302,6 +303,9 @@ struct radeon_encoder_lvds {
uint32_t lvds_gen_cntl;
/* panel mode */
struct drm_display_mode native_mode;
+ struct backlight_device *bl_dev;
+ int dpms_mode;
+ uint8_t backlight_level;
};
struct radeon_encoder_tv_dac {
@@ -355,6 +359,9 @@ struct radeon_encoder_atom_dig {
uint32_t lcd_ss_id;
/* panel mode */
struct drm_display_mode native_mode;
+ struct backlight_device *bl_dev;
+ int dpms_mode;
+ uint8_t backlight_level;
};
struct radeon_encoder_atom_dac {
@@ -676,4 +683,5 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d6b8e88f746..976c3b1b1b6e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -55,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
+ drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -86,7 +87,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.num_busy_placement = c;
}
-int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
@@ -96,6 +97,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
unsigned long max_size = 0;
int r;
+ size = ALIGN(size, PAGE_SIZE);
+
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
}
@@ -118,8 +121,13 @@ retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
+ r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+ if (unlikely(r)) {
+ kfree(bo);
+ return r;
+ }
bo->rdev = rdev;
- bo->gobj = gobj;
+ bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
radeon_ttm_placement_from_domain(bo, domain);
@@ -142,12 +150,9 @@ retry:
return r;
}
*bo_ptr = bo;
- if (gobj) {
- mutex_lock(&bo->rdev->gem.mutex);
- list_add_tail(&bo->list, &rdev->gem.objects);
- mutex_unlock(&bo->rdev->gem.mutex);
- }
+
trace_radeon_bo_create(bo);
+
return 0;
}
@@ -260,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
void radeon_bo_force_delete(struct radeon_device *rdev)
{
struct radeon_bo *bo, *n;
- struct drm_gem_object *gobj;
if (list_empty(&rdev->gem.objects)) {
return;
@@ -268,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
dev_err(rdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
mutex_lock(&rdev->ddev->struct_mutex);
- gobj = bo->gobj;
dev_err(rdev->dev, "%p %p %lu %lu force free\n",
- gobj, bo, (unsigned long)gobj->size,
- *((unsigned long *)&gobj->refcount));
+ &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+ *((unsigned long *)&bo->gem_base.refcount));
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
- radeon_bo_unref(&bo);
- gobj->driver_private = NULL;
- drm_gem_object_unreference(gobj);
+ /* this should unref the ttm bo */
+ drm_gem_object_unreference(&bo->gem_base);
mutex_unlock(&rdev->ddev->struct_mutex);
}
}
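
The lifetime pairing that replaces the gobj bookkeeping: drm_gem_object_init() now runs right after the kzalloc() in radeon_bo_create(), and the matching drm_gem_object_release() sits in the TTM destructor, so dropping the last GEM reference tears the TTM object down too. Condensed (hypothetical function name; the real destructor is radeon_ttm_bo_destroy above):

	static void example_bo_destroy(struct ttm_buffer_object *tbo)
	{
		struct radeon_bo *bo =
			container_of(tbo, struct radeon_bo, tbo);

		/* balances the drm_gem_object_init() done at creation */
		drm_gem_object_release(&bo->gem_base);
		kfree(bo);
	}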
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 22d4c237dea5..ede6c13628f2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -87,7 +87,7 @@ static inline void radeon_bo_unreserve(struct radeon_bo *bo)
* Returns current GPU offset of the object.
*
* Note: object should either be pinned or reserved when calling this
- * function, it might be usefull to add check for this for debugging.
+ * function, it might be useful to add a check for this for debugging.
*/
static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
{
@@ -137,10 +137,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
}
extern int radeon_bo_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj, unsigned long size,
- int byte_align,
- bool kernel, u32 domain,
- struct radeon_bo **bo_ptr);
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 2aed03bde4b2..86eda1ea94df 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,6 +23,7 @@
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
+#include "atom.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
@@ -365,12 +366,14 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
else if (strncmp("high", buf, strlen("high")) == 0)
rdev->pm.profile = PM_PROFILE_HIGH;
else {
- DRM_ERROR("invalid power profile!\n");
+ count = -EINVAL;
goto fail;
}
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
- }
+ } else
+ count = -EINVAL;
+
fail:
mutex_unlock(&rdev->pm.mutex);
@@ -413,7 +416,7 @@ static ssize_t radeon_set_pm_method(struct device *dev,
mutex_unlock(&rdev->pm.mutex);
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
} else {
- DRM_ERROR("invalid power method!\n");
+ count = -EINVAL;
goto fail;
}
radeon_pm_compute_clocks(rdev);
@@ -533,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev)
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
@@ -546,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -583,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev)
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 06e79822a2bf..c6776e48fdde 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -151,7 +151,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, 64);
if (r) {
- DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
+ DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
radeon_ring_ib_execute(rdev, ib);
@@ -175,7 +175,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
- r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
if (r) {
@@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
void radeon_ring_free_size(struct radeon_device *rdev)
{
if (rdev->wb.enabled)
- rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
+ rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
else {
if (rdev->family >= CHIP_R600)
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->cp.ring_obj);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 4ae5a3d1074e..92e7ea73b7c5 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -980,7 +980,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
}
/* hyper z clear */
- /* no docs available, based on reverse engeneering by Stephane Marchesin */
+ /* no docs available, based on reverse engineering by Stephane Marchesin */
if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
&& (flags & RADEON_CLEAR_FASTZ)) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 5b44f652145c..dee4a0c1b4b2 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e5b2cf10cbf4..60125ddba1e9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
&rdev->stollen_vga_memory);
if (r) {
@@ -589,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev)
DRM_INFO("radeon: ttm finalized\n");
}
+/* this should only be called at bootup or when userspace
+ * isn't running */
+void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+{
+ struct ttm_mem_type_manager *man;
+
+ if (!rdev->mman.initialized)
+ return;
+
+ man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+ /* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
+ man->size = size >> PAGE_SHIFT;
+}
+
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
@@ -647,6 +661,7 @@ struct radeon_ttm_backend {
unsigned long num_pages;
struct page **pages;
struct page *dummy_read_page;
+ dma_addr_t *dma_addrs;
bool populated;
bool bound;
unsigned offset;
@@ -655,12 +670,14 @@ struct radeon_ttm_backend {
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
unsigned long num_pages,
struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct radeon_ttm_backend *gtt;
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = pages;
+ gtt->dma_addrs = dma_addrs;
gtt->num_pages = num_pages;
gtt->dummy_read_page = dummy_read_page;
gtt->populated = true;
@@ -673,6 +690,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend)
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = NULL;
+ gtt->dma_addrs = NULL;
gtt->num_pages = 0;
gtt->dummy_read_page = NULL;
gtt->populated = false;
@@ -693,7 +711,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
gtt->num_pages, bo_mem, backend);
}
r = radeon_gart_bind(gtt->rdev, gtt->offset,
- gtt->num_pages, gtt->pages);
+ gtt->num_pages, gtt->pages, gtt->dma_addrs);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
gtt->num_pages, gtt->offset);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
new file mode 100644
index 000000000000..6334f8ac1209
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -0,0 +1,619 @@
+cayman 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089A8 VGT_COMPUTE_INDEX
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x000089B0 VGT_HS_OFFCHIP_PARAM
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x0002805C DB_DEPTH_SLICE
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028350 SX_MISC
+0x00028354 SX_SURFACE_SYNC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 SPI_GPR_MGMT
+0x000286FC SPI_LDS_MGMT
+0x00028700 SPI_STACK_MGMT
+0x00028704 SPI_WAVE_MGMT_1
+0x00028708 SPI_WAVE_MGMT_2
+0x00028724 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A70 IA_ENHANCE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AA8 IA_MULTI_VGT_PARAM
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028ABC DB_HTILE_SURFACE
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B90 VGT_GS_INSTANCE_CNT
+0x00028BD4 PA_SC_CENTROID_PRIORITY_0
+0x00028BD8 PA_SC_CENTROID_PRIORITY_1
+0x00028BDC PA_SC_LINE_CNTL
+0x00028BE4 PA_SU_VTX_CNTL
+0x00028BE8 PA_CL_GB_VERT_CLIP_ADJ
+0x00028BEC PA_CL_GB_VERT_DISC_ADJ
+0x00028BF0 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028BF4 PA_CL_GB_HORZ_DISC_ADJ
+0x00028BF8 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_0
+0x00028BFC PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_1
+0x00028C00 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_2
+0x00028C04 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_3
+0x00028C08 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_0
+0x00028C0C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_1
+0x00028C10 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_2
+0x00028C14 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_3
+0x00028C18 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_0
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_2
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_3
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_0
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_1
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_2
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
+0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
+0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
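
Editor's note: the list above, like the evergreen and r600 files patched below, is plain "0xOFFSET NAME" data. At build time, mkregtable turns each entry into one bit of a per-ASIC safe-register bitmap that the radeon command-stream checker consults before letting userspace touch a register. A minimal userspace sketch of that idea (sizing and names here are illustrative, not the kernel's):

#include <stdio.h>

#define MAX_OFFSET 0x40000 /* illustrative bound covering the list above */

int main(void)
{
	static unsigned bm[MAX_OFFSET / 4 / 32]; /* one bit per dword register */
	char line[128], name[64];
	unsigned offset;

	while (fgets(line, sizeof(line), stdin)) {
		if (sscanf(line, "0x%x %63s", &offset, name) != 2)
			continue; /* skips the "<asic> <size>" header line */
		offset >>= 2;                           /* dword index */
		bm[offset >> 5] |= 1u << (offset & 31); /* mark register safe */
	}
	/* a CS checker then tests bm[reg >> 7] & (1 << ((reg >> 2) & 31)) */
	return 0;
}
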
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 9177f9191837..7e1637176e08 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -1,4 +1,5 @@
evergreen 0x9400
+0x0000802C GRBM_GFX_INDEX
0x00008040 WAIT_UNTIL
0x00008044 WAIT_UNTIL_POLL_CNTL
0x00008048 WAIT_UNTIL_POLL_MASK
@@ -220,6 +221,7 @@ evergreen 0x9400
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
0x00028350 SX_MISC
+0x00028354 SX_SURFACE_SYNC
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index af0da4ae3f55..92f1900dc7ca 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -708,6 +708,7 @@ r600 0x9400
0x00028D0C DB_RENDER_CONTROL
0x00028D10 DB_RENDER_OVERRIDE
0x0002880C DB_SHADER_CONTROL
+0x00028D28 DB_SRESULTS_COMPARE_STATE0
0x00028D2C DB_SRESULTS_COMPARE_STATE1
0x00028430 DB_STENCILREFMASK
0x00028434 DB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c76283d9eb3d..aa6a66eeb4ec 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -412,12 +412,12 @@ static int rs400_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5afe294ed51f..6e3b11e5abbe 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -48,17 +48,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev);
void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
- u32 tmp;
-
- /* make sure flip is at vb rather than hb */
- tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
- tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
- WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
-
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
-
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
@@ -125,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
udelay(voltage->delay);
}
} else if (voltage->type == VOLTAGE_VDDC)
- radeon_atom_set_voltage(rdev, voltage->vddc_id);
+ radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
@@ -751,7 +740,6 @@ void rs600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
@@ -866,12 +854,12 @@ static int rs600_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 6638c8e4c81b..a9049ed1a519 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
@@ -628,12 +627,12 @@ static int rs690_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 64b57af93714..6613ee9ecca3 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -398,12 +398,12 @@ static int rv515_startup(struct radeon_device *rdev)
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d8ba67690656..ef8a5babe9f7 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
if (voltage->voltage != rdev->pm.current_vddc) {
- radeon_atom_set_voltage(rdev, voltage->voltage);
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
}
@@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
*/
void r700_cp_stop(struct radeon_device *rdev)
{
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
}
@@ -1003,7 +1003,7 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
u64 gpu_addr;
if (rdev->vram_scratch.robj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->vram_scratch.robj);
if (r) {
@@ -1123,7 +1123,6 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
@@ -1210,7 +1209,7 @@ int rv770_resume(struct radeon_device *rdev)
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
@@ -1256,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev)
{
int r;
- r = radeon_dummy_page_init(rdev);
- if (r)
- return r;
/* This don't do much */
r = radeon_gem_init(rdev);
if (r)
@@ -1373,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
- radeon_dummy_page_fini(rdev);
}
static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
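
Editor's note: the r700_cp_stop() hunk above replaces a raw write of mc.active_vram_size with a helper, so the size change flows through TTM rather than a bare struct field. A plausible shape for such a helper, sketched from the call site (the real one lives in radeon_ttm.c; treat everything beyond the call itself as an assumption):

static void radeon_ttm_set_active_vram_size(struct radeon_device *rdev,
					    u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return; /* nothing to resize before TTM is brought up */

	/* TTM accounts VRAM in pages; clamping the manager here is what
	 * the removed mc.active_vram_size assignments used to do by hand */
	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	man->size = size >> PAGE_SHIFT;
}
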
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index fa64d25d4248..6464490b240b 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -55,11 +55,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -68,15 +63,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver savage_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init savage_init(void)
{
driver.num_ioctls = savage_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &savage_pci_driver);
}
static void __exit savage_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &savage_pci_driver);
}
module_init(savage_init);
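
Editor's note: the savage conversion above sets the pattern repeated for sis, tdfx, via and vmwgfx below: struct drm_driver no longer embeds a pci_driver, so each driver declares its own and hands both to drm_pci_init()/drm_pci_exit(). As a generic template (placeholder names):

static struct pci_driver foo_pci_driver = {
	.name = DRIVER_NAME,   /* the same fields that used to sit inside */
	.id_table = pciidlist, /* the drm_driver's .pci_driver member */
};

static int __init foo_init(void)
{
	return drm_pci_init(&driver, &foo_pci_driver);
}

static void __exit foo_exit(void)
{
	drm_pci_exit(&driver, &foo_pci_driver);
}

Decoupling the bus glue this way lets the DRM core register non-PCI devices through matching platform and USB helpers instead of assuming every DRM device is a PCI function.
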
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4caf5d01cfd3..46d5be6e97e5 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -82,10 +82,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -95,15 +91,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver sis_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init sis_init(void)
{
driver.num_ioctls = sis_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &sis_pci_driver);
}
static void __exit sis_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &sis_pci_driver);
}
module_init(sis_init);
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index b70fa91d761a..8bf98810a8d6 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -52,10 +52,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -65,14 +61,19 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver tdfx_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init tdfx_init(void)
{
- return drm_init(&driver);
+ return drm_pci_init(&driver, &tdfx_pci_driver);
}
static void __exit tdfx_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &tdfx_pci_driver);
}
module_init(tdfx_init);
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index f999e36f30b4..1c4a72f681c1 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -47,7 +47,8 @@ struct ttm_agp_backend {
static int ttm_agp_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct ttm_agp_backend *agp_be =
container_of(backend, struct ttm_agp_backend, backend);
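
Editor's note: ttm_agp_populate() gains the dma_addrs argument only to satisfy the widened populate signature; AGP (and vmwgfx below) ignore it. A hedged sketch of how a DMA-aware backend could consume it (the struct and field names are illustrative):

static int example_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page,
			    dma_addr_t *dma_addrs)
{
	struct example_backend *be =
		container_of(backend, struct example_backend, backend);
	unsigned long i;

	be->pages = pages;
	/* per-page bus addresses, filled in by the page allocator when
	 * the pages came from a DMA pool; usable for building GPU page
	 * tables without a separate mapping step */
	for (i = 0; i < num_pages; i++)
		be->dma[i] = dma_addrs[i];
	return 0;
}
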
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index af61fc29e843..2e618b5ac465 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -406,11 +406,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
bo->mem = *mem;
mem->mm_node = NULL;
goto moved;
}
-
}
if (bdev->driver->move_notify)
@@ -1167,7 +1168,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
uint32_t page_alignment,
unsigned long buffer_start,
bool interruptible,
- struct file *persistant_swap_storage,
+ struct file *persistent_swap_storage,
size_t acc_size,
void (*destroy) (struct ttm_buffer_object *))
{
@@ -1210,7 +1211,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
bo->seq_valid = false;
- bo->persistant_swap_storage = persistant_swap_storage;
+ bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
atomic_inc(&bo->glob->bo_count);
@@ -1259,7 +1260,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
uint32_t page_alignment,
unsigned long buffer_start,
bool interruptible,
- struct file *persistant_swap_storage,
+ struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
@@ -1281,7 +1282,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
buffer_start, interruptible,
- persistant_swap_storage, acc_size, NULL);
+ persistent_swap_storage, acc_size, NULL);
if (likely(ret == 0))
*p_bo = bo;
@@ -1862,7 +1863,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
if (bo->bdev->driver->swap_notify)
bo->bdev->driver->swap_notify(bo);
- ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
+ ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:
/**
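
Editor's note: the ttm_bo_handle_move_mem() hunk above is a behavioural change, not a rename: drivers now receive a move_notify() callback even on the short-circuit path into TTM_PL_SYSTEM. A driver hook written against that contract might look like this sketch (example_unmap_from_gpu() is a hypothetical helper):

static void example_move_notify(struct ttm_buffer_object *bo,
				struct ttm_mem_reg *new_mem)
{
	/* the new placement is plain system pages, so GPU-side mappings
	 * must be invalidated; this path is notified too now */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		example_unmap_from_gpu(bo);
}
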
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b1e02fffd3cc..9d9d92945f8c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -38,6 +38,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <asm/atomic.h>
@@ -662,7 +663,8 @@ out:
* cached pages.
*/
int ttm_get_pages(struct list_head *pages, int flags,
- enum ttm_caching_state cstate, unsigned count)
+ enum ttm_caching_state cstate, unsigned count,
+ dma_addr_t *dma_address)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
@@ -720,7 +722,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
- ttm_put_pages(pages, 0, flags, cstate);
+ ttm_put_pages(pages, 0, flags, cstate, NULL);
return r;
}
}
@@ -731,7 +733,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
- enum ttm_caching_state cstate)
+ enum ttm_caching_state cstate, dma_addr_t *dma_address)
{
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
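
Editor's note: both pool entry points now thread a dma_addr_t array alongside the page list. Callers that track bus addresses pass one slot per page; NULL stays legal where nothing is tracked, as in the failure path above. The caller-side contract, sketched after the shape of __ttm_tt_get_page() in the next file:

/* allocate one page into ttm->pages[index], recording its bus address */
static int example_alloc_one(struct ttm_tt *ttm, int index)
{
	struct list_head h;
	int ret;

	INIT_LIST_HEAD(&h);
	ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
			    &ttm->dma_address[index]);
	if (ret)
		return ret;
	ttm->pages[index] = list_first_entry(&h, struct page, lru);
	return 0;
}
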
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index af789dc869b9..90e23e0bfadb 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -49,12 +49,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
+ ttm->dma_address = drm_calloc_large(ttm->num_pages,
+ sizeof(*ttm->dma_address));
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
drm_free_large(ttm->pages);
ttm->pages = NULL;
+ drm_free_large(ttm->dma_address);
+ ttm->dma_address = NULL;
}
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
@@ -105,7 +109,8 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
INIT_LIST_HEAD(&h);
- ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+ &ttm->dma_address[index]);
if (ret != 0)
return NULL;
@@ -164,7 +169,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
}
be->func->populate(be, ttm->num_pages, ttm->pages,
- ttm->dummy_read_page);
+ ttm->dummy_read_page, ttm->dma_address);
ttm->state = tt_unbound;
return 0;
}
@@ -298,7 +303,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
count++;
}
}
- ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
+ ttm->dma_address);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
@@ -326,7 +332,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm_tt_free_page_directory(ttm);
}
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
fput(ttm->swap_storage);
@@ -497,7 +503,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
page_cache_release(from_page);
}
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
fput(swap_storage);
ttm->swap_storage = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
@@ -508,7 +514,7 @@ out_err:
return ret;
}
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
struct address_space *swap_space;
struct file *swap_storage;
@@ -534,7 +540,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
return 0;
}
- if (!persistant_swap_storage) {
+ if (!persistent_swap_storage) {
swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT,
0);
@@ -543,7 +549,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
return PTR_ERR(swap_storage);
}
} else
- swap_storage = persistant_swap_storage;
+ swap_storage = persistent_swap_storage;
swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
@@ -571,12 +577,12 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
ttm_tt_free_alloced_pages(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
- if (persistant_swap_storage)
- ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
+ if (persistent_swap_storage)
+ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
return 0;
out_err:
- if (!persistant_swap_storage)
+ if (!persistent_swap_storage)
fput(swap_storage);
return ret;
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index e1ff4e7a6eb0..920a55214bcf 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -62,10 +62,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -75,16 +71,21 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver via_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init via_init(void)
{
driver.num_ioctls = via_max_ioctl;
via_init_command_verifier();
- return drm_init(&driver);
+ return drm_pci_init(&driver, &via_pci_driver);
}
static void __exit via_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &via_pci_driver);
}
module_init(via_init);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 80bc37b274e7..87e43e0733bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -102,7 +102,8 @@ struct vmw_ttm_backend {
static int vmw_ttm_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 10ca97ee0206..96949b93d920 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -909,15 +909,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = VMWGFX_DRIVER_NAME,
- .id_table = vmw_pci_id_list,
- .probe = vmw_probe,
- .remove = vmw_remove,
- .driver = {
- .pm = &vmw_pm_ops
- }
- },
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
@@ -926,6 +917,16 @@ static struct drm_driver driver = {
.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
+static struct pci_driver vmw_pci_driver = {
+ .name = VMWGFX_DRIVER_NAME,
+ .id_table = vmw_pci_id_list,
+ .probe = vmw_probe,
+ .remove = vmw_remove,
+ .driver = {
+ .pm = &vmw_pm_ops
+ }
+};
+
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
@@ -934,7 +935,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __init vmwgfx_init(void)
{
int ret;
- ret = drm_init(&driver);
+ ret = drm_pci_init(&driver, &vmw_pci_driver);
if (ret)
DRM_ERROR("Failed initializing DRM.\n");
return ret;
@@ -942,7 +943,7 @@ static int __init vmwgfx_init(void)
static void __exit vmwgfx_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &vmw_pci_driver);
}
module_init(vmwgfx_init);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index cceeb42789b6..dfe32e62bd90 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -245,7 +245,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
/* TODO handle none page aligned offsets */
/* TODO handle partial uploads and pitch != 256 */
/* TODO handle more then one copy (size != 64) */
- DRM_ERROR("lazy programer, cant handle wierd stuff\n");
+ DRM_ERROR("lazy programmer, can't handle weird stuff\n");
return;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 29113c9b26a8..b3a2cd5118d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -345,7 +345,7 @@ static enum drm_connector_status
return connector_status_disconnected;
}
-static struct drm_display_mode vmw_ldu_connector_builtin[] = {
+static const struct drm_display_mode vmw_ldu_connector_builtin[] = {
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
@@ -429,7 +429,6 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
- struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -459,6 +458,8 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
}
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
+ const struct drm_display_mode *bmode;
+
bmode = &vmw_ldu_connector_builtin[i];
if (bmode->hdisplay > max_width ||
bmode->vdisplay > max_height)
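
Editor's note: constifying vmw_ldu_connector_builtin[] moves the mode table into rodata, and that is why bmode is redeclared inside the loop as a pointer-to-const: C only lets a const-qualified pointer walk a const array. In miniature:

static const int table[] = { 1, 2, 3 };

static int sum_table(void)
{
	int s = 0;
	unsigned i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const int *p = &table[i]; /* plain "int *p" here draws a
					   * compiler warning (an error
					   * in C++) */
		s += *p;
	}
	return s;
}
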
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 70e60a4bb678..419917955bf6 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -5,6 +5,7 @@ config STUB_POULSBO
# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
+ select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select THERMAL if ACPI
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index ace2b1623b21..be8d4cb5861c 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -151,7 +151,7 @@ static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
static void vga_check_first_use(void)
{
/* we should inform all GPUs in the system that
- * VGA arb has occured and to try and disable resources
+ * VGA arb has occurred and to try and disable resources
* if they can */
if (!vga_arbiter_used) {
vga_arbiter_used = true;
@@ -774,7 +774,7 @@ static ssize_t vga_arb_read(struct file *file, char __user * buf,
*/
spin_lock_irqsave(&vga_lock, flags);
- /* If we are targetting the default, use it */
+ /* If we are targeting the default, use it */
pdev = priv->target;
if (pdev == NULL || pdev == PCI_INVALID_CARD) {
spin_unlock_irqrestore(&vga_lock, flags);