Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  121
1 file changed, 35 insertions, 86 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 441e2502b889..de8c0747aaef 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -853,6 +853,9 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
                             GEN6_WIZ_HASHING_MASK,
                             GEN6_WIZ_HASHING_16x4);
 
+        /* WaProgramL3SqcReg1Default:bdw */
+        WA_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+
         return 0;
 }
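For reference, WA_WRITE() does not poke the register immediately: the WA_* helpers in this file append to the per-device workaround list, which is replayed from the ring when it is (re)initialised. A sketch of the pattern, abridged from the helper macros defined earlier in this file (illustrative, not verbatim):

/*
 * Illustrative sketch: wa_add() appends {addr, mask, val} to
 * dev_priv->workarounds, which is later emitted as LRI commands.
 */
#define WA_REG(addr, mask, val) { \
                const int r = wa_add(dev_priv, (addr), (mask), (val)); \
                if (r) \
                        return r; \
        }

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

The new BDW write therefore lands in the same deferred list as the masked workarounds around it.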
@@ -966,6 +969,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
         WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                           GEN9_CCS_TLB_PREFETCH_ENABLE);
 
+        /*
+         * FIXME: don't apply the following on BXT for stepping C. On BXT A0
+         * the flag reads back as 0.
+         */
+        /* WaDisableMaskBasedCammingInRCC:sklC,bxtA */
+        if (INTEL_REVID(dev) == SKL_REVID_C0 || IS_BROXTON(dev))
+                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
+                                  PIXEL_MASK_CAMMING_DISABLE);
+
         return 0;
 }
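SLICE_ECO_CHICKEN0 is written via WA_SET_BIT_MASKED(), i.e. as a masked register: the upper 16 bits of the written value select which of the lower 16 bits actually change. The helpers behind that (from i915_reg.h as it stood at the time; shown here as a reference sketch):

/* Upper half of the write enables the corresponding lower-half bits. */
#define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)  ((a) << 16)

The FIXME above records that on BXT A0 the flag nevertheless reads back as 0, i.e. the write does not appear to stick on that stepping.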
@@ -1027,6 +1039,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
         return skl_tune_iz_hashing(ring);
 }
 
+static int bxt_init_workarounds(struct intel_engine_cs *ring)
+{
+        gen9_init_workarounds(ring);
+
+        return 0;
+}
+
 int init_workarounds_ring(struct intel_engine_cs *ring)
 {
         struct drm_device *dev = ring->dev;
@@ -1044,8 +1063,9 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
 
         if (IS_SKYLAKE(dev))
                 return skl_init_workarounds(ring);
-        else if (IS_GEN9(dev))
-                return gen9_init_workarounds(ring);
+
+        if (IS_BROXTON(dev))
+                return bxt_init_workarounds(ring);
 
         return 0;
 }
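With the else-if chain flattened into independent early returns, each platform keeps its own entry point and adding further gen9 variants stays a two-line change. Reconstructed from the hunk above, the tail of init_workarounds_ring() now reads:

        if (IS_SKYLAKE(dev))
                return skl_init_workarounds(ring);

        if (IS_BROXTON(dev))
                return bxt_init_workarounds(ring);

        return 0;
}

Note that bxt_init_workarounds() as added above discards the return value of gen9_init_workarounds(), so a wa_add() failure (the workaround list filling up) would be silently ignored on BXT.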
@@ -1972,6 +1992,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
         INIT_LIST_HEAD(&ring->active_list);
         INIT_LIST_HEAD(&ring->request_list);
         INIT_LIST_HEAD(&ring->execlist_queue);
+        i915_gem_batch_pool_init(dev, &ring->batch_pool);
         ringbuf->size = 32 * PAGE_SIZE;
         ringbuf->ring = ring;
         memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
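The batch pool thereby gains a per-ring lifetime: set up here next to the other per-engine lists and released in intel_cleanup_ring_buffer() in the following hunk. This presumably pairs with a new member in struct intel_engine_cs over in intel_ringbuffer.h, which is not part of this diff; a hypothetical sketch of what the init/fini calls imply:

/* Hypothetical: the member implied by the init/fini calls in this diff. */
struct intel_engine_cs {
        /* ... existing fields (active_list, request_list, execlist_queue) ... */
        struct i915_gem_batch_pool batch_pool;
        /* ... */
};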
@@ -2050,28 +2071,29 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
         cleanup_status_page(ring);
 
         i915_cmd_parser_fini_ring(ring);
+        i915_gem_batch_pool_fini(&ring->batch_pool);
 
         kfree(ringbuf);
         ring->buffer = NULL;
 }
 
-static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 {
         struct intel_ringbuffer *ringbuf = ring->buffer;
         struct drm_i915_gem_request *request;
-        int ret;
+        int ret, new_space;
 
         if (intel_ring_space(ringbuf) >= n)
                 return 0;
 
         list_for_each_entry(request, &ring->request_list, list) {
-                if (__intel_ring_space(request->postfix, ringbuf->tail,
-                                       ringbuf->size) >= n) {
+                new_space = __intel_ring_space(request->postfix, ringbuf->tail,
+                                               ringbuf->size);
+                if (new_space >= n)
                         break;
-                }
         }
 
-        if (&request->list == &ring->request_list)
+        if (WARN_ON(&request->list == &ring->request_list))
                 return -ENOSPC;
 
         ret = i915_wait_request(request);
@@ -2080,59 +2102,9 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 
         i915_gem_retire_requests_ring(ring);
 
-        return 0;
-}
-
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
-        struct drm_device *dev = ring->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct intel_ringbuffer *ringbuf = ring->buffer;
-        unsigned long end;
-        int ret;
-
-        ret = intel_ring_wait_request(ring, n);
-        if (ret != -ENOSPC)
-                return ret;
-
-        /* force the tail write in case we have been skipping them */
-        __intel_ring_advance(ring);
-
-        /* With GEM the hangcheck timer should kick us out of the loop,
-         * leaving it early runs the risk of corrupting GEM state (due
-         * to running on almost untested codepaths). But on resume
-         * timers don't work yet, so prevent a complete hang in that
-         * case by choosing an insanely large timeout. */
-        end = jiffies + 60 * HZ;
-
-        ret = 0;
-        trace_i915_ring_wait_begin(ring);
-        do {
-                if (intel_ring_space(ringbuf) >= n)
-                        break;
-                ringbuf->head = I915_READ_HEAD(ring);
-                if (intel_ring_space(ringbuf) >= n)
-                        break;
-
-                msleep(1);
-
-                if (dev_priv->mm.interruptible && signal_pending(current)) {
-                        ret = -ERESTARTSYS;
-                        break;
-                }
-
-                ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-                                           dev_priv->mm.interruptible);
-                if (ret)
-                        break;
+        WARN_ON(intel_ring_space(ringbuf) < new_space);
 
-                if (time_after(jiffies, end)) {
-                        ret = -EBUSY;
-                        break;
-                }
-        } while (1);
-        trace_i915_ring_wait_end(ring);
-        return ret;
+        return 0;
 }
 
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
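Stitching the two hunks together: the surviving wait path no longer polls I915_READ_HEAD() under msleep() with a 60-second timeout; it finds the oldest request whose retirement frees enough space and waits on that. The resulting function, reconstructed from the hunks above (one error check that git elided between them is marked as a comment):

static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;
        struct drm_i915_gem_request *request;
        int ret, new_space;

        if (intel_ring_space(ringbuf) >= n)
                return 0;

        /* Find the first request whose retirement frees at least n bytes. */
        list_for_each_entry(request, &ring->request_list, list) {
                new_space = __intel_ring_space(request->postfix, ringbuf->tail,
                                               ringbuf->size);
                if (new_space >= n)
                        break;
        }

        /*
         * Falling off the end of the list means no outstanding request
         * frees enough space: the caller asked for more than the ring
         * can ever hold, which is now a programming error, not -EBUSY.
         */
        if (WARN_ON(&request->list == &ring->request_list))
                return -ENOSPC;

        ret = i915_wait_request(request);
        /* ... error check elided between the two hunks ... */

        i915_gem_retire_requests_ring(ring);

        /* Retiring up to 'request' must free the space computed above. */
        WARN_ON(intel_ring_space(ringbuf) < new_space);

        return 0;
}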
@@ -2181,32 +2153,9 @@ int intel_ring_idle(struct intel_engine_cs *ring)
         return i915_wait_request(req);
 }
 
-static int
-intel_ring_alloc_request(struct intel_engine_cs *ring)
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-        int ret;
-        struct drm_i915_gem_request *request;
-        struct drm_i915_private *dev_private = ring->dev->dev_private;
-
-        if (ring->outstanding_lazy_request)
-                return 0;
-
-        request = kzalloc(sizeof(*request), GFP_KERNEL);
-        if (request == NULL)
-                return -ENOMEM;
-
-        kref_init(&request->ref);
-        request->ring = ring;
-        request->ringbuf = ring->buffer;
-        request->uniq = dev_private->request_uniq++;
-
-        ret = i915_gem_get_seqno(ring->dev, &request->seqno);
-        if (ret) {
-                kfree(request);
-                return ret;
-        }
-
-        ring->outstanding_lazy_request = request;
+        request->ringbuf = request->ring->buffer;
 
         return 0;
 }
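All the generic allocation work (kzalloc, kref_init, seqno and uniq assignment, the outstanding-lazy-request bookkeeping) moves out of this file into a shared i915_gem_request_alloc(); only the ring-specific step stays behind. Reconstructed from the hunk, the replacement in full:

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
        /*
         * The only step specific to the legacy ringbuffer path: point the
         * request at the engine's global ring. (The execlists equivalent
         * would presumably install a per-context ringbuffer instead.)
         */
        request->ringbuf = request->ring->buffer;

        return 0;
}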
@@ -2247,7 +2196,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
                 return ret;
 
         /* Preallocate the olr before touching the ring */
-        ret = intel_ring_alloc_request(ring);
+        ret = i915_gem_request_alloc(ring, ring->default_context);
         if (ret)
                 return ret;
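i915_gem_request_alloc() itself is not part of this diff; judging from the call site above, its prototype would be along these lines (an assumption; most likely declared in i915_drv.h):

/* Assumed prototype, inferred from the call site above; not in this diff. */
int i915_gem_request_alloc(struct intel_engine_cs *ring,
                           struct intel_context *ctx);

Passing ring->default_context keeps the legacy path effectively context-agnostic, matching the removed intel_ring_alloc_request(), which never consulted a context.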