-rw-r--r--  Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml | 7
-rw-r--r--  Documentation/gpu/drm-kms-helpers.rst | 12
-rw-r--r--  Documentation/gpu/todo.rst | 17
-rw-r--r--  Documentation/locking/ww-mutex-design.rst | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 152
-rw-r--r--  drivers/dma-buf/dma-resv.c | 418
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 25
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_mm.c | 27
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8640.c | 274
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 25
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 71
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 148
-rw-r--r--  drivers/gpu/drm/drm_format_helper.c | 88
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 39
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 81
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 119
-rw-r--r--  drivers/gpu/drm/gma500/backlight.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 24
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 22
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 16
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/gma_device.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 14
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 18
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/intel_gmbus.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/mid_bios.c | 11
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 20
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 18
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 14
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/opregion.c | 14
-rw-r--r--  drivers/gpu/drm/gma500/power.c | 20
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 16
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 147
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 24
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 31
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 26
-rw-r--r--  drivers/gpu/drm/gma500/psb_lid.c | 2
-rw-r--r--  drivers/gpu/drm/gud/gud_drv.c | 6
-rw-r--r--  drivers/gpu/drm/gud/gud_internal.h | 12
-rw-r--r--  drivers/gpu/drm/gud/gud_pipe.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mm.c | 35
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 743
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 15
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 54
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 10
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h | 24
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 390
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.c | 5
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_ttm.c | 17
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 44
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 28
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c | 30
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 195
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 26
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 19
-rw-r--r--  include/drm/drm_bridge.h | 23
-rw-r--r--  include/drm/drm_dp_helper.h | 20
-rw-r--r--  include/drm/drm_edid.h | 14
-rw-r--r--  include/drm/drm_format_helper.h | 4
-rw-r--r--  include/drm/drm_mipi_dsi.h | 4
-rw-r--r--  include/drm/drm_print.h | 30
-rw-r--r--  include/drm/drm_probe_helper.h | 1
-rw-r--r--  include/drm/gud.h | 6
-rw-r--r--  include/drm/ttm/ttm_device.h | 2
-rw-r--r--  include/drm/ttm/ttm_tt.h | 82
-rw-r--r--  include/linux/dma-buf.h | 2
-rw-r--r--  include/linux/dma-resv.h | 95
-rw-r--r--  include/linux/io.h | 5
-rw-r--r--  include/uapi/drm/drm_mode.h | 3
-rw-r--r--  include/uapi/drm/v3d_drm.h | 78
-rw-r--r--  include/uapi/drm/virtgpu_drm.h | 27
-rw-r--r--  include/uapi/linux/virtio_gpu.h | 18
-rw-r--r--  lib/devres.c | 82
99 files changed, 3148 insertions(+), 1131 deletions(-)
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
index b87a2e28c866..a2384bd74cf2 100644
--- a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
+++ b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
@@ -26,6 +26,10 @@ properties:
- auo,b101uan08.3
# BOE TV105WUM-NW0 10.5" WUXGA TFT LCD panel
- boe,tv105wum-nw0
+ # BOE TV110C9M-LL3 10.95" WUXGA TFT LCD panel
+ - boe,tv110c9m-ll3
+ # INX HJ110IZ-01A 10.95" WUXGA TFT LCD panel
+ - innolux,hj110iz-01a
reg:
description: the virtual channel number of a DSI peripheral
@@ -36,6 +40,9 @@ properties:
pp1800-supply:
description: core voltage supply
+ pp3300-supply:
+ description: core voltage supply
+
avdd-supply:
description: phandle of the regulator that provides positive voltage
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 389892f36185..ec2f65b31930 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -151,6 +151,18 @@ Overview
.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
:doc: overview
+Display Driver Integration
+--------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+ :doc: display driver integration
+
+Special Care with MIPI-DSI bridges
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+ :doc: special care dsi
+
Bridge Operations
-----------------
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 12e61869939e..6613543955e9 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -353,23 +353,6 @@ converted, except for struct drm_driver.gem_prime_mmap.
Level: Intermediate
-Use DRM_MODESET_LOCK_ALL_* helpers instead of boilerplate
----------------------------------------------------------
-
-For cases where drivers are attempting to grab the modeset locks with a local
-acquire context. Replace the boilerplate code surrounding
-drm_modeset_lock_all_ctx() with DRM_MODESET_LOCK_ALL_BEGIN() and
-DRM_MODESET_LOCK_ALL_END() instead.
-
-This should also be done for all places where drm_modeset_lock_all() is still
-used.
-
-As a reference, take a look at the conversions already completed in drm core.
-
-Contact: Sean Paul, respective driver maintainers
-
-Level: Starter
-
Rename CMA helpers to DMA helpers
---------------------------------
diff --git a/Documentation/locking/ww-mutex-design.rst b/Documentation/locking/ww-mutex-design.rst
index 6a4d7319f8f0..6a8f8beb9ec4 100644
--- a/Documentation/locking/ww-mutex-design.rst
+++ b/Documentation/locking/ww-mutex-design.rst
@@ -60,7 +60,7 @@ Concepts
Compared to normal mutexes two additional concepts/objects show up in the lock
interface for w/w mutexes:
-Acquire context: To ensure eventual forward progress it is important the a task
+Acquire context: To ensure eventual forward progress it is important that a task
trying to acquire locks doesn't grab a new reservation id, but keeps the one it
acquired when starting the lock acquisition. This ticket is stored in the
acquire context. Furthermore the acquire context keeps track of debugging state
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 474de2d988ca..61e20ae7b08b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -74,7 +74,7 @@ static void dma_buf_release(struct dentry *dentry)
* If you hit this BUG() it means someone dropped their ref to the
* dma-buf while still having pending operation to the buffer.
*/
- BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+ BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
dma_buf_stats_teardown(dmabuf);
dmabuf->ops->release(dmabuf);
@@ -206,16 +206,55 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
wake_up_locked_poll(dcb->poll, dcb->active);
dcb->active = 0;
spin_unlock_irqrestore(&dcb->poll->lock, flags);
+ dma_fence_put(fence);
+}
+
+static bool dma_buf_poll_shared(struct dma_resv *resv,
+ struct dma_buf_poll_cb_t *dcb)
+{
+ struct dma_resv_list *fobj = dma_resv_shared_list(resv);
+ struct dma_fence *fence;
+ int i, r;
+
+ if (!fobj)
+ return false;
+
+ for (i = 0; i < fobj->shared_count; ++i) {
+ fence = rcu_dereference_protected(fobj->shared[i],
+ dma_resv_held(resv));
+ dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+ if (!r)
+ return true;
+ dma_fence_put(fence);
+ }
+
+ return false;
+}
+
+static bool dma_buf_poll_excl(struct dma_resv *resv,
+ struct dma_buf_poll_cb_t *dcb)
+{
+ struct dma_fence *fence = dma_resv_excl_fence(resv);
+ int r;
+
+ if (!fence)
+ return false;
+
+ dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+ if (!r)
+ return true;
+ dma_fence_put(fence);
+
+ return false;
}
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct dma_resv *resv;
- struct dma_resv_list *fobj;
- struct dma_fence *fence_excl;
__poll_t events;
- unsigned shared_count, seq;
dmabuf = file->private_data;
if (!dmabuf || !dmabuf->resv)
@@ -229,101 +268,50 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
if (!events)
return 0;
-retry:
- seq = read_seqcount_begin(&resv->seq);
- rcu_read_lock();
-
- fobj = rcu_dereference(resv->fence);
- if (fobj)
- shared_count = fobj->shared_count;
- else
- shared_count = 0;
- fence_excl = dma_resv_excl_fence(resv);
- if (read_seqcount_retry(&resv->seq, seq)) {
- rcu_read_unlock();
- goto retry;
- }
-
- if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
- __poll_t pevents = EPOLLIN;
+ dma_resv_lock(resv, NULL);
- if (shared_count == 0)
- pevents |= EPOLLOUT;
+ if (events & EPOLLOUT) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
- if (dcb->active) {
- dcb->active |= pevents;
- events &= ~pevents;
- } else
- dcb->active = pevents;
+ if (dcb->active)
+ events &= ~EPOLLOUT;
+ else
+ dcb->active = EPOLLOUT;
spin_unlock_irq(&dmabuf->poll.lock);
- if (events & pevents) {
- if (!dma_fence_get_rcu(fence_excl)) {
- /* force a recheck */
- events &= ~pevents;
- dma_buf_poll_cb(NULL, &dcb->cb);
- } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
- dma_buf_poll_cb)) {
- events &= ~pevents;
- dma_fence_put(fence_excl);
- } else {
- /*
- * No callback queued, wake up any additional
- * waiters.
- */
- dma_fence_put(fence_excl);
+ if (events & EPOLLOUT) {
+ if (!dma_buf_poll_shared(resv, dcb) &&
+ !dma_buf_poll_excl(resv, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- }
+ else
+ events &= ~EPOLLOUT;
}
}
- if ((events & EPOLLOUT) && shared_count > 0) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
- int i;
+ if (events & EPOLLIN) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
- /* Only queue a new callback if no event has fired yet */
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
if (dcb->active)
- events &= ~EPOLLOUT;
+ events &= ~EPOLLIN;
else
- dcb->active = EPOLLOUT;
+ dcb->active = EPOLLIN;
spin_unlock_irq(&dmabuf->poll.lock);
- if (!(events & EPOLLOUT))
- goto out;
-
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
- if (!dma_fence_get_rcu(fence)) {
- /*
- * fence refcount dropped to zero, this means
- * that fobj has been freed
- *
- * call dma_buf_poll_cb and force a recheck!
- */
- events &= ~EPOLLOUT;
+ if (events & EPOLLIN) {
+ if (!dma_buf_poll_excl(resv, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- break;
- }
- if (!dma_fence_add_callback(fence, &dcb->cb,
- dma_buf_poll_cb)) {
- dma_fence_put(fence);
- events &= ~EPOLLOUT;
- break;
- }
- dma_fence_put(fence);
+ else
+ events &= ~EPOLLIN;
}
-
- /* No callback queued, wake up any additional waiters. */
- if (i == shared_count)
- dma_buf_poll_cb(NULL, &dcb->cb);
}
-out:
- rcu_read_unlock();
+ dma_resv_unlock(resv);
return events;
}
@@ -566,8 +554,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->owner = exp_info->owner;
spin_lock_init(&dmabuf->name_lock);
init_waitqueue_head(&dmabuf->poll);
- dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
- dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+ dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+ dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
if (!resv) {
resv = (struct dma_resv *)&dmabuf[1];
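The cb_excl/cb_shared to cb_in/cb_out rename above tracks the reworked poll() semantics: EPOLLIN is armed on the exclusive (write) fence only, while EPOLLOUT waits for every fence on the reservation object. A minimal userspace sketch of what that means for an exported dma-buf fd (not part of this series; the fd is assumed to come from some driver-specific export path):

#include <poll.h>

/* Wait until the buffer is safe to write into, i.e. all fences signaled. */
static int wait_for_dmabuf_writable(int dmabuf_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = POLLOUT,	/* POLLIN would wait for the write fence only */
	};
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret <= 0)
		return ret;		/* error or timeout, fences still pending */

	return (pfd.revents & POLLOUT) ? 1 : 0;
}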
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 84fbe60629e3..a480af9581bd 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -324,6 +324,106 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
EXPORT_SYMBOL(dma_resv_add_excl_fence);
/**
+ * dma_resv_iter_restart_unlocked - restart the unlocked iterator
+ * @cursor: The dma_resv_iter object to restart
+ *
+ * Restart the unlocked iteration by initializing the cursor object.
+ */
+static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
+{
+ cursor->seq = read_seqcount_begin(&cursor->obj->seq);
+ cursor->index = -1;
+ if (cursor->all_fences)
+ cursor->fences = dma_resv_shared_list(cursor->obj);
+ else
+ cursor->fences = NULL;
+ cursor->is_restarted = true;
+}
+
+/**
+ * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
+ * @cursor: cursor to record the current position
+ *
+ * Return all the fences in the dma_resv object which are not yet signaled.
+ * The returned fence has an extra local reference so it will stay alive.
+ * If a concurrent modification is detected, the whole iteration is started over again.
+ */
+static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
+{
+ struct dma_resv *obj = cursor->obj;
+
+ do {
+ /* Drop the reference from the previous round */
+ dma_fence_put(cursor->fence);
+
+ if (cursor->index == -1) {
+ cursor->fence = dma_resv_excl_fence(obj);
+ cursor->index++;
+ if (!cursor->fence)
+ continue;
+
+ } else if (!cursor->fences ||
+ cursor->index >= cursor->fences->shared_count) {
+ cursor->fence = NULL;
+ break;
+
+ } else {
+ struct dma_resv_list *fences = cursor->fences;
+ unsigned int idx = cursor->index++;
+
+ cursor->fence = rcu_dereference(fences->shared[idx]);
+ }
+ cursor->fence = dma_fence_get_rcu(cursor->fence);
+ if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
+ break;
+ } while (true);
+}
+
+/**
+ * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Returns the first fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
+{
+ rcu_read_lock();
+ do {
+ dma_resv_iter_restart_unlocked(cursor);
+ dma_resv_iter_walk_unlocked(cursor);
+ } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ rcu_read_unlock();
+
+ return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
+
+/**
+ * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Returns the next fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
+{
+ bool restart;
+
+ rcu_read_lock();
+ cursor->is_restarted = false;
+ restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
+ do {
+ if (restart)
+ dma_resv_iter_restart_unlocked(cursor);
+ dma_resv_iter_walk_unlocked(cursor);
+ restart = true;
+ } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ rcu_read_unlock();
+
+ return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
+
+/**
* dma_resv_copy_fences - Copy all fences from src to dst.
* @dst: the destination reservation object
* @src: the source reservation object
@@ -332,74 +432,54 @@ EXPORT_SYMBOL(dma_resv_add_excl_fence);
*/
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
- struct dma_resv_list *src_list, *dst_list;
- struct dma_fence *old, *new;
- unsigned int i;
+ struct dma_resv_iter cursor;
+ struct dma_resv_list *list;
+ struct dma_fence *f, *excl;
dma_resv_assert_held(dst);
- rcu_read_lock();
- src_list = dma_resv_shared_list(src);
-
-retry:
- if (src_list) {
- unsigned int shared_count = src_list->shared_count;
-
- rcu_read_unlock();
+ list = NULL;
+ excl = NULL;
- dst_list = dma_resv_list_alloc(shared_count);
- if (!dst_list)
- return -ENOMEM;
+ dma_resv_iter_begin(&cursor, src, true);
+ dma_resv_for_each_fence_unlocked(&cursor, f) {
- rcu_read_lock();
- src_list = dma_resv_shared_list(src);
- if (!src_list || src_list->shared_count > shared_count) {
- kfree(dst_list);
- goto retry;
- }
+ if (dma_resv_iter_is_restarted(&cursor)) {
+ dma_resv_list_free(list);
+ dma_fence_put(excl);
- dst_list->shared_count = 0;
- for (i = 0; i < src_list->shared_count; ++i) {
- struct dma_fence __rcu **dst;
- struct dma_fence *fence;
+ if (cursor.fences) {
+ unsigned int cnt = cursor.fences->shared_count;
- fence = rcu_dereference(src_list->shared[i]);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence->flags))
- continue;
+ list = dma_resv_list_alloc(cnt);
+ if (!list) {
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
+ }
- if (!dma_fence_get_rcu(fence)) {
- dma_resv_list_free(dst_list);
- src_list = dma_resv_shared_list(src);
- goto retry;
- }
+ list->shared_count = 0;
- if (dma_fence_is_signaled(fence)) {
- dma_fence_put(fence);
- continue;
+ } else {
+ list = NULL;
}
-
- dst = &dst_list->shared[dst_list->shared_count++];
- rcu_assign_pointer(*dst, fence);
+ excl = NULL;
}
- } else {
- dst_list = NULL;
- }
-
- new = dma_fence_get_rcu_safe(&src->fence_excl);
- rcu_read_unlock();
- src_list = dma_resv_shared_list(dst);
- old = dma_resv_excl_fence(dst);
+ dma_fence_get(f);
+ if (dma_resv_iter_is_exclusive(&cursor))
+ excl = f;
+ else
+ RCU_INIT_POINTER(list->shared[list->shared_count++], f);
+ }
+ dma_resv_iter_end(&cursor);
write_seqcount_begin(&dst->seq);
- /* write_seqcount_begin provides the necessary memory barrier */
- RCU_INIT_POINTER(dst->fence_excl, new);
- RCU_INIT_POINTER(dst->fence, dst_list);
+ excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
+ list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
write_seqcount_end(&dst->seq);
- dma_resv_list_free(src_list);
- dma_fence_put(old);
+ dma_resv_list_free(list);
+ dma_fence_put(excl);
return 0;
}
@@ -409,99 +489,61 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
* dma_resv_get_fences - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
- * @pfence_excl: the returned exclusive fence (or NULL)
- * @pshared_count: the number of shared fences returned
- * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * @fence_excl: the returned exclusive fence (or NULL)
+ * @shared_count: the number of shared fences returned
+ * @shared: the array of shared fence ptrs returned (array is krealloc'd to
* the required size, and must be freed by caller)
*
* Retrieve all fences from the reservation object. If the pointer for the
* exclusive fence is not specified the fence is put into the array of the
* shared fences as well. Returns either zero or -ENOMEM.
*/
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
- unsigned int *pshared_count,
- struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
+ unsigned int *shared_count, struct dma_fence ***shared)
{
- struct dma_fence **shared = NULL;
- struct dma_fence *fence_excl;
- unsigned int shared_count;
- int ret = 1;
-
- do {
- struct dma_resv_list *fobj;
- unsigned int i, seq;
- size_t sz = 0;
-
- shared_count = i = 0;
-
- rcu_read_lock();
- seq = read_seqcount_begin(&obj->seq);
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
- fence_excl = dma_resv_excl_fence(obj);
- if (fence_excl && !dma_fence_get_rcu(fence_excl))
- goto unlock;
+ *shared_count = 0;
+ *shared = NULL;
- fobj = dma_resv_shared_list(obj);
- if (fobj)
- sz += sizeof(*shared) * fobj->shared_max;
+ if (fence_excl)
+ *fence_excl = NULL;
- if (!pfence_excl && fence_excl)
- sz += sizeof(*shared);
+ dma_resv_iter_begin(&cursor, obj, true);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
- if (sz) {
- struct dma_fence **nshared;
+ if (dma_resv_iter_is_restarted(&cursor)) {
+ unsigned int count;
- nshared = krealloc(shared, sz,
- GFP_NOWAIT | __GFP_NOWARN);
- if (!nshared) {
- rcu_read_unlock();
+ while (*shared_count)
+ dma_fence_put((*shared)[--(*shared_count)]);
- dma_fence_put(fence_excl);
- fence_excl = NULL;
+ if (fence_excl)
+ dma_fence_put(*fence_excl);
- nshared = krealloc(shared, sz, GFP_KERNEL);
- if (nshared) {
- shared = nshared;
- continue;
- }
+ count = cursor.fences ? cursor.fences->shared_count : 0;
+ count += fence_excl ? 0 : 1;
- ret = -ENOMEM;
- break;
- }
- shared = nshared;
- shared_count = fobj ? fobj->shared_count : 0;
- for (i = 0; i < shared_count; ++i) {
- shared[i] = rcu_dereference(fobj->shared[i]);
- if (!dma_fence_get_rcu(shared[i]))
- break;
+ /* Eventually re-allocate the array */
+ *shared = krealloc_array(*shared, count,
+ sizeof(void *),
+ GFP_KERNEL);
+ if (count && !*shared) {
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
}
}
- if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
- while (i--)
- dma_fence_put(shared[i]);
- dma_fence_put(fence_excl);
- goto unlock;
- }
-
- ret = 0;
-unlock:
- rcu_read_unlock();
- } while (ret);
-
- if (pfence_excl)
- *pfence_excl = fence_excl;
- else if (fence_excl)
- shared[shared_count++] = fence_excl;
-
- if (!shared_count) {
- kfree(shared);
- shared = NULL;
+ dma_fence_get(fence);
+ if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
+ *fence_excl = fence;
+ else
+ (*shared)[(*shared_count)++] = fence;
}
+ dma_resv_iter_end(&cursor);
- *pshared_count = shared_count;
- *pshared = shared;
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
@@ -523,94 +565,25 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
unsigned long timeout)
{
long ret = timeout ? timeout : 1;
- unsigned int seq, shared_count;
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
- int i;
-
-retry:
- shared_count = 0;
- seq = read_seqcount_begin(&obj->seq);
- rcu_read_lock();
- i = -1;
-
- fence = dma_resv_excl_fence(obj);
- if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- if (!dma_fence_get_rcu(fence))
- goto unlock_retry;
-
- if (dma_fence_is_signaled(fence)) {
- dma_fence_put(fence);
- fence = NULL;
- }
-
- } else {
- fence = NULL;
- }
-
- if (wait_all) {
- struct dma_resv_list *fobj = dma_resv_shared_list(obj);
-
- if (fobj)
- shared_count = fobj->shared_count;
-
- for (i = 0; !fence && i < shared_count; ++i) {
- struct dma_fence *lfence;
-
- lfence = rcu_dereference(fobj->shared[i]);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &lfence->flags))
- continue;
-
- if (!dma_fence_get_rcu(lfence))
- goto unlock_retry;
- if (dma_fence_is_signaled(lfence)) {
- dma_fence_put(lfence);
- continue;
- }
+ dma_resv_iter_begin(&cursor, obj, wait_all);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
- fence = lfence;
- break;
+ ret = dma_fence_wait_timeout(fence, intr, ret);
+ if (ret <= 0) {
+ dma_resv_iter_end(&cursor);
+ return ret;
}
}
+ dma_resv_iter_end(&cursor);
- rcu_read_unlock();
- if (fence) {
- if (read_seqcount_retry(&obj->seq, seq)) {
- dma_fence_put(fence);
- goto retry;
- }
-
- ret = dma_fence_wait_timeout(fence, intr, ret);
- dma_fence_put(fence);
- if (ret > 0 && wait_all && (i + 1 < shared_count))
- goto retry;
- }
return ret;
-
-unlock_retry:
- rcu_read_unlock();
- goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
-static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
-{
- struct dma_fence *fence, *lfence = passed_fence;
- int ret = 1;
-
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
- fence = dma_fence_get_rcu(lfence);
- if (!fence)
- return -1;
-
- ret = !!dma_fence_is_signaled(fence);
- dma_fence_put(fence);
- }
- return ret;
-}
-
/**
* dma_resv_test_signaled - Test if a reservation object's fences have been
* signaled.
@@ -627,43 +600,16 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
*/
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
- unsigned int seq;
- int ret;
- rcu_read_lock();
-retry:
- ret = true;
- seq = read_seqcount_begin(&obj->seq);
-
- if (test_all) {
- struct dma_resv_list *fobj = dma_resv_shared_list(obj);
- unsigned int i, shared_count;
-
- shared_count = fobj ? fobj->shared_count : 0;
- for (i = 0; i < shared_count; ++i) {
- fence = rcu_dereference(fobj->shared[i]);
- ret = dma_resv_test_signaled_single(fence);
- if (ret < 0)
- goto retry;
- else if (!ret)
- break;
- }
+ dma_resv_iter_begin(&cursor, obj, test_all);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_resv_iter_end(&cursor);
+ return false;
}
-
- fence = dma_resv_excl_fence(obj);
- if (ret && fence) {
- ret = dma_resv_test_signaled_single(fence);
- if (ret < 0)
- goto retry;
-
- }
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto retry;
-
- rcu_read_unlock();
- return ret;
+ dma_resv_iter_end(&cursor);
+ return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
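All of the conversions above share the same shape. As a reference, a sketch of the pattern a driver-side consumer would follow with the new unlocked iterator; it mirrors only the usage visible in this patch, and assumes dma_resv_iter_begin(), dma_resv_for_each_fence_unlocked() and dma_resv_iter_end() are declared in the include/linux/dma-resv.h hunk listed in the diffstat but not shown here:

/* Count the not-yet-signaled fences on a reservation object (sketch). */
static unsigned int count_unsignaled_fences(struct dma_resv *obj, bool all_fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, obj, all_fences);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* A concurrent update restarts the walk; drop stale state. */
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;
		count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}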
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b17e231ca6f7..2a926d0de423 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -103,7 +103,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
- depends on FB
+ depends on FB=y || FB=DRM
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5d5c42ae388b..19d4d8b54490 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -894,7 +894,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
DRM_ERROR("failed to pin userptr\n");
return r;
}
- } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
if (!ttm->sg) {
struct dma_buf_attachment *attach;
struct sg_table *sgt;
@@ -1119,6 +1119,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ pgoff_t i;
+ int ret;
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt->userptr) {
@@ -1128,10 +1130,17 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
return 0;
}
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
return 0;
- return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
+ ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i]->mapping = bdev->dev_mapping;
+
+ return 0;
}
/*
@@ -1145,6 +1154,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
+ pgoff_t i;
amdgpu_ttm_backend_unbind(bdev, ttm);
@@ -1155,9 +1165,12 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
return;
}
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
return;
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i]->mapping = NULL;
+
adev = amdgpu_ttm_adev(bdev);
return ttm_pool_free(&adev->mman.bdev.pool, ttm);
}
@@ -1185,8 +1198,8 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
return -ENOMEM;
}
- /* Set TTM_PAGE_FLAG_SG before populate but after create. */
- bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
+ /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
+ bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
gtt = (void *)bo->ttm;
gtt->userptr = addr;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 39ca338eb80b..2cfce7dc95af 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -158,8 +158,6 @@ struct ast_private {
uint32_t dram_type;
uint32_t mclk;
- int fb_mtrr;
-
struct drm_plane primary_plane;
struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 7592f1b9e1f1..6e999408dda9 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -74,35 +74,28 @@ static u32 ast_get_vram_size(struct ast_private *ast)
return vram_size;
}
-static void ast_mm_release(struct drm_device *dev, void *ptr)
-{
- struct ast_private *ast = to_ast_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- arch_phys_wc_del(ast->fb_mtrr);
- arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
-}
-
int ast_mm_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ resource_size_t base, size;
u32 vram_size;
int ret;
+ base = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_io_reserve_memtype_wc(dev->dev, base, size);
+ devm_arch_phys_wc_add(dev->dev, base, size);
+
vram_size = ast_get_vram_size(ast);
- ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), vram_size);
+ ret = drmm_vram_helper_init(dev, base, vram_size);
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
return ret;
}
- arch_io_reserve_memtype_wc(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
-
- return drmm_add_action_or_reset(dev, ast_mm_release, NULL);
+ return 0;
}
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 685e9c38b2db..3aaa90913bf8 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -9,14 +9,40 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
+#define PAGE0_AUXCH_CFG3 0x76
+#define AUXCH_CFG3_RESET 0xff
+#define PAGE0_SWAUX_ADDR_7_0 0x7d
+#define PAGE0_SWAUX_ADDR_15_8 0x7e
+#define PAGE0_SWAUX_ADDR_23_16 0x7f
+#define SWAUX_ADDR_MASK GENMASK(19, 0)
+#define PAGE0_SWAUX_LENGTH 0x80
+#define SWAUX_LENGTH_MASK GENMASK(3, 0)
+#define SWAUX_NO_PAYLOAD BIT(7)
+#define PAGE0_SWAUX_WDATA 0x81
+#define PAGE0_SWAUX_RDATA 0x82
+#define PAGE0_SWAUX_CTRL 0x83
+#define SWAUX_SEND BIT(0)
+#define PAGE0_SWAUX_STATUS 0x84
+#define SWAUX_M_MASK GENMASK(4, 0)
+#define SWAUX_STATUS_MASK GENMASK(7, 5)
+#define SWAUX_STATUS_NACK (0x1 << 5)
+#define SWAUX_STATUS_DEFER (0x2 << 5)
+#define SWAUX_STATUS_ACKM (0x3 << 5)
+#define SWAUX_STATUS_INVALID (0x4 << 5)
+#define SWAUX_STATUS_I2C_NACK (0x5 << 5)
+#define SWAUX_STATUS_I2C_DEFER (0x6 << 5)
+#define SWAUX_STATUS_TIMEOUT (0x7 << 5)
+
#define PAGE2_GPIO_H 0xa7
#define PS_GPIO9 BIT(1)
#define PAGE2_I2C_BYPASS 0xea
@@ -31,6 +57,11 @@
#define NUM_MIPI_LANES 4
+#define COMMON_PS8640_REGMAP_CONFIG \
+ .reg_bits = 8, \
+ .val_bits = 8, \
+ .cache_type = REGCACHE_NONE
+
/*
* PS8640 uses multiple addresses:
* page[0]: for DP control
@@ -62,29 +93,197 @@ enum ps8640_vdo_control {
struct ps8640 {
struct drm_bridge bridge;
struct drm_bridge *panel_bridge;
+ struct drm_dp_aux aux;
struct mipi_dsi_device *dsi;
struct i2c_client *page[MAX_DEVS];
+ struct regmap *regmap[MAX_DEVS];
struct regulator_bulk_data supplies[2];
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_powerdown;
bool powered;
};
+static const struct regmap_config ps8640_regmap_config[] = {
+ [PAGE0_DP_CNTL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xbf,
+ },
+ [PAGE1_VDO_BDG] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE2_TOP_CNTL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE3_DSI_CNTL1] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE4_MIPI_PHY] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE5_VPLL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0x7f,
+ },
+ [PAGE6_DSI_CNTL2] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE7_SPI_CNTL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+};
+
static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
{
return container_of(e, struct ps8640, bridge);
}
+static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux)
+{
+ return container_of(aux, struct ps8640, aux);
+}
+
+static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct ps8640 *ps_bridge = aux_to_ps8640(aux);
+ struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL];
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
+ unsigned int len = msg->size;
+ unsigned int data;
+ unsigned int base;
+ int ret;
+ u8 request = msg->request &
+ ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE);
+ u8 *buf = msg->buffer;
+ u8 addr_len[PAGE0_SWAUX_LENGTH + 1 - PAGE0_SWAUX_ADDR_7_0];
+ u8 i;
+ bool is_native_aux = false;
+
+ if (len > DP_AUX_MAX_PAYLOAD_BYTES)
+ return -EINVAL;
+
+ if (msg->address & ~SWAUX_ADDR_MASK)
+ return -EINVAL;
+
+ switch (request) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_NATIVE_READ:
+ is_native_aux = true;
+ fallthrough;
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_write(map, PAGE0_AUXCH_CFG3, AUXCH_CFG3_RESET);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to write PAGE0_AUXCH_CFG3: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Assume it's good */
+ msg->reply = 0;
+
+ base = PAGE0_SWAUX_ADDR_7_0;
+ addr_len[PAGE0_SWAUX_ADDR_7_0 - base] = msg->address;
+ addr_len[PAGE0_SWAUX_ADDR_15_8 - base] = msg->address >> 8;
+ addr_len[PAGE0_SWAUX_ADDR_23_16 - base] = (msg->address >> 16) |
+ (msg->request << 4);
+ addr_len[PAGE0_SWAUX_LENGTH - base] = (len == 0) ? SWAUX_NO_PAYLOAD :
+ ((len - 1) & SWAUX_LENGTH_MASK);
+
+ regmap_bulk_write(map, PAGE0_SWAUX_ADDR_7_0, addr_len,
+ ARRAY_SIZE(addr_len));
+
+ if (len && (request == DP_AUX_NATIVE_WRITE ||
+ request == DP_AUX_I2C_WRITE)) {
+ /* Write to the internal FIFO buffer */
+ for (i = 0; i < len; i++) {
+ ret = regmap_write(map, PAGE0_SWAUX_WDATA, buf[i]);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to write WDATA: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ regmap_write(map, PAGE0_SWAUX_CTRL, SWAUX_SEND);
+
+ /* Zero delay loop because i2c transactions are slow already */
+ regmap_read_poll_timeout(map, PAGE0_SWAUX_CTRL, data,
+ !(data & SWAUX_SEND), 0, 50 * 1000);
+
+	ret = regmap_read(map, PAGE0_SWAUX_STATUS, &data);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to read PAGE0_SWAUX_STATUS: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (data & SWAUX_STATUS_MASK) {
+ /* Ignore the DEFER cases as they are already handled in hardware */
+ case SWAUX_STATUS_NACK:
+ case SWAUX_STATUS_I2C_NACK:
+ /*
+ * The programming guide is not clear about whether an I2C NACK
+ * would trigger SWAUX_STATUS_NACK or SWAUX_STATUS_I2C_NACK. So
+ * we handle both cases together.
+ */
+ if (is_native_aux)
+ msg->reply |= DP_AUX_NATIVE_REPLY_NACK;
+ else
+ msg->reply |= DP_AUX_I2C_REPLY_NACK;
+
+ fallthrough;
+ case SWAUX_STATUS_ACKM:
+ len = data & SWAUX_M_MASK;
+ break;
+ case SWAUX_STATUS_INVALID:
+ return -EOPNOTSUPP;
+ case SWAUX_STATUS_TIMEOUT:
+ return -ETIMEDOUT;
+ }
+
+ if (len && (request == DP_AUX_NATIVE_READ ||
+ request == DP_AUX_I2C_READ)) {
+ /* Read from the internal FIFO buffer */
+ for (i = 0; i < len; i++) {
+ ret = regmap_read(map, PAGE0_SWAUX_RDATA, &data);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to read RDATA: %d\n",
+ ret);
+ return ret;
+ }
+
+ buf[i] = data;
+ }
+ }
+
+ return len;
+}
+
static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
const enum ps8640_vdo_control ctrl)
{
- struct i2c_client *client = ps_bridge->page[PAGE3_DSI_CNTL1];
+ struct regmap *map = ps_bridge->regmap[PAGE3_DSI_CNTL1];
u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl };
int ret;
- ret = i2c_smbus_write_i2c_block_data(client, PAGE3_SET_ADD,
- sizeof(vdo_ctrl_buf),
- vdo_ctrl_buf);
+ ret = regmap_bulk_write(map, PAGE3_SET_ADD,
+ vdo_ctrl_buf, sizeof(vdo_ctrl_buf));
+
if (ret < 0) {
DRM_ERROR("failed to %sable VDO: %d\n",
ctrl == ENABLE ? "en" : "dis", ret);
@@ -96,8 +295,7 @@ static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
{
- struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
- unsigned long timeout;
+ struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
int ret, status;
if (ps_bridge->powered)
@@ -121,18 +319,12 @@ static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
*/
msleep(200);
- timeout = jiffies + msecs_to_jiffies(200) + 1;
-
- while (time_is_after_jiffies(timeout)) {
- status = i2c_smbus_read_byte_data(client, PAGE2_GPIO_H);
- if (status < 0) {
- DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", status);
- goto err_regulators_disable;
- }
- if ((status & PS_GPIO9) == PS_GPIO9)
- break;
+ ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
+ status & PS_GPIO9, 20 * 1000, 200 * 1000);
- msleep(20);
+ if (ret < 0) {
+ DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", ret);
+ goto err_regulators_disable;
}
msleep(50);
@@ -144,22 +336,15 @@ static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
* disabled by the manufacturer. Once disabled, all MCS commands are
* ignored by the display interface.
*/
- status = i2c_smbus_read_byte_data(client, PAGE2_MCS_EN);
- if (status < 0) {
- DRM_ERROR("failed read PAGE2_MCS_EN: %d\n", status);
- goto err_regulators_disable;
- }
- ret = i2c_smbus_write_byte_data(client, PAGE2_MCS_EN,
- status & ~MCS_EN);
+ ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0);
if (ret < 0) {
DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret);
goto err_regulators_disable;
}
/* Switch access edp panel's edid through i2c */
- ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
- I2C_BYPASS_EN);
+ ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN);
if (ret < 0) {
DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret);
goto err_regulators_disable;
@@ -258,18 +443,33 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = NUM_MIPI_LANES;
ret = mipi_dsi_attach(dsi);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to attach dsi device: %d\n", ret);
goto err_dsi_attach;
+ }
+
+ ret = drm_dp_aux_register(&ps_bridge->aux);
+ if (ret) {
+ dev_err(dev, "failed to register DP AUX channel: %d\n", ret);
+ goto err_aux_register;
+ }
/* Attach the panel-bridge to the dsi bridge */
return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
+err_aux_register:
+ mipi_dsi_detach(dsi);
err_dsi_attach:
mipi_dsi_device_unregister(dsi);
return ret;
}
+static void ps8640_bridge_detach(struct drm_bridge *bridge)
+{
+ drm_dp_aux_unregister(&bridge_to_ps8640(bridge)->aux);
+}
+
static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -306,6 +506,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.attach = ps8640_bridge_attach,
+ .detach = ps8640_bridge_detach,
.get_edid = ps8640_bridge_get_edid,
.post_disable = ps8640_post_disable,
.pre_enable = ps8640_pre_enable,
@@ -362,19 +563,30 @@ static int ps8640_probe(struct i2c_client *client)
ps_bridge->page[PAGE0_DP_CNTL] = client;
+ ps_bridge->regmap[PAGE0_DP_CNTL] = devm_regmap_init_i2c(client, ps8640_regmap_config);
+ if (IS_ERR(ps_bridge->regmap[PAGE0_DP_CNTL]))
+ return PTR_ERR(ps_bridge->regmap[PAGE0_DP_CNTL]);
+
for (i = 1; i < ARRAY_SIZE(ps_bridge->page); i++) {
ps_bridge->page[i] = devm_i2c_new_dummy_device(&client->dev,
client->adapter,
client->addr + i);
- if (IS_ERR(ps_bridge->page[i])) {
- dev_err(dev, "failed i2c dummy device, address %02x\n",
- client->addr + i);
+ if (IS_ERR(ps_bridge->page[i]))
return PTR_ERR(ps_bridge->page[i]);
- }
+
+ ps_bridge->regmap[i] = devm_regmap_init_i2c(ps_bridge->page[i],
+ ps8640_regmap_config + i);
+ if (IS_ERR(ps_bridge->regmap[i]))
+ return PTR_ERR(ps_bridge->regmap[i]);
}
i2c_set_clientdata(client, ps_bridge);
+ ps_bridge->aux.name = "parade-ps8640-aux";
+ ps_bridge->aux.dev = dev;
+ ps_bridge->aux.transfer = ps8640_aux_transfer;
+ drm_dp_aux_init(&ps_bridge->aux);
+
drm_bridge_add(&ps_bridge->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index 70ab4fbdc23e..c8f44bcb298a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -265,11 +265,9 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
/* override the module pointer */
cec->adap->owner = THIS_MODULE;
- ret = devm_add_action(&pdev->dev, dw_hdmi_cec_del, cec);
- if (ret) {
- cec_delete_adapter(cec->adap);
+ ret = devm_add_action_or_reset(&pdev->dev, dw_hdmi_cec_del, cec);
+ if (ret)
return ret;
- }
ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
dw_hdmi_cec_hardirq,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 41d48a393e7f..6154bed0af5b 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -615,20 +615,8 @@ static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
return drm_bridge_get_modes(pdata->next_bridge, connector);
}
-static enum drm_mode_status
-ti_sn_bridge_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* maximum supported resolution is 4K at 60 fps */
- if (mode->clock > 594000)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = {
.get_modes = ti_sn_bridge_connector_get_modes,
- .mode_valid = ti_sn_bridge_connector_mode_valid,
};
static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = {
@@ -766,6 +754,18 @@ static void ti_sn_bridge_detach(struct drm_bridge *bridge)
drm_dp_aux_unregister(&bridge_to_ti_sn65dsi86(bridge)->aux);
}
+static enum drm_mode_status
+ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ /* maximum supported resolution is 4K at 60 fps */
+ if (mode->clock > 594000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
static void ti_sn_bridge_disable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1127,6 +1127,7 @@ static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
+ .mode_valid = ti_sn_bridge_mode_valid,
.pre_enable = ti_sn_bridge_pre_enable,
.enable = ti_sn_bridge_enable,
.disable = ti_sn_bridge_disable,
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 7ee29f073857..c96847fc0ebc 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -50,6 +50,15 @@
* Chaining multiple bridges to the output of a bridge, or the same bridge to
* the output of different bridges, is not supported.
*
+ * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
+ * CRTCs, encoders or connectors and hence are not visible to userspace. They
+ * just provide additional hooks to get the desired output at the end of the
+ * encoder chain.
+ */
+
+/**
+ * DOC: display driver integration
+ *
* Display drivers are responsible for linking encoders with the first bridge
* in the chains. This is done by acquiring the appropriate bridge with
* devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
@@ -84,11 +93,63 @@
* helper to create the &drm_connector, or implement it manually on top of the
* connector-related operations exposed by the bridge (see the overview
* documentation of bridge operations for more details).
- *
- * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
- * CRTCs, encoders or connectors and hence are not visible to userspace. They
- * just provide additional hooks to get the desired output at the end of the
- * encoder chain.
+ */
+
+/**
+ * DOC: special care dsi
+ *
+ * The interaction between the bridges and other frameworks involved in
+ * the probing of the upstream driver and the bridge driver can be
+ * challenging. Indeed, there are multiple cases that need to be
+ * considered:
+ *
+ * - The upstream driver doesn't use the component framework and isn't a
+ * MIPI-DSI host. In this case, the bridge driver will probe at some
+ * point and the upstream driver should try to probe again by returning
+ * EPROBE_DEFER as long as the bridge driver hasn't probed.
+ *
+ * - The upstream driver doesn't use the component framework, but is a
+ * MIPI-DSI host. The bridge device uses the MIPI-DCS commands to be
+ * controlled. In this case, the bridge device is a child of the
+ * display device and when it will probe it's assured that the display
+ * device (and MIPI-DSI host) is present. The upstream driver will be
+ * assured that the bridge driver is connected between the
+ * &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
+ * Therefore, it must run mipi_dsi_host_register() in its probe
+ * function, and then run drm_bridge_attach() in its
+ * &mipi_dsi_host_ops.attach hook.
+ *
+ * - The upstream driver uses the component framework and is a MIPI-DSI
+ * host. The bridge device uses the MIPI-DCS commands to be
+ * controlled. This is the same situation as above, and it can run
+ * mipi_dsi_host_register() in either its probe or bind hooks.
+ *
+ * - The upstream driver uses the component framework and is a MIPI-DSI
+ * host. The bridge device uses a separate bus (such as I2C) to be
+ * controlled. In this case, there's no correlation between the probe
+ * of the bridge and upstream drivers, so care must be taken to avoid
+ * an endless EPROBE_DEFER loop, with each driver waiting for the
+ * other to probe.
+ *
+ * The ideal pattern to cover the last item (and all the others in the
+ * MIPI-DSI host driver case) is to split the operations like this:
+ *
+ * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
+ * probe hook. It will make sure that the MIPI-DSI host sticks around,
+ * and that the driver's bind can be called.
+ *
+ * - In its probe hook, the bridge driver must try to find its MIPI-DSI
+ * host, register as a MIPI-DSI device and attach the MIPI-DSI device
+ * to its host. The bridge driver is now functional.
+ *
+ * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
+ * now add its component. Its bind hook will now be called and since
+ * the bridge driver is attached and registered, we can now look for
+ * and attach it.
+ *
+ * At this point, we're now certain that both the upstream driver and
+ * the bridge driver are functional and we can't have a deadlock-like
+ * situation when probing.
*/
static DEFINE_MUTEX(bridge_lock);
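A compressed sketch of the ordering that the new "special care dsi" section above describes, for the component-framework + MIPI-DSI host case. The my_* names are hypothetical, error handling is trimmed, and only the probe/attach split is the point:

#include <linux/component.h>
#include <linux/platform_device.h>
#include <drm/drm_mipi_dsi.h>

struct my_dsi_host {
	struct mipi_dsi_host dsi_host;
	/* clocks, registers, ... */
};

/* Bind/unbind callbacks for the component framework, not shown here. */
static const struct component_ops my_dsi_component_ops;

/*
 * Step 3: by the time the bridge driver attaches its MIPI-DSI device it has
 * probed and registered, so the host can now expose its component; bind()
 * can then safely look up the bridge and call drm_bridge_attach().
 */
static int my_dsi_host_attach(struct mipi_dsi_host *dsi_host,
			      struct mipi_dsi_device *device)
{
	return component_add(dsi_host->dev, &my_dsi_component_ops);
}

static const struct mipi_dsi_host_ops my_dsi_host_ops = {
	.attach = my_dsi_host_attach,
};

/*
 * Step 1: register the DSI host from probe so the bridge's probe (step 2)
 * can find it, register as a MIPI-DSI device and attach to it.
 */
static int my_dsi_host_probe(struct platform_device *pdev)
{
	struct my_dsi_host *host;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->dsi_host.ops = &my_dsi_host_ops;
	host->dsi_host.dev = &pdev->dev;

	return mipi_dsi_host_register(&host->dsi_host);
}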
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 53b342c058be..9c9463ec5465 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -100,9 +100,10 @@ struct detailed_mode_closure {
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
-#define EDID_QUIRK(vend, product_id, _quirks) \
+#define EDID_QUIRK(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _quirks) \
{ \
- .panel_id = drm_edid_encode_panel_id(vend, product_id), \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
.quirks = _quirks \
}
@@ -111,116 +112,116 @@ static const struct edid_quirk {
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
- EDID_QUIRK("ACR", 44358, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'C', 'R', 44358, EDID_QUIRK_PREFER_LARGE_60),
/* Acer F51 */
- EDID_QUIRK("API", 0x7602, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'P', 'I', 0x7602, EDID_QUIRK_PREFER_LARGE_60),
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK("AEO", 0, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK("BOE", 0x78b, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK("CPT", 0x17df, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('C', 'P', 'T', 0x17df, EDID_QUIRK_FORCE_6BPC),
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK("SDC", 0x3652, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 0x3652, EDID_QUIRK_FORCE_6BPC),
/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK("BOE", 0x0771, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x0771, EDID_QUIRK_FORCE_6BPC),
/* Belinea 10 15 55 */
- EDID_QUIRK("MAX", 1516, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK("MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('M', 'A', 'X', 1516, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('M', 'A', 'X', 0x77e, EDID_QUIRK_PREFER_LARGE_60),
/* Envision Peripherals, Inc. EN-7100e */
- EDID_QUIRK("EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
+ EDID_QUIRK('E', 'P', 'I', 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
/* Envision EN2028 */
- EDID_QUIRK("EPI", 8232, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('E', 'P', 'I', 8232, EDID_QUIRK_PREFER_LARGE_60),
/* Funai Electronics PM36B */
- EDID_QUIRK("FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM),
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
- EDID_QUIRK("LGD", 764, EDID_QUIRK_FORCE_10BPC),
+ EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
/* LG Philips LCD LP154W01-A5 */
- EDID_QUIRK("LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
- EDID_QUIRK("LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+ EDID_QUIRK('L', 'P', 'L', 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+ EDID_QUIRK('L', 'P', 'L', 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
/* Samsung SyncMaster 205BW. Note: irony */
- EDID_QUIRK("SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP),
+ EDID_QUIRK('S', 'A', 'M', 541, EDID_QUIRK_DETAILED_SYNC_PP),
/* Samsung SyncMaster 22[5-6]BW */
- EDID_QUIRK("SAM", 596, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK("SAM", 638, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('S', 'A', 'M', 596, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('S', 'A', 'M', 638, EDID_QUIRK_PREFER_LARGE_60),
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
- EDID_QUIRK("SNY", 0x2541, EDID_QUIRK_FORCE_12BPC),
+ EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC),
/* ViewSonic VA2026w */
- EDID_QUIRK("VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
+ EDID_QUIRK('V', 'S', 'C', 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
/* Medion MD 30217 PG */
- EDID_QUIRK("MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
+ EDID_QUIRK('M', 'E', 'D', 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
/* Lenovo G50 */
- EDID_QUIRK("SDC", 18514, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 18514, EDID_QUIRK_FORCE_6BPC),
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
- EDID_QUIRK("SEC", 0xd033, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('S', 'E', 'C', 0xd033, EDID_QUIRK_FORCE_8BPC),
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
- EDID_QUIRK("ETR", 13896, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('E', 'T', 'R', 13896, EDID_QUIRK_FORCE_8BPC),
/* Valve Index Headset */
- EDID_QUIRK("VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91be, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91a8, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b0, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b1, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b2, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b3, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b4, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b5, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b6, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b7, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b8, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b9, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91ba, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bb, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bc, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bd, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91be, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bf, EDID_QUIRK_NON_DESKTOP),
/* HTC Vive and Vive Pro VR Headsets */
- EDID_QUIRK("HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'V', 'R', 0xaa01, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'V', 'R', 0xaa02, EDID_QUIRK_NON_DESKTOP),
/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
- EDID_QUIRK("OVR", 0x0001, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("OVR", 0x0003, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("OVR", 0x0004, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("OVR", 0x0012, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0001, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0003, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0004, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0012, EDID_QUIRK_NON_DESKTOP),
/* Windows Mixed Reality Headsets */
- EDID_QUIRK("ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("HPN", 0x3515, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("LEN", 0x0408, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("LEN", 0xb800, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("SEC", 0x144a, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK("AUS", 0xc102, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'P', 'N', 0x3515, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('L', 'E', 'N', 0xb800, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('A', 'U', 'S', 0xc102, EDID_QUIRK_NON_DESKTOP),
/* Sony PlayStation VR Headset */
- EDID_QUIRK("SNY", 0x0704, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'N', 'Y', 0x0704, EDID_QUIRK_NON_DESKTOP),
/* Sensics VR Headsets */
- EDID_QUIRK("SEN", 0x1019, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'E', 'N', 0x1019, EDID_QUIRK_NON_DESKTOP),
/* OSVR HDK and HDK2 VR Headsets */
- EDID_QUIRK("SVR", 0x1019, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
};
/*
@@ -1910,13 +1911,15 @@ int drm_add_override_edid_modes(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_add_override_edid_modes);
-static struct edid *drm_do_get_edid_base_block(
+static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector,
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
size_t len),
- void *data, bool *edid_corrupt, int *null_edid_counter)
+ void *data)
{
- int i;
+ int *null_edid_counter = connector ? &connector->null_edid_counter : NULL;
+ bool *edid_corrupt = connector ? &connector->edid_corrupt : NULL;
void *edid;
+ int i;
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (edid == NULL)
@@ -1940,9 +1943,8 @@ static struct edid *drm_do_get_edid_base_block(
return edid;
carp:
- kfree(edid);
- return ERR_PTR(-EINVAL);
-
+ if (connector)
+ connector_bad_edid(connector, edid, 1);
out:
kfree(edid);
return NULL;
@@ -1981,14 +1983,9 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
if (override)
return override;
- edid = (u8 *)drm_do_get_edid_base_block(get_edid_block, data,
- &connector->edid_corrupt,
- &connector->null_edid_counter);
- if (IS_ERR_OR_NULL(edid)) {
- if (IS_ERR(edid))
- connector_bad_edid(connector, edid, 1);
+ edid = (u8 *)drm_do_get_edid_base_block(connector, get_edid_block, data);
+ if (!edid)
return NULL;
- }
/* if there's no extensions or no connector, we're done */
valid_extensions = edid[0x7e];
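A hedged sketch (not part of the patch) of the calling convention drm_do_get_edid() keeps after this cleanup: the driver supplies a block-read callback plus opaque data, and corrupt-EDID accounting now happens against @connector inside the base-block helper. The my_* names and my_bus_read() are hypothetical:

/* Hypothetical block-read callback for drm_do_get_edid(). */
static int my_read_edid_block(void *data, u8 *buf, unsigned int block,
			      size_t len)
{
	struct my_encoder *enc = data;	/* hypothetical driver state */

	/* Fetch 'len' bytes of EDID block 'block' over the control bus. */
	return my_bus_read(enc, block * EDID_LENGTH, buf, len);
}

static struct edid *my_get_edid(struct drm_connector *connector,
				struct my_encoder *enc)
{
	return drm_do_get_edid(connector, my_read_edid_block, enc);
}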
@@ -2141,14 +2138,13 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
struct edid *edid;
u32 panel_id;
- edid = drm_do_get_edid_base_block(drm_do_probe_ddc_edid, adapter,
- NULL, NULL);
+ edid = drm_do_get_edid_base_block(NULL, drm_do_probe_ddc_edid, adapter);
/*
* There are no manufacturer IDs of 0, so if there is a problem reading
* the EDID then we'll just return 0.
*/
- if (IS_ERR_OR_NULL(edid))
+ if (!edid)
return 0;
panel_id = edid_extract_panel_id(edid);
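A hedged sketch of how a caller might use drm_edid_get_panel_id() for early panel identification before a full EDID probe; the wrapper and its naming are illustrative:

/* Hypothetical early panel check on a DDC bus. */
static bool panel_matches(struct i2c_adapter *ddc, u32 expected_panel_id)
{
	u32 panel_id = drm_edid_get_panel_id(ddc);

	/* 0 means the EDID base block could not be read. */
	return panel_id && panel_id == expected_panel_id;
}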
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 5231104b1498..69fde60e36b3 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -135,6 +135,56 @@ void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(drm_fb_swab);
+static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, __le32 *sbuf, unsigned int pixels)
+{
+ unsigned int x;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf[x]);
+ dbuf[x] = ((pix & 0x00e00000) >> 16) |
+ ((pix & 0x0000e000) >> 11) |
+ ((pix & 0x000000c0) >> 6);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb332 - Convert XRGB8888 to RGB332 clip buffer
+ * @dst: RGB332 destination buffer
+ * @src: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for RGB332 devices that don't natively support XRGB8888.
+ *
+ * This function does not apply clipping on dst, i.e. the destination is a small buffer
+ * containing the clip rect only.
+ */
+void drm_fb_xrgb8888_to_rgb332(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t width = drm_rect_width(clip);
+ size_t src_len = width * sizeof(u32);
+ unsigned int y;
+ void *sbuf;
+
+ /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */
+ sbuf = kmalloc(src_len, GFP_KERNEL);
+ if (!sbuf)
+ return;
+
+ src += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < drm_rect_height(clip); y++) {
+ memcpy(sbuf, src, src_len);
+ drm_fb_xrgb8888_to_rgb332_line(dst, sbuf, width);
+ src += fb->pitches[0];
+ dst += width;
+ }
+
+ kfree(sbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
+
static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
unsigned int pixels,
bool swab)
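A hedged usage sketch for the new RGB332 helper; the dirty-rectangle plumbing and the transfer step are hypothetical:

/* Hypothetical damage flush for a device that scans out RGB332. */
static void my_rgb332_fb_dirty(struct drm_framebuffer *fb, void *vmap,
			       struct drm_rect *clip, u8 *tx_buf)
{
	/* tx_buf must hold width * height bytes for the clip rectangle;
	 * every XRGB8888 pixel shrinks to a single RGB332 byte. */
	drm_fb_xrgb8888_to_rgb332(tx_buf, vmap, fb, clip);

	/* ...transfer tx_buf to the device (SPI, USB, ...)... */
}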
@@ -251,6 +301,44 @@ static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf,
}
/**
+ * drm_fb_xrgb8888_to_rgb888 - Convert XRGB8888 to RGB888 clip buffer
+ * @dst: RGB888 destination buffer
+ * @src: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for RGB888 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
+ */
+void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t width = drm_rect_width(clip);
+ size_t src_len = width * sizeof(u32);
+ unsigned int y;
+ void *sbuf;
+
+ /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */
+ sbuf = kmalloc(src_len, GFP_KERNEL);
+ if (!sbuf)
+ return;
+
+ src += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < drm_rect_height(clip); y++) {
+ memcpy(sbuf, src, src_len);
+ drm_fb_xrgb8888_to_rgb888_line(dst, sbuf, width);
+ src += fb->pitches[0];
+ dst += width * 3;
+ }
+
+ kfree(sbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
+
+/**
* drm_fb_xrgb8888_to_rgb888_dstclip - Convert XRGB8888 to RGB888 clip buffer
* @dst: RGB565 destination buffer (iomem)
* @dst_pitch: destination buffer pitch
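And a matching sketch for the packed-RGB888 variant added above; allocation and ownership of the output buffer are illustrative:

/* Hypothetical conversion of one damage rectangle to 3-byte RGB888. */
static void *my_rgb888_convert(struct drm_framebuffer *fb, void *vmap,
			       struct drm_rect *clip)
{
	size_t len = drm_rect_width(clip) * drm_rect_height(clip) * 3;
	void *buf = kmalloc(len, GFP_KERNEL);

	if (buf)
		drm_fb_xrgb8888_to_rgb888(buf, vmap, fb, clip);

	return buf;	/* caller transfers the data and kfree()s it */
}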
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index eda832f9200d..783844bfecc1 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -133,6 +133,7 @@ const struct drm_format_info *__drm_format_info(u32 format)
{
static const struct drm_format_info formats[] = {
{ .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
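A small sketch of how the new DRM_FORMAT_R8 table entry is consumed through the format-info lookup; the sizing helper itself is hypothetical:

/* Hypothetical plane-size calculation for an 8-bit single-channel buffer. */
static size_t r8_plane_size(u32 width, u32 height)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_R8);

	/* One plane, cpp[0] == 1, no chroma subsampling. */
	return (size_t)width * info->cpp[0] * height;
}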
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index dee4f24a1808..d72c2fac0ff1 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -489,12 +489,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
- /* need some objects */
- if (cl->object_count == 0) {
- DRM_DEBUG_LEASE("no objects in lease\n");
- return -EINVAL;
- }
-
if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
DRM_DEBUG_LEASE("invalid flags\n");
return -EINVAL;
@@ -510,23 +504,26 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count = cl->object_count;
- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
- array_size(object_count, sizeof(__u32)));
- if (IS_ERR(object_ids)) {
- ret = PTR_ERR(object_ids);
- goto out_lessor;
- }
-
+ /* Handle leased objects, if any */
idr_init(&leases);
+ if (object_count != 0) {
+ object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+ array_size(object_count, sizeof(__u32)));
+ if (IS_ERR(object_ids)) {
+ ret = PTR_ERR(object_ids);
+ idr_destroy(&leases);
+ goto out_lessor;
+ }
- /* fill and validate the object idr */
- ret = fill_object_idr(dev, lessor_priv, &leases,
- object_count, object_ids);
- kfree(object_ids);
- if (ret) {
- DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
- idr_destroy(&leases);
- goto out_lessor;
+ /* fill and validate the object idr */
+ ret = fill_object_idr(dev, lessor_priv, &leases,
+ object_count, object_ids);
+ kfree(object_ids);
+ if (ret) {
+ DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
+ idr_destroy(&leases);
+ goto out_lessor;
+ }
}
/* Allocate a file descriptor for the lease */
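With the early object_count check dropped, a lessee can now be created with no objects at all. A hedged userspace sketch using the libdrm wrapper (error handling trimmed; the helper name is hypothetical):

/* Hypothetical userspace helper: create an initially empty lease. */
#include <fcntl.h>
#include <xf86drmMode.h>

static int create_empty_lease(int drm_fd, uint32_t *lessee_id)
{
	/* object_count == 0 now simply skips object lookup in the kernel. */
	return drmModeCreateLease(drm_fd, NULL, 0, O_CLOEXEC, lessee_id);
}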
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 5dd475e82995..18cef04df2f2 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -246,6 +246,52 @@ void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi)
}
EXPORT_SYMBOL(mipi_dsi_device_unregister);
+static void devm_mipi_dsi_device_unregister(void *arg)
+{
+ struct mipi_dsi_device *dsi = arg;
+
+ mipi_dsi_device_unregister(dsi);
+}
+
+/**
+ * devm_mipi_dsi_device_register_full - create a managed MIPI DSI device
+ * @dev: device to tie the MIPI-DSI device lifetime to
+ * @host: DSI host to which this device is connected
+ * @info: pointer to template containing DSI device information
+ *
+ * Create a MIPI DSI device using the device information provided by the
+ * mipi_dsi_device_info template.
+ *
+ * This is the managed version of mipi_dsi_device_register_full() which
+ * automatically calls mipi_dsi_device_unregister() when @dev is
+ * unbound.
+ *
+ * Returns:
+ * A pointer to the newly created MIPI DSI device, or an error pointer
+ * (ERR_PTR()) on failure.
+ */
+struct mipi_dsi_device *
+devm_mipi_dsi_device_register_full(struct device *dev,
+ struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info)
+{
+ struct mipi_dsi_device *dsi;
+ int ret;
+
+ dsi = mipi_dsi_device_register_full(host, info);
+ if (IS_ERR(dsi))
+ return dsi;
+
+ ret = devm_add_action_or_reset(dev,
+ devm_mipi_dsi_device_unregister,
+ dsi);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dsi;
+}
+EXPORT_SYMBOL_GPL(devm_mipi_dsi_device_register_full);
+
static DEFINE_MUTEX(host_lock);
static LIST_HEAD(host_list);
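A hedged sketch of how a DSI host consumer (for example a bridge driver in this series) might use the managed registration; the device-tree lookup and the info contents are illustrative:

/* Hypothetical probe-time registration of a DSI peripheral. */
static struct mipi_dsi_device *my_register_dsi(struct device *dev,
					       struct device_node *host_node)
{
	const struct mipi_dsi_device_info info = {
		.type = "my-dsi-peripheral",	/* illustrative name */
		.channel = 0,
		.node = NULL,
	};
	struct mipi_dsi_host *host;

	host = of_find_mipi_dsi_host_by_node(host_node);
	if (!host)
		return ERR_PTR(-EPROBE_DEFER);

	/* Unregistered automatically when 'dev' is unbound. */
	return devm_mipi_dsi_device_register_full(dev, host, &info);
}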
@@ -345,6 +391,41 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
}
EXPORT_SYMBOL(mipi_dsi_detach);
+static void devm_mipi_dsi_detach(void *arg)
+{
+ struct mipi_dsi_device *dsi = arg;
+
+ mipi_dsi_detach(dsi);
+}
+
+/**
+ * devm_mipi_dsi_attach - Attach a MIPI-DSI device to its DSI Host
+ * @dev: device to tie the MIPI-DSI device attachment lifetime to
+ * @dsi: DSI peripheral
+ *
+ * This is the managed version of mipi_dsi_attach() which automatically
+ * calls mipi_dsi_detach() when @dev is unbound.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure.
+ */
+int devm_mipi_dsi_attach(struct device *dev,
+ struct mipi_dsi_device *dsi)
+{
+ int ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, devm_mipi_dsi_detach, dsi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_mipi_dsi_attach);
+
static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
struct mipi_dsi_msg *msg)
{
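Continuing the sketch above, the peripheral is then configured and attached with the managed helper, so the detach also happens automatically when 'dev' unbinds:

/* Hypothetical follow-up: configure and attach the DSI peripheral. */
static int my_attach_dsi(struct device *dev, struct mipi_dsi_device *dsi)
{
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;

	return devm_mipi_dsi_attach(dev, dsi);
}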
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 5606bca3caa8..61d5c57f23e1 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -795,6 +795,86 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+static bool check_connector_changed(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
+ u64 old_epoch_counter;
+
+ /* Only handle HPD capable connectors. */
+ drm_WARN_ON(dev, !(connector->polled & DRM_CONNECTOR_POLL_HPD));
+
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
+
+ old_status = connector->status;
+ old_epoch_counter = connector->epoch_counter;
+ connector->status = drm_helper_probe_detect(connector, NULL, false);
+
+ if (old_epoch_counter == connector->epoch_counter) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Same epoch counter %llu\n",
+ connector->base.id,
+ connector->name,
+ connector->epoch_counter);
+
+ return false;
+ }
+
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id,
+ connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
+
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Changed epoch counter %llu => %llu\n",
+ connector->base.id,
+ connector->name,
+ old_epoch_counter,
+ connector->epoch_counter);
+
+ return true;
+}
+
+/**
+ * drm_connector_helper_hpd_irq_event - hotplug processing
+ * @connector: drm_connector
+ *
+ * Drivers can use this helper function to run a detect cycle on a connector
+ * which has the DRM_CONNECTOR_POLL_HPD flag set in its &polled member.
+ *
+ * This helper function is useful for drivers which can track hotplug
+ * interrupts for a single connector. Drivers that want to send a
+ * hotplug event for all connectors or can't track hotplug interrupts
+ * per connector need to use drm_helper_hpd_irq_event().
+ *
+ * This function must be called from process context with no mode
+ * setting locks held.
+ *
+ * Note that a connector can be both polled and probed from the hotplug
+ * handler, in case the hotplug interrupt is known to be unreliable.
+ *
+ * Returns:
+ * A boolean indicating whether the connector status changed or not
+ */
+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ bool changed;
+
+ mutex_lock(&dev->mode_config.mutex);
+ changed = check_connector_changed(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed) {
+ drm_kms_helper_hotplug_event(dev);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Sent hotplug event\n",
+ connector->base.id,
+ connector->name);
+ }
+
+ return changed;
+}
+EXPORT_SYMBOL(drm_connector_helper_hpd_irq_event);
+
/**
* drm_helper_hpd_irq_event - hotplug processing
* @dev: drm_device
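A hedged sketch of the intended caller for the new per-connector helper: a threaded HPD interrupt handler that lets the helper decide, via the epoch counter, whether a hotplug uevent is warranted. The IRQ wiring is hypothetical:

/* Hypothetical threaded interrupt handler for one connector's HPD line. */
static irqreturn_t my_hpd_irq_thread(int irq, void *arg)
{
	struct drm_connector *connector = arg;

	drm_connector_helper_hpd_irq_event(connector);

	return IRQ_HANDLED;
}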
@@ -808,23 +888,25 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
* interrupts for each connector.
*
* Drivers which support hotplug interrupts for each connector individually and
- * which have a more fine-grained detect logic should bypass this code and
- * directly call drm_kms_helper_hotplug_event() in case the connector state
- * changed.
+ * which have a more fine-grained detect logic can use
+ * drm_connector_helper_hpd_irq_event(). Alternatively, they should bypass this
+ * code and directly call drm_kms_helper_hotplug_event() in case the connector
+ * state changed.
*
* This function must be called from process context with no mode
* setting locks held.
*
* Note that a connector can be both polled and probed from the hotplug handler,
* in case the hotplug interrupt is known to be unreliable.
+ *
+ * Returns:
+ * A boolean indicating whether the connector status changed or not
*/
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- enum drm_connector_status old_status;
bool changed = false;
- u64 old_epoch_counter;
if (!dev->mode_config.poll_enabled)
return false;
@@ -836,33 +918,8 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- old_status = connector->status;
-
- old_epoch_counter = connector->epoch_counter;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Old epoch counter %llu\n", connector->base.id,
- connector->name,
- old_epoch_counter);
-
- connector->status = drm_helper_probe_detect(connector, NULL, false);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->status));
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] New epoch counter %llu\n",
- connector->base.id,
- connector->name,
- connector->epoch_counter);
-
- /*
- * Check if epoch counter had changed, meaning that we need
- * to send a uevent.
- */
- if (old_epoch_counter != connector->epoch_counter)
+ if (check_connector_changed(connector))
changed = true;
-
}
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 9e90258541a4..46b9c0f13d6d 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -16,7 +16,7 @@
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
static void do_gma_backlight_set(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
backlight_update_status(dev_priv->backlight_device);
}
#endif
@@ -24,7 +24,7 @@ static void do_gma_backlight_set(struct drm_device *dev)
void gma_backlight_enable(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_enabled = true;
if (dev_priv->backlight_device) {
dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
@@ -36,7 +36,7 @@ void gma_backlight_enable(struct drm_device *dev)
void gma_backlight_disable(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_enabled = false;
if (dev_priv->backlight_device) {
dev_priv->backlight_device->props.brightness = 0;
@@ -48,7 +48,7 @@ void gma_backlight_disable(struct drm_device *dev)
void gma_backlight_set(struct drm_device *dev, int v)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_level = v;
if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
dev_priv->backlight_device->props.brightness = v;
@@ -60,7 +60,7 @@ void gma_backlight_set(struct drm_device *dev, int v)
int gma_backlight_init(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_enabled = true;
return dev_priv->ops->backlight_init(dev);
#else
@@ -71,7 +71,7 @@ int gma_backlight_init(struct drm_device *dev)
void gma_backlight_exit(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->backlight_device) {
dev_priv->backlight_device->props.brightness = 0;
backlight_update_status(dev_priv->backlight_device);
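The gma500 hunks from here on are mechanical: every dev->dev_private lookup becomes to_drm_psb_private(dev), and sites that held a struct drm_device pointer now take &dev_priv->dev, which implies the drm_device is embedded in drm_psb_private. A sketch of the presumed helper (the actual psb_drv.h definition is not shown in this excerpt):

/* Sketch, assuming struct drm_device is embedded as drm_psb_private.dev. */
static inline struct drm_psb_private *to_drm_psb_private(struct drm_device *dev)
{
	return container_of(dev, struct drm_psb_private, dev);
}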
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1342e7fb382f..d7c6cca23e94 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -38,7 +38,7 @@ static void cdv_disable_vga(struct drm_device *dev)
static int cdv_output_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
drm_mode_create_scaling_mode_property(dev);
@@ -146,7 +146,7 @@ static const struct backlight_ops cdv_ops = {
static int cdv_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -206,7 +206,7 @@ static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
static void cdv_init_pm(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 pwr_cnt;
int domain = pci_domain_nr(pdev->bus);
@@ -259,7 +259,7 @@ static void cdv_errata(struct drm_device *dev)
*/
static int cdv_save_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
@@ -314,7 +314,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
*/
static int cdv_restore_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
@@ -383,7 +383,7 @@ static int cdv_restore_display_registers(struct drm_device *dev)
static int cdv_power_down(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_cnt, pwr_mask, pwr_sts;
int tries = 5;
@@ -405,7 +405,7 @@ static int cdv_power_down(struct drm_device *dev)
static int cdv_power_up(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_cnt, pwr_mask, pwr_sts;
int tries = 5;
@@ -429,7 +429,7 @@ static void cdv_hotplug_work_func(struct work_struct *work)
{
struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
hotplug_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->dev;
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
@@ -440,7 +440,7 @@ static void cdv_hotplug_work_func(struct work_struct *work)
static int cdv_hotplug_event(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
schedule_work(&dev_priv->hotplug_work);
REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
return 1;
@@ -468,7 +468,7 @@ static const char *force_audio_names[] = {
void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_property *prop;
int i;
@@ -497,7 +497,7 @@ static const char *broadcast_rgb_names[] = {
void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_property *prop;
int i;
@@ -574,7 +574,7 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index c3a9f6b3c848..94ebc48a4349 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -455,7 +455,7 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = NULL;
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -489,7 +489,7 @@ void cdv_disable_sr(struct drm_device *dev)
void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
/* Is only one pipe enabled? */
@@ -574,7 +574,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -829,7 +829,7 @@ static void i8xx_clock(int refclk, struct gma_clock_t *clock)
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -910,7 +910,7 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
const struct psb_offset *map = &dev_priv->regmap[pipe];
struct drm_display_mode *mode;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 595b765ecc71..ba6ad1466374 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -515,7 +515,7 @@ cdv_intel_dp_mode_valid(struct drm_connector *connector,
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
int max_lanes = cdv_intel_dp_max_lane_count(encoder);
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -896,7 +896,7 @@ static bool
cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(encoder->dev);
struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
int lane_count, clock;
@@ -988,7 +988,7 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
@@ -1744,7 +1744,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
if (is_edp(intel_encoder)) {
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
cdv_intel_edp_panel_vdd_off(intel_encoder);
if (ret) {
@@ -1809,7 +1809,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
@@ -1908,7 +1908,7 @@ static void cdv_intel_dp_add_properties(struct drm_connector *connector)
/* check the VBT to see whether the eDP is on DP-D port */
static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct child_device_config *p_child;
int i;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8a2219fcf9b4..9e1cdb11023c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -57,7 +57,7 @@ struct cdv_intel_lvds_priv {
*/
static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 retval;
if (gma_power_begin(dev, false)) {
@@ -81,7 +81,7 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
*/
static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 blc_pwm_ctl;
if (gma_power_begin(dev, false)) {
@@ -105,7 +105,7 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
static void cdv_intel_lvds_set_power(struct drm_device *dev,
struct drm_encoder *encoder, bool on)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pp_status;
if (!gma_power_begin(dev, true))
@@ -154,7 +154,7 @@ static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *conn
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
@@ -180,7 +180,7 @@ static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct drm_encoder *tmp_encoder;
struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
@@ -227,7 +227,7 @@ static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
@@ -245,7 +245,7 @@ static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
@@ -260,7 +260,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
u32 pfit_control;
@@ -297,7 +297,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int ret;
@@ -428,7 +428,7 @@ static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
static bool lvds_is_present_in_vbt(struct drm_device *dev,
u8 *i2c_pin)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int i;
if (!dev_priv->child_dev_num)
@@ -486,7 +486,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
struct drm_encoder *encoder;
struct drm_display_mode *scan;
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 lvds;
int pipe;
u8 pin;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 0b8648396fb2..321e416489a9 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -81,7 +81,7 @@ static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct drm_framebuffer *fb = vma->vm_private_data;
struct drm_device *dev = fb->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
int page_num;
int i;
@@ -261,7 +261,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct fb_info *info;
struct drm_framebuffer *fb;
@@ -374,7 +374,7 @@ static int psbfb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned int fb_size;
int bytespp;
@@ -422,7 +422,7 @@ static int psb_fbdev_destroy(struct drm_device *dev,
int psb_fbdev_init(struct drm_device *dev)
{
struct drm_fb_helper *fb_helper;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
@@ -457,7 +457,7 @@ free:
static void psb_fbdev_fini(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (!dev_priv->fb_helper)
return;
@@ -474,7 +474,7 @@ static const struct drm_mode_config_funcs psb_mode_funcs = {
static void psb_setup_outputs(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_connector *connector;
drm_mode_create_scaling_mode_property(dev);
@@ -533,7 +533,7 @@ static void psb_setup_outputs(struct drm_device *dev)
void psb_modeset_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
int i;
@@ -566,7 +566,7 @@ void psb_modeset_init(struct drm_device *dev)
void psb_modeset_cleanup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->modeset) {
drm_kms_helper_poll_fini(dev);
psb_fbdev_fini(dev);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index fbf420051ef5..5ae54c9d2819 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -147,7 +147,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
obj = vma->vm_private_data; /* GEM object */
dev = obj->dev;
- dev_priv = dev->dev_private;
+ dev_priv = to_drm_psb_private(dev);
r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 4c91e86f4b14..954f3a275d81 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -15,7 +15,7 @@ void gma_get_core_freq(struct drm_device *dev)
struct pci_dev *pci_root =
pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, 0);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index b03f7b8241f2..cbcecbaa041b 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -51,7 +51,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct gtt_range *gtt;
@@ -136,7 +136,7 @@ gma_pipe_set_base_exit:
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
int palreg = map->palette;
@@ -189,7 +189,7 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -324,7 +324,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
@@ -553,7 +553,7 @@ int gma_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
if (!dev_priv->rpm_enabled)
@@ -572,7 +572,7 @@ int gma_crtc_set_config(struct drm_mode_set *set,
void gma_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
@@ -615,7 +615,7 @@ void gma_crtc_save(struct drm_crtc *crtc)
void gma_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index df9b611b856a..55a2a6919533 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -53,7 +53,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
*/
static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long offset;
offset = r->resource.start - dev_priv->gtt_mem->start;
@@ -118,7 +118,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
*/
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 __iomem *gtt_slot;
u32 pte;
int i;
@@ -188,7 +188,7 @@ int psb_gtt_pin(struct gtt_range *gt)
{
int ret = 0;
struct drm_device *dev = gt->gem.dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gpu_base = dev_priv->gtt.gatt_start;
mutex_lock(&dev_priv->gtt_mutex);
@@ -226,7 +226,7 @@ out:
void psb_gtt_unpin(struct gtt_range *gt)
{
struct drm_device *dev = gt->gem.dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gpu_base = dev_priv->gtt.gatt_start;
mutex_lock(&dev_priv->gtt_mutex);
@@ -266,7 +266,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
const char *name, int backed, u32 align)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gtt_range *gt;
struct resource *r = dev_priv->gtt_mem;
int ret;
@@ -322,13 +322,13 @@ void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
static void psb_gtt_alloc(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
init_rwsem(&dev_priv->gtt.sem);
}
void psb_gtt_takedown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (dev_priv->gtt_map) {
@@ -347,7 +347,7 @@ void psb_gtt_takedown(struct drm_device *dev)
int psb_gtt_init(struct drm_device *dev, int resume)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned gtt_pages;
unsigned long stolen_size, vram_stolen_size;
@@ -496,7 +496,7 @@ out_err:
int psb_gtt_restore(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct resource *r = dev_priv->gtt_mem->child;
struct gtt_range *range;
unsigned int restored = 0, total = 0, size = 0;
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d838369f0119..d5ca5f241974 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -207,7 +207,7 @@ static void parse_backlight_data(struct drm_psb_private *dev_priv,
lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
if (!lvds_bl) {
- dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+ dev_err(dev_priv->dev.dev, "out of memory for backlight data\n");
return;
}
dev_priv->lvds_bl = lvds_bl;
@@ -248,7 +248,7 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
GFP_KERNEL);
if (panel_fixed_mode == NULL) {
- dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+ dev_err(dev_priv->dev.dev, "out of memory for fixed panel mode\n");
return;
}
@@ -259,7 +259,7 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
drm_mode_debug_printmodeline(panel_fixed_mode);
} else {
- dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+ dev_dbg(dev_priv->dev.dev, "ignoring invalid LVDS VBT\n");
dev_priv->lvds_vbt = 0;
kfree(panel_fixed_mode);
}
@@ -515,7 +515,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
*/
int psb_intel_init_bios(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct vbt_header *vbt = NULL;
struct bdb_header *bdb = NULL;
@@ -579,7 +579,7 @@ int psb_intel_init_bios(struct drm_device *dev)
*/
void psb_intel_destroy_bios(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
kfree(dev_priv->sdvo_lvds_vbt_mode);
kfree(dev_priv->lfp_lvds_vbt_mode);
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index c17cbafa468a..09cedabf4776 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -75,7 +75,7 @@ struct intel_gpio {
void
gma_intel_i2c_reset(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
GMBUS_REG_WRITE(GMBUS0, 0);
}
@@ -196,7 +196,7 @@ intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
"gma500 GPIO%c", "?BACDE?F"[pin]);
gpio->adapter.owner = THIS_MODULE;
gpio->adapter.algo_data = &gpio->algo;
- gpio->adapter.dev.parent = dev_priv->dev->dev;
+ gpio->adapter.dev.parent = dev_priv->dev.dev;
gpio->algo.setsda = set_data;
gpio->algo.setscl = set_clock;
gpio->algo.getsda = get_data;
@@ -226,7 +226,7 @@ intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
adapter);
int ret;
- gma_intel_i2c_reset(dev_priv->dev);
+ gma_intel_i2c_reset(&dev_priv->dev);
intel_i2c_quirk_set(dev_priv, true);
set_data(gpio, 1);
@@ -394,7 +394,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
"reserved",
"dpd",
};
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret, i;
dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
@@ -432,7 +432,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
bus->force_bit = intel_gpio_create(dev_priv, i);
}
- gma_intel_i2c_reset(dev_priv->dev);
+ gma_intel_i2c_reset(&dev_priv->dev);
return 0;
@@ -480,7 +480,7 @@ void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
void gma_intel_teardown_gmbus(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int i;
if (dev_priv->gmbus == NULL)
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 68e787924ed0..7e76790c6a81 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -18,7 +18,7 @@
static void mid_get_fuse_settings(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct pci_dev *pci_root =
pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
@@ -94,7 +94,7 @@ static void mid_get_fuse_settings(struct drm_device *dev)
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
uint32_t platform_rev_id = 0;
- struct pci_dev *pdev = to_pci_dev(dev_priv->dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->dev.dev);
int domain = pci_domain_nr(pdev->bus);
struct pci_dev *pci_gfx_root =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
@@ -106,8 +106,7 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
pci_dev_put(pci_gfx_root);
- dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
- dev_priv->platform_rev_id);
+ dev_dbg(dev_priv->dev.dev, "platform_rev_id is %x\n", dev_priv->platform_rev_id);
}
struct mid_vbt_header {
@@ -270,7 +269,7 @@ out:
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 addr;
u8 __iomem *vbt_virtual;
@@ -325,7 +324,7 @@ out:
int mid_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
mid_get_fuse_settings(dev);
mid_get_vbt_data(dev_priv);
mid_get_pci_revID(dev_priv);
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index d856580b8111..fe9ace2a7967 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -66,7 +66,7 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (atomic_read(&driver->needs_tlbflush) || force) {
uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
@@ -94,7 +94,7 @@ static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t val;
down_write(&driver->sem);
@@ -120,7 +120,7 @@ void psb_mmu_flush(struct psb_mmu_driver *driver)
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
struct drm_device *dev = pd->driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
@@ -230,7 +230,7 @@ void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
struct psb_mmu_driver *driver = pd->driver;
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_mmu_pt *pt;
int i;
@@ -409,7 +409,7 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
psb_mmu_free_pagedir(driver->default_pd);
@@ -422,7 +422,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
atomic_t *msvdx_mmu_invaldc)
{
struct psb_mmu_driver *driver;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
driver = kmalloc(sizeof(*driver), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 129f87971002..c6b115954b7d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -82,7 +82,7 @@ static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
{
const struct gma_limit_t *limit = NULL;
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
|| gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
@@ -214,7 +214,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -361,7 +361,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
@@ -589,7 +589,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
int pipe = gma_crtc->pipe;
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 454156fcbec7..5c75eae630b5 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -20,7 +20,7 @@
static int oaktrail_output_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->iLVDS_enable)
oaktrail_lvds_init(dev, &dev_priv->mode_dev);
else
@@ -51,7 +51,7 @@ static int oaktrail_brightness;
static int oaktrail_set_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int level = bd->props.brightness;
u32 blc_pwm_ctl;
u32 max_pwm_blc;
@@ -96,7 +96,7 @@ static int oaktrail_get_brightness(struct backlight_device *bd)
static int device_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long core_clock;
u16 bl_max_freq;
uint32_t value;
@@ -133,7 +133,7 @@ static const struct backlight_ops oaktrail_ops = {
static int oaktrail_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
struct backlight_properties props;
@@ -175,7 +175,7 @@ static int oaktrail_backlight_init(struct drm_device *dev)
*/
static int oaktrail_save_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_save_area *regs = &dev_priv->regs;
struct psb_pipe *p = &regs->pipe[0];
int i;
@@ -289,7 +289,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
*/
static int oaktrail_restore_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_save_area *regs = &dev_priv->regs;
struct psb_pipe *p = &regs->pipe[0];
u32 pp_stat;
@@ -404,7 +404,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
*/
static int oaktrail_power_down(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_mask ;
u32 pwr_sts;
@@ -428,7 +428,7 @@ static int oaktrail_power_down(struct drm_device *dev)
*/
static int oaktrail_power_up(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
u32 pwr_sts, pwr_cnt;
@@ -500,7 +500,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
@@ -524,7 +524,7 @@ static int oaktrail_chip_setup(struct drm_device *dev)
static void oaktrail_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
gma_intel_teardown_gmbus(dev);
oaktrail_hdmi_teardown(dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index a097a59a9eae..6eef60a5ac27 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -130,7 +130,7 @@ static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
HDMI_WRITE(HDMI_HCR, 0x67);
@@ -145,7 +145,7 @@ static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
HDMI_WRITE(0x51a8, 0x0);
@@ -264,7 +264,7 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
int pipe = 1;
int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
@@ -494,7 +494,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
static int dpms_mode = -1;
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
u32 temp;
@@ -529,7 +529,7 @@ oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
u32 temp;
@@ -665,7 +665,7 @@ failed_connector:
void oaktrail_hdmi_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev;
struct oaktrail_hdmi_dev *hdmi_dev;
int ret;
@@ -718,7 +718,7 @@ out:
void oaktrail_hdmi_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct pci_dev *pdev;
@@ -735,7 +735,7 @@ void oaktrail_hdmi_teardown(struct drm_device *dev)
/* save HDMI register state */
void oaktrail_hdmi_save(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
@@ -788,7 +788,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
/* restore HDMI register state */
void oaktrail_hdmi_restore(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index f9b1f88c73bd..28b995ef2844 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -37,7 +37,7 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
bool on)
{
u32 pp_status;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (!gma_power_begin(dev, true))
return;
@@ -83,7 +83,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector = NULL;
@@ -155,7 +155,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
@@ -171,7 +171,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 ret;
if (gma_power_begin(dev, false)) {
@@ -191,7 +191,7 @@ static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
static void oaktrail_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
@@ -215,7 +215,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
struct drm_display_mode *mode = NULL;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
mode_dev->panel_fixed_mode = NULL;
@@ -294,7 +294,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct edid *edid;
struct i2c_adapter *i2c_adap;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
index 1d2dd6ea1c71..d1ae91fcd224 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -133,7 +133,7 @@ void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_i2c_chan *chan;
chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index a1ffc6a1c255..fef04ff8c3a9 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -147,7 +147,7 @@ static struct psb_intel_opregion *system_opregion;
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct opregion_asle *asle = dev_priv->opregion.asle;
struct backlight_device *bd = dev_priv->backlight_device;
@@ -190,7 +190,7 @@ static void psb_intel_opregion_asle_work(struct work_struct *work)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev_priv->dev, asle->bclp);
+ asle_stat |= asle_set_backlight(&dev_priv->dev, asle->bclp);
asle->aslc = asle_stat;
@@ -198,7 +198,7 @@ static void psb_intel_opregion_asle_work(struct work_struct *work)
void psb_intel_opregion_asle_intr(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
@@ -211,7 +211,7 @@ void psb_intel_opregion_asle_intr(struct drm_device *dev)
void psb_intel_opregion_enable_asle(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle && system_opregion ) {
@@ -258,7 +258,7 @@ static struct notifier_block psb_intel_opregion_notifier = {
void psb_intel_opregion_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
@@ -278,7 +278,7 @@ void psb_intel_opregion_init(struct drm_device *dev)
void psb_intel_opregion_fini(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
@@ -304,7 +304,7 @@ void psb_intel_opregion_fini(struct drm_device *dev)
int psb_intel_opregion_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
u32 opregion_phy, mboxes;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index 20ace6010f9f..d2a46d96e746 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -47,7 +47,7 @@ static DEFINE_SPINLOCK(power_ctrl_lock); /* Serialize power claim */
*/
void gma_power_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* FIXME: Move APM/OSPM base into relevant device code */
dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
@@ -82,7 +82,7 @@ void gma_power_uninit(struct drm_device *dev)
*/
static void gma_suspend_display(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->suspended)
return;
@@ -101,7 +101,7 @@ static void gma_suspend_display(struct drm_device *dev)
static void gma_resume_display(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* turn on the display power island */
dev_priv->ops->power_up(dev);
@@ -125,7 +125,7 @@ static void gma_resume_display(struct pci_dev *pdev)
static void gma_suspend_pci(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int bsm, vbt;
if (dev_priv->suspended)
@@ -155,7 +155,7 @@ static void gma_suspend_pci(struct pci_dev *pdev)
static bool gma_resume_pci(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
if (!dev_priv->suspended)
@@ -189,7 +189,7 @@ int gma_power_suspend(struct device *_dev)
{
struct pci_dev *pdev = to_pci_dev(_dev);
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
mutex_lock(&power_mutex);
if (!dev_priv->suspended) {
@@ -234,7 +234,7 @@ int gma_power_resume(struct device *_dev)
*/
bool gma_power_is_on(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
return dev_priv->display_power;
}
@@ -248,7 +248,7 @@ bool gma_power_is_on(struct drm_device *dev)
*/
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
unsigned long flags;
@@ -288,7 +288,7 @@ out_false:
*/
void gma_power_end(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long flags;
spin_lock_irqsave(&power_ctrl_lock, flags);
dev_priv->display_count--;
@@ -310,7 +310,7 @@ int psb_runtime_resume(struct device *dev)
int psb_runtime_idle(struct device *dev)
{
struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
- struct drm_psb_private *dev_priv = drmdev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(drmdev);
if (dev_priv->display_count)
return 0;
else
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 951725a0f7a3..3030f18ba022 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -18,7 +18,7 @@
static int psb_output_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
psb_intel_lvds_init(dev, &dev_priv->mode_dev);
psb_intel_sdvo_init(dev, SDVOB);
return 0;
@@ -55,7 +55,7 @@ static int psb_get_brightness(struct backlight_device *bd)
static int psb_backlight_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long core_clock;
/* u32 bl_max_freq; */
/* unsigned long value; */
@@ -110,7 +110,7 @@ static const struct backlight_ops psb_ops = {
static int psb_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
struct backlight_properties props;
@@ -149,7 +149,7 @@ static int psb_backlight_init(struct drm_device *dev)
static void psb_init_pm(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
gating &= ~3; /* Disable 2D clock gating */
@@ -167,7 +167,7 @@ static void psb_init_pm(struct drm_device *dev)
*/
static int psb_save_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_crtc *crtc;
struct gma_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
@@ -205,7 +205,7 @@ static int psb_save_display_registers(struct drm_device *dev)
*/
static int psb_restore_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_crtc *crtc;
struct gma_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
@@ -300,7 +300,7 @@ static const struct psb_offset psb_regmap[2] = {
static int psb_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->regmap = psb_regmap;
gma_get_core_freq(dev);
gma_intel_setup_gmbus(dev);
@@ -311,7 +311,7 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 58bce1a60a4d..7a10bb39ef0b 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -124,7 +124,7 @@ void psb_spank(struct drm_psb_private *dev_priv)
static int psb_do_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_gtt *pg = &dev_priv->gtt;
uint32_t stolen_gtt;
@@ -163,71 +163,74 @@ static int psb_do_init(struct drm_device *dev)
static void psb_driver_unload(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* TODO: Kill vblank etc here */
- if (dev_priv) {
- if (dev_priv->backlight_device)
- gma_backlight_exit(dev);
- psb_modeset_cleanup(dev);
+ if (dev_priv->backlight_device)
+ gma_backlight_exit(dev);
+ psb_modeset_cleanup(dev);
- if (dev_priv->ops->chip_teardown)
- dev_priv->ops->chip_teardown(dev);
+ if (dev_priv->ops->chip_teardown)
+ dev_priv->ops->chip_teardown(dev);
- psb_intel_opregion_fini(dev);
+ psb_intel_opregion_fini(dev);
- if (dev_priv->pf_pd) {
- psb_mmu_free_pagedir(dev_priv->pf_pd);
- dev_priv->pf_pd = NULL;
- }
- if (dev_priv->mmu) {
- struct psb_gtt *pg = &dev_priv->gtt;
-
- down_read(&pg->sem);
- psb_mmu_remove_pfn_sequence(
- psb_mmu_get_default_pd
- (dev_priv->mmu),
- pg->mmu_gatt_start,
- dev_priv->vram_stolen_size >> PAGE_SHIFT);
- up_read(&pg->sem);
- psb_mmu_driver_takedown(dev_priv->mmu);
- dev_priv->mmu = NULL;
- }
- psb_gtt_takedown(dev);
- if (dev_priv->scratch_page) {
- set_pages_wb(dev_priv->scratch_page, 1);
- __free_page(dev_priv->scratch_page);
- dev_priv->scratch_page = NULL;
- }
- if (dev_priv->vdc_reg) {
- iounmap(dev_priv->vdc_reg);
- dev_priv->vdc_reg = NULL;
- }
- if (dev_priv->sgx_reg) {
- iounmap(dev_priv->sgx_reg);
- dev_priv->sgx_reg = NULL;
- }
- if (dev_priv->aux_reg) {
- iounmap(dev_priv->aux_reg);
- dev_priv->aux_reg = NULL;
- }
- pci_dev_put(dev_priv->aux_pdev);
- pci_dev_put(dev_priv->lpc_pdev);
+ if (dev_priv->pf_pd) {
+ psb_mmu_free_pagedir(dev_priv->pf_pd);
+ dev_priv->pf_pd = NULL;
+ }
+ if (dev_priv->mmu) {
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ down_read(&pg->sem);
+ psb_mmu_remove_pfn_sequence(
+ psb_mmu_get_default_pd
+ (dev_priv->mmu),
+ pg->mmu_gatt_start,
+ dev_priv->vram_stolen_size >> PAGE_SHIFT);
+ up_read(&pg->sem);
+ psb_mmu_driver_takedown(dev_priv->mmu);
+ dev_priv->mmu = NULL;
+ }
+ psb_gtt_takedown(dev);
+ if (dev_priv->scratch_page) {
+ set_pages_wb(dev_priv->scratch_page, 1);
+ __free_page(dev_priv->scratch_page);
+ dev_priv->scratch_page = NULL;
+ }
+ if (dev_priv->vdc_reg) {
+ iounmap(dev_priv->vdc_reg);
+ dev_priv->vdc_reg = NULL;
+ }
+ if (dev_priv->sgx_reg) {
+ iounmap(dev_priv->sgx_reg);
+ dev_priv->sgx_reg = NULL;
+ }
+ if (dev_priv->aux_reg) {
+ iounmap(dev_priv->aux_reg);
+ dev_priv->aux_reg = NULL;
+ }
+ pci_dev_put(dev_priv->aux_pdev);
+ pci_dev_put(dev_priv->lpc_pdev);
- /* Destroy VBT data */
- psb_intel_destroy_bios(dev);
+ /* Destroy VBT data */
+ psb_intel_destroy_bios(dev);
- kfree(dev_priv);
- dev->dev_private = NULL;
- }
gma_power_uninit(dev);
}
+static void psb_device_release(void *data)
+{
+ struct drm_device *dev = data;
+
+ psb_driver_unload(dev);
+}
+
static int psb_driver_load(struct drm_device *dev, unsigned long flags)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct drm_psb_private *dev_priv;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long resource_start, resource_len;
unsigned long irqflags;
int ret = -ENOMEM;
@@ -235,14 +238,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
struct gma_encoder *gma_encoder;
struct psb_gtt *pg;
- /* allocating and initializing driver private data */
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
+ /* initializing driver private data */
dev_priv->ops = (struct psb_ops *)flags;
- dev_priv->dev = dev;
- dev->dev_private = (void *) dev_priv;
pg = &dev_priv->gtt;
@@ -409,8 +407,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
pm_runtime_enable(dev->dev);
pm_runtime_set_active(dev->dev);
#endif
- /* Intel drm driver load is done, continue doing pvr load */
- return 0;
+
+ return devm_add_action_or_reset(dev->dev, psb_device_release, dev);
+
out_err:
psb_driver_unload(dev);
return ret;
@@ -431,7 +430,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
static unsigned int runtime_allowed;
if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
@@ -445,38 +444,30 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct drm_psb_private *dev_priv;
struct drm_device *dev;
int ret;
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto err_pci_disable_device;
- }
+ dev_priv = devm_drm_dev_alloc(&pdev->dev, &driver, struct drm_psb_private, dev);
+ if (IS_ERR(dev_priv))
+ return PTR_ERR(dev_priv);
+ dev = &dev_priv->dev;
pci_set_drvdata(pdev, dev);
ret = psb_driver_load(dev, ent->driver_data);
if (ret)
- goto err_drm_dev_put;
+ return ret;
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
- goto err_psb_driver_unload;
+ return ret;
return 0;
-
-err_psb_driver_unload:
- psb_driver_unload(dev);
-err_drm_dev_put:
- drm_dev_put(dev);
-err_pci_disable_device:
- pci_disable_device(pdev);
- return ret;
}
static void psb_pci_remove(struct pci_dev *pdev)
@@ -484,8 +475,6 @@ static void psb_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
- psb_driver_unload(dev);
- drm_dev_put(dev);
}
static const struct dev_pm_ops psb_pm_ops = {
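Note: the psb_drv.c changes above move the driver to fully device-managed teardown; the PCI enable, the drm_device allocation and the unload path are all released automatically on unbind. A minimal sketch of that probe pattern, assuming hypothetical my_device/my_driver/my_load()/my_unload() names (illustrative only, not from the patch):

#include <linux/pci.h>
#include <drm/drm_drv.h>

struct my_device {
	struct drm_device dev;		/* embedded, freed with the last drm ref */
};

static void my_device_release(void *data)
{
	struct drm_device *dev = data;

	my_unload(dev);			/* undo whatever my_load() set up */
}

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct my_device *priv;
	struct drm_device *dev;
	int ret;

	ret = pcim_enable_device(pdev);	/* re-disabled on driver detach */
	if (ret)
		return ret;

	priv = devm_drm_dev_alloc(&pdev->dev, &my_driver,
				  struct my_device, dev);
	if (IS_ERR(priv))
		return PTR_ERR(priv);
	dev = &priv->dev;
	pci_set_drvdata(pdev, dev);

	ret = my_load(dev);
	if (ret)
		return ret;

	/* Runs my_device_release() automatically on unbind, so remove()
	 * only has to call drm_dev_unregister(). */
	ret = devm_add_action_or_reset(&pdev->dev, my_device_release, dev);
	if (ret)
		return ret;

	return drm_dev_register(dev, 0);
}
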
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index f2bae270ca7b..0439b10d3db5 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -389,7 +389,8 @@ struct psb_ops;
struct intel_scu_ipc_dev;
struct drm_psb_private {
- struct drm_device *dev;
+ struct drm_device dev;
+
struct pci_dev *aux_pdev; /* Currently only used by mrst */
struct pci_dev *lpc_pdev; /* Currently only used by mrst */
const struct psb_ops *ops;
@@ -567,6 +568,10 @@ struct drm_psb_private {
uint8_t panel_type;
};
+static inline struct drm_psb_private *to_drm_psb_private(struct drm_device *dev)
+{
+ return container_of(dev, struct drm_psb_private, dev);
+}
/* Operations for each board type */
struct psb_ops {
@@ -618,11 +623,6 @@ struct psb_ops {
extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
extern int drm_pick_crtcs(struct drm_device *dev);
-static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
-{
- return (struct drm_psb_private *) dev->dev_private;
-}
-
/* psb_irq.c */
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
@@ -729,13 +729,13 @@ static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
return ioread32(dev_priv->vdc_reg + reg);
}
static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
return ioread32(dev_priv->aux_reg + reg);
}
@@ -761,14 +761,14 @@ static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite32((val), dev_priv->vdc_reg + (reg));
}
static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite32((val), dev_priv->aux_reg + (reg));
}
@@ -789,7 +789,7 @@ static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
static inline void REGISTER_WRITE16(struct drm_device *dev,
uint32_t reg, uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite16((val), dev_priv->vdc_reg + (reg));
}
@@ -798,7 +798,7 @@ static inline void REGISTER_WRITE16(struct drm_device *dev,
static inline void REGISTER_WRITE8(struct drm_device *dev,
uint32_t reg, uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite8((val), dev_priv->vdc_reg + (reg));
}
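Note: the psb_drv.h hunk above is what enables every to_drm_psb_private() conversion in this series; struct drm_device is now embedded in struct drm_psb_private, so the private data is recovered with container_of() instead of the old dev_private back-pointer. A minimal sketch of that upcast pattern, with struct my_device standing in for drm_psb_private:

#include <linux/kernel.h>	/* container_of() */
#include <drm/drm_device.h>

struct my_device {
	struct drm_device dev;	/* must be embedded by value, not a pointer */
	void __iomem *mmio;
};

static inline struct my_device *to_my_device(struct drm_device *dev)
{
	/* Walk back from the embedded member to the enclosing structure;
	 * no extra storage or NULL check is needed. */
	return container_of(dev, struct my_device, dev);
}
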
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 359606429316..f5f259fde88e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -95,7 +95,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = gma_crtc->pipe;
@@ -298,7 +298,7 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
@@ -380,7 +380,7 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
int hsync;
int vtot;
int vsync;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -451,7 +451,7 @@ const struct gma_clock_funcs psb_clock_funcs = {
static void psb_intel_cursor_init(struct drm_device *dev,
struct gma_crtc *gma_crtc)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
struct gtt_range *cursor_gt;
@@ -481,7 +481,7 @@ out:
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc;
int i;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index ace95d4bdb6f..ac97e0d3c7dd 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -59,7 +59,7 @@ struct psb_intel_lvds_priv {
*/
static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 ret;
if (gma_power_begin(dev, false)) {
@@ -88,8 +88,7 @@ static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
unsigned int level)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
u8 out_buf[2];
@@ -128,8 +127,7 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 max_pwm_blc;
u32 blc_pwm_duty_cycle;
@@ -161,7 +159,7 @@ static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
*/
void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_dbg(dev->dev, "backlight level is %d\n", level);
@@ -183,7 +181,7 @@ void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
*/
static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 blc_pwm_ctl;
if (gma_power_begin(dev, false)) {
@@ -208,7 +206,7 @@ static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
*/
static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
u32 pp_status;
@@ -254,8 +252,7 @@ static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
static void psb_intel_lvds_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv =
(struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
@@ -335,7 +332,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
@@ -365,7 +362,7 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
@@ -426,7 +423,7 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
@@ -444,7 +441,7 @@ static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
static void psb_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
@@ -459,7 +456,7 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pfit_control;
/*
@@ -493,7 +490,7 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
static int psb_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
@@ -641,7 +638,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 lvds;
int pipe;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 355da2856389..042c4392e676 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1217,7 +1217,7 @@ psb_intel_sdvo_get_edid(struct drm_connector *connector)
static struct edid *
psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
return drm_get_edid(connector,
&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
@@ -1486,7 +1486,7 @@ static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct drm_display_mode *newmode;
/*
@@ -1570,7 +1570,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
uint16_t temp_value;
uint8_t cmd;
int ret;
@@ -1878,7 +1878,7 @@ psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int devi
static u8
psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (IS_SDVOB(sdvo_reg)) {
@@ -2415,7 +2415,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder;
struct psb_intel_sdvo *psb_intel_sdvo;
int i;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index deb1fbc1f748..ccf402007beb 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -76,12 +76,12 @@ psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
u32 reg = psb_pipestat(pipe);
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
- if (gma_power_begin(dev_priv->dev, false)) {
+ if (gma_power_begin(&dev_priv->dev, false)) {
u32 writeVal = PSB_RVDC32(reg);
writeVal |= (mask | (mask >> 16));
PSB_WVDC32(writeVal, reg);
(void) PSB_RVDC32(reg);
- gma_power_end(dev_priv->dev);
+ gma_power_end(&dev_priv->dev);
}
}
}
@@ -92,12 +92,12 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
if ((dev_priv->pipestat[pipe] & mask) != 0) {
u32 reg = psb_pipestat(pipe);
dev_priv->pipestat[pipe] &= ~mask;
- if (gma_power_begin(dev_priv->dev, false)) {
+ if (gma_power_begin(&dev_priv->dev, false)) {
u32 writeVal = PSB_RVDC32(reg);
writeVal &= ~mask;
PSB_WVDC32(writeVal, reg);
(void) PSB_RVDC32(reg);
- gma_power_end(dev_priv->dev);
+ gma_power_end(&dev_priv->dev);
}
}
}
@@ -107,8 +107,7 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
*/
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t pipe_stat_val = 0;
uint32_t pipe_stat_reg = psb_pipestat(pipe);
@@ -178,7 +177,7 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
*/
static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 val, addr;
if (stat_1 & _PSB_CE_TWOD_COMPLETE)
@@ -226,7 +225,7 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
static irqreturn_t psb_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
u32 sgx_stat_1, sgx_stat_2;
int handled = 0;
@@ -277,8 +276,7 @@ static irqreturn_t psb_irq_handler(int irq, void *arg)
void psb_irq_preinstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -307,7 +305,7 @@ void psb_irq_preinstall(struct drm_device *dev)
void psb_irq_postinstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
unsigned int i;
@@ -356,7 +354,7 @@ int psb_irq_install(struct drm_device *dev, unsigned int irq)
void psb_irq_uninstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long irqflags;
unsigned int i;
@@ -397,7 +395,7 @@ int psb_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
uint32_t reg_val = 0;
uint32_t pipeconf_reg = mid_pipeconf(pipe);
@@ -433,7 +431,7 @@ void psb_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index 97b0c52bfd8a..58a7fe392636 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -14,7 +14,7 @@
static void psb_lid_timer_func(struct timer_list *t)
{
struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
- struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+ struct drm_device *dev = &dev_priv->dev;
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
u32 __iomem *lid_state = dev_priv->opregion.lid_state;
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index eb4e08846da4..3f9d4b9a1e3d 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -523,7 +523,13 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
switch (format) {
case GUD_DRM_FORMAT_R1:
fallthrough;
+ case DRM_FORMAT_R8:
+ fallthrough;
case GUD_DRM_FORMAT_XRGB1111:
+ fallthrough;
+ case DRM_FORMAT_RGB332:
+ fallthrough;
+ case DRM_FORMAT_RGB888:
if (!xrgb8888_emulation_format)
xrgb8888_emulation_format = info;
break;
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index 2a388e27d5d7..e351a1f1420d 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -80,10 +80,16 @@ static inline u8 gud_from_fourcc(u32 fourcc)
switch (fourcc) {
case GUD_DRM_FORMAT_R1:
return GUD_PIXEL_FORMAT_R1;
+ case DRM_FORMAT_R8:
+ return GUD_PIXEL_FORMAT_R8;
case GUD_DRM_FORMAT_XRGB1111:
return GUD_PIXEL_FORMAT_XRGB1111;
+ case DRM_FORMAT_RGB332:
+ return GUD_PIXEL_FORMAT_RGB332;
case DRM_FORMAT_RGB565:
return GUD_PIXEL_FORMAT_RGB565;
+ case DRM_FORMAT_RGB888:
+ return GUD_PIXEL_FORMAT_RGB888;
case DRM_FORMAT_XRGB8888:
return GUD_PIXEL_FORMAT_XRGB8888;
case DRM_FORMAT_ARGB8888:
@@ -98,10 +104,16 @@ static inline u32 gud_to_fourcc(u8 format)
switch (format) {
case GUD_PIXEL_FORMAT_R1:
return GUD_DRM_FORMAT_R1;
+ case GUD_PIXEL_FORMAT_R8:
+ return DRM_FORMAT_R8;
case GUD_PIXEL_FORMAT_XRGB1111:
return GUD_DRM_FORMAT_XRGB1111;
+ case GUD_PIXEL_FORMAT_RGB332:
+ return DRM_FORMAT_RGB332;
case GUD_PIXEL_FORMAT_RGB565:
return DRM_FORMAT_RGB565;
+ case GUD_PIXEL_FORMAT_RGB888:
+ return DRM_FORMAT_RGB888;
case GUD_PIXEL_FORMAT_XRGB8888:
return DRM_FORMAT_XRGB8888;
case GUD_PIXEL_FORMAT_ARGB8888:
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index b9b0e435ea0f..daf75c178c2b 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -189,8 +189,14 @@ retry:
ret = -ENOMEM;
goto end_cpu_access;
}
+ } else if (format->format == DRM_FORMAT_R8) {
+ drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, rect);
+ } else if (format->format == DRM_FORMAT_RGB332) {
+ drm_fb_xrgb8888_to_rgb332(buf, vaddr, fb, rect);
} else if (format->format == DRM_FORMAT_RGB565) {
drm_fb_xrgb8888_to_rgb565(buf, vaddr, fb, rect, gud_is_big_endian());
+ } else if (format->format == DRM_FORMAT_RGB888) {
+ drm_fb_xrgb8888_to_rgb888(buf, vaddr, fb, rect);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index f0a61a9474fc..8beef57ba52b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -182,7 +182,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
man->use_tt)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
i915_ttm_select_tt_caching(obj));
@@ -451,7 +451,7 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
if (bo->type == ttm_bo_type_kernel)
return -EINVAL;
- if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+ if (ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))
return 0;
intel_engine_pm_get(i915->gt.migrate.context->engine);
@@ -525,7 +525,7 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
/* Populate ttm with pages if needed. Typically system memory. */
if (bo->ttm && (dst_man->use_tt ||
- (bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
+ (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 196f74a0834e..4368112023f7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -224,8 +224,6 @@ struct mga_device {
enum mga_type type;
- int fb_mtrr;
-
union {
struct {
long ref_clk;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
index b667371b69a4..fa996d46feed 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mm.c
@@ -75,26 +75,12 @@ static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem,
return offset - 65536;
}
-static void mgag200_mm_release(struct drm_device *dev, void *ptr)
-{
- struct mga_device *mdev = to_mga_device(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- mdev->vram_fb_available = 0;
- iounmap(mdev->vram);
- arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- arch_phys_wc_del(mdev->fb_mtrr);
- mdev->fb_mtrr = 0;
-}
-
int mgag200_mm_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 misc;
resource_size_t start, len;
- int ret;
WREG_ECRT(0x04, 0x00);
@@ -112,15 +98,13 @@ int mgag200_mm_init(struct mga_device *mdev)
return -ENXIO;
}
- arch_io_reserve_memtype_wc(start, len);
-
- mdev->fb_mtrr = arch_phys_wc_add(start, len);
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_io_reserve_memtype_wc(dev->dev, start, len);
+ devm_arch_phys_wc_add(dev->dev, start, len);
- mdev->vram = ioremap(start, len);
- if (!mdev->vram) {
- ret = -ENOMEM;
- goto err_arch_phys_wc_del;
- }
+ mdev->vram = devm_ioremap(dev->dev, start, len);
+ if (!mdev->vram)
+ return -ENOMEM;
mdev->mc.vram_size = mgag200_probe_vram(mdev, mdev->vram, len);
mdev->mc.vram_base = start;
@@ -128,10 +112,5 @@ int mgag200_mm_init(struct mga_device *mdev)
mdev->vram_fb_available = mdev->mc.vram_size;
- return drmm_add_action_or_reset(dev, mgag200_mm_release, NULL);
-
-err_arch_phys_wc_del:
- arch_phys_wc_del(mdev->fb_mtrr);
- arch_io_free_memtype_wc(start, len);
- return ret;
+ return 0;
}
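Note: the mgag200_mm.c rework above relies on the devm_* variants of the write-combining and ioremap helpers, so the explicit release callback and error unwinding can go away. A minimal sketch of the same mapping flow, assuming BAR 0 holds the framebuffer and using an illustrative helper name:

#include <linux/io.h>
#include <linux/pci.h>

/* Illustrative helper, not part of the patch. */
static void __iomem *example_map_fb_bar(struct device *dev, struct pci_dev *pdev)
{
	resource_size_t start = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);

	/* Best effort: failure here only costs write-combining performance. */
	devm_arch_io_reserve_memtype_wc(dev, start, len);
	devm_arch_phys_wc_add(dev, start, len);

	/* Mapping is torn down automatically when the driver unbinds. */
	return devm_ioremap(dev, start, len);
}
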
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index e9c6af78b1d7..5879f67bc88c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,9 +3,9 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on IOMMU_SUPPORT
- depends on OF && COMMON_CLK
+ depends on (OF && COMMON_CLK) || COMPILE_TEST
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 22308a1b66fc..40a9863f5951 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -85,7 +85,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
paddr = physaddr(obj);
for (i = 0; i < npages; i++) {
- p[i] = phys_to_page(paddr);
+ p[i] = pfn_to_page(__phys_to_pfn(paddr));
paddr += PAGE_SIZE;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 33dca2565cca..b2c7e0802ac3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1249,7 +1249,7 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (ttm_tt_is_populated(ttm))
return 0;
@@ -1272,7 +1272,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
{
struct nouveau_drm *drm;
struct device *dev;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (slave)
return;
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index db9d0b86d542..529561b4fbbc 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -45,6 +45,7 @@ struct boe_panel {
const struct panel_desc *desc;
enum drm_panel_orientation orientation;
+ struct regulator *pp3300;
struct regulator *pp1800;
struct regulator *avee;
struct regulator *avdd;
@@ -74,6 +75,670 @@ struct panel_init_cmd {
.len = sizeof((char[]){__VA_ARGS__}), \
.data = (char[]){__VA_ARGS__} }
+static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x05, 0xD9),
+ _INIT_DCS_CMD(0x07, 0x78),
+ _INIT_DCS_CMD(0x08, 0x5A),
+ _INIT_DCS_CMD(0x0D, 0x63),
+ _INIT_DCS_CMD(0x0E, 0x91),
+ _INIT_DCS_CMD(0x0F, 0x73),
+ _INIT_DCS_CMD(0x95, 0xEB),
+ _INIT_DCS_CMD(0x96, 0xEB),
+ _INIT_DCS_CMD(0x30, 0x11),
+ _INIT_DCS_CMD(0x6D, 0x66),
+ _INIT_DCS_CMD(0x75, 0xA2),
+ _INIT_DCS_CMD(0x77, 0x3B),
+
+ _INIT_DCS_CMD(0xB0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
+ _INIT_DCS_CMD(0xB1, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
+ _INIT_DCS_CMD(0xB2, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
+ _INIT_DCS_CMD(0xB3, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
+
+ _INIT_DCS_CMD(0xB4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
+ _INIT_DCS_CMD(0xB5, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
+ _INIT_DCS_CMD(0xB6, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
+ _INIT_DCS_CMD(0xB7, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
+ _INIT_DCS_CMD(0xB8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
+ _INIT_DCS_CMD(0xB9, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
+ _INIT_DCS_CMD(0xBA, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
+ _INIT_DCS_CMD(0xBB, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
+
+ _INIT_DCS_CMD(0xFF, 0x21),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
+ _INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
+ _INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
+ _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+
+ _INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
+ _INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
+ _INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
+ _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+
+ _INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
+ _INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
+ _INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
+ _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+
+ _INIT_DCS_CMD(0xFF, 0x24),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x00, 0x00),
+ _INIT_DCS_CMD(0x01, 0x00),
+
+ _INIT_DCS_CMD(0x02, 0x1C),
+ _INIT_DCS_CMD(0x03, 0x1C),
+
+ _INIT_DCS_CMD(0x04, 0x1D),
+ _INIT_DCS_CMD(0x05, 0x1D),
+
+ _INIT_DCS_CMD(0x06, 0x04),
+ _INIT_DCS_CMD(0x07, 0x04),
+
+ _INIT_DCS_CMD(0x08, 0x0F),
+ _INIT_DCS_CMD(0x09, 0x0F),
+
+ _INIT_DCS_CMD(0x0A, 0x0E),
+ _INIT_DCS_CMD(0x0B, 0x0E),
+
+ _INIT_DCS_CMD(0x0C, 0x0D),
+ _INIT_DCS_CMD(0x0D, 0x0D),
+
+ _INIT_DCS_CMD(0x0E, 0x0C),
+ _INIT_DCS_CMD(0x0F, 0x0C),
+
+ _INIT_DCS_CMD(0x10, 0x08),
+ _INIT_DCS_CMD(0x11, 0x08),
+
+ _INIT_DCS_CMD(0x12, 0x00),
+ _INIT_DCS_CMD(0x13, 0x00),
+ _INIT_DCS_CMD(0x14, 0x00),
+ _INIT_DCS_CMD(0x15, 0x00),
+
+ _INIT_DCS_CMD(0x16, 0x00),
+ _INIT_DCS_CMD(0x17, 0x00),
+
+ _INIT_DCS_CMD(0x18, 0x1C),
+ _INIT_DCS_CMD(0x19, 0x1C),
+
+ _INIT_DCS_CMD(0x1A, 0x1D),
+ _INIT_DCS_CMD(0x1B, 0x1D),
+
+ _INIT_DCS_CMD(0x1C, 0x04),
+ _INIT_DCS_CMD(0x1D, 0x04),
+
+ _INIT_DCS_CMD(0x1E, 0x0F),
+ _INIT_DCS_CMD(0x1F, 0x0F),
+
+ _INIT_DCS_CMD(0x20, 0x0E),
+ _INIT_DCS_CMD(0x21, 0x0E),
+
+ _INIT_DCS_CMD(0x22, 0x0D),
+ _INIT_DCS_CMD(0x23, 0x0D),
+
+ _INIT_DCS_CMD(0x24, 0x0C),
+ _INIT_DCS_CMD(0x25, 0x0C),
+
+ _INIT_DCS_CMD(0x26, 0x08),
+ _INIT_DCS_CMD(0x27, 0x08),
+
+ _INIT_DCS_CMD(0x28, 0x00),
+ _INIT_DCS_CMD(0x29, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x00),
+ _INIT_DCS_CMD(0x2B, 0x00),
+
+ _INIT_DCS_CMD(0x2D, 0x20),
+ _INIT_DCS_CMD(0x2F, 0x0A),
+ _INIT_DCS_CMD(0x30, 0x44),
+ _INIT_DCS_CMD(0x33, 0x0C),
+ _INIT_DCS_CMD(0x34, 0x32),
+
+ _INIT_DCS_CMD(0x37, 0x44),
+ _INIT_DCS_CMD(0x38, 0x40),
+ _INIT_DCS_CMD(0x39, 0x00),
+ _INIT_DCS_CMD(0x3A, 0x5D),
+ _INIT_DCS_CMD(0x3B, 0x60),
+ _INIT_DCS_CMD(0x3D, 0x42),
+ _INIT_DCS_CMD(0x3F, 0x06),
+ _INIT_DCS_CMD(0x43, 0x06),
+ _INIT_DCS_CMD(0x47, 0x66),
+ _INIT_DCS_CMD(0x4A, 0x5D),
+ _INIT_DCS_CMD(0x4B, 0x60),
+ _INIT_DCS_CMD(0x4C, 0x91),
+ _INIT_DCS_CMD(0x4D, 0x21),
+ _INIT_DCS_CMD(0x4E, 0x43),
+ _INIT_DCS_CMD(0x51, 0x12),
+ _INIT_DCS_CMD(0x52, 0x34),
+ _INIT_DCS_CMD(0x55, 0x82, 0x02),
+ _INIT_DCS_CMD(0x56, 0x04),
+ _INIT_DCS_CMD(0x58, 0x21),
+ _INIT_DCS_CMD(0x59, 0x30),
+ _INIT_DCS_CMD(0x5A, 0x60),
+ _INIT_DCS_CMD(0x5B, 0x50),
+ _INIT_DCS_CMD(0x5E, 0x00, 0x06),
+ _INIT_DCS_CMD(0x5F, 0x00),
+ _INIT_DCS_CMD(0x65, 0x82),
+ _INIT_DCS_CMD(0x7E, 0x20),
+ _INIT_DCS_CMD(0x7F, 0x3C),
+ _INIT_DCS_CMD(0x82, 0x04),
+ _INIT_DCS_CMD(0x97, 0xC0),
+ _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
+ _INIT_DCS_CMD(0x91, 0x44),
+ _INIT_DCS_CMD(0x92, 0xA9),
+ _INIT_DCS_CMD(0x93, 0x1A),
+ _INIT_DCS_CMD(0x94, 0x96),
+ _INIT_DCS_CMD(0xD7, 0x55),
+ _INIT_DCS_CMD(0xDA, 0x0A),
+ _INIT_DCS_CMD(0xDE, 0x08),
+ _INIT_DCS_CMD(0xDB, 0x05),
+ _INIT_DCS_CMD(0xDC, 0xA9),
+ _INIT_DCS_CMD(0xDD, 0x22),
+
+ _INIT_DCS_CMD(0xDF, 0x05),
+ _INIT_DCS_CMD(0xE0, 0xA9),
+ _INIT_DCS_CMD(0xE1, 0x05),
+ _INIT_DCS_CMD(0xE2, 0xA9),
+ _INIT_DCS_CMD(0xE3, 0x05),
+ _INIT_DCS_CMD(0xE4, 0xA9),
+ _INIT_DCS_CMD(0xE5, 0x05),
+ _INIT_DCS_CMD(0xE6, 0xA9),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x8D, 0x00),
+ _INIT_DCS_CMD(0x8E, 0x00),
+ _INIT_DCS_CMD(0xB5, 0x90),
+ _INIT_DCS_CMD(0xFF, 0x25),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x05, 0x00),
+ _INIT_DCS_CMD(0x19, 0x07),
+ _INIT_DCS_CMD(0x1F, 0x60),
+ _INIT_DCS_CMD(0x20, 0x50),
+ _INIT_DCS_CMD(0x26, 0x60),
+ _INIT_DCS_CMD(0x27, 0x50),
+ _INIT_DCS_CMD(0x33, 0x60),
+ _INIT_DCS_CMD(0x34, 0x50),
+ _INIT_DCS_CMD(0x3F, 0xE0),
+ _INIT_DCS_CMD(0x40, 0x00),
+ _INIT_DCS_CMD(0x44, 0x00),
+ _INIT_DCS_CMD(0x45, 0x40),
+ _INIT_DCS_CMD(0x48, 0x60),
+ _INIT_DCS_CMD(0x49, 0x50),
+ _INIT_DCS_CMD(0x5B, 0x00),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0xD0),
+ _INIT_DCS_CMD(0x61, 0x60),
+ _INIT_DCS_CMD(0x62, 0x50),
+ _INIT_DCS_CMD(0xF1, 0x10),
+ _INIT_DCS_CMD(0xFF, 0x2A),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x64, 0x16),
+ _INIT_DCS_CMD(0x67, 0x16),
+ _INIT_DCS_CMD(0x6A, 0x16),
+
+ _INIT_DCS_CMD(0x70, 0x30),
+
+ _INIT_DCS_CMD(0xA2, 0xF3),
+ _INIT_DCS_CMD(0xA3, 0xFF),
+ _INIT_DCS_CMD(0xA4, 0xFF),
+ _INIT_DCS_CMD(0xA5, 0xFF),
+
+ _INIT_DCS_CMD(0xD6, 0x08),
+
+ _INIT_DCS_CMD(0xFF, 0x26),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x00, 0xA1),
+
+ _INIT_DCS_CMD(0x02, 0x31),
+ _INIT_DCS_CMD(0x04, 0x28),
+ _INIT_DCS_CMD(0x06, 0x30),
+ _INIT_DCS_CMD(0x0C, 0x16),
+ _INIT_DCS_CMD(0x0D, 0x0D),
+ _INIT_DCS_CMD(0x0F, 0x00),
+ _INIT_DCS_CMD(0x11, 0x00),
+ _INIT_DCS_CMD(0x12, 0x50),
+ _INIT_DCS_CMD(0x13, 0x56),
+ _INIT_DCS_CMD(0x14, 0x57),
+ _INIT_DCS_CMD(0x15, 0x00),
+ _INIT_DCS_CMD(0x16, 0x10),
+ _INIT_DCS_CMD(0x17, 0xA0),
+ _INIT_DCS_CMD(0x18, 0x86),
+ _INIT_DCS_CMD(0x19, 0x0D),
+ _INIT_DCS_CMD(0x1A, 0x7F),
+ _INIT_DCS_CMD(0x1B, 0x0C),
+ _INIT_DCS_CMD(0x1C, 0xBF),
+ _INIT_DCS_CMD(0x22, 0x00),
+ _INIT_DCS_CMD(0x23, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x0D),
+ _INIT_DCS_CMD(0x2B, 0x7F),
+
+ _INIT_DCS_CMD(0x1D, 0x00),
+ _INIT_DCS_CMD(0x1E, 0x65),
+ _INIT_DCS_CMD(0x1F, 0x65),
+ _INIT_DCS_CMD(0x24, 0x00),
+ _INIT_DCS_CMD(0x25, 0x65),
+ _INIT_DCS_CMD(0x2F, 0x05),
+ _INIT_DCS_CMD(0x30, 0x65),
+ _INIT_DCS_CMD(0x31, 0x05),
+ _INIT_DCS_CMD(0x32, 0x7D),
+ _INIT_DCS_CMD(0x39, 0x00),
+ _INIT_DCS_CMD(0x3A, 0x65),
+ _INIT_DCS_CMD(0x20, 0x01),
+ _INIT_DCS_CMD(0x33, 0x11),
+ _INIT_DCS_CMD(0x34, 0x78),
+ _INIT_DCS_CMD(0x35, 0x16),
+ _INIT_DCS_CMD(0xC8, 0x04),
+ _INIT_DCS_CMD(0xC9, 0x80),
+ _INIT_DCS_CMD(0xCA, 0x4E),
+ _INIT_DCS_CMD(0xCB, 0x00),
+ _INIT_DCS_CMD(0xA9, 0x4C),
+ _INIT_DCS_CMD(0xAA, 0x47),
+
+ _INIT_DCS_CMD(0xFF, 0x27),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x56, 0x06),
+ _INIT_DCS_CMD(0x58, 0x80),
+ _INIT_DCS_CMD(0x59, 0x75),
+ _INIT_DCS_CMD(0x5A, 0x00),
+ _INIT_DCS_CMD(0x5B, 0x02),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0x20),
+ _INIT_DCS_CMD(0x5F, 0x10),
+ _INIT_DCS_CMD(0x60, 0x00),
+ _INIT_DCS_CMD(0x61, 0x2E),
+ _INIT_DCS_CMD(0x62, 0x00),
+ _INIT_DCS_CMD(0x63, 0x01),
+ _INIT_DCS_CMD(0x64, 0x43),
+ _INIT_DCS_CMD(0x65, 0x2D),
+ _INIT_DCS_CMD(0x66, 0x00),
+ _INIT_DCS_CMD(0x67, 0x01),
+ _INIT_DCS_CMD(0x68, 0x44),
+
+ _INIT_DCS_CMD(0x00, 0x00),
+ _INIT_DCS_CMD(0x78, 0x00),
+ _INIT_DCS_CMD(0xC3, 0x00),
+
+ _INIT_DCS_CMD(0xFF, 0x2A),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x22, 0x2F),
+ _INIT_DCS_CMD(0x23, 0x08),
+
+ _INIT_DCS_CMD(0x24, 0x00),
+ _INIT_DCS_CMD(0x25, 0x65),
+ _INIT_DCS_CMD(0x26, 0xF8),
+ _INIT_DCS_CMD(0x27, 0x00),
+ _INIT_DCS_CMD(0x28, 0x1A),
+ _INIT_DCS_CMD(0x29, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x1A),
+ _INIT_DCS_CMD(0x2B, 0x00),
+ _INIT_DCS_CMD(0x2D, 0x1A),
+
+ _INIT_DCS_CMD(0xFF, 0x23),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x00, 0x80),
+ _INIT_DCS_CMD(0x07, 0x00),
+
+ _INIT_DCS_CMD(0xFF, 0xE0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x14, 0x60),
+ _INIT_DCS_CMD(0x16, 0xC0),
+
+ _INIT_DCS_CMD(0xFF, 0xF0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x3A, 0x08),
+
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xB9, 0x01),
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x18, 0x40),
+
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xB9, 0x02),
+ _INIT_DCS_CMD(0x35, 0x00),
+ _INIT_DCS_CMD(0x51, 0x00, 0xFF),
+ _INIT_DCS_CMD(0x53, 0x24),
+ _INIT_DCS_CMD(0x55, 0x00),
+ _INIT_DCS_CMD(0xBB, 0x13),
+ _INIT_DCS_CMD(0x3B, 0x03, 0x96, 0x1A, 0x04, 0x04),
+ _INIT_DELAY_CMD(100),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(200),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(100),
+ {},
+};
+
+static const struct panel_init_cmd inx_init_cmd[] = {
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x05, 0xD1),
+ _INIT_DCS_CMD(0x0D, 0x63),
+ _INIT_DCS_CMD(0x07, 0x8C),
+ _INIT_DCS_CMD(0x08, 0x4B),
+ _INIT_DCS_CMD(0x0E, 0x91),
+ _INIT_DCS_CMD(0x0F, 0x69),
+ _INIT_DCS_CMD(0x95, 0xFF),
+ _INIT_DCS_CMD(0x96, 0xFF),
+ _INIT_DCS_CMD(0x9D, 0x0A),
+ _INIT_DCS_CMD(0x9E, 0x0A),
+ _INIT_DCS_CMD(0x69, 0x98),
+ _INIT_DCS_CMD(0x75, 0xA2),
+ _INIT_DCS_CMD(0x77, 0xB3),
+ _INIT_DCS_CMD(0xFF, 0x24),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x91, 0x44),
+ _INIT_DCS_CMD(0x92, 0x7A),
+ _INIT_DCS_CMD(0x93, 0x1A),
+ _INIT_DCS_CMD(0x94, 0x40),
+ _INIT_DCS_CMD(0x9A, 0x08),
+ _INIT_DCS_CMD(0x60, 0x96),
+ _INIT_DCS_CMD(0x61, 0xD0),
+ _INIT_DCS_CMD(0x63, 0x70),
+ _INIT_DCS_CMD(0xC2, 0xCF),
+ _INIT_DCS_CMD(0x9B, 0x0F),
+ _INIT_DCS_CMD(0x9A, 0x08),
+ _INIT_DCS_CMD(0x00, 0x03),
+ _INIT_DCS_CMD(0x01, 0x03),
+ _INIT_DCS_CMD(0x02, 0x03),
+ _INIT_DCS_CMD(0x03, 0x03),
+ _INIT_DCS_CMD(0x04, 0x03),
+ _INIT_DCS_CMD(0x05, 0x03),
+ _INIT_DCS_CMD(0x06, 0x22),
+ _INIT_DCS_CMD(0x07, 0x06),
+ _INIT_DCS_CMD(0x08, 0x00),
+ _INIT_DCS_CMD(0x09, 0x1D),
+ _INIT_DCS_CMD(0x0A, 0x1C),
+ _INIT_DCS_CMD(0x0B, 0x13),
+ _INIT_DCS_CMD(0x0C, 0x12),
+ _INIT_DCS_CMD(0x0D, 0x11),
+ _INIT_DCS_CMD(0x0E, 0x10),
+ _INIT_DCS_CMD(0x0F, 0x0F),
+ _INIT_DCS_CMD(0x10, 0x0E),
+ _INIT_DCS_CMD(0x11, 0x0D),
+ _INIT_DCS_CMD(0x12, 0x0C),
+ _INIT_DCS_CMD(0x13, 0x04),
+ _INIT_DCS_CMD(0x14, 0x03),
+ _INIT_DCS_CMD(0x15, 0x03),
+ _INIT_DCS_CMD(0x16, 0x03),
+ _INIT_DCS_CMD(0x17, 0x03),
+ _INIT_DCS_CMD(0x18, 0x03),
+ _INIT_DCS_CMD(0x19, 0x03),
+ _INIT_DCS_CMD(0x1A, 0x03),
+ _INIT_DCS_CMD(0x1B, 0x03),
+ _INIT_DCS_CMD(0x1C, 0x22),
+ _INIT_DCS_CMD(0x1D, 0x06),
+ _INIT_DCS_CMD(0x1E, 0x00),
+ _INIT_DCS_CMD(0x1F, 0x1D),
+ _INIT_DCS_CMD(0x20, 0x1C),
+ _INIT_DCS_CMD(0x21, 0x13),
+ _INIT_DCS_CMD(0x22, 0x12),
+ _INIT_DCS_CMD(0x23, 0x11),
+ _INIT_DCS_CMD(0x24, 0x10),
+ _INIT_DCS_CMD(0x25, 0x0F),
+ _INIT_DCS_CMD(0x26, 0x0E),
+ _INIT_DCS_CMD(0x27, 0x0D),
+ _INIT_DCS_CMD(0x28, 0x0C),
+ _INIT_DCS_CMD(0x29, 0x04),
+ _INIT_DCS_CMD(0x2A, 0x03),
+ _INIT_DCS_CMD(0x2B, 0x03),
+
+ _INIT_DCS_CMD(0x2F, 0x06),
+ _INIT_DCS_CMD(0x30, 0x32),
+ _INIT_DCS_CMD(0x31, 0x43),
+ _INIT_DCS_CMD(0x33, 0x06),
+ _INIT_DCS_CMD(0x34, 0x32),
+ _INIT_DCS_CMD(0x35, 0x43),
+ _INIT_DCS_CMD(0x37, 0x44),
+ _INIT_DCS_CMD(0x38, 0x40),
+ _INIT_DCS_CMD(0x39, 0x00),
+ _INIT_DCS_CMD(0x3A, 0x01),
+ _INIT_DCS_CMD(0x3B, 0x48),
+ _INIT_DCS_CMD(0x3D, 0x93),
+ _INIT_DCS_CMD(0xAB, 0x44),
+ _INIT_DCS_CMD(0xAC, 0x40),
+
+ _INIT_DCS_CMD(0x4D, 0x21),
+ _INIT_DCS_CMD(0x4E, 0x43),
+ _INIT_DCS_CMD(0x4F, 0x65),
+ _INIT_DCS_CMD(0x50, 0x87),
+ _INIT_DCS_CMD(0x51, 0x78),
+ _INIT_DCS_CMD(0x52, 0x56),
+ _INIT_DCS_CMD(0x53, 0x34),
+ _INIT_DCS_CMD(0x54, 0x21),
+ _INIT_DCS_CMD(0x55, 0x83),
+ _INIT_DCS_CMD(0x56, 0x08),
+ _INIT_DCS_CMD(0x58, 0x21),
+ _INIT_DCS_CMD(0x59, 0x40),
+ _INIT_DCS_CMD(0x5A, 0x09),
+ _INIT_DCS_CMD(0x5B, 0x48),
+ _INIT_DCS_CMD(0x5E, 0x00, 0x10),
+ _INIT_DCS_CMD(0x5F, 0x00),
+
+ _INIT_DCS_CMD(0x7A, 0x00),
+ _INIT_DCS_CMD(0x7B, 0x00),
+ _INIT_DCS_CMD(0x7C, 0x00),
+ _INIT_DCS_CMD(0x7D, 0x00),
+ _INIT_DCS_CMD(0x7E, 0x20),
+ _INIT_DCS_CMD(0x7F, 0x3C),
+ _INIT_DCS_CMD(0x80, 0x00),
+ _INIT_DCS_CMD(0x81, 0x00),
+ _INIT_DCS_CMD(0x82, 0x08),
+ _INIT_DCS_CMD(0x97, 0x02),
+ _INIT_DCS_CMD(0xC5, 0x10),
+ _INIT_DCS_CMD(0xDA, 0x05),
+ _INIT_DCS_CMD(0xDB, 0x01),
+ _INIT_DCS_CMD(0xDC, 0x7A),
+ _INIT_DCS_CMD(0xDD, 0x55),
+ _INIT_DCS_CMD(0xDE, 0x27),
+ _INIT_DCS_CMD(0xDF, 0x01),
+ _INIT_DCS_CMD(0xE0, 0x7A),
+ _INIT_DCS_CMD(0xE1, 0x01),
+ _INIT_DCS_CMD(0xE2, 0x7A),
+ _INIT_DCS_CMD(0xE3, 0x01),
+ _INIT_DCS_CMD(0xE4, 0x7A),
+ _INIT_DCS_CMD(0xE5, 0x01),
+ _INIT_DCS_CMD(0xE6, 0x7A),
+ _INIT_DCS_CMD(0xE7, 0x00),
+ _INIT_DCS_CMD(0xE8, 0x00),
+ _INIT_DCS_CMD(0xE9, 0x01),
+ _INIT_DCS_CMD(0xEA, 0x7A),
+ _INIT_DCS_CMD(0xEB, 0x01),
+ _INIT_DCS_CMD(0xEE, 0x7A),
+ _INIT_DCS_CMD(0xEF, 0x01),
+ _INIT_DCS_CMD(0xF0, 0x7A),
+
+ _INIT_DCS_CMD(0xFF, 0x25),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x05, 0x00),
+
+ _INIT_DCS_CMD(0xF1, 0x10),
+ _INIT_DCS_CMD(0x1E, 0x00),
+ _INIT_DCS_CMD(0x1F, 0x09),
+ _INIT_DCS_CMD(0x20, 0x46),
+ _INIT_DCS_CMD(0x25, 0x00),
+ _INIT_DCS_CMD(0x26, 0x09),
+ _INIT_DCS_CMD(0x27, 0x46),
+ _INIT_DCS_CMD(0x3F, 0x80),
+ _INIT_DCS_CMD(0x40, 0x00),
+ _INIT_DCS_CMD(0x43, 0x00),
+
+ _INIT_DCS_CMD(0x44, 0x09),
+ _INIT_DCS_CMD(0x45, 0x46),
+
+ _INIT_DCS_CMD(0x48, 0x09),
+ _INIT_DCS_CMD(0x49, 0x46),
+ _INIT_DCS_CMD(0x5B, 0x80),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x01),
+ _INIT_DCS_CMD(0x5E, 0x46),
+ _INIT_DCS_CMD(0x61, 0x01),
+ _INIT_DCS_CMD(0x62, 0x46),
+ _INIT_DCS_CMD(0x68, 0x10),
+ _INIT_DCS_CMD(0xFF, 0x26),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x00, 0xA1),
+ _INIT_DCS_CMD(0x02, 0x31),
+ _INIT_DCS_CMD(0x0A, 0xF2),
+ _INIT_DCS_CMD(0x04, 0x28),
+ _INIT_DCS_CMD(0x06, 0x30),
+ _INIT_DCS_CMD(0x0C, 0x16),
+ _INIT_DCS_CMD(0x0D, 0x0D),
+ _INIT_DCS_CMD(0x0F, 0x00),
+ _INIT_DCS_CMD(0x11, 0x00),
+ _INIT_DCS_CMD(0x12, 0x50),
+ _INIT_DCS_CMD(0x13, 0x56),
+ _INIT_DCS_CMD(0x14, 0x57),
+ _INIT_DCS_CMD(0x15, 0x00),
+ _INIT_DCS_CMD(0x16, 0x10),
+ _INIT_DCS_CMD(0x17, 0xA0),
+ _INIT_DCS_CMD(0x18, 0x86),
+ _INIT_DCS_CMD(0x22, 0x00),
+ _INIT_DCS_CMD(0x23, 0x00),
+ _INIT_DCS_CMD(0x19, 0x0D),
+ _INIT_DCS_CMD(0x1A, 0x7F),
+ _INIT_DCS_CMD(0x1B, 0x0C),
+ _INIT_DCS_CMD(0x1C, 0xBF),
+ _INIT_DCS_CMD(0x2A, 0x0D),
+ _INIT_DCS_CMD(0x2B, 0x7F),
+ _INIT_DCS_CMD(0x20, 0x00),
+
+ _INIT_DCS_CMD(0x1D, 0x00),
+ _INIT_DCS_CMD(0x1E, 0x78),
+ _INIT_DCS_CMD(0x1F, 0x78),
+
+ _INIT_DCS_CMD(0x2F, 0x03),
+ _INIT_DCS_CMD(0x30, 0x78),
+ _INIT_DCS_CMD(0x33, 0x78),
+ _INIT_DCS_CMD(0x34, 0x66),
+ _INIT_DCS_CMD(0x35, 0x11),
+
+ _INIT_DCS_CMD(0x39, 0x10),
+ _INIT_DCS_CMD(0x3A, 0x78),
+ _INIT_DCS_CMD(0x3B, 0x06),
+
+ _INIT_DCS_CMD(0xC8, 0x04),
+ _INIT_DCS_CMD(0xC9, 0x84),
+ _INIT_DCS_CMD(0xCA, 0x4E),
+ _INIT_DCS_CMD(0xCB, 0x00),
+
+ _INIT_DCS_CMD(0xA9, 0x50),
+ _INIT_DCS_CMD(0xAA, 0x4F),
+ _INIT_DCS_CMD(0xAB, 0x4D),
+ _INIT_DCS_CMD(0xAC, 0x4A),
+ _INIT_DCS_CMD(0xAD, 0x48),
+ _INIT_DCS_CMD(0xAE, 0x46),
+ _INIT_DCS_CMD(0xFF, 0x27),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xC0, 0x18),
+ _INIT_DCS_CMD(0xC1, 0x00),
+ _INIT_DCS_CMD(0xC2, 0x00),
+ _INIT_DCS_CMD(0x56, 0x06),
+ _INIT_DCS_CMD(0x58, 0x80),
+ _INIT_DCS_CMD(0x59, 0x75),
+ _INIT_DCS_CMD(0x5A, 0x00),
+ _INIT_DCS_CMD(0x5B, 0x02),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0x20),
+ _INIT_DCS_CMD(0x5F, 0x10),
+ _INIT_DCS_CMD(0x60, 0x00),
+ _INIT_DCS_CMD(0x61, 0x2E),
+ _INIT_DCS_CMD(0x62, 0x00),
+ _INIT_DCS_CMD(0x63, 0x01),
+ _INIT_DCS_CMD(0x64, 0x43),
+ _INIT_DCS_CMD(0x65, 0x2D),
+ _INIT_DCS_CMD(0x66, 0x00),
+ _INIT_DCS_CMD(0x67, 0x01),
+ _INIT_DCS_CMD(0x68, 0x43),
+ _INIT_DCS_CMD(0x98, 0x01),
+ _INIT_DCS_CMD(0xB4, 0x03),
+ _INIT_DCS_CMD(0x9B, 0xBD),
+ _INIT_DCS_CMD(0xA0, 0x90),
+ _INIT_DCS_CMD(0xAB, 0x1B),
+ _INIT_DCS_CMD(0xBC, 0x0C),
+ _INIT_DCS_CMD(0xBD, 0x28),
+
+ _INIT_DCS_CMD(0xFF, 0x2A),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x22, 0x2F),
+ _INIT_DCS_CMD(0x23, 0x08),
+
+ _INIT_DCS_CMD(0x24, 0x00),
+ _INIT_DCS_CMD(0x25, 0x65),
+ _INIT_DCS_CMD(0x26, 0xF8),
+ _INIT_DCS_CMD(0x27, 0x00),
+ _INIT_DCS_CMD(0x28, 0x1A),
+ _INIT_DCS_CMD(0x29, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x1A),
+ _INIT_DCS_CMD(0x2B, 0x00),
+ _INIT_DCS_CMD(0x2D, 0x1A),
+
+ _INIT_DCS_CMD(0x64, 0x96),
+ _INIT_DCS_CMD(0x65, 0x00),
+ _INIT_DCS_CMD(0x66, 0x00),
+ _INIT_DCS_CMD(0x6A, 0x96),
+ _INIT_DCS_CMD(0x6B, 0x00),
+ _INIT_DCS_CMD(0x6C, 0x00),
+ _INIT_DCS_CMD(0x70, 0x92),
+ _INIT_DCS_CMD(0x71, 0x00),
+ _INIT_DCS_CMD(0x72, 0x00),
+ _INIT_DCS_CMD(0xA2, 0x33),
+ _INIT_DCS_CMD(0xA3, 0x30),
+ _INIT_DCS_CMD(0xA4, 0xC0),
+ _INIT_DCS_CMD(0xE8, 0x00),
+ _INIT_DCS_CMD(0xFF, 0xF0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x3A, 0x08),
+ _INIT_DCS_CMD(0xFF, 0xD0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x00, 0x33),
+ _INIT_DCS_CMD(0x02, 0x77),
+ _INIT_DCS_CMD(0x08, 0x01),
+ _INIT_DCS_CMD(0x09, 0xBF),
+ _INIT_DCS_CMD(0x28, 0x30),
+ _INIT_DCS_CMD(0x2F, 0x33),
+ _INIT_DCS_CMD(0xFF, 0x23),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x00, 0x80),
+ _INIT_DCS_CMD(0x07, 0x00),
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x30, 0x00),
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xB9, 0x01),
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0x18, 0x40),
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xB9, 0x02),
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xBB, 0x13),
+ _INIT_DCS_CMD(0x3B, 0x03, 0x96, 0x1A, 0x04, 0x04),
+ _INIT_DCS_CMD(0x35, 0x00),
+ _INIT_DCS_CMD(0x51, 0x0F, 0xFF),
+ _INIT_DCS_CMD(0x53, 0x24),
+ _INIT_DELAY_CMD(100),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(200),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(100),
+ {},
+};
+
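+ (Note: each _INIT_DCS_CMD() entry above is one DCS write, where the first byte is the command and the rest is payload, _INIT_DELAY_CMD() is a sleep, and the empty {} terminates the table. A minimal sketch of how such a table is replayed at prepare time, assuming the driver's existing panel_init_cmd layout (type, len, data) and its cmd-type enum:

	static int panel_send_init_table(struct mipi_dsi_device *dsi,
					 const struct panel_init_cmd *cmd)
	{
		ssize_t ret;

		for (; cmd->len != 0; cmd++) {
			if (cmd->type == DELAY_CMD) {
				/* _INIT_DELAY_CMD(ms): nothing on the wire, just wait */
				msleep((u8)cmd->data[0]);
				continue;
			}
			/* _INIT_DCS_CMD(reg, d0, ...): command byte plus payload */
			ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, cmd->len);
			if (ret < 0)
				return ret;
		}

		return 0;
	}
 This is an illustrative sketch, not the driver's literal helper.)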
static const struct panel_init_cmd boe_init_cmd[] = {
_INIT_DELAY_CMD(24),
_INIT_DCS_CMD(0xB0, 0x05),
@@ -511,13 +1176,15 @@ static int boe_panel_unprepare(struct drm_panel *panel)
gpiod_set_value(boe->enable_gpio, 0);
usleep_range(5000, 7000);
regulator_disable(boe->pp1800);
+ regulator_disable(boe->pp3300);
} else {
gpiod_set_value(boe->enable_gpio, 0);
- usleep_range(500, 1000);
+ usleep_range(1000, 2000);
regulator_disable(boe->avee);
regulator_disable(boe->avdd);
usleep_range(5000, 7000);
regulator_disable(boe->pp1800);
+ regulator_disable(boe->pp3300);
}
boe->prepared = false;
@@ -536,6 +1203,10 @@ static int boe_panel_prepare(struct drm_panel *panel)
gpiod_set_value(boe->enable_gpio, 0);
usleep_range(1000, 1500);
+ ret = regulator_enable(boe->pp3300);
+ if (ret < 0)
+ return ret;
+
ret = regulator_enable(boe->pp1800);
if (ret < 0)
return ret;
@@ -549,7 +1220,7 @@ static int boe_panel_prepare(struct drm_panel *panel)
if (ret < 0)
goto poweroffavdd;
- usleep_range(5000, 10000);
+ usleep_range(10000, 11000);
gpiod_set_value(boe->enable_gpio, 1);
usleep_range(1000, 2000);
@@ -586,6 +1257,64 @@ static int boe_panel_enable(struct drm_panel *panel)
return 0;
}
+static const struct drm_display_mode boe_tv110c9m_default_mode = {
+ .clock = 166594,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 40,
+ .hsync_end = 1200 + 40 + 8,
+ .htotal = 1200 + 40 + 8 + 28,
+ .vdisplay = 2000,
+ .vsync_start = 2000 + 26,
+ .vsync_end = 2000 + 26 + 2,
+ .vtotal = 2000 + 26 + 2 + 148,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
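+ /* For reference, these timings work out to roughly 60 Hz:
+  * 166594 kHz / ((1200 + 40 + 8 + 28) * (2000 + 26 + 2 + 148))
+  *   = 166594000 / (1276 * 2176) ~= 60.0 Hz.
+  * The Innolux mode below uses the same horizontal and vertical totals. */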
+
+static const struct panel_desc boe_tv110c9m_desc = {
+ .modes = &boe_tv110c9m_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 143,
+ .height_mm = 238,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
+ | MIPI_DSI_MODE_VIDEO_HSE
+ | MIPI_DSI_CLOCK_NON_CONTINUOUS
+ | MIPI_DSI_MODE_VIDEO_BURST,
+ .init_cmds = boe_tv110c9m_init_cmd,
+};
+
+static const struct drm_display_mode inx_hj110iz_default_mode = {
+ .clock = 166594,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 40,
+ .hsync_end = 1200 + 40 + 8,
+ .htotal = 1200 + 40 + 8 + 28,
+ .vdisplay = 2000,
+ .vsync_start = 2000 + 26,
+ .vsync_end = 2000 + 26 + 1,
+ .vtotal = 2000 + 26 + 1 + 149,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc inx_hj110iz_desc = {
+ .modes = &inx_hj110iz_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 143,
+ .height_mm = 238,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
+ | MIPI_DSI_MODE_VIDEO_HSE
+ | MIPI_DSI_CLOCK_NON_CONTINUOUS
+ | MIPI_DSI_MODE_VIDEO_BURST,
+ .init_cmds = inx_init_cmd,
+};
+
static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
.clock = 159425,
.hdisplay = 1200,
@@ -767,6 +1496,10 @@ static int boe_panel_add(struct boe_panel *boe)
if (IS_ERR(boe->avee))
return PTR_ERR(boe->avee);
+ boe->pp3300 = devm_regulator_get(dev, "pp3300");
+ if (IS_ERR(boe->pp3300))
+ return PTR_ERR(boe->pp3300);
+
boe->pp1800 = devm_regulator_get(dev, "pp1800");
if (IS_ERR(boe->pp1800))
return PTR_ERR(boe->pp1800);
@@ -870,6 +1603,12 @@ static const struct of_device_id boe_of_match[] = {
{ .compatible = "boe,tv105wum-nw0",
.data = &boe_tv105wum_nw0_desc
},
+ { .compatible = "boe,tv110c9m-ll3",
+ .data = &boe_tv110c9m_desc
+ },
+ { .compatible = "innolux,hj110iz-01a",
+ .data = &inx_hj110iz_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_of_match);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 4c37c4f6d26c..fc03046de134 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1745,10 +1745,11 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
-#define EDP_PANEL_ENTRY(vend, product_id, _delay, _name) \
+#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.name = _name, \
- .panel_id = drm_edid_encode_panel_id(vend, product_id), \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
.delay = _delay \
}
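 (The three characters are the panel's EDID PNP vendor ID, which drm_edid_encode_panel_id() packs at 5 bits per character above the 16-bit product ID, roughly as below; see drm_edid.h for the authoritative definition:

	#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \
		((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \
		 (((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \
		 (((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \
		 ((product_id) & 0xffff))

 so EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, ...) yields the same 32-bit panel id the EDID parser reports, which is why the entries switch from a vendor string to three characters.)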
@@ -1760,19 +1761,19 @@ static const struct panel_delay delay_200_500_e50 = {
* Sort first by vendor, then by product ID.
*/
static const struct edp_panel_entry edp_panels[] = {
- EDP_PANEL_ENTRY("AUO", 0x405c, &auo_b116xak01.delay, "B116XAK01"),
- EDP_PANEL_ENTRY("AUO", 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
- EDP_PANEL_ENTRY("BOE", 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
- EDP_PANEL_ENTRY("BOE", 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
- EDP_PANEL_ENTRY("BOE", 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
- EDP_PANEL_ENTRY("BOE", 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
- EDP_PANEL_ENTRY("CMN", 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
- EDP_PANEL_ENTRY("KDB", 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
- EDP_PANEL_ENTRY("SHP", 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+ EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 7793249bc549..11b21d605584 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -545,14 +545,14 @@ static int radeon_ttm_tt_populate(struct ttm_device *bdev,
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (gtt && gtt->userptr) {
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
- ttm->page_flags |= TTM_PAGE_FLAG_SG;
+ ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
return 0;
}
@@ -569,13 +569,13 @@ static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
radeon_ttm_tt_unbind(bdev, ttm);
if (gtt && gtt->userptr) {
kfree(ttm->sg);
- ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+ ttm->page_flags &= ~TTM_TT_FLAG_EXTERNAL;
return;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3b22c0013dbf..d62b2013c367 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1115,8 +1115,8 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return -EBUSY;
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+ bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
!ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1c5ffe2935af..82af095f6b81 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -103,7 +103,7 @@ void ttm_move_memcpy(struct ttm_buffer_object *bo,
/* Don't move nonexistent data. Clear destination instead. */
if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
- if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+ if (ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))
return;
for (i = 0; i < num_pages; ++i) {
@@ -150,7 +150,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_kmap_iter *dst_iter, *src_iter;
int ret = 0;
- if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
+ if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
dst_man->use_tt)) {
ret = ttm_tt_populate(bdev, ttm, ctx);
if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index f56be5bc0861..33680c94127c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -162,9 +162,11 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- dma_resv_unlock(bo->base.resv);
- return VM_FAULT_SIGBUS;
+ if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
}
return 0;
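 (TTM_TT_FLAG_EXTERNAL_MAPPABLE is the new opt-in: externally populated (SG) objects still refuse CPU faults unless the driver sets it. A minimal sketch, not from this patch, of a driver opting in from its ttm_tt_create hook; ttm_tt_init()'s four-argument signature is assumed from this kernel generation:

	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
					       uint32_t page_flags)
	{
		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

		if (!tt)
			return NULL;

		/* Only valid together with TTM_TT_FLAG_EXTERNAL; see the
		 * WARN_ON added to ttm_tt_create() further down. */
		if (page_flags & TTM_TT_FLAG_EXTERNAL)
			page_flags |= TTM_TT_FLAG_EXTERNAL_MAPPABLE;

		if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
			kfree(tt);
			return NULL;
		}

		return tt;
	}
 )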
@@ -346,8 +348,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
} else if (unlikely(!page)) {
break;
}
- page->index = drm_vma_node_start(&bo->base.vma_node) +
- page_offset;
pfn = page_to_pfn(page);
}
@@ -519,11 +519,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
switch (bo->resource->mem_type) {
case TTM_PL_SYSTEM:
- if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(bo->ttm);
- if (unlikely(ret != 0))
- return ret;
- }
fallthrough;
case TTM_PL_TT:
ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index c961a788b519..1bba0a0ed3f9 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -371,7 +371,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
WARN_ON(!num_pages || ttm_tt_is_populated(tt));
WARN_ON(dma_addr && !pool->dev);
- if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
if (ctx->gfp_retry_mayfail)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index dae52433beeb..7e83c00a3f48 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -68,12 +68,12 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
switch (bo->type) {
case ttm_bo_type_device:
if (zero_alloc)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
break;
case ttm_bo_type_kernel:
break;
case ttm_bo_type_sg:
- page_flags |= TTM_PAGE_FLAG_SG;
+ page_flags |= TTM_TT_FLAG_EXTERNAL;
break;
default:
pr_err("Illegal buffer object type\n");
@@ -84,6 +84,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
if (unlikely(bo->ttm == NULL))
return -ENOMEM;
+ WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
+ !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));
+
return 0;
}
@@ -156,7 +159,7 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
- WARN_ON(ttm->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED);
+ WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
if (ttm->swap_storage)
fput(ttm->swap_storage);
@@ -178,7 +181,7 @@ int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
ttm_tt_init_fields(ttm, bo, page_flags, caching);
- if (page_flags & TTM_PAGE_FLAG_SG)
+ if (page_flags & TTM_TT_FLAG_EXTERNAL)
ret = ttm_sg_tt_alloc_page_directory(ttm);
else
ret = ttm_dma_tt_alloc_page_directory(ttm);
@@ -224,7 +227,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
fput(swap_storage);
ttm->swap_storage = NULL;
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
return 0;
@@ -279,7 +282,7 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
return ttm->num_pages;
@@ -289,17 +292,6 @@ out_err:
return ret;
}
-static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- pgoff_t i;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i]->mapping = bdev->dev_mapping;
-}
-
int ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
@@ -311,7 +303,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ttm_tt_is_populated(ttm))
return 0;
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_add(ttm->num_pages,
@@ -336,9 +328,8 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ret)
goto error;
- ttm_tt_add_mapping(bdev, ttm);
- ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
ttm_tt_unpopulate(bdev, ttm);
@@ -349,7 +340,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
return 0;
error:
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages,
@@ -359,39 +350,24 @@ error:
}
EXPORT_SYMBOL(ttm_tt_populate);
-static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
-{
- pgoff_t i;
- struct page **page = ttm->pages;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- (*page)->mapping = NULL;
- (*page++)->index = 0;
- }
-}
-
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
if (!ttm_tt_is_populated(ttm))
return;
- ttm_tt_clear_mapping(ttm);
if (bdev->funcs->ttm_tt_unpopulate)
bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
else
ttm_pool_free(&bdev->pool, ttm);
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
- ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index c1deab2cf38d..bd46396a1ae0 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -83,7 +83,6 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
return 0;
}
-
switch (args->param) {
case DRM_V3D_PARAM_SUPPORTS_TFU:
args->value = 1;
@@ -97,6 +96,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_PERFMON:
args->value = (v3d->ver >= 40);
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -136,9 +138,8 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
- }
v3d_perfmon_close_file(v3d_priv);
kfree(v3d_priv);
@@ -147,7 +148,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
- * protection between clients. Note that render nodes would be be
+ * protection between clients. Note that render nodes would be
* able to submit CLs that could access BOs from clients authenticated
* with the master node. The TFU doesn't use the GMP, so it would
* need to stay DRM_AUTH until we do buffer size/offset validation.
@@ -219,7 +220,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
u32 mmu_debug;
u32 ident1;
-
v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
if (IS_ERR(v3d))
return PTR_ERR(v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index b900a050d5e2..b74b1351bfc8 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,15 +19,6 @@ struct reset_control;
#define GMP_GRANULARITY (128 * 1024)
-/* Enum for each of the V3D queues. */
-enum v3d_queue {
- V3D_BIN,
- V3D_RENDER,
- V3D_TFU,
- V3D_CSD,
- V3D_CACHE_CLEAN,
-};
-
#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
struct v3d_queue_state {
@@ -294,6 +285,21 @@ struct v3d_csd_job {
struct drm_v3d_submit_csd args;
};
+struct v3d_submit_outsync {
+ struct drm_syncobj *syncobj;
+};
+
+struct v3d_submit_ext {
+ u32 flags;
+ u32 wait_stage;
+
+ u32 in_sync_count;
+ u64 in_syncs;
+
+ u32 out_sync_count;
+ struct v3d_submit_outsync *out_syncs;
+};
+
/**
* __wait_for - magic wait macro
*
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index ead0be8d48a7..6a000d77c568 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -392,6 +392,9 @@ v3d_render_job_free(struct kref *ref)
void v3d_job_cleanup(struct v3d_job *job)
{
+ if (!job)
+ return;
+
drm_sched_job_cleanup(&job->base);
v3d_job_put(job);
}
@@ -416,7 +419,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
ret = drm_gem_dma_resv_wait(file_priv, args->handle,
- true, timeout_jiffies);
+ true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
* such that the ioctl will be restarted.
@@ -435,41 +438,83 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
}
static int
-v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
- struct v3d_job *job, void (*free)(struct kref *ref),
- u32 in_sync, enum v3d_queue queue)
+v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job,
+ u32 in_sync, u32 point)
{
struct dma_fence *in_fence = NULL;
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
int ret;
+ ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence);
+ if (ret == -EINVAL)
+ return ret;
+
+ return drm_sched_job_add_dependency(&job->base, in_fence);
+}
+
+static int
+v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
+ void **container, size_t size, void (*free)(struct kref *ref),
+ u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct v3d_job *job;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+ int ret, i;
+
+ *container = kcalloc(1, size, GFP_KERNEL);
+ if (!*container) {
+ DRM_ERROR("Cannot allocate memory for v3d job.");
+ return -ENOMEM;
+ }
+
+ job = *container;
job->v3d = v3d;
job->free = free;
ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
- return ret;
+ goto fail;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
v3d_priv);
if (ret)
- goto fail;
-
- ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
- if (ret == -EINVAL)
goto fail_job;
- ret = drm_sched_job_add_dependency(&job->base, in_fence);
- if (ret)
- goto fail_job;
+ if (has_multisync) {
+ if (se->in_sync_count && se->wait_stage == queue) {
+ struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
+
+ for (i = 0; i < se->in_sync_count; i++) {
+ struct drm_v3d_sem in;
+
+ ret = copy_from_user(&in, handle++, sizeof(in));
+ if (ret) {
+ DRM_DEBUG("Failed to copy wait dep handle.\n");
+ goto fail_deps;
+ }
+ ret = v3d_job_add_deps(file_priv, job, in.handle, 0);
+ if (ret)
+ goto fail_deps;
+ }
+ }
+ } else {
+ ret = v3d_job_add_deps(file_priv, job, in_sync, 0);
+ if (ret)
+ goto fail_deps;
+ }
kref_init(&job->refcount);
return 0;
-fail_job:
+
+fail_deps:
drm_sched_job_cleanup(&job->base);
-fail:
+fail_job:
pm_runtime_put_autosuspend(v3d->drm.dev);
+fail:
+ kfree(*container);
+ *container = NULL;
+
return ret;
}
@@ -491,25 +536,171 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
struct v3d_job *job,
struct ww_acquire_ctx *acquire_ctx,
u32 out_sync,
+ struct v3d_submit_ext *se,
struct dma_fence *done_fence)
{
struct drm_syncobj *sync_out;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
int i;
for (i = 0; i < job->bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
dma_resv_add_excl_fence(job->bo[i]->resv,
- job->done_fence);
+ job->done_fence);
}
drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
/* Update the return sync object for the job */
- sync_out = drm_syncobj_find(file_priv, out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, done_fence);
- drm_syncobj_put(sync_out);
+ /* If it only supports a single signal semaphore */
+ if (!has_multisync) {
+ sync_out = drm_syncobj_find(file_priv, out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, done_fence);
+ drm_syncobj_put(sync_out);
+ }
+ return;
+ }
+
+ /* If multiple semaphores extension is supported */
+ if (se->out_sync_count) {
+ for (i = 0; i < se->out_sync_count; i++) {
+ drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
+ done_fence);
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ }
+ kvfree(se->out_syncs);
+ }
+}
+
+static void
+v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
+{
+ unsigned int i;
+
+ if (!(se && se->out_sync_count))
+ return;
+
+ for (i = 0; i < se->out_sync_count; i++)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+}
+
+static int
+v3d_get_multisync_post_deps(struct drm_file *file_priv,
+ struct v3d_submit_ext *se,
+ u32 count, u64 handles)
+{
+ struct drm_v3d_sem __user *post_deps;
+ int i, ret;
+
+ if (!count)
+ return 0;
+
+ se->out_syncs = (struct v3d_submit_outsync *)
+ kvmalloc_array(count,
+ sizeof(struct v3d_submit_outsync),
+ GFP_KERNEL);
+ if (!se->out_syncs)
+ return -ENOMEM;
+
+ post_deps = u64_to_user_ptr(handles);
+
+ for (i = 0; i < count; i++) {
+ struct drm_v3d_sem out;
+
+ ret = copy_from_user(&out, post_deps++, sizeof(out));
+ if (ret) {
+ DRM_DEBUG("Failed to copy post dep handles\n");
+ goto fail;
+ }
+
+ se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
+ out.handle);
+ if (!se->out_syncs[i].syncobj) {
+ ret = -EINVAL;
+ goto fail;
+ }
}
+ se->out_sync_count = count;
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+
+ return ret;
+}
+
+/* Get data for multiple binary semaphores synchronization. Parse syncobj
+ * to be signaled when job completes (out_sync).
+ */
+static int
+v3d_get_multisync_submit_deps(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ void *data)
+{
+ struct drm_v3d_multi_sync multisync;
+ struct v3d_submit_ext *se = data;
+ int ret;
+
+ ret = copy_from_user(&multisync, ext, sizeof(multisync));
+ if (ret)
+ return ret;
+
+ if (multisync.pad)
+ return -EINVAL;
+
+ ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count,
+ multisync.out_syncs);
+ if (ret)
+ return ret;
+
+ se->in_sync_count = multisync.in_sync_count;
+ se->in_syncs = multisync.in_syncs;
+ se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
+ se->wait_stage = multisync.wait_stage;
+
+ return 0;
+}
+
+/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+ * according to the extension id (name).
+ */
+static int
+v3d_get_extensions(struct drm_file *file_priv,
+ u64 ext_handles,
+ void *data)
+{
+ struct drm_v3d_extension __user *user_ext;
+ int ret;
+
+ user_ext = u64_to_user_ptr(ext_handles);
+ while (user_ext) {
+ struct drm_v3d_extension ext;
+
+ if (copy_from_user(&ext, user_ext, sizeof(ext))) {
+ DRM_DEBUG("Failed to copy submit extension\n");
+ return -EFAULT;
+ }
+
+ switch (ext.id) {
+ case DRM_V3D_EXT_ID_MULTI_SYNC:
+ ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
+ return -EINVAL;
+ }
+
+ user_ext = u64_to_user_ptr(ext.next);
+ }
+
+ return 0;
}
/**
@@ -531,8 +722,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_cl *args = data;
+ struct v3d_submit_ext se = {0};
struct v3d_bin_job *bin = NULL;
- struct v3d_render_job *render;
+ struct v3d_render_job *render = NULL;
struct v3d_job *clean_job = NULL;
struct v3d_job *last_job;
struct ww_acquire_ctx acquire_ctx;
@@ -540,44 +732,38 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
- if (args->pad != 0)
+ if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ if (args->flags &&
+ args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
+ DRM_V3D_SUBMIT_EXTENSION)) {
DRM_INFO("invalid flags: %d\n", args->flags);
return -EINVAL;
}
- render = kcalloc(1, sizeof(*render), GFP_KERNEL);
- if (!render)
- return -ENOMEM;
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render),
+ v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
+ if (ret)
+ goto fail;
render->start = args->rcl_start;
render->end = args->rcl_end;
INIT_LIST_HEAD(&render->unref_list);
- ret = v3d_job_init(v3d, file_priv, &render->base,
- v3d_render_job_free, args->in_sync_rcl, V3D_RENDER);
- if (ret) {
- kfree(render);
- return ret;
- }
-
if (args->bcl_start != args->bcl_end) {
- bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
- if (!bin) {
- v3d_job_cleanup(&render->base);
- return -ENOMEM;
- }
-
- ret = v3d_job_init(v3d, file_priv, &bin->base,
- v3d_job_free, args->in_sync_bcl, V3D_BIN);
- if (ret) {
- v3d_job_cleanup(&render->base);
- kfree(bin);
- return ret;
- }
+ ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin),
+ v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
+ if (ret)
+ goto fail;
bin->start = args->bcl_start;
bin->end = args->bcl_end;
@@ -588,18 +774,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
- clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
- if (!clean_job) {
- ret = -ENOMEM;
- goto fail;
- }
-
- ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
- if (ret) {
- kfree(clean_job);
- clean_job = NULL;
+ ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
+ v3d_job_free, 0, 0, V3D_CACHE_CLEAN);
+ if (ret)
goto fail;
- }
last_job = clean_job;
} else {
@@ -657,6 +835,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
last_job,
&acquire_ctx,
args->out_sync,
+ &se,
last_job->done_fence);
if (bin)
@@ -672,11 +851,10 @@ fail_unreserve:
drm_gem_unlock_reservations(last_job->bo,
last_job->bo_count, &acquire_ctx);
fail:
- if (bin)
- v3d_job_cleanup(&bin->base);
- v3d_job_cleanup(&render->base);
- if (clean_job)
- v3d_job_cleanup(clean_job);
+ v3d_job_cleanup((void *)bin);
+ v3d_job_cleanup((void *)render);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
return ret;
}
@@ -696,28 +874,36 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct drm_v3d_submit_tfu *args = data;
- struct v3d_tfu_job *job;
+ struct v3d_submit_ext se = {0};
+ struct v3d_tfu_job *job = NULL;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
- job = kcalloc(1, sizeof(*job), GFP_KERNEL);
- if (!job)
- return -ENOMEM;
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_DEBUG("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
- ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync, V3D_TFU);
- if (ret) {
- kfree(job);
- return ret;
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
}
+ ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
+ v3d_job_free, args->in_sync, &se, V3D_TFU);
+ if (ret)
+ goto fail;
+
job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
sizeof(*job->base.bo), GFP_KERNEL);
if (!job->base.bo) {
- v3d_job_cleanup(&job->base);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
job->args = *args;
@@ -757,6 +943,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
v3d_attach_fences_and_unlock_reservation(file_priv,
&job->base, &acquire_ctx,
args->out_sync,
+ &se,
job->base.done_fence);
v3d_job_put(&job->base);
@@ -764,7 +951,8 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
return 0;
fail:
- v3d_job_cleanup(&job->base);
+ v3d_job_cleanup((void *)job);
+ v3d_put_multisync_post_deps(&se);
return ret;
}
@@ -785,41 +973,44 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_csd *args = data;
- struct v3d_csd_job *job;
- struct v3d_job *clean_job;
+ struct v3d_submit_ext se = {0};
+ struct v3d_csd_job *job = NULL;
+ struct v3d_job *clean_job = NULL;
struct ww_acquire_ctx acquire_ctx;
int ret;
trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+ if (args->pad)
+ return -EINVAL;
+
if (!v3d_has_csd(v3d)) {
DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
return -EINVAL;
}
- job = kcalloc(1, sizeof(*job), GFP_KERNEL);
- if (!job)
- return -ENOMEM;
-
- ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync, V3D_CSD);
- if (ret) {
- kfree(job);
- return ret;
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
+ return -EINVAL;
}
- clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
- if (!clean_job) {
- v3d_job_cleanup(&job->base);
- return -ENOMEM;
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
}
- ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
- if (ret) {
- v3d_job_cleanup(&job->base);
- kfree(clean_job);
- return ret;
- }
+ ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
+ v3d_job_free, args->in_sync, &se, V3D_CSD);
+ if (ret)
+ goto fail;
+
+ ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
+ v3d_job_free, 0, 0, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail;
job->args = *args;
@@ -856,6 +1047,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
clean_job,
&acquire_ctx,
args->out_sync,
+ &se,
clean_job->done_fence);
v3d_job_put(&job->base);
@@ -868,8 +1060,9 @@ fail_unreserve:
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
&acquire_ctx);
fail:
- v3d_job_cleanup(&job->base);
+ v3d_job_cleanup((void *)job);
v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
return ret;
}
@@ -903,8 +1096,7 @@ v3d_gem_init(struct drm_device *dev)
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
dev_err(v3d->drm.dev,
- "Failed to allocate page tables. "
- "Please ensure you have CMA enabled.\n");
+ "Failed to allocate page tables. Please ensure you have CMA enabled.\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 2b81cb259d23..a6c81af37345 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -69,7 +69,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = vbox_mode_init(vbox);
if (ret)
- goto err_mm_fini;
+ goto err_hw_fini;
ret = vbox_irq_init(vbox);
if (ret)
@@ -87,8 +87,6 @@ err_irq_fini:
vbox_irq_fini(vbox);
err_mode_fini:
vbox_mode_fini(vbox);
-err_mm_fini:
- vbox_mm_fini(vbox);
err_hw_fini:
vbox_hw_fini(vbox);
return ret;
@@ -101,7 +99,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(&vbox->ddev);
vbox_irq_fini(vbox);
vbox_mode_fini(vbox);
- vbox_mm_fini(vbox);
vbox_hw_fini(vbox);
}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index 4903b91d7fe4..e77bd6512eb1 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -139,7 +139,6 @@ void vbox_mode_fini(struct vbox_private *vbox);
void vbox_report_caps(struct vbox_private *vbox);
int vbox_mm_init(struct vbox_private *vbox);
-void vbox_mm_fini(struct vbox_private *vbox);
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index fd8a53a4d8d6..dc24c2172fd4 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -13,22 +13,21 @@
int vbox_mm_init(struct vbox_private *vbox)
{
int ret;
+ resource_size_t base, size;
struct drm_device *dev = &vbox->ddev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
- vbox->available_vram_size);
+ base = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_phys_wc_add(&pdev->dev, base, size);
+
+ ret = drmm_vram_helper_init(dev, base, vbox->available_vram_size);
if (ret) {
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
return ret;
}
- vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
return 0;
}
-
-void vbox_mm_fini(struct vbox_private *vbox)
-{
- arch_phys_wc_del(vbox->fb_mtrr);
-}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b4b4653fe301..1bc675d727b6 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1564,10 +1564,11 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
- struct drm_device *dev = vc4_hdmi->connector.dev;
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_device *dev = connector->dev;
if (dev && dev->registered)
- drm_kms_helper_hotplug_event(dev);
+ drm_connector_helper_hpd_irq_event(connector);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index c2b20e0ee030..b6954e2f75e6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -52,6 +52,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
vgdev->has_resource_assign_uuid);
virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
+ virtio_gpu_add_bool(m, "context init", vgdev->has_context_init);
virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
if (vgdev->host_visible_region.len) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ed85a7863256..749db18dcfa2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -29,6 +29,8 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
#include <drm/drm.h>
#include <drm/drm_aperture.h>
@@ -155,6 +157,35 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
schedule_work(&vgdev->config_changed_work);
}
+static __poll_t virtio_gpu_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct drm_file *drm_file = filp->private_data;
+ struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+ struct drm_device *dev = drm_file->minor->dev;
+ struct drm_pending_event *e = NULL;
+ __poll_t mask = 0;
+
+ if (!vfpriv->ring_idx_mask)
+ return drm_poll(filp, wait);
+
+ poll_wait(filp, &drm_file->event_wait, wait);
+
+ if (!list_empty(&drm_file->event_list)) {
+ spin_lock_irq(&dev->event_lock);
+ e = list_first_entry(&drm_file->event_list,
+ struct drm_pending_event, link);
+ drm_file->event_space += e->event->length;
+ list_del(&e->link);
+ spin_unlock_irq(&dev->event_lock);
+
+ kfree(e);
+ mask |= EPOLLIN | EPOLLRDNORM;
+ }
+
+ return mask;
+}
+
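+/* With a ring mask set, the driver consumes and frees the pending fence
+ * event itself, so from userspace the wake-up is the whole signal; a
+ * sketch of the consumer side (drm_fd is assumed to be an open render
+ * node):
+ *
+ *	#include <poll.h>
+ *
+ *	struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };
+ *
+ *	poll(&pfd, 1, -1);	// returns once a fence on a polled ring signals
+ */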
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -172,6 +203,7 @@ static unsigned int features[] = {
VIRTIO_GPU_F_EDID,
VIRTIO_GPU_F_RESOURCE_UUID,
VIRTIO_GPU_F_RESOURCE_BLOB,
+ VIRTIO_GPU_F_CONTEXT_INIT,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
@@ -193,7 +225,17 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
+static const struct file_operations virtio_gpu_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = virtio_gpu_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap
+};
static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0c4810982530..e0265fe74aa5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -55,6 +55,9 @@
#define STATE_OK 1
#define STATE_ERR 2
+#define MAX_CAPSET_ID 63
+#define MAX_RINGS 64
+
struct virtio_gpu_object_params {
unsigned long size;
bool dumb;
@@ -135,9 +138,18 @@ struct virtio_gpu_fence_driver {
spinlock_t lock;
};
+#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
+struct virtio_gpu_fence_event {
+ struct drm_pending_event base;
+ struct drm_event event;
+};
+
struct virtio_gpu_fence {
struct dma_fence f;
+ uint32_t ring_idx;
uint64_t fence_id;
+ bool emit_fence_info;
+ struct virtio_gpu_fence_event *e;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
};
@@ -234,6 +246,7 @@ struct virtio_gpu_device {
bool has_resource_assign_uuid;
bool has_resource_blob;
bool has_host_visible;
+ bool has_context_init;
struct virtio_shm_region host_visible_region;
struct drm_mm host_visible_mm;
@@ -245,6 +258,7 @@ struct virtio_gpu_device {
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
+ uint64_t capset_id_mask;
struct list_head cap_cache;
/* protects uuid state when exporting */
@@ -255,12 +269,16 @@ struct virtio_gpu_device {
struct virtio_gpu_fpriv {
uint32_t ctx_id;
+ uint32_t context_init;
bool context_created;
+ uint32_t num_rings;
+ uint64_t base_fence_ctx;
+ uint64_t ring_idx_mask;
struct mutex context_lock;
};
/* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 11
+#define DRM_VIRTIO_NUM_IOCTLS 12
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
@@ -338,7 +356,8 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
struct virtio_gpu_drv_cap_cache **cache_p);
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
- uint32_t nlen, const char *name);
+ uint32_t context_init, uint32_t nlen,
+ const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
@@ -418,8 +437,9 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
int index);
/* virtgpu_fence.c */
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(
- struct virtio_gpu_device *vgdev);
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+ uint64_t base_fence_ctx,
+ uint32_t ring_idx);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index d28e25e8409b..f28357dbde35 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -71,22 +71,29 @@ static const struct dma_fence_ops virtio_gpu_fence_ops = {
.timeline_value_str = virtio_gpu_timeline_value_str,
};
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+ uint64_t base_fence_ctx,
+ uint32_t ring_idx)
{
+ uint64_t fence_context = base_fence_ctx + ring_idx;
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
GFP_KERNEL);
+
if (!fence)
return fence;
fence->drv = drv;
+ fence->ring_idx = ring_idx;
+ fence->emit_fence_info = !(base_fence_ctx == drv->context);
/* This only partially initializes the fence because the seqno is
* unknown yet. The fence must not be used outside of the driver
* until virtio_gpu_fence_emit is called.
*/
- dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
- 0);
+
+ dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
+ fence_context, 0);
return fence;
}
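 (Worked example of the per-ring timelines: if a context asked for VIRTGPU_CONTEXT_PARAM_NUM_RINGS = 2, dma_fence_context_alloc(2) in the ioctl below hands back some base N, and fences for ring 0 and ring 1 are initialized on contexts N and N + 1 respectively, so each ring gets its own ordered dma-fence timeline; the legacy path keeps using drv->context and therefore never sets emit_fence_info.)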
@@ -108,6 +115,13 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
+
+ /* Only currently defined fence param. */
+ if (fence->emit_fence_info) {
+ cmd_hdr->flags |=
+ cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
+ cmd_hdr->ring_idx = (u8)fence->ring_idx;
+ }
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
@@ -138,11 +152,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
continue;
dma_fence_signal_locked(&curr->f);
+ if (curr->e) {
+ drm_send_event(vgdev->ddev, &curr->e->base);
+ curr->e = NULL;
+ }
+
list_del(&curr->node);
dma_fence_put(&curr->f);
}
dma_fence_signal_locked(&signaled->f);
+ if (signaled->e) {
+ drm_send_event(vgdev->ddev, &signaled->e->base);
+ signaled->e = NULL;
+ }
+
list_del(&signaled->node);
dma_fence_put(&signaled->f);
break;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5c1ad1596889..5618a1d5879c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -38,20 +38,60 @@
VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
+static int virtio_gpu_fence_event_create(struct drm_device *dev,
+ struct drm_file *file,
+ struct virtio_gpu_fence *fence,
+ uint32_t ring_idx)
+{
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct virtio_gpu_fence_event *e = NULL;
+ int ret;
+
+ if (!(vfpriv->ring_idx_mask & (1 << ring_idx)))
+ return 0;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+ e->event.length = sizeof(e->event);
+
+ ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
+ if (ret)
+ goto free;
+
+ fence->e = e;
+ return 0;
+free:
+ kfree(e);
+ return ret;
+}
+
+/* Must be called with &virtio_gpu_fpriv.struct_mutex held. */
+static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_fpriv *vfpriv)
+{
+ char dbgname[TASK_COMM_LEN];
+
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init, strlen(dbgname),
+ dbgname);
+
+ vfpriv->context_created = true;
+}
+
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- char dbgname[TASK_COMM_LEN];
mutex_lock(&vfpriv->context_lock);
if (vfpriv->context_created)
goto out_unlock;
- get_task_comm(dbgname, current);
- virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
- strlen(dbgname), dbgname);
- vfpriv->context_created = true;
+ virtio_gpu_create_context_locked(vgdev, vfpriv);
out_unlock:
mutex_unlock(&vfpriv->context_lock);
@@ -89,6 +129,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
void *buf;
+ uint64_t fence_ctx;
+ uint32_t ring_idx;
+
+ fence_ctx = vgdev->fence_drv.context;
+ ring_idx = 0;
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
@@ -96,6 +141,17 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
return -EINVAL;
+ if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) {
+ if (exbuf->ring_idx >= vfpriv->num_rings)
+ return -EINVAL;
+
+ if (!vfpriv->base_fence_ctx)
+ return -EINVAL;
+
+ fence_ctx = vfpriv->base_fence_ctx;
+ ring_idx = exbuf->ring_idx;
+ }
+
exbuf->fence_fd = -1;
virtio_gpu_create_context(dev, file);
@@ -163,12 +219,16 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_memdup;
}
- out_fence = virtio_gpu_fence_alloc(vgdev);
+ out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
if(!out_fence) {
ret = -ENOMEM;
goto out_unresv;
}
+ ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+ if (ret)
+ goto out_unresv;
+
if (out_fence_fd >= 0) {
sync_file = sync_file_create(&out_fence->f);
if (!sync_file) {
@@ -226,6 +286,12 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_CROSS_DEVICE:
value = vgdev->has_resource_assign_uuid ? 1 : 0;
break;
+ case VIRTGPU_PARAM_CONTEXT_INIT:
+ value = vgdev->has_context_init ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
+ value = vgdev->capset_id_mask;
+ break;
default:
return -EINVAL;
}
@@ -278,7 +344,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (params.size == 0)
params.size = PAGE_SIZE;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence)
return -ENOMEM;
ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
@@ -357,7 +423,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (ret != 0)
goto err_put_free;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence) {
ret = -ENOMEM;
goto err_unlock;
@@ -417,7 +483,8 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
goto err_put_free;
ret = -ENOMEM;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ 0);
if (!fence)
goto err_unlock;
@@ -662,6 +729,113 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
return 0;
}
+static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ int ret = 0;
+ uint32_t num_params, i, param, value;
+ uint64_t valid_ring_mask;
+ size_t len;
+ struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct drm_virtgpu_context_init *args = data;
+
+ num_params = args->num_params;
+ len = num_params * sizeof(struct drm_virtgpu_context_set_param);
+
+ if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
+ return -EINVAL;
+
+ /* Number of unique parameters supported at this time. */
+ if (num_params > 3)
+ return -EINVAL;
+
+ ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
+ len);
+
+ if (IS_ERR(ctx_set_params))
+ return PTR_ERR(ctx_set_params);
+
+ mutex_lock(&vfpriv->context_lock);
+ if (vfpriv->context_created) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < num_params; i++) {
+ param = ctx_set_params[i].param;
+ value = ctx_set_params[i].value;
+
+ switch (param) {
+ case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
+ if (value > MAX_CAPSET_ID) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if ((vgdev->capset_id_mask & (1 << value)) == 0) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Context capset ID already set */
+ if (vfpriv->context_init &
+ VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->context_init |= value;
+ break;
+ case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
+ if (vfpriv->base_fence_ctx) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (value > MAX_RINGS) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
+ vfpriv->num_rings = value;
+ break;
+ case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
+ if (vfpriv->ring_idx_mask) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->ring_idx_mask = value;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ if (vfpriv->ring_idx_mask) {
+ valid_ring_mask = 0;
+ for (i = 0; i < vfpriv->num_rings; i++)
+ valid_ring_mask |= 1 << i;
+
+ if (~valid_ring_mask & vfpriv->ring_idx_mask) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ virtio_gpu_create_context_locked(vgdev, vfpriv);
+ virtio_gpu_notify(vgdev);
+
+out_unlock:
+ mutex_unlock(&vfpriv->context_lock);
+ kfree(ctx_set_params);
+ return ret;
+}
+
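+/* For illustration, the userspace side of this ioctl might look as follows;
+ * the struct layouts are assumed from the matching virtgpu_drm.h update,
+ * which is not shown in this hunk:
+ *
+ *	struct drm_virtgpu_context_set_param params[] = {
+ *		{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID,       .value = capset_id },
+ *		{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS,       .value = 2 },
+ *		{ .param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, .value = 0x3 },
+ *	};
+ *	struct drm_virtgpu_context_init init = {
+ *		.num_params = 3,
+ *		.ctx_set_params = (uintptr_t)params,
+ *	};
+ *
+ *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
+ *
+ * With num_rings = 2 the valid ring mask computed above is 0x3, so a poll
+ * mask of 0x3 passes while anything naming ring 2 or higher is rejected
+ * with -EINVAL.
+ */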
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_RENDER_ALLOW),
@@ -698,4 +872,7 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
virtio_gpu_resource_create_blob_ioctl,
DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
+ DRM_RENDER_ALLOW),
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index f3379059f324..21f410901694 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -65,6 +65,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
int num_capsets)
{
int i, ret;
+ bool invalid_capset_id = false;
vgdev->capsets = kcalloc(num_capsets,
sizeof(struct virtio_gpu_drv_capset),
@@ -78,19 +79,34 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
virtio_gpu_notify(vgdev);
ret = wait_event_timeout(vgdev->resp_wq,
vgdev->capsets[i].id > 0, 5 * HZ);
- if (ret == 0) {
+ /*
+ * Capability ids are defined in the virtio-gpu spec and are
+ * between 1 and 63, inclusive.
+ */
+ if (!vgdev->capsets[i].id ||
+ vgdev->capsets[i].id > MAX_CAPSET_ID)
+ invalid_capset_id = true;
+
+ if (ret == 0)
DRM_ERROR("timed out waiting for cap set %d\n", i);
+ else if (invalid_capset_id)
+ DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id);
+
+ if (ret == 0 || invalid_capset_id) {
spin_lock(&vgdev->display_info_lock);
kfree(vgdev->capsets);
vgdev->capsets = NULL;
spin_unlock(&vgdev->display_info_lock);
return;
}
+
+ vgdev->capset_id_mask |= 1ULL << vgdev->capsets[i].id;
DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
i, vgdev->capsets[i].id,
vgdev->capsets[i].max_version,
vgdev->capsets[i].max_size);
}
+
vgdev->num_capsets = num_capsets;
}
@@ -175,13 +191,19 @@ int virtio_gpu_init(struct drm_device *dev)
(unsigned long)vgdev->host_visible_region.addr,
(unsigned long)vgdev->host_visible_region.len);
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
+ vgdev->has_context_init = true;
+ }
- DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
+ DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
vgdev->has_virgl_3d ? '+' : '-',
vgdev->has_edid ? '+' : '-',
vgdev->has_resource_blob ? '+' : '-',
vgdev->has_host_visible ? '+' : '-');
+ DRM_INFO("features: %ccontext_init\n",
+ vgdev->has_context_init ? '+' : '-');
+
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a49fd9480381..6d3cc9e238a4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -256,7 +256,8 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
if (bo->dumb && (plane->state->fb != new_state->fb)) {
- vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+ vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ 0);
if (!vgfb->fence)
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 93a41d018dca..7c052efe8836 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -199,7 +199,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
struct list_head reclaim_list;
struct virtio_gpu_vbuffer *entry, *tmp;
struct virtio_gpu_ctrl_hdr *resp;
- u64 fence_id = 0;
+ u64 fence_id;
INIT_LIST_HEAD(&reclaim_list);
spin_lock(&vgdev->ctrlq.qlock);
@@ -226,23 +226,14 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
- u64 f = le64_to_cpu(resp->fence_id);
-
- if (fence_id > f) {
- DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
- __func__, fence_id, f);
- } else {
- fence_id = f;
- }
+ fence_id = le64_to_cpu(resp->fence_id);
+ virtio_gpu_fence_event_process(vgdev, fence_id);
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
- if (fence_id)
- virtio_gpu_fence_event_process(vgdev, fence_id);
-
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
if (entry->objs)
virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
@@ -911,7 +902,8 @@ int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
}
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
- uint32_t nlen, const char *name)
+ uint32_t context_init, uint32_t nlen,
+ const char *name)
{
struct virtio_gpu_ctx_create *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -922,6 +914,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
+ cmd_p->context_init = cpu_to_le32(context_init);
strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 9cdbd209388e..061d87313fac 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -790,11 +790,19 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
void drm_bridge_add(struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
-struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous,
enum drm_bridge_attach_flags flags);
+#ifdef CONFIG_OF
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+#else
+static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
@@ -911,9 +919,20 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
struct drm_panel *panel,
u32 connector_type);
+struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
+#endif
+
+#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE)
struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node,
u32 port, u32 endpoint);
-struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
+#else
+static inline struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
#endif
#endif
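With the new stubs, bridge consumers can call these helpers unconditionally; on
kernels without CONFIG_OF or DRM_PANEL_BRIDGE the lookup simply fails with an
error pointer. A rough usage sketch (foo_probe() and the port/endpoint numbers
are hypothetical, not part of this patch):

	static int foo_probe(struct platform_device *pdev)
	{
		struct drm_bridge *next;

		/* bridge attached to port 1, endpoint 0 of our OF node */
		next = devm_drm_of_get_bridge(&pdev->dev, pdev->dev.of_node, 1, 0);
		if (IS_ERR(next))
			return PTR_ERR(next);

		/* store next and hand it to drm_bridge_attach() later */
		return 0;
	}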
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 1d5b3dbb6e56..a1df35aa6e68 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -453,6 +453,7 @@ struct drm_panel;
# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+#define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */
/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xC /* 0x9E - 0x92 */
@@ -537,6 +538,9 @@ struct drm_panel;
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+/* DFP Capability Extension */
+#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
+
/* Link Configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
@@ -688,6 +692,7 @@ struct drm_panel;
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
# define DP_DECOMPRESSION_EN (1 << 0)
+#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE BIT(0)
@@ -743,6 +748,7 @@ struct drm_panel;
# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
# define DP_RECEIVE_PORT_1_STATUS (1 << 1)
# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */
+# define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) /* 2.0 */
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
@@ -865,6 +871,8 @@ struct drm_panel;
# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4
# define DP_PHY_TEST_PATTERN_CP2520 0x5
+#define DP_PHY_SQUARE_PATTERN 0x249
+
#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A
#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250
#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251
@@ -1109,6 +1117,18 @@ struct drm_panel;
#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230
+#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250
+
+/* DSC Extended Capability Branch Total DSC Resources */
+#define DP_DSC_SUPPORT_AND_DSC_DECODER_COUNT 0x2260 /* 2.0 */
+# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
+# define DP_DSC_DECODER_COUNT_SHIFT 5
+#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 /* 2.0 */
+# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
+
/* Protocol Converter Extension */
/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
#define DP_CEC_TUNNELING_CAPABILITY 0x3000
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index ccc80cb7f86a..18f6c700f6d0 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -510,21 +510,23 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
/**
* drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id()
- * @vend: 3-character vendor string
+ * @vend_chr_0: First character of the vendor string.
+ * @vend_chr_1: Second character of the vendor string.
+ * @vend_chr_2: Third character of the vendor string.
* @product_id: The 16-bit product ID.
*
* This is a macro so that it can be calculated at compile time and used
* as an initializer.
*
* For instance:
- * drm_edid_encode_panel_id("BOE", 0x2d08) => 0x09e52d08
+ * drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08) => 0x09e52d08
*
* Return: a 32-bit ID per panel.
*/
-#define drm_edid_encode_panel_id(vend, product_id) \
- ((((u32)((vend)[0]) - '@') & 0x1f) << 26 | \
- (((u32)((vend)[1]) - '@') & 0x1f) << 21 | \
- (((u32)((vend)[2]) - '@') & 0x1f) << 16 | \
+#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \
+ ((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \
+ (((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \
+ (((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \
((product_id) & 0xffff))
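With the per-character form, the ID can still be computed at compile time, for
example in a static panel table (the table layout and quirk flag shown here are
hypothetical):

	static const struct foo_panel_entry {
		u32 panel_id;
		u32 quirks;
	} foo_panels[] = {
		{ drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08), FOO_QUIRK_SLOW_HPD },
	};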
/**
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 4e0258a61311..e86925cf07b9 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -16,12 +16,16 @@ void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, void *vadd
struct drm_rect *clip);
void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
struct drm_rect *clip, bool cached);
+void drm_fb_xrgb8888_to_rgb332(void *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
struct drm_framebuffer *fb,
struct drm_rect *clip, bool swab);
void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swab);
+void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index af7ba8071eb0..147e51b6d241 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -227,9 +227,13 @@ struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info);
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
+struct mipi_dsi_device *
+devm_mipi_dsi_device_register_full(struct device *dev, struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int devm_mipi_dsi_attach(struct device *dev, struct mipi_dsi_device *dsi);
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
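A DSI consumer (for instance a bridge driver) can now lean on devres for the
unregister/detach on unbind; roughly (dev, host and the device-info fields
below are assumptions for the sketch):

	const struct mipi_dsi_device_info info = {
		.type = "foo-panel",
		.channel = 0,
		.node = NULL,
	};
	struct mipi_dsi_device *dsi;
	int ret;

	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	ret = devm_mipi_dsi_attach(dev, dsi);
	if (ret)
		return ret;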
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 15a089a87c22..22fabdeed297 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -340,6 +340,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_ERROR() - Error output.
*
+ * NOTE: this is deprecated in favor of drm_err() or dev_err().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -349,6 +351,9 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_ERROR_RATELIMITED() - Rate limited error output.
*
+ * NOTE: this is deprecated in favor of drm_err_ratelimited() or
+ * dev_err_ratelimited().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*
@@ -364,9 +369,11 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
})
+/* NOTE: this is deprecated in favor of drm_info() or dev_info(). */
#define DRM_DEV_INFO(dev, fmt, ...) \
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
({ \
static bool __print_once __read_mostly; \
@@ -379,6 +386,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG() - Debug output for generic drm code
*
+ * NOTE: this is deprecated in favor of drm_dbg_core().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -387,6 +396,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver
*
+ * NOTE: this is deprecated in favor of drm_dbg() or dev_dbg().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -395,6 +406,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG_KMS() - Debug output for modesetting code
*
+ * NOTE: this is deprecated in favor of drm_dbg_kms().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -480,47 +493,63 @@ void __drm_err(const char *format, ...);
#define _DRM_PRINTK(once, level, fmt, ...) \
printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info(). */
#define DRM_INFO(fmt, ...) \
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice(). */
#define DRM_NOTE(fmt, ...) \
_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn(). */
#define DRM_WARN(fmt, ...) \
_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info_once(). */
#define DRM_INFO_ONCE(fmt, ...) \
_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice_once(). */
#define DRM_NOTE_ONCE(fmt, ...) \
_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn_once(). */
#define DRM_WARN_ONCE(fmt, ...) \
_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err(). */
#define DRM_ERROR(fmt, ...) \
__drm_err(fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err_ratelimited(). */
#define DRM_ERROR_RATELIMITED(fmt, ...) \
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */
#define DRM_DEBUG(fmt, ...) \
__drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */
#define DRM_DEBUG_DRIVER(fmt, ...) \
__drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */
#define DRM_DEBUG_KMS(fmt, ...) \
__drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */
#define DRM_DEBUG_PRIME(fmt, ...) \
__drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */
#define DRM_DEBUG_ATOMIC(fmt, ...) \
__drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */
#define DRM_DEBUG_VBL(fmt, ...) \
__drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */
#define DRM_DEBUG_LEASE(fmt, ...) \
__drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */
#define DRM_DEBUG_DP(fmt, ...) \
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
@@ -536,6 +565,7 @@ void __drm_err(const char *format, ...);
#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
__DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_kms_ratelimited(NULL, ...). */
#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__)
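For reference, the preferred replacements take a drm_device so messages are
attributed to the right GPU; a minimal before/after sketch (foo is a
hypothetical driver struct embedding a drm_device):

	/* preferred */
	drm_dbg_kms(&foo->drm, "mode %ux%u rejected\n", w, h);
	drm_err(&foo->drm, "engine hang detected\n");

	/* deprecated forms the NOTEs above point away from */
	DRM_DEBUG_KMS("mode %ux%u rejected\n", w, h);
	DRM_ERROR("engine hang detected\n");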
/*
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 8d3ed2834d34..04c57564c397 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -18,6 +18,7 @@ int drm_helper_probe_detect(struct drm_connector *connector,
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector);
void drm_kms_helper_hotplug_event(struct drm_device *dev);
void drm_kms_helper_poll_disable(struct drm_device *dev);
diff --git a/include/drm/gud.h b/include/drm/gud.h
index 0b46b54fe56e..c52a8ba4ae4e 100644
--- a/include/drm/gud.h
+++ b/include/drm/gud.h
@@ -246,10 +246,12 @@ struct gud_state_req {
/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
#define GUD_REQ_GET_FORMATS 0x40
#define GUD_FORMATS_MAX_NUM 32
- /* R1 is a 1-bit monochrome transfer format presented to userspace as XRGB8888 */
- #define GUD_PIXEL_FORMAT_R1 0x01
+ #define GUD_PIXEL_FORMAT_R1 0x01 /* 1-bit monochrome */
+ #define GUD_PIXEL_FORMAT_R8 0x08 /* 8-bit greyscale */
#define GUD_PIXEL_FORMAT_XRGB1111 0x20
+ #define GUD_PIXEL_FORMAT_RGB332 0x30
#define GUD_PIXEL_FORMAT_RGB565 0x40
+ #define GUD_PIXEL_FORMAT_RGB888 0x50
#define GUD_PIXEL_FORMAT_XRGB8888 0x80
#define GUD_PIXEL_FORMAT_ARGB8888 0x81
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index cbe03d45e883..0a4ddec78d8f 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -65,7 +65,7 @@ struct ttm_device_funcs {
* ttm_tt_create
*
* @bo: The buffer object to create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 89b15d673b22..f20832139815 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -38,36 +38,70 @@ struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
-#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_SG (1 << 8)
-#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
-
-#define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31)
-
/**
- * struct ttm_tt
- *
- * @pages: Array of pages backing the data.
- * @page_flags: see TTM_PAGE_FLAG_*
- * @num_pages: Number of pages in the page array.
- * @sg: for SG objects via dma-buf
- * @dma_address: The DMA (bus) addresses of the pages
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @pages_list: used by some page allocation backend
- * @caching: The current caching state of the pages, see enum ttm_caching.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * struct ttm_tt - This is a structure holding the pages, caching- and aperture
+ * binding status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_tt {
+ /** @pages: Array of pages backing the data. */
struct page **pages;
+ /**
+ * @page_flags: The page flags.
+ *
+ * Supported values:
+ *
+ * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
+ * and swapped out by TTM. Calling ttm_tt_populate() will then swap the
+ * pages back in, and unset the flag. Drivers should in general never
+ * need to touch this.
+ *
+ * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
+ * allocation.
+ *
+ * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
+ * externally, like with dma-buf or userptr. This effectively disables
+ * TTM swapping out such pages. It is also important to prevent TTM from
+ * ever directly mapping these pages.
+ *
+ * Note that enum ttm_bo_type.ttm_bo_type_sg objects will always enable
+ * this flag.
+ *
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE: Same behaviour as
+ * TTM_TT_FLAG_EXTERNAL, but with the reduced restriction that it is
+ * still valid to use TTM to map the pages directly. This is useful when
+ * implementing a ttm_tt backend which still allocates driver-owned
+ * pages underneath (say, with shmem).
+ *
+ * Note that since this also implies TTM_TT_FLAG_EXTERNAL, the usage
+ * here should always be:
+ *
+ * page_flags = TTM_TT_FLAG_EXTERNAL |
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ *
+ * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
+ * set by TTM after ttm_tt_populate() has successfully returned, and is
+ * then unset when TTM calls ttm_tt_unpopulate().
+ */
+#define TTM_TT_FLAG_SWAPPED (1 << 0)
+#define TTM_TT_FLAG_ZERO_ALLOC (1 << 1)
+#define TTM_TT_FLAG_EXTERNAL (1 << 2)
+#define TTM_TT_FLAG_EXTERNAL_MAPPABLE (1 << 3)
+
+#define TTM_TT_FLAG_PRIV_POPULATED (1 << 31)
uint32_t page_flags;
+ /** @num_pages: Number of pages in the page array. */
uint32_t num_pages;
+ /** @sg: for SG objects via dma-buf. */
struct sg_table *sg;
+ /** @dma_address: The DMA (bus) addresses of the pages. */
dma_addr_t *dma_address;
+ /** @swap_storage: Pointer to shmem struct file for swap storage. */
struct file *swap_storage;
+ /**
+ * @caching: The current caching state of the pages, see enum
+ * ttm_caching.
+ */
enum ttm_caching caching;
};
@@ -85,7 +119,7 @@ struct ttm_kmap_iter_tt {
static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
{
- return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
+ return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
}
/**
@@ -104,7 +138,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
*
* @ttm: The struct ttm_tt.
* @bo: The buffer object we create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
* @caching: the desired caching state of the pages
*
* Create a struct ttm_tt to back data with system memory pages.
@@ -179,7 +213,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
*/
static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
{
- ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
}
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
@@ -195,7 +229,7 @@ struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
*
* @bo: Buffer object we allocate the ttm for.
* @bridge: The agp bridge this device is sitting on.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
*
*
* Create a TTM backend that uses the indicated AGP bridge as an aperture
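As a sketch of the TTM_TT_FLAG_EXTERNAL_MAPPABLE rule documented above, a
shmem-backed ttm_tt backend might request the combined flags from its
ttm_tt_create hook (foo_ttm_tt_create() is hypothetical, not part of this
series):

	static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
						uint32_t page_flags)
	{
		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

		if (!tt)
			return NULL;

		page_flags |= TTM_TT_FLAG_EXTERNAL |
			      TTM_TT_FLAG_EXTERNAL_MAPPABLE;
		if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
			kfree(tt);
			return NULL;
		}
		return tt;
	}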
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 66470c37e471..02c2eb874da6 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -440,7 +440,7 @@ struct dma_buf {
wait_queue_head_t *poll;
__poll_t active;
- } cb_excl, cb_shared;
+ } cb_in, cb_out;
#ifdef CONFIG_DMABUF_SYSFS_STATS
/**
* @sysfs_entry:
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 9100dd3dc21f..764138ad8583 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -149,6 +149,101 @@ struct dma_resv {
struct dma_resv_list __rcu *fence;
};
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver, use the accessor function instead.
+ */
+struct dma_resv_iter {
+ /** @obj: The dma_resv object we iterate over */
+ struct dma_resv *obj;
+
+ /** @all_fences: If all fences should be returned */
+ bool all_fences;
+
+ /** @fence: the currently handled fence */
+ struct dma_fence *fence;
+
+ /** @seq: sequence number to check for modifications */
+ unsigned int seq;
+
+ /** @index: index into the shared fences */
+ unsigned int index;
+
+ /** @fences: the shared fences */
+ struct dma_resv_list *fences;
+
+ /** @is_restarted: true if this is the first returned fence */
+ bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+
+/**
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @all_fences: If all fences should be returned or just the exclusive one
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+ struct dma_resv *obj,
+ bool all_fences)
+{
+ cursor->obj = obj;
+ cursor->all_fences = all_fences;
+ cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+ dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
+ * @cursor: the cursor of the current position
+ *
+ * Returns true if the currently returned fence is the exclusive one.
+ */
+static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
+{
+ return cursor->index == 0;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
+ *
+ * Returns true if this is the first fence in an iteration after a restart.
+ */
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
+{
+ return cursor->is_restarted;
+}
+
+/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ * When the dma_resv is modified the iteration starts over again.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence) \
+ for (fence = dma_resv_iter_first_unlocked(cursor); \
+ fence; fence = dma_resv_iter_next_unlocked(cursor))
+
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
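Putting the iterator pieces together, an unlocked walk over all fences of a
reservation object looks roughly like this (obj is some struct dma_resv
pointer; the restart handling shown is only a sketch):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor)) {
			/* the dma_resv changed, drop state collected so far */
		}
		/* inspect or take a reference on the fence here */
	}
	dma_resv_iter_end(&cursor);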
diff --git a/include/linux/io.h b/include/linux/io.h
index 9595151d800d..5fc800390fe4 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -132,6 +132,8 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);
+
enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
@@ -166,4 +168,7 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
}
#endif
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size);
+
#endif /* _LINUX_IO_H */
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index e4a2570a6058..e1e351682872 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -1112,7 +1112,8 @@ struct drm_mode_destroy_blob {
* Lease mode resources, creating another drm_master.
*
* The @object_ids array must reference at least one CRTC, one connector and
- * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled.
+ * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively,
+ * the lease can be completely empty.
*/
struct drm_mode_create_lease {
/** @object_ids: Pointer to array of object ids (__u32) */
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 4104f22fb3d3..3dfc0af8756a 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -58,6 +58,67 @@ extern "C" {
struct drm_v3d_perfmon_get_values)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
+#define DRM_V3D_SUBMIT_EXTENSION 0x02
+
+/* struct drm_v3d_extension - ioctl extensions
+ *
+ * Linked list of generic extensions in which the id identifies which struct is
+ * pointed to by ext_data. The DRM_V3D_EXT_ID_* values are therefore used in id
+ * to identify the extension type.
+ */
+struct drm_v3d_extension {
+ __u64 next;
+ __u32 id;
+#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+ __u32 flags; /* mbz */
+};
+
+/* struct drm_v3d_sem - wait/signal semaphore
+ *
+ * For a binary semaphore, only the syncobj handle is used; the flags and
+ * point fields are ignored. The point field is defined for the timeline
+ * syncobj feature.
+ */
+struct drm_v3d_sem {
+ __u32 handle; /* syncobj */
+ /* rsv below, for future uses */
+ __u32 flags;
+ __u64 point; /* for timeline sem support */
+ __u64 mbz[2]; /* must be zero, rsv */
+};
+
+/* Enum for each of the V3D queues. */
+enum v3d_queue {
+ V3D_BIN,
+ V3D_RENDER,
+ V3D_TFU,
+ V3D_CSD,
+ V3D_CACHE_CLEAN,
+};
+
+/**
+ * struct drm_v3d_multi_sync - ioctl extension to add support for multiple
+ * syncobjs on command submission.
+ *
+ * When an extension with the DRM_V3D_EXT_ID_MULTI_SYNC id is defined, it points
+ * to this extension to define wait and signal dependencies, instead of the
+ * single in/out sync entries on submitting commands. The flags field is used to
+ * determine the stage at which to set the wait dependencies.
+ */
+struct drm_v3d_multi_sync {
+ struct drm_v3d_extension base;
+ /* Array of wait and signal semaphores */
+ __u64 in_syncs;
+ __u64 out_syncs;
+
+ /* Number of entries */
+ __u32 in_sync_count;
+ __u32 out_sync_count;
+
+ /* set the stage (v3d_queue) to sync */
+ __u32 wait_stage;
+
+ __u32 pad; /* mbz */
+};
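From userspace, the extension is chained into a submission roughly as follows
(in_sems/out_sems are hypothetical arrays of struct drm_v3d_sem, and only the
relevant drm_v3d_submit_cl fields are shown):

	struct drm_v3d_multi_sync ms = {
		.base.id = DRM_V3D_EXT_ID_MULTI_SYNC,
		.in_syncs = (__u64)(uintptr_t)in_sems,
		.out_syncs = (__u64)(uintptr_t)out_sems,
		.in_sync_count = n_in,
		.out_sync_count = n_out,
		.wait_stage = V3D_RENDER,
	};
	struct drm_v3d_submit_cl submit = {
		.flags = DRM_V3D_SUBMIT_EXTENSION,
		.extensions = (__u64)(uintptr_t)&ms,
		/* remaining submit fields elided */
	};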
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -135,12 +196,16 @@ struct drm_v3d_submit_cl {
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
+ /* DRM_V3D_SUBMIT_* properties */
__u32 flags;
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmon_id;
__u32 pad;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
};
/**
@@ -210,6 +275,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CSD,
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
DRM_V3D_PARAM_SUPPORTS_PERFMON,
+ DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
};
struct drm_v3d_get_param {
@@ -248,6 +314,11 @@ struct drm_v3d_submit_tfu {
__u32 in_sync;
/* Sync object to signal when the TFU job is done. */
__u32 out_sync;
+
+ __u32 flags;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
};
/* Submits a compute shader for dispatch. This job will block on any
@@ -276,6 +347,13 @@ struct drm_v3d_submit_csd {
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmon_id;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+
+ __u32 flags;
+
+ __u32 pad;
};
enum {
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index b9ec26e9c646..a13e20cc66b4 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -47,12 +47,15 @@ extern "C" {
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
+#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_RING_IDX 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ VIRTGPU_EXECBUF_RING_IDX |\
0)
struct drm_virtgpu_map {
@@ -68,6 +71,8 @@ struct drm_virtgpu_execbuffer {
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+ __u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
+ __u32 pad;
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -75,6 +80,8 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
+#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
+#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
struct drm_virtgpu_getparam {
__u64 param;
@@ -173,6 +180,22 @@ struct drm_virtgpu_resource_create_blob {
__u64 blob_id;
};
+#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
+#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
+#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+struct drm_virtgpu_context_set_param {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_virtgpu_context_init {
+ __u32 num_params;
+ __u32 pad;
+
+ /* pointer to drm_virtgpu_context_set_param array */
+ __u64 ctx_set_params;
+};
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -212,6 +235,10 @@ struct drm_virtgpu_resource_create_blob {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
struct drm_virtgpu_resource_create_blob)
+#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
+ struct drm_virtgpu_context_init)
+
#if defined(__cplusplus)
}
#endif
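A userspace sketch of the new ioctl, assuming a libdrm-style drmIoctl() wrapper
and a capset id previously discovered through the
VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs getparam:

	struct drm_virtgpu_context_set_param params[] = {
		{ VIRTGPU_CONTEXT_PARAM_CAPSET_ID, capset_id },
		{ VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 2 },
		{ VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, 0x3 },
	};
	struct drm_virtgpu_context_init init = {
		.num_params = 3,
		.ctx_set_params = (__u64)(uintptr_t)params,
	};

	/* must run before the context exists; -EEXIST once it has been created */
	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init))
		return -errno;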
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 97523a95781d..f556fde07b76 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -59,6 +59,11 @@
* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
*/
#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+/*
+ * VIRTIO_GPU_CMD_CTX_CREATE with
+ * context_init and multiple timelines
+ */
+#define VIRTIO_GPU_F_CONTEXT_INIT 4
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -122,14 +127,20 @@ enum virtio_gpu_shm_id {
VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
};
-#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+/*
+ * If the following flag is set, then ring_idx contains the index
+ * of the command ring that needs to be used when creating the fence.
+ */
+#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
struct virtio_gpu_ctrl_hdr {
__le32 type;
__le32 flags;
__le64 fence_id;
__le32 ctx_id;
- __le32 padding;
+ __u8 ring_idx;
+ __u8 padding[3];
};
/* data passed in the cursor vq */
@@ -269,10 +280,11 @@ struct virtio_gpu_resource_create_3d {
};
/* VIRTIO_GPU_CMD_CTX_CREATE */
+#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
struct virtio_gpu_ctx_create {
struct virtio_gpu_ctrl_hdr hdr;
__le32 nlen;
- __le32 padding;
+ __le32 context_init;
char debug_name[64];
};
diff --git a/lib/devres.c b/lib/devres.c
index b0e1c6702c71..14664bbb4875 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -528,3 +528,85 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */
+
+static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
+{
+ arch_phys_wc_del(*((int *)res));
+}
+
+/**
+ * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
+ * @dev: Managed device
+ * @base: Memory base address
+ * @size: Size of memory range
+ *
+ * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
+ * See arch_phys_wc_add() for more information.
+ */
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
+{
+ int *mtrr;
+ int ret;
+
+ mtrr = devres_alloc(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL);
+ if (!mtrr)
+ return -ENOMEM;
+
+ ret = arch_phys_wc_add(base, size);
+ if (ret < 0) {
+ devres_free(mtrr);
+ return ret;
+ }
+
+ *mtrr = ret;
+ devres_add(dev, mtrr);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_arch_phys_wc_add);
+
+struct arch_io_reserve_memtype_wc_devres {
+ resource_size_t start;
+ resource_size_t size;
+};
+
+static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
+{
+ const struct arch_io_reserve_memtype_wc_devres *this = res;
+
+ arch_io_free_memtype_wc(this->start, this->size);
+}
+
+/**
+ * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
+ * @dev: Managed device
+ * @start: Memory base address
+ * @size: Size of memory range
+ *
+ * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
+ * and sets up a release callback. See arch_io_reserve_memtype_wc() for more
+ * information.
+ */
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size)
+{
+ struct arch_io_reserve_memtype_wc_devres *dr;
+ int ret;
+
+ dr = devres_alloc(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = arch_io_reserve_memtype_wc(start, size);
+ if (ret < 0) {
+ devres_free(dr);
+ return ret;
+ }
+
+ dr->start = start;
+ dr->size = size;
+ devres_add(dev, dr);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
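A hypothetical probe-time caller, mirroring the kind of VRAM-aperture setup
these managed helpers are intended for (foo_init_vram() and the drm_device
plumbing are assumptions, not part of this patch):

	static int foo_init_vram(struct drm_device *dev, resource_size_t base,
				 resource_size_t size)
	{
		int ret;

		ret = devm_arch_io_reserve_memtype_wc(dev->dev, base, size);
		if (ret)
			return ret;

		/* best effort; the MTRR is released automatically on unbind */
		devm_arch_phys_wc_add(dev->dev, base, size);
		return 0;
	}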