aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/msm/msm_gem.c
diff options
context:
space:
mode:
authorRob Clark <robdclark@gmail.com>2013-07-19 12:59:32 -0400
committerRob Clark <robdclark@gmail.com>2013-08-24 14:57:18 -0400
commit7198e6b03155f6dadecadba004eb83b81a6ffe4c (patch)
treeed4ae3e859fd9a722524242a145008d13606a95a /drivers/gpu/drm/msm/msm_gem.c
parentdrm/msm: add register definitions for gpu (diff)
downloadlinux-dev-7198e6b03155f6dadecadba004eb83b81a6ffe4c.tar.xz
linux-dev-7198e6b03155f6dadecadba004eb83b81a6ffe4c.zip
drm/msm: add a3xx gpu support
Add initial support for a3xx 3d core. So far, with hardware that I've seen to date, we can have: + zero, one, or two z180 2d cores + a3xx or a2xx 3d core, which share a common CP (the firmware for the CP seems to implement some different PM4 packet types but the basics of cmdstream submission are the same) Which means that the eventual complete "class" hierarchy, once support for all past and present hw is in place, becomes: + msm_gpu + adreno_gpu + a3xx_gpu + a2xx_gpu + z180_gpu This commit splits out the parts that will eventually be common between a2xx/a3xx into adreno_gpu, and the parts that are even common to z180 into msm_gpu. Note that there is no cmdstream validation required. All memory access from the GPU is via IOMMU/MMU. So as long as you don't map silly things to the GPU, there isn't much damage that the GPU can do. Signed-off-by: Rob Clark <robdclark@gmail.com>
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c84
1 file changed, 80 insertions, 4 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a52e6cca8403..6b5a6c8c7658 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -20,6 +20,7 @@
#include "msm_drv.h"
#include "msm_gem.h"
+#include "msm_gpu.h"
/* called with dev->struct_mutex held */
@@ -375,10 +376,74 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ if (!list_empty(&work->entry)) {
+ ret = -EINVAL;
+ } else if (is_active(msm_obj)) {
+ list_add_tail(&work->entry, &msm_obj->inactive_work);
+ } else {
+ queue_work(priv->wq, work);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+void msm_gem_move_to_active(struct drm_gem_object *obj,
+ struct msm_gpu *gpu, uint32_t fence)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ msm_obj->gpu = gpu;
+ msm_obj->fence = fence;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+}
+
+void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ msm_obj->gpu = NULL;
+ msm_obj->fence = 0;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+
+ while (!list_empty(&msm_obj->inactive_work)) {
+ struct work_struct *work;
+
+ work = list_first_entry(&msm_obj->inactive_work,
+ struct work_struct, entry);
+
+ list_del_init(&work->entry);
+ queue_work(priv->wq, work);
+ }
+}
+
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
+ struct timespec *timeout)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
+ ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+
+ /* TODO cache maintenance */
- /* just a place-holder until we have gpu.. */
- queue_work(priv->wq, work);
+ return ret;
+}
+int msm_gem_cpu_fini(struct drm_gem_object *obj)
+{
+ /* TODO cache maintenance */
return 0;
}
@@ -390,8 +455,9 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
uint64_t off = drm_vma_node_start(&obj->vma_node);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- seq_printf(m, "%08x: %2d (%2d) %08llx %p %d\n",
- msm_obj->flags, obj->name, obj->refcount.refcount.counter,
+ seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+ msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
+ msm_obj->fence, obj->name, obj->refcount.refcount.counter,
off, msm_obj->vaddr, obj->size);
}
@@ -421,6 +487,9 @@ void msm_gem_free_object(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ /* object should not be on active list: */
+ WARN_ON(is_active(msm_obj));
+
list_del(&msm_obj->mm_list);
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
@@ -439,6 +508,9 @@ void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
+ if (msm_obj->resv == &msm_obj->_resv)
+ reservation_object_fini(msm_obj->resv);
+
drm_gem_object_release(obj);
kfree(msm_obj);
@@ -508,7 +580,11 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
msm_obj->flags = flags;
+ msm_obj->resv = &msm_obj->_resv;
+ reservation_object_init(msm_obj->resv);
+ INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->inactive_work);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
return obj;