Diffstat (limited to 'drivers/gpu/drm/vc4')
-rw-r--r--  drivers/gpu/drm/vc4/Kconfig                  |  2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c                 | 44
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c               | 14
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c                |  6
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h                | 18
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c                |  4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c              |  4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_render_cl.c          | 26
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c                |  2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_v3d.c                |  4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate.c           | 16
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate_shaders.c   |  2
12 files changed, 71 insertions(+), 71 deletions(-)
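
Every hunk below is the same mechanical substitution: the GEM helper layer formerly named after CMA (the Contiguous Memory Allocator backend) is renamed after what it actually provides, DMA-addressable contiguous memory. As a reading aid, the identifier mapping applied throughout this driver (collected from the hunks below; a summary, not part of the diff):

	#include <drm/drm_gem_dma_helper.h>	/* was <drm/drm_gem_cma_helper.h> */

	/* struct drm_gem_cma_object   -> struct drm_gem_dma_object   */
	/* drm_gem_cma_create()        -> drm_gem_dma_create()        */
	/* drm_gem_cma_free()          -> drm_gem_dma_free()          */
	/* drm_gem_cma_mmap()          -> drm_gem_dma_mmap()          */
	/* to_drm_gem_cma_obj()        -> to_drm_gem_dma_obj()        */
	/* DRM_GEM_CMA_DRIVER_OPS_*()  -> DRM_GEM_DMA_DRIVER_OPS_*()  */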
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index b0f3117102ca..246305d17a52 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -12,7 +12,7 @@ config DRM_VC4
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
select SND_PCM
select SND_PCM_ELD
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 2ccf96b764db..07f57910c9e8 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -8,10 +8,10 @@
*
* The VC4 GPU architecture (both scanout and rendering) has direct
* access to system memory with no MMU in between. To support it, we
- * use the GEM CMA helper functions to allocate contiguous ranges of
+ * use the GEM DMA helper functions to allocate contiguous ranges of
* physical memory for our BOs.
*
- * Since the CMA allocator is very slow, we keep a cache of recently
+ * Since the DMA allocator is very slow, we keep a cache of recently
* freed BOs around so that the kernel's allocation of objects for 3D
* rendering can return quickly.
*/
@@ -179,7 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
- drm_gem_cma_free(&bo->base);
+ drm_gem_dma_free(&bo->base);
}
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
@@ -387,7 +387,7 @@ out:
* @dev: DRM device
* @size: Size in bytes of the memory the object will reference
*
- * This lets the CMA helpers allocate object structs for us, and keep
+ * This lets the DMA helpers allocate object structs for us, and keep
* our BO stats correct.
*/
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
@@ -426,7 +426,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
{
size_t size = roundup(unaligned_size, PAGE_SIZE);
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -443,39 +443,39 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
return bo;
}
- cma_obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(cma_obj)) {
+ dma_obj = drm_gem_dma_create(dev, size);
+ if (IS_ERR(dma_obj)) {
/*
- * If we've run out of CMA memory, kill the cache of
- * CMA allocations we've got laying around and try again.
+ * If we've run out of DMA memory, kill the cache of
+ * DMA allocations we've got laying around and try again.
*/
vc4_bo_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
/*
- * Still not enough CMA memory, purge the userspace BO
+ * Still not enough DMA memory, purge the userspace BO
* cache and retry.
* This is sub-optimal since we purge the whole userspace
* BO cache, which forces users that want to re-use the BO to
* restore its initial content.
* Ideally, we should purge entries one by one and retry
- * after each to see if CMA allocation succeeds. Or even
+ * after each to see if DMA allocation succeeds. Or even
* better, try to find an entry with at least the same
* size.
*/
vc4_bo_userspace_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
struct drm_printer p = drm_info_printer(vc4->base.dev);
- DRM_ERROR("Failed to allocate from CMA:\n");
+ DRM_ERROR("Failed to allocate from GEM DMA helper:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
- bo = to_vc4_bo(&cma_obj->base);
+ bo = to_vc4_bo(&dma_obj->base);
/* By default, BOs do not support the MADV ioctl. This will be enabled
* only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
@@ -484,7 +484,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
bo->madv = __VC4_MADV_NOTSUPP;
mutex_lock(&vc4->bo_lock);
- vc4_bo_set_label(&cma_obj->base, type);
+ vc4_bo_set_label(&dma_obj->base, type);
mutex_unlock(&vc4->bo_lock);
return bo;
@@ -569,7 +569,7 @@ static void vc4_free_object(struct drm_gem_object *gem_bo)
goto out;
}
- /* If this object was partially constructed but CMA allocation
+ /* If this object was partially constructed but DMA allocation
* had failed, just free it. Can also happen when the BO has been
* purged.
*/
@@ -747,7 +747,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
return -EINVAL;
}
- return drm_gem_cma_mmap(&bo->base, vma);
+ return drm_gem_dma_mmap(&bo->base, vma);
}
static const struct vm_operations_struct vc4_vm_ops = {
@@ -759,8 +759,8 @@ static const struct vm_operations_struct vc4_vm_ops = {
static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.free = vc4_free_object,
.export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
.mmap = vc4_gem_object_mmap,
.vm_ops = &vc4_vm_ops,
};
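
Condensed, the fallback chain that the vc4_bo_create() hunks above patch is: allocate, purge the kernel BO cache and retry, purge the userspace BO cache and retry. A sketch with the locking, stats, and labeling stripped out (vc4_try_alloc() is a hypothetical name; the two purge helpers are the driver's own, referenced in the hunks):

	static struct drm_gem_dma_object *
	vc4_try_alloc(struct drm_device *dev, size_t size)
	{
		struct drm_gem_dma_object *dma_obj = drm_gem_dma_create(dev, size);

		if (IS_ERR(dma_obj)) {
			/* First fallback: drop the kernel's cache of freed BOs. */
			vc4_bo_cache_purge(dev);
			dma_obj = drm_gem_dma_create(dev, size);
		}
		if (IS_ERR(dma_obj)) {
			/* Last resort: purge the userspace BO cache too. */
			vc4_bo_userspace_cache_purge(dev);
			dma_obj = drm_gem_dma_create(dev, size);
		}
		return dma_obj;	/* still an ERR_PTR() on hard failure */
	}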
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index f85788401707..2def6e2ad6f0 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -823,9 +823,9 @@ static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
struct vc4_bo *bo = NULL;
if (flip_state->old_fb) {
- struct drm_gem_cma_object *cma_bo =
+ struct drm_gem_dma_object *dma_bo =
drm_fb_dma_get_gem_obj(flip_state->old_fb, 0);
- bo = to_vc4_bo(&cma_bo->base);
+ bo = to_vc4_bo(&dma_bo->base);
}
vc4_async_page_flip_complete(flip_state);
@@ -857,19 +857,19 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
struct vc4_async_flip_state *flip_state)
{
struct drm_framebuffer *fb = flip_state->fb;
- struct drm_gem_cma_object *cma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct dma_fence *fence;
int ret;
if (!vc4->is_vc5) {
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
vc4_async_page_flip_seqno_complete);
}
- ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
+ ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
return ret;
@@ -945,8 +945,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_bo = drm_fb_dma_get_gem_obj(fb, 0);
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
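
All three crtc hunks share one pattern: the object backing plane 0 of a framebuffer comes from drm_fb_dma_get_gem_obj(), which now returns the renamed struct. A sketch of that lookup (fb_plane0_addr() is a hypothetical helper; dma_addr is the bus-address field the DMA helper exposes):

	#include <drm/drm_fb_dma_helper.h>
	#include <drm/drm_gem_dma_helper.h>

	static dma_addr_t fb_plane0_addr(struct drm_framebuffer *fb)
	{
		struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);

		/* The address the scanout hardware reads plane 0 from. */
		return dma_obj->dma_addr + fb->offsets[0];
	}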
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index d33baf2667dd..ffbbb454c9e8 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -85,7 +85,7 @@ static int vc5_dumb_create(struct drm_file *file_priv,
if (ret)
return ret;
- return drm_gem_cma_dumb_create_internal(file_priv, dev, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, dev, args);
}
static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
@@ -211,7 +211,7 @@ static const struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -234,7 +234,7 @@ static const struct drm_driver vc5_drm_driver = {
.debugfs_init = vc4_debugfs_init,
#endif
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
.fops = &vc4_drm_fops,
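
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() fills in the DMA-helper PRIME plumbing of struct drm_driver along with the given dumb_create hook; at the time of this rename it expands to roughly the following (a paraphrase of the upstream macro, not a verbatim copy):

	.dumb_create               = (dumb_create_func),
	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table,
	.gem_prime_mmap            = drm_gem_prime_mmap,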
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 1649350b9efd..418a8242691f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -14,7 +14,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
@@ -239,7 +239,7 @@ to_vc4_dev(struct drm_device *dev)
}
struct vc4_bo {
- struct drm_gem_cma_object base;
+ struct drm_gem_dma_object base;
/* seqno of the last job to render using this BO. */
uint64_t seqno;
@@ -288,7 +288,7 @@ struct vc4_bo {
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
- return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
+ return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}
struct vc4_fence {
@@ -602,14 +602,14 @@ struct vc4_exec_info {
/* This is the array of BOs that were looked up at the start of exec.
* Command validation will use indices into this array.
*/
- struct drm_gem_cma_object **bo;
+ struct drm_gem_dma_object **bo;
uint32_t bo_count;
/* List of BOs that are being written by the RCL. Other than
* the binner temporary storage, this is all the BOs written
* by the job.
*/
- struct drm_gem_cma_object *rcl_write_bo[4];
+ struct drm_gem_dma_object *rcl_write_bo[4];
uint32_t rcl_write_bo_count;
/* Pointers for our position in vc4->job_list */
@@ -628,7 +628,7 @@ struct vc4_exec_info {
/* This is the BO where we store the validated command lists, shader
* records, and uniforms.
*/
- struct drm_gem_cma_object *exec_bo;
+ struct drm_gem_dma_object *exec_bo;
/**
* This tracks the per-shader-record state (packet 64) that
@@ -989,19 +989,19 @@ vc4_validate_bin_cl(struct drm_device *dev,
int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
-struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
+struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec,
uint32_t hindex);
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
bool vc4_check_tex_size(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *fbo,
+ struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp);
/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj);
/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
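
The to_vc4_bo() hunk above works because struct vc4_bo embeds the helper object as its first member, so the upcast is a container_of() on the result of the helper's own downcast. The pattern, reduced to its essentials (illustrative; fields abridged):

	struct vc4_bo {
		struct drm_gem_dma_object base;	/* helper object embedded first */
		uint64_t seqno;			/* driver-private state follows */
	};

	static inline struct vc4_bo *to_vc4_bo(struct drm_gem_object *bo)
	{
		/* drm_gem_object -> drm_gem_dma_object -> vc4_bo */
		return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
	}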
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 7acb43972e69..56a0af281b92 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -764,7 +764,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
}
exec->bo = kvmalloc_array(exec->bo_count,
- sizeof(struct drm_gem_cma_object *),
+ sizeof(struct drm_gem_dma_object *),
GFP_KERNEL | __GFP_ZERO);
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
@@ -797,7 +797,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
}
drm_gem_object_get(bo);
- exec->bo[i] = (struct drm_gem_cma_object *)bo;
+ exec->bo[i] = (struct drm_gem_dma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
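
The direct cast in vc4_cl_lookup_bos() is only valid because struct drm_gem_dma_object keeps its struct drm_gem_object as the first member, so both pointers share the same address. Abridged from the helper header (a sketch; only the fields relevant here):

	struct drm_gem_dma_object {
		struct drm_gem_object base;	/* must stay first for this cast */
		dma_addr_t dma_addr;		/* bus address of the backing memory */
		/* ... */
	};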
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 51e0e8aa2135..b76b0e602c81 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -339,7 +339,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -1243,7 +1243,7 @@ u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
- struct drm_gem_cma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
uint32_t addr;
/* We're skipping the address adjustment for negative origin,
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index f6b7dc3df08c..c8a92023238c 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -40,14 +40,14 @@
#include "vc4_packet.h"
struct vc4_rcl_setup {
- struct drm_gem_cma_object *color_read;
- struct drm_gem_cma_object *color_write;
- struct drm_gem_cma_object *zs_read;
- struct drm_gem_cma_object *zs_write;
- struct drm_gem_cma_object *msaa_color_write;
- struct drm_gem_cma_object *msaa_zs_write;
-
- struct drm_gem_cma_object *rcl;
+ struct drm_gem_dma_object *color_read;
+ struct drm_gem_dma_object *color_write;
+ struct drm_gem_dma_object *zs_read;
+ struct drm_gem_dma_object *zs_write;
+ struct drm_gem_dma_object *msaa_color_write;
+ struct drm_gem_dma_object *msaa_zs_write;
+
+ struct drm_gem_dma_object *rcl;
u32 next_offset;
u32 next_write_bo_index;
@@ -97,7 +97,7 @@ static void vc4_store_before_load(struct vc4_rcl_setup *setup)
* coordinates packet, and instead just store to the address given.
*/
static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *bo,
+ struct drm_gem_dma_object *bo,
struct drm_vc4_submit_rcl_surface *surf,
uint8_t x, uint8_t y)
{
@@ -381,7 +381,7 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
}
static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *obj,
+ struct drm_gem_dma_object *obj,
struct drm_vc4_submit_rcl_surface *surf)
{
struct drm_vc4_submit_cl *args = exec->args;
@@ -407,7 +407,7 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
}
static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
if (surf->flags != 0 || surf->bits != 0) {
@@ -433,7 +433,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
}
static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf,
bool is_write)
{
@@ -533,7 +533,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
static int
vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
struct vc4_rcl_setup *setup,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
uint8_t tiling = VC4_GET_FIELD(surf->bits,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index af48ae68f213..811bc7d9b90c 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -280,7 +280,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
u32 ctrl;
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 40f04157ea39..6222c884dcbb 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -231,7 +231,7 @@ try_again:
* if it doesn't fit within the buffer that we allocated up front.
* However, it turns out that 16MB is "enough for anybody", and
* real-world applications run into allocation failures from the
- * overall CMA pool before they make scenes complicated enough to run
+ * overall DMA pool before they make scenes complicated enough to run
* out of bin space.
*/
static int bin_bo_alloc(struct vc4_dev *vc4)
@@ -261,7 +261,7 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
dev_err(&v3d->pdev->dev,
"Failed to allocate memory for tile binning: "
- "%d. You may need to enable CMA or give it "
+ "%d. You may need to enable DMA or give it "
"more memory.",
ret);
break;
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 2feba55bcef7..d5dc6b51ec69 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -102,11 +102,11 @@ size_is_lt(uint32_t width, uint32_t height, int cpp)
height <= 4 * utile_height(cpp));
}
-struct drm_gem_cma_object *
+struct drm_gem_dma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
struct vc4_dev *vc4 = exec->dev;
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -129,7 +129,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
return obj;
}
-static struct drm_gem_cma_object *
+static struct drm_gem_dma_object *
vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
{
return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
@@ -160,7 +160,7 @@ gl_shader_rec_size(uint32_t pointer_bits)
}
bool
-vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
+vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp)
{
@@ -263,7 +263,7 @@ validate_increment_semaphore(VALIDATE_ARGS)
static int
validate_indexed_prim_list(VALIDATE_ARGS)
{
- struct drm_gem_cma_object *ib;
+ struct drm_gem_dma_object *ib;
uint32_t length = *(uint32_t *)(untrusted + 1);
uint32_t offset = *(uint32_t *)(untrusted + 5);
uint32_t max_index = *(uint32_t *)(untrusted + 9);
@@ -575,7 +575,7 @@ reloc_tex(struct vc4_exec_info *exec,
struct vc4_texture_sample_info *sample,
uint32_t texture_handle_index, bool is_cs)
{
- struct drm_gem_cma_object *tex;
+ struct drm_gem_dma_object *tex;
uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
uint32_t p2 = (sample->p_offset[2] != ~0 ?
@@ -765,7 +765,7 @@ validate_gl_shader_rec(struct drm_device *dev,
28, /* cs */
};
uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
- struct drm_gem_cma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
+ struct drm_gem_dma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
uint32_t nr_attributes, nr_relocs, packet_size;
int i;
@@ -896,7 +896,7 @@ validate_gl_shader_rec(struct drm_device *dev,
}
for (i = 0; i < nr_attributes; i++) {
- struct drm_gem_cma_object *vbo =
+ struct drm_gem_dma_object *vbo =
bo[ARRAY_SIZE(shader_reloc_offsets) + i];
uint32_t o = 36 + i * 8;
uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index e315aeb5fef5..9745f8810eca 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -776,7 +776,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
}
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj)
{
struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
bool found_shader_end = false;