Diffstat (limited to 'drivers/gpu/drm/ast/ast_mode.c')
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 157
1 file changed, 89 insertions(+), 68 deletions(-)
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 97fed0627d1c..ffccbef962a4 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -521,7 +521,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-/* ast is different - we will force move buffers out of VRAM */
static int ast_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
@@ -529,50 +528,54 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
struct ast_private *ast = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct ast_framebuffer *ast_fb;
- struct ast_bo *bo;
+ struct drm_gem_vram_object *gbo;
int ret;
- u64 gpu_addr;
+ s64 gpu_addr;
+ void *base;
- /* push the previous fb to system ram */
if (!atomic && fb) {
ast_fb = to_ast_framebuffer(fb);
obj = ast_fb->obj;
- bo = gem_to_ast_bo(obj);
- ret = ast_bo_reserve(bo, false);
- if (ret)
- return ret;
- ast_bo_push_sysram(bo);
- ast_bo_unreserve(bo);
+ gbo = drm_gem_vram_of_gem(obj);
+
+ /* unmap if console */
+ if (&ast->fbdev->afb == ast_fb)
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
}
ast_fb = to_ast_framebuffer(crtc->primary->fb);
obj = ast_fb->obj;
- bo = gem_to_ast_bo(obj);
+ gbo = drm_gem_vram_of_gem(obj);
- ret = ast_bo_reserve(bo, false);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
return ret;
-
- ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- if (ret) {
- ast_bo_unreserve(bo);
- return ret;
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (gpu_addr < 0) {
+ ret = (int)gpu_addr;
+ goto err_drm_gem_vram_unpin;
}
if (&ast->fbdev->afb == ast_fb) {
/* if pushing console in kmap it */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret)
+ base = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
DRM_ERROR("failed to kmap fbcon\n");
- else
+ } else {
ast_fbdev_set_base(ast, gpu_addr);
+ }
}
- ast_bo_unreserve(bo);
ast_set_offset_reg(crtc);
ast_set_start_address_crt1(crtc, (u32)gpu_addr);
return 0;
+
+err_drm_gem_vram_unpin:
+ drm_gem_vram_unpin(gbo);
+ return ret;
}
static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -618,21 +621,18 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
static void ast_crtc_disable(struct drm_crtc *crtc)
{
- int ret;
-
DRM_DEBUG_KMS("\n");
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
+ struct ast_private *ast = crtc->dev->dev_private;
struct ast_framebuffer *ast_fb = to_ast_framebuffer(crtc->primary->fb);
struct drm_gem_object *obj = ast_fb->obj;
- struct ast_bo *bo = gem_to_ast_bo(obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
- ret = ast_bo_reserve(bo, false);
- if (ret)
- return;
-
- ast_bo_push_sysram(bo);
- ast_bo_unreserve(bo);
+ /* unmap if console */
+ if (&ast->fbdev->afb == ast_fb)
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
}
crtc->primary->fb = NULL;
}
@@ -918,32 +918,34 @@ static int ast_cursor_init(struct drm_device *dev)
int size;
int ret;
struct drm_gem_object *obj;
- struct ast_bo *bo;
- uint64_t gpu_addr;
+ struct drm_gem_vram_object *gbo;
+ s64 gpu_addr;
+ void *base;
size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
ret = ast_gem_create(dev, size, true, &obj);
if (ret)
return ret;
- bo = gem_to_ast_bo(obj);
- ret = ast_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- goto fail;
-
- ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- ast_bo_unreserve(bo);
+ gbo = drm_gem_vram_of_gem(obj);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
goto fail;
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (gpu_addr < 0) {
+ drm_gem_vram_unpin(gbo);
+ ret = (int)gpu_addr;
+ goto fail;
+ }
/* kmap the object */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
- if (ret)
+ base = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
goto fail;
+ }
ast->cursor_cache = obj;
- ast->cursor_cache_gpu_addr = gpu_addr;
- DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
return 0;
fail:
return ret;
@@ -952,7 +954,10 @@ fail:
static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- ttm_bo_kunmap(&ast->cache_kmap);
+ struct drm_gem_vram_object *gbo =
+ drm_gem_vram_of_gem(ast->cursor_cache);
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
drm_gem_object_put_unlocked(ast->cursor_cache);
}
@@ -1173,13 +1178,13 @@ static int ast_cursor_set(struct drm_crtc *crtc,
struct ast_private *ast = crtc->dev->dev_private;
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct drm_gem_object *obj;
- struct ast_bo *bo;
- uint64_t gpu_addr;
+ struct drm_gem_vram_object *gbo;
+ s64 dst_gpu;
+ u64 gpu_addr;
u32 csum;
int ret;
- struct ttm_bo_kmap_obj uobj_map;
u8 *src, *dst;
- bool src_isiomem, dst_isiomem;
+
if (!handle) {
ast_hide_cursor(crtc);
return 0;
@@ -1193,21 +1198,28 @@ static int ast_cursor_set(struct drm_crtc *crtc,
DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
return -ENOENT;
}
- bo = gem_to_ast_bo(obj);
+ gbo = drm_gem_vram_of_gem(obj);
- ret = ast_bo_reserve(bo, false);
+ ret = drm_gem_vram_pin(gbo, 0);
if (ret)
- goto fail;
-
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
-
- src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
- dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
+ goto err_drm_gem_object_put_unlocked;
+ src = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto err_drm_gem_vram_unpin;
+ }
- if (src_isiomem == true)
- DRM_ERROR("src cursor bo should be in main memory\n");
- if (dst_isiomem == false)
- DRM_ERROR("dst bo should be in VRAM\n");
+ dst = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
+ false, NULL);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto err_drm_gem_vram_kunmap;
+ }
+ dst_gpu = drm_gem_vram_offset(drm_gem_vram_of_gem(ast->cursor_cache));
+ if (dst_gpu < 0) {
+ ret = (int)dst_gpu;
+ goto err_drm_gem_vram_kunmap;
+ }
dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
@@ -1215,10 +1227,11 @@ static int ast_cursor_set(struct drm_crtc *crtc,
csum = copy_cursor_image(src, dst, width, height);
/* write checksum + signature */
- ttm_bo_kunmap(&uobj_map);
- ast_bo_unreserve(bo);
{
- u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ struct drm_gem_vram_object *dst_gbo =
+ drm_gem_vram_of_gem(ast->cursor_cache);
+ u8 *dst = drm_gem_vram_kmap(dst_gbo, false, NULL);
+ dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
writel(csum, dst);
writel(width, dst + AST_HWC_SIGNATURE_SizeX);
writel(height, dst + AST_HWC_SIGNATURE_SizeY);
@@ -1226,15 +1239,13 @@ static int ast_cursor_set(struct drm_crtc *crtc,
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
/* set pattern offset */
- gpu_addr = ast->cursor_cache_gpu_addr;
+ gpu_addr = (u64)dst_gpu;
gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
gpu_addr >>= 3;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
}
- ast_crtc->cursor_width = width;
- ast_crtc->cursor_height = height;
ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
@@ -1242,9 +1253,17 @@ static int ast_cursor_set(struct drm_crtc *crtc,
ast_show_cursor(crtc);
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
drm_gem_object_put_unlocked(obj);
+
return 0;
-fail:
+
+err_drm_gem_vram_kunmap:
+ drm_gem_vram_kunmap(gbo);
+err_drm_gem_vram_unpin:
+ drm_gem_vram_unpin(gbo);
+err_drm_gem_object_put_unlocked:
drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -1257,7 +1276,9 @@ static int ast_cursor_move(struct drm_crtc *crtc,
int x_offset, y_offset;
u8 *sig;
- sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ sig = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
+ false, NULL);
+ sig += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
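
Below is a minimal sketch (not part of the patch) of the drm_gem_vram pin/map/unpin pattern that ast_mode.c is converted to above. The helper calls and the error-label style are taken straight from the hunks for ast_crtc_do_set_base() and ast_cursor_set(); the wrapper function name and the <drm/drm_gem_vram_helper.h> include are illustrative assumptions, not code from this commit.

#include <drm/drm_gem_vram_helper.h>	/* assumed header for the VRAM helpers */

/* Hypothetical helper: pin a GEM object into VRAM, map it, read back its offset. */
static int example_pin_and_map(struct drm_gem_object *obj)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
	s64 gpu_addr;
	void *base;
	int ret;

	/* Pin the buffer into VRAM; replaces the old ast_bo_reserve()/ast_bo_pin() pair. */
	ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
	if (ret)
		return ret;

	/* A negative return value encodes the error code, as in the patch. */
	gpu_addr = drm_gem_vram_offset(gbo);
	if (gpu_addr < 0) {
		ret = (int)gpu_addr;
		goto err_drm_gem_vram_unpin;
	}

	/* Map the buffer; returns an ERR_PTR() on failure. */
	base = drm_gem_vram_kmap(gbo, true, NULL);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto err_drm_gem_vram_unpin;
	}

	/* ... program scanout/cursor registers with gpu_addr, write through base ... */

	drm_gem_vram_kunmap(gbo);
	drm_gem_vram_unpin(gbo);
	return 0;

err_drm_gem_vram_unpin:
	drm_gem_vram_unpin(gbo);
	return ret;
}

Note how a single drm_gem_vram_pin() call subsumes the old reserve/pin pair, so the success path no longer needs a separate ast_bo_unreserve() step.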