Diffstat (limited to 'drivers/gpu/drm/qxl')
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c        11
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c     2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c         2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h         2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c        275
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c        10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c      46
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c     11
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c    13
9 files changed, 104 insertions(+), 268 deletions(-)
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 97823644d347..fdc1833b1af8 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -248,7 +248,7 @@ int qxl_garbage_collect(struct qxl_device *qdev)
}
}
- QXL_INFO(qdev, "%s: %lld\n", __func__, i);
+ QXL_INFO(qdev, "%s: %d\n", __func__, i);
return i;
}
@@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
+ cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
cmd->u.surface_create.format = surf->surf.format;
cmd->u.surface_create.width = surf->surf.width;
cmd->u.surface_create.height = surf->surf.height;
@@ -617,8 +618,8 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
int ret;
ret = qxl_bo_reserve(surf, false);
- if (ret == -EBUSY)
- return -EBUSY;
+ if (ret)
+ return ret;
if (stall)
mutex_unlock(&qdev->surf_evict_mutex);
@@ -627,9 +628,9 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
if (stall)
mutex_lock(&qdev->surf_evict_mutex);
- if (ret == -EBUSY) {
+ if (ret) {
qxl_bo_unreserve(surf);
- return -EBUSY;
+ return ret;
}
qxl_surface_evict_locked(qdev, surf, true);
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 4a0a8b29b0a1..a8dbb3ef4e3c 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -67,7 +67,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
sizeof(qdev->rom->client_monitors_config));
if (crc != qdev->rom->client_monitors_config_crc) {
- qxl_io_log(qdev, "crc mismatch: have %X (%d) != %X\n", crc,
+ qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
sizeof(qdev->rom->client_monitors_config),
qdev->rom->client_monitors_config_crc);
return 1;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index e2d07085b6a5..83f6f0b5e9ef 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -198,7 +198,7 @@ static int qxl_pm_restore(struct device *dev)
static u32 qxl_noop_get_vblank_counter(struct drm_device *dev, int crtc)
{
- return dev->vblank[crtc].count.counter;
+ return 0;
}
static int qxl_noop_enable_vblank(struct drm_device *dev, int crtc)
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 7c6cafe21f5f..d8549690801d 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -328,7 +328,7 @@ struct qxl_device {
};
/* forward declaration for QXL_INFO_IO */
-void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
+__printf(2,3) void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
extern const struct drm_ioctl_desc qxl_ioctls[];
extern int qxl_max_ioctl;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index f778c0e8ae3c..6b6e57e8c2d6 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -37,21 +37,6 @@
#define QXL_DIRTY_DELAY (HZ / 30)
-#define QXL_FB_OP_FILLRECT 1
-#define QXL_FB_OP_COPYAREA 2
-#define QXL_FB_OP_IMAGEBLIT 3
-
-struct qxl_fb_op {
- struct list_head head;
- int op_type;
- union {
- struct fb_fillrect fr;
- struct fb_copyarea ca;
- struct fb_image ib;
- } op;
- void *img_data;
-};
-
struct qxl_fbdev {
struct drm_fb_helper helper;
struct qxl_framebuffer qfb;
@@ -66,7 +51,6 @@ struct qxl_fbdev {
/* dirty memory logging */
struct {
spinlock_t lock;
- bool active;
unsigned x1;
unsigned y1;
unsigned x2;
@@ -105,23 +89,33 @@ static void qxl_fb_dirty_flush(struct fb_info *info)
struct qxl_device *qdev = qfbdev->qdev;
struct qxl_fb_image qxl_fb_image;
struct fb_image *image = &qxl_fb_image.fb_image;
+ unsigned long flags;
u32 x1, x2, y1, y2;
/* TODO: hard coding 32 bpp */
int stride = qfbdev->qfb.base.pitches[0];
+ spin_lock_irqsave(&qfbdev->dirty.lock, flags);
+
x1 = qfbdev->dirty.x1;
x2 = qfbdev->dirty.x2;
y1 = qfbdev->dirty.y1;
y2 = qfbdev->dirty.y2;
+ qfbdev->dirty.x1 = 0;
+ qfbdev->dirty.x2 = 0;
+ qfbdev->dirty.y1 = 0;
+ qfbdev->dirty.y2 = 0;
+
+ spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
+
/*
* we are using a shadow draw buffer, at qdev->surface0_shadow
*/
qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
image->dx = x1;
image->dy = y1;
- image->width = x2 - x1;
- image->height = y2 - y1;
+ image->width = x2 - x1 + 1;
+ image->height = y2 - y1 + 1;
image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
warnings */
image->bg_color = 0;
@@ -136,10 +130,37 @@ static void qxl_fb_dirty_flush(struct fb_info *info)
qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
qxl_draw_opaque_fb(&qxl_fb_image, stride);
- qfbdev->dirty.x1 = 0;
- qfbdev->dirty.x2 = 0;
- qfbdev->dirty.y1 = 0;
- qfbdev->dirty.y2 = 0;
+}
+
+static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
+ int x, int y, int width, int height)
+{
+ struct qxl_device *qdev = qfbdev->qdev;
+ unsigned long flags;
+ int x2, y2;
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+
+ spin_lock_irqsave(&qfbdev->dirty.lock, flags);
+
+ if (qfbdev->dirty.y1 < y)
+ y = qfbdev->dirty.y1;
+ if (qfbdev->dirty.y2 > y2)
+ y2 = qfbdev->dirty.y2;
+ if (qfbdev->dirty.x1 < x)
+ x = qfbdev->dirty.x1;
+ if (qfbdev->dirty.x2 > x2)
+ x2 = qfbdev->dirty.x2;
+
+ qfbdev->dirty.x1 = x;
+ qfbdev->dirty.x2 = x2;
+ qfbdev->dirty.y1 = y;
+ qfbdev->dirty.y2 = y2;
+
+ spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
+
+ schedule_work(&qdev->fb_work);
}
static void qxl_deferred_io(struct fb_info *info,
@@ -162,234 +183,51 @@ static void qxl_deferred_io(struct fb_info *info,
if (min < max) {
y1 = min / info->fix.line_length;
y2 = (max / info->fix.line_length) + 1;
-
- /* TODO: add spin lock? */
- /* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
- qfbdev->dirty.x1 = 0;
- qfbdev->dirty.y1 = y1;
- qfbdev->dirty.x2 = info->var.xres;
- qfbdev->dirty.y2 = y2;
- /* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
+ qxl_dirty_update(qfbdev, 0, y1, info->var.xres, y2 - y1);
}
-
- qxl_fb_dirty_flush(info);
};
-
static struct fb_deferred_io qxl_defio = {
.delay = QXL_DIRTY_DELAY,
.deferred_io = qxl_deferred_io,
};
-static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
- const struct fb_fillrect *fb_rect)
-{
- struct qxl_fb_op *op;
- unsigned long flags;
-
- op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
- if (!op)
- return;
-
- op->op.fr = *fb_rect;
- op->img_data = NULL;
- op->op_type = QXL_FB_OP_FILLRECT;
-
- spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
- list_add_tail(&op->head, &qfbdev->delayed_ops);
- spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
-}
-
-static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
- const struct fb_copyarea *fb_copy)
-{
- struct qxl_fb_op *op;
- unsigned long flags;
-
- op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
- if (!op)
- return;
-
- op->op.ca = *fb_copy;
- op->img_data = NULL;
- op->op_type = QXL_FB_OP_COPYAREA;
-
- spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
- list_add_tail(&op->head, &qfbdev->delayed_ops);
- spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
-}
-
-static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
- const struct fb_image *fb_image)
-{
- struct qxl_fb_op *op;
- unsigned long flags;
- uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
-
- op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
- if (!op)
- return;
-
- op->op.ib = *fb_image;
- op->img_data = (void *)(op + 1);
- op->op_type = QXL_FB_OP_IMAGEBLIT;
-
- memcpy(op->img_data, fb_image->data, size);
-
- op->op.ib.data = op->img_data;
- spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
- list_add_tail(&op->head, &qfbdev->delayed_ops);
- spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
-}
-
-static void qxl_fb_fillrect_internal(struct fb_info *info,
- const struct fb_fillrect *fb_rect)
-{
- struct qxl_fbdev *qfbdev = info->par;
- struct qxl_device *qdev = qfbdev->qdev;
- struct qxl_rect rect;
- uint32_t color;
- int x = fb_rect->dx;
- int y = fb_rect->dy;
- int width = fb_rect->width;
- int height = fb_rect->height;
- uint16_t rop;
- struct qxl_draw_fill qxl_draw_fill_rec;
-
- if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
- info->fix.visual == FB_VISUAL_DIRECTCOLOR)
- color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
- else
- color = fb_rect->color;
- rect.left = x;
- rect.right = x + width;
- rect.top = y;
- rect.bottom = y + height;
- switch (fb_rect->rop) {
- case ROP_XOR:
- rop = SPICE_ROPD_OP_XOR;
- break;
- case ROP_COPY:
- rop = SPICE_ROPD_OP_PUT;
- break;
- default:
- pr_err("qxl_fb_fillrect(): unknown rop, "
- "defaulting to SPICE_ROPD_OP_PUT\n");
- rop = SPICE_ROPD_OP_PUT;
- }
- qxl_draw_fill_rec.qdev = qdev;
- qxl_draw_fill_rec.rect = rect;
- qxl_draw_fill_rec.color = color;
- qxl_draw_fill_rec.rop = rop;
-
- qxl_draw_fill(&qxl_draw_fill_rec);
-}
-
static void qxl_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *fb_rect)
+ const struct fb_fillrect *rect)
{
struct qxl_fbdev *qfbdev = info->par;
- struct qxl_device *qdev = qfbdev->qdev;
- if (!drm_can_sleep()) {
- qxl_fb_delayed_fillrect(qfbdev, fb_rect);
- schedule_work(&qdev->fb_work);
- return;
- }
- /* make sure any previous work is done */
- flush_work(&qdev->fb_work);
- qxl_fb_fillrect_internal(info, fb_rect);
-}
-
-static void qxl_fb_copyarea_internal(struct fb_info *info,
- const struct fb_copyarea *region)
-{
- struct qxl_fbdev *qfbdev = info->par;
-
- qxl_draw_copyarea(qfbdev->qdev,
- region->width, region->height,
- region->sx, region->sy,
- region->dx, region->dy);
+ sys_fillrect(info, rect);
+ qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
}
static void qxl_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region)
+ const struct fb_copyarea *area)
{
struct qxl_fbdev *qfbdev = info->par;
- struct qxl_device *qdev = qfbdev->qdev;
- if (!drm_can_sleep()) {
- qxl_fb_delayed_copyarea(qfbdev, region);
- schedule_work(&qdev->fb_work);
- return;
- }
- /* make sure any previous work is done */
- flush_work(&qdev->fb_work);
- qxl_fb_copyarea_internal(info, region);
-}
-
-static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
-{
- qxl_draw_opaque_fb(qxl_fb_image, 0);
-}
-
-static void qxl_fb_imageblit_internal(struct fb_info *info,
- const struct fb_image *image)
-{
- struct qxl_fbdev *qfbdev = info->par;
- struct qxl_fb_image qxl_fb_image;
-
- /* ensure proper order rendering operations - TODO: must do this
- * for everything. */
- qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
- qxl_fb_imageblit_safe(&qxl_fb_image);
+ sys_copyarea(info, area);
+ qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
+ area->height);
}
static void qxl_fb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct qxl_fbdev *qfbdev = info->par;
- struct qxl_device *qdev = qfbdev->qdev;
- if (!drm_can_sleep()) {
- qxl_fb_delayed_imageblit(qfbdev, image);
- schedule_work(&qdev->fb_work);
- return;
- }
- /* make sure any previous work is done */
- flush_work(&qdev->fb_work);
- qxl_fb_imageblit_internal(info, image);
+ sys_imageblit(info, image);
+ qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
+ image->height);
}
static void qxl_fb_work(struct work_struct *work)
{
struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
- unsigned long flags;
- struct qxl_fb_op *entry, *tmp;
struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
- /* since the irq context just adds entries to the end of the
- list dropping the lock should be fine, as entry isn't modified
- in the operation code */
- spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
- list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
- spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
- switch (entry->op_type) {
- case QXL_FB_OP_FILLRECT:
- qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
- break;
- case QXL_FB_OP_COPYAREA:
- qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
- break;
- case QXL_FB_OP_IMAGEBLIT:
- qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
- break;
- }
- spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
- list_del(&entry->head);
- kfree(entry);
- }
- spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+ qxl_fb_dirty_flush(qfbdev->helper.fbdev);
}
int qxl_fb_init(struct qxl_device *qdev)
@@ -678,6 +516,7 @@ int qxl_fbdev_init(struct qxl_device *qdev)
qfbdev->qdev = qdev;
qdev->mode_info.qfbdev = qfbdev;
spin_lock_init(&qfbdev->delayed_ops_lock);
+ spin_lock_init(&qfbdev->dirty.lock);
INIT_LIST_HEAD(&qfbdev->delayed_ops);
drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
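
Note: the qxl_fb.c changes above drop the delayed fillrect/copyarea/imageblit queue and instead render through sys_fillrect()/sys_copyarea()/sys_imageblit() into the shadow buffer, accumulating a single dirty bounding box under dirty.lock and flushing it from the fb_work worker. The standalone user-space sketch below (not part of the patch; a pthread mutex stands in for the kernel spinlock and schedule_work() is only noted in a comment) illustrates that accumulate-then-flush pattern:

/*
 * Illustrative sketch of the dirty-rectangle accumulation done by the
 * new qxl_dirty_update()/qxl_fb_dirty_flush() pair: writers grow one
 * bounding box under a lock, the worker snapshots and resets it.
 */
#include <pthread.h>
#include <stdio.h>

struct dirty_rect {
	pthread_mutex_t lock;
	int x1, y1, x2, y2;	/* inclusive bounding box, all zero = empty */
};

static void dirty_update(struct dirty_rect *d, int x, int y, int w, int h)
{
	int x2 = x + w - 1;
	int y2 = y + h - 1;

	pthread_mutex_lock(&d->lock);
	/* grow the box: min of the top-left corners, max of the bottom-right */
	if (d->x1 < x)
		x = d->x1;
	if (d->y1 < y)
		y = d->y1;
	if (d->x2 > x2)
		x2 = d->x2;
	if (d->y2 > y2)
		y2 = d->y2;
	d->x1 = x;
	d->y1 = y;
	d->x2 = x2;
	d->y2 = y2;
	pthread_mutex_unlock(&d->lock);
	/* in the driver: schedule_work(&qdev->fb_work) here */
}

static void dirty_flush(struct dirty_rect *d)
{
	int x1, y1, x2, y2;

	/* snapshot and reset under the lock, draw outside it */
	pthread_mutex_lock(&d->lock);
	x1 = d->x1; y1 = d->y1; x2 = d->x2; y2 = d->y2;
	d->x1 = d->y1 = d->x2 = d->y2 = 0;
	pthread_mutex_unlock(&d->lock);

	printf("flush %dx%d at (%d,%d)\n", x2 - x1 + 1, y2 - y1 + 1, x1, y1);
}

int main(void)
{
	struct dirty_rect d = { .lock = PTHREAD_MUTEX_INITIALIZER };

	dirty_update(&d, 10, 10, 4, 4);
	dirty_update(&d, 2, 20, 8, 2);
	/* as in the driver, the all-zero "empty" box pulls the union to the
	 * origin, so the flushed area is a harmless superset of what was
	 * touched */
	dirty_flush(&d);
	return 0;
}
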
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index b96f0c9d89b2..d9746e904ef1 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -31,9 +31,15 @@
void qxl_gem_object_free(struct drm_gem_object *gobj)
{
struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
+ struct qxl_device *qdev;
+ struct ttm_buffer_object *tbo;
- if (qobj)
- qxl_bo_unref(&qobj);
+ qdev = (struct qxl_device *)gobj->dev->dev_private;
+
+ qxl_surface_evict(qdev, qobj, false);
+
+ tbo = &qobj->tbo;
+ ttm_bo_unref(&tbo);
}
int qxl_gem_object_create(struct qxl_device *qdev, int size,
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index b110883f8253..bda5c5f80c24 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -107,9 +107,9 @@ apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
}
/* return holding the reference to this object */
-static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
- struct drm_file *file_priv, uint64_t handle,
- struct qxl_release *release)
+static int qxlhw_handle_to_bo(struct qxl_device *qdev,
+ struct drm_file *file_priv, uint64_t handle,
+ struct qxl_release *release, struct qxl_bo **qbo_p)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
@@ -117,15 +117,17 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
if (!gobj)
- return NULL;
+ return -EINVAL;
qobj = gem_to_qxl_bo(gobj);
ret = qxl_release_list_add(release, qobj);
+ drm_gem_object_unreference_unlocked(gobj);
if (ret)
- return NULL;
+ return ret;
- return qobj;
+ *qbo_p = qobj;
+ return 0;
}
/*
@@ -143,7 +145,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
struct qxl_release *release;
struct qxl_bo *cmd_bo;
void *fb_cmd;
- int i, j, ret, num_relocs;
+ int i, ret, num_relocs;
int unwritten;
switch (cmd->type) {
@@ -210,7 +212,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
/* add the bos to the list of bos to validate -
need to validate first then process relocs? */
if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
- DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
+ DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
ret = -EINVAL;
goto out_free_bos;
@@ -218,13 +220,10 @@ static int qxl_process_single_command(struct qxl_device *qdev,
reloc_info[i].type = reloc.reloc_type;
if (reloc.dst_handle) {
- reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
- reloc.dst_handle, release);
- if (!reloc_info[i].dst_bo) {
- ret = -EINVAL;
- reloc_info[i].src_bo = NULL;
+ ret = qxlhw_handle_to_bo(qdev, file_priv, reloc.dst_handle, release,
+ &reloc_info[i].dst_bo);
+ if (ret)
goto out_free_bos;
- }
reloc_info[i].dst_offset = reloc.dst_offset;
} else {
reloc_info[i].dst_bo = cmd_bo;
@@ -233,16 +232,11 @@ static int qxl_process_single_command(struct qxl_device *qdev,
num_relocs++;
/* reserve and validate the reloc dst bo */
- if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
- reloc_info[i].src_bo =
- qxlhw_handle_to_bo(qdev, file_priv,
- reloc.src_handle, release);
- if (!reloc_info[i].src_bo) {
- if (reloc_info[i].dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
- ret = -EINVAL;
+ if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
+ ret = qxlhw_handle_to_bo(qdev, file_priv, reloc.src_handle, release,
+ &reloc_info[i].src_bo);
+ if (ret)
goto out_free_bos;
- }
reloc_info[i].src_offset = reloc.src_offset;
} else {
reloc_info[i].src_bo = NULL;
@@ -269,12 +263,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
qxl_release_fence_buffer_objects(release);
out_free_bos:
- for (j = 0; j < num_relocs; j++) {
- if (reloc_info[j].dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
- if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
- }
out_free_release:
if (ret)
qxl_release_free(qdev, release);
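
One pattern worth noting in the qxl_ioctl.c hunk above: qxlhw_handle_to_bo() now returns an errno-style int and hands the looked-up buffer object back through an out parameter, so callers propagate the real error instead of collapsing every failure into NULL/-EINVAL. A minimal user-space sketch of that calling convention (all names here are hypothetical, not driver API):

/* Sketch of the errno-return, out-parameter lookup convention. */
#include <errno.h>
#include <stdio.h>

struct object {
	int id;
};

static struct object table[] = { { 1 }, { 2 } };

static int handle_to_object(unsigned int handle, struct object **out)
{
	if (handle >= sizeof(table) / sizeof(table[0]))
		return -EINVAL;	/* report why the lookup failed */

	*out = &table[handle];
	return 0;
}

int main(void)
{
	struct object *obj;
	int ret;

	ret = handle_to_object(5, &obj);
	if (ret)
		printf("lookup failed: %d\n", ret);

	ret = handle_to_object(1, &obj);
	if (!ret)
		printf("got object %d\n", obj->id);
	return 0;
}
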
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index cdeaf08fdc74..6d6f33de48f4 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -208,19 +208,16 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
void qxl_bo_unref(struct qxl_bo **bo)
{
- struct ttm_buffer_object *tbo;
-
if ((*bo) == NULL)
return;
- tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+
+ drm_gem_object_unreference_unlocked(&(*bo)->gem_base);
+ *bo = NULL;
}
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
- ttm_bo_reference(&bo->tbo);
+ drm_gem_object_reference(&bo->gem_base);
return bo;
}
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index d9b25684ac98..b66ec331c17c 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -122,7 +122,7 @@ static const struct fence_ops qxl_fence_ops = {
.wait = qxl_fence_wait,
};
-static uint64_t
+static int
qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
{
@@ -153,7 +153,7 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
return handle;
}
*ret = release;
- QXL_INFO(qdev, "allocated release %lld\n", handle);
+ QXL_INFO(qdev, "allocated release %d\n", handle);
release->id = handle;
return handle;
}
@@ -363,6 +363,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
if (ret) {
mutex_unlock(&qdev->release_mutex);
+ qxl_release_free(qdev, *release);
return ret;
}
}
@@ -377,13 +378,17 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
mutex_unlock(&qdev->release_mutex);
- qxl_release_list_add(*release, bo);
+ ret = qxl_release_list_add(*release, bo);
+ qxl_bo_unref(&bo);
+ if (ret) {
+ qxl_release_free(qdev, *release);
+ return ret;
+ }
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
- qxl_bo_unref(&bo);
return ret;
}