Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c          | 16
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c     |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c        |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c      |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  | 18
5 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2e618b5ac465..a4d38d85909a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -37,7 +37,7 @@
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
@@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
ret = ttm_tt_set_user(bo->ttm, current,
bo->buffer_start, bo->num_pages);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
break;
default:
printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
@@ -390,10 +392,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
* Create and bind a ttm if required.
*/
- if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
- ret = ttm_bo_add_ttm(bo, false);
- if (ret)
- goto out_err;
+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+ if (bo->ttm == NULL) {
+ ret = ttm_bo_add_ttm(bo, false);
+ if (ret)
+ goto out_err;
+ }
ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
if (ret)
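The two hunks above close a use-after-free window: ttm_bo_add_ttm() now clears bo->ttm after destroying it on the error path, so later teardown that tests the pointer cannot destroy it twice, and ttm_bo_handle_move_mem() now sets placement caching for every non-FIXED move instead of only when a ttm had to be created. A minimal user-space sketch of the destroy-then-clear pattern (all names here are illustrative, not TTM API):

#include <stdlib.h>

struct tt_like {                  /* hypothetical stand-in for struct ttm_tt */
	void *pages;
};

static void tt_destroy(struct tt_like *tt)
{
	free(tt->pages);
	free(tt);
}

/* Error path: destroy the object *and* clear the owner's reference, so
 * a later cleanup pass that tests the pointer cannot free it a second
 * time -- this mirrors the added "bo->ttm = NULL" above. */
static void owner_error_path(struct tt_like **slot)
{
	tt_destroy(*slot);
	*slot = NULL;
}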
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 77dbf408c0d0..ae3c6f5dd2b7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- ttm_bo_free_old_node(bo);
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
(bo->ttm != NULL)) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
+ ttm_bo_free_old_node(bo);
} else {
/**
* This should help pipeline ordinary buffer moves.
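The reordering in this hunk is a teardown-ordering fix: for FIXED memory types the old node must not be freed while the ttm is still bound on top of it, so ttm_bo_free_old_node() now runs only after ttm_tt_unbind()/ttm_tt_destroy(). A hedged, self-contained sketch of the rule (illustrative names, not TTM API):

#include <stdlib.h>

struct node    { int id; };
struct binding { struct node *bound; };

static void unbind(struct binding *b)  { b->bound = NULL; }
static void free_node(struct node **n) { free(*n); *n = NULL; }

static void move_cleanup(struct binding *b, struct node **old_node)
{
	unbind(b);            /* the consumer stops referencing the node ... */
	free_node(old_node);  /* ... and only then is the node released */
}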
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index de41e55a944a..075daf44bce4 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -30,7 +30,7 @@
#include "ttm/ttm_lock.h"
#include "ttm/ttm_module.h"
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index ebddd443d91a..93577f2e2954 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -55,7 +55,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct ttm_object_file {
struct ttm_object_device *tdev;
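All four include hunks in this patch make the same substitution: generic code should include the <linux/atomic.h> wrapper rather than the architecture header <asm/atomic.h> directly, since the wrapper pulls in the per-architecture implementation plus the generic fallbacks. A kernel-context sketch of the preferred form (compiles only in-tree; shown for illustration):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t refs = ATOMIC_INIT(0);

static void get_ref(void) { atomic_inc(&refs); }
static bool put_ref(void) { return atomic_dec_and_test(&refs); } /* true when it hits 0 */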
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index d948575717bf..727e93daac3b 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"
@@ -355,7 +355,7 @@ restart:
if (nr_free)
goto restart;
- /* Not allowed to fall tough or break because
+ /* Not allowed to fall through or break because
* following context is inside spinlock while we are
* outside here.
*/
@@ -556,7 +556,7 @@ out:
}
/**
- * Fill the given pool if there isn't enough pages and requested number of
+ * Fill the given pool if there aren't enough pages and the requested number of
* pages is small.
*/
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
@@ -576,8 +576,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
pool->fill_lock = true;
- /* If allocation request is small and there is not enough
- * pages in pool we fill the pool first */
+ /* If the allocation request is small and there are not enough
+ * pages in the pool, we fill the pool up first. */
if (count < _manager->options.small
&& count > pool->npages) {
struct list_head new_pages;
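The policy this hunk documents: requests below the _manager->options.small threshold that the pool cannot currently satisfy trigger a refill, while larger requests bypass the pool entirely. A rough sketch of that gate (illustrative names; the real refill allocates pages and splices them in under the pool lock):

struct pool_sketch { unsigned npages; unsigned small; };

/* stand-in for the real refill path */
static void refill(struct pool_sketch *p, unsigned want)
{
	p->npages += want;
}

static void maybe_fill(struct pool_sketch *p, unsigned count)
{
	if (count < p->small && count > p->npages)
		refill(p, count - p->npages);
}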
@@ -614,9 +614,9 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
}
/**
- * Cut count nubmer of pages from the pool and put them to return list
+ * Cut 'count' number of pages from the pool and put them on the return list.
*
- * @return count of pages still to allocate to fill the request.
+ * @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
struct list_head *pages, int ttm_flags,
@@ -637,7 +637,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
goto out;
}
/* find the last pages to include for requested number of pages. Split
- * pool to begin and halves to reduce search space. */
+ * the pool in half to reduce the search space. */
if (count <= pool->npages/2) {
i = 0;
list_for_each(p, &pool->list) {
@@ -651,7 +651,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
break;
}
}
- /* Cut count number of pages from pool */
+ /* Cut 'count' number of pages from the pool */
list_cut_position(pages, &pool->list, p);
pool->npages -= count;
count = 0;
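ttm_page_pool_get_pages() avoids a full list walk by scanning from whichever end of the pool is nearer to the split point, then detaching the whole run at once with list_cut_position(). A self-contained plain-C sketch of the same idea (own list type, not the kernel list API; assumes 1 <= count <= npages):

#include <stddef.h>

struct node { struct node *prev, *next; };
struct pool { struct node head; unsigned npages; };  /* head is a sentinel */

/* Return the last node of the run to cut, walking from the nearer end. */
static struct node *split_point(struct pool *p, unsigned count)
{
	struct node *n;
	unsigned i;

	if (count <= p->npages / 2) {
		n = p->head.next;                 /* nearer the front */
		for (i = 1; i < count; ++i)
			n = n->next;
	} else {
		n = p->head.prev;                 /* nearer the back */
		for (i = p->npages; i > count; --i)
			n = n->prev;
	}
	return n;
}

/* Detach the first 'count' nodes onto the sentinel 'out' in O(1) once
 * the split point is known -- an analogue of list_cut_position(). */
static void cut_pages(struct pool *p, struct node *out, unsigned count)
{
	struct node *last = split_point(p, count);

	out->next = p->head.next;
	out->next->prev = out;
	out->prev = last;
	p->head.next = last->next;
	p->head.next->prev = &p->head;
	last->next = out;
	p->npages -= count;
}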