Diffstat (limited to 'drivers/staging/omapdrm')
 drivers/staging/omapdrm/Makefile          |   1 +
 drivers/staging/omapdrm/omap_dmm_tiler.c  |   4 +-
 drivers/staging/omapdrm/omap_drv.c        |   6 +-
 drivers/staging/omapdrm/omap_drv.h        |  14 ++
 drivers/staging/omapdrm/omap_fb.c         |   7 +-
 drivers/staging/omapdrm/omap_gem.c        | 130 ++++++++++++--
 drivers/staging/omapdrm/omap_gem_dmabuf.c | 220 ++++++++++++++++++
 drivers/staging/omapdrm/tcm-sita.c        |   2 +-
 8 files changed, 367 insertions(+), 17 deletions(-)
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/staging/omapdrm/Makefile
index d9cdc120d122..1ca0e0016de4 100644
--- a/drivers/staging/omapdrm/Makefile
+++ b/drivers/staging/omapdrm/Makefile
@@ -13,6 +13,7 @@ omapdrm-y := omap_drv.o \
omap_fb.o \
omap_fbdev.o \
omap_gem.o \
+ omap_gem_dmabuf.o \
omap_dmm_tiler.o \
tcm-sita.o
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
index 1ecb6a73d790..9d83060e753a 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -347,7 +347,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
if (ret) {
kfree(block);
- return 0;
+ return ERR_PTR(-ENOMEM);
}
/* add to allocation list */
@@ -371,7 +371,7 @@ struct tiler_block *tiler_reserve_1d(size_t size)
if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
&block->area)) {
kfree(block);
- return 0;
+ return ERR_PTR(-ENOMEM);
}
spin_lock(&omap_dmm->list_lock);
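These two hunks change the tiler_reserve_1d/2d() failure value from a bare 0 (i.e. NULL) to ERR_PTR(-ENOMEM), so a plain NULL check in a caller would now miss the failure. A minimal sketch of the caller pattern the new convention expects; the wrapper itself is hypothetical and the prototype is abbreviated from omap_dmm_tiler.h:

#include <linux/err.h>

/* Hypothetical caller: reserve a 2D tiler area, normalizing errors. */
static int example_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
                uint16_t align, struct tiler_block **out)
{
        struct tiler_block *block = tiler_reserve_2d(fmt, w, h, align);

        /* a NULL test no longer catches failure; use the ERR_PTR helpers */
        if (IS_ERR(block))
                return PTR_ERR(block);

        *out = block;
        return 0;
}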
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 620b8d54223d..0d2acca376ca 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -746,7 +746,7 @@ static const struct file_operations omapdriver_fops = {
static struct drm_driver omap_drm_driver = {
.driver_features =
- DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = dev_load,
.unload = dev_unload,
.open = dev_open,
@@ -766,6 +766,10 @@ static struct drm_driver omap_drm_driver = {
.debugfs_init = omap_debugfs_init,
.debugfs_cleanup = omap_debugfs_cleanup,
#endif
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = omap_gem_prime_export,
+ .gem_prime_import = omap_gem_prime_import,
.gem_init_object = omap_gem_init_object,
.gem_free_object = omap_gem_free_object,
.gem_vm_ops = &omap_gem_vm_ops,
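Setting DRIVER_PRIME and filling in the four prime hooks is what exposes the PRIME export/import ioctls for this device. A rough userspace sketch of the export direction, assuming a kernel and headers with the PRIME UAPI merged alongside this series; the helper name is hypothetical:

#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Hypothetical: turn an existing GEM handle into a dma-buf fd. */
static int gem_to_dmabuf_fd(int drm_fd, uint32_t handle)
{
        struct drm_prime_handle args;

        memset(&args, 0, sizeof(args));
        args.handle = handle;
        args.flags = DRM_CLOEXEC;

        if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
                return -1;

        return args.fd; /* can be passed to another device or process */
}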
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
index b7e0f0773003..f238d574da0c 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -138,6 +138,8 @@ int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
@@ -145,12 +147,24 @@ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
void (*fxn)(void *arg), void *arg);
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
+void omap_gem_dma_sync(struct drm_gem_object *obj,
+ enum dma_data_direction dir);
int omap_gem_get_paddr(struct drm_gem_object *obj,
dma_addr_t *paddr, bool remap);
int omap_gem_put_paddr(struct drm_gem_object *obj);
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
+ bool remap);
+int omap_gem_put_pages(struct drm_gem_object *obj);
+uint32_t omap_gem_flags(struct drm_gem_object *obj);
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
+struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buffer);
+
static inline int align_pitch(int pitch, int width, int bpp)
{
int bytespp = (bpp + 7) / 8;
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
index 04b235b6724a..74260f043ab1 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/staging/omapdrm/omap_fb.c
@@ -167,7 +167,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
}
/* Call for unpin 'a' (if not NULL), and pin 'b' (if not NULL). Although
- * buffers to unpin are just just pushed to the unpin fifo so that the
+ * buffers to unpin are just pushed to the unpin fifo so that the
* caller can defer unpin until vblank.
*
* Note if this fails (ie. something went very wrong!), all buffers are
@@ -197,8 +197,11 @@ int omap_framebuffer_replace(struct drm_framebuffer *a,
pa->paddr = 0;
}
- if (pb && !ret)
+ if (pb && !ret) {
ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
+ if (!ret)
+ omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE);
+ }
}
if (ret) {
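The omap_fb.c hunk establishes a pin-then-sync ordering: the incoming buffer is pinned first, and only if that succeeds are CPU-dirtied cached pages flushed toward the display. The same ordering for any scanout path, as a hedged sketch built only from functions added in this patch; the helper name is hypothetical:

#include "omap_drv.h" /* omap_gem_get_paddr(), omap_gem_dma_sync() */

/* Hypothetical: prepare a GEM object for scanout DMA. Pin first so the
 * backing pages exist, then flush pages the CPU dirtied via mmap. */
static int example_prepare_scanout(struct drm_gem_object *bo,
                dma_addr_t *paddr)
{
        int ret = omap_gem_get_paddr(bo, paddr, true);
        if (ret)
                return ret;

        omap_gem_dma_sync(bo, DMA_TO_DEVICE);
        return 0;
}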
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index 921f058cc6a4..3a0d035a9e03 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -207,13 +207,27 @@ static inline bool is_shmem(struct drm_gem_object *obj)
return obj->filp != NULL;
}
+/**
+ * shmem buffers that are mapped cached can simulate coherency by using
+ * page faulting to keep track of dirty pages
+ */
+static inline bool is_cached_coherent(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ return is_shmem(obj) &&
+ ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+}
+
static DEFINE_SPINLOCK(sync_lock);
/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct page **pages;
+ int i, npages = obj->size >> PAGE_SHIFT;
+ dma_addr_t *addrs;
WARN_ON(omap_obj->pages);
@@ -231,16 +245,18 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
* DSS, GPU, etc. are not cache coherent:
*/
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
- int i, npages = obj->size >> PAGE_SHIFT;
- dma_addr_t *addrs = kmalloc(npages * sizeof(addrs), GFP_KERNEL);
+ addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
for (i = 0; i < npages; i++) {
- addrs[i] = dma_map_page(obj->dev->dev, pages[i],
+ addrs[i] = dma_map_page(dev->dev, pages[i],
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
- omap_obj->addrs = addrs;
+ } else {
+ addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
}
+ omap_obj->addrs = addrs;
omap_obj->pages = pages;
+
return 0;
}
@@ -258,14 +274,21 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
PAGE_SIZE, DMA_BIDIRECTIONAL);
}
- kfree(omap_obj->addrs);
- omap_obj->addrs = NULL;
}
+ kfree(omap_obj->addrs);
+ omap_obj->addrs = NULL;
+
_drm_gem_put_pages(obj, omap_obj->pages, true, false);
omap_obj->pages = NULL;
}
+/* get buffer flags */
+uint32_t omap_gem_flags(struct drm_gem_object *obj)
+{
+ return to_omap_bo(obj)->flags;
+}
+
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
@@ -330,6 +353,7 @@ static int fault_1d(struct drm_gem_object *obj,
vma->vm_start) >> PAGE_SHIFT;
if (omap_obj->pages) {
+ omap_gem_cpu_sync(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
@@ -504,7 +528,6 @@ fail:
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct omap_gem_object *omap_obj;
int ret;
ret = drm_gem_mmap(filp, vma);
@@ -513,8 +536,13 @@ int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
- /* after drm_gem_mmap(), it is safe to access the obj */
- omap_obj = to_omap_bo(vma->vm_private_data);
+ return omap_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
@@ -524,12 +552,31 @@ int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
} else {
+ /*
+ * We do have some private objects, at least for scanout buffers
+ * on hardware without DMM/TILER. But these are allocated write-
+ * combine
+ */
+ if (WARN_ON(!obj->filp))
+ return -EINVAL;
+
+ /*
+ * Shunt off cached objs to shmem file so they have their own
+ * address_space (so unmap_mapping_range does what we want,
+ * in particular in the case of mmap'd dmabufs)
+ */
+ fput(vma->vm_file);
+ get_file(obj->filp);
+ vma->vm_pgoff = 0;
+ vma->vm_file = obj->filp;
+
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
- return ret;
+ return 0;
}
+
/**
* omap_gem_dumb_create - create a dumb buffer
* @drm_file: our client file
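Splitting omap_gem_mmap_obj() out of omap_gem_mmap() lets a vma that never went through drm_gem_mmap(), notably the dmabuf mmap path added below, pick up the object's caching policy and (for cached objects) the shmem file's address_space. A hedged sketch of such a caller, mirroring what the new dmabuf code does:

#include "omap_drv.h" /* omap_gem_mmap_obj() */

/* Hypothetical: hand an externally constructed vma to the GEM object
 * so it gets the right page protection and backing address_space. */
static int example_mmap_external_vma(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
        vma->vm_ops = obj->dev->driver->gem_vm_ops;
        vma->vm_private_data = obj;

        return omap_gem_mmap_obj(obj, vma);
}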
@@ -639,6 +686,48 @@ fail:
return ret;
}
+/* Sync the buffer for CPU access.. note pages should already be
+ * attached, ie. omap_gem_get_pages()
+ */
+void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
+ dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ omap_obj->addrs[pgoff] = 0;
+ }
+}
+
+/* sync the buffer for DMA access */
+void omap_gem_dma_sync(struct drm_gem_object *obj,
+ enum dma_data_direction dir)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (is_cached_coherent(obj)) {
+ int i, npages = obj->size >> PAGE_SHIFT;
+ struct page **pages = omap_obj->pages;
+ bool dirty = false;
+
+ for (i = 0; i < npages; i++) {
+ if (!omap_obj->addrs[i]) {
+ omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dirty = true;
+ }
+ }
+
+ if (dirty) {
+ unmap_mapping_range(obj->filp->f_mapping, 0,
+ omap_gem_mmap_size(obj), 1);
+ }
+ }
+}
+
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
* already contiguous, remap it to pin in physically contiguous memory.. (ie.
* map in TILER)
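Together these hunks implement the fault-based coherency scheme for OMAP_BO_CACHED shmem buffers: a zeroed addrs[] slot marks a page as CPU-owned, the fault path calls omap_gem_cpu_sync() to dma_unmap a page before the CPU first touches it, and omap_gem_dma_sync() re-maps every CPU-owned page and then uses unmap_mapping_range() so the next CPU access faults again. A hedged kernel-side sketch of the intended round trip; the function is hypothetical:

#include "omap_drv.h" /* omap_gem_{get,put}_pages(), omap_gem_dma_sync() */

/* Hypothetical round trip for a cached buffer: CPU writes arrive via
 * an mmap'ing (each faulted page is dma_unmap'd by omap_gem_cpu_sync()
 * in the fault path); before handing the buffer to a DMA engine,
 * re-map whatever the CPU dirtied. */
static int example_cpu_write_then_dma(struct drm_gem_object *obj)
{
        struct page **pages;
        int ret;

        ret = omap_gem_get_pages(obj, &pages, true); /* pin + attach */
        if (ret)
                return ret;

        /* ... userspace scribbles on the buffer through its mapping ... */

        omap_gem_dma_sync(obj, DMA_TO_DEVICE); /* flush dirty pages */
        return omap_gem_put_pages(obj);
}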
@@ -703,6 +792,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
*paddr = omap_obj->paddr;
} else {
ret = -EINVAL;
+ goto fail;
}
fail:
@@ -764,9 +854,27 @@ static int get_pages(struct drm_gem_object *obj, struct page ***pages)
return 0;
}
-int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
+/* if !remap, and we don't have pages backing, then fail, rather than
+ * increasing the pin count (which we don't really do yet anyways,
+ * because we don't support swapping pages back out). And 'remap'
+ * might not be quite the right name, but I wanted to keep it working
+ * similarly to omap_gem_get_paddr(). Note though that mutex is not
+ * aquired if !remap (because this can be called in atomic ctxt),
+ * but probably omap_gem_get_paddr() should be changed to work in the
+ * same way. If !remap, a matching omap_gem_put_pages() call is not
+ * required (and should not be made).
+ */
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
+ bool remap)
{
int ret;
+ if (!remap) {
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ if (!omap_obj->pages)
+ return -ENOMEM;
+ *pages = omap_obj->pages;
+ return 0;
+ }
mutex_lock(&obj->dev->struct_mutex);
ret = get_pages(obj, pages);
mutex_unlock(&obj->dev->struct_mutex);
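With the new 'remap' flag, omap_gem_get_pages(obj, &pages, false) is safe in atomic context: it takes no mutex, fails if the pages are not already attached, and must not be paired with omap_gem_put_pages(). A short sketch of that pattern, as the dmabuf kmap_atomic hook below uses it; the helper is hypothetical:

#include "omap_drv.h" /* omap_gem_get_pages() */

/* Hypothetical atomic-context peek at one backing page. */
static struct page *example_peek_page(struct drm_gem_object *obj,
                int pgoff)
{
        struct page **pages;

        if (omap_gem_get_pages(obj, &pages, false))
                return NULL; /* pages not attached; caller must cope */

        /* note: no omap_gem_put_pages() for the !remap case */
        return pages[pgoff];
}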
diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/staging/omapdrm/omap_gem_dmabuf.c
new file mode 100644
index 000000000000..42728e0cc194
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_gem_dmabuf.c
@@ -0,0 +1,220 @@
+/*
+ * drivers/staging/omapdrm/omap_gem_dmabuf.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *omap_gem_map_dma_buf(
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attachment->dmabuf->priv;
+ struct sg_table *sg;
+ dma_addr_t paddr;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+ /* camera, etc, need physically contiguous.. but we need a
+ * better way to know this..
+ */
+ ret = omap_gem_get_paddr(obj, &paddr, true);
+ if (ret)
+ goto out;
+
+ ret = sg_alloc_table(sg, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ sg_init_table(sg->sgl, 1);
+ sg_dma_len(sg->sgl) = obj->size;
+ sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
+ sg_dma_address(sg->sgl) = paddr;
+
+ /* this should be after _get_paddr() to ensure we have pages attached */
+ omap_gem_dma_sync(obj, dir);
+
+out:
+ if (ret)
+ return ERR_PTR(ret);
+ return sg;
+}
+
+static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attachment->dmabuf->priv;
+ omap_gem_put_paddr(obj);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void omap_gem_dmabuf_release(struct dma_buf *buffer)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ /* release reference that was taken when dmabuf was exported
+ * in omap_gem_prime_export()..
+ */
+ drm_gem_object_unreference_unlocked(obj);
+}
+
+
+static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
+ size_t start, size_t len, enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ struct page **pages;
+ if (omap_gem_flags(obj) & OMAP_BO_TILED) {
+ /* TODO we would need to pin at least part of the buffer to
+ * get de-tiled view. For now just reject it.
+ */
+ return -ENOMEM;
+ }
+ /* make sure we have the pages: */
+ return omap_gem_get_pages(obj, &pages, true);
+}
+
+static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
+ size_t start, size_t len, enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ omap_gem_put_pages(obj);
+}
+
+
+static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
+ unsigned long page_num)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ struct page **pages;
+ omap_gem_get_pages(obj, &pages, false);
+ omap_gem_cpu_sync(obj, page_num);
+ return kmap_atomic(pages[page_num]);
+}
+
+static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
+ unsigned long page_num, void *addr)
+{
+ kunmap_atomic(addr);
+}
+
+static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
+ unsigned long page_num)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ struct page **pages;
+ omap_gem_get_pages(obj, &pages, false);
+ omap_gem_cpu_sync(obj, page_num);
+ return kmap(pages[page_num]);
+}
+
+static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
+ unsigned long page_num, void *addr)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ struct page **pages;
+ omap_gem_get_pages(obj, &pages, false);
+ kunmap(pages[page_num]);
+}
+
+/*
+ * TODO maybe we can split up drm_gem_mmap to avoid duplicating
+ * some here.. or at least have a drm_dmabuf_mmap helper.
+ */
+static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
+ struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = buffer->priv;
+ int ret = 0;
+
+ if (WARN_ON(!obj->filp))
+ return -EINVAL;
+
+ /* Check for valid size. */
+ if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!obj->dev->driver->gem_vm_ops) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+ vma->vm_ops = obj->dev->driver->gem_vm_ops;
+ vma->vm_private_data = obj;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ /* Take a ref for this mapping of the object, so that the fault
+ * handler can dereference the mmap offset's pointer to the object.
+ * This reference is cleaned up by the corresponding vm_close
+ * (which should happen whether the vma was created by this call, or
+ * by a vm_open due to mremap or partial unmap or whatever).
+ */
+ vma->vm_ops->open(vma);
+
+out_unlock:
+ if (ret)
+ return ret;
+
+ return omap_gem_mmap_obj(obj, vma);
+}
+
+struct dma_buf_ops omap_dmabuf_ops = {
+ .map_dma_buf = omap_gem_map_dma_buf,
+ .unmap_dma_buf = omap_gem_unmap_dma_buf,
+ .release = omap_gem_dmabuf_release,
+ .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
+ .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
+ .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
+ .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+ .kmap = omap_gem_dmabuf_kmap,
+ .kunmap = omap_gem_dmabuf_kunmap,
+ .mmap = omap_gem_dmabuf_mmap,
+};
+
+struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, 0600);
+}
+
+struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buffer)
+{
+ struct drm_gem_object *obj;
+
+ /* is this one of our own objects? */
+ if (buffer->ops == &omap_dmabuf_ops) {
+ obj = buffer->priv;
+ /* is it from our device? */
+ if (obj->dev == dev) {
+ drm_gem_object_reference(obj);
+ return obj;
+ }
+ }
+
+ /*
+ * TODO add support for importing buffers from other devices..
+ * for now we don't need this but would be nice to add eventually
+ */
+ return ERR_PTR(-EINVAL);
+}
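End to end, the export path above plus this import give userspace a self-import loop: a dma-buf fd exported from this device resolves back to the same GEM object (with a fresh reference) rather than to a copy. A rough userspace sketch of the fd-to-handle direction, with the same UAPI caveats as the export example; the helper name is hypothetical:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Hypothetical: import a dma-buf fd; for a buffer this same device
 * exported, the returned handle refers to the original object. */
static int dmabuf_fd_to_gem(int drm_fd, int dmabuf_fd, uint32_t *handle)
{
        struct drm_prime_handle args;

        memset(&args, 0, sizeof(args));
        args.fd = dmabuf_fd;

        if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) < 0)
                return -1;

        *handle = args.handle;
        return 0;
}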
diff --git a/drivers/staging/omapdrm/tcm-sita.c b/drivers/staging/omapdrm/tcm-sita.c
index 10d5ac3dae4b..efb609510540 100644
--- a/drivers/staging/omapdrm/tcm-sita.c
+++ b/drivers/staging/omapdrm/tcm-sita.c
@@ -200,7 +200,7 @@ static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
*
* @param w width
* @param h height
- * @param area pointer to the area that will be populated with the reesrved
+ * @param area pointer to the area that will be populated with the reserved
* area
*
* @return 0 on success, non-0 error value on failure.