Diffstat (limited to 'drivers/staging/tidspbridge/pmgr/cmm.c')
-rw-r--r--  drivers/staging/tidspbridge/pmgr/cmm.c  675
1 file changed, 274 insertions(+), 401 deletions(-)
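
This patch converts drivers/staging/tidspbridge/pmgr/cmm.c from the bridge's private struct lst_list wrapper to the kernel's standard struct list_head, and drops the Hungarian-notation prefixes (dw_, ul_, h) from field names. As a minimal sketch of the conversion pattern, with illustrative names not taken from the patch: list heads are now embedded in their owner and initialized in place rather than allocated separately, and traversal uses the <linux/list.h> helpers.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_node {
	struct list_head link;	/* embedded linkage replaces struct lst_list */
	u32 pa;			/* payload */
};

static LIST_HEAD(demo_list);	/* head lives in the owner; no kzalloc */

static void demo(void)
{
	struct demo_node *n, *tmp;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return;
	n->pa = 0x1000;
	list_add_tail(&n->link, &demo_list);

	/* the _safe variant caches the next entry, so list_del() is legal */
	list_for_each_entry_safe(n, tmp, &demo_list, link) {
		list_del(&n->link);
		kfree(n);
	}
}
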
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index 93a7c4fd57e4..e6b2c8962f81 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -12,7 +12,7 @@
* describes a block of physically contiguous shared memory used for
* future allocations by CMM.
*
- * Memory is coelesced back to the appropriate heap when a buffer is
+ * Memory is coalesced back to the appropriate heap when a buffer is
* freed.
*
* Notes:
@@ -30,6 +30,7 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/types.h>
+#include <linux/list.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
@@ -38,9 +39,7 @@
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
-#include <dspbridge/list.h>
#include <dspbridge/sync.h>
-#include <dspbridge/utildefs.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
@@ -50,7 +49,7 @@
#include <dspbridge/cmm.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
-#define NEXT_PA(pnode) (pnode->dw_pa + pnode->ul_size)
+#define NEXT_PA(pnode) (pnode->pa + pnode->size)
/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y) ((x)+(y))
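
The reworked NEXT_PA() yields the physical address one byte past the end of a block; add_to_free_list() compares it against neighbouring nodes to detect physically contiguous free blocks. A hypothetical example:

/*
 * Hypothetical: a node with pa = 0x1000 and size = 0x100 gives
 * NEXT_PA(pnode) == 0x1100, so a free block starting at 0x1100 is
 * contiguous with it and the two can be coalesced.
 */
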
@@ -64,32 +63,32 @@
*/
struct cmm_allocator { /* sma */
unsigned int shm_base; /* Start of physical SM block */
- u32 ul_sm_size; /* Size of SM block in bytes */
- unsigned int dw_vm_base; /* Start of VM block. (Dev driver
+ u32 sm_size; /* Size of SM block in bytes */
+ unsigned int vm_base; /* Start of VM block. (Dev driver
* context for 'sma') */
- u32 dw_dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this
+ u32 dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this
* SM space */
s8 c_factor; /* DSPPa to GPPPa Conversion Factor */
- unsigned int dw_dsp_base; /* DSP virt base byte address */
- u32 ul_dsp_size; /* DSP seg size in bytes */
- struct cmm_object *hcmm_mgr; /* back ref to parent mgr */
+ unsigned int dsp_base; /* DSP virt base byte address */
+ u32 dsp_size; /* DSP seg size in bytes */
+ struct cmm_object *cmm_mgr; /* back ref to parent mgr */
/* node list of available memory */
- struct lst_list *free_list_head;
+ struct list_head free_list;
/* node list of memory in use */
- struct lst_list *in_use_list_head;
+ struct list_head in_use_list;
};
struct cmm_xlator { /* Pa<->Va translator object */
/* CMM object this translator associated */
- struct cmm_object *hcmm_mgr;
+ struct cmm_object *cmm_mgr;
/*
* Client process virtual base address that corresponds to phys SM
- * base address for translator's ul_seg_id.
+ * base address for translator's seg_id.
* Only 1 segment ID currently supported.
*/
- unsigned int dw_virt_base; /* virtual base address */
- u32 ul_virt_size; /* size of virt space in bytes */
- u32 ul_seg_id; /* Segment Id */
+ unsigned int virt_base; /* virtual base address */
+ u32 virt_size; /* size of virt space in bytes */
+ u32 seg_id; /* Segment Id */
};
/* CMM Mgr */
@@ -98,40 +97,40 @@ struct cmm_object {
* Cmm Lock is used to serialize access mem manager for multi-threads.
*/
struct mutex cmm_lock; /* Lock to access cmm mgr */
- struct lst_list *node_free_list_head; /* Free list of memory nodes */
- u32 ul_min_block_size; /* Min SM block; default 16 bytes */
- u32 dw_page_size; /* Memory Page size (1k/4k) */
+ struct list_head node_free_list; /* Free list of memory nodes */
+ u32 min_block_size; /* Min SM block; default 16 bytes */
+ u32 page_size; /* Memory Page size (1k/4k) */
/* GPP SM segment ptrs */
struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};
/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
- /* ul_min_block_size, min block size(bytes) allocated by cmm mgr */
+ /* min_block_size, min block size(bytes) allocated by cmm mgr */
16
};
/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
- 1 /* ul_seg_id, default segment Id for allocator */
+ 1 /* seg_id, default segment Id for allocator */
};
/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
- /* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
+ /* seg_id, does not have to match cmm_dfltalctattrs seg_id */
1,
- 0, /* dw_dsp_bufs */
- 0, /* dw_dsp_buf_size */
+ 0, /* dsp_bufs */
+ 0, /* dsp_buf_size */
NULL, /* vm_base */
- 0, /* dw_vm_size */
+ 0, /* vm_size */
};
/* SM node representing a block of memory. */
struct cmm_mnode {
struct list_head link; /* must be 1st element */
- u32 dw_pa; /* Phys addr */
- u32 dw_va; /* Virtual address in device process context */
- u32 ul_size; /* SM block size in bytes */
+ u32 pa; /* Phys addr */
+ u32 va; /* Virtual address in device process context */
+ u32 size; /* SM block size in bytes */
u32 client_proc; /* Process that allocated this mem block */
};
@@ -181,32 +180,32 @@ void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
*pp_buf_va = NULL;
if (cmm_mgr_obj && (usize != 0)) {
- if (pattrs->ul_seg_id > 0) {
+ if (pattrs->seg_id > 0) {
/* SegId > 0 is SM */
/* get the allocator object for this segment id */
allocator =
- get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
- /* keep block size a multiple of ul_min_block_size */
+ get_allocator(cmm_mgr_obj, pattrs->seg_id);
+ /* keep block size a multiple of min_block_size */
usize =
- ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size -
+ ((usize - 1) & ~(cmm_mgr_obj->min_block_size -
1))
- + cmm_mgr_obj->ul_min_block_size;
+ + cmm_mgr_obj->min_block_size;
mutex_lock(&cmm_mgr_obj->cmm_lock);
pnode = get_free_block(allocator, usize);
}
if (pnode) {
- delta_size = (pnode->ul_size - usize);
- if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
+ delta_size = (pnode->size - usize);
+ if (delta_size >= cmm_mgr_obj->min_block_size) {
/* create a new block with the leftovers and
* add to freelist */
new_node =
- get_node(cmm_mgr_obj, pnode->dw_pa + usize,
- pnode->dw_va + usize,
+ get_node(cmm_mgr_obj, pnode->pa + usize,
+ pnode->va + usize,
(u32) delta_size);
/* leftovers go free */
add_to_free_list(allocator, new_node);
/* adjust our node's size */
- pnode->ul_size = usize;
+ pnode->size = usize;
}
/* Tag node with client process requesting allocation
* We'll need to free up a process's alloc'd SM if the
@@ -216,17 +215,16 @@ void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
pnode->client_proc = current->tgid;
/* put our node on InUse list */
- lst_put_tail(allocator->in_use_list_head,
- (struct list_head *)pnode);
- buf_pa = (void *)pnode->dw_pa; /* physical address */
+ list_add_tail(&pnode->link, &allocator->in_use_list);
+ buf_pa = (void *)pnode->pa; /* physical address */
/* clear mem */
- pbyte = (u8 *) pnode->dw_va;
+ pbyte = (u8 *) pnode->va;
for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
*pbyte = 0;
if (pp_buf_va != NULL) {
/* Virtual address */
- *pp_buf_va = (void *)pnode->dw_va;
+ *pp_buf_va = (void *)pnode->va;
}
}
mutex_unlock(&cmm_mgr_obj->cmm_lock);
@@ -245,7 +243,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
{
struct cmm_object *cmm_obj = NULL;
int status = 0;
- struct util_sysinfo sys_info;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(ph_cmm_mgr != NULL);
@@ -253,40 +250,23 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
*ph_cmm_mgr = NULL;
/* create, zero, and tag a cmm mgr object */
cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
- if (cmm_obj != NULL) {
- if (mgr_attrts == NULL)
- mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
-
- /* 4 bytes minimum */
- DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
- /* save away smallest block allocation for this cmm mgr */
- cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
- /* save away the systems memory page size */
- sys_info.dw_page_size = PAGE_SIZE;
- sys_info.dw_allocation_granularity = PAGE_SIZE;
- sys_info.dw_number_of_processors = 1;
-
- cmm_obj->dw_page_size = sys_info.dw_page_size;
-
- /* Note: DSP SM seg table(aDSPSMSegTab[]) zero'd by
- * MEM_ALLOC_OBJECT */
-
- /* create node free list */
- cmm_obj->node_free_list_head =
- kzalloc(sizeof(struct lst_list),
- GFP_KERNEL);
- if (cmm_obj->node_free_list_head == NULL) {
- status = -ENOMEM;
- cmm_destroy(cmm_obj, true);
- } else {
- INIT_LIST_HEAD(&cmm_obj->
- node_free_list_head->head);
- mutex_init(&cmm_obj->cmm_lock);
- *ph_cmm_mgr = cmm_obj;
- }
- } else {
- status = -ENOMEM;
- }
+ if (!cmm_obj)
+ return -ENOMEM;
+
+ if (mgr_attrts == NULL)
+ mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
+
+ /* 4 bytes minimum */
+ DBC_ASSERT(mgr_attrts->min_block_size >= 4);
+ /* save away smallest block allocation for this cmm mgr */
+ cmm_obj->min_block_size = mgr_attrts->min_block_size;
+ cmm_obj->page_size = PAGE_SIZE;
+
+ /* create node free list */
+ INIT_LIST_HEAD(&cmm_obj->node_free_list);
+ mutex_init(&cmm_obj->cmm_lock);
+ *ph_cmm_mgr = cmm_obj;
+
return status;
}
@@ -301,7 +281,7 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
struct cmm_info temp_info;
int status = 0;
s32 slot_seg;
- struct cmm_mnode *pnode;
+ struct cmm_mnode *node, *tmp;
DBC_REQUIRE(refs > 0);
if (!hcmm_mgr) {
@@ -314,7 +294,7 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
/* Check for outstanding memory allocations */
status = cmm_get_info(hcmm_mgr, &temp_info);
if (!status) {
- if (temp_info.ul_total_in_use_cnt > 0) {
+ if (temp_info.total_in_use_cnt > 0) {
/* outstanding allocations */
status = -EPERM;
}
@@ -331,15 +311,10 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
}
}
}
- if (cmm_mgr_obj->node_free_list_head != NULL) {
- /* Free the free nodes */
- while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
- pnode = (struct cmm_mnode *)
- lst_get_head(cmm_mgr_obj->node_free_list_head);
- kfree(pnode);
- }
- /* delete NodeFreeList list */
- kfree(cmm_mgr_obj->node_free_list_head);
+ list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
+ link) {
+ list_del(&node->link);
+ kfree(node);
}
mutex_unlock(&cmm_mgr_obj->cmm_lock);
if (!status) {
@@ -368,13 +343,12 @@ void cmm_exit(void)
* Purpose:
* Free the given buffer.
*/
-int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
- u32 ul_seg_id)
+int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
int status = -EFAULT;
- struct cmm_mnode *mnode_obj = NULL;
- struct cmm_allocator *allocator = NULL;
+ struct cmm_mnode *curr, *tmp;
+ struct cmm_allocator *allocator;
struct cmm_attrs *pattrs;
DBC_REQUIRE(refs > 0);
@@ -382,35 +356,28 @@ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
if (ul_seg_id == 0) {
pattrs = &cmm_dfltalctattrs;
- ul_seg_id = pattrs->ul_seg_id;
+ ul_seg_id = pattrs->seg_id;
}
if (!hcmm_mgr || !(ul_seg_id > 0)) {
status = -EFAULT;
return status;
}
- /* get the allocator for this segment id */
+
allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
- if (allocator != NULL) {
- mutex_lock(&cmm_mgr_obj->cmm_lock);
- mnode_obj =
- (struct cmm_mnode *)lst_first(allocator->in_use_list_head);
- while (mnode_obj) {
- if ((u32) buf_pa == mnode_obj->dw_pa) {
- /* Found it */
- lst_remove_elem(allocator->in_use_list_head,
- (struct list_head *)mnode_obj);
- /* back to freelist */
- add_to_free_list(allocator, mnode_obj);
- status = 0; /* all right! */
- break;
- }
- /* next node. */
- mnode_obj = (struct cmm_mnode *)
- lst_next(allocator->in_use_list_head,
- (struct list_head *)mnode_obj);
+ if (!allocator)
+ return status;
+
+ mutex_lock(&cmm_mgr_obj->cmm_lock);
+ list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
+ if (curr->pa == (u32) buf_pa) {
+ list_del(&curr->link);
+ add_to_free_list(allocator, curr);
+ status = 0;
+ break;
}
- mutex_unlock(&cmm_mgr_obj->cmm_lock);
}
+ mutex_unlock(&cmm_mgr_obj->cmm_lock);
+
return status;
}
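
The rewritten cmm_free_buf() walks the in-use list with list_for_each_entry_safe() because the matching node is unlinked inside the loop; the _safe iterator caches the next entry so list_del() cannot corrupt the walk. A condensed, standalone sketch of the idiom (names assumed from this file):

static int demo_unlink_by_pa(struct list_head *head, u32 pa)
{
	struct cmm_mnode *curr, *tmp;

	list_for_each_entry_safe(curr, tmp, head, link) {
		if (curr->pa == pa) {
			list_del(&curr->link);
			return 0;	/* caller moves it to the free list */
		}
	}
	return -EFAULT;			/* no block at that address */
}
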
@@ -450,7 +417,7 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
u32 ul_seg;
int status = 0;
struct cmm_allocator *altr;
- struct cmm_mnode *mnode_obj = NULL;
+ struct cmm_mnode *curr;
DBC_REQUIRE(cmm_info_obj != NULL);
@@ -459,46 +426,39 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
return status;
}
mutex_lock(&cmm_mgr_obj->cmm_lock);
- cmm_info_obj->ul_num_gppsm_segs = 0; /* # of SM segments */
+ cmm_info_obj->num_gppsm_segs = 0; /* # of SM segments */
/* Total # of outstanding alloc */
- cmm_info_obj->ul_total_in_use_cnt = 0;
+ cmm_info_obj->total_in_use_cnt = 0;
/* min block size */
- cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
+ cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
/* check SM memory segments */
for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
/* get the allocator object for this segment id */
altr = get_allocator(cmm_mgr_obj, ul_seg);
- if (altr != NULL) {
- cmm_info_obj->ul_num_gppsm_segs++;
- cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
- altr->shm_base - altr->ul_dsp_size;
- cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
- altr->ul_dsp_size + altr->ul_sm_size;
- cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
- altr->shm_base;
- cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
- altr->ul_sm_size;
- cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
- altr->dw_dsp_base;
- cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
- altr->ul_dsp_size;
- cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
- altr->dw_vm_base - altr->ul_dsp_size;
- cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
- mnode_obj = (struct cmm_mnode *)
- lst_first(altr->in_use_list_head);
- /* Count inUse blocks */
- while (mnode_obj) {
- cmm_info_obj->ul_total_in_use_cnt++;
- cmm_info_obj->seg_info[ul_seg -
- 1].ul_in_use_cnt++;
- /* next node. */
- mnode_obj = (struct cmm_mnode *)
- lst_next(altr->in_use_list_head,
- (struct list_head *)mnode_obj);
- }
+ if (!altr)
+ continue;
+ cmm_info_obj->num_gppsm_segs++;
+ cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
+ altr->shm_base - altr->dsp_size;
+ cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
+ altr->dsp_size + altr->sm_size;
+ cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
+ altr->shm_base;
+ cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
+ altr->sm_size;
+ cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
+ altr->dsp_base;
+ cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
+ altr->dsp_size;
+ cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
+ altr->vm_base - altr->dsp_size;
+ cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;
+
+ list_for_each_entry(curr, &altr->in_use_list, link) {
+ cmm_info_obj->total_in_use_cnt++;
+ cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
}
- } /* end for */
+ }
mutex_unlock(&cmm_mgr_obj->cmm_lock);
return status;
}
@@ -544,75 +504,62 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
DBC_REQUIRE(dw_gpp_base_pa != 0);
DBC_REQUIRE(gpp_base_va != 0);
DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
- (c_factor >= CMM_SUBFROMDSPPA));
+ (c_factor >= CMM_SUBFROMDSPPA));
+
dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
- "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__,
- dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base,
- ul_dsp_size, gpp_base_va);
- if (!hcmm_mgr) {
- status = -EFAULT;
- return status;
- }
+ "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
+ __func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
+ dw_dsp_base, ul_dsp_size, gpp_base_va);
+
+ if (!hcmm_mgr)
+ return -EFAULT;
+
/* make sure we have room for another allocator */
mutex_lock(&cmm_mgr_obj->cmm_lock);
+
slot_seg = get_slot(cmm_mgr_obj);
if (slot_seg < 0) {
- /* get a slot number */
status = -EPERM;
goto func_end;
}
+
/* Check if input ul_size is big enough to alloc at least one block */
- if (ul_size < cmm_mgr_obj->ul_min_block_size) {
+ if (ul_size < cmm_mgr_obj->min_block_size) {
status = -EINVAL;
goto func_end;
}
/* create, zero, and tag an SM allocator object */
psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
- if (psma != NULL) {
- psma->hcmm_mgr = hcmm_mgr; /* ref to parent */
- psma->shm_base = dw_gpp_base_pa; /* SM Base phys */
- psma->ul_sm_size = ul_size; /* SM segment size in bytes */
- psma->dw_vm_base = gpp_base_va;
- psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
- psma->c_factor = c_factor;
- psma->dw_dsp_base = dw_dsp_base;
- psma->ul_dsp_size = ul_dsp_size;
- if (psma->dw_vm_base == 0) {
- status = -EPERM;
- goto func_end;
- }
- /* return the actual segment identifier */
- *sgmt_id = (u32) slot_seg + 1;
- /* create memory free list */
- psma->free_list_head = kzalloc(sizeof(struct lst_list),
- GFP_KERNEL);
- if (psma->free_list_head == NULL) {
- status = -ENOMEM;
- goto func_end;
- }
- INIT_LIST_HEAD(&psma->free_list_head->head);
-
- /* create memory in-use list */
- psma->in_use_list_head = kzalloc(sizeof(struct
- lst_list), GFP_KERNEL);
- if (psma->in_use_list_head == NULL) {
- status = -ENOMEM;
- goto func_end;
- }
- INIT_LIST_HEAD(&psma->in_use_list_head->head);
-
- /* Get a mem node for this hunk-o-memory */
- new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
- psma->dw_vm_base, ul_size);
- /* Place node on the SM allocator's free list */
- if (new_node) {
- lst_put_tail(psma->free_list_head,
- (struct list_head *)new_node);
- } else {
- status = -ENOMEM;
- goto func_end;
- }
+ if (!psma) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ psma->cmm_mgr = hcmm_mgr; /* ref to parent */
+ psma->shm_base = dw_gpp_base_pa; /* SM Base phys */
+ psma->sm_size = ul_size; /* SM segment size in bytes */
+ psma->vm_base = gpp_base_va;
+ psma->dsp_phys_addr_offset = dsp_addr_offset;
+ psma->c_factor = c_factor;
+ psma->dsp_base = dw_dsp_base;
+ psma->dsp_size = ul_dsp_size;
+ if (psma->vm_base == 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* return the actual segment identifier */
+ *sgmt_id = (u32) slot_seg + 1;
+
+ INIT_LIST_HEAD(&psma->free_list);
+ INIT_LIST_HEAD(&psma->in_use_list);
+
+ /* Get a mem node for this hunk-o-memory */
+ new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
+ psma->vm_base, ul_size);
+ /* Place node on the SM allocator's free list */
+ if (new_node) {
+ list_add_tail(&new_node->link, &psma->free_list);
} else {
status = -ENOMEM;
goto func_end;
@@ -621,12 +568,11 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;
func_end:
- if (status && psma) {
- /* Cleanup allocator */
+ /* Cleanup allocator */
+ if (status && psma)
un_register_gppsm_seg(psma);
- }
-
mutex_unlock(&cmm_mgr_obj->cmm_lock);
+
return status;
}
@@ -644,36 +590,36 @@ int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
u32 ul_id = ul_seg_id;
DBC_REQUIRE(ul_seg_id > 0);
- if (hcmm_mgr) {
- if (ul_seg_id == CMM_ALLSEGMENTS)
- ul_id = 1;
-
- if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
- while (ul_id <= CMM_MAXGPPSEGS) {
- mutex_lock(&cmm_mgr_obj->cmm_lock);
- /* slot = seg_id-1 */
- psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
- if (psma != NULL) {
- un_register_gppsm_seg(psma);
- /* Set alctr ptr to NULL for future
- * reuse */
- cmm_mgr_obj->pa_gppsm_seg_tab[ul_id -
- 1] = NULL;
- } else if (ul_seg_id != CMM_ALLSEGMENTS) {
- status = -EPERM;
- }
- mutex_unlock(&cmm_mgr_obj->cmm_lock);
- if (ul_seg_id != CMM_ALLSEGMENTS)
- break;
-
- ul_id++;
- } /* end while */
- } else {
- status = -EINVAL;
+ if (!hcmm_mgr)
+ return -EFAULT;
+
+ if (ul_seg_id == CMM_ALLSEGMENTS)
+ ul_id = 1;
+
+ if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS))
+ return -EINVAL;
+
+ /*
+ * FIXME: CMM_MAXGPPSEGS == 1, so why use a while loop? It seems
+ * ul_seg_id is not needed here; it must always be 1.
+ */
+ while (ul_id <= CMM_MAXGPPSEGS) {
+ mutex_lock(&cmm_mgr_obj->cmm_lock);
+ /* slot = seg_id-1 */
+ psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
+ if (psma != NULL) {
+ un_register_gppsm_seg(psma);
+ /* Set alctr ptr to NULL for future reuse */
+ cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
+ } else if (ul_seg_id != CMM_ALLSEGMENTS) {
+ status = -EPERM;
}
- } else {
- status = -EFAULT;
- }
+ mutex_unlock(&cmm_mgr_obj->cmm_lock);
+ if (ul_seg_id != CMM_ALLSEGMENTS)
+ break;
+
+ ul_id++;
+ } /* end while */
return status;
}
@@ -687,43 +633,24 @@ int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
*/
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
- struct cmm_mnode *mnode_obj = NULL;
- struct cmm_mnode *next_node = NULL;
+ struct cmm_mnode *curr, *tmp;
DBC_REQUIRE(psma != NULL);
- if (psma->free_list_head != NULL) {
- /* free nodes on free list */
- mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head);
- while (mnode_obj) {
- next_node =
- (struct cmm_mnode *)lst_next(psma->free_list_head,
- (struct list_head *)
- mnode_obj);
- lst_remove_elem(psma->free_list_head,
- (struct list_head *)mnode_obj);
- kfree((void *)mnode_obj);
- /* next node. */
- mnode_obj = next_node;
- }
- kfree(psma->free_list_head); /* delete freelist */
- /* free nodes on InUse list */
- mnode_obj =
- (struct cmm_mnode *)lst_first(psma->in_use_list_head);
- while (mnode_obj) {
- next_node =
- (struct cmm_mnode *)lst_next(psma->in_use_list_head,
- (struct list_head *)
- mnode_obj);
- lst_remove_elem(psma->in_use_list_head,
- (struct list_head *)mnode_obj);
- kfree((void *)mnode_obj);
- /* next node. */
- mnode_obj = next_node;
- }
- kfree(psma->in_use_list_head); /* delete InUse list */
+
+ /* free nodes on free list */
+ list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
+ list_del(&curr->link);
+ kfree(curr);
}
- if ((void *)psma->dw_vm_base != NULL)
- MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);
+
+ /* free nodes on InUse list */
+ list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
+ list_del(&curr->link);
+ kfree(curr);
+ }
+
+ if ((void *)psma->vm_base != NULL)
+ MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);
/* Free allocator itself */
kfree(psma);
@@ -758,26 +685,29 @@ static s32 get_slot(struct cmm_object *cmm_mgr_obj)
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
u32 dw_va, u32 ul_size)
{
- struct cmm_mnode *pnode = NULL;
+ struct cmm_mnode *pnode;
DBC_REQUIRE(cmm_mgr_obj != NULL);
DBC_REQUIRE(dw_pa != 0);
DBC_REQUIRE(dw_va != 0);
DBC_REQUIRE(ul_size != 0);
+
/* Check cmm mgr's node freelist */
- if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
+ if (list_empty(&cmm_mgr_obj->node_free_list)) {
pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
+ if (!pnode)
+ return NULL;
} else {
/* surely a valid element */
- pnode = (struct cmm_mnode *)
- lst_get_head(cmm_mgr_obj->node_free_list_head);
- }
- if (pnode) {
- lst_init_elem((struct list_head *)pnode); /* set self */
- pnode->dw_pa = dw_pa; /* Physical addr of start of block */
- pnode->dw_va = dw_va; /* Virtual " " */
- pnode->ul_size = ul_size; /* Size of block */
+ pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
+ struct cmm_mnode, link);
+ list_del_init(&pnode->link);
}
+
+ pnode->pa = dw_pa;
+ pnode->va = dw_va;
+ pnode->size = ul_size;
+
return pnode;
}
@@ -790,9 +720,7 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
DBC_REQUIRE(pnode != NULL);
- lst_init_elem((struct list_head *)pnode); /* init .self ptr */
- lst_put_tail(cmm_mgr_obj->node_free_list_head,
- (struct list_head *)pnode);
+ list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}
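
Taken together, get_node() and delete_node() now form a small descriptor cache: delete_node() parks a retired cmm_mnode on the manager's node_free_list instead of freeing it, and get_node() pops from that list before falling back to kzalloc(). Condensed from the code above (illustrative only):

static struct cmm_mnode *demo_get_node(struct cmm_object *mgr)
{
	struct cmm_mnode *n;

	if (list_empty(&mgr->node_free_list))
		return kzalloc(sizeof(*n), GFP_KERNEL);	/* slow path */

	/* fast path: recycle a parked descriptor */
	n = list_first_entry(&mgr->node_free_list, struct cmm_mnode, link);
	list_del_init(&n->link);
	return n;
}
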
/*
@@ -804,103 +732,57 @@ static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
u32 usize)
{
- if (allocator) {
- struct cmm_mnode *mnode_obj = (struct cmm_mnode *)
- lst_first(allocator->free_list_head);
- while (mnode_obj) {
- if (usize <= (u32) mnode_obj->ul_size) {
- lst_remove_elem(allocator->free_list_head,
- (struct list_head *)mnode_obj);
- return mnode_obj;
- }
- /* next node. */
- mnode_obj = (struct cmm_mnode *)
- lst_next(allocator->free_list_head,
- (struct list_head *)mnode_obj);
+ struct cmm_mnode *node, *tmp;
+
+ if (!allocator)
+ return NULL;
+
+ list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
+ if (usize <= node->size) {
+ list_del(&node->link);
+ return node;
}
}
+
return NULL;
}
/*
* ======== add_to_free_list ========
* Purpose:
- * Coelesce node into the freelist in ascending size order.
+ * Coalesce node into the freelist in ascending size order.
*/
static void add_to_free_list(struct cmm_allocator *allocator,
- struct cmm_mnode *pnode)
+ struct cmm_mnode *node)
{
- struct cmm_mnode *node_prev = NULL;
- struct cmm_mnode *node_next = NULL;
- struct cmm_mnode *mnode_obj;
- u32 dw_this_pa;
- u32 dw_next_pa;
+ struct cmm_mnode *curr;
- DBC_REQUIRE(pnode != NULL);
- DBC_REQUIRE(allocator != NULL);
- dw_this_pa = pnode->dw_pa;
- dw_next_pa = NEXT_PA(pnode);
- mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
- while (mnode_obj) {
- if (dw_this_pa == NEXT_PA(mnode_obj)) {
- /* found the block ahead of this one */
- node_prev = mnode_obj;
- } else if (dw_next_pa == mnode_obj->dw_pa) {
- node_next = mnode_obj;
- }
- if ((node_prev == NULL) || (node_next == NULL)) {
- /* next node. */
- mnode_obj = (struct cmm_mnode *)
- lst_next(allocator->free_list_head,
- (struct list_head *)mnode_obj);
- } else {
- /* got 'em */
- break;
- }
- } /* while */
- if (node_prev != NULL) {
- /* combine with previous block */
- lst_remove_elem(allocator->free_list_head,
- (struct list_head *)node_prev);
- /* grow node to hold both */
- pnode->ul_size += node_prev->ul_size;
- pnode->dw_pa = node_prev->dw_pa;
- pnode->dw_va = node_prev->dw_va;
- /* place node on mgr nodeFreeList */
- delete_node((struct cmm_object *)allocator->hcmm_mgr,
- node_prev);
- }
- if (node_next != NULL) {
- /* combine with next block */
- lst_remove_elem(allocator->free_list_head,
- (struct list_head *)node_next);
- /* grow da node */
- pnode->ul_size += node_next->ul_size;
- /* place node on mgr nodeFreeList */
- delete_node((struct cmm_object *)allocator->hcmm_mgr,
- node_next);
+ if (!node) {
+ pr_err("%s: failed - node is NULL\n", __func__);
+ return;
}
- /* Now, let's add to freelist in increasing size order */
- mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
- while (mnode_obj) {
- if (pnode->ul_size <= mnode_obj->ul_size)
- break;
- /* next node. */
- mnode_obj =
- (struct cmm_mnode *)lst_next(allocator->free_list_head,
- (struct list_head *)mnode_obj);
+ list_for_each_entry(curr, &allocator->free_list, link) {
+ if (NEXT_PA(curr) == node->pa) {
+ curr->size += node->size;
+ delete_node(allocator->cmm_mgr, node);
+ return;
+ }
+ if (curr->pa == NEXT_PA(node)) {
+ curr->pa = node->pa;
+ curr->va = node->va;
+ curr->size += node->size;
+ delete_node(allocator->cmm_mgr, node);
+ return;
+ }
}
- /* if mnode_obj is NULL then add our pnode to the end of the freelist */
- if (mnode_obj == NULL) {
- lst_put_tail(allocator->free_list_head,
- (struct list_head *)pnode);
- } else {
- /* insert our node before the current traversed node */
- lst_insert_before(allocator->free_list_head,
- (struct list_head *)pnode,
- (struct list_head *)mnode_obj);
+ list_for_each_entry(curr, &allocator->free_list, link) {
+ if (curr->size >= node->size) {
+ list_add_tail(&node->link, &curr->link);
+ return;
+ }
}
+ list_add_tail(&node->link, &allocator->free_list);
}
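
Two details of the new add_to_free_list() are worth noting. First, it merges the freed block with at most one physically adjacent neighbour per call, where the old version could absorb both a preceding and a following block in a single pass. Second, in the size-ordered insert, list_add_tail() is passed the current entry's link as the "head" argument, which links the new node immediately before that entry. A hypothetical illustration:

/*
 * Hypothetical: the free list holds blocks of size 16, 64 and 256.
 * Freeing a non-adjacent 32-byte block stops at the 64-byte entry,
 * and list_add_tail(&node->link, &curr->link) produces the order
 * 16, 32, 64, 256.
 */
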
/*
@@ -912,19 +794,10 @@ static void add_to_free_list(struct cmm_allocator *allocator,
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
u32 ul_seg_id)
{
- struct cmm_allocator *allocator = NULL;
-
DBC_REQUIRE(cmm_mgr_obj != NULL);
DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
- allocator = cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
- if (allocator != NULL) {
- /* make sure it's for real */
- if (!allocator) {
- allocator = NULL;
- DBC_ASSERT(false);
- }
- }
- return allocator;
+
+ return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}
/*
@@ -955,9 +828,9 @@ int cmm_xlator_create(struct cmm_xlatorobject **xlator,
xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
if (xlator_object != NULL) {
- xlator_object->hcmm_mgr = hcmm_mgr; /* ref back to CMM */
+ xlator_object->cmm_mgr = hcmm_mgr; /* ref back to CMM */
/* SM seg_id */
- xlator_object->ul_seg_id = xlator_attrs->ul_seg_id;
+ xlator_object->seg_id = xlator_attrs->seg_id;
} else {
status = -ENOMEM;
}
@@ -980,17 +853,17 @@ void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(xlator != NULL);
- DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
+ DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
DBC_REQUIRE(va_buf != NULL);
DBC_REQUIRE(pa_size > 0);
- DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
+ DBC_REQUIRE(xlator_obj->seg_id > 0);
if (xlator_obj) {
- attrs.ul_seg_id = xlator_obj->ul_seg_id;
+ attrs.seg_id = xlator_obj->seg_id;
__raw_writel(0, va_buf);
/* Alloc SM */
pbuf =
- cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL);
+ cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL);
if (pbuf) {
/* convert to translator(node/strm) process Virtual
* address */
@@ -1016,14 +889,14 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(buf_va != NULL);
- DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
+ DBC_REQUIRE(xlator_obj->seg_id > 0);
if (xlator_obj) {
/* convert Va to Pa so we can free it. */
buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
if (buf_pa) {
- status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
- xlator_obj->ul_seg_id);
+ status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
+ xlator_obj->seg_id);
if (status) {
/* Uh oh, this shouldn't happen. Descriptor
* gone! */
@@ -1052,10 +925,10 @@ int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
if (xlator_obj) {
if (set_info) {
/* set translators virtual address range */
- xlator_obj->dw_virt_base = (u32) *paddr;
- xlator_obj->ul_virt_size = ul_size;
+ xlator_obj->virt_base = (u32) *paddr;
+ xlator_obj->virt_size = ul_size;
} else { /* return virt base address */
- *paddr = (u8 *) xlator_obj->dw_virt_base;
+ *paddr = (u8 *) xlator_obj->virt_base;
}
} else {
status = -EFAULT;
@@ -1082,10 +955,10 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
if (!xlator_obj)
goto loop_cont;
- cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
+ cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
/* get this translator's default SM allocator */
- DBC_ASSERT(xlator_obj->ul_seg_id > 0);
- allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
+ DBC_ASSERT(xlator_obj->seg_id > 0);
+ allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
if (!allocator)
goto loop_cont;
@@ -1095,21 +968,21 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
/* Gpp Va = Va Base + offset */
dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
allocator->
- ul_dsp_size);
- dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
+ dsp_size);
+ dw_addr_xlate = xlator_obj->virt_base + dw_offset;
/* Check if translated Va base is in range */
- if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
+ if ((dw_addr_xlate < xlator_obj->virt_base) ||
(dw_addr_xlate >=
- (xlator_obj->dw_virt_base +
- xlator_obj->ul_virt_size))) {
+ (xlator_obj->virt_base +
+ xlator_obj->virt_size))) {
dw_addr_xlate = 0; /* bad address */
}
} else {
/* Gpp PA = Gpp Base + offset */
dw_offset =
- (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
+ (u8 *) paddr - (u8 *) xlator_obj->virt_base;
dw_addr_xlate =
- allocator->shm_base - allocator->ul_dsp_size +
+ allocator->shm_base - allocator->dsp_size +
dw_offset;
}
} else {
@@ -1119,16 +992,16 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
/* Got Gpp Pa now, convert to DSP Pa */
dw_addr_xlate =
- GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
+ GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
dw_addr_xlate,
- allocator->dw_dsp_phys_addr_offset *
+ allocator->dsp_phys_addr_offset *
allocator->c_factor);
} else if (xtype == CMM_DSPPA2PA) {
/* Got DSP Pa, convert to GPP Pa */
dw_addr_xlate =
- DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
+ DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
dw_addr_xlate,
- allocator->dw_dsp_phys_addr_offset *
+ allocator->dsp_phys_addr_offset *
allocator->c_factor);
}
loop_cont:
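
Since DSPPA2GPPPA() simply adds its third argument and ignores its first, the translations above reduce to plain offset arithmetic, with the direction carried by c_factor. Hypothetical numbers, assuming CMM_ADDTODSPPA is +1 and CMM_SUBFROMDSPPA is -1:

/*
 * Hypothetical: dsp_phys_addr_offset = 0x40000000, c_factor = +1.
 * CMM_DSPPA2PA:  gpp_pa = DSPPA2GPPPA(base, dsp_pa, 0x40000000 * 1)
 *                       = dsp_pa + 0x40000000
 * With c_factor = -1 the same segment offset would be subtracted.
 */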