Diffstat (limited to 'drivers/staging/tidspbridge/core/tiomap3430.c')
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430.c | 1083
 1 file changed, 975 insertions(+), 108 deletions(-)
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index f22bc12bc0d3..1be081f917a7 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -23,7 +23,6 @@
#include <dspbridge/host_os.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
-#include <plat/control.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
@@ -35,6 +34,10 @@
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
@@ -47,6 +50,7 @@
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
+#include <dspbridge/dmm.h>
#include <dspbridge/wdt.h>
/* ----------------------------------- Local */
@@ -67,6 +71,20 @@
#define MMU_SMALL_PAGE_MASK 0xFFFFF000
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE 512
+#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
+
+/*
+ * This is a totally ugly layer violation, but needed until
+ * omap_ctrl_set_dsp_boot*() are provided.
+ */
+#define OMAP3_IVA2_BOOTMOD_IDLE 1
+#define OMAP2_CONTROL_GENERAL 0x270
+#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
+#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
+
+#define OMAP343X_CTRL_REGADDR(reg) \
+ OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
+
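For illustration only (not part of the patch): these definitions are used
further down in bridge_brd_start to program the IVA2 boot registers
directly; dsp_addr here stands for a hypothetical, suitably aligned boot
address.

	/* sketch: poke the boot address and boot mode through the control
	 * module, pending proper omap_ctrl_set_dsp_boot*() helpers */
	__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
		     OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_IVA2_BOOTADDR));
	__raw_writel(OMAP3_IVA2_BOOTMOD_IDLE,
		     OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_IVA2_BOOTMOD));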
/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
@@ -91,6 +109,12 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
u8 *host_buff, u32 dsp_addr,
u32 ul_num_bytes, u32 mem_type);
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+ u32 ul_mpu_addr, u32 virt_addr,
+ u32 ul_num_bytes, u32 ul_map_attr,
+ struct page **mapped_pages);
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+ u32 virt_addr, u32 ul_num_bytes);
static int bridge_dev_create(struct bridge_dev_context
**dev_cntxt,
struct dev_object *hdev_obj,
@@ -98,8 +122,57 @@ static int bridge_dev_create(struct bridge_dev_context
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
u32 dw_cmd, void *pargs);
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
+static u32 user_va2_pa(struct mm_struct *mm, u32 address);
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+ u32 va, u32 size,
+ struct hw_mmu_map_attrs_t *map_attrs);
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+ u32 size, struct hw_mmu_map_attrs_t *attrs);
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+ u32 ul_mpu_addr, u32 virt_addr,
+ u32 ul_num_bytes,
+ struct hw_mmu_map_attrs_t *hw_attrs);
+
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
+/* ----------------------------------- Globals */
+
+/* Attributes of L2 page tables for DSP MMU */
+struct page_info {
+ u32 num_entries; /* Number of valid PTEs in the L2 PT */
+};
+
+/* Attributes used to manage the DSP MMU page tables */
+struct pg_table_attrs {
+ spinlock_t pg_lock; /* Critical section object handle */
+
+ u32 l1_base_pa; /* Physical address of the L1 PT */
+ u32 l1_base_va; /* Virtual address of the L1 PT */
+ u32 l1_size; /* Size of the L1 PT */
+ u32 l1_tbl_alloc_pa;
+ /* Physical address of Allocated mem for L1 table. May not be aligned */
+ u32 l1_tbl_alloc_va;
+ /* Virtual address of Allocated mem for L1 table. May not be aligned */
+ u32 l1_tbl_alloc_sz;
+ /* Size of consistent memory allocated for L1 table.
+ * May not be aligned */
+
+ u32 l2_base_pa; /* Physical address of the L2 PT */
+ u32 l2_base_va; /* Virtual address of the L2 PT */
+ u32 l2_size; /* Size of the L2 PT */
+ u32 l2_tbl_alloc_pa;
+ /* Physical address of Allocated mem for L2 table. May not be aligned */
+ u32 l2_tbl_alloc_va;
+ /* Virtual address of Allocated mem for L2 table. May not be aligned */
+ u32 l2_tbl_alloc_sz;
+ /* Size of consistent memory allocated for L2 table.
+ * May not be aligned */
+
+ u32 l2_num_pages; /* Number of allocated L2 PT */
+ /* Array [l2_num_pages] of L2 PT info structs */
+ struct page_info *pg_info;
+};
+
/*
* This Bridge driver's function interface table.
*/
@@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = {
bridge_brd_set_state,
bridge_brd_mem_copy,
bridge_brd_mem_write,
+ bridge_brd_mem_map,
+ bridge_brd_mem_un_map,
/* The following CHNL functions are provided by chnl_io.lib: */
bridge_chnl_create,
bridge_chnl_destroy,
@@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = {
bridge_msg_set_queue_id,
};
+static inline void flush_all(struct bridge_dev_context *dev_context)
+{
+ if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+ dev_context->dw_brd_state == BRD_HIBERNATION)
+ wake_dsp(dev_context, NULL);
+
+ hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
+}
+
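Usage note (an inference from the code): the TLB flush register lives in
the IVA2 domain, so a hibernating DSP is woken via wake_dsp() before it is
touched. Every map and unmap path below ends the same way, whatever the
status:

	/* typical call site, as in bridge_brd_mem_map/_un_map below */
	flush_all(dev_context);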
+static void bad_page_dump(u32 pa, struct page *pg)
+{
+ pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
+ pr_emerg("Bad page state in process '%s'\n"
+ "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+ "Backtrace:\n",
+ current->comm, pg, (int)(2 * sizeof(unsigned long)),
+ (unsigned long)pg->flags, pg->mapping,
+ page_mapcount(pg), page_count(pg));
+ dump_stack();
+}
+
/*
* ======== bridge_drv_entry ========
* purpose:
@@ -203,7 +299,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
}
-
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
dsp_clk_enable(DSP_CLK_IVA2);
/* set the device state to IDLE */
@@ -274,17 +371,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
{
int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt;
- struct iommu *mmu = NULL;
- struct shm_segs *sm_sg;
- int l4_i = 0, tlb_i = 0;
- u32 sg0_da = 0, sg1_da = 0;
- struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
u32 dw_sync_addr = 0;
u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */
u32 ul_shm_base_virt; /* Dsp Virt SM base addr */
u32 ul_tlb_base_virt; /* Base of MMU TLB entry */
/* Offset of shm_base_virt from tlb_base_virt */
u32 ul_shm_offset_virt;
+ s32 entry_ndx;
+	s32 itmp_entry_ndx = 0;	/* index of the next DSP-MMU TLB entry */
struct cfg_hostres *resources = NULL;
u32 temp;
u32 ul_dsp_clk_rate;
@@ -305,12 +399,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
ul_shm_base_virt *= DSPWORDSIZE;
DBC_ASSERT(ul_shm_base_virt != 0);
/* DSP Virtual address */
- ul_tlb_base_virt = dev_context->sh_s.seg0_da;
+ ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
ul_shm_offset_virt =
ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
/* Kernel logical address */
- ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;
+ ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
DBC_ASSERT(ul_shm_base != 0);
/* 2nd wd is used as sync field */
@@ -345,83 +439,78 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
OMAP343X_CONTROL_IVA2_BOOTMOD));
}
}
-
if (!status) {
+ /* Reset and Unreset the RST2, so that BOOTADDR is copied to
+ * IVA2 SYSC register */
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
+ OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ udelay(100);
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
- mmu = dev_context->dsp_mmu;
- if (mmu)
- dsp_mmu_exit(mmu);
- mmu = dsp_mmu_init();
- if (IS_ERR(mmu)) {
- dev_err(bridge, "dsp_mmu_init failed!\n");
- dev_context->dsp_mmu = NULL;
- status = (int)mmu;
- }
- }
- if (!status) {
- dev_context->dsp_mmu = mmu;
- sm_sg = &dev_context->sh_s;
- sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
- sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
- if (IS_ERR_VALUE(sg0_da)) {
- status = (int)sg0_da;
- sg0_da = 0;
- }
- }
- if (!status) {
- sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
- sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
- if (IS_ERR_VALUE(sg1_da)) {
- status = (int)sg1_da;
- sg1_da = 0;
- }
- }
- if (!status) {
- u32 da;
- for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
- if (!tlb[tlb_i].ul_gpp_pa)
+ udelay(100);
+
+	/* Disable the DSP MMU */
+ hw_mmu_disable(resources->dw_dmmu_base);
+ /* Disable TWL */
+ hw_mmu_twl_disable(resources->dw_dmmu_base);
+
+ /* Only make TLB entry if both addresses are non-zero */
+ for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
+ entry_ndx++) {
+ struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+ struct hw_mmu_map_attrs_t map_attrs = {
+ .endianism = e->endianism,
+ .element_size = e->elem_size,
+ .mixed_size = e->mixed_mode,
+ };
+
+ if (!e->ul_gpp_pa || !e->ul_dsp_va)
continue;
- dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
- " 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
- tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);
-
- da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
- tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
- IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
- if (IS_ERR_VALUE(da)) {
- status = (int)da;
- break;
- }
- }
- }
- if (!status) {
- u32 da;
- l4_i = 0;
- while (l4_peripheral_table[l4_i].phys_addr) {
- da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
- dsp_virt_addr, l4_peripheral_table[l4_i].
- phys_addr, PAGE_SIZE,
- IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
- if (IS_ERR_VALUE(da)) {
- status = (int)da;
- break;
- }
- l4_i++;
+ dev_dbg(bridge,
+ "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
+ itmp_entry_ndx,
+ e->ul_gpp_pa,
+ e->ul_dsp_va,
+ e->ul_size);
+
+ hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
+ e->ul_gpp_pa,
+ e->ul_dsp_va,
+ e->ul_size,
+ itmp_entry_ndx,
+ &map_attrs, 1, 1);
+
+ itmp_entry_ndx++;
}
}
/* Lock the above TLB entries and get the BIOS and load monitor timer
* information */
if (!status) {
+ hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
+ hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
+ hw_mmu_ttb_set(resources->dw_dmmu_base,
+ dev_context->pt_attrs->l1_base_pa);
+ hw_mmu_twl_enable(resources->dw_dmmu_base);
+		/* Enable the SmartIdle and AutoIdle bits in MMU_SYSCONFIG */
+
+ temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
+ temp = (temp & 0xFFFFFFEF) | 0x11;
+ __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
+
+ /* Let the DSP MMU run */
+ hw_mmu_enable(resources->dw_dmmu_base);
+
/* Enable the BIOS clock */
(void)dev_get_symbol(dev_context->hdev_obj,
BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
(void)dev_get_symbol(dev_context->hdev_obj,
BRIDGEINIT_LOADMON_GPTIMER,
&ul_load_monitor_timer);
+ }
+ if (!status) {
if (ul_load_monitor_timer != 0xFFFF) {
clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
ul_load_monitor_timer;
@@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
dev_dbg(bridge, "Not able to get the symbol for Load "
"Monitor Timer\n");
}
+ }
+ if (!status) {
if (ul_bios_gp_timer != 0xFFFF) {
clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
ul_bios_gp_timer;
@@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
dev_dbg(bridge,
"Not able to get the symbol for BIOS Timer\n");
}
+ }
+ if (!status) {
/* Set the DSP clock rate */
(void)dev_get_symbol(dev_context->hdev_obj,
"_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Let DSP go */
dev_dbg(bridge, "%s Unreset\n", __func__);
+ /* Enable DSP MMU Interrupts */
+ hw_mmu_event_enable(resources->dw_dmmu_base,
+ HW_MMU_ALL_INTERRUPTS);
/* release the RST1, DSP starts executing now .. */
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* update board state */
dev_context->dw_brd_state = BRD_RUNNING;
- return 0;
+ /* (void)chnlsm_enable_interrupt(dev_context); */
} else {
dev_context->dw_brd_state = BRD_UNKNOWN;
}
}
-
- while (tlb_i--) {
- if (!tlb[tlb_i].ul_gpp_pa)
- continue;
- iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
- }
- while (l4_i--)
- iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
- if (sg0_da)
- iommu_kunmap(mmu, sg0_da);
- if (sg1_da)
- iommu_kunmap(mmu, sg1_da);
return status;
}
@@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt;
+ struct pg_table_attrs *pt_attrs;
u32 dsp_pwr_state;
- int i;
- struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
struct omap_dsp_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data;
@@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
dsp_wdt_enable(false);
- /* Reset DSP */
- (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
- OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-
+ /* This is a good place to clear the MMU page tables as well */
+ if (dev_context->pt_attrs) {
+ pt_attrs = dev_context->pt_attrs;
+ memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
+ memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
+ memset((u8 *) pt_attrs->pg_info, 0x00,
+ (pt_attrs->l2_num_pages * sizeof(struct page_info)));
+ }
/* Disable the mailbox interrupts */
if (dev_context->mbox) {
omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
omap_mbox_put(dev_context->mbox);
dev_context->mbox = NULL;
}
- if (dev_context->dsp_mmu) {
- pr_err("Proc stop mmu if statement\n");
- for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
- if (!tlb[i].ul_gpp_pa)
- continue;
- iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
- }
- i = 0;
- while (l4_peripheral_table[i].phys_addr) {
- iommu_kunmap(dev_context->dsp_mmu,
- l4_peripheral_table[i].dsp_virt_addr);
- i++;
- }
- iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
- iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
- dsp_mmu_exit(dev_context->dsp_mmu);
- dev_context->dsp_mmu = NULL;
- }
- /* Reset IVA IOMMU*/
- (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
- OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+	/* Assert all IVA2 resets (RST1, RST2, RST3) */
+ (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
+ OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
dsp_clock_disable_all(dev_context->dsp_per_clks);
dsp_clk_disable(DSP_CLK_IVA2);
@@ -681,6 +750,10 @@ static int bridge_dev_create(struct bridge_dev_context
struct bridge_dev_context *dev_context = NULL;
s32 entry_ndx;
struct cfg_hostres *resources = config_param;
+ struct pg_table_attrs *pt_attrs;
+ u32 pg_tbl_pa;
+ u32 pg_tbl_va;
+ u32 align_size;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
/* Allocate and initialize a data structure to contain the bridge driver
@@ -711,8 +784,97 @@ static int bridge_dev_create(struct bridge_dev_context
if (!dev_context->dw_dsp_base_addr)
status = -EPERM;
+ pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
+ if (pt_attrs != NULL) {
+		/* Assuming that we use only the DSP's memory map
+		 * up to 0x4000:0000, we need only 1024
+		 * L1 entries, i.e. L1 size = 4K */
+ pt_attrs->l1_size = 0x1000;
+ align_size = pt_attrs->l1_size;
+ /* Align sizes are expected to be power of 2 */
+ /* we like to get aligned on L1 table size */
+ pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
+ align_size, &pg_tbl_pa);
+
+ /* Check if the PA is aligned for us */
+ if ((pg_tbl_pa) & (align_size - 1)) {
+			/* PA not aligned to page table size;
+			 * allocate twice the size and align within it */
+ mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
+ pt_attrs->l1_size);
+ /* we like to get aligned on L1 table size */
+ pg_tbl_va =
+ (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
+ align_size, &pg_tbl_pa);
+ /* We should be able to get aligned table now */
+ pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+ pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+ pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
+ /* Align the PA to the next 'align' boundary */
+ pt_attrs->l1_base_pa =
+ ((pg_tbl_pa) +
+ (align_size - 1)) & (~(align_size - 1));
+ pt_attrs->l1_base_va =
+ pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
+ } else {
+ /* We got aligned PA, cool */
+ pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+ pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+ pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
+ pt_attrs->l1_base_pa = pg_tbl_pa;
+ pt_attrs->l1_base_va = pg_tbl_va;
+ }
+ if (pt_attrs->l1_base_va)
+ memset((u8 *) pt_attrs->l1_base_va, 0x00,
+ pt_attrs->l1_size);
+
+		/* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM
+		 * + L4 pages */
+ pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
+ pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
+ pt_attrs->l2_num_pages;
+ align_size = 4; /* Make it u32 aligned */
+		/* allocate consistent memory for all the L2 tables */
+ pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
+ align_size, &pg_tbl_pa);
+ pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
+ pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
+ pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
+ pt_attrs->l2_base_pa = pg_tbl_pa;
+ pt_attrs->l2_base_va = pg_tbl_va;
+
+ if (pt_attrs->l2_base_va)
+ memset((u8 *) pt_attrs->l2_base_va, 0x00,
+ pt_attrs->l2_size);
+
+ pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
+ sizeof(struct page_info), GFP_KERNEL);
+ dev_dbg(bridge,
+ "L1 pa %x, va %x, size %x\n L2 pa %x, va "
+ "%x, size %x\n", pt_attrs->l1_base_pa,
+ pt_attrs->l1_base_va, pt_attrs->l1_size,
+ pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
+ pt_attrs->l2_size);
+ dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
+ pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
+ }
+ if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
+ (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
+ dev_context->pt_attrs = pt_attrs;
+ else
+ status = -ENOMEM;
+
if (!status) {
+ spin_lock_init(&pt_attrs->pg_lock);
dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
+
+ /* Set the Clock Divisor for the DSP module */
+ udelay(5);
+ /* MMU address is obtained from the host
+ * resources struct */
+ dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
+ }
+ if (!status) {
dev_context->hdev_obj = hdev_obj;
/* Store current board state. */
dev_context->dw_brd_state = BRD_UNKNOWN;
@@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context
/* Return ptr to our device state to the DSP API for storage */
*dev_cntxt = dev_context;
} else {
+ if (pt_attrs != NULL) {
+ kfree(pt_attrs->pg_info);
+
+ if (pt_attrs->l2_tbl_alloc_va) {
+ mem_free_phys_mem((void *)
+ pt_attrs->l2_tbl_alloc_va,
+ pt_attrs->l2_tbl_alloc_pa,
+ pt_attrs->l2_tbl_alloc_sz);
+ }
+ if (pt_attrs->l1_tbl_alloc_va) {
+ mem_free_phys_mem((void *)
+ pt_attrs->l1_tbl_alloc_va,
+ pt_attrs->l1_tbl_alloc_pa,
+ pt_attrs->l1_tbl_alloc_sz);
+ }
+ }
+ kfree(pt_attrs);
kfree(dev_context);
}
func_end:
@@ -789,6 +968,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
*/
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
+ struct pg_table_attrs *pt_attrs;
int status = 0;
struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
dev_ctxt;
@@ -802,6 +982,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
/* first put the device to stop state */
bridge_brd_stop(dev_context);
+ if (dev_context->pt_attrs) {
+ pt_attrs = dev_context->pt_attrs;
+ kfree(pt_attrs->pg_info);
+
+ if (pt_attrs->l2_tbl_alloc_va) {
+ mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
+ pt_attrs->l2_tbl_alloc_pa,
+ pt_attrs->l2_tbl_alloc_sz);
+ }
+ if (pt_attrs->l1_tbl_alloc_va) {
+ mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
+ pt_attrs->l1_tbl_alloc_pa,
+ pt_attrs->l1_tbl_alloc_sz);
+ }
+ kfree(pt_attrs);
+
+ }
if (dev_context->resources) {
host_res = dev_context->resources;
@@ -832,6 +1029,8 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
iounmap((void *)host_res->dw_mem_base[3]);
if (host_res->dw_mem_base[4])
iounmap((void *)host_res->dw_mem_base[4]);
+ if (host_res->dw_dmmu_base)
+ iounmap(host_res->dw_dmmu_base);
if (host_res->dw_per_base)
iounmap(host_res->dw_per_base);
if (host_res->dw_per_pm_base)
@@ -845,6 +1044,7 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
host_res->dw_mem_base[2] = (u32) NULL;
host_res->dw_mem_base[3] = (u32) NULL;
host_res->dw_mem_base[4] = (u32) NULL;
+ host_res->dw_dmmu_base = NULL;
host_res->dw_sys_ctrl_base = NULL;
kfree(host_res);
@@ -928,6 +1128,673 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
}
/*
+ * ======== bridge_brd_mem_map ========
+ * This function maps MPU buffer to the DSP address space. It performs
+ * linear to physical address translation if required. It translates each
+ * page since linear addresses can be physically non-contiguous
+ * All address & size arguments are assumed to be page aligned (in proc.c)
+ *
+ * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
+ */
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+ u32 ul_mpu_addr, u32 virt_addr,
+ u32 ul_num_bytes, u32 ul_map_attr,
+ struct page **mapped_pages)
+{
+ u32 attrs;
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ struct hw_mmu_map_attrs_t hw_attrs;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ u32 write = 0;
+ u32 num_usr_pgs = 0;
+ struct page *mapped_page, *pg;
+ s32 pg_num;
+ u32 va = virt_addr;
+ struct task_struct *curr_task = current;
+ u32 pg_i = 0;
+ u32 mpu_addr, pa;
+
+ dev_dbg(bridge,
+ "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
+ __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
+ ul_map_attr);
+ if (ul_num_bytes == 0)
+ return -EINVAL;
+
+ if (ul_map_attr & DSP_MAP_DIR_MASK) {
+ attrs = ul_map_attr;
+ } else {
+ /* Assign default attributes */
+ attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
+ }
+ /* Take mapping properties */
+ if (attrs & DSP_MAPBIGENDIAN)
+ hw_attrs.endianism = HW_BIG_ENDIAN;
+ else
+ hw_attrs.endianism = HW_LITTLE_ENDIAN;
+
+ hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
+ ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
+ /* Ignore element_size if mixed_size is enabled */
+ if (hw_attrs.mixed_size == 0) {
+ if (attrs & DSP_MAPELEMSIZE8) {
+ /* Size is 8 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE8BIT;
+ } else if (attrs & DSP_MAPELEMSIZE16) {
+ /* Size is 16 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE16BIT;
+ } else if (attrs & DSP_MAPELEMSIZE32) {
+ /* Size is 32 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE32BIT;
+ } else if (attrs & DSP_MAPELEMSIZE64) {
+ /* Size is 64 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE64BIT;
+ } else {
+ /*
+ * Mixedsize isn't enabled, so size can't be
+ * zero here
+ */
+ return -EINVAL;
+ }
+ }
+ if (attrs & DSP_MAPDONOTLOCK)
+ hw_attrs.donotlockmpupage = 1;
+ else
+ hw_attrs.donotlockmpupage = 0;
+
+ if (attrs & DSP_MAPVMALLOCADDR) {
+ return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
+ ul_num_bytes, &hw_attrs);
+ }
+ /*
+ * Do OS-specific user-va to pa translation.
+ * Combine physically contiguous regions to reduce TLBs.
+ * Pass the translated pa to pte_update.
+ */
+ if ((attrs & DSP_MAPPHYSICALADDR)) {
+ status = pte_update(dev_context, ul_mpu_addr, virt_addr,
+ ul_num_bytes, &hw_attrs);
+ goto func_cont;
+ }
+
+ /*
+ * Important Note: ul_mpu_addr is mapped from user application process
+ * to current process - it must lie completely within the current
+ * virtual memory address space in order to be of use to us here!
+ */
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, ul_mpu_addr);
+ if (vma)
+ dev_dbg(bridge,
+ "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
+ "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+ ul_num_bytes, vma->vm_start, vma->vm_end,
+ vma->vm_flags);
+
+ /*
+ * It is observed that under some circumstances, the user buffer is
+ * spread across several VMAs. So loop through and check if the entire
+ * user buffer is covered
+ */
+ while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
+ /* jump to the next VMA region */
+		vma = find_vma(mm, vma->vm_end + 1);
+		if (!vma)
+			break;	/* ran past the last VMA; handled below */
+		dev_dbg(bridge,
+			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
+			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+			ul_num_bytes, vma->vm_start, vma->vm_end,
+			vma->vm_flags);
+ }
+ if (!vma) {
+ pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
+ __func__, ul_mpu_addr, ul_num_bytes);
+ status = -EINVAL;
+ up_read(&mm->mmap_sem);
+ goto func_cont;
+ }
+
+ if (vma->vm_flags & VM_IO) {
+ num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+ mpu_addr = ul_mpu_addr;
+
+ /* Get the physical addresses for user buffer */
+ for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+ pa = user_va2_pa(mm, mpu_addr);
+ if (!pa) {
+ status = -EPERM;
+ pr_err("DSPBRIDGE: VM_IO mapping physical"
+ "address is invalid\n");
+ break;
+ }
+ if (pfn_valid(__phys_to_pfn(pa))) {
+ pg = PHYS_TO_PAGE(pa);
+ get_page(pg);
+ if (page_count(pg) < 1) {
+ pr_err("Bad page in VM_IO buffer\n");
+ bad_page_dump(pa, pg);
+ }
+ }
+ status = pte_set(dev_context->pt_attrs, pa,
+ va, HW_PAGE_SIZE4KB, &hw_attrs);
+ if (status)
+ break;
+
+ va += HW_PAGE_SIZE4KB;
+ mpu_addr += HW_PAGE_SIZE4KB;
+ pa += HW_PAGE_SIZE4KB;
+ }
+ } else {
+ num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ write = 1;
+
+ for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+ pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
+ write, 1, &mapped_page, NULL);
+ if (pg_num > 0) {
+ if (page_count(mapped_page) < 1) {
+ pr_err("Bad page count after doing"
+ "get_user_pages on"
+ "user buffer\n");
+ bad_page_dump(page_to_phys(mapped_page),
+ mapped_page);
+ }
+ status = pte_set(dev_context->pt_attrs,
+ page_to_phys(mapped_page), va,
+ HW_PAGE_SIZE4KB, &hw_attrs);
+ if (status)
+ break;
+
+ if (mapped_pages)
+ mapped_pages[pg_i] = mapped_page;
+
+ va += HW_PAGE_SIZE4KB;
+ ul_mpu_addr += HW_PAGE_SIZE4KB;
+ } else {
+ pr_err("DSPBRIDGE: get_user_pages FAILED,"
+ "MPU addr = 0x%x,"
+ "vma->vm_flags = 0x%lx,"
+ "get_user_pages Err"
+ "Value = %d, Buffer"
+ "size=0x%x\n", ul_mpu_addr,
+ vma->vm_flags, pg_num, ul_num_bytes);
+ status = -EPERM;
+ break;
+ }
+ }
+ }
+ up_read(&mm->mmap_sem);
+func_cont:
+ if (status) {
+ /*
+		 * Roll back the pages already mapped, in case the
+		 * mapping failed partway through
+ */
+ if (pg_i) {
+ bridge_brd_mem_un_map(dev_context, virt_addr,
+ (pg_i * PG_SIZE4K));
+ }
+ status = -EPERM;
+ }
+ /*
+ * In any case, flush the TLB
+	 * This is called from here instead of from pte_update to avoid unnecessary
+ * repetition while mapping non-contiguous physical regions of a virtual
+ * region
+ */
+ flush_all(dev_context);
+ dev_dbg(bridge, "%s status %x\n", __func__, status);
+ return status;
+}
+
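A hedged usage sketch (the names and the flag combination are illustrative;
proc.c is the real caller and enforces the page alignment mentioned above):

	/* map a 64KB page-aligned user buffer for the DSP; pages[]
	 * receives the pinned struct page pointers for a later unmap */
	status = bridge_brd_mem_map(dev_ctxt, buf_mpu_va, buf_dsp_va,
				    16 * HW_PAGE_SIZE4KB,
				    DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE32,
				    pages);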
+/*
+ * ======== bridge_brd_mem_un_map ========
+ * Invalidate the PTEs for the DSP VA block to be unmapped.
+ *
+ * PTEs of a mapped memory block are contiguous in any page table
+ * So, instead of looking up the PTE address for every 4K block,
+ * we clear consecutive PTEs until we unmap all the bytes
+ */
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+ u32 virt_addr, u32 ul_num_bytes)
+{
+ u32 l1_base_va;
+ u32 l2_base_va;
+ u32 l2_base_pa;
+ u32 l2_page_num;
+ u32 pte_val;
+ u32 pte_size;
+ u32 pte_count;
+ u32 pte_addr_l1;
+ u32 pte_addr_l2 = 0;
+ u32 rem_bytes;
+ u32 rem_bytes_l2;
+ u32 va_curr;
+ struct page *pg = NULL;
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ struct pg_table_attrs *pt = dev_context->pt_attrs;
+ u32 temp;
+ u32 paddr;
+ u32 numof4k_pages = 0;
+
+ va_curr = virt_addr;
+ rem_bytes = ul_num_bytes;
+ rem_bytes_l2 = 0;
+ l1_base_va = pt->l1_base_va;
+ pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+ dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
+ "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
+ ul_num_bytes, l1_base_va, pte_addr_l1);
+
+ while (rem_bytes && !status) {
+ u32 va_curr_orig = va_curr;
+ /* Find whether the L1 PTE points to a valid L2 PT */
+ pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+ pte_val = *(u32 *) pte_addr_l1;
+ pte_size = hw_mmu_pte_size_l1(pte_val);
+
+ if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
+ goto skip_coarse_page;
+
+ /*
+ * Get the L2 PA from the L1 PTE, and find
+ * corresponding L2 VA
+ */
+ l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+ l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+ l2_page_num =
+ (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+ /*
+ * Find the L2 PTE address from which we will start
+ * clearing, the number of PTEs to be cleared on this
+ * page, and the size of VA space that needs to be
+ * cleared on this L2 page
+ */
+ pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
+ pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
+ pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
+ if (rem_bytes < (pte_count * PG_SIZE4K))
+ pte_count = rem_bytes / PG_SIZE4K;
+ rem_bytes_l2 = pte_count * PG_SIZE4K;
+
+ /*
+ * Unmap the VA space on this L2 PT. A quicker way
+ * would be to clear pte_count entries starting from
+ * pte_addr_l2. However, below code checks that we don't
+ * clear invalid entries or less than 64KB for a 64KB
+ * entry. Similar checking is done for L1 PTEs too
+ * below
+ */
+ while (rem_bytes_l2 && !status) {
+ pte_val = *(u32 *) pte_addr_l2;
+ pte_size = hw_mmu_pte_size_l2(pte_val);
+ /* va_curr aligned to pte_size? */
+ if (pte_size == 0 || rem_bytes_l2 < pte_size ||
+ va_curr & (pte_size - 1)) {
+ status = -EPERM;
+ break;
+ }
+
+ /* Collect Physical addresses from VA */
+ paddr = (pte_val & ~(pte_size - 1));
+ if (pte_size == HW_PAGE_SIZE64KB)
+ numof4k_pages = 16;
+ else
+ numof4k_pages = 1;
+ temp = 0;
+ while (temp++ < numof4k_pages) {
+ if (!pfn_valid(__phys_to_pfn(paddr))) {
+ paddr += HW_PAGE_SIZE4KB;
+ continue;
+ }
+ pg = PHYS_TO_PAGE(paddr);
+ if (page_count(pg) < 1) {
+ pr_info("DSPBRIDGE: UNMAP function: "
+ "COUNT 0 FOR PA 0x%x, size = "
+ "0x%x\n", paddr, ul_num_bytes);
+ bad_page_dump(paddr, pg);
+ } else {
+ set_page_dirty(pg);
+ page_cache_release(pg);
+ }
+ paddr += HW_PAGE_SIZE4KB;
+ }
+ if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
+ status = -EPERM;
+ goto EXIT_LOOP;
+ }
+
+ status = 0;
+ rem_bytes_l2 -= pte_size;
+ va_curr += pte_size;
+ pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
+ }
+ spin_lock(&pt->pg_lock);
+ if (rem_bytes_l2 == 0) {
+ pt->pg_info[l2_page_num].num_entries -= pte_count;
+ if (pt->pg_info[l2_page_num].num_entries == 0) {
+ /*
+ * Clear the L1 PTE pointing to the L2 PT
+ */
+ if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
+ HW_MMU_COARSE_PAGE_SIZE))
+ status = 0;
+ else {
+ status = -EPERM;
+ spin_unlock(&pt->pg_lock);
+ goto EXIT_LOOP;
+ }
+ }
+ rem_bytes -= pte_count * PG_SIZE4K;
+ } else
+ status = -EPERM;
+
+ spin_unlock(&pt->pg_lock);
+ continue;
+skip_coarse_page:
+ /* va_curr aligned to pte_size? */
+ /* pte_size = 1 MB or 16 MB */
+ if (pte_size == 0 || rem_bytes < pte_size ||
+ va_curr & (pte_size - 1)) {
+ status = -EPERM;
+ break;
+ }
+
+ if (pte_size == HW_PAGE_SIZE1MB)
+ numof4k_pages = 256;
+ else
+ numof4k_pages = 4096;
+ temp = 0;
+ /* Collect Physical addresses from VA */
+ paddr = (pte_val & ~(pte_size - 1));
+ while (temp++ < numof4k_pages) {
+ if (pfn_valid(__phys_to_pfn(paddr))) {
+ pg = PHYS_TO_PAGE(paddr);
+ if (page_count(pg) < 1) {
+ pr_info("DSPBRIDGE: UNMAP function: "
+ "COUNT 0 FOR PA 0x%x, size = "
+ "0x%x\n", paddr, ul_num_bytes);
+ bad_page_dump(paddr, pg);
+ } else {
+ set_page_dirty(pg);
+ page_cache_release(pg);
+ }
+ }
+ paddr += HW_PAGE_SIZE4KB;
+ }
+ if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
+ status = 0;
+ rem_bytes -= pte_size;
+ va_curr += pte_size;
+ } else {
+ status = -EPERM;
+ goto EXIT_LOOP;
+ }
+ }
+ /*
+ * It is better to flush the TLB here, so that any stale old entries
+ * get flushed
+ */
+EXIT_LOOP:
+ flush_all(dev_context);
+ dev_dbg(bridge,
+ "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
+ " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
+ pte_addr_l2, rem_bytes, rem_bytes_l2, status);
+ return status;
+}
+
+/*
+ * ======== user_va2_pa ========
+ * Purpose:
+ * This function walks through the page tables to convert a userland
+ * virtual address to physical address
+ */
+static u32 user_va2_pa(struct mm_struct *mm, u32 address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset(mm, address);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, address);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, address);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte & PAGE_MASK;
+ }
+ }
+ }
+
+ return 0;
+}
+
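A minimal usage sketch, mirroring the VM_IO path of bridge_brd_mem_map
above, where this walk substitutes for get_user_pages:

	u32 pa = user_va2_pa(current->mm, mpu_addr);
	if (!pa)
		return -EPERM;	/* page not present, nothing to map */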
+/*
+ * ======== pte_update ========
+ * This function calculates the optimum page-aligned addresses and sizes
+ * Caller must pass page-aligned values
+ */
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+ u32 va, u32 size,
+ struct hw_mmu_map_attrs_t *map_attrs)
+{
+ u32 i;
+ u32 all_bits;
+ u32 pa_curr = pa;
+ u32 va_curr = va;
+ u32 num_bytes = size;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ int status = 0;
+ u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+ HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+ };
+
+ while (num_bytes && !status) {
+ /* To find the max. page size with which both PA & VA are
+ * aligned */
+ all_bits = pa_curr | va_curr;
+
+ for (i = 0; i < 4; i++) {
+ if ((num_bytes >= page_size[i]) && ((all_bits &
+ (page_size[i] -
+ 1)) == 0)) {
+ status =
+ pte_set(dev_context->pt_attrs, pa_curr,
+ va_curr, page_size[i], map_attrs);
+ pa_curr += page_size[i];
+ va_curr += page_size[i];
+ num_bytes -= page_size[i];
+ /* Don't try smaller sizes. Hopefully we have
+ * reached an address aligned to a bigger page
+ * size */
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
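A worked example of the greedy size selection, with hypothetical
page-aligned inputs:

	/*
	 * pa = va = 0x10000, size = 0x21000 (132KB): all_bits = 0x10000 is
	 * 64KB-aligned but not 1MB-aligned, so the loop emits one 64KB PTE
	 * at 0x10000, another at 0x20000, then one 4KB PTE at 0x30000:
	 * three PTEs instead of thirty-three 4KB entries.
	 */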
+/*
+ * ======== pte_set ========
+ * This function calculates PTE address (MPU virtual) to be updated
+ * It also manages the L2 page tables
+ */
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+ u32 size, struct hw_mmu_map_attrs_t *attrs)
+{
+ u32 i;
+ u32 pte_val;
+ u32 pte_addr_l1;
+ u32 pte_size;
+ /* Base address of the PT that will be updated */
+ u32 pg_tbl_va;
+ u32 l1_base_va;
+ /* Compiler warns that the next three variables might be used
+ * uninitialized in this function. Doesn't seem so. Working around,
+	 * anyway. */
+ u32 l2_base_va = 0;
+ u32 l2_base_pa = 0;
+ u32 l2_page_num = 0;
+ int status = 0;
+
+ l1_base_va = pt->l1_base_va;
+ pg_tbl_va = l1_base_va;
+ if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
+ /* Find whether the L1 PTE points to a valid L2 PT */
+ pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
+ if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
+ pte_val = *(u32 *) pte_addr_l1;
+ pte_size = hw_mmu_pte_size_l1(pte_val);
+ } else {
+ return -EPERM;
+ }
+ spin_lock(&pt->pg_lock);
+ if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
+ /* Get the L2 PA from the L1 PTE, and find
+ * corresponding L2 VA */
+ l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+ l2_base_va =
+ l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+ l2_page_num =
+ (l2_base_pa -
+ pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+ } else if (pte_size == 0) {
+ /* L1 PTE is invalid. Allocate a L2 PT and
+ * point the L1 PTE to it */
+ /* Find a free L2 PT. */
+ for (i = 0; (i < pt->l2_num_pages) &&
+ (pt->pg_info[i].num_entries != 0); i++)
+				;
+ if (i < pt->l2_num_pages) {
+ l2_page_num = i;
+ l2_base_pa = pt->l2_base_pa + (l2_page_num *
+ HW_MMU_COARSE_PAGE_SIZE);
+ l2_base_va = pt->l2_base_va + (l2_page_num *
+ HW_MMU_COARSE_PAGE_SIZE);
+ /* Endianness attributes are ignored for
+ * HW_MMU_COARSE_PAGE_SIZE */
+ status =
+ hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
+ HW_MMU_COARSE_PAGE_SIZE,
+ attrs);
+ } else {
+ status = -ENOMEM;
+ }
+ } else {
+ /* Found valid L1 PTE of another size.
+ * Should not overwrite it. */
+ status = -EPERM;
+ }
+ if (!status) {
+ pg_tbl_va = l2_base_va;
+ if (size == HW_PAGE_SIZE64KB)
+ pt->pg_info[l2_page_num].num_entries += 16;
+ else
+ pt->pg_info[l2_page_num].num_entries++;
+ dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
+ "%x, num_entries %x\n", l2_base_va,
+ l2_base_pa, l2_page_num,
+ pt->pg_info[l2_page_num].num_entries);
+ }
+ spin_unlock(&pt->pg_lock);
+ }
+ if (!status) {
+ dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
+ pg_tbl_va, pa, va, size);
+ dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
+ "mixed_size %x\n", attrs->endianism,
+ attrs->element_size, attrs->mixed_size);
+ status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
+ }
+
+ return status;
+}
+
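The bookkeeping above in numbers, assuming the usual ARM coarse-table
geometry behind hw_mmu: one L2 table covers 1MB through 256 u32 PTEs, so a
64KB mapping occupies 16 consecutive entries (the "+= 16" above), and
bridge_brd_mem_un_map decrements the same num_entries counter, clearing the
L1 link and recycling the table once the count reaches zero.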
+/* Memory map kernel VA -- memory allocated with vmalloc */
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+ u32 ul_mpu_addr, u32 virt_addr,
+ u32 ul_num_bytes,
+ struct hw_mmu_map_attrs_t *hw_attrs)
+{
+ int status = 0;
+ struct page *page[1];
+ u32 i;
+ u32 pa_curr;
+ u32 pa_next;
+ u32 va_curr;
+ u32 size_curr;
+ u32 num_pages;
+ u32 pa;
+ u32 num_of4k_pages;
+ u32 temp = 0;
+
+ /*
+ * Do Kernel va to pa translation.
+ * Combine physically contiguous regions to reduce TLBs.
+ * Pass the translated pa to pte_update.
+ */
+ num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */
+ i = 0;
+ va_curr = ul_mpu_addr;
+ page[0] = vmalloc_to_page((void *)va_curr);
+ pa_next = page_to_phys(page[0]);
+ while (!status && (i < num_pages)) {
+ /*
+		 * Reuse pa_next from the previous iteration to avoid
+ * an extra va2pa call
+ */
+ pa_curr = pa_next;
+ size_curr = PAGE_SIZE;
+ /*
+ * If the next page is physically contiguous,
+ * map it with the current one by increasing
+ * the size of the region to be mapped
+ */
+ while (++i < num_pages) {
+ page[0] =
+ vmalloc_to_page((void *)(va_curr + size_curr));
+ pa_next = page_to_phys(page[0]);
+
+ if (pa_next == (pa_curr + size_curr))
+ size_curr += PAGE_SIZE;
+ else
+ break;
+
+ }
+ if (pa_next == 0) {
+ status = -ENOMEM;
+ break;
+ }
+ pa = pa_curr;
+		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
+		temp = 0;	/* reset per region, or later pages are skipped */
+		while (temp++ < num_of4k_pages) {
+ get_page(PHYS_TO_PAGE(pa));
+ pa += HW_PAGE_SIZE4KB;
+ }
+ status = pte_update(dev_context, pa_curr, virt_addr +
+ (va_curr - ul_mpu_addr), size_curr,
+ hw_attrs);
+ va_curr += size_curr;
+ }
+ /*
+ * In any case, flush the TLB
+	 * This is called from here instead of from pte_update to avoid unnecessary
+ * repetition while mapping non-contiguous physical regions of a virtual
+ * region
+ */
+ flush_all(dev_context);
+ dev_dbg(bridge, "%s status %x\n", __func__, status);
+ return status;
+}
+
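An example of the coalescing win, with a hypothetical physical layout:

	/*
	 * If 16 consecutive vmalloc pages happen to be physically
	 * contiguous from a 64KB-aligned pa, the inner loop grows
	 * size_curr to 0x10000 and the single pte_update call can emit
	 * one 64KB PTE instead of sixteen 4KB entries.
	 */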
+/*
* ======== wait_for_start ========
 * Wait for the signal from the DSP that it has started, or time out.
*/