diff options
Diffstat (limited to 'drivers/staging/tidspbridge/core')
-rw-r--r-- | drivers/staging/tidspbridge/core/_cmm.h | 45 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/_deh.h | 35 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/_msg_sm.h | 142 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/_tiomap.h | 382 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/_tiomap_pwr.h | 85 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/chnl_sm.c | 907 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/dsp-clock.c | 391 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/io_sm.c | 2245 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/msg_sm.c | 564 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/sync.c | 121 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/tiomap3430.c | 1813 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 556 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/tiomap_io.c | 438 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/tiomap_io.h | 104 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/ue_deh.c | 272 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/wdt.c | 143 |
16 files changed, 0 insertions, 8243 deletions
diff --git a/drivers/staging/tidspbridge/core/_cmm.h b/drivers/staging/tidspbridge/core/_cmm.h deleted file mode 100644 index 7660bef6ebb3..000000000000 --- a/drivers/staging/tidspbridge/core/_cmm.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * _cmm.h - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Private header file defining CMM manager objects and defines needed - * by IO manager to register shared memory regions when DSP base image - * is loaded(bridge_io_on_loaded). - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -#ifndef _CMM_ -#define _CMM_ - -/* - * These target side symbols define the beginning and ending addresses - * of the section of shared memory used for shared memory manager CMM. - * They are defined in the *cfg.cmd file by cdb code. 
- */ -#define SHM0_SHARED_BASE_SYM "_SHM0_BEG" -#define SHM0_SHARED_END_SYM "_SHM0_END" -#define SHM0_SHARED_RESERVED_BASE_SYM "_SHM0_RSVDSTRT" - -/* - * Shared Memory Region #0(SHMSEG0) is used in the following way: - * - * |(_SHM0_BEG) | (_SHM0_RSVDSTRT) | (_SHM0_END) - * V V V - * ------------------------------------------------------------ - * | DSP-side allocations | GPP-side allocations | - * ------------------------------------------------------------ - * - * - */ - -#endif /* _CMM_ */ diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h deleted file mode 100644 index 025d34320e7e..000000000000 --- a/drivers/staging/tidspbridge/core/_deh.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * _deh.h - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Private header for DEH module. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * Copyright (C) 2010 Felipe Contreras - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -#ifndef _DEH_ -#define _DEH_ - -#include <dspbridge/ntfy.h> -#include <dspbridge/dspdefs.h> - -/* DEH Manager: only one created per board: */ -struct deh_mgr { - struct bridge_dev_context *bridge_context; /* Bridge context. 
*/ - struct ntfy_object *ntfy_obj; /* NTFY object */ - - /* MMU Fault DPC */ - struct tasklet_struct dpc_tasklet; -}; - -#endif /* _DEH_ */ diff --git a/drivers/staging/tidspbridge/core/_msg_sm.h b/drivers/staging/tidspbridge/core/_msg_sm.h deleted file mode 100644 index f6e58e3f3b48..000000000000 --- a/drivers/staging/tidspbridge/core/_msg_sm.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * _msg_sm.h - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Private header file defining msg_ctrl manager objects and defines needed - * by IO manager. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -#ifndef _MSG_SM_ -#define _MSG_SM_ - -#include <linux/list.h> -#include <dspbridge/msgdefs.h> - -/* - * These target side symbols define the beginning and ending addresses - * of the section of shared memory used for messages. They are - * defined in the *cfg.cmd file by cdb code. - */ -#define MSG_SHARED_BUFFER_BASE_SYM "_MSG_BEG" -#define MSG_SHARED_BUFFER_LIMIT_SYM "_MSG_END" - -#ifndef _CHNL_WORDSIZE -#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 2 bytes/word */ -#endif - -/* - * ======== msg_ctrl ======== - * There is a control structure for messages to the DSP, and a control - * structure for messages from the DSP. 
The shared memory region for - * transferring messages is partitioned as follows: - * - * ---------------------------------------------------------- - * |Control | Messages from DSP | Control | Messages to DSP | - * ---------------------------------------------------------- - * - * msg_ctrl control structure for messages to the DSP is used in the following - * way: - * - * buf_empty - This flag is set to FALSE by the GPP after it has output - * messages for the DSP. The DSP host driver sets it to - * TRUE after it has copied the messages. - * post_swi - Set to 1 by the GPP after it has written the messages, - * set the size, and set buf_empty to FALSE. - * The DSP Host driver uses SWI_andn of the post_swi field - * when a host interrupt occurs. The host driver clears - * this after posting the SWI. - * size - Number of messages to be read by the DSP. - * - * For messages from the DSP: - * buf_empty - This flag is set to FALSE by the DSP after it has output - * messages for the GPP. The DPC on the GPP sets it to - * TRUE after it has copied the messages. - * post_swi - Set to 1 the DPC on the GPP after copying the messages. - * size - Number of messages to be read by the GPP. - */ -struct msg_ctrl { - u32 buf_empty; /* to/from DSP buffer is empty */ - u32 post_swi; /* Set to "1" to post msg_ctrl SWI */ - u32 size; /* Number of messages to/from the DSP */ - u32 resvd; -}; - -/* - * ======== msg_mgr ======== - * The msg_mgr maintains a list of all MSG_QUEUEs. Each NODE object can - * have msg_queue to hold all messages that come up from the corresponding - * node on the DSP. The msg_mgr also has a shared queue of messages - * ready to go to the DSP. 
- */ -struct msg_mgr { - /* The first field must match that in msgobj.h */ - - /* Function interface to Bridge driver */ - struct bridge_drv_interface *intf_fxns; - - struct io_mgr *iomgr; /* IO manager */ - struct list_head queue_list; /* List of MSG_QUEUEs */ - spinlock_t msg_mgr_lock; /* For critical sections */ - /* Signalled when MsgFrame is available */ - struct sync_object *sync_event; - struct list_head msg_free_list; /* Free MsgFrames ready to be filled */ - struct list_head msg_used_list; /* MsgFrames ready to go to DSP */ - u32 msgs_pending; /* # of queued messages to go to DSP */ - u32 max_msgs; /* Max # of msgs that fit in buffer */ - msg_onexit on_exit; /* called when RMS_EXIT is received */ -}; - -/* - * ======== msg_queue ======== - * Each NODE has a msg_queue for receiving messages from the - * corresponding node on the DSP. The msg_queue object maintains a list - * of messages that have been sent to the host, but not yet read (MSG_Get), - * and a list of free frames that can be filled when new messages arrive - * from the DSP. - * The msg_queue's hSynEvent gets posted when a message is ready. 
- */ -struct msg_queue { - struct list_head list_elem; - struct msg_mgr *msg_mgr; - u32 max_msgs; /* Node message depth */ - u32 msgq_id; /* Node environment pointer */ - struct list_head msg_free_list; /* Free MsgFrames ready to be filled */ - /* Filled MsgFramess waiting to be read */ - struct list_head msg_used_list; - void *arg; /* Handle passed to mgr on_exit callback */ - struct sync_object *sync_event; /* Signalled when message is ready */ - struct sync_object *sync_done; /* For synchronizing cleanup */ - struct sync_object *sync_done_ack; /* For synchronizing cleanup */ - struct ntfy_object *ntfy_obj; /* For notification of message ready */ - bool done; /* TRUE <==> deleting the object */ - u32 io_msg_pend; /* Number of pending MSG_get/put calls */ -}; - -/* - * ======== msg_dspmsg ======== - */ -struct msg_dspmsg { - struct dsp_msg msg; - u32 msgq_id; /* Identifies the node the message goes to */ -}; - -/* - * ======== msg_frame ======== - */ -struct msg_frame { - struct list_head list_elem; - struct msg_dspmsg msg_data; -}; - -#endif /* _MSG_SM_ */ diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h deleted file mode 100644 index 65971b784b78..000000000000 --- a/drivers/staging/tidspbridge/core/_tiomap.h +++ /dev/null @@ -1,382 +0,0 @@ -/* - * _tiomap.h - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Definitions and types private to this Bridge driver. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
- */ - -#ifndef _TIOMAP_ -#define _TIOMAP_ - -/* - * XXX These powerdomain.h/clockdomain.h includes are wrong and should - * be removed. No driver should call pwrdm_* or clkdm_* functions - * directly; they should rely on OMAP core code to do this. - */ -#include <mach-omap2/powerdomain.h> -#include <mach-omap2/clockdomain.h> -/* - * XXX These mach-omap2/ includes are wrong and should be removed. No - * driver should read or write to PRM/CM registers directly; they - * should rely on OMAP core code to do this. - */ -#include <mach-omap2/cm3xxx.h> -#include <mach-omap2/prm-regbits-34xx.h> -#include <mach-omap2/cm-regbits-34xx.h> -#include <dspbridge/devdefs.h> -#include <hw_defs.h> -#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ -#include <dspbridge/sync.h> -#include <dspbridge/clk.h> - -struct map_l4_peripheral { - u32 phys_addr; - u32 dsp_virt_addr; -}; - -#define ARM_MAILBOX_START 0xfffcf000 -#define ARM_MAILBOX_LENGTH 0x800 - -/* New Registers in OMAP3.1 */ - -#define TESTBLOCK_ID_START 0xfffed400 -#define TESTBLOCK_ID_LENGTH 0xff - -/* ID Returned by OMAP1510 */ -#define TBC_ID_VALUE 0xB47002F - -#define SPACE_LENGTH 0x2000 -#define API_CLKM_DPLL_DMA 0xfffec000 -#define ARM_INTERRUPT_OFFSET 0xb00 - -#define BIOS24XX - -#define L4_PERIPHERAL_NULL 0x0 -#define DSPVA_PERIPHERAL_NULL 0x0 - -#define MAX_LOCK_TLB_ENTRIES 15 - -#define L4_PERIPHERAL_PRM 0x48306000 /*PRM L4 Peripheral */ -#define DSPVA_PERIPHERAL_PRM 0x1181e000 -#define L4_PERIPHERAL_SCM 0x48002000 /*SCM L4 Peripheral */ -#define DSPVA_PERIPHERAL_SCM 0x1181f000 -#define L4_PERIPHERAL_MMU 0x5D000000 /*MMU L4 Peripheral */ -#define DSPVA_PERIPHERAL_MMU 0x11820000 -#define L4_PERIPHERAL_CM 0x48004000 /* Core L4, Clock Management */ -#define DSPVA_PERIPHERAL_CM 0x1181c000 -#define L4_PERIPHERAL_PER 0x48005000 /* PER */ -#define DSPVA_PERIPHERAL_PER 0x1181d000 - -#define L4_PERIPHERAL_GPIO1 0x48310000 -#define DSPVA_PERIPHERAL_GPIO1 0x11809000 -#define L4_PERIPHERAL_GPIO2 0x49050000 
-#define DSPVA_PERIPHERAL_GPIO2 0x1180a000 -#define L4_PERIPHERAL_GPIO3 0x49052000 -#define DSPVA_PERIPHERAL_GPIO3 0x1180b000 -#define L4_PERIPHERAL_GPIO4 0x49054000 -#define DSPVA_PERIPHERAL_GPIO4 0x1180c000 -#define L4_PERIPHERAL_GPIO5 0x49056000 -#define DSPVA_PERIPHERAL_GPIO5 0x1180d000 - -#define L4_PERIPHERAL_IVA2WDT 0x49030000 -#define DSPVA_PERIPHERAL_IVA2WDT 0x1180e000 - -#define L4_PERIPHERAL_DISPLAY 0x48050000 -#define DSPVA_PERIPHERAL_DISPLAY 0x1180f000 - -#define L4_PERIPHERAL_SSI 0x48058000 -#define DSPVA_PERIPHERAL_SSI 0x11804000 -#define L4_PERIPHERAL_GDD 0x48059000 -#define DSPVA_PERIPHERAL_GDD 0x11805000 -#define L4_PERIPHERAL_SS1 0x4805a000 -#define DSPVA_PERIPHERAL_SS1 0x11806000 -#define L4_PERIPHERAL_SS2 0x4805b000 -#define DSPVA_PERIPHERAL_SS2 0x11807000 - -#define L4_PERIPHERAL_CAMERA 0x480BC000 -#define DSPVA_PERIPHERAL_CAMERA 0x11819000 - -#define L4_PERIPHERAL_SDMA 0x48056000 -#define DSPVA_PERIPHERAL_SDMA 0x11810000 /* 0x1181d000 conflict w/ PER */ - -#define L4_PERIPHERAL_UART1 0x4806a000 -#define DSPVA_PERIPHERAL_UART1 0x11811000 -#define L4_PERIPHERAL_UART2 0x4806c000 -#define DSPVA_PERIPHERAL_UART2 0x11812000 -#define L4_PERIPHERAL_UART3 0x49020000 -#define DSPVA_PERIPHERAL_UART3 0x11813000 - -#define L4_PERIPHERAL_MCBSP1 0x48074000 -#define DSPVA_PERIPHERAL_MCBSP1 0x11814000 -#define L4_PERIPHERAL_MCBSP2 0x49022000 -#define DSPVA_PERIPHERAL_MCBSP2 0x11815000 -#define L4_PERIPHERAL_MCBSP3 0x49024000 -#define DSPVA_PERIPHERAL_MCBSP3 0x11816000 -#define L4_PERIPHERAL_MCBSP4 0x49026000 -#define DSPVA_PERIPHERAL_MCBSP4 0x11817000 -#define L4_PERIPHERAL_MCBSP5 0x48096000 -#define DSPVA_PERIPHERAL_MCBSP5 0x11818000 - -#define L4_PERIPHERAL_GPTIMER5 0x49038000 -#define DSPVA_PERIPHERAL_GPTIMER5 0x11800000 -#define L4_PERIPHERAL_GPTIMER6 0x4903a000 -#define DSPVA_PERIPHERAL_GPTIMER6 0x11801000 -#define L4_PERIPHERAL_GPTIMER7 0x4903c000 -#define DSPVA_PERIPHERAL_GPTIMER7 0x11802000 -#define L4_PERIPHERAL_GPTIMER8 0x4903e000 -#define 
DSPVA_PERIPHERAL_GPTIMER8 0x11803000 - -#define L4_PERIPHERAL_SPI1 0x48098000 -#define DSPVA_PERIPHERAL_SPI1 0x1181a000 -#define L4_PERIPHERAL_SPI2 0x4809a000 -#define DSPVA_PERIPHERAL_SPI2 0x1181b000 - -#define L4_PERIPHERAL_MBOX 0x48094000 -#define DSPVA_PERIPHERAL_MBOX 0x11808000 - -#define PM_GRPSEL_BASE 0x48307000 -#define DSPVA_GRPSEL_BASE 0x11821000 - -#define L4_PERIPHERAL_SIDETONE_MCBSP2 0x49028000 -#define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000 -#define L4_PERIPHERAL_SIDETONE_MCBSP3 0x4902a000 -#define DSPVA_PERIPHERAL_SIDETONE_MCBSP3 0x11825000 - -/* define a static array with L4 mappings */ -static const struct map_l4_peripheral l4_peripheral_table[] = { - {L4_PERIPHERAL_MBOX, DSPVA_PERIPHERAL_MBOX}, - {L4_PERIPHERAL_SCM, DSPVA_PERIPHERAL_SCM}, - {L4_PERIPHERAL_MMU, DSPVA_PERIPHERAL_MMU}, - {L4_PERIPHERAL_GPTIMER5, DSPVA_PERIPHERAL_GPTIMER5}, - {L4_PERIPHERAL_GPTIMER6, DSPVA_PERIPHERAL_GPTIMER6}, - {L4_PERIPHERAL_GPTIMER7, DSPVA_PERIPHERAL_GPTIMER7}, - {L4_PERIPHERAL_GPTIMER8, DSPVA_PERIPHERAL_GPTIMER8}, - {L4_PERIPHERAL_GPIO1, DSPVA_PERIPHERAL_GPIO1}, - {L4_PERIPHERAL_GPIO2, DSPVA_PERIPHERAL_GPIO2}, - {L4_PERIPHERAL_GPIO3, DSPVA_PERIPHERAL_GPIO3}, - {L4_PERIPHERAL_GPIO4, DSPVA_PERIPHERAL_GPIO4}, - {L4_PERIPHERAL_GPIO5, DSPVA_PERIPHERAL_GPIO5}, - {L4_PERIPHERAL_IVA2WDT, DSPVA_PERIPHERAL_IVA2WDT}, - {L4_PERIPHERAL_DISPLAY, DSPVA_PERIPHERAL_DISPLAY}, - {L4_PERIPHERAL_SSI, DSPVA_PERIPHERAL_SSI}, - {L4_PERIPHERAL_GDD, DSPVA_PERIPHERAL_GDD}, - {L4_PERIPHERAL_SS1, DSPVA_PERIPHERAL_SS1}, - {L4_PERIPHERAL_SS2, DSPVA_PERIPHERAL_SS2}, - {L4_PERIPHERAL_UART1, DSPVA_PERIPHERAL_UART1}, - {L4_PERIPHERAL_UART2, DSPVA_PERIPHERAL_UART2}, - {L4_PERIPHERAL_UART3, DSPVA_PERIPHERAL_UART3}, - {L4_PERIPHERAL_MCBSP1, DSPVA_PERIPHERAL_MCBSP1}, - {L4_PERIPHERAL_MCBSP2, DSPVA_PERIPHERAL_MCBSP2}, - {L4_PERIPHERAL_MCBSP3, DSPVA_PERIPHERAL_MCBSP3}, - {L4_PERIPHERAL_MCBSP4, DSPVA_PERIPHERAL_MCBSP4}, - {L4_PERIPHERAL_MCBSP5, DSPVA_PERIPHERAL_MCBSP5}, - {L4_PERIPHERAL_CAMERA, 
DSPVA_PERIPHERAL_CAMERA}, - {L4_PERIPHERAL_SPI1, DSPVA_PERIPHERAL_SPI1}, - {L4_PERIPHERAL_SPI2, DSPVA_PERIPHERAL_SPI2}, - {L4_PERIPHERAL_PRM, DSPVA_PERIPHERAL_PRM}, - {L4_PERIPHERAL_CM, DSPVA_PERIPHERAL_CM}, - {L4_PERIPHERAL_PER, DSPVA_PERIPHERAL_PER}, - {PM_GRPSEL_BASE, DSPVA_GRPSEL_BASE}, - {L4_PERIPHERAL_SIDETONE_MCBSP2, DSPVA_PERIPHERAL_SIDETONE_MCBSP2}, - {L4_PERIPHERAL_SIDETONE_MCBSP3, DSPVA_PERIPHERAL_SIDETONE_MCBSP3}, - {L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL} -}; - -/* - * 15 10 0 - * --------------------------------- - * |0|0|1|0|0|0|c|c|c|i|i|i|i|i|i|i| - * --------------------------------- - * | (class) | (module specific) | - * - * where c -> Externel Clock Command: Clk & Autoidle Disable/Enable - * i -> External Clock ID Timers 5,6,7,8, McBSP1,2 and WDT3 - */ - -/* MBX_PM_CLK_IDMASK: DSP External clock id mask. */ -#define MBX_PM_CLK_IDMASK 0x7F - -/* MBX_PM_CLK_CMDSHIFT: DSP External clock command shift. */ -#define MBX_PM_CLK_CMDSHIFT 7 - -/* MBX_PM_CLK_CMDMASK: DSP External clock command mask. */ -#define MBX_PM_CLK_CMDMASK 7 - -/* MBX_PM_MAX_RESOURCES: CORE 1 Clock resources. */ -#define MBX_CORE1_RESOURCES 7 - -/* MBX_PM_MAX_RESOURCES: CORE 2 Clock Resources. */ -#define MBX_CORE2_RESOURCES 1 - -/* MBX_PM_MAX_RESOURCES: TOTAL Clock Resources. 
*/ -#define MBX_PM_MAX_RESOURCES 11 - -/* Power Management Commands */ -#define BPWR_DISABLE_CLOCK 0 -#define BPWR_ENABLE_CLOCK 1 - -/* OMAP242x specific resources */ -enum bpwr_ext_clock_id { - BPWR_GP_TIMER5 = 0x10, - BPWR_GP_TIMER6, - BPWR_GP_TIMER7, - BPWR_GP_TIMER8, - BPWR_WD_TIMER3, - BPWR_MCBSP1, - BPWR_MCBSP2, - BPWR_MCBSP3, - BPWR_MCBSP4, - BPWR_MCBSP5, - BPWR_SSI = 0x20 -}; - -static const u32 bpwr_clkid[] = { - (u32) BPWR_GP_TIMER5, - (u32) BPWR_GP_TIMER6, - (u32) BPWR_GP_TIMER7, - (u32) BPWR_GP_TIMER8, - (u32) BPWR_WD_TIMER3, - (u32) BPWR_MCBSP1, - (u32) BPWR_MCBSP2, - (u32) BPWR_MCBSP3, - (u32) BPWR_MCBSP4, - (u32) BPWR_MCBSP5, - (u32) BPWR_SSI -}; - -struct bpwr_clk_t { - u32 clk_id; - enum dsp_clk_id clk; -}; - -static const struct bpwr_clk_t bpwr_clks[] = { - {(u32) BPWR_GP_TIMER5, DSP_CLK_GPT5}, - {(u32) BPWR_GP_TIMER6, DSP_CLK_GPT6}, - {(u32) BPWR_GP_TIMER7, DSP_CLK_GPT7}, - {(u32) BPWR_GP_TIMER8, DSP_CLK_GPT8}, - {(u32) BPWR_WD_TIMER3, DSP_CLK_WDT3}, - {(u32) BPWR_MCBSP1, DSP_CLK_MCBSP1}, - {(u32) BPWR_MCBSP2, DSP_CLK_MCBSP2}, - {(u32) BPWR_MCBSP3, DSP_CLK_MCBSP3}, - {(u32) BPWR_MCBSP4, DSP_CLK_MCBSP4}, - {(u32) BPWR_MCBSP5, DSP_CLK_MCBSP5}, - {(u32) BPWR_SSI, DSP_CLK_SSI} -}; - -/* Interrupt Register Offsets */ -#define INTH_IT_REG_OFFSET 0x00 /* Interrupt register offset */ -#define INTH_MASK_IT_REG_OFFSET 0x04 /* Mask Interrupt reg offset */ - -#define DSP_MAILBOX1_INT 10 -/* - * Bit definition of Interrupt Level Registers - */ - -/* Mail Box defines */ -#define MB_ARM2DSP1_REG_OFFSET 0x00 - -#define MB_ARM2DSP1B_REG_OFFSET 0x04 - -#define MB_DSP2ARM1B_REG_OFFSET 0x0C - -#define MB_ARM2DSP1_FLAG_REG_OFFSET 0x18 - -#define MB_ARM2DSP_FLAG 0x0001 - -#define MBOX_ARM2DSP HW_MBOX_ID0 -#define MBOX_DSP2ARM HW_MBOX_ID1 -#define MBOX_ARM HW_MBOX_U0_ARM -#define MBOX_DSP HW_MBOX_U1_DSP1 - -#define ENABLE true -#define DISABLE false - -#define HIGH_LEVEL true -#define LOW_LEVEL false - -/* Macro's */ -#define CLEAR_BIT(reg, mask) (reg &= ~mask) 
-#define SET_BIT(reg, mask) (reg |= mask) - -#define SET_GROUP_BITS16(reg, position, width, value) \ - do {\ - reg &= ~((0xFFFF >> (16 - (width))) << (position)); \ - reg |= ((value & (0xFFFF >> (16 - (width)))) << (position)); \ - } while (0); - -#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) - -/* This Bridge driver's device context: */ -struct bridge_dev_context { - struct dev_object *dev_obj; /* Handle to Bridge device object. */ - u32 dsp_base_addr; /* Arm's API to DSP virt base addr */ - /* - * DSP External memory prog address as seen virtually by the OS on - * the host side. - */ - u32 dsp_ext_base_addr; /* See the comment above */ - u32 api_reg_base; /* API mem map'd registers */ - void __iomem *dsp_mmu_base; /* DSP MMU Mapped registers */ - u32 api_clk_base; /* CLK Registers */ - u32 dsp_clk_m2_base; /* DSP Clock Module m2 */ - u32 public_rhea; /* Pub Rhea */ - u32 int_addr; /* MB INTR reg */ - u32 tc_endianism; /* TC Endianism register */ - u32 test_base; /* DSP MMU Mapped registers */ - u32 self_loop; /* Pointer to the selfloop */ - u32 dsp_start_add; /* API Boot vector */ - u32 internal_size; /* Internal memory size */ - - struct omap_mbox *mbox; /* Mail box handle */ - - struct cfg_hostres *resources; /* Host Resources */ - - /* - * Processor specific info is set when prog loaded and read from DCD. - * [See bridge_dev_ctrl()] PROC info contains DSP-MMU TLB entries. - */ - /* DMMU TLB entries */ - struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB]; - u32 brd_state; /* Last known board state. */ - - /* TC Settings */ - bool tc_word_swap_on; /* Traffic Controller Word Swap */ - struct pg_table_attrs *pt_attrs; - u32 dsp_per_clks; -}; - -/* - * If dsp_debug is true, do not branch to the DSP entry - * point and wait for DSP to boot. - */ -extern s32 dsp_debug; - -/* - * ======== sm_interrupt_dsp ======== - * Purpose: - * Set interrupt value & send an interrupt to the DSP processor(s). 
- * This is typically used when mailbox interrupt mechanisms allow data - * to be associated with interrupt such as for OMAP's CMD/DATA regs. - * Parameters: - * dev_context: Handle to Bridge driver defined device info. - * mb_val: Value associated with interrupt(e.g. mailbox value). - * Returns: - * 0: Interrupt sent; - * else: Unable to send interrupt. - * Requires: - * Ensures: - */ -int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val); - -#endif /* _TIOMAP_ */ diff --git a/drivers/staging/tidspbridge/core/_tiomap_pwr.h b/drivers/staging/tidspbridge/core/_tiomap_pwr.h deleted file mode 100644 index 7bbd3802c15f..000000000000 --- a/drivers/staging/tidspbridge/core/_tiomap_pwr.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * _tiomap_pwr.h - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Definitions and types for the DSP wake/sleep routines. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -#ifndef _TIOMAP_PWR_ -#define _TIOMAP_PWR_ - -#ifdef CONFIG_PM -extern s32 dsp_test_sleepstate; -#endif - -extern struct mailbox_context mboxsetting; - -/* - * ======== wake_dsp ========= - * Wakes up the DSP from DeepSleep - */ -extern int wake_dsp(struct bridge_dev_context *dev_context, - void *pargs); - -/* - * ======== sleep_dsp ========= - * Places the DSP in DeepSleep. - */ -extern int sleep_dsp(struct bridge_dev_context *dev_context, - u32 dw_cmd, void *pargs); -/* - * ========interrupt_dsp======== - * Sends an interrupt to DSP unconditionally. 
- */ -extern void interrupt_dsp(struct bridge_dev_context *dev_context, - u16 mb_val); - -/* - * ======== wake_dsp ========= - * Wakes up the DSP from DeepSleep - */ -extern int dsp_peripheral_clk_ctrl(struct bridge_dev_context - *dev_context, void *pargs); -/* - * ======== handle_hibernation_from_dsp ======== - * Handle Hibernation requested from DSP - */ -int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context); -/* - * ======== post_scale_dsp ======== - * Handle Post Scale notification to DSP - */ -int post_scale_dsp(struct bridge_dev_context *dev_context, - void *pargs); -/* - * ======== pre_scale_dsp ======== - * Handle Pre Scale notification to DSP - */ -int pre_scale_dsp(struct bridge_dev_context *dev_context, - void *pargs); -/* - * ======== handle_constraints_set ======== - * Handle constraints request from DSP - */ -int handle_constraints_set(struct bridge_dev_context *dev_context, - void *pargs); - -/* - * ======== dsp_clk_wakeup_event_ctrl ======== - * This function sets the group selction bits for while - * enabling/disabling. - */ -void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable); - -#endif /* _TIOMAP_PWR_ */ diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c deleted file mode 100644 index 16fa3462fbbe..000000000000 --- a/drivers/staging/tidspbridge/core/chnl_sm.c +++ /dev/null @@ -1,907 +0,0 @@ -/* - * chnl_sm.c - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Implements upper edge functions for Bridge driver channel module. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -/* - * The lower edge functions must be implemented by the Bridge driver - * writer, and are declared in chnl_sm.h. - * - * Care is taken in this code to prevent simultaneous access to channel - * queues from - * 1. Threads. - * 2. io_dpc(), scheduled from the io_isr() as an event. - * - * This is done primarily by: - * - Semaphores. - * - state flags in the channel object; and - * - ensuring the IO_Dispatch() routine, which is called from both - * CHNL_AddIOReq() and the DPC(if implemented), is not re-entered. - * - * Channel Invariant: - * There is an important invariant condition which must be maintained per - * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of - * which may cause timeouts and/or failure of function sync_wait_on_event. - * This invariant condition is: - * - * list_empty(&pchnl->io_completions) ==> pchnl->sync_event is reset - * and - * !list_empty(&pchnl->io_completions) ==> pchnl->sync_event is set. 
- */ - -#include <linux/types.h> - -/* ----------------------------------- OS */ -#include <dspbridge/host_os.h> - -/* ----------------------------------- DSP/BIOS Bridge */ -#include <dspbridge/dbdefs.h> - -/* ----------------------------------- OS Adaptation Layer */ -#include <dspbridge/sync.h> - -/* ----------------------------------- Bridge Driver */ -#include <dspbridge/dspdefs.h> -#include <dspbridge/dspchnl.h> -#include "_tiomap.h" - -/* ----------------------------------- Platform Manager */ -#include <dspbridge/dev.h> - -/* ----------------------------------- Others */ -#include <dspbridge/io_sm.h> - -/* ----------------------------------- Define for This */ -#define USERMODE_ADDR PAGE_OFFSET - -#define MAILBOX_IRQ INT_MAIL_MPU_IRQ - -/* ----------------------------------- Function Prototypes */ -static int create_chirp_list(struct list_head *list, u32 chirps); - -static void free_chirp_list(struct list_head *list); - -static int search_free_channel(struct chnl_mgr *chnl_mgr_obj, - u32 *chnl); - -/* - * ======== bridge_chnl_add_io_req ======== - * Enqueue an I/O request for data transfer on a channel to the DSP. - * The direction (mode) is specified in the channel object. Note the DSP - * address is specified for channels opened in direct I/O mode. 
- */ -int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf, - u32 byte_size, u32 buf_size, - u32 dw_dsp_addr, u32 dw_arg) -{ - int status = 0; - struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; - struct chnl_irp *chnl_packet_obj = NULL; - struct bridge_dev_context *dev_ctxt; - struct dev_object *dev_obj; - u8 dw_state; - bool is_eos; - struct chnl_mgr *chnl_mgr_obj; - u8 *host_sys_buf = NULL; - bool sched_dpc = false; - u16 mb_val = 0; - - is_eos = (byte_size == 0); - - /* Validate args */ - if (!host_buf || !pchnl) - return -EFAULT; - - if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) - return -EPERM; - - /* - * Check the channel state: only queue chirp if channel state - * allows it. - */ - dw_state = pchnl->state; - if (dw_state != CHNL_STATEREADY) { - if (dw_state & CHNL_STATECANCEL) - return -ECANCELED; - if ((dw_state & CHNL_STATEEOS) && - CHNL_IS_OUTPUT(pchnl->chnl_mode)) - return -EPIPE; - /* No other possible states left */ - } - - dev_obj = dev_get_first(); - dev_get_bridge_context(dev_obj, &dev_ctxt); - if (!dev_ctxt) - return -EFAULT; - - if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) { - if (!(host_buf < (void *)USERMODE_ADDR)) { - host_sys_buf = host_buf; - goto func_cont; - } - /* if addr in user mode, then copy to kernel space */ - host_sys_buf = kmalloc(buf_size, GFP_KERNEL); - if (host_sys_buf == NULL) - return -ENOMEM; - - if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) { - status = copy_from_user(host_sys_buf, host_buf, - buf_size); - if (status) { - kfree(host_sys_buf); - host_sys_buf = NULL; - return -EFAULT; - } - } - } -func_cont: - /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY - * channels. DPCCS is held to avoid race conditions with PCPY channels. - * If DPC is scheduled in process context (iosm_schedule) and any - * non-mailbox interrupt occurs, that DPC will run and break CS. Hence - * we disable ALL DPCs. We will try to disable ONLY IO DPC later. 
*/ - chnl_mgr_obj = pchnl->chnl_mgr_obj; - spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock); - omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); - if (pchnl->chnl_type == CHNL_PCPY) { - /* This is a processor-copy channel. */ - if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) { - /* Check buffer size on output channels for fit. */ - if (byte_size > io_buf_size( - pchnl->chnl_mgr_obj->iomgr)) { - status = -EINVAL; - goto out; - } - } - } - - /* Get a free chirp: */ - if (list_empty(&pchnl->free_packets_list)) { - status = -EIO; - goto out; - } - chnl_packet_obj = list_first_entry(&pchnl->free_packets_list, - struct chnl_irp, link); - list_del(&chnl_packet_obj->link); - - /* Enqueue the chirp on the chnl's IORequest queue: */ - chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf = - host_buf; - if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1) - chnl_packet_obj->host_sys_buf = host_sys_buf; - - /* - * Note: for dma chans dw_dsp_addr contains dsp address - * of SM buffer. - */ - /* DSP address */ - chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size; - chnl_packet_obj->byte_size = byte_size; - chnl_packet_obj->buf_size = buf_size; - /* Only valid for output channel */ - chnl_packet_obj->arg = dw_arg; - chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS : - CHNL_IOCSTATCOMPLETE); - list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests); - pchnl->cio_reqs++; - /* - * If end of stream, update the channel state to prevent - * more IOR's. - */ - if (is_eos) - pchnl->state |= CHNL_STATEEOS; - - /* Request IO from the DSP */ - io_request_chnl(chnl_mgr_obj->iomgr, pchnl, - (CHNL_IS_INPUT(pchnl->chnl_mode) ? 
IO_INPUT : - IO_OUTPUT), &mb_val); - sched_dpc = true; -out: - omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX); - spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); - if (mb_val != 0) - sm_interrupt_dsp(dev_ctxt, mb_val); - - /* Schedule a DPC, to do the actual data transfer */ - if (sched_dpc) - iosm_schedule(chnl_mgr_obj->iomgr); - - return status; -} - -/* - * ======== bridge_chnl_cancel_io ======== - * Return all I/O requests to the client which have not yet been - * transferred. The channel's I/O completion object is - * signalled, and all the I/O requests are queued as IOC's, with the - * status field set to CHNL_IOCSTATCANCEL. - * This call is typically used in abort situations, and is a prelude to - * chnl_close(); - */ -int bridge_chnl_cancel_io(struct chnl_object *chnl_obj) -{ - struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; - u32 chnl_id = -1; - s8 chnl_mode; - struct chnl_irp *chirp, *tmp; - struct chnl_mgr *chnl_mgr_obj = NULL; - - /* Check args: */ - if (!pchnl || !pchnl->chnl_mgr_obj) - return -EFAULT; - - chnl_id = pchnl->chnl_id; - chnl_mode = pchnl->chnl_mode; - chnl_mgr_obj = pchnl->chnl_mgr_obj; - - /* Mark this channel as cancelled, to prevent further IORequests or - * IORequests or dispatching. 
*/ - spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock); - - pchnl->state |= CHNL_STATECANCEL; - - if (list_empty(&pchnl->io_requests)) { - spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); - return 0; - } - - if (pchnl->chnl_type == CHNL_PCPY) { - /* Indicate we have no more buffers available for transfer: */ - if (CHNL_IS_INPUT(pchnl->chnl_mode)) { - io_cancel_chnl(chnl_mgr_obj->iomgr, chnl_id); - } else { - /* Record that we no longer have output buffers - * available: */ - chnl_mgr_obj->output_mask &= ~(1 << chnl_id); - } - } - /* Move all IOR's to IOC queue: */ - list_for_each_entry_safe(chirp, tmp, &pchnl->io_requests, link) { - list_del(&chirp->link); - chirp->byte_size = 0; - chirp->status |= CHNL_IOCSTATCANCEL; - list_add_tail(&chirp->link, &pchnl->io_completions); - pchnl->cio_cs++; - pchnl->cio_reqs--; - } - - spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); - - return 0; -} - -/* - * ======== bridge_chnl_close ======== - * Purpose: - * Ensures all pending I/O on this channel is cancelled, discards all - * queued I/O completion notifications, then frees the resources allocated - * for this channel, and makes the corresponding logical channel id - * available for subsequent use. 
- */ -int bridge_chnl_close(struct chnl_object *chnl_obj) -{ - int status; - struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; - - /* Check args: */ - if (!pchnl) - return -EFAULT; - /* Cancel IO: this ensures no further IO requests or notifications */ - status = bridge_chnl_cancel_io(chnl_obj); - if (status) - return status; - /* Invalidate channel object: Protects from CHNL_GetIOCompletion() */ - /* Free the slot in the channel manager: */ - pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL; - spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock); - pchnl->chnl_mgr_obj->open_channels -= 1; - spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock); - if (pchnl->ntfy_obj) { - ntfy_delete(pchnl->ntfy_obj); - kfree(pchnl->ntfy_obj); - pchnl->ntfy_obj = NULL; - } - /* Reset channel event: (NOTE: user_event freed in user context) */ - if (pchnl->sync_event) { - sync_reset_event(pchnl->sync_event); - kfree(pchnl->sync_event); - pchnl->sync_event = NULL; - } - /* Free I/O request and I/O completion queues: */ - free_chirp_list(&pchnl->io_completions); - pchnl->cio_cs = 0; - - free_chirp_list(&pchnl->io_requests); - pchnl->cio_reqs = 0; - - free_chirp_list(&pchnl->free_packets_list); - - /* Release channel object. */ - kfree(pchnl); - - return status; -} - -/* - * ======== bridge_chnl_create ======== - * Create a channel manager object, responsible for opening new channels - * and closing old ones for a given board. - */ -int bridge_chnl_create(struct chnl_mgr **channel_mgr, - struct dev_object *hdev_obj, - const struct chnl_mgrattrs *mgr_attrts) -{ - int status = 0; - struct chnl_mgr *chnl_mgr_obj = NULL; - u8 max_channels; - - /* Allocate channel manager object */ - chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL); - if (chnl_mgr_obj) { - /* - * The max_channels attr must equal the # of supported chnls for - * each transport(# chnls for PCPY = DDMA = ZCPY): i.e. - * mgr_attrts->max_channels = CHNL_MAXCHANNELS = - * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS. 
- */ - max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY; - /* Create array of channels */ - chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *) - * max_channels, GFP_KERNEL); - if (chnl_mgr_obj->channels) { - /* Initialize chnl_mgr object */ - chnl_mgr_obj->type = CHNL_TYPESM; - chnl_mgr_obj->word_size = mgr_attrts->word_size; - /* Total # chnls supported */ - chnl_mgr_obj->max_channels = max_channels; - chnl_mgr_obj->open_channels = 0; - chnl_mgr_obj->output_mask = 0; - chnl_mgr_obj->last_output = 0; - chnl_mgr_obj->dev_obj = hdev_obj; - spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock); - } else { - status = -ENOMEM; - } - } else { - status = -ENOMEM; - } - - if (status) { - bridge_chnl_destroy(chnl_mgr_obj); - *channel_mgr = NULL; - } else { - /* Return channel manager object to caller... */ - *channel_mgr = chnl_mgr_obj; - } - return status; -} - -/* - * ======== bridge_chnl_destroy ======== - * Purpose: - * Close all open channels, and destroy the channel manager. - */ -int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr) -{ - int status = 0; - struct chnl_mgr *chnl_mgr_obj = hchnl_mgr; - u32 chnl_id; - - if (hchnl_mgr) { - /* Close all open channels: */ - for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels; - chnl_id++) { - status = - bridge_chnl_close(chnl_mgr_obj->channels - [chnl_id]); - if (status) - dev_dbg(bridge, "%s: Error status 0x%x\n", - __func__, status); - } - - /* Free channel manager object: */ - kfree(chnl_mgr_obj->channels); - - /* Set hchnl_mgr to NULL in device object. */ - dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL); - /* Free this Chnl Mgr object: */ - kfree(hchnl_mgr); - } else { - status = -EFAULT; - } - return status; -} - -/* - * ======== bridge_chnl_flush_io ======== - * purpose: - * Flushes all the outstanding data requests on a channel. 
- */ -int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout) -{ - int status = 0; - struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; - s8 chnl_mode = -1; - struct chnl_mgr *chnl_mgr_obj; - struct chnl_ioc chnl_ioc_obj; - /* Check args: */ - if (pchnl) { - if ((timeout == CHNL_IOCNOWAIT) - && CHNL_IS_OUTPUT(pchnl->chnl_mode)) { - status = -EINVAL; - } else { - chnl_mode = pchnl->chnl_mode; - chnl_mgr_obj = pchnl->chnl_mgr_obj; - } - } else { - status = -EFAULT; - } - if (!status) { - /* Note: Currently, if another thread continues to add IO - * requests to this channel, this function will continue to - * flush all such queued IO requests. */ - if (CHNL_IS_OUTPUT(chnl_mode) - && (pchnl->chnl_type == CHNL_PCPY)) { - /* Wait for IO completions, up to the specified - * timeout: */ - while (!list_empty(&pchnl->io_requests) && !status) { - status = bridge_chnl_get_ioc(chnl_obj, - timeout, &chnl_ioc_obj); - if (status) - continue; - - if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT) - status = -ETIMEDOUT; - - } - } else { - status = bridge_chnl_cancel_io(chnl_obj); - /* Now, leave the channel in the ready state: */ - pchnl->state &= ~CHNL_STATECANCEL; - } - } - return status; -} - -/* - * ======== bridge_chnl_get_info ======== - * Purpose: - * Retrieve information related to a channel. 
- */ -int bridge_chnl_get_info(struct chnl_object *chnl_obj, - struct chnl_info *channel_info) -{ - int status = 0; - struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; - if (channel_info != NULL) { - if (pchnl) { - /* Return the requested information: */ - channel_info->chnl_mgr = pchnl->chnl_mgr_obj; - channel_info->event_obj = pchnl->user_event; - channel_info->cnhl_id = pchnl->chnl_id; - channel_info->mode = pchnl->chnl_mode; - channel_info->bytes_tx = pchnl->bytes_moved; - channel_info->process = pchnl->process; - channel_info->sync_event = pchnl->sync_event; - channel_info->cio_cs = pchnl->cio_cs; - channel_info->cio_reqs = pchnl->cio_reqs; - channel_info->state = pchnl->state; - } else { - status = -EFAULT; - } - } else { - status = -EFAULT; - } - return status; -} - -/* - * ======== bridge_chnl_get_ioc ======== - * Optionally wait for I/O completion on a channel. Dequeue an I/O - * completion record, which contains information about the completed - * I/O request. - * Note: Ensures Channel Invariant (see notes above). 
- */ -int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout, - struct chnl_ioc *chan_ioc) -{ - int status = 0; - struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; - struct chnl_irp *chnl_packet_obj; - int stat_sync; - bool dequeue_ioc = true; - struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 }; - u8 *host_sys_buf = NULL; - struct bridge_dev_context *dev_ctxt; - struct dev_object *dev_obj; - - /* Check args: */ - if (!chan_ioc || !pchnl) { - status = -EFAULT; - } else if (timeout == CHNL_IOCNOWAIT) { - if (list_empty(&pchnl->io_completions)) - status = -EREMOTEIO; - - } - - dev_obj = dev_get_first(); - dev_get_bridge_context(dev_obj, &dev_ctxt); - if (!dev_ctxt) - status = -EFAULT; - - if (status) - goto func_end; - - ioc.status = CHNL_IOCSTATCOMPLETE; - if (timeout != - CHNL_IOCNOWAIT && list_empty(&pchnl->io_completions)) { - if (timeout == CHNL_IOCINFINITE) - timeout = SYNC_INFINITE; - - stat_sync = sync_wait_on_event(pchnl->sync_event, timeout); - if (stat_sync == -ETIME) { - /* No response from DSP */ - ioc.status |= CHNL_IOCSTATTIMEOUT; - dequeue_ioc = false; - } else if (stat_sync == -EPERM) { - /* This can occur when the user mode thread is - * aborted (^C), or when _VWIN32_WaitSingleObject() - * fails due to unknown causes. */ - /* Even though Wait failed, there may be something in - * the Q: */ - if (list_empty(&pchnl->io_completions)) { - ioc.status |= CHNL_IOCSTATCANCEL; - dequeue_ioc = false; - } - } - } - /* See comment in AddIOReq */ - spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock); - omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); - if (dequeue_ioc) { - /* Dequeue IOC and set chan_ioc; */ - chnl_packet_obj = list_first_entry(&pchnl->io_completions, - struct chnl_irp, link); - list_del(&chnl_packet_obj->link); - /* Update chan_ioc from channel state and chirp: */ - pchnl->cio_cs--; - /* - * If this is a zero-copy channel, then set IOC's pbuf - * to the DSP's address. 
This DSP address will get - * translated to user's virtual addr later. - */ - host_sys_buf = chnl_packet_obj->host_sys_buf; - ioc.buf = chnl_packet_obj->host_user_buf; - ioc.byte_size = chnl_packet_obj->byte_size; - ioc.buf_size = chnl_packet_obj->buf_size; - ioc.arg = chnl_packet_obj->arg; - ioc.status |= chnl_packet_obj->status; - /* Place the used chirp on the free list: */ - list_add_tail(&chnl_packet_obj->link, - &pchnl->free_packets_list); - } else { - ioc.buf = NULL; - ioc.byte_size = 0; - ioc.arg = 0; - ioc.buf_size = 0; - } - /* Ensure invariant: If any IOC's are queued for this channel... */ - if (!list_empty(&pchnl->io_completions)) { - /* Since DSPStream_Reclaim() does not take a timeout - * parameter, we pass the stream's timeout value to - * bridge_chnl_get_ioc. We cannot determine whether or not - * we have waited in user mode. Since the stream's timeout - * value may be non-zero, we still have to set the event. - * Therefore, this optimization is taken out. - * - * if (timeout == CHNL_IOCNOWAIT) { - * ... ensure event is set.. - * sync_set_event(pchnl->sync_event); - * } */ - sync_set_event(pchnl->sync_event); - } else { - /* else, if list is empty, ensure event is reset. 
*/ - sync_reset_event(pchnl->sync_event); - } - omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX); - spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock); - if (dequeue_ioc - && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) { - if (!(ioc.buf < (void *)USERMODE_ADDR)) - goto func_cont; - - /* If the addr is in user mode, then copy it */ - if (!host_sys_buf || !ioc.buf) { - status = -EFAULT; - goto func_cont; - } - if (!CHNL_IS_INPUT(pchnl->chnl_mode)) - goto func_cont1; - - /*host_user_buf */ - status = copy_to_user(ioc.buf, host_sys_buf, ioc.byte_size); - if (status) { - if (current->flags & PF_EXITING) - status = 0; - } - if (status) - status = -EFAULT; -func_cont1: - kfree(host_sys_buf); - } -func_cont: - /* Update User's IOC block: */ - *chan_ioc = ioc; -func_end: - return status; -} - -/* - * ======== bridge_chnl_get_mgr_info ======== - * Retrieve information related to the channel manager. - */ -int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id, - struct chnl_mgrinfo *mgr_info) -{ - struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr; - - if (!mgr_info || !hchnl_mgr) - return -EFAULT; - - if (ch_id > CHNL_MAXCHANNELS) - return -ECHRNG; - - /* Return the requested information: */ - mgr_info->chnl_obj = chnl_mgr_obj->channels[ch_id]; - mgr_info->open_channels = chnl_mgr_obj->open_channels; - mgr_info->type = chnl_mgr_obj->type; - /* total # of chnls */ - mgr_info->max_channels = chnl_mgr_obj->max_channels; - - return 0; -} - -/* - * ======== bridge_chnl_idle ======== - * Idles a particular channel. 
- */ -int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout, - bool flush_data) -{ - s8 chnl_mode; - struct chnl_mgr *chnl_mgr_obj; - int status = 0; - - chnl_mode = chnl_obj->chnl_mode; - chnl_mgr_obj = chnl_obj->chnl_mgr_obj; - - if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) { - /* Wait for IO completions, up to the specified timeout: */ - status = bridge_chnl_flush_io(chnl_obj, timeout); - } else { - status = bridge_chnl_cancel_io(chnl_obj); - - /* Reset the byte count and put channel back in ready state. */ - chnl_obj->bytes_moved = 0; - chnl_obj->state &= ~CHNL_STATECANCEL; - } - - return status; -} - -/* - * ======== bridge_chnl_open ======== - * Open a new half-duplex channel to the DSP board. - */ -int bridge_chnl_open(struct chnl_object **chnl, - struct chnl_mgr *hchnl_mgr, s8 chnl_mode, - u32 ch_id, const struct chnl_attr *pattrs) -{ - int status = 0; - struct chnl_mgr *chnl_mgr_obj = hchnl_mgr; - struct chnl_object *pchnl = NULL; - struct sync_object *sync_event = NULL; - - *chnl = NULL; - - /* Validate Args: */ - if (!pattrs->uio_reqs) - return -EINVAL; - - if (!hchnl_mgr) - return -EFAULT; - - if (ch_id != CHNL_PICKFREE) { - if (ch_id >= chnl_mgr_obj->max_channels) - return -ECHRNG; - if (chnl_mgr_obj->channels[ch_id] != NULL) - return -EALREADY; - } else { - /* Check for free channel */ - status = search_free_channel(chnl_mgr_obj, &ch_id); - if (status) - return status; - } - - - /* Create channel object: */ - pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL); - if (!pchnl) - return -ENOMEM; - - /* Protect queues from io_dpc: */ - pchnl->state = CHNL_STATECANCEL; - - /* Allocate initial IOR and IOC queues: */ - status = create_chirp_list(&pchnl->free_packets_list, - pattrs->uio_reqs); - if (status) - goto out_err; - - INIT_LIST_HEAD(&pchnl->io_requests); - INIT_LIST_HEAD(&pchnl->io_completions); - - pchnl->chnl_packets = pattrs->uio_reqs; - pchnl->cio_cs = 0; - pchnl->cio_reqs = 0; - - sync_event = kzalloc(sizeof(struct sync_object), 
GFP_KERNEL); - if (!sync_event) { - status = -ENOMEM; - goto out_err; - } - sync_init_event(sync_event); - - pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); - if (!pchnl->ntfy_obj) { - status = -ENOMEM; - goto out_err; - } - ntfy_init(pchnl->ntfy_obj); - - /* Initialize CHNL object fields: */ - pchnl->chnl_mgr_obj = chnl_mgr_obj; - pchnl->chnl_id = ch_id; - pchnl->chnl_mode = chnl_mode; - pchnl->user_event = sync_event; - pchnl->sync_event = sync_event; - /* Get the process handle */ - pchnl->process = current->tgid; - pchnl->cb_arg = 0; - pchnl->bytes_moved = 0; - /* Default to proc-copy */ - pchnl->chnl_type = CHNL_PCPY; - - /* Insert channel object in channel manager: */ - chnl_mgr_obj->channels[pchnl->chnl_id] = pchnl; - spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock); - chnl_mgr_obj->open_channels++; - spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); - /* Return result... */ - pchnl->state = CHNL_STATEREADY; - *chnl = pchnl; - - return status; - -out_err: - /* Free memory */ - free_chirp_list(&pchnl->io_completions); - free_chirp_list(&pchnl->io_requests); - free_chirp_list(&pchnl->free_packets_list); - - kfree(sync_event); - - if (pchnl->ntfy_obj) { - ntfy_delete(pchnl->ntfy_obj); - kfree(pchnl->ntfy_obj); - pchnl->ntfy_obj = NULL; - } - kfree(pchnl); - - return status; -} - -/* - * ======== bridge_chnl_register_notify ======== - * Registers for events on a particular channel. - */ -int bridge_chnl_register_notify(struct chnl_object *chnl_obj, - u32 event_mask, u32 notify_type, - struct dsp_notification *hnotification) -{ - int status = 0; - - - if (event_mask) - status = ntfy_register(chnl_obj->ntfy_obj, hnotification, - event_mask, notify_type); - else - status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification); - - return status; -} - -/* - * ======== create_chirp_list ======== - * Purpose: - * Initialize a queue of channel I/O Request/Completion packets. 
- * Parameters: - * list: Pointer to a list_head - * chirps: Number of Chirps to allocate. - * Returns: - * 0 if successful, error code otherwise. - * Requires: - * Ensures: - */ -static int create_chirp_list(struct list_head *list, u32 chirps) -{ - struct chnl_irp *chirp; - u32 i; - - INIT_LIST_HEAD(list); - - /* Make N chirps and place on queue. */ - for (i = 0; i < chirps; i++) { - chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL); - if (!chirp) - break; - list_add_tail(&chirp->link, list); - } - - /* If we couldn't allocate all chirps, free those allocated: */ - if (i != chirps) { - free_chirp_list(list); - return -ENOMEM; - } - - return 0; -} - -/* - * ======== free_chirp_list ======== - * Purpose: - * Free the queue of Chirps. - */ -static void free_chirp_list(struct list_head *chirp_list) -{ - struct chnl_irp *chirp, *tmp; - - list_for_each_entry_safe(chirp, tmp, chirp_list, link) { - list_del(&chirp->link); - kfree(chirp); - } -} - -/* - * ======== search_free_channel ======== - * Search for a free channel slot in the array of channel pointers. - */ -static int search_free_channel(struct chnl_mgr *chnl_mgr_obj, - u32 *chnl) -{ - int status = -ENOSR; - u32 i; - - for (i = 0; i < chnl_mgr_obj->max_channels; i++) { - if (chnl_mgr_obj->channels[i] == NULL) { - status = 0; - *chnl = i; - break; - } - } - - return status; -} diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c deleted file mode 100644 index a1aca4416ca7..000000000000 --- a/drivers/staging/tidspbridge/core/dsp-clock.c +++ /dev/null @@ -1,391 +0,0 @@ -/* - * clk.c - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Clock and Timer services. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -#define L4_34XX_BASE 0x48000000 - -#include <linux/types.h> - -/* ----------------------------------- Host OS */ -#include <dspbridge/host_os.h> -#include <plat/dmtimer.h> -#include <linux/platform_data/asoc-ti-mcbsp.h> - -/* ----------------------------------- DSP/BIOS Bridge */ -#include <dspbridge/dbdefs.h> -#include <dspbridge/drv.h> -#include <dspbridge/dev.h> -#include "_tiomap.h" - -/* ----------------------------------- This */ -#include <dspbridge/clk.h> - -/* ----------------------------------- Defines, Data Structures, Typedefs */ - -#define OMAP_SSI_OFFSET 0x58000 -#define OMAP_SSI_SIZE 0x1000 -#define OMAP_SSI_SYSCONFIG_OFFSET 0x10 - -#define SSI_AUTOIDLE (1 << 0) -#define SSI_SIDLE_SMARTIDLE (2 << 3) -#define SSI_MIDLE_NOIDLE (1 << 12) - -/* Clk types requested by the dsp */ -#define IVA2_CLK 0 -#define GPT_CLK 1 -#define WDT_CLK 2 -#define MCBSP_CLK 3 -#define SSI_CLK 4 - -/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */ -#define DMT_ID(id) ((id) + 4) -#define DM_TIMER_CLOCKS 4 - -/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */ -#define MCBSP_ID(id) ((id) - 6) - -static struct omap_dm_timer *timer[4]; - -struct clk *iva2_clk; - -struct dsp_ssi { - struct clk *sst_fck; - struct clk *ssr_fck; - struct clk *ick; -}; - -static struct dsp_ssi ssi; - -static u32 dsp_clocks; - -static inline u32 is_dsp_clk_active(u32 clk, u8 id) -{ - return clk & (1 << id); -} - -static inline void set_dsp_clk_active(u32 *clk, u8 id) -{ - *clk |= (1 << id); -} - -static inline void set_dsp_clk_inactive(u32 *clk, u8 id) -{ - *clk &= ~(1 << id); -} - -static s8 get_clk_type(u8 id) -{ - s8 type; - - if (id == DSP_CLK_IVA2) - type = IVA2_CLK; - else if (id <= DSP_CLK_GPT8) - type = GPT_CLK; - else if (id == DSP_CLK_WDT3) - type = WDT_CLK; - else if (id <= 
DSP_CLK_MCBSP5) - type = MCBSP_CLK; - else if (id == DSP_CLK_SSI) - type = SSI_CLK; - else - type = -1; - - return type; -} - -/* - * ======== dsp_clk_exit ======== - * Purpose: - * Cleanup CLK module. - */ -void dsp_clk_exit(void) -{ - int i; - - dsp_clock_disable_all(dsp_clocks); - - for (i = 0; i < DM_TIMER_CLOCKS; i++) - omap_dm_timer_free(timer[i]); - - clk_unprepare(iva2_clk); - clk_put(iva2_clk); - clk_unprepare(ssi.sst_fck); - clk_put(ssi.sst_fck); - clk_unprepare(ssi.ssr_fck); - clk_put(ssi.ssr_fck); - clk_unprepare(ssi.ick); - clk_put(ssi.ick); -} - -/* - * ======== dsp_clk_init ======== - * Purpose: - * Initialize CLK module. - */ -void dsp_clk_init(void) -{ - static struct platform_device dspbridge_device; - int i, id; - - dspbridge_device.dev.bus = &platform_bus_type; - - for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++) - timer[i] = omap_dm_timer_request_specific(id); - - iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck"); - if (IS_ERR(iva2_clk)) - dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk); - else - clk_prepare(iva2_clk); - - ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck"); - ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck"); - ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick"); - - if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick)) { - dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n", - ssi.sst_fck, ssi.ssr_fck, ssi.ick); - } else { - clk_prepare(ssi.sst_fck); - clk_prepare(ssi.ssr_fck); - clk_prepare(ssi.ick); - } -} - -/** - * dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout - * @clk_id: GP Timer clock id. - * @load: Overflow value. - * - * Sets an overflow interrupt for the desired GPT waiting for a timeout - * of 5 msecs for the interrupt to occur. 
- */ -void dsp_gpt_wait_overflow(short int clk_id, unsigned int load) -{ - struct omap_dm_timer *gpt = timer[clk_id - 1]; - unsigned long timeout; - - if (!gpt) - return; - - /* Enable overflow interrupt */ - omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW); - - /* - * Set counter value to overflow counter after - * one tick and start timer. - */ - omap_dm_timer_set_load_start(gpt, 0, load); - - /* Wait 80us for timer to overflow */ - udelay(80); - - timeout = msecs_to_jiffies(5); - /* Check interrupt status and wait for interrupt */ - while (!(omap_dm_timer_read_status(gpt) & OMAP_TIMER_INT_OVERFLOW)) { - if (time_is_after_jiffies(timeout)) { - pr_err("%s: GPTimer interrupt failed\n", __func__); - break; - } - } -} - -/* - * ======== dsp_clk_enable ======== - * Purpose: - * Enable Clock . - * - */ -int dsp_clk_enable(enum dsp_clk_id clk_id) -{ - int status = 0; - - if (is_dsp_clk_active(dsp_clocks, clk_id)) { - dev_err(bridge, "WARN: clock id %d already enabled\n", clk_id); - goto out; - } - - switch (get_clk_type(clk_id)) { - case IVA2_CLK: - clk_enable(iva2_clk); - break; - case GPT_CLK: - status = omap_dm_timer_start(timer[clk_id - 1]); - break; -#ifdef CONFIG_SND_OMAP_SOC_MCBSP - case MCBSP_CLK: - omap_mcbsp_request(MCBSP_ID(clk_id)); - omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); - break; -#endif - case WDT_CLK: - dev_err(bridge, "ERROR: DSP requested to enable WDT3 clk\n"); - break; - case SSI_CLK: - clk_enable(ssi.sst_fck); - clk_enable(ssi.ssr_fck); - clk_enable(ssi.ick); - - /* - * The SSI module need to configured not to have the Forced - * idle for master interface. 
If it is set to forced idle, - * the SSI module is transitioning to standby thereby causing - * the client in the DSP hang waiting for the SSI module to - * be active after enabling the clocks - */ - ssi_clk_prepare(true); - break; - default: - dev_err(bridge, "Invalid clock id for enable\n"); - status = -EPERM; - } - - if (!status) - set_dsp_clk_active(&dsp_clocks, clk_id); - -out: - return status; -} - -/** - * dsp_clock_enable_all - Enable clocks used by the DSP - * @dev_context Driver's device context strucure - * - * This function enables all the peripheral clocks that were requested by DSP. - */ -u32 dsp_clock_enable_all(u32 dsp_per_clocks) -{ - u32 clk_id; - u32 status = -EPERM; - - for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) { - if (is_dsp_clk_active(dsp_per_clocks, clk_id)) - status = dsp_clk_enable(clk_id); - } - - return status; -} - -/* - * ======== dsp_clk_disable ======== - * Purpose: - * Disable the clock. - * - */ -int dsp_clk_disable(enum dsp_clk_id clk_id) -{ - int status = 0; - - if (!is_dsp_clk_active(dsp_clocks, clk_id)) { - dev_err(bridge, "ERR: clock id %d already disabled\n", clk_id); - goto out; - } - - switch (get_clk_type(clk_id)) { - case IVA2_CLK: - clk_disable(iva2_clk); - break; - case GPT_CLK: - status = omap_dm_timer_stop(timer[clk_id - 1]); - break; -#ifdef CONFIG_SND_OMAP_SOC_MCBSP - case MCBSP_CLK: - omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC); - omap_mcbsp_free(MCBSP_ID(clk_id)); - break; -#endif - case WDT_CLK: - dev_err(bridge, "ERROR: DSP requested to disable WDT3 clk\n"); - break; - case SSI_CLK: - ssi_clk_prepare(false); - ssi_clk_prepare(false); - clk_disable(ssi.sst_fck); - clk_disable(ssi.ssr_fck); - clk_disable(ssi.ick); - break; - default: - dev_err(bridge, "Invalid clock id for disable\n"); - status = -EPERM; - } - - if (!status) - set_dsp_clk_inactive(&dsp_clocks, clk_id); - -out: - return status; -} - -/** - * dsp_clock_disable_all - Disable all active clocks - * @dev_context 
Driver's device context structure - * - * This function disables all the peripheral clocks that were enabled by DSP. - * It is meant to be called only when DSP is entering hibernation or when DSP - * is in error state. - */ -u32 dsp_clock_disable_all(u32 dsp_per_clocks) -{ - u32 clk_id; - u32 status = -EPERM; - - for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) { - if (is_dsp_clk_active(dsp_per_clocks, clk_id)) - status = dsp_clk_disable(clk_id); - } - - return status; -} - -u32 dsp_clk_get_iva2_rate(void) -{ - u32 clk_speed_khz; - - clk_speed_khz = clk_get_rate(iva2_clk); - clk_speed_khz /= 1000; - dev_dbg(bridge, "%s: clk speed Khz = %d\n", __func__, clk_speed_khz); - - return clk_speed_khz; -} - -void ssi_clk_prepare(bool FLAG) -{ - void __iomem *ssi_base; - unsigned int value; - - ssi_base = ioremap(L4_34XX_BASE + OMAP_SSI_OFFSET, OMAP_SSI_SIZE); - if (!ssi_base) { - pr_err("%s: error, SSI not configured\n", __func__); - return; - } - - if (FLAG) { - /* Set Autoidle, SIDLEMode to smart idle, and MIDLEmode to - * no idle - */ - value = SSI_AUTOIDLE | SSI_SIDLE_SMARTIDLE | SSI_MIDLE_NOIDLE; - } else { - /* Set Autoidle, SIDLEMode to forced idle, and MIDLEmode to - * forced idle - */ - value = SSI_AUTOIDLE; - } - - __raw_writel(value, ssi_base + OMAP_SSI_SYSCONFIG_OFFSET); - iounmap(ssi_base); -} - diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c deleted file mode 100644 index c2829aa7780f..000000000000 --- a/drivers/staging/tidspbridge/core/io_sm.c +++ /dev/null @@ -1,2245 +0,0 @@ -/* - * io_sm.c - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * IO dispatcher for a shared memory channel driver. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -/* - * Channel Invariant: - * There is an important invariant condition which must be maintained per - * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of - * which may cause timeouts and/or failure of the sync_wait_on_event - * function. - */ -#include <linux/types.h> -#include <linux/list.h> - -/* Host OS */ -#include <dspbridge/host_os.h> -#include <linux/workqueue.h> - -/* ----------------------------------- DSP/BIOS Bridge */ -#include <dspbridge/dbdefs.h> - -/* Services Layer */ -#include <dspbridge/ntfy.h> -#include <dspbridge/sync.h> - -/* Hardware Abstraction Layer */ -#include <hw_defs.h> -#include <hw_mmu.h> - -/* Bridge Driver */ -#include <dspbridge/dspdeh.h> -#include <dspbridge/dspio.h> -#include <dspbridge/dspioctl.h> -#include <dspbridge/wdt.h> -#include <_tiomap.h> -#include <tiomap_io.h> -#include <_tiomap_pwr.h> - -/* Platform Manager */ -#include <dspbridge/cod.h> -#include <dspbridge/node.h> -#include <dspbridge/dev.h> - -/* Others */ -#include <dspbridge/rms_sh.h> -#include <dspbridge/mgr.h> -#include <dspbridge/drv.h> -#include "_cmm.h" -#include "module_list.h" - -/* This */ -#include <dspbridge/io_sm.h> -#include "_msg_sm.h" - -/* Defines, Data Structures, Typedefs */ -#define OUTPUTNOTREADY 0xffff -#define NOTENABLED 0xffff /* Channel(s) not enabled */ - -#define EXTEND "_EXT_END" - -#define SWAP_WORD(x) (x) -#define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */ - -#define MAX_PM_REQS 32 - -#define MMU_FAULT_HEAD1 0xa5a5a5a5 -#define MMU_FAULT_HEAD2 0x96969696 -#define POLL_MAX 1000 -#define MAX_MMU_DBGBUFF 10240 - -/* IO Manager: only one created per board */ -struct io_mgr { - /* These four fields must be the first fields in a io_mgr_ struct */ - /* Bridge device context */ - struct 
bridge_dev_context *bridge_context; - /* Function interface to Bridge driver */ - struct bridge_drv_interface *intf_fxns; - struct dev_object *dev_obj; /* Device this board represents */ - - /* These fields initialized in bridge_io_create() */ - struct chnl_mgr *chnl_mgr; - struct shm *shared_mem; /* Shared Memory control */ - u8 *input; /* Address of input channel */ - u8 *output; /* Address of output channel */ - struct msg_mgr *msg_mgr; /* Message manager */ - /* Msg control for from DSP messages */ - struct msg_ctrl *msg_input_ctrl; - /* Msg control for to DSP messages */ - struct msg_ctrl *msg_output_ctrl; - u8 *msg_input; /* Address of input messages */ - u8 *msg_output; /* Address of output messages */ - u32 sm_buf_size; /* Size of a shared memory I/O channel */ - bool shared_irq; /* Is this IRQ shared? */ - u32 word_size; /* Size in bytes of DSP word */ - u16 intr_val; /* Interrupt value */ - /* Private extnd proc info; mmu setup */ - struct mgr_processorextinfo ext_proc_info; - struct cmm_object *cmm_mgr; /* Shared Mem Mngr */ - struct work_struct io_workq; /* workqueue */ -#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) - u32 trace_buffer_begin; /* Trace message start address */ - u32 trace_buffer_end; /* Trace message end address */ - u32 trace_buffer_current; /* Trace message current address */ - u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */ - u8 *msg; - u32 gpp_va; - u32 dsp_va; -#endif - /* IO Dpc */ - u32 dpc_req; /* Number of requested DPC's. */ - u32 dpc_sched; /* Number of executed DPC's. 
*/ - struct tasklet_struct dpc_tasklet; - spinlock_t dpc_lock; - -}; - -struct shm_symbol_val { - u32 shm_base; - u32 shm_lim; - u32 msg_base; - u32 msg_lim; - u32 shm0_end; - u32 dyn_ext; - u32 ext_end; -}; - -/* Function Prototypes */ -static void io_dispatch_pm(struct io_mgr *pio_mgr); -static void notify_chnl_complete(struct chnl_object *pchnl, - struct chnl_irp *chnl_packet_obj); -static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, - u8 io_mode); -static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, - u8 io_mode); -static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr); -static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr); -static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj, - struct chnl_object *pchnl, u32 mask); - -/* Bus Addr (cached kernel) */ -static int register_shm_segs(struct io_mgr *hio_mgr, - struct cod_manager *cod_man, - u32 dw_gpp_base_pa); - -static inline void set_chnl_free(struct shm *sm, u32 chnl) -{ - sm->host_free_mask &= ~(1 << chnl); -} - -static inline void set_chnl_busy(struct shm *sm, u32 chnl) -{ - sm->host_free_mask |= 1 << chnl; -} - - -/* - * ======== bridge_io_create ======== - * Create an IO manager object. - */ -int bridge_io_create(struct io_mgr **io_man, - struct dev_object *hdev_obj, - const struct io_attrs *mgr_attrts) -{ - struct io_mgr *pio_mgr = NULL; - struct bridge_dev_context *hbridge_context = NULL; - struct cfg_devnode *dev_node_obj; - struct chnl_mgr *hchnl_mgr; - u8 dev_type; - - /* Check requirements */ - if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0) - return -EFAULT; - - *io_man = NULL; - - dev_get_chnl_mgr(hdev_obj, &hchnl_mgr); - if (!hchnl_mgr || hchnl_mgr->iomgr) - return -EFAULT; - - /* - * Message manager will be created when a file is loaded, since - * size of message buffer in shared memory is configurable in - * the base image. 
- */ - dev_get_bridge_context(hdev_obj, &hbridge_context); - if (!hbridge_context) - return -EFAULT; - - dev_get_dev_type(hdev_obj, &dev_type); - - /* Allocate IO manager object */ - pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL); - if (!pio_mgr) - return -ENOMEM; - - /* Initialize chnl_mgr object */ - pio_mgr->chnl_mgr = hchnl_mgr; - pio_mgr->word_size = mgr_attrts->word_size; - - if (dev_type == DSP_UNIT) { - /* Create an IO DPC */ - tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr); - - /* Initialize DPC counters */ - pio_mgr->dpc_req = 0; - pio_mgr->dpc_sched = 0; - - spin_lock_init(&pio_mgr->dpc_lock); - - if (dev_get_dev_node(hdev_obj, &dev_node_obj)) { - bridge_io_destroy(pio_mgr); - return -EIO; - } - } - - pio_mgr->bridge_context = hbridge_context; - pio_mgr->shared_irq = mgr_attrts->irq_shared; - if (dsp_wdt_init()) { - bridge_io_destroy(pio_mgr); - return -EPERM; - } - - /* Return IO manager object to caller... */ - hchnl_mgr->iomgr = pio_mgr; - *io_man = pio_mgr; - - return 0; -} - -/* - * ======== bridge_io_destroy ======== - * Purpose: - * Disable interrupts, destroy the IO manager. 
- */ -int bridge_io_destroy(struct io_mgr *hio_mgr) -{ - int status = 0; - if (hio_mgr) { - /* Free IO DPC object */ - tasklet_kill(&hio_mgr->dpc_tasklet); - -#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) - kfree(hio_mgr->msg); -#endif - dsp_wdt_exit(); - /* Free this IO manager object */ - kfree(hio_mgr); - } else { - status = -EFAULT; - } - - return status; -} - -struct shm_symbol_val *_get_shm_symbol_values(struct io_mgr *hio_mgr) -{ - struct shm_symbol_val *s; - struct cod_manager *cod_man; - int status; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) - return ERR_PTR(-ENOMEM); - - status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man); - if (status) - goto free_symbol; - - /* Get start and length of channel part of shared memory */ - status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM, - &s->shm_base); - if (status) - goto free_symbol; - - status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM, - &s->shm_lim); - if (status) - goto free_symbol; - - if (s->shm_lim <= s->shm_base) { - status = -EINVAL; - goto free_symbol; - } - - /* Get start and length of message part of shared memory */ - status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM, - &s->msg_base); - if (status) - goto free_symbol; - - status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM, - &s->msg_lim); - if (status) - goto free_symbol; - - if (s->msg_lim <= s->msg_base) { - status = -EINVAL; - goto free_symbol; - } - -#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE - status = cod_get_sym_value(cod_man, DSP_TRACESEC_END, &s->shm0_end); -#else - status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, &s->shm0_end); -#endif - if (status) - goto free_symbol; - - status = cod_get_sym_value(cod_man, DYNEXTBASE, &s->dyn_ext); - if (status) - goto free_symbol; - - status = cod_get_sym_value(cod_man, EXTEND, &s->ext_end); - if (status) - goto free_symbol; - - return s; - -free_symbol: - kfree(s); - return ERR_PTR(status); -} - -/* - * ======== bridge_io_on_loaded ======== - * 
Purpose: - * Called when a new program is loaded to get shared memory buffer - * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit - * are in DSP address units. - */ -int bridge_io_on_loaded(struct io_mgr *hio_mgr) -{ - struct bridge_dev_context *dc = hio_mgr->bridge_context; - struct cfg_hostres *cfg_res = dc->resources; - struct bridge_ioctl_extproc *eproc; - struct cod_manager *cod_man; - struct chnl_mgr *hchnl_mgr; - struct msg_mgr *hmsg_mgr; - struct shm_symbol_val *s; - int status; - u8 num_procs; - s32 ndx; - u32 i; - u32 mem_sz, msg_sz, pad_sz, shm_sz, shm_base_offs; - u32 seg0_sz, seg1_sz; - u32 pa, va, da; - u32 pa_curr, va_curr, da_curr; - u32 bytes; - u32 all_bits = 0; - u32 page_size[] = { - HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, - HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB - }; - u32 map_attrs = DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR | - DSP_MAPELEMSIZE32 | DSP_MAPDONOTLOCK; - - status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man); - if (status) - return status; - - hchnl_mgr = hio_mgr->chnl_mgr; - - /* The message manager is destroyed when the board is stopped */ - dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr); - hmsg_mgr = hio_mgr->msg_mgr; - if (!hchnl_mgr || !hmsg_mgr) - return -EFAULT; - - if (hio_mgr->shared_mem) - hio_mgr->shared_mem = NULL; - - s = _get_shm_symbol_values(hio_mgr); - if (IS_ERR(s)) - return PTR_ERR(s); - - /* Get total length in bytes */ - shm_sz = (s->shm_lim - s->shm_base + 1) * hio_mgr->word_size; - - /* Calculate size of a PROCCOPY shared memory region */ - dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n", - __func__, shm_sz - sizeof(struct shm)); - - /* Length (bytes) of messaging part of shared memory */ - msg_sz = (s->msg_lim - s->msg_base + 1) * hio_mgr->word_size; - - /* Total length (bytes) of shared memory: chnl + msg */ - mem_sz = shm_sz + msg_sz; - - /* Get memory reserved in host resources */ - (void)mgr_enum_processor_info(0, - (struct dsp_processorinfo *) - &hio_mgr->ext_proc_info, 
- sizeof(struct mgr_processorextinfo), - &num_procs); - - /* IO supports only one DSP for now */ - if (num_procs != 1) { - status = -EINVAL; - goto free_symbol; - } - - /* The first MMU TLB entry(TLB_0) in DCD is ShmBase */ - pa = cfg_res->mem_phys[1]; - va = cfg_res->mem_base[1]; - - /* This is the virtual uncached ioremapped address!!! */ - /* Why can't we directly take the DSPVA from the symbols? */ - da = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt; - seg0_sz = (s->shm0_end - da) * hio_mgr->word_size; - seg1_sz = (s->ext_end - s->dyn_ext) * hio_mgr->word_size; - - /* 4K align */ - seg1_sz = (seg1_sz + 0xFFF) & (~0xFFFUL); - - /* 64K align */ - seg0_sz = (seg0_sz + 0xFFFF) & (~0xFFFFUL); - - pad_sz = UL_PAGE_ALIGN_SIZE - ((pa + seg1_sz) % UL_PAGE_ALIGN_SIZE); - if (pad_sz == UL_PAGE_ALIGN_SIZE) - pad_sz = 0x0; - - dev_dbg(bridge, "%s: pa %x, va %x, da %x\n", __func__, pa, va, da); - dev_dbg(bridge, - "shm0_end %x, dyn_ext %x, ext_end %x, seg0_sz %x seg1_sz %x\n", - s->shm0_end, s->dyn_ext, s->ext_end, seg0_sz, seg1_sz); - - if ((seg0_sz + seg1_sz + pad_sz) > cfg_res->mem_length[1]) { - pr_err("%s: shm Error, reserved 0x%x required 0x%x\n", - __func__, cfg_res->mem_length[1], - seg0_sz + seg1_sz + pad_sz); - status = -ENOMEM; - goto free_symbol; - } - - pa_curr = pa; - va_curr = s->dyn_ext * hio_mgr->word_size; - da_curr = va; - bytes = seg1_sz; - - /* - * Try to fit into TLB entries. If not possible, push them to page - * tables. It is quite possible that if sections are not on - * bigger page boundary, we may end up making several small pages. - * So, push them onto page tables, if that is the case. - */ - while (bytes) { - /* - * To find the max. page size with which both PA & VA are - * aligned. 
- */ - all_bits = pa_curr | va_curr; - dev_dbg(bridge, - "seg all_bits %x, pa_curr %x, va_curr %x, bytes %x\n", - all_bits, pa_curr, va_curr, bytes); - - for (i = 0; i < 4; i++) { - if ((bytes >= page_size[i]) && - ((all_bits & (page_size[i] - 1)) == 0)) { - status = hio_mgr->intf_fxns->brd_mem_map(dc, - pa_curr, va_curr, - page_size[i], map_attrs, - NULL); - if (status) - goto free_symbol; - - pa_curr += page_size[i]; - va_curr += page_size[i]; - da_curr += page_size[i]; - bytes -= page_size[i]; - /* - * Don't try smaller sizes. Hopefully we have - * reached an address aligned to a bigger page - * size. - */ - break; - } - } - } - - pa_curr += pad_sz; - va_curr += pad_sz; - da_curr += pad_sz; - bytes = seg0_sz; - va_curr = da * hio_mgr->word_size; - - eproc = kzalloc(sizeof(*eproc) * BRDIOCTL_NUMOFMMUTLB, GFP_KERNEL); - if (!eproc) { - status = -ENOMEM; - goto free_symbol; - } - - ndx = 0; - /* Configure the TLB entries for the next cacheable segment */ - while (bytes) { - /* - * To find the max. page size with which both PA & VA are - * aligned. - */ - all_bits = pa_curr | va_curr; - dev_dbg(bridge, - "seg1 all_bits %x, pa_curr %x, va_curr %x, bytes %x\n", - all_bits, pa_curr, va_curr, bytes); - - for (i = 0; i < 4; i++) { - if (!(bytes >= page_size[i]) || - !((all_bits & (page_size[i] - 1)) == 0)) - continue; - - if (ndx >= MAX_LOCK_TLB_ENTRIES) { - status = hio_mgr->intf_fxns->brd_mem_map(dc, - pa_curr, va_curr, - page_size[i], map_attrs, - NULL); - dev_dbg(bridge, - "PTE pa %x va %x dsp_va %x sz %x\n", - eproc[ndx].gpp_pa, - eproc[ndx].gpp_va, - eproc[ndx].dsp_va * - hio_mgr->word_size, page_size[i]); - if (status) - goto free_eproc; - } - - /* This is the physical address written to DSP MMU */ - eproc[ndx].gpp_pa = pa_curr; - - /* - * This is the virtual uncached ioremapped - * address!!! 
- */ - eproc[ndx].gpp_va = da_curr; - eproc[ndx].dsp_va = va_curr / hio_mgr->word_size; - eproc[ndx].size = page_size[i]; - eproc[ndx].endianism = HW_LITTLE_ENDIAN; - eproc[ndx].elem_size = HW_ELEM_SIZE16BIT; - eproc[ndx].mixed_mode = HW_MMU_CPUES; - dev_dbg(bridge, "%s: tlb pa %x va %x dsp_va %x sz %x\n", - __func__, eproc[ndx].gpp_pa, - eproc[ndx].gpp_va, - eproc[ndx].dsp_va * hio_mgr->word_size, - page_size[i]); - ndx++; - - pa_curr += page_size[i]; - va_curr += page_size[i]; - da_curr += page_size[i]; - bytes -= page_size[i]; - /* - * Don't try smaller sizes. Hopefully we have reached - * an address aligned to a bigger page size. - */ - break; - } - } - - /* - * Copy remaining entries from CDB. All entries are 1 MB and - * should not conflict with shm entries on MPU or DSP side. - */ - for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) { - struct mgr_processorextinfo *ep = &hio_mgr->ext_proc_info; - u32 word_sz = hio_mgr->word_size; - - if (ep->ty_tlb[i].gpp_phys == 0) - continue; - - if ((ep->ty_tlb[i].gpp_phys > pa - 0x100000 && - ep->ty_tlb[i].gpp_phys <= pa + seg0_sz) || - (ep->ty_tlb[i].dsp_virt > da - 0x100000 / word_sz && - ep->ty_tlb[i].dsp_virt <= da + seg0_sz / word_sz)) { - dev_dbg(bridge, - "err cdb%d pa %x da %x shm pa %x da %x sz %x\n", - i, ep->ty_tlb[i].gpp_phys, - ep->ty_tlb[i].dsp_virt, pa, da, seg0_sz); - status = -EPERM; - goto free_eproc; - } - - if (ndx >= MAX_LOCK_TLB_ENTRIES) { - status = hio_mgr->intf_fxns->brd_mem_map(dc, - ep->ty_tlb[i].gpp_phys, - ep->ty_tlb[i].dsp_virt, - 0x100000, map_attrs, NULL); - if (status) - goto free_eproc; - } - - eproc[ndx].dsp_va = ep->ty_tlb[i].dsp_virt; - eproc[ndx].gpp_pa = ep->ty_tlb[i].gpp_phys; - eproc[ndx].gpp_va = 0; - - /* 1 MB */ - eproc[ndx].size = 0x100000; - dev_dbg(bridge, "shm MMU entry pa %x da 0x%x\n", - eproc[ndx].gpp_pa, eproc[ndx].dsp_va); - ndx++; - } - - /* Map the L4 peripherals */ - i = 0; - while (l4_peripheral_table[i].phys_addr) { - status = 
hio_mgr->intf_fxns->brd_mem_map(dc, - l4_peripheral_table[i].phys_addr, - l4_peripheral_table[i].dsp_virt_addr, - HW_PAGE_SIZE4KB, map_attrs, NULL); - if (status) - goto free_eproc; - i++; - } - - for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { - eproc[i].dsp_va = 0; - eproc[i].gpp_pa = 0; - eproc[i].gpp_va = 0; - eproc[i].size = 0; - } - - /* - * Set the shm physical address entry (grayed out in CDB file) - * to the virtual uncached ioremapped address of shm reserved - * on MPU. - */ - hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys = - (va + seg1_sz + pad_sz); - - /* - * Need shm Phys addr. IO supports only one DSP for now: - * num_procs = 1. - */ - if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys) - return -EFAULT; - - if (eproc[0].dsp_va > s->shm_base) - return -EPERM; - - /* shm_base may not be at ul_dsp_va address */ - shm_base_offs = (s->shm_base - eproc[0].dsp_va) * - hio_mgr->word_size; - /* - * bridge_dev_ctrl() will set dev context dsp-mmu info. In - * bridge_brd_start() the MMU will be re-programed with MMU - * DSPVa-GPPPa pair info while DSP is in a known - * (reset) state. 
- */ - status = hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context, - BRDIOCTL_SETMMUCONFIG, eproc); - if (status) - goto free_eproc; - - s->shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys; - s->shm_base += shm_base_offs; - s->shm_base = (u32) MEM_LINEAR_ADDRESS((void *)s->shm_base, - mem_sz); - if (!s->shm_base) { - status = -EFAULT; - goto free_eproc; - } - - /* Register SM */ - status = register_shm_segs(hio_mgr, cod_man, eproc[0].gpp_pa); - - hio_mgr->shared_mem = (struct shm *)s->shm_base; - hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm); - hio_mgr->output = hio_mgr->input + (shm_sz - - sizeof(struct shm)) / 2; - hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input; - - /* Set up Shared memory addresses for messaging */ - hio_mgr->msg_input_ctrl = - (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem + shm_sz); - hio_mgr->msg_input = - (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl); - hio_mgr->msg_output_ctrl = - (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl + - msg_sz / 2); - hio_mgr->msg_output = - (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl); - hmsg_mgr->max_msgs = - ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input) / - sizeof(struct msg_dspmsg); - - dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, " - "output %p, msg_input_ctrl %p, msg_input %p, " - "msg_output_ctrl %p, msg_output %p\n", - (u8 *) hio_mgr->shared_mem, hio_mgr->input, - hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl, - hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl, - hio_mgr->msg_output); - dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n", - hmsg_mgr->max_msgs); - memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm)); - -#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) - /* Get the start address of trace buffer */ - status = cod_get_sym_value(cod_man, SYS_PUTCBEG, - &hio_mgr->trace_buffer_begin); - if (status) - goto free_eproc; - - hio_mgr->gpp_read_pointer = - hio_mgr->trace_buffer_begin = - (va + seg1_sz 
+ pad_sz) + - (hio_mgr->trace_buffer_begin - da); - - /* Get the end address of trace buffer */ - status = cod_get_sym_value(cod_man, SYS_PUTCEND, - &hio_mgr->trace_buffer_end); - if (status) - goto free_eproc; - - hio_mgr->trace_buffer_end = - (va + seg1_sz + pad_sz) + - (hio_mgr->trace_buffer_end - da); - - /* Get the current address of DSP write pointer */ - status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT, - &hio_mgr->trace_buffer_current); - if (status) - goto free_eproc; - - hio_mgr->trace_buffer_current = - (va + seg1_sz + pad_sz) + - (hio_mgr->trace_buffer_current - da); - - /* Calculate the size of trace buffer */ - kfree(hio_mgr->msg); - hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end - - hio_mgr->trace_buffer_begin) * - hio_mgr->word_size) + 2, GFP_KERNEL); - if (!hio_mgr->msg) { - status = -ENOMEM; - goto free_eproc; - } - - hio_mgr->dsp_va = da; - hio_mgr->gpp_va = (va + seg1_sz + pad_sz); -#endif - -free_eproc: - kfree(eproc); -free_symbol: - kfree(s); - - return status; -} - -/* - * ======== io_buf_size ======== - * Size of shared memory I/O channel. - */ -u32 io_buf_size(struct io_mgr *hio_mgr) -{ - if (hio_mgr) - return hio_mgr->sm_buf_size; - else - return 0; -} - -/* - * ======== io_cancel_chnl ======== - * Cancel IO on a given PCPY channel. 
- */ -void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl) -{ - struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr; - struct shm *sm; - - if (!hio_mgr) - goto func_end; - sm = hio_mgr->shared_mem; - - /* Inform DSP that we have no more buffers on this channel */ - set_chnl_free(sm, chnl); - - sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); -func_end: - return; -} - - -/* - * ======== io_dispatch_pm ======== - * Performs I/O dispatch on PM related messages from DSP - */ -static void io_dispatch_pm(struct io_mgr *pio_mgr) -{ - int status; - u32 parg[2]; - - /* Perform Power message processing here */ - parg[0] = pio_mgr->intr_val; - - /* Send the command to the Bridge clk/pwr manager to handle */ - if (parg[0] == MBX_PM_HIBERNATE_EN) { - dev_dbg(bridge, "PM: Hibernate command\n"); - status = pio_mgr->intf_fxns-> - dev_cntrl(pio_mgr->bridge_context, - BRDIOCTL_PWR_HIBERNATE, parg); - if (status) - pr_err("%s: hibernate cmd failed 0x%x\n", - __func__, status); - } else if (parg[0] == MBX_PM_OPP_REQ) { - parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt; - dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]); - status = pio_mgr->intf_fxns-> - dev_cntrl(pio_mgr->bridge_context, - BRDIOCTL_CONSTRAINT_REQUEST, parg); - if (status) - dev_dbg(bridge, "PM: Failed to set constraint " - "= 0x%x\n", parg[1]); - } else { - dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n", - parg[0]); - status = pio_mgr->intf_fxns-> - dev_cntrl(pio_mgr->bridge_context, - BRDIOCTL_CLK_CTRL, parg); - if (status) - dev_dbg(bridge, "PM: Failed to ctrl the DSP clk" - "= 0x%x\n", *parg); - } -} - -/* - * ======== io_dpc ======== - * Deferred procedure call for shared memory channel driver ISR. Carries - * out the dispatch of I/O as a non-preemptible event. It can only be - * pre-empted by an ISR. 
- */ -void io_dpc(unsigned long ref_data) -{ - struct io_mgr *pio_mgr = (struct io_mgr *)ref_data; - struct chnl_mgr *chnl_mgr_obj; - struct msg_mgr *msg_mgr_obj; - struct deh_mgr *hdeh_mgr; - u32 requested; - u32 serviced; - - if (!pio_mgr) - goto func_end; - chnl_mgr_obj = pio_mgr->chnl_mgr; - dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj); - dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr); - if (!chnl_mgr_obj) - goto func_end; - - requested = pio_mgr->dpc_req; - serviced = pio_mgr->dpc_sched; - - if (serviced == requested) - goto func_end; - - /* Process pending DPC's */ - do { - /* Check value of interrupt reg to ensure it's a valid error */ - if ((pio_mgr->intr_val > DEH_BASE) && - (pio_mgr->intr_val < DEH_LIMIT)) { - /* Notify DSP/BIOS exception */ - if (hdeh_mgr) { -#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE - print_dsp_debug_trace(pio_mgr); -#endif - bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, - pio_mgr->intr_val); - } - } - /* Proc-copy channel dispatch */ - input_chnl(pio_mgr, NULL, IO_SERVICE); - output_chnl(pio_mgr, NULL, IO_SERVICE); - -#ifdef CHNL_MESSAGES - if (msg_mgr_obj) { - /* Perform I/O dispatch on message queues */ - input_msg(pio_mgr, msg_mgr_obj); - output_msg(pio_mgr, msg_mgr_obj); - } - -#endif -#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE - if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) { - /* Notify DSP Trace message */ - print_dsp_debug_trace(pio_mgr); - } -#endif - serviced++; - } while (serviced != requested); - pio_mgr->dpc_sched = requested; -func_end: - return; -} - -/* - * ======== io_mbox_msg ======== - * Main interrupt handler for the shared memory IO manager. - * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then - * schedules a DPC to dispatch I/O. 
- */ -int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg) -{ - struct io_mgr *pio_mgr; - struct dev_object *dev_obj; - unsigned long flags; - - dev_obj = dev_get_first(); - dev_get_io_mgr(dev_obj, &pio_mgr); - - if (!pio_mgr) - return NOTIFY_BAD; - - pio_mgr->intr_val = (u16)((u32)msg); - if (pio_mgr->intr_val & MBX_PM_CLASS) - io_dispatch_pm(pio_mgr); - - if (pio_mgr->intr_val == MBX_DEH_RESET) { - pio_mgr->intr_val = 0; - } else { - spin_lock_irqsave(&pio_mgr->dpc_lock, flags); - pio_mgr->dpc_req++; - spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags); - tasklet_schedule(&pio_mgr->dpc_tasklet); - } - return NOTIFY_OK; -} - -/* - * ======== io_request_chnl ======== - * Purpose: - * Request channel I/O from the DSP. Sets flags in shared memory, then - * interrupts the DSP. - */ -void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl, - u8 io_mode, u16 *mbx_val) -{ - struct chnl_mgr *chnl_mgr_obj; - struct shm *sm; - - if (!pchnl || !mbx_val) - goto func_end; - chnl_mgr_obj = io_manager->chnl_mgr; - sm = io_manager->shared_mem; - if (io_mode == IO_INPUT) { - /* Indicate to the DSP we have a buffer available for input */ - set_chnl_busy(sm, pchnl->chnl_id); - *mbx_val = MBX_PCPY_CLASS; - } else if (io_mode == IO_OUTPUT) { - /* - * Record the fact that we have a buffer available for - * output. - */ - chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id); - } else { - } -func_end: - return; -} - -/* - * ======== iosm_schedule ======== - * Schedule DPC for IO. - */ -void iosm_schedule(struct io_mgr *io_manager) -{ - unsigned long flags; - - if (!io_manager) - return; - - /* Increment count of DPC's pending. */ - spin_lock_irqsave(&io_manager->dpc_lock, flags); - io_manager->dpc_req++; - spin_unlock_irqrestore(&io_manager->dpc_lock, flags); - - /* Schedule DPC */ - tasklet_schedule(&io_manager->dpc_tasklet); -} - -/* - * ======== find_ready_output ======== - * Search for a host output channel which is ready to send. 
If this is - * called as a result of servicing the DPC, then implement a round - * robin search; otherwise, this was called by a client thread (via - * IO_Dispatch()), so just start searching from the current channel id. - */ -static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj, - struct chnl_object *pchnl, u32 mask) -{ - u32 ret = OUTPUTNOTREADY; - u32 id, start_id; - u32 shift; - - id = (pchnl != - NULL ? pchnl->chnl_id : (chnl_mgr_obj->last_output + 1)); - id = ((id == CHNL_MAXCHANNELS) ? 0 : id); - if (id >= CHNL_MAXCHANNELS) - goto func_end; - if (mask) { - shift = (1 << id); - start_id = id; - do { - if (mask & shift) { - ret = id; - if (pchnl == NULL) - chnl_mgr_obj->last_output = id; - break; - } - id = id + 1; - id = ((id == CHNL_MAXCHANNELS) ? 0 : id); - shift = (1 << id); - } while (id != start_id); - } -func_end: - return ret; -} - -/* - * ======== input_chnl ======== - * Dispatch a buffer on an input channel. - */ -static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, - u8 io_mode) -{ - struct chnl_mgr *chnl_mgr_obj; - struct shm *sm; - u32 chnl_id; - u32 bytes; - struct chnl_irp *chnl_packet_obj = NULL; - u32 dw_arg; - bool clear_chnl = false; - bool notify_client = false; - - sm = pio_mgr->shared_mem; - chnl_mgr_obj = pio_mgr->chnl_mgr; - - /* Attempt to perform input */ - if (!sm->input_full) - goto func_end; - - bytes = sm->input_size * chnl_mgr_obj->word_size; - chnl_id = sm->input_id; - dw_arg = sm->arg; - if (chnl_id >= CHNL_MAXCHANNELS) { - /* Shouldn't be here: would indicate corrupted shm. 
*/ - goto func_end; - } - pchnl = chnl_mgr_obj->channels[chnl_id]; - if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) { - if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) { - /* Get the I/O request, and attempt a transfer */ - if (!list_empty(&pchnl->io_requests)) { - if (!pchnl->cio_reqs) - goto func_end; - - chnl_packet_obj = list_first_entry( - &pchnl->io_requests, - struct chnl_irp, link); - list_del(&chnl_packet_obj->link); - pchnl->cio_reqs--; - - /* - * Ensure we don't overflow the client's - * buffer. - */ - bytes = min(bytes, chnl_packet_obj->byte_size); - memcpy(chnl_packet_obj->host_sys_buf, - pio_mgr->input, bytes); - pchnl->bytes_moved += bytes; - chnl_packet_obj->byte_size = bytes; - chnl_packet_obj->arg = dw_arg; - chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE; - - if (bytes == 0) { - /* - * This assertion fails if the DSP - * sends EOS more than once on this - * channel. - */ - if (pchnl->state & CHNL_STATEEOS) - goto func_end; - /* - * Zero bytes indicates EOS. Update - * IOC status for this chirp, and also - * the channel state. - */ - chnl_packet_obj->status |= - CHNL_IOCSTATEOS; - pchnl->state |= CHNL_STATEEOS; - /* - * Notify that end of stream has - * occurred. - */ - ntfy_notify(pchnl->ntfy_obj, - DSP_STREAMDONE); - } - /* Tell DSP if no more I/O buffers available */ - if (list_empty(&pchnl->io_requests)) - set_chnl_free(sm, pchnl->chnl_id); - clear_chnl = true; - notify_client = true; - } else { - /* - * Input full for this channel, but we have no - * buffers available. The channel must be - * "idling". Clear out the physical input - * channel. 
- */ - clear_chnl = true; - } - } else { - /* Input channel cancelled: clear input channel */ - clear_chnl = true; - } - } else { - /* DPC fired after host closed channel: clear input channel */ - clear_chnl = true; - } - if (clear_chnl) { - /* Indicate to the DSP we have read the input */ - sm->input_full = 0; - sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); - } - if (notify_client) { - /* Notify client with IO completion record */ - notify_chnl_complete(pchnl, chnl_packet_obj); - } -func_end: - return; -} - -/* - * ======== input_msg ======== - * Copies messages from shared memory to the message queues. - */ -static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr) -{ - u32 num_msgs; - u32 i; - u8 *msg_input; - struct msg_queue *msg_queue_obj; - struct msg_frame *pmsg; - struct msg_dspmsg msg; - struct msg_ctrl *msg_ctr_obj; - u32 input_empty; - u32 addr; - - msg_ctr_obj = pio_mgr->msg_input_ctrl; - /* Get the number of input messages to be read */ - input_empty = msg_ctr_obj->buf_empty; - num_msgs = msg_ctr_obj->size; - if (input_empty) - return; - - msg_input = pio_mgr->msg_input; - for (i = 0; i < num_msgs; i++) { - /* Read the next message */ - addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd); - msg.msg.cmd = - read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); - addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1); - msg.msg.arg1 = - read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); - addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2); - msg.msg.arg2 = - read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); - addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id); - msg.msgq_id = - read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); - msg_input += sizeof(struct msg_dspmsg); - - /* Determine which queue to put the message in */ - dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x " - "arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd, - msg.msg.arg1, msg.msg.arg2, msg.msgq_id); - /* - * Interrupt may 
occur before shared memory and message - * input locations have been set up. If all nodes were - * cleaned up, hmsg_mgr->max_msgs should be 0. - */ - list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list, - list_elem) { - if (msg.msgq_id != msg_queue_obj->msgq_id) - continue; - /* Found it */ - if (msg.msg.cmd == RMS_EXITACK) { - /* - * Call the node exit notification. - * The exit message does not get - * queued. - */ - (*hmsg_mgr->on_exit)(msg_queue_obj->arg, - msg.msg.arg1); - break; - } - /* - * Not an exit acknowledgement, queue - * the message. - */ - if (list_empty(&msg_queue_obj->msg_free_list)) { - /* - * No free frame to copy the - * message into. - */ - pr_err("%s: no free msg frames," - " discarding msg\n", - __func__); - break; - } - - pmsg = list_first_entry(&msg_queue_obj->msg_free_list, - struct msg_frame, list_elem); - list_del(&pmsg->list_elem); - pmsg->msg_data = msg; - list_add_tail(&pmsg->list_elem, - &msg_queue_obj->msg_used_list); - ntfy_notify(msg_queue_obj->ntfy_obj, - DSP_NODEMESSAGEREADY); - sync_set_event(msg_queue_obj->sync_event); - } - } - /* Set the post SWI flag */ - if (num_msgs > 0) { - /* Tell the DSP we've read the messages */ - msg_ctr_obj->buf_empty = true; - msg_ctr_obj->post_swi = true; - sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); - } -} - -/* - * ======== notify_chnl_complete ======== - * Purpose: - * Signal the channel event, notifying the client that I/O has completed. - */ -static void notify_chnl_complete(struct chnl_object *pchnl, - struct chnl_irp *chnl_packet_obj) -{ - bool signal_event; - - if (!pchnl || !pchnl->sync_event || !chnl_packet_obj) - goto func_end; - - /* - * Note: we signal the channel event only if the queue of IO - * completions is empty. If it is not empty, the event is sure to be - * signalled by the only IO completion list consumer: - * bridge_chnl_get_ioc(). 
- */ - signal_event = list_empty(&pchnl->io_completions); - /* Enqueue the IO completion info for the client */ - list_add_tail(&chnl_packet_obj->link, &pchnl->io_completions); - pchnl->cio_cs++; - - if (pchnl->cio_cs > pchnl->chnl_packets) - goto func_end; - /* Signal the channel event (if not already set) that IO is complete */ - if (signal_event) - sync_set_event(pchnl->sync_event); - - /* Notify that IO is complete */ - ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION); -func_end: - return; -} - -/* - * ======== output_chnl ======== - * Purpose: - * Dispatch a buffer on an output channel. - */ -static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, - u8 io_mode) -{ - struct chnl_mgr *chnl_mgr_obj; - struct shm *sm; - u32 chnl_id; - struct chnl_irp *chnl_packet_obj; - u32 dw_dsp_f_mask; - - chnl_mgr_obj = pio_mgr->chnl_mgr; - sm = pio_mgr->shared_mem; - /* Attempt to perform output */ - if (sm->output_full) - goto func_end; - - if (pchnl && !((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY)) - goto func_end; - - /* Look to see if both a PC and DSP output channel are ready */ - dw_dsp_f_mask = sm->dsp_free_mask; - chnl_id = - find_ready_output(chnl_mgr_obj, pchnl, - (chnl_mgr_obj->output_mask & dw_dsp_f_mask)); - if (chnl_id == OUTPUTNOTREADY) - goto func_end; - - pchnl = chnl_mgr_obj->channels[chnl_id]; - if (!pchnl || list_empty(&pchnl->io_requests)) { - /* Shouldn't get here */ - goto func_end; - } - - if (!pchnl->cio_reqs) - goto func_end; - - /* Get the I/O request, and attempt a transfer */ - chnl_packet_obj = list_first_entry(&pchnl->io_requests, - struct chnl_irp, link); - list_del(&chnl_packet_obj->link); - - pchnl->cio_reqs--; - - /* Record fact that no more I/O buffers available */ - if (list_empty(&pchnl->io_requests)) - chnl_mgr_obj->output_mask &= ~(1 << chnl_id); - - /* Transfer buffer to DSP side */ - chnl_packet_obj->byte_size = min(pio_mgr->sm_buf_size, - chnl_packet_obj->byte_size); - memcpy(pio_mgr->output, 
chnl_packet_obj->host_sys_buf, - chnl_packet_obj->byte_size); - pchnl->bytes_moved += chnl_packet_obj->byte_size; - /* Write all 32 bits of arg */ - sm->arg = chnl_packet_obj->arg; -#if _CHNL_WORDSIZE == 2 - /* Access can be different SM access word size (e.g. 16/32 bit words) */ - sm->output_id = (u16) chnl_id; - sm->output_size = (u16) (chnl_packet_obj->byte_size + - chnl_mgr_obj->word_size - 1) / - (u16) chnl_mgr_obj->word_size; -#else - sm->output_id = chnl_id; - sm->output_size = (chnl_packet_obj->byte_size + - chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size; -#endif - sm->output_full = 1; - /* Indicate to the DSP we have written the output */ - sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); - /* Notify client with IO completion record (keep EOS) */ - chnl_packet_obj->status &= CHNL_IOCSTATEOS; - notify_chnl_complete(pchnl, chnl_packet_obj); - /* Notify if stream is done. */ - if (chnl_packet_obj->status & CHNL_IOCSTATEOS) - ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE); - -func_end: - return; -} - -/* - * ======== output_msg ======== - * Copies messages from the message queues to the shared memory. - */ -static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr) -{ - u32 num_msgs = 0; - u32 i; - struct msg_dspmsg *msg_output; - struct msg_frame *pmsg; - struct msg_ctrl *msg_ctr_obj; - u32 val; - u32 addr; - - msg_ctr_obj = pio_mgr->msg_output_ctrl; - - /* Check if output has been cleared */ - if (!msg_ctr_obj->buf_empty) - return; - - num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ? 
- hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending; - msg_output = (struct msg_dspmsg *) pio_mgr->msg_output; - - /* Copy num_msgs messages into shared memory */ - for (i = 0; i < num_msgs; i++) { - if (list_empty(&hmsg_mgr->msg_used_list)) - continue; - - pmsg = list_first_entry(&hmsg_mgr->msg_used_list, - struct msg_frame, list_elem); - list_del(&pmsg->list_elem); - - val = (pmsg->msg_data).msgq_id; - addr = (u32) &msg_output->msgq_id; - write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); - - val = (pmsg->msg_data).msg.cmd; - addr = (u32) &msg_output->msg.cmd; - write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); - - val = (pmsg->msg_data).msg.arg1; - addr = (u32) &msg_output->msg.arg1; - write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); - - val = (pmsg->msg_data).msg.arg2; - addr = (u32) &msg_output->msg.arg2; - write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); - - msg_output++; - list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list); - sync_set_event(hmsg_mgr->sync_event); - } - - if (num_msgs > 0) { - hmsg_mgr->msgs_pending -= num_msgs; -#if _CHNL_WORDSIZE == 2 - /* - * Access can be different SM access word size - * (e.g. 16/32 bit words) - */ - msg_ctr_obj->size = (u16) num_msgs; -#else - msg_ctr_obj->size = num_msgs; -#endif - msg_ctr_obj->buf_empty = false; - /* Set the post SWI flag */ - msg_ctr_obj->post_swi = true; - /* Tell the DSP we have written the output. */ - sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); - } -} - -/* - * ======== register_shm_segs ======== - * purpose: - * Registers GPP SM segment with CMM. 
- */ -static int register_shm_segs(struct io_mgr *hio_mgr, - struct cod_manager *cod_man, - u32 dw_gpp_base_pa) -{ - int status = 0; - u32 ul_shm0_base = 0; - u32 shm0_end = 0; - u32 ul_shm0_rsrvd_start = 0; - u32 ul_rsrvd_size = 0; - u32 ul_gpp_phys; - u32 ul_dsp_virt; - u32 ul_shm_seg_id0 = 0; - u32 dw_offset, dw_gpp_base_va, ul_dsp_size; - - /* - * Read address and size info for first SM region. - * Get start of 1st SM Heap region. - */ - status = - cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base); - if (ul_shm0_base == 0) { - status = -EPERM; - goto func_end; - } - /* Get end of 1st SM Heap region */ - if (!status) { - /* Get start and length of message part of shared memory */ - status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, - &shm0_end); - if (shm0_end == 0) { - status = -EPERM; - goto func_end; - } - } - /* Start of Gpp reserved region */ - if (!status) { - /* Get start and length of message part of shared memory */ - status = - cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM, - &ul_shm0_rsrvd_start); - if (ul_shm0_rsrvd_start == 0) { - status = -EPERM; - goto func_end; - } - } - /* Register with CMM */ - if (!status) { - status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr); - if (!status) { - status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr, - CMM_ALLSEGMENTS); - } - } - /* Register new SM region(s) */ - if (!status && (shm0_end - ul_shm0_base) > 0) { - /* Calc size (bytes) of SM the GPP can alloc from */ - ul_rsrvd_size = - (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size; - if (ul_rsrvd_size <= 0) { - status = -EPERM; - goto func_end; - } - /* Calc size of SM DSP can alloc from */ - ul_dsp_size = - (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size; - if (ul_dsp_size <= 0) { - status = -EPERM; - goto func_end; - } - /* First TLB entry reserved for Bridge SM use. 
*/ - ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys; - /* Get size in bytes */ - ul_dsp_virt = - hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt * - hio_mgr->word_size; - /* - * Calc byte offset used to convert GPP phys <-> DSP byte - * address. - */ - if (dw_gpp_base_pa > ul_dsp_virt) - dw_offset = dw_gpp_base_pa - ul_dsp_virt; - else - dw_offset = ul_dsp_virt - dw_gpp_base_pa; - - if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) { - status = -EPERM; - goto func_end; - } - /* - * Calc Gpp phys base of SM region. - * This is actually uncached kernel virtual address. - */ - dw_gpp_base_va = - ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size - - ul_dsp_virt; - /* - * Calc Gpp phys base of SM region. - * This is the physical address. - */ - dw_gpp_base_pa = - dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size - - ul_dsp_virt; - /* Register SM Segment 0. */ - status = - cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa, - ul_rsrvd_size, dw_offset, - (dw_gpp_base_pa > - ul_dsp_virt) ? CMM_ADDTODSPPA : - CMM_SUBFROMDSPPA, - (u32) (ul_shm0_base * - hio_mgr->word_size), - ul_dsp_size, &ul_shm_seg_id0, - dw_gpp_base_va); - /* First SM region is seg_id = 1 */ - if (ul_shm_seg_id0 != 1) - status = -EPERM; - } -func_end: - return status; -} - -/* ZCPY IO routines. */ -/* - * ======== IO_SHMcontrol ======== - * Sets the requested shm setting. - */ -int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs) -{ -#ifdef CONFIG_TIDSPBRIDGE_DVFS - u32 i; - struct dspbridge_platform_data *pdata = - omap_dspbridge_dev->dev.platform_data; - - switch (desc) { - case SHM_CURROPP: - /* Update the shared memory with requested OPP information */ - if (pargs != NULL) - hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = - *(u32 *) pargs; - else - return -EPERM; - break; - case SHM_OPPINFO: - /* - * Update the shared memory with the voltage, frequency, - * min and max frequency values for an OPP. 
- */ - for (i = 0; i <= dsp_max_opps; i++) { - hio_mgr->shared_mem->opp_table_struct.opp_point[i]. - voltage = vdd1_dsp_freq[i][0]; - dev_dbg(bridge, "OPP-shm: voltage: %d\n", - vdd1_dsp_freq[i][0]); - hio_mgr->shared_mem->opp_table_struct. - opp_point[i].frequency = vdd1_dsp_freq[i][1]; - dev_dbg(bridge, "OPP-shm: frequency: %d\n", - vdd1_dsp_freq[i][1]); - hio_mgr->shared_mem->opp_table_struct.opp_point[i]. - min_freq = vdd1_dsp_freq[i][2]; - dev_dbg(bridge, "OPP-shm: min freq: %d\n", - vdd1_dsp_freq[i][2]); - hio_mgr->shared_mem->opp_table_struct.opp_point[i]. - max_freq = vdd1_dsp_freq[i][3]; - dev_dbg(bridge, "OPP-shm: max freq: %d\n", - vdd1_dsp_freq[i][3]); - } - hio_mgr->shared_mem->opp_table_struct.num_opp_pts = - dsp_max_opps; - dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps); - /* Update the current OPP number */ - if (pdata->dsp_get_opp) - i = (*pdata->dsp_get_opp) (); - hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i; - dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i); - break; - case SHM_GETOPP: - /* Get the OPP that DSP has requested */ - *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt; - break; - default: - break; - } -#endif - return 0; -} - -/* - * ======== bridge_io_get_proc_load ======== - * Gets the Processor's Load information - */ -int bridge_io_get_proc_load(struct io_mgr *hio_mgr, - struct dsp_procloadstat *proc_lstat) -{ - if (!hio_mgr->shared_mem) - return -EFAULT; - - proc_lstat->curr_load = - hio_mgr->shared_mem->load_mon_info.curr_dsp_load; - proc_lstat->predicted_load = - hio_mgr->shared_mem->load_mon_info.pred_dsp_load; - proc_lstat->curr_dsp_freq = - hio_mgr->shared_mem->load_mon_info.curr_dsp_freq; - proc_lstat->predicted_freq = - hio_mgr->shared_mem->load_mon_info.pred_dsp_freq; - - dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, " - "Pred Freq = %d\n", proc_lstat->curr_load, - proc_lstat->predicted_load, proc_lstat->curr_dsp_freq, - proc_lstat->predicted_freq); - return 0; 
-} - - -#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) -void print_dsp_debug_trace(struct io_mgr *hio_mgr) -{ - u32 ul_new_message_length = 0, ul_gpp_cur_pointer; - - while (true) { - /* Get the DSP current pointer */ - ul_gpp_cur_pointer = - *(u32 *) (hio_mgr->trace_buffer_current); - ul_gpp_cur_pointer = - hio_mgr->gpp_va + (ul_gpp_cur_pointer - - hio_mgr->dsp_va); - - /* No new debug messages available yet */ - if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) { - break; - } else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) { - /* Continuous data */ - ul_new_message_length = - ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer; - - memcpy(hio_mgr->msg, - (char *)hio_mgr->gpp_read_pointer, - ul_new_message_length); - hio_mgr->msg[ul_new_message_length] = '\0'; - /* - * Advance the GPP trace pointer to DSP current - * pointer. - */ - hio_mgr->gpp_read_pointer += ul_new_message_length; - /* Print the trace messages */ - pr_info("DSPTrace: %s\n", hio_mgr->msg); - } else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) { - /* Handle trace buffer wraparound */ - memcpy(hio_mgr->msg, - (char *)hio_mgr->gpp_read_pointer, - hio_mgr->trace_buffer_end - - hio_mgr->gpp_read_pointer); - ul_new_message_length = - ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin; - memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end - - hio_mgr->gpp_read_pointer], - (char *)hio_mgr->trace_buffer_begin, - ul_new_message_length); - hio_mgr->msg[hio_mgr->trace_buffer_end - - hio_mgr->gpp_read_pointer + - ul_new_message_length] = '\0'; - /* - * Advance the GPP trace pointer to DSP current - * pointer. - */ - hio_mgr->gpp_read_pointer = - hio_mgr->trace_buffer_begin + - ul_new_message_length; - /* Print the trace messages */ - pr_info("DSPTrace: %s\n", hio_mgr->msg); - } - } -} -#endif - -#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE -/* - * ======== print_dsp_trace_buffer ======== - * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled). 
- * Parameters: - * hdeh_mgr: Handle to DEH manager object - * number of extra carriage returns to generate. - * Returns: - * 0: Success. - * -ENOMEM: Unable to allocate memory. - * Requires: - * hdeh_mgr muse be valid. Checked in bridge_deh_notify. - */ -int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context) -{ - int status = 0; - struct cod_manager *cod_mgr; - u32 ul_trace_end; - u32 ul_trace_begin; - u32 trace_cur_pos; - u32 ul_num_bytes = 0; - u32 ul_num_words = 0; - u32 ul_word_size = 2; - char *psz_buf; - char *str_beg; - char *trace_end; - char *buf_end; - char *new_line; - - struct bridge_dev_context *pbridge_context = hbridge_context; - struct bridge_drv_interface *intf_fxns; - struct dev_object *dev_obj = (struct dev_object *) - pbridge_context->dev_obj; - - status = dev_get_cod_mgr(dev_obj, &cod_mgr); - - if (cod_mgr) { - /* Look for SYS_PUTCBEG/SYS_PUTCEND */ - status = - cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin); - } else { - status = -EFAULT; - } - if (!status) - status = - cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end); - - if (!status) - /* trace_cur_pos will hold the address of a DSP pointer */ - status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS, - &trace_cur_pos); - - if (status) - goto func_end; - - ul_num_bytes = (ul_trace_end - ul_trace_begin); - - ul_num_words = ul_num_bytes * ul_word_size; - status = dev_get_intf_fxns(dev_obj, &intf_fxns); - - if (status) - goto func_end; - - psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC); - if (psz_buf != NULL) { - /* Read trace buffer data */ - status = (*intf_fxns->brd_read)(pbridge_context, - (u8 *)psz_buf, (u32)ul_trace_begin, - ul_num_bytes, 0); - - if (status) - goto func_end; - - /* Pack and do newline conversion */ - pr_debug("PrintDspTraceBuffer: " - "before pack and unpack.\n"); - pr_debug("%s: DSP Trace Buffer Begin:\n" - "=======================\n%s\n", - __func__, psz_buf); - - /* Read the value at the DSP address in trace_cur_pos. 
*/ - status = (*intf_fxns->brd_read)(pbridge_context, - (u8 *)&trace_cur_pos, (u32)trace_cur_pos, - 4, 0); - if (status) - goto func_end; - /* Pack and do newline conversion */ - pr_info("DSP Trace Buffer Begin:\n" - "=======================\n%s\n", - psz_buf); - - - /* convert to offset */ - trace_cur_pos = trace_cur_pos - ul_trace_begin; - - if (ul_num_bytes) { - /* - * The buffer is not full, find the end of the - * data -- buf_end will be >= pszBuf after - * while. - */ - buf_end = &psz_buf[ul_num_bytes+1]; - /* DSP print position */ - trace_end = &psz_buf[trace_cur_pos]; - - /* - * Search buffer for a new_line and replace it - * with '\0', then print as string. - * Continue until end of buffer is reached. - */ - str_beg = trace_end; - ul_num_bytes = buf_end - str_beg; - - while (str_beg < buf_end) { - new_line = strnchr(str_beg, ul_num_bytes, - '\n'); - if (new_line && new_line < buf_end) { - *new_line = 0; - pr_debug("%s\n", str_beg); - str_beg = ++new_line; - ul_num_bytes = buf_end - str_beg; - } else { - /* - * Assume buffer empty if it contains - * a zero - */ - if (*str_beg != '\0') { - str_beg[ul_num_bytes] = 0; - pr_debug("%s\n", str_beg); - } - str_beg = buf_end; - ul_num_bytes = 0; - } - } - /* - * Search buffer for a nNewLine and replace it - * with '\0', then print as string. - * Continue until buffer is exhausted. 
- */ - str_beg = psz_buf; - ul_num_bytes = trace_end - str_beg; - - while (str_beg < trace_end) { - new_line = strnchr(str_beg, ul_num_bytes, '\n'); - if (new_line != NULL && new_line < trace_end) { - *new_line = 0; - pr_debug("%s\n", str_beg); - str_beg = ++new_line; - ul_num_bytes = trace_end - str_beg; - } else { - /* - * Assume buffer empty if it contains - * a zero - */ - if (*str_beg != '\0') { - str_beg[ul_num_bytes] = 0; - pr_debug("%s\n", str_beg); - } - str_beg = trace_end; - ul_num_bytes = 0; - } - } - } - pr_info("\n=======================\n" - "DSP Trace Buffer End:\n"); - kfree(psz_buf); - } else { - status = -ENOMEM; - } -func_end: - if (status) - dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status); - return status; -} - -/** - * dump_dsp_stack() - This function dumps the data on the DSP stack. - * @bridge_context: Bridge driver's device context pointer. - * - */ -int dump_dsp_stack(struct bridge_dev_context *bridge_context) -{ - int status = 0; - struct cod_manager *code_mgr; - struct node_mgr *node_mgr; - u32 trace_begin; - char name[256]; - struct { - u32 head[2]; - u32 size; - } mmu_fault_dbg_info; - u32 *buffer; - u32 *buffer_beg; - u32 *buffer_end; - u32 exc_type; - u32 dyn_ext_base; - u32 i; - u32 offset_output; - u32 total_size; - u32 poll_cnt; - const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR", - "IRP", "NRP", "AMR", "SSR", - "ILC", "RILC", "IER", "CSR"}; - const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"}; - struct bridge_drv_interface *intf_fxns; - struct dev_object *dev_object = bridge_context->dev_obj; - - status = dev_get_cod_mgr(dev_object, &code_mgr); - if (!code_mgr) { - pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__); - status = -EFAULT; - } - - if (!status) { - status = dev_get_node_manager(dev_object, &node_mgr); - if (!node_mgr) { - pr_debug("%s: Failed on dev_get_node_manager.\n", - __func__); - status = -EFAULT; - } - } - - if (!status) { - /* Look for SYS_PUTCBEG/SYS_PUTCEND: */ - status = - 
cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin); - pr_debug("%s: trace_begin Value 0x%x\n", - __func__, trace_begin); - if (status) - pr_debug("%s: Failed on cod_get_sym_value.\n", - __func__); - } - if (!status) - status = dev_get_intf_fxns(dev_object, &intf_fxns); - /* - * Check for the "magic number" in the trace buffer. If it has - * yet to appear then poll the trace buffer to wait for it. Its - * appearance signals that the DSP has finished dumping its state. - */ - mmu_fault_dbg_info.head[0] = 0; - mmu_fault_dbg_info.head[1] = 0; - if (!status) { - poll_cnt = 0; - while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 || - mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) && - poll_cnt < POLL_MAX) { - - /* Read DSP dump size from the DSP trace buffer... */ - status = (*intf_fxns->brd_read)(bridge_context, - (u8 *)&mmu_fault_dbg_info, (u32)trace_begin, - sizeof(mmu_fault_dbg_info), 0); - - if (status) - break; - - poll_cnt++; - } - - if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 && - mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) { - status = -ETIME; - pr_err("%s:No DSP MMU-Fault information available.\n", - __func__); - } - } - - if (!status) { - total_size = mmu_fault_dbg_info.size; - /* Limit the size in case DSP went crazy */ - if (total_size > MAX_MMU_DBGBUFF) - total_size = MAX_MMU_DBGBUFF; - - buffer = kzalloc(total_size, GFP_ATOMIC); - if (!buffer) { - status = -ENOMEM; - pr_debug("%s: Failed to " - "allocate stack dump buffer.\n", __func__); - goto func_end; - } - - buffer_beg = buffer; - buffer_end = buffer + total_size / 4; - - /* Read bytes from the DSP trace buffer... 
*/ - status = (*intf_fxns->brd_read)(bridge_context, - (u8 *)buffer, (u32)trace_begin, - total_size, 0); - if (status) { - pr_debug("%s: Failed to Read Trace Buffer.\n", - __func__); - goto func_end; - } - - pr_err("\nAproximate Crash Position:\n" - "--------------------------\n"); - - exc_type = buffer[3]; - if (!exc_type) - i = buffer[79]; /* IRP */ - else - i = buffer[80]; /* NRP */ - - status = - cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base); - if (status) { - status = -EFAULT; - goto func_end; - } - - if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i, - 0x1000, &offset_output, name) == 0)) - pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name, - i - offset_output); - else - pr_err("0x%-8x [Unable to match to a symbol.]\n", i); - - buffer += 4; - - pr_err("\nExecution Info:\n" - "---------------\n"); - - if (*buffer < ARRAY_SIZE(exec_ctxt)) { - pr_err("Execution context \t%s\n", - exec_ctxt[*buffer++]); - } else { - pr_err("Execution context corrupt\n"); - kfree(buffer_beg); - return -EFAULT; - } - pr_err("Task Handle\t\t0x%x\n", *buffer++); - pr_err("Stack Pointer\t\t0x%x\n", *buffer++); - pr_err("Stack Top\t\t0x%x\n", *buffer++); - pr_err("Stack Bottom\t\t0x%x\n", *buffer++); - pr_err("Stack Size\t\t0x%x\n", *buffer++); - pr_err("Stack Size In Use\t0x%x\n", *buffer++); - - pr_err("\nCPU Registers\n" - "---------------\n"); - - for (i = 0; i < 32; i++) { - if (i == 4 || i == 6 || i == 8) - pr_err("A%d 0x%-8x [Function Argument %d]\n", - i, *buffer++, i-3); - else if (i == 15) - pr_err("A15 0x%-8x [Frame Pointer]\n", - *buffer++); - else - pr_err("A%d 0x%x\n", i, *buffer++); - } - - pr_err("\nB0 0x%x\n", *buffer++); - pr_err("B1 0x%x\n", *buffer++); - pr_err("B2 0x%x\n", *buffer++); - - if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr, - *buffer, 0x1000, &offset_output, name) == 0)) - - pr_err("B3 0x%-8x [Function Return Pointer:" - " \"%s\" + 0x%x]\n", *buffer, name, - *buffer - offset_output); - else - pr_err("B3 0x%-8x [Function Return Pointer:" 
- "Unable to match to a symbol.]\n", *buffer); - - buffer++; - - for (i = 4; i < 32; i++) { - if (i == 4 || i == 6 || i == 8) - pr_err("B%d 0x%-8x [Function Argument %d]\n", - i, *buffer++, i-2); - else if (i == 14) - pr_err("B14 0x%-8x [Data Page Pointer]\n", - *buffer++); - else - pr_err("B%d 0x%x\n", i, *buffer++); - } - - pr_err("\n"); - - for (i = 0; i < ARRAY_SIZE(dsp_regs); i++) - pr_err("%s 0x%x\n", dsp_regs[i], *buffer++); - - pr_err("\nStack:\n" - "------\n"); - - for (i = 0; buffer < buffer_end; i++, buffer++) { - if ((*buffer > dyn_ext_base) && ( - node_find_addr(node_mgr, *buffer , 0x600, - &offset_output, name) == 0)) - pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n", - i, *buffer, name, - *buffer - offset_output); - else - pr_err("[%d] 0x%x\n", i, *buffer); - } - kfree(buffer_beg); - } -func_end: - return status; -} - -/** - * dump_dl_modules() - This functions dumps the _DLModules loaded in DSP side - * @bridge_context: Bridge driver's device context pointer. - * - */ -void dump_dl_modules(struct bridge_dev_context *bridge_context) -{ - struct cod_manager *code_mgr; - struct bridge_drv_interface *intf_fxns; - struct bridge_dev_context *bridge_ctxt = bridge_context; - struct dev_object *dev_object = bridge_ctxt->dev_obj; - struct modules_header modules_hdr; - struct dll_module *module_struct = NULL; - u32 module_dsp_addr; - u32 module_size; - u32 module_struct_size = 0; - u32 sect_ndx; - char *sect_str; - int status = 0; - - status = dev_get_intf_fxns(dev_object, &intf_fxns); - if (status) { - pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__); - goto func_end; - } - - status = dev_get_cod_mgr(dev_object, &code_mgr); - if (!code_mgr) { - pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__); - status = -EFAULT; - goto func_end; - } - - /* Lookup the address of the modules_header structure */ - status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr); - if (status) { - pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n", - 
__func__); - goto func_end; - } - - pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr); - - /* Copy the modules_header structure from DSP memory. */ - status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr, - (u32) module_dsp_addr, sizeof(modules_hdr), 0); - - if (status) { - pr_debug("%s: Failed failed to read modules header.\n", - __func__); - goto func_end; - } - - module_dsp_addr = modules_hdr.first_module; - module_size = modules_hdr.first_module_size; - - pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr, - module_size); - - pr_err("\nDynamically Loaded Modules:\n" - "---------------------------\n"); - - /* For each dll_module structure in the list... */ - while (module_size) { - /* - * Allocate/re-allocate memory to hold the dll_module - * structure. The memory is re-allocated only if the existing - * allocation is too small. - */ - if (module_size > module_struct_size) { - kfree(module_struct); - module_struct = kzalloc(module_size+128, GFP_ATOMIC); - module_struct_size = module_size+128; - pr_debug("%s: allocated module struct %p %d\n", - __func__, module_struct, module_struct_size); - if (!module_struct) - goto func_end; - } - /* Copy the dll_module structure from DSP memory */ - status = (*intf_fxns->brd_read)(bridge_context, - (u8 *)module_struct, module_dsp_addr, module_size, 0); - - if (status) { - pr_debug( - "%s: Failed to read dll_module struct for 0x%x.\n", - __func__, module_dsp_addr); - break; - } - - /* Update info regarding the _next_ module in the list. */ - module_dsp_addr = module_struct->next_module; - module_size = module_struct->next_module_size; - - pr_debug("%s: next module 0x%x %d, this module num sects %d\n", - __func__, module_dsp_addr, module_size, - module_struct->num_sects); - - /* - * The section name strings start immediately following - * the array of dll_sect structures. 
- */ - sect_str = (char *) &module_struct-> - sects[module_struct->num_sects]; - pr_err("%s\n", sect_str); - - /* - * Advance to the first section name string. - * Each string follows the one before. - */ - sect_str += strlen(sect_str) + 1; - - /* Access each dll_sect structure and its name string. */ - for (sect_ndx = 0; - sect_ndx < module_struct->num_sects; sect_ndx++) { - pr_err(" Section: 0x%x ", - module_struct->sects[sect_ndx].sect_load_adr); - - if (((u32) sect_str - (u32) module_struct) < - module_struct_size) { - pr_err("%s\n", sect_str); - /* Each string follows the one before. */ - sect_str += strlen(sect_str)+1; - } else { - pr_err("<string error>\n"); - pr_debug("%s: section name sting address " - "is invalid %p\n", __func__, sect_str); - } - } - } -func_end: - kfree(module_struct); -} -#endif diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c deleted file mode 100644 index 7b517eb827fe..000000000000 --- a/drivers/staging/tidspbridge/core/msg_sm.c +++ /dev/null @@ -1,564 +0,0 @@ -/* - * msg_sm.c - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Implements upper edge functions for Bridge message module. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
- */ -#include <linux/types.h> - -/* ----------------------------------- DSP/BIOS Bridge */ -#include <dspbridge/dbdefs.h> - -/* ----------------------------------- OS Adaptation Layer */ -#include <dspbridge/sync.h> - -/* ----------------------------------- Platform Manager */ -#include <dspbridge/dev.h> - -/* ----------------------------------- Others */ -#include <dspbridge/io_sm.h> - -/* ----------------------------------- This */ -#include <_msg_sm.h> -#include <dspbridge/dspmsg.h> - -/* ----------------------------------- Function Prototypes */ -static int add_new_msg(struct list_head *msg_list); -static void delete_msg_mgr(struct msg_mgr *hmsg_mgr); -static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp); -static void free_msg_list(struct list_head *msg_list); - -/* - * ======== bridge_msg_create ======== - * Create an object to manage message queues. Only one of these objects - * can exist per device object. - */ -int bridge_msg_create(struct msg_mgr **msg_man, - struct dev_object *hdev_obj, - msg_onexit msg_callback) -{ - struct msg_mgr *msg_mgr_obj; - struct io_mgr *hio_mgr; - int status = 0; - - if (!msg_man || !msg_callback || !hdev_obj) - return -EFAULT; - - dev_get_io_mgr(hdev_obj, &hio_mgr); - if (!hio_mgr) - return -EFAULT; - - *msg_man = NULL; - /* Allocate msg_ctrl manager object */ - msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL); - if (!msg_mgr_obj) - return -ENOMEM; - - msg_mgr_obj->on_exit = msg_callback; - msg_mgr_obj->iomgr = hio_mgr; - /* List of MSG_QUEUEs */ - INIT_LIST_HEAD(&msg_mgr_obj->queue_list); - /* - * Queues of message frames for messages to the DSP. Message - * frames will only be added to the free queue when a - * msg_queue object is created. 
- */ - INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list); - INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list); - spin_lock_init(&msg_mgr_obj->msg_mgr_lock); - - /* - * Create an event to be used by bridge_msg_put() in waiting - * for an available free frame from the message manager. - */ - msg_mgr_obj->sync_event = - kzalloc(sizeof(struct sync_object), GFP_KERNEL); - if (!msg_mgr_obj->sync_event) { - kfree(msg_mgr_obj); - return -ENOMEM; - } - sync_init_event(msg_mgr_obj->sync_event); - - *msg_man = msg_mgr_obj; - - return status; -} - -/* - * ======== bridge_msg_create_queue ======== - * Create a msg_queue for sending/receiving messages to/from a node - * on the DSP. - */ -int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq, - u32 msgq_id, u32 max_msgs, void *arg) -{ - u32 i; - u32 num_allocated = 0; - struct msg_queue *msg_q; - int status = 0; - - if (!hmsg_mgr || msgq == NULL) - return -EFAULT; - - *msgq = NULL; - /* Allocate msg_queue object */ - msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL); - if (!msg_q) - return -ENOMEM; - - msg_q->max_msgs = max_msgs; - msg_q->msg_mgr = hmsg_mgr; - msg_q->arg = arg; /* Node handle */ - msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */ - /* Queues of Message frames for messages from the DSP */ - INIT_LIST_HEAD(&msg_q->msg_free_list); - INIT_LIST_HEAD(&msg_q->msg_used_list); - - /* Create event that will be signalled when a message from - * the DSP is available. */ - msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL); - if (!msg_q->sync_event) { - status = -ENOMEM; - goto out_err; - - } - sync_init_event(msg_q->sync_event); - - /* Create a notification list for message ready notification. */ - msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); - if (!msg_q->ntfy_obj) { - status = -ENOMEM; - goto out_err; - } - ntfy_init(msg_q->ntfy_obj); - - /* Create events that will be used to synchronize cleanup - * when the object is deleted. 
sync_done will be set to - * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack - * will be set by the unblocked thread to signal that it - * is unblocked and will no longer reference the object. */ - msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL); - if (!msg_q->sync_done) { - status = -ENOMEM; - goto out_err; - } - sync_init_event(msg_q->sync_done); - - msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL); - if (!msg_q->sync_done_ack) { - status = -ENOMEM; - goto out_err; - } - sync_init_event(msg_q->sync_done_ack); - - /* Enter critical section */ - spin_lock_bh(&hmsg_mgr->msg_mgr_lock); - /* Initialize message frames and put in appropriate queues */ - for (i = 0; i < max_msgs && !status; i++) { - status = add_new_msg(&hmsg_mgr->msg_free_list); - if (!status) { - num_allocated++; - status = add_new_msg(&msg_q->msg_free_list); - } - } - if (status) { - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - goto out_err; - } - - list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list); - *msgq = msg_q; - /* Signal that free frames are now available */ - if (!list_empty(&hmsg_mgr->msg_free_list)) - sync_set_event(hmsg_mgr->sync_event); - - /* Exit critical section */ - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - - return 0; -out_err: - delete_msg_queue(msg_q, num_allocated); - return status; -} - -/* - * ======== bridge_msg_delete ======== - * Delete a msg_ctrl manager allocated in bridge_msg_create(). - */ -void bridge_msg_delete(struct msg_mgr *hmsg_mgr) -{ - delete_msg_mgr(hmsg_mgr); -} - -/* - * ======== bridge_msg_delete_queue ======== - * Delete a msg_ctrl queue allocated in bridge_msg_create_queue. - */ -void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj) -{ - struct msg_mgr *hmsg_mgr; - u32 io_msg_pend; - - if (!msg_queue_obj || !msg_queue_obj->msg_mgr) - return; - - hmsg_mgr = msg_queue_obj->msg_mgr; - msg_queue_obj->done = true; - /* Unblock all threads blocked in MSG_Get() or MSG_Put(). 
*/ - io_msg_pend = msg_queue_obj->io_msg_pend; - while (io_msg_pend) { - /* Unblock thread */ - sync_set_event(msg_queue_obj->sync_done); - /* Wait for acknowledgement */ - sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE); - io_msg_pend = msg_queue_obj->io_msg_pend; - } - /* Remove message queue from hmsg_mgr->queue_list */ - spin_lock_bh(&hmsg_mgr->msg_mgr_lock); - list_del(&msg_queue_obj->list_elem); - /* Free the message queue object */ - delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs); - if (list_empty(&hmsg_mgr->msg_free_list)) - sync_reset_event(hmsg_mgr->sync_event); - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); -} - -/* - * ======== bridge_msg_get ======== - * Get a message from a msg_ctrl queue. - */ -int bridge_msg_get(struct msg_queue *msg_queue_obj, - struct dsp_msg *pmsg, u32 utimeout) -{ - struct msg_frame *msg_frame_obj; - struct msg_mgr *hmsg_mgr; - struct sync_object *syncs[2]; - u32 index; - int status = 0; - - if (!msg_queue_obj || pmsg == NULL) - return -ENOMEM; - - hmsg_mgr = msg_queue_obj->msg_mgr; - - spin_lock_bh(&hmsg_mgr->msg_mgr_lock); - /* If a message is already there, get it */ - if (!list_empty(&msg_queue_obj->msg_used_list)) { - msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list, - struct msg_frame, list_elem); - list_del(&msg_frame_obj->list_elem); - *pmsg = msg_frame_obj->msg_data.msg; - list_add_tail(&msg_frame_obj->list_elem, - &msg_queue_obj->msg_free_list); - if (list_empty(&msg_queue_obj->msg_used_list)) - sync_reset_event(msg_queue_obj->sync_event); - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - return 0; - } - - if (msg_queue_obj->done) { - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - return -EPERM; - } - msg_queue_obj->io_msg_pend++; - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - - /* - * Wait til message is available, timeout, or done. We don't - * have to schedule the DPC, since the DSP will send messages - * when they are available. 
- */ - syncs[0] = msg_queue_obj->sync_event; - syncs[1] = msg_queue_obj->sync_done; - status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index); - - spin_lock_bh(&hmsg_mgr->msg_mgr_lock); - if (msg_queue_obj->done) { - msg_queue_obj->io_msg_pend--; - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - /* - * Signal that we're not going to access msg_queue_obj - * anymore, so it can be deleted. - */ - sync_set_event(msg_queue_obj->sync_done_ack); - return -EPERM; - } - if (!status && !list_empty(&msg_queue_obj->msg_used_list)) { - /* Get msg from used list */ - msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list, - struct msg_frame, list_elem); - list_del(&msg_frame_obj->list_elem); - /* Copy message into pmsg and put frame on the free list */ - *pmsg = msg_frame_obj->msg_data.msg; - list_add_tail(&msg_frame_obj->list_elem, - &msg_queue_obj->msg_free_list); - } - msg_queue_obj->io_msg_pend--; - /* Reset the event if there are still queued messages */ - if (!list_empty(&msg_queue_obj->msg_used_list)) - sync_set_event(msg_queue_obj->sync_event); - - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - - return status; -} - -/* - * ======== bridge_msg_put ======== - * Put a message onto a msg_ctrl queue. 
- */ -int bridge_msg_put(struct msg_queue *msg_queue_obj, - const struct dsp_msg *pmsg, u32 utimeout) -{ - struct msg_frame *msg_frame_obj; - struct msg_mgr *hmsg_mgr; - struct sync_object *syncs[2]; - u32 index; - int status; - - if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr) - return -EFAULT; - - hmsg_mgr = msg_queue_obj->msg_mgr; - - spin_lock_bh(&hmsg_mgr->msg_mgr_lock); - - /* If a message frame is available, use it */ - if (!list_empty(&hmsg_mgr->msg_free_list)) { - msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list, - struct msg_frame, list_elem); - list_del(&msg_frame_obj->list_elem); - msg_frame_obj->msg_data.msg = *pmsg; - msg_frame_obj->msg_data.msgq_id = - msg_queue_obj->msgq_id; - list_add_tail(&msg_frame_obj->list_elem, - &hmsg_mgr->msg_used_list); - hmsg_mgr->msgs_pending++; - - if (list_empty(&hmsg_mgr->msg_free_list)) - sync_reset_event(hmsg_mgr->sync_event); - - /* Release critical section before scheduling DPC */ - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - /* Schedule a DPC, to do the actual data transfer: */ - iosm_schedule(hmsg_mgr->iomgr); - return 0; - } - - if (msg_queue_obj->done) { - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - return -EPERM; - } - msg_queue_obj->io_msg_pend++; - - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - - /* Wait til a free message frame is available, timeout, or done */ - syncs[0] = hmsg_mgr->sync_event; - syncs[1] = msg_queue_obj->sync_done; - status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index); - if (status) - return status; - - /* Enter critical section */ - spin_lock_bh(&hmsg_mgr->msg_mgr_lock); - if (msg_queue_obj->done) { - msg_queue_obj->io_msg_pend--; - /* Exit critical section */ - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - /* - * Signal that we're not going to access msg_queue_obj - * anymore, so it can be deleted. 
- */ - sync_set_event(msg_queue_obj->sync_done_ack); - return -EPERM; - } - - if (list_empty(&hmsg_mgr->msg_free_list)) { - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - return -EFAULT; - } - - /* Get msg from free list */ - msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list, - struct msg_frame, list_elem); - /* - * Copy message into pmsg and put frame on the - * used list. - */ - list_del(&msg_frame_obj->list_elem); - msg_frame_obj->msg_data.msg = *pmsg; - msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id; - list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list); - hmsg_mgr->msgs_pending++; - /* - * Schedule a DPC, to do the actual - * data transfer. - */ - iosm_schedule(hmsg_mgr->iomgr); - - msg_queue_obj->io_msg_pend--; - /* Reset event if there are still frames available */ - if (!list_empty(&hmsg_mgr->msg_free_list)) - sync_set_event(hmsg_mgr->sync_event); - - /* Exit critical section */ - spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); - - return 0; -} - -/* - * ======== bridge_msg_register_notify ======== - */ -int bridge_msg_register_notify(struct msg_queue *msg_queue_obj, - u32 event_mask, u32 notify_type, - struct dsp_notification *hnotification) -{ - int status = 0; - - if (!msg_queue_obj || !hnotification) { - status = -ENOMEM; - goto func_end; - } - - if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) { - status = -EPERM; - goto func_end; - } - - if (notify_type != DSP_SIGNALEVENT) { - status = -EBADR; - goto func_end; - } - - if (event_mask) - status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification, - event_mask, notify_type); - else - status = ntfy_unregister(msg_queue_obj->ntfy_obj, - hnotification); - - if (status == -EINVAL) { - /* Not registered. Ok, since we couldn't have known. Node - * notifications are split between node state change handled - * by NODE, and message ready handled by msg_ctrl. 
*/ - status = 0; - } -func_end: - return status; -} - -/* - * ======== bridge_msg_set_queue_id ======== - */ -void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id) -{ - /* - * A message queue must be created when a node is allocated, - * so that node_register_notify() can be called before the node - * is created. Since we don't know the node environment until the - * node is created, we need this function to set msg_queue_obj->msgq_id - * to the node environment, after the node is created. - */ - if (msg_queue_obj) - msg_queue_obj->msgq_id = msgq_id; -} - -/* - * ======== add_new_msg ======== - * Must be called in message manager critical section. - */ -static int add_new_msg(struct list_head *msg_list) -{ - struct msg_frame *pmsg; - - pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC); - if (!pmsg) - return -ENOMEM; - - list_add_tail(&pmsg->list_elem, msg_list); - - return 0; -} - -/* - * ======== delete_msg_mgr ======== - */ -static void delete_msg_mgr(struct msg_mgr *hmsg_mgr) -{ - if (!hmsg_mgr) - return; - - /* FIXME: free elements from queue_list? 
*/ - free_msg_list(&hmsg_mgr->msg_free_list); - free_msg_list(&hmsg_mgr->msg_used_list); - kfree(hmsg_mgr->sync_event); - kfree(hmsg_mgr); -} - -/* - * ======== delete_msg_queue ======== - */ -static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp) -{ - struct msg_mgr *hmsg_mgr; - struct msg_frame *pmsg, *tmp; - u32 i; - - if (!msg_queue_obj || !msg_queue_obj->msg_mgr) - return; - - hmsg_mgr = msg_queue_obj->msg_mgr; - - /* Pull off num_to_dsp message frames from Msg manager and free */ - i = 0; - list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list, - list_elem) { - list_del(&pmsg->list_elem); - kfree(pmsg); - if (i++ >= num_to_dsp) - break; - } - - free_msg_list(&msg_queue_obj->msg_free_list); - free_msg_list(&msg_queue_obj->msg_used_list); - - if (msg_queue_obj->ntfy_obj) { - ntfy_delete(msg_queue_obj->ntfy_obj); - kfree(msg_queue_obj->ntfy_obj); - } - - kfree(msg_queue_obj->sync_event); - kfree(msg_queue_obj->sync_done); - kfree(msg_queue_obj->sync_done_ack); - - kfree(msg_queue_obj); -} - -/* - * ======== free_msg_list ======== - */ -static void free_msg_list(struct list_head *msg_list) -{ - struct msg_frame *pmsg, *tmp; - - if (!msg_list) - return; - - list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) { - list_del(&pmsg->list_elem); - kfree(pmsg); - } -} diff --git a/drivers/staging/tidspbridge/core/sync.c b/drivers/staging/tidspbridge/core/sync.c deleted file mode 100644 index 743ff09d82d2..000000000000 --- a/drivers/staging/tidspbridge/core/sync.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * sync.c - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Synchronization services. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -/* ----------------------------------- Host OS */ -#include <dspbridge/host_os.h> - -/* ----------------------------------- This */ -#include <dspbridge/sync.h> -#include <dspbridge/ntfy.h> - -DEFINE_SPINLOCK(sync_lock); - -/** - * sync_set_event() - set or signal and specified event - * @event: Event to be set.. - * - * set the @event, if there is an thread waiting for the event - * it will be waken up, this function only wakes one thread. - */ - -void sync_set_event(struct sync_object *event) -{ - spin_lock_bh(&sync_lock); - complete(&event->comp); - if (event->multi_comp) - complete(event->multi_comp); - spin_unlock_bh(&sync_lock); -} - -/** - * sync_wait_on_multiple_events() - waits for multiple events to be set. - * @events: Array of events to wait for them. - * @count: number of elements of the array. - * @timeout timeout on waiting for the evetns. - * @pu_index index of the event set. - * - * These functions will wait until any of the array element is set or until - * timeout. In case of success the function will return 0 and - * @pu_index will store the index of the array element set or in case - * of timeout the function will return -ETIME or in case of - * interrupting by a signal it will return -EPERM. 
- */ - -int sync_wait_on_multiple_events(struct sync_object **events, - unsigned count, unsigned timeout, - unsigned *index) -{ - unsigned i; - int status = -EPERM; - struct completion m_comp; - - init_completion(&m_comp); - - if (SYNC_INFINITE == timeout) - timeout = MAX_SCHEDULE_TIMEOUT; - - spin_lock_bh(&sync_lock); - for (i = 0; i < count; i++) { - if (completion_done(&events[i]->comp)) { - reinit_completion(&events[i]->comp); - *index = i; - spin_unlock_bh(&sync_lock); - status = 0; - goto func_end; - } - } - - for (i = 0; i < count; i++) - events[i]->multi_comp = &m_comp; - - spin_unlock_bh(&sync_lock); - - if (!wait_for_completion_interruptible_timeout(&m_comp, - msecs_to_jiffies(timeout))) - status = -ETIME; - - spin_lock_bh(&sync_lock); - for (i = 0; i < count; i++) { - if (completion_done(&events[i]->comp)) { - reinit_completion(&events[i]->comp); - *index = i; - status = 0; - } - events[i]->multi_comp = NULL; - } - spin_unlock_bh(&sync_lock); -func_end: - return status; -} - -/** - * dsp_notifier_event() - callback function to nofity events - * @this: pointer to itself struct notifier_block - * @event: event to be notified. - * @data: Currently not used. - * - */ -int dsp_notifier_event(struct notifier_block *this, unsigned long event, - void *data) -{ - struct ntfy_event *ne = container_of(this, struct ntfy_event, - noti_block); - if (ne->event & event) - sync_set_event(&ne->sync_obj); - return NOTIFY_OK; -} diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c deleted file mode 100644 index cb50120ed7b5..000000000000 --- a/drivers/staging/tidspbridge/core/tiomap3430.c +++ /dev/null @@ -1,1813 +0,0 @@ -/* - * tiomap.c - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Processor Manager Driver for TI OMAP3430 EVM. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. 
- * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -#include <linux/platform_data/dsp-omap.h> - -#include <linux/types.h> -/* ----------------------------------- Host OS */ -#include <dspbridge/host_os.h> -#include <linux/mm.h> -#include <linux/mmzone.h> - -/* ----------------------------------- DSP/BIOS Bridge */ -#include <dspbridge/dbdefs.h> - -/* ----------------------------------- OS Adaptation Layer */ -#include <dspbridge/drv.h> -#include <dspbridge/sync.h> - -/* ------------------------------------ Hardware Abstraction Layer */ -#include <hw_defs.h> -#include <hw_mmu.h> - -/* ----------------------------------- Link Driver */ -#include <dspbridge/dspdefs.h> -#include <dspbridge/dspchnl.h> -#include <dspbridge/dspdeh.h> -#include <dspbridge/dspio.h> -#include <dspbridge/dspmsg.h> -#include <dspbridge/pwr.h> -#include <dspbridge/io_sm.h> - -/* ----------------------------------- Platform Manager */ -#include <dspbridge/dev.h> -#include <dspbridge/dspapi.h> -#include <dspbridge/dmm.h> -#include <dspbridge/wdt.h> - -/* ----------------------------------- Local */ -#include "_tiomap.h" -#include "_tiomap_pwr.h" -#include "tiomap_io.h" - -/* Offset in shared mem to write to in order to synchronize start with DSP */ -#define SHMSYNCOFFSET 4 /* GPP byte offset */ - -#define BUFFERSIZE 1024 - -#define TIHELEN_ACKTIMEOUT 10000 - -#define MMU_SECTION_ADDR_MASK 0xFFF00000 -#define MMU_SSECTION_ADDR_MASK 0xFF000000 -#define MMU_LARGE_PAGE_MASK 0xFFFF0000 -#define MMU_SMALL_PAGE_MASK 0xFFFFF000 -#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 -#define PAGES_II_LVL_TABLE 512 -#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> 
PAGE_SHIFT) - -/* IVA Boot modes */ -#define DIRECT 0 -#define IDLE 1 - -/* Forward Declarations: */ -static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); -static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, - u8 *host_buff, - u32 dsp_addr, u32 ul_num_bytes, - u32 mem_type); -static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, - u32 dsp_addr); -static int bridge_brd_status(struct bridge_dev_context *dev_ctxt, - int *board_state); -static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt); -static int bridge_brd_write(struct bridge_dev_context *dev_ctxt, - u8 *host_buff, - u32 dsp_addr, u32 ul_num_bytes, - u32 mem_type); -static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt, - u32 brd_state); -static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, - u32 dsp_dest_addr, u32 dsp_src_addr, - u32 ul_num_bytes, u32 mem_type); -static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, - u8 *host_buff, u32 dsp_addr, - u32 ul_num_bytes, u32 mem_type); -static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, - u32 ul_mpu_addr, u32 virt_addr, - u32 ul_num_bytes, u32 ul_map_attr, - struct page **mapped_pages); -static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, - u32 virt_addr, u32 ul_num_bytes); -static int bridge_dev_create(struct bridge_dev_context - **dev_cntxt, - struct dev_object *hdev_obj, - struct cfg_hostres *config_param); -static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, - u32 dw_cmd, void *pargs); -static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); -static u32 user_va2_pa(struct mm_struct *mm, u32 address); -static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, - u32 va, u32 size, - struct hw_mmu_map_attrs_t *map_attrs); -static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, - u32 size, struct hw_mmu_map_attrs_t *attrs); -static int mem_map_vmalloc(struct bridge_dev_context *dev_context, - u32 
ul_mpu_addr, u32 virt_addr, - u32 ul_num_bytes, - struct hw_mmu_map_attrs_t *hw_attrs); - -bool wait_for_start(struct bridge_dev_context *dev_context, - void __iomem *sync_addr); - -/* ----------------------------------- Globals */ - -/* Attributes of L2 page tables for DSP MMU */ -struct page_info { - u32 num_entries; /* Number of valid PTEs in the L2 PT */ -}; - -/* Attributes used to manage the DSP MMU page tables */ -struct pg_table_attrs { - spinlock_t pg_lock; /* Critical section object handle */ - - u32 l1_base_pa; /* Physical address of the L1 PT */ - u32 l1_base_va; /* Virtual address of the L1 PT */ - u32 l1_size; /* Size of the L1 PT */ - u32 l1_tbl_alloc_pa; - /* Physical address of Allocated mem for L1 table. May not be aligned */ - u32 l1_tbl_alloc_va; - /* Virtual address of Allocated mem for L1 table. May not be aligned */ - u32 l1_tbl_alloc_sz; - /* Size of consistent memory allocated for L1 table. - * May not be aligned */ - - u32 l2_base_pa; /* Physical address of the L2 PT */ - u32 l2_base_va; /* Virtual address of the L2 PT */ - u32 l2_size; /* Size of the L2 PT */ - u32 l2_tbl_alloc_pa; - /* Physical address of Allocated mem for L2 table. May not be aligned */ - u32 l2_tbl_alloc_va; - /* Virtual address of Allocated mem for L2 table. May not be aligned */ - u32 l2_tbl_alloc_sz; - /* Size of consistent memory allocated for L2 table. - * May not be aligned */ - - u32 l2_num_pages; /* Number of allocated L2 PT */ - /* Array [l2_num_pages] of L2 PT info structs */ - struct page_info *pg_info; -}; - -/* - * This Bridge driver's function interface table. - */ -static struct bridge_drv_interface drv_interface_fxns = { - /* Bridge API ver. for which this bridge driver is built. 
*/ - BRD_API_MAJOR_VERSION, - BRD_API_MINOR_VERSION, - bridge_dev_create, - bridge_dev_destroy, - bridge_dev_ctrl, - bridge_brd_monitor, - bridge_brd_start, - bridge_brd_stop, - bridge_brd_status, - bridge_brd_read, - bridge_brd_write, - bridge_brd_set_state, - bridge_brd_mem_copy, - bridge_brd_mem_write, - bridge_brd_mem_map, - bridge_brd_mem_un_map, - /* The following CHNL functions are provided by chnl_io.lib: */ - bridge_chnl_create, - bridge_chnl_destroy, - bridge_chnl_open, - bridge_chnl_close, - bridge_chnl_add_io_req, - bridge_chnl_get_ioc, - bridge_chnl_cancel_io, - bridge_chnl_flush_io, - bridge_chnl_get_info, - bridge_chnl_get_mgr_info, - bridge_chnl_idle, - bridge_chnl_register_notify, - /* The following IO functions are provided by chnl_io.lib: */ - bridge_io_create, - bridge_io_destroy, - bridge_io_on_loaded, - bridge_io_get_proc_load, - /* The following msg_ctrl functions are provided by chnl_io.lib: */ - bridge_msg_create, - bridge_msg_create_queue, - bridge_msg_delete, - bridge_msg_delete_queue, - bridge_msg_get, - bridge_msg_put, - bridge_msg_register_notify, - bridge_msg_set_queue_id, -}; - -static struct notifier_block dsp_mbox_notifier = { - .notifier_call = io_mbox_msg, -}; - -static inline void flush_all(struct bridge_dev_context *dev_context) -{ - if (dev_context->brd_state == BRD_DSP_HIBERNATION || - dev_context->brd_state == BRD_HIBERNATION) - wake_dsp(dev_context, NULL); - - hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base); -} - -static void bad_page_dump(u32 pa, struct page *pg) -{ - pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa); - pr_emerg("Bad page state in process '%s'\n" - "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" - "Backtrace:\n", - current->comm, pg, (int)(2 * sizeof(unsigned long)), - (unsigned long)pg->flags, pg->mapping, - page_mapcount(pg), page_count(pg)); - dump_stack(); -} - -/* - * ======== bridge_drv_entry ======== - * purpose: - * Bridge Driver entry point. 
- */ -void bridge_drv_entry(struct bridge_drv_interface **drv_intf, - const char *driver_file_name) -{ - if (strcmp(driver_file_name, "UMA") == 0) - *drv_intf = &drv_interface_fxns; - else - dev_dbg(bridge, "%s Unknown Bridge file name", __func__); - -} - -/* - * ======== bridge_brd_monitor ======== - * purpose: - * This bridge_brd_monitor puts DSP into a Loadable state. - * i.e Application can load and start the device. - * - * Preconditions: - * Device in 'OFF' state. - */ -static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) -{ - struct bridge_dev_context *dev_context = dev_ctxt; - u32 temp; - struct omap_dsp_platform_data *pdata = - omap_dspbridge_dev->dev.platform_data; - - temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & - OMAP_POWERSTATEST_MASK; - if (!(temp & 0x02)) { - /* IVA2 is not in ON state */ - /* Read and set PM_PWSTCTRL_IVA2 to ON */ - (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK, - PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL); - /* Set the SW supervised state transition */ - (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, - OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); - - /* Wait until the state has moved to ON */ - while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, - OMAP2_PM_PWSTST) & - OMAP_INTRANSITION_MASK) - ; - /* Disable Automatic transition */ - (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, - OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); - } - (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, - OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); - dsp_clk_enable(DSP_CLK_IVA2); - - /* set the device state to IDLE */ - dev_context->brd_state = BRD_IDLE; - - return 0; -} - -/* - * ======== bridge_brd_read ======== - * purpose: - * Reads buffers for DSP memory. 
- */ -static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, - u8 *host_buff, u32 dsp_addr, - u32 ul_num_bytes, u32 mem_type) -{ - int status = 0; - struct bridge_dev_context *dev_context = dev_ctxt; - u32 offset; - u32 dsp_base_addr = dev_ctxt->dsp_base_addr; - - if (dsp_addr < dev_context->dsp_start_add) { - status = -EPERM; - return status; - } - /* change here to account for the 3 bands of the DSP internal memory */ - if ((dsp_addr - dev_context->dsp_start_add) < - dev_context->internal_size) { - offset = dsp_addr - dev_context->dsp_start_add; - } else { - status = read_ext_dsp_data(dev_context, host_buff, dsp_addr, - ul_num_bytes, mem_type); - return status; - } - /* copy the data from DSP memory */ - memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes); - return status; -} - -/* - * ======== bridge_brd_set_state ======== - * purpose: - * This routine updates the Board status. - */ -static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt, - u32 brd_state) -{ - int status = 0; - struct bridge_dev_context *dev_context = dev_ctxt; - - dev_context->brd_state = brd_state; - return status; -} - -/* - * ======== bridge_brd_start ======== - * purpose: - * Initializes DSP MMU and Starts DSP. - * - * Preconditions: - * a) DSP domain is 'ACTIVE'. - * b) DSP_RST1 is asserted. - * b) DSP_RST2 is released. 
- */ -static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, - u32 dsp_addr) -{ - int status = 0; - struct bridge_dev_context *dev_context = dev_ctxt; - void __iomem *sync_addr; - u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ - u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ - u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ - u32 shm_sync_pa; - /* Offset of shm_base_virt from tlb_base_virt */ - u32 ul_shm_offset_virt; - s32 entry_ndx; - s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */ - struct cfg_hostres *resources = NULL; - u32 temp; - u32 ul_dsp_clk_rate; - u32 ul_dsp_clk_addr; - u32 ul_bios_gp_timer; - u32 clk_cmd; - struct io_mgr *hio_mgr; - u32 ul_load_monitor_timer; - u32 wdt_en = 0; - struct omap_dsp_platform_data *pdata = - omap_dspbridge_dev->dev.platform_data; - - /* The device context contains all the mmu setup info from when the - * last dsp base image was loaded. The first entry is always - * SHMMEM base. */ - /* Get SHM_BEG - convert to byte address */ - (void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME, - &ul_shm_base_virt); - ul_shm_base_virt *= DSPWORDSIZE; - /* DSP Virtual address */ - ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va; - ul_shm_offset_virt = - ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); - /* Kernel logical address */ - ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt; - - /* SHM physical sync address */ - shm_sync_pa = dev_context->atlb_entry[0].gpp_pa + ul_shm_offset_virt + - SHMSYNCOFFSET; - - /* 2nd wd is used as sync field */ - sync_addr = ioremap(shm_sync_pa, SZ_32); - if (!sync_addr) - return -ENOMEM; - - /* Write a signature into the shm base + offset; this will - * get cleared when the DSP program starts. 
*/ - if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) { - pr_err("%s: Illegal SM base\n", __func__); - status = -EPERM; - } else - __raw_writel(0xffffffff, sync_addr); - - if (!status) { - resources = dev_context->resources; - if (!resources) - status = -EPERM; - - /* Assert RST1 i.e only the RST only for DSP megacell */ - if (!status) { - (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, - OMAP3430_RST1_IVA2_MASK, - OMAP3430_IVA2_MOD, - OMAP2_RM_RSTCTRL); - - /* Mask address with 1K for compatibility */ - pdata->set_bootaddr(dsp_addr & - OMAP3_IVA2_BOOTADDR_MASK); - pdata->set_bootmode(dsp_debug ? IDLE : DIRECT); - } - } - if (!status) { - /* Reset and Unreset the RST2, so that BOOTADDR is copied to - * IVA2 SYSC register */ - (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, - OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, - OMAP2_RM_RSTCTRL); - udelay(100); - (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, - OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); - udelay(100); - - /* Disbale the DSP MMU */ - hw_mmu_disable(resources->dmmu_base); - /* Disable TWL */ - hw_mmu_twl_disable(resources->dmmu_base); - - /* Only make TLB entry if both addresses are non-zero */ - for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; - entry_ndx++) { - struct bridge_ioctl_extproc *e = - &dev_context->atlb_entry[entry_ndx]; - struct hw_mmu_map_attrs_t map_attrs = { - .endianism = e->endianism, - .element_size = e->elem_size, - .mixed_size = e->mixed_mode, - }; - - if (!e->gpp_pa || !e->dsp_va) - continue; - - dev_dbg(bridge, - "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x", - itmp_entry_ndx, - e->gpp_pa, - e->dsp_va, - e->size); - - hw_mmu_tlb_add(dev_context->dsp_mmu_base, - e->gpp_pa, - e->dsp_va, - e->size, - itmp_entry_ndx, - &map_attrs, 1, 1); - - itmp_entry_ndx++; - } - } - - /* Lock the above TLB entries and get the BIOS and load monitor timer - * information */ - if (!status) { - hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx); - 
hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx); - hw_mmu_ttb_set(resources->dmmu_base, - dev_context->pt_attrs->l1_base_pa); - hw_mmu_twl_enable(resources->dmmu_base); - /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ - - temp = __raw_readl((resources->dmmu_base) + 0x10); - temp = (temp & 0xFFFFFFEF) | 0x11; - __raw_writel(temp, (resources->dmmu_base) + 0x10); - - /* Let the DSP MMU run */ - hw_mmu_enable(resources->dmmu_base); - - /* Enable the BIOS clock */ - (void)dev_get_symbol(dev_context->dev_obj, - BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); - (void)dev_get_symbol(dev_context->dev_obj, - BRIDGEINIT_LOADMON_GPTIMER, - &ul_load_monitor_timer); - } - - if (!status) { - if (ul_load_monitor_timer != 0xFFFF) { - clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | - ul_load_monitor_timer; - dsp_peripheral_clk_ctrl(dev_context, &clk_cmd); - } else { - dev_dbg(bridge, "Not able to get the symbol for Load " - "Monitor Timer\n"); - } - } - - if (!status) { - if (ul_bios_gp_timer != 0xFFFF) { - clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | - ul_bios_gp_timer; - dsp_peripheral_clk_ctrl(dev_context, &clk_cmd); - } else { - dev_dbg(bridge, - "Not able to get the symbol for BIOS Timer\n"); - } - } - - if (!status) { - /* Set the DSP clock rate */ - (void)dev_get_symbol(dev_context->dev_obj, - "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); - /*Set Autoidle Mode for IVA2 PLL */ - (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT, - OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL); - - if ((unsigned int *)ul_dsp_clk_addr != NULL) { - /* Get the clock rate */ - ul_dsp_clk_rate = dsp_clk_get_iva2_rate(); - dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n", - __func__, ul_dsp_clk_rate); - (void)bridge_brd_write(dev_context, - (u8 *) &ul_dsp_clk_rate, - ul_dsp_clk_addr, sizeof(u32), 0); - } - /* - * Enable Mailbox events and also drain any pending - * stale messages. 
- */ - dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier); - if (IS_ERR(dev_context->mbox)) { - dev_context->mbox = NULL; - pr_err("%s: Failed to get dsp mailbox handle\n", - __func__); - status = -EPERM; - } - - } - if (!status) { -/*PM_IVA2GRPSEL_PER = 0xC0;*/ - temp = readl(resources->per_pm_base + 0xA8); - temp = (temp & 0xFFFFFF30) | 0xC0; - writel(temp, resources->per_pm_base + 0xA8); - -/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */ - temp = readl(resources->per_pm_base + 0xA4); - temp = (temp & 0xFFFFFF3F); - writel(temp, resources->per_pm_base + 0xA4); -/*CM_SLEEPDEP_PER |= 0x04; */ - temp = readl(resources->per_base + 0x44); - temp = (temp & 0xFFFFFFFB) | 0x04; - writel(temp, resources->per_base + 0x44); - -/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */ - (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, - OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); - - /* Let DSP go */ - dev_dbg(bridge, "%s Unreset\n", __func__); - /* Enable DSP MMU Interrupts */ - hw_mmu_event_enable(resources->dmmu_base, - HW_MMU_ALL_INTERRUPTS); - /* release the RST1, DSP starts executing now .. 
*/ - (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, - OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); - - dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", *(u32 *)sync_addr); - dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr); - if (dsp_debug) - while (__raw_readw(sync_addr)) - ; - - /* Wait for DSP to clear word in shared memory */ - /* Read the Location */ - if (!wait_for_start(dev_context, sync_addr)) - status = -ETIMEDOUT; - - dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en); - if (wdt_en) { - /* Start wdt */ - dsp_wdt_sm_set((void *)ul_shm_base); - dsp_wdt_enable(true); - } - - status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr); - if (hio_mgr) { - io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL); - /* Write the synchronization bit to indicate the - * completion of OPP table update to DSP - */ - __raw_writel(0XCAFECAFE, sync_addr); - - /* update board state */ - dev_context->brd_state = BRD_RUNNING; - /* (void)chnlsm_enable_interrupt(dev_context); */ - } else { - dev_context->brd_state = BRD_UNKNOWN; - } - } - - iounmap(sync_addr); - - return status; -} - -/* - * ======== bridge_brd_stop ======== - * purpose: - * Puts DSP in self loop. - * - * Preconditions : - * a) None - */ -static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt) -{ - int status = 0; - struct bridge_dev_context *dev_context = dev_ctxt; - struct pg_table_attrs *pt_attrs; - u32 dsp_pwr_state; - struct omap_dsp_platform_data *pdata = - omap_dspbridge_dev->dev.platform_data; - - if (dev_context->brd_state == BRD_STOPPED) - return status; - - /* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode, - * before turning off the clocks.. 
This is to ensure that there are no - * pending L3 or other transactons from IVA2 */ - dsp_pwr_state = (*pdata->dsp_prm_read) - (OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK; - if (dsp_pwr_state != PWRDM_POWER_OFF) { - (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, - OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); - sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE); - mdelay(10); - - /* IVA2 is not in OFF state */ - /* Set PM_PWSTCTRL_IVA2 to OFF */ - (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK, - PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL); - /* Set the SW supervised state transition for Sleep */ - (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, - OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); - } - udelay(10); - /* Release the Ext Base virtual Address as the next DSP Program - * may have a different load address */ - if (dev_context->dsp_ext_base_addr) - dev_context->dsp_ext_base_addr = 0; - - dev_context->brd_state = BRD_STOPPED; /* update board state */ - - dsp_wdt_enable(false); - - /* This is a good place to clear the MMU page tables as well */ - if (dev_context->pt_attrs) { - pt_attrs = dev_context->pt_attrs; - memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size); - memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size); - memset((u8 *) pt_attrs->pg_info, 0x00, - (pt_attrs->l2_num_pages * sizeof(struct page_info))); - } - /* Disable the mailbox interrupts */ - if (dev_context->mbox) { - omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); - omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier); - dev_context->mbox = NULL; - } - /* Reset IVA2 clocks*/ - (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | - OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK, - OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); - - dsp_clock_disable_all(dev_context->dsp_per_clks); - dsp_clk_disable(DSP_CLK_IVA2); - - return status; -} - -/* - * ======== bridge_brd_status ======== - * Returns the board status. 
 */
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state)
{
	struct bridge_dev_context *dev_context = dev_ctxt;

	/* Report the last cached board state (a BRD_* value); never fails. */
	*board_state = dev_context->brd_state;
	return 0;
}

/*
 * ======== bridge_brd_write ========
 * Copies the buffers to DSP internal or external memory.
 * Returns -EPERM when dsp_addr lies below the DSP start address.
 */
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff, u32 dsp_addr,
			    u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;

	if (dsp_addr < dev_context->dsp_start_add) {
		status = -EPERM;
		return status;
	}
	/* Route to internal vs. external DSP memory based on the offset
	 * from the DSP start address. */
	if ((dsp_addr - dev_context->dsp_start_add) <
	    dev_context->internal_size) {
		status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					ul_num_bytes, mem_type);
	} else {
		status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
					    ul_num_bytes, mem_type, false);
	}

	return status;
}

/*
 * ======== bridge_dev_create ========
 * Creates a driver object. Puts DSP in self loop.
 *
 * Allocates the bridge device context, maps the DSP base, and builds the
 * L1/L2 MMU page-table backing store. On success *dev_cntxt receives the
 * new context; on failure all partial allocations are released.
 */
static int bridge_dev_create(struct bridge_dev_context
			     **dev_cntxt,
			     struct dev_object *hdev_obj,
			     struct cfg_hostres *config_param)
{
	int status = 0;
	struct bridge_dev_context *dev_context = NULL;
	s32 entry_ndx;
	struct cfg_hostres *resources = config_param;
	struct pg_table_attrs *pt_attrs;
	u32 pg_tbl_pa;
	u32 pg_tbl_va;
	u32 align_size;
	/* NOTE(review): drv_datap is dereferenced below without a NULL
	 * check — confirm dev_get_drvdata() cannot return NULL here. */
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* Allocate and initialize a data structure to contain the bridge driver
	 * state, which becomes the context for later calls into this driver */
	dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
	if (!dev_context) {
		status = -ENOMEM;
		goto func_end;
	}

	dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
	dev_context->self_loop = (u32) NULL;
	dev_context->dsp_per_clks = 0;
	dev_context->internal_size = OMAP_DSP_SIZE;
	/* Clear dev context MMU table entries.
	 * These get set on bridge_io_on_loaded() call after program loaded. */
	for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
		dev_context->atlb_entry[entry_ndx].gpp_pa =
		    dev_context->atlb_entry[entry_ndx].dsp_va = 0;
	}
	dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
							      (config_param->
							       mem_base
							       [3]),
							      config_param->
							      mem_length
							      [3]);
	if (!dev_context->dsp_base_addr)
		status = -EPERM;

	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
	if (pt_attrs != NULL) {
		pt_attrs->l1_size = SZ_16K;	/* 4096 entries of 32 bits */
		align_size = pt_attrs->l1_size;
		/* Align sizes are expected to be power of 2 */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
						     align_size, &pg_tbl_pa);

		/* Check if the PA is aligned for us */
		if ((pg_tbl_pa) & (align_size - 1)) {
			/* PA not aligned to page table size ,
			 * try with more allocation and align */
			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
					  pt_attrs->l1_size);
			/* we like to get aligned on L1 table size */
			pg_tbl_va =
			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
						     align_size, &pg_tbl_pa);
			/* We should be able to get aligned table now */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
			/* Align the PA to the next 'align' boundary */
			pt_attrs->l1_base_pa =
			    ((pg_tbl_pa) +
			     (align_size - 1)) & (~(align_size - 1));
			pt_attrs->l1_base_va =
			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
		} else {
			/* We got aligned PA, cool */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
			pt_attrs->l1_base_pa = pg_tbl_pa;
			pt_attrs->l1_base_va = pg_tbl_va;
		}
		if (pt_attrs->l1_base_va)
			memset((u8 *) pt_attrs->l1_base_va, 0x00,
			       pt_attrs->l1_size);

		/* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
		 * L4 pages */
		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
		    pt_attrs->l2_num_pages;
		align_size = 4;	/* Make it u32 aligned */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
						     align_size, &pg_tbl_pa);
		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
		pt_attrs->l2_base_pa = pg_tbl_pa;
		pt_attrs->l2_base_va = pg_tbl_va;

		if (pt_attrs->l2_base_va)
			memset((u8 *) pt_attrs->l2_base_va, 0x00,
			       pt_attrs->l2_size);

		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
					    sizeof(struct page_info),
					    GFP_KERNEL);
		dev_dbg(bridge,
			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
			"%x, size %x\n", pt_attrs->l1_base_pa,
			pt_attrs->l1_base_va, pt_attrs->l1_size,
			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
			pt_attrs->l2_size);
		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
	}
	/* Only publish pt_attrs into the context if every piece (L1 table,
	 * L2 tables, per-page bookkeeping) was allocated successfully. */
	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
		dev_context->pt_attrs = pt_attrs;
	else
		status = -ENOMEM;

	if (!status) {
		spin_lock_init(&pt_attrs->pg_lock);
		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;

		/* Set the Clock Divisor for the DSP module */
		udelay(5);
		/* MMU address is obtained from the host
		 * resources struct */
		dev_context->dsp_mmu_base = resources->dmmu_base;
	}
	if (!status) {
		dev_context->dev_obj = hdev_obj;
		/* Store current board state. */
		dev_context->brd_state = BRD_UNKNOWN;
		dev_context->resources = resources;
		dsp_clk_enable(DSP_CLK_IVA2);
		bridge_brd_stop(dev_context);
		/* Return ptr to our device state to the DSP API for storage */
		*dev_cntxt = dev_context;
	} else {
		/* Failure: unwind the page-table allocations and context. */
		if (pt_attrs != NULL) {
			kfree(pt_attrs->pg_info);

			if (pt_attrs->l2_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l2_tbl_alloc_va,
						  pt_attrs->l2_tbl_alloc_pa,
						  pt_attrs->l2_tbl_alloc_sz);
			}
			if (pt_attrs->l1_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l1_tbl_alloc_va,
						  pt_attrs->l1_tbl_alloc_pa,
						  pt_attrs->l1_tbl_alloc_sz);
			}
		}
		kfree(pt_attrs);
		kfree(dev_context);
	}
func_end:
	return status;
}

/*
 * ======== bridge_dev_ctrl ========
 * Receives device specific commands.
 */
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs)
{
	int status = 0;
	struct bridge_ioctl_extproc *pa_ext_proc =
	    (struct bridge_ioctl_extproc *)pargs;
	s32 ndx;

	switch (dw_cmd) {
	case BRDIOCTL_CHNLREAD:
		break;
	case BRDIOCTL_CHNLWRITE:
		break;
	case BRDIOCTL_SETMMUCONFIG:
		/* store away dsp-mmu setup values for later use */
		for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
			dev_context->atlb_entry[ndx] = *pa_ext_proc;
		break;
	case BRDIOCTL_DEEPSLEEP:
	case BRDIOCTL_EMERGENCYSLEEP:
		/* Currently only DSP Idle is supported Need to update for
		 * later releases */
		status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
		break;
	case BRDIOCTL_WAKEUP:
		status = wake_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CLK_CTRL:
		status = 0;
		/* Looking For Baseport Fix for Clocks */
		status = dsp_peripheral_clk_ctrl(dev_context, pargs);
		break;
	case BRDIOCTL_PWR_HIBERNATE:
		status = handle_hibernation_from_dsp(dev_context);
		break;
	case BRDIOCTL_PRESCALE_NOTIFY:
		status = pre_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CONSTRAINT_REQUEST:
		status = handle_constraints_set(dev_context, pargs);
		break;
	default:
		status = -EPERM;
		break;
	}
	return status;
}

/*
 * ======== bridge_dev_destroy ========
 * Destroys the driver object.
 *
 * Stops the board, frees the MMU page tables, releases/unmaps all host
 * resources, then frees the context itself.
 */
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
	struct pg_table_attrs *pt_attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
	    dev_ctxt;
	struct cfg_hostres *host_res;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* It should never happen */
	if (!dev_ctxt)
		return -EFAULT;

	/* first put the device to stop state */
	bridge_brd_stop(dev_context);
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		kfree(pt_attrs->pg_info);

		if (pt_attrs->l2_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
					  pt_attrs->l2_tbl_alloc_pa,
					  pt_attrs->l2_tbl_alloc_sz);
		}
		if (pt_attrs->l1_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
					  pt_attrs->l1_tbl_alloc_pa,
					  pt_attrs->l1_tbl_alloc_sz);
		}
		kfree(pt_attrs);

	}

	if (dev_context->resources) {
		host_res = dev_context->resources;
		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			if ((host_res->mem_base[1]) &&
			    (host_res->mem_phys[1])) {
				mem_free_phys_mem((void *)
						  host_res->mem_base
						  [1],
						  host_res->mem_phys
						  [1], shm_size);
			}
		} else {
			dev_dbg(bridge, "%s: Error getting shm size "
				"from registry: %x. Not calling "
				"mem_free_phys_mem\n", __func__,
				status);
		}
		host_res->mem_base[1] = 0;
		host_res->mem_phys[1] = 0;

		if (host_res->mem_base[0])
			iounmap((void *)host_res->mem_base[0]);
		if (host_res->mem_base[2])
			iounmap((void *)host_res->mem_base[2]);
		if (host_res->mem_base[3])
			iounmap((void *)host_res->mem_base[3]);
		if (host_res->mem_base[4])
			iounmap((void *)host_res->mem_base[4]);
		if (host_res->dmmu_base)
			iounmap(host_res->dmmu_base);
		if (host_res->per_base)
			iounmap(host_res->per_base);
		if (host_res->per_pm_base)
			iounmap((void *)host_res->per_pm_base);
		if (host_res->core_pm_base)
			iounmap((void *)host_res->core_pm_base);

		host_res->mem_base[0] = (u32) NULL;
		host_res->mem_base[2] = (u32) NULL;
		host_res->mem_base[3] = (u32) NULL;
		host_res->mem_base[4] = (u32) NULL;
		host_res->dmmu_base = NULL;

		kfree(host_res);
	}

	/* Free the driver's device context: */
	kfree(drv_datap->base_img);
	kfree((void *)dev_ctxt);
	return status;
}

/*
 * ======== bridge_brd_mem_copy ========
 * Copies ul_num_bytes within DSP memory, bouncing each chunk through a
 * small stack buffer: read from external memory, then write to internal
 * or external memory depending on the destination address.
 */
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	u32 src_addr = dsp_src_addr;
	u32 dest_addr = dsp_dest_addr;
	u32 copy_bytes = 0;
	u32 total_bytes = ul_num_bytes;
	u8 host_buf[BUFFERSIZE];
	struct bridge_dev_context *dev_context = dev_ctxt;

	while (total_bytes > 0 && !status) {
		copy_bytes =
		    total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
		/* Read from External memory */
		status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
					   copy_bytes, mem_type);
		if (!status) {
			if (dest_addr < (dev_context->dsp_start_add +
					 dev_context->internal_size)) {
				/* Write to Internal memory */
				status = write_dsp_data(dev_ctxt, host_buf,
							dest_addr, copy_bytes,
							mem_type);
			} else {
				/* Write to External memory */
				status =
				    write_ext_dsp_data(dev_ctxt, host_buf,
						       dest_addr, copy_bytes,
						       mem_type, false);
			}
		}
		total_bytes -= copy_bytes;
		src_addr += copy_bytes;
		dest_addr += copy_bytes;
	}
	return status;
}

/* Mem Write does not halt the DSP to write unlike bridge_brd_write */
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 ul_remain_bytes = 0;
	u32 ul_bytes = 0;

	ul_remain_bytes = ul_num_bytes;
	/* Copy in BUFFERSIZE chunks, choosing the internal/external write
	 * path per chunk by destination address. */
	while (ul_remain_bytes > 0 && !status) {
		ul_bytes =
		    ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
		if (dsp_addr < (dev_context->dsp_start_add +
				dev_context->internal_size)) {
			status =
			    write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					   ul_bytes, mem_type);
		} else {
			status = write_ext_dsp_data(dev_ctxt, host_buff,
						    dsp_addr, ul_bytes,
						    mem_type, true);
		}
		ul_remain_bytes -= ul_bytes;
		dsp_addr += ul_bytes;
		host_buff = host_buff + ul_bytes;
	}
	return status;
}

/*
 * ======== bridge_brd_mem_map ========
 * This function maps MPU buffer to the DSP address space. It performs
 * linear to physical address translation if required.
 It translates each
 * page since linear addresses can be physically non-contiguous
 * All address & size arguments are assumed to be page aligned (in proc.c)
 *
 * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
 */
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages)
{
	u32 attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct hw_mmu_map_attrs_t hw_attrs;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	u32 write = 0;
	u32 num_usr_pgs = 0;
	struct page *mapped_page, *pg;
	s32 pg_num;
	u32 va = virt_addr;
	struct task_struct *curr_task = current;
	u32 pg_i = 0;
	u32 mpu_addr, pa;

	dev_dbg(bridge,
		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
		ul_map_attr);
	if (ul_num_bytes == 0)
		return -EINVAL;

	if (ul_map_attr & DSP_MAP_DIR_MASK) {
		attrs = ul_map_attr;
	} else {
		/* Assign default attributes */
		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
	}
	/* Take mapping properties */
	if (attrs & DSP_MAPBIGENDIAN)
		hw_attrs.endianism = HW_BIG_ENDIAN;
	else
		hw_attrs.endianism = HW_LITTLE_ENDIAN;

	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
	/* Ignore element_size if mixed_size is enabled */
	if (hw_attrs.mixed_size == 0) {
		if (attrs & DSP_MAPELEMSIZE8) {
			/* Size is 8 bit */
			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
		} else if (attrs & DSP_MAPELEMSIZE16) {
			/* Size is 16 bit */
			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
		} else if (attrs & DSP_MAPELEMSIZE32) {
			/* Size is 32 bit */
			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
		} else if (attrs & DSP_MAPELEMSIZE64) {
			/* Size is 64 bit */
			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
		} else {
			/*
			 * Mixedsize isn't enabled, so size can't be
			 * zero here
			 */
			return -EINVAL;
		}
	}
	if (attrs & DSP_MAPDONOTLOCK)
		hw_attrs.donotlockmpupage = 1;
	else
		hw_attrs.donotlockmpupage = 0;

	if (attrs & DSP_MAPVMALLOCADDR) {
		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
				       ul_num_bytes, &hw_attrs);
	}
	/*
	 * Do OS-specific user-va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	if ((attrs & DSP_MAPPHYSICALADDR)) {
		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
				    ul_num_bytes, &hw_attrs);
		goto func_cont;
	}

	/*
	 * Important Note: ul_mpu_addr is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here!
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ul_mpu_addr);
	if (vma)
		dev_dbg(bridge,
			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);

	/*
	 * It is observed that under some circumstances, the user buffer is
	 * spread across several VMAs. So loop through and check if the entire
	 * user buffer is covered
	 */
	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
		/* jump to the next VMA region */
		vma = find_vma(mm, vma->vm_end + 1);
		dev_dbg(bridge,
			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);
	}
	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, ul_mpu_addr, ul_num_bytes);
		status = -EINVAL;
		up_read(&mm->mmap_sem);
		goto func_cont;
	}

	if (vma->vm_flags & VM_IO) {
		/* I/O mapping: walk the page tables ourselves, one 4K page
		 * at a time, since get_user_pages() does not cover VM_IO. */
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		mpu_addr = ul_mpu_addr;

		/* Get the physical addresses for user buffer */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pa = user_va2_pa(mm, mpu_addr);
			if (!pa) {
				status = -EPERM;
				pr_err("DSPBRIDGE: VM_IO mapping physical"
				       "address is invalid\n");
				break;
			}
			if (pfn_valid(__phys_to_pfn(pa))) {
				pg = PHYS_TO_PAGE(pa);
				get_page(pg);
				if (page_count(pg) < 1) {
					pr_err("Bad page in VM_IO buffer\n");
					bad_page_dump(pa, pg);
				}
			}
			status = pte_set(dev_context->pt_attrs, pa,
					 va, HW_PAGE_SIZE4KB, &hw_attrs);
			if (status)
				break;

			va += HW_PAGE_SIZE4KB;
			mpu_addr += HW_PAGE_SIZE4KB;
			/* NOTE(review): dead store — pa is recomputed from
			 * user_va2_pa() at the top of each iteration. */
			pa += HW_PAGE_SIZE4KB;
		}
	} else {
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
			write = 1;

		/* Pin each user page and install a 4K DSP PTE for it. */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
						write, 1, &mapped_page, NULL);
			if (pg_num > 0) {
				if (page_count(mapped_page) < 1) {
					pr_err("Bad page count after doing"
					       "get_user_pages on"
					       "user buffer\n");
					bad_page_dump(page_to_phys(mapped_page),
						      mapped_page);
				}
				status = pte_set(dev_context->pt_attrs,
						 page_to_phys(mapped_page), va,
						 HW_PAGE_SIZE4KB, &hw_attrs);
				if (status)
					break;

				if (mapped_pages)
					mapped_pages[pg_i] = mapped_page;

				va += HW_PAGE_SIZE4KB;
				ul_mpu_addr += HW_PAGE_SIZE4KB;
			} else {
				pr_err("DSPBRIDGE: get_user_pages FAILED,"
				       "MPU addr = 0x%x,"
				       "vma->vm_flags = 0x%lx,"
				       "get_user_pages Err"
				       "Value = %d, Buffer"
				       "size=0x%x\n", ul_mpu_addr,
				       vma->vm_flags, pg_num, ul_num_bytes);
				status = -EPERM;
				break;
			}
		}
	}
	up_read(&mm->mmap_sem);
func_cont:
	if (status) {
		/*
		 * Roll out the mapped pages incase it failed in middle of
		 * mapping
		 */
		if (pg_i) {
			bridge_brd_mem_un_map(dev_context, virt_addr,
					      (pg_i * PG_SIZE4K));
		}
		status = -EPERM;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}

/*
 * ======== bridge_brd_mem_un_map ========
 * Invalidate the PTEs for the DSP VA block to be unmapped.
 *
 * PTEs of a mapped memory block are contiguous in any page table
 * So, instead of looking up the PTE address for every 4K block,
 * we clear consecutive PTEs until we unmap all the bytes
 */
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes)
{
	u32 l1_base_va;
	u32 l2_base_va;
	u32 l2_base_pa;
	u32 l2_page_num;
	u32 pte_val;
	u32 pte_size;
	u32 pte_count;
	u32 pte_addr_l1;
	u32 pte_addr_l2 = 0;
	u32 rem_bytes;
	u32 rem_bytes_l2;
	u32 va_curr;
	struct page *pg = NULL;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt = dev_context->pt_attrs;
	u32 temp;
	u32 paddr;
	u32 numof4k_pages = 0;

	va_curr = virt_addr;
	rem_bytes = ul_num_bytes;
	rem_bytes_l2 = 0;
	l1_base_va = pt->l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
		ul_num_bytes, l1_base_va, pte_addr_l1);

	while (rem_bytes && !status) {
		u32 va_curr_orig = va_curr;
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
		pte_val = *(u32 *) pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);

		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
			goto skip_coarse_page;

		/*
		 * Get the L2 PA from the L1 PTE, and find
		 * corresponding L2 VA
		 */
		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
		l2_page_num =
		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		/*
		 * Find the L2 PTE address from which we will start
		 * clearing, the number of PTEs to be cleared on this
		 * page, and the size of VA space that needs to be
		 * cleared on this L2 page
		 */
		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
		if (rem_bytes < (pte_count * PG_SIZE4K))
			pte_count = rem_bytes / PG_SIZE4K;
		rem_bytes_l2 = pte_count * PG_SIZE4K;

		/*
		 * Unmap the VA space on this L2 PT. A quicker way
		 * would be to clear pte_count entries starting from
		 * pte_addr_l2. However, below code checks that we don't
		 * clear invalid entries or less than 64KB for a 64KB
		 * entry. Similar checking is done for L1 PTEs too
		 * below
		 */
		while (rem_bytes_l2 && !status) {
			pte_val = *(u32 *) pte_addr_l2;
			pte_size = hw_mmu_pte_size_l2(pte_val);
			/* va_curr aligned to pte_size? */
			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
			    va_curr & (pte_size - 1)) {
				status = -EPERM;
				break;
			}

			/* Collect Physical addresses from VA */
			paddr = (pte_val & ~(pte_size - 1));
			if (pte_size == HW_PAGE_SIZE64KB)
				numof4k_pages = 16;
			else
				numof4k_pages = 1;
			temp = 0;
			/* Release each 4K page that was pinned at map time. */
			while (temp++ < numof4k_pages) {
				if (!pfn_valid(__phys_to_pfn(paddr))) {
					paddr += HW_PAGE_SIZE4KB;
					continue;
				}
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
				paddr += HW_PAGE_SIZE4KB;
			}
			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
				status = -EPERM;
				goto EXIT_LOOP;
			}

			status = 0;
			rem_bytes_l2 -= pte_size;
			va_curr += pte_size;
			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
		}
		spin_lock(&pt->pg_lock);
		if (rem_bytes_l2 == 0) {
			pt->pg_info[l2_page_num].num_entries -= pte_count;
			if (pt->pg_info[l2_page_num].num_entries == 0) {
				/*
				 * Clear the L1 PTE pointing to the L2 PT
				 */
				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
						      HW_MMU_COARSE_PAGE_SIZE))
					status = 0;
				else {
					status = -EPERM;
					spin_unlock(&pt->pg_lock);
					goto EXIT_LOOP;
				}
			}
			rem_bytes -= pte_count * PG_SIZE4K;
		} else
			status = -EPERM;

		spin_unlock(&pt->pg_lock);
		continue;
skip_coarse_page:
		/* va_curr aligned to pte_size? */
		/* pte_size = 1 MB or 16 MB */
		if (pte_size == 0 || rem_bytes < pte_size ||
		    va_curr & (pte_size - 1)) {
			status = -EPERM;
			break;
		}

		if (pte_size == HW_PAGE_SIZE1MB)
			numof4k_pages = 256;
		else
			numof4k_pages = 4096;
		temp = 0;
		/* Collect Physical addresses from VA */
		paddr = (pte_val & ~(pte_size - 1));
		while (temp++ < numof4k_pages) {
			if (pfn_valid(__phys_to_pfn(paddr))) {
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
			}
			paddr += HW_PAGE_SIZE4KB;
		}
		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
			status = 0;
			rem_bytes -= pte_size;
			va_curr += pte_size;
		} else {
			status = -EPERM;
			goto EXIT_LOOP;
		}
	}
	/*
	 * It is better to flush the TLB here, so that any stale old entries
	 * get flushed
	 */
EXIT_LOOP:
	flush_all(dev_context);
	dev_dbg(bridge,
		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
	return status;
}

/*
 * ======== user_va2_pa ========
 * Purpose:
 *      This function walks through the page tables to convert a userland
 *      virtual address to physical address
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/* NOTE(review): pte_offset_map() without a matching pte_unmap() —
	 * verify against this kernel's highmem/pte-mapping rules. */
	ptep = pte_offset_map(pmd, address);
	if (ptep) {
		pte = *ptep;
		if (pte_present(pte))
			return pte & PAGE_MASK;
	}

	return 0;
}

/*
 * ======== pte_update ========
 * This function calculates the optimum
 page-aligned addresses and sizes
 * Caller must pass page-aligned values
 */
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
		      u32 va, u32 size,
		      struct hw_mmu_map_attrs_t *map_attrs)
{
	u32 i;
	u32 all_bits;
	u32 pa_curr = pa;
	u32 va_curr = va;
	u32 num_bytes = size;
	struct bridge_dev_context *dev_context = dev_ctxt;
	int status = 0;
	/* Candidate page sizes, largest first, so each chunk is mapped with
	 * the biggest page both addresses are aligned to. */
	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
	};

	while (num_bytes && !status) {
		/* To find the max. page size with which both PA & VA are
		 * aligned */
		all_bits = pa_curr | va_curr;

		for (i = 0; i < 4; i++) {
			if ((num_bytes >= page_size[i]) && ((all_bits &
							     (page_size[i] -
							      1)) == 0)) {
				status =
				    pte_set(dev_context->pt_attrs, pa_curr,
					    va_curr, page_size[i], map_attrs);
				pa_curr += page_size[i];
				va_curr += page_size[i];
				num_bytes -= page_size[i];
				/* Don't try smaller sizes. Hopefully we have
				 * reached an address aligned to a bigger page
				 * size */
				break;
			}
		}
	}

	return status;
}

/*
 * ======== pte_set ========
 * This function calculates PTE address (MPU virtual) to be updated
 * It also manages the L2 page tables
 */
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
		   u32 size, struct hw_mmu_map_attrs_t *attrs)
{
	u32 i;
	u32 pte_val;
	u32 pte_addr_l1;
	u32 pte_size;
	/* Base address of the PT that will be updated */
	u32 pg_tbl_va;
	u32 l1_base_va;
	/* Compiler warns that the next three variables might be used
	 * uninitialized in this function. Doesn't seem so. Working around,
	 * anyways. */
	u32 l2_base_va = 0;
	u32 l2_base_pa = 0;
	u32 l2_page_num = 0;
	int status = 0;

	l1_base_va = pt->l1_base_va;
	pg_tbl_va = l1_base_va;
	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
			pte_val = *(u32 *) pte_addr_l1;
			pte_size = hw_mmu_pte_size_l1(pte_val);
		} else {
			return -EPERM;
		}
		/* pg_lock serializes L2 table allocation and the per-table
		 * num_entries accounting. */
		spin_lock(&pt->pg_lock);
		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
			/* Get the L2 PA from the L1 PTE, and find
			 * corresponding L2 VA */
			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
			l2_base_va =
			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
			l2_page_num =
			    (l2_base_pa -
			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		} else if (pte_size == 0) {
			/* L1 PTE is invalid. Allocate a L2 PT and
			 * point the L1 PTE to it */
			/* Find a free L2 PT. */
			for (i = 0; (i < pt->l2_num_pages) &&
			     (pt->pg_info[i].num_entries != 0); i++)
				;
			if (i < pt->l2_num_pages) {
				l2_page_num = i;
				l2_base_pa = pt->l2_base_pa + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				l2_base_va = pt->l2_base_va + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				/* Endianness attributes are ignored for
				 * HW_MMU_COARSE_PAGE_SIZE */
				status =
				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
						   HW_MMU_COARSE_PAGE_SIZE,
						   attrs);
			} else {
				status = -ENOMEM;
			}
		} else {
			/* Found valid L1 PTE of another size.
			 * Should not overwrite it. */
			status = -EPERM;
		}
		if (!status) {
			pg_tbl_va = l2_base_va;
			if (size == HW_PAGE_SIZE64KB)
				pt->pg_info[l2_page_num].num_entries += 16;
			else
				pt->pg_info[l2_page_num].num_entries++;
			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
				"%x, num_entries %x\n", l2_base_va,
				l2_base_pa, l2_page_num,
				pt->pg_info[l2_page_num].num_entries);
		}
		spin_unlock(&pt->pg_lock);
	}
	if (!status) {
		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
			pg_tbl_va, pa, va, size);
		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
			"mixed_size %x\n", attrs->endianism,
			attrs->element_size, attrs->mixed_size);
		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
	}

	return status;
}

/* Memory map kernel VA -- memory allocated with vmalloc */
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
			   u32 ul_mpu_addr, u32 virt_addr,
			   u32 ul_num_bytes,
			   struct hw_mmu_map_attrs_t *hw_attrs)
{
	int status = 0;
	struct page *page[1];
	u32 i;
	u32 pa_curr;
	u32 pa_next;
	u32 va_curr;
	u32 size_curr;
	u32 num_pages;
	u32 pa;
	u32 num_of4k_pages;
	u32 temp = 0;

	/*
	 * Do Kernel va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
	i = 0;
	va_curr = ul_mpu_addr;
	page[0] = vmalloc_to_page((void *)va_curr);
	pa_next = page_to_phys(page[0]);
	while (!status && (i < num_pages)) {
		/*
		 * Reuse pa_next from the previous iteration to avoid
		 * an extra va2pa call
		 */
		pa_curr = pa_next;
		size_curr = PAGE_SIZE;
		/*
		 * If the next page is physically contiguous,
		 * map it with the current one by increasing
		 * the size of the region to be mapped
		 */
		while (++i < num_pages) {
			page[0] =
			    vmalloc_to_page((void *)(va_curr + size_curr));
			pa_next = page_to_phys(page[0]);

			if (pa_next == (pa_curr + size_curr))
				size_curr += PAGE_SIZE;
			else
				break;

		}
		if (pa_next == 0) {
			status = -ENOMEM;
			break;
		}
		pa = pa_curr;
		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
		/* NOTE(review): 'temp' is never reset between chunks, so
		 * get_page() appears to run only for the first chunk's worth
		 * of pages — confirm whether this is intended. */
		while (temp++ < num_of4k_pages) {
			get_page(PHYS_TO_PAGE(pa));
			pa += HW_PAGE_SIZE4KB;
		}
		status = pte_update(dev_context, pa_curr, virt_addr +
				    (va_curr - ul_mpu_addr), size_curr,
				    hw_attrs);
		va_curr += size_curr;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}

/*
 * ======== wait_for_start ========
 * Wait for the signal from DSP that it has started, or time out.
 */
bool wait_for_start(struct bridge_dev_context *dev_context,
		    void __iomem *sync_addr)
{
	u16 timeout = TIHELEN_ACKTIMEOUT;

	/* Wait for response from board */
	/* The DSP clears the sync word when it has started; poll until it
	 * reads zero or the retry budget is exhausted (10 us per retry). */
	while (__raw_readw(sync_addr) && --timeout)
		udelay(10);

	/* If timed out: return false */
	if (!timeout) {
		pr_err("%s: Timed out waiting DSP to Start\n", __func__);
		return false;
	}
	return true;
}
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
deleted file mode 100644
index 657104f37f7d..000000000000
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ /dev/null
@@ -1,556 +0,0 @@
/*
 * tiomap_pwr.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implementation of DSP wake/sleep routines.
 *
 * Copyright (C) 2007-2008 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

#include <linux/platform_data/dsp-omap.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
#include <dspbridge/drv.h>
#include <dspbridge/io_sm.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/brddefs.h>
#include <dspbridge/dev.h>
#include <dspbridge/io.h>

/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>

#include <dspbridge/pwr.h>

/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdeh.h>
#include <dspbridge/wdt.h>

/* ----------------------------------- specific to this file */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include <mach-omap2/prm-regbits-34xx.h>
#include <mach-omap2/cm-regbits-34xx.h>

/* Max time (ms) to wait for an IVA2 power-domain state transition. */
#define PWRSTST_TIMEOUT          200

/*
 * ======== handle_constraints_set ========
 * Sets new DSP constraint
 */
int handle_constraints_set(struct bridge_dev_context *dev_context,
			   void *pargs)
{
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 *constraint_val;
	struct omap_dsp_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;

	constraint_val = (u32 *) (pargs);
	/* Read the target value requested by DSP */
	dev_dbg(bridge, "OPP: %s opp requested = 0x%x\n", __func__,
		(u32) *(constraint_val + 1));

	/* Set the new opp value */
	if (pdata->dsp_set_min_opp)
		(*pdata->dsp_set_min_opp) ((u32) *(constraint_val + 1));
#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
	return 0;
}

/*
 * ======== handle_hibernation_from_dsp ========
 * Handle Hibernation requested from DSP
 *
 * Waits (up to PWRSTST_TIMEOUT) for the IVA2 power domain to reach OFF,
 * then saves mailbox context, stops peripheral clocks and the watchdog,
 * and records BRD_DSP_HIBERNATION.
 */
int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
{
	int status = 0;
#ifdef CONFIG_PM
	u16 timeout = PWRSTST_TIMEOUT / 10;
	u32 pwr_state;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 opplevel;
	struct io_mgr *hio_mgr;
#endif
	struct omap_dsp_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;

	pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
	    OMAP_POWERSTATEST_MASK;
	/* Wait for DSP to move into OFF state */
	while ((pwr_state != PWRDM_POWER_OFF) && --timeout) {
		if (msleep_interruptible(10)) {
			pr_err("Waiting for DSP OFF mode interrupted\n");
			return -EPERM;
		}
		pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
						   OMAP2_PM_PWSTST) &
		    OMAP_POWERSTATEST_MASK;
	}
	if (timeout == 0) {
		pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
		status = -ETIMEDOUT;
		return status;
	} else {

		/* Save mailbox settings */
		omap_mbox_save_ctx(dev_context->mbox);

		/* Turn off DSP Peripheral clocks and DSP Load monitor timer */
		status = dsp_clock_disable_all(dev_context->dsp_per_clks);

		/* Disable wdt on hibernation. */
		dsp_wdt_enable(false);

		if (!status) {
			/* Update the Bridger Driver state */
			dev_context->brd_state = BRD_DSP_HIBERNATION;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
			status =
			    dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
			if (!hio_mgr) {
				status = DSP_EHANDLE;
				return status;
			}
			io_sh_msetting(hio_mgr, SHM_GETOPP, &opplevel);

			/*
			 * Set the OPP to low level before moving to OFF
			 * mode
			 */
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP1);
			status = 0;
#endif /* CONFIG_TIDSPBRIDGE_DVFS */
		}
	}
#endif
	return status;
}

/*
 * ======== sleep_dsp ========
 * Put DSP in low power consuming state.
 */
int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
	      void *pargs)
{
	int status = 0;
#ifdef CONFIG_PM
#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
	struct deh_mgr *hdeh_mgr;
#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
	u16 timeout = PWRSTST_TIMEOUT / 10;
	u32 pwr_state, target_pwr_state;
	struct omap_dsp_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;

	/* Check if sleep code is valid */
	if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP))
		return -EINVAL;

	/* Pick the mailbox command and target power state based on the
	 * current board state; dsp_test_sleepstate selects OFF vs. RET. */
	switch (dev_context->brd_state) {
	case BRD_RUNNING:
		omap_mbox_save_ctx(dev_context->mbox);
		if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
			sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
			dev_dbg(bridge, "PM: %s - sent hibernate cmd to DSP\n",
				__func__);
			target_pwr_state = PWRDM_POWER_OFF;
		} else {
			sm_interrupt_dsp(dev_context, MBX_PM_DSPRETENTION);
			target_pwr_state = PWRDM_POWER_RET;
		}
		break;
	case BRD_RETENTION:
		omap_mbox_save_ctx(dev_context->mbox);
		if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
			sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
			target_pwr_state = PWRDM_POWER_OFF;
		} else
			return 0;
		break;
	case BRD_HIBERNATION:
	case BRD_DSP_HIBERNATION:
		/* Already in Hibernation, so just return */
		dev_dbg(bridge, "PM: %s - DSP already in hibernation\n",
			__func__);
		return 0;
	case BRD_STOPPED:
		dev_dbg(bridge, "PM: %s - Board in STOP state\n", __func__);
		return 0;
	default:
		dev_dbg(bridge, "PM: %s - Bridge in Illegal state\n", __func__);
		return -EPERM;
	}

	/* Get the PRCM DSP power domain status */
	pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
	    OMAP_POWERSTATEST_MASK;

	/* Wait for DSP to move into target power state */
	while ((pwr_state != target_pwr_state) && --timeout) {
		if (msleep_interruptible(10)) {
			pr_err("Waiting for DSP to Suspend interrupted\n");
			return -EPERM;
		}
		pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
						   OMAP2_PM_PWSTST) &
		    OMAP_POWERSTATEST_MASK;
	}

	if (!timeout) {
		pr_err("%s: Timed out waiting for DSP off mode, state %x\n",
		       __func__, pwr_state);
#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
		dev_get_deh_mgr(dev_context->dev_obj, &hdeh_mgr);
		bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0);
#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
		return -ETIMEDOUT;
	} else {
		/* Update the Bridger Driver state */
		if (dsp_test_sleepstate == PWRDM_POWER_OFF)
			dev_context->brd_state = BRD_HIBERNATION;
		else
			dev_context->brd_state = BRD_RETENTION;

		/* Disable wdt on hibernation. */
		dsp_wdt_enable(false);

		/* Turn off DSP Peripheral clocks */
		status = dsp_clock_disable_all(dev_context->dsp_per_clks);
		if (status)
			return status;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
		else if (target_pwr_state == PWRDM_POWER_OFF) {
			/*
			 * Set the OPP to low level before moving to OFF mode
			 */
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP1);
		}
#endif /* CONFIG_TIDSPBRIDGE_DVFS */
	}
#endif /* CONFIG_PM */
	return status;
}

/*
 * ======== wake_dsp ========
 * Wake up DSP from sleep.
 */
int wake_dsp(struct bridge_dev_context *dev_context, void *pargs)
{
	int status = 0;
#ifdef CONFIG_PM

	/* Check the board state, if it is not 'SLEEP' then return */
	if (dev_context->brd_state == BRD_RUNNING ||
	    dev_context->brd_state == BRD_STOPPED) {
		/* The Device is in 'RET' or 'OFF' state and Bridge state is not
		 * 'SLEEP', this means state inconsistency, so return */
		return 0;
	}

	/* Send a wakeup message to DSP */
	sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP);

	/* Set the device state to RUNNIG */
	dev_context->brd_state = BRD_RUNNING;
#endif /* CONFIG_PM */
	return status;
}

/*
 * ======== dsp_peripheral_clk_ctrl ========
 * Enable/Disable the DSP peripheral clocks as needed..
 */
int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
			    void *pargs)
{
	u32 ext_clk = 0;
	u32 ext_clk_id = 0;
	u32 ext_clk_cmd = 0;
	u32 clk_id_index = MBX_PM_MAX_RESOURCES;
	u32 tmp_index;
	u32 dsp_per_clks_before;
	int status = 0;

	dsp_per_clks_before = dev_context->dsp_per_clks;

	ext_clk = (u32) *((u32 *) pargs);
	ext_clk_id = ext_clk & MBX_PM_CLK_IDMASK;

	/* process the power message -- TODO, keep it in a separate function */
	for (tmp_index = 0; tmp_index < MBX_PM_MAX_RESOURCES; tmp_index++) {
		if (ext_clk_id == bpwr_clkid[tmp_index]) {
			clk_id_index = tmp_index;
			break;
		}
	}
	/* TODO -- Assert may be a too hard restriction here.. May be we should
	 * just return with failure when the CLK ID does not match */
	if (clk_id_index == MBX_PM_MAX_RESOURCES) {
		/* return with a more meaningfull error code */
		return -EPERM;
	}
	ext_clk_cmd = (ext_clk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK;
	switch (ext_clk_cmd) {
	case BPWR_DISABLE_CLOCK:
		status = dsp_clk_disable(bpwr_clks[clk_id_index].clk);
		dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id,
					  false);
		if (!status) {
			/* Clear the clock's bit in the enabled-clocks mask. */
			(dev_context->dsp_per_clks) &=
			    (~((u32) (1 << bpwr_clks[clk_id_index].clk)));
		}
		break;
	case BPWR_ENABLE_CLOCK:
		status = dsp_clk_enable(bpwr_clks[clk_id_index].clk);
		dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true);
		if (!status)
			(dev_context->dsp_per_clks) |=
			    (1 << bpwr_clks[clk_id_index].clk);
		break;
	default:
		dev_dbg(bridge, "%s: Unsupported CMD\n", __func__);
		/* unsupported cmd */
		/* TODO -- provide support for AUTOIDLE Enable/Disable
		 * commands */
	}
	return status;
}

/*
 * ========pre_scale_dsp========
 * Sends prescale notification to DSP
 *
 */
int pre_scale_dsp(struct bridge_dev_context *dev_context, void *pargs)
{
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 level;
	u32 voltage_domain;

	voltage_domain = *((u32 *) pargs);
	level = *((u32 *) pargs + 1);

	dev_dbg(bridge, 
"OPP: %s voltage_domain = %x, level = 0x%x\n", - __func__, voltage_domain, level); - if ((dev_context->brd_state == BRD_HIBERNATION) || - (dev_context->brd_state == BRD_RETENTION) || - (dev_context->brd_state == BRD_DSP_HIBERNATION)) { - dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n"); - return 0; - } else if (dev_context->brd_state == BRD_RUNNING) { - /* Send a prenotification to DSP */ - dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__); - sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY); - return 0; - } else { - return -EPERM; - } -#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */ - return 0; -} - -/* - * ========post_scale_dsp======== - * Sends postscale notification to DSP - * - */ -int post_scale_dsp(struct bridge_dev_context *dev_context, - void *pargs) -{ - int status = 0; -#ifdef CONFIG_TIDSPBRIDGE_DVFS - u32 level; - u32 voltage_domain; - struct io_mgr *hio_mgr; - - status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr); - if (!hio_mgr) - return -EFAULT; - - voltage_domain = *((u32 *) pargs); - level = *((u32 *) pargs + 1); - dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n", - __func__, voltage_domain, level); - if ((dev_context->brd_state == BRD_HIBERNATION) || - (dev_context->brd_state == BRD_RETENTION) || - (dev_context->brd_state == BRD_DSP_HIBERNATION)) { - /* Update the OPP value in shared memory */ - io_sh_msetting(hio_mgr, SHM_CURROPP, &level); - dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n", - __func__); - } else if (dev_context->brd_state == BRD_RUNNING) { - /* Update the OPP value in shared memory */ - io_sh_msetting(hio_mgr, SHM_CURROPP, &level); - /* Send a post notification to DSP */ - sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY); - dev_dbg(bridge, - "OPP: %s wrote to shm. 
Sent post notification to DSP\n", - __func__); - } else { - status = -EPERM; - } -#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */ - return status; -} - -void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable) -{ - struct cfg_hostres *resources; - int status = 0; - u32 iva2_grpsel; - u32 mpu_grpsel; - struct dev_object *hdev_object = NULL; - struct bridge_dev_context *bridge_context = NULL; - - hdev_object = (struct dev_object *)drv_get_first_dev_object(); - if (!hdev_object) - return; - - status = dev_get_bridge_context(hdev_object, &bridge_context); - if (!bridge_context) - return; - - resources = bridge_context->resources; - if (!resources) - return; - - switch (clock_id) { - case BPWR_GP_TIMER5: - iva2_grpsel = readl(resources->per_pm_base + 0xA8); - mpu_grpsel = readl(resources->per_pm_base + 0xA4); - if (enable) { - iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK; - mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK; - } else { - mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK; - iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK; - } - writel(iva2_grpsel, resources->per_pm_base + 0xA8); - writel(mpu_grpsel, resources->per_pm_base + 0xA4); - break; - case BPWR_GP_TIMER6: - iva2_grpsel = readl(resources->per_pm_base + 0xA8); - mpu_grpsel = readl(resources->per_pm_base + 0xA4); - if (enable) { - iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK; - mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK; - } else { - mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK; - iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK; - } - writel(iva2_grpsel, resources->per_pm_base + 0xA8); - writel(mpu_grpsel, resources->per_pm_base + 0xA4); - break; - case BPWR_GP_TIMER7: - iva2_grpsel = readl(resources->per_pm_base + 0xA8); - mpu_grpsel = readl(resources->per_pm_base + 0xA4); - if (enable) { - iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK; - mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK; - } else { - mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK; - iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK; - } - writel(iva2_grpsel, resources->per_pm_base + 0xA8); - 
writel(mpu_grpsel, resources->per_pm_base + 0xA4); - break; - case BPWR_GP_TIMER8: - iva2_grpsel = readl(resources->per_pm_base + 0xA8); - mpu_grpsel = readl(resources->per_pm_base + 0xA4); - if (enable) { - iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK; - mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK; - } else { - mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK; - iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK; - } - writel(iva2_grpsel, resources->per_pm_base + 0xA8); - writel(mpu_grpsel, resources->per_pm_base + 0xA4); - break; - case BPWR_MCBSP1: - iva2_grpsel = readl(resources->core_pm_base + 0xA8); - mpu_grpsel = readl(resources->core_pm_base + 0xA4); - if (enable) { - iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK; - mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK; - } else { - mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK; - iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK; - } - writel(iva2_grpsel, resources->core_pm_base + 0xA8); - writel(mpu_grpsel, resources->core_pm_base + 0xA4); - break; - case BPWR_MCBSP2: - iva2_grpsel = readl(resources->per_pm_base + 0xA8); - mpu_grpsel = readl(resources->per_pm_base + 0xA4); - if (enable) { - iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK; - mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK; - } else { - mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK; - iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK; - } - writel(iva2_grpsel, resources->per_pm_base + 0xA8); - writel(mpu_grpsel, resources->per_pm_base + 0xA4); - break; - case BPWR_MCBSP3: - iva2_grpsel = readl(resources->per_pm_base + 0xA8); - mpu_grpsel = readl(resources->per_pm_base + 0xA4); - if (enable) { - iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK; - mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK; - } else { - mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK; - iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK; - } - writel(iva2_grpsel, resources->per_pm_base + 0xA8); - writel(mpu_grpsel, resources->per_pm_base + 0xA4); - break; - case BPWR_MCBSP4: - iva2_grpsel = readl(resources->per_pm_base + 0xA8); - mpu_grpsel = 
readl(resources->per_pm_base + 0xA4); - if (enable) { - iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK; - mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK; - } else { - mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK; - iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK; - } - writel(iva2_grpsel, resources->per_pm_base + 0xA8); - writel(mpu_grpsel, resources->per_pm_base + 0xA4); - break; - case BPWR_MCBSP5: - iva2_grpsel = readl(resources->per_pm_base + 0xA8); - mpu_grpsel = readl(resources->per_pm_base + 0xA4); - if (enable) { - iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK; - mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK; - } else { - mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK; - iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK; - } - writel(iva2_grpsel, resources->per_pm_base + 0xA8); - writel(mpu_grpsel, resources->per_pm_base + 0xA4); - break; - } -} diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c deleted file mode 100644 index f53ed98d18c1..000000000000 --- a/drivers/staging/tidspbridge/core/tiomap_io.c +++ /dev/null @@ -1,438 +0,0 @@ -/* - * tiomap_io.c - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Implementation for the io read/write routines. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
- */ - -#include <linux/platform_data/dsp-omap.h> - -/* ----------------------------------- DSP/BIOS Bridge */ -#include <dspbridge/dbdefs.h> - -/* ----------------------------------- Platform Manager */ -#include <dspbridge/dev.h> -#include <dspbridge/drv.h> - -/* ----------------------------------- OS Adaptation Layer */ -#include <dspbridge/wdt.h> - -/* ----------------------------------- specific to this file */ -#include "_tiomap.h" -#include "_tiomap_pwr.h" -#include "tiomap_io.h" - -static u32 ul_ext_base; -static u32 ul_ext_end; - -static u32 shm0_end; -static u32 ul_dyn_ext_base; -static u32 ul_trace_sec_beg; -static u32 ul_trace_sec_end; -static u32 ul_shm_base_virt; - -bool symbols_reloaded = true; - -/* - * ======== read_ext_dsp_data ======== - * Copies DSP external memory buffers to the host side buffers. - */ -int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, - u8 *host_buff, u32 dsp_addr, - u32 ul_num_bytes, u32 mem_type) -{ - int status = 0; - struct bridge_dev_context *dev_context = dev_ctxt; - u32 offset; - u32 ul_tlb_base_virt = 0; - u32 ul_shm_offset_virt = 0; - u32 dw_ext_prog_virt_mem; - u32 dw_base_addr = dev_context->dsp_ext_base_addr; - bool trace_read = false; - - if (!ul_shm_base_virt) { - status = dev_get_symbol(dev_context->dev_obj, - SHMBASENAME, &ul_shm_base_virt); - } - - /* Check if it is a read of Trace section */ - if (!status && !ul_trace_sec_beg) { - status = dev_get_symbol(dev_context->dev_obj, - DSP_TRACESEC_BEG, &ul_trace_sec_beg); - } - - if (!status && !ul_trace_sec_end) { - status = dev_get_symbol(dev_context->dev_obj, - DSP_TRACESEC_END, &ul_trace_sec_end); - } - - if (!status) { - if ((dsp_addr <= ul_trace_sec_end) && - (dsp_addr >= ul_trace_sec_beg)) - trace_read = true; - } - - /* If reading from TRACE, force remap/unmap */ - if (trace_read && dw_base_addr) { - dw_base_addr = 0; - dev_context->dsp_ext_base_addr = 0; - } - - if (!dw_base_addr) { - /* Initialize ul_ext_base and ul_ext_end */ - ul_ext_base = 0; - 
ul_ext_end = 0; - - /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */ - if (!status && !ul_dyn_ext_base) { - status = dev_get_symbol(dev_context->dev_obj, - DYNEXTBASE, &ul_dyn_ext_base); - } - - if (!status) { - status = dev_get_symbol(dev_context->dev_obj, - EXTBASE, &ul_ext_base); - } - - if (!status) { - status = dev_get_symbol(dev_context->dev_obj, - EXTEND, &ul_ext_end); - } - - /* Trace buffer is right after the shm SEG0, - * so set the base address to SHMBASE */ - if (trace_read) { - ul_ext_base = ul_shm_base_virt; - ul_ext_end = ul_trace_sec_end; - } - - - if (ul_ext_end < ul_ext_base) - status = -EPERM; - - if (!status) { - ul_tlb_base_virt = - dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; - dw_ext_prog_virt_mem = - dev_context->atlb_entry[0].gpp_va; - - if (!trace_read) { - ul_shm_offset_virt = - ul_shm_base_virt - ul_tlb_base_virt; - ul_shm_offset_virt += - PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + - 1, HW_PAGE_SIZE64KB); - dw_ext_prog_virt_mem -= ul_shm_offset_virt; - dw_ext_prog_virt_mem += - (ul_ext_base - ul_dyn_ext_base); - dev_context->dsp_ext_base_addr = - dw_ext_prog_virt_mem; - - /* - * This dsp_ext_base_addr will get cleared - * only when the board is stopped. - */ - if (!dev_context->dsp_ext_base_addr) - status = -EPERM; - } - - dw_base_addr = dw_ext_prog_virt_mem; - } - } - - if (!dw_base_addr || !ul_ext_base || !ul_ext_end) - status = -EPERM; - - offset = dsp_addr - ul_ext_base; - - if (!status) - memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes); - - return status; -} - -/* - * ======== write_dsp_data ======== - * purpose: - * Copies buffers to the DSP internal/external memory. 
- */ -int write_dsp_data(struct bridge_dev_context *dev_context, - u8 *host_buff, u32 dsp_addr, u32 ul_num_bytes, - u32 mem_type) -{ - u32 offset; - u32 dw_base_addr = dev_context->dsp_base_addr; - struct cfg_hostres *resources = dev_context->resources; - int status = 0; - u32 base1, base2, base3; - base1 = OMAP_DSP_MEM1_SIZE; - base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE; - base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE; - - if (!resources) - return -EPERM; - - offset = dsp_addr - dev_context->dsp_start_add; - if (offset < base1) { - dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[2], - resources->mem_length[2]); - } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) { - dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[3], - resources->mem_length[3]); - offset = offset - base2; - } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE && - offset < base3 + OMAP_DSP_MEM3_SIZE) { - dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[4], - resources->mem_length[4]); - offset = offset - base3; - } else { - return -EPERM; - } - if (ul_num_bytes) - memcpy((u8 *) (dw_base_addr + offset), host_buff, ul_num_bytes); - else - *((u32 *) host_buff) = dw_base_addr + offset; - - return status; -} - -/* - * ======== write_ext_dsp_data ======== - * purpose: - * Copies buffers to the external memory. 
- * - */ -int write_ext_dsp_data(struct bridge_dev_context *dev_context, - u8 *host_buff, u32 dsp_addr, - u32 ul_num_bytes, u32 mem_type, - bool dynamic_load) -{ - u32 dw_base_addr = dev_context->dsp_ext_base_addr; - u32 dw_offset = 0; - u8 temp_byte1, temp_byte2; - u8 remain_byte[4]; - s32 i; - int ret = 0; - u32 dw_ext_prog_virt_mem; - u32 ul_tlb_base_virt = 0; - u32 ul_shm_offset_virt = 0; - struct cfg_hostres *host_res = dev_context->resources; - bool trace_load = false; - temp_byte1 = 0x0; - temp_byte2 = 0x0; - - if (symbols_reloaded) { - /* Check if it is a load to Trace section */ - ret = dev_get_symbol(dev_context->dev_obj, - DSP_TRACESEC_BEG, &ul_trace_sec_beg); - if (!ret) - ret = dev_get_symbol(dev_context->dev_obj, - DSP_TRACESEC_END, - &ul_trace_sec_end); - } - if (!ret) { - if ((dsp_addr <= ul_trace_sec_end) && - (dsp_addr >= ul_trace_sec_beg)) - trace_load = true; - } - - /* If dynamic, force remap/unmap */ - if ((dynamic_load || trace_load) && dw_base_addr) { - dw_base_addr = 0; - MEM_UNMAP_LINEAR_ADDRESS((void *) - dev_context->dsp_ext_base_addr); - dev_context->dsp_ext_base_addr = 0x0; - } - if (!dw_base_addr) { - if (symbols_reloaded) - /* Get SHM_BEG EXT_BEG and EXT_END. */ - ret = dev_get_symbol(dev_context->dev_obj, - SHMBASENAME, &ul_shm_base_virt); - if (dynamic_load) { - if (!ret) { - if (symbols_reloaded) - ret = - dev_get_symbol - (dev_context->dev_obj, DYNEXTBASE, - &ul_ext_base); - } - if (!ret) { - /* DR OMAPS00013235 : DLModules array may be - * in EXTMEM. It is expected that DYNEXTMEM and - * EXTMEM are contiguous, so checking for the - * upper bound at EXTEND should be Ok. 
*/ - if (symbols_reloaded) - ret = - dev_get_symbol - (dev_context->dev_obj, EXTEND, - &ul_ext_end); - } - } else { - if (symbols_reloaded) { - if (!ret) - ret = - dev_get_symbol - (dev_context->dev_obj, EXTBASE, - &ul_ext_base); - if (!ret) - ret = - dev_get_symbol - (dev_context->dev_obj, EXTEND, - &ul_ext_end); - } - } - /* Trace buffer it right after the shm SEG0, so set the - * base address to SHMBASE */ - if (trace_load) - ul_ext_base = ul_shm_base_virt; - - if (ul_ext_end < ul_ext_base) - ret = -EPERM; - - if (!ret) { - ul_tlb_base_virt = - dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; - - if (symbols_reloaded) { - ret = dev_get_symbol - (dev_context->dev_obj, - DSP_TRACESEC_END, &shm0_end); - if (!ret) { - ret = - dev_get_symbol - (dev_context->dev_obj, DYNEXTBASE, - &ul_dyn_ext_base); - } - } - ul_shm_offset_virt = - ul_shm_base_virt - ul_tlb_base_virt; - if (trace_load) { - dw_ext_prog_virt_mem = - dev_context->atlb_entry[0].gpp_va; - } else { - dw_ext_prog_virt_mem = host_res->mem_base[1]; - dw_ext_prog_virt_mem += - (ul_ext_base - ul_dyn_ext_base); - } - - dev_context->dsp_ext_base_addr = - (u32) MEM_LINEAR_ADDRESS((void *) - dw_ext_prog_virt_mem, - ul_ext_end - ul_ext_base); - dw_base_addr += dev_context->dsp_ext_base_addr; - /* This dsp_ext_base_addr will get cleared only when - * the board is stopped. 
*/ - if (!dev_context->dsp_ext_base_addr) - ret = -EPERM; - } - } - if (!dw_base_addr || !ul_ext_base || !ul_ext_end) - ret = -EPERM; - - if (!ret) { - for (i = 0; i < 4; i++) - remain_byte[i] = 0x0; - - dw_offset = dsp_addr - ul_ext_base; - /* Also make sure the dsp_addr is < ul_ext_end */ - if (dsp_addr > ul_ext_end || dw_offset > dsp_addr) - ret = -EPERM; - } - if (!ret) { - if (ul_num_bytes) - memcpy((u8 *) dw_base_addr + dw_offset, host_buff, - ul_num_bytes); - else - *((u32 *) host_buff) = dw_base_addr + dw_offset; - } - /* Unmap here to force remap for other Ext loads */ - if ((dynamic_load || trace_load) && dev_context->dsp_ext_base_addr) { - MEM_UNMAP_LINEAR_ADDRESS((void *) - dev_context->dsp_ext_base_addr); - dev_context->dsp_ext_base_addr = 0x0; - } - symbols_reloaded = false; - return ret; -} - -int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) -{ -#ifdef CONFIG_TIDSPBRIDGE_DVFS - u32 opplevel = 0; -#endif - struct omap_dsp_platform_data *pdata = - omap_dspbridge_dev->dev.platform_data; - struct cfg_hostres *resources = dev_context->resources; - int status = 0; - u32 temp; - - if (!dev_context->mbox) - return 0; - - if (!resources) - return -EPERM; - - if (dev_context->brd_state == BRD_DSP_HIBERNATION || - dev_context->brd_state == BRD_HIBERNATION) { -#ifdef CONFIG_TIDSPBRIDGE_DVFS - if (pdata->dsp_get_opp) - opplevel = (*pdata->dsp_get_opp) (); - if (opplevel == VDD1_OPP1) { - if (pdata->dsp_set_min_opp) - (*pdata->dsp_set_min_opp) (VDD1_OPP2); - } -#endif - /* Restart the peripheral clocks */ - dsp_clock_enable_all(dev_context->dsp_per_clks); - dsp_wdt_enable(true); - - /* - * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control - * in CM_AUTOIDLE_PLL_IVA2 register - */ - (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT, - OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL); - - /* - * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to - * 0.75 MHz - 1.0 MHz - * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode - */ - 
(*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK | - OMAP3430_EN_IVA2_DPLL_MASK, - 0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT | - 0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT, - OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL); - - /* Restore mailbox settings */ - omap_mbox_restore_ctx(dev_context->mbox); - - /* Access MMU SYS CONFIG register to generate a short wakeup */ - temp = readl(resources->dmmu_base + 0x10); - - dev_context->brd_state = BRD_RUNNING; - } else if (dev_context->brd_state == BRD_RETENTION) { - /* Restart the peripheral clocks */ - dsp_clock_enable_all(dev_context->dsp_per_clks); - } - - status = omap_mbox_msg_send(dev_context->mbox, mb_val); - - if (status) { - pr_err("omap_mbox_msg_send Fail and status = %d\n", status); - status = -EPERM; - } - - return 0; -} diff --git a/drivers/staging/tidspbridge/core/tiomap_io.h b/drivers/staging/tidspbridge/core/tiomap_io.h deleted file mode 100644 index a3f19c7b79f3..000000000000 --- a/drivers/staging/tidspbridge/core/tiomap_io.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * tiomap_io.h - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Definitions, types and function prototypes for the io (r/w external mem). - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -#ifndef _TIOMAP_IO_ -#define _TIOMAP_IO_ - -/* - * Symbol that defines beginning of shared memory. - * For OMAP (Helen) this is the DSP Virtual base address of SDRAM. - * This will be used to program DSP MMU to map DSP Virt to GPP phys. - * (see dspMmuTlbEntry()). 
- */ -#define SHMBASENAME "SHM_BEG" -#define EXTBASE "EXT_BEG" -#define EXTEND "_EXT_END" -#define DYNEXTBASE "_DYNEXT_BEG" -#define DYNEXTEND "_DYNEXT_END" -#define IVAEXTMEMBASE "_IVAEXTMEM_BEG" -#define IVAEXTMEMEND "_IVAEXTMEM_END" - -#define DSP_TRACESEC_BEG "_BRIDGE_TRACE_BEG" -#define DSP_TRACESEC_END "_BRIDGE_TRACE_END" - -#define SYS_PUTCBEG "_SYS_PUTCBEG" -#define SYS_PUTCEND "_SYS_PUTCEND" -#define BRIDGE_SYS_PUTC_CURRENT "_BRIDGE_SYS_PUTC_current" - -#define WORDSWAP_ENABLE 0x3 /* Enable word swap */ - -/* - * ======== read_ext_dsp_data ======== - * Reads it from DSP External memory. The external memory for the DSP - * is configured by the combination of DSP MMU and shm Memory manager in the CDB - */ -extern int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, - u8 *host_buff, u32 dsp_addr, - u32 ul_num_bytes, u32 mem_type); - -/* - * ======== write_dsp_data ======== - */ -extern int write_dsp_data(struct bridge_dev_context *dev_context, - u8 *host_buff, u32 dsp_addr, - u32 ul_num_bytes, u32 mem_type); - -/* - * ======== write_ext_dsp_data ======== - * Writes to the DSP External memory for external program. - * The ext mem for progra is configured by the combination of DSP MMU and - * shm Memory manager in the CDB - */ -extern int write_ext_dsp_data(struct bridge_dev_context *dev_context, - u8 *host_buff, u32 dsp_addr, - u32 ul_num_bytes, u32 mem_type, - bool dynamic_load); - -/* - * ======== write_ext32_bit_dsp_data ======== - * Writes 32 bit data to the external memory - */ -extern inline void write_ext32_bit_dsp_data(const - struct bridge_dev_context *dev_context, - u32 dsp_addr, u32 val) -{ - *(u32 *) dsp_addr = ((dev_context->tc_word_swap_on) ? 
(((val << 16) & - 0xFFFF0000) | - ((val >> 16) & - 0x0000FFFF)) : - val); -} - -/* - * ======== read_ext32_bit_dsp_data ======== - * Reads 32 bit data from the external memory - */ -extern inline u32 read_ext32_bit_dsp_data(const struct bridge_dev_context - *dev_context, u32 dsp_addr) -{ - u32 ret; - ret = *(u32 *) dsp_addr; - - ret = ((dev_context->tc_word_swap_on) ? (((ret << 16) - & 0xFFFF0000) | ((ret >> 16) & - 0x0000FFFF)) - : ret); - return ret; -} - -#endif /* _TIOMAP_IO_ */ diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c deleted file mode 100644 index e68f0ba8e12b..000000000000 --- a/drivers/staging/tidspbridge/core/ue_deh.c +++ /dev/null @@ -1,272 +0,0 @@ -/* - * ue_deh.c - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * Implements upper edge DSP exception handling (DEH) functions. - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * Copyright (C) 2010 Felipe Contreras - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
- */ - -#include <linux/kernel.h> -#include <linux/interrupt.h> - -#include <dspbridge/dbdefs.h> -#include <dspbridge/dspdeh.h> -#include <dspbridge/dev.h> -#include "_tiomap.h" -#include "_deh.h" - -#include <dspbridge/io_sm.h> -#include <dspbridge/drv.h> -#include <dspbridge/wdt.h> - -static u32 fault_addr; - -static void mmu_fault_dpc(unsigned long data) -{ - struct deh_mgr *deh = (void *)data; - - if (!deh) - return; - - bridge_deh_notify(deh, DSP_MMUFAULT, 0); -} - -static irqreturn_t mmu_fault_isr(int irq, void *data) -{ - struct deh_mgr *deh = data; - struct cfg_hostres *resources; - u32 event; - - if (!deh) - return IRQ_HANDLED; - - resources = deh->bridge_context->resources; - if (!resources) { - dev_dbg(bridge, "%s: Failed to get Host Resources\n", - __func__); - return IRQ_HANDLED; - } - - hw_mmu_event_status(resources->dmmu_base, &event); - if (event == HW_MMU_TRANSLATION_FAULT) { - hw_mmu_fault_addr_read(resources->dmmu_base, &fault_addr); - dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, - event, fault_addr); - /* - * Schedule a DPC directly. In the future, it may be - * necessary to check if DSP MMU fault is intended for - * Bridge. - */ - tasklet_schedule(&deh->dpc_tasklet); - - /* Disable the MMU events, else once we clear it will - * start to raise INTs again */ - hw_mmu_event_disable(resources->dmmu_base, - HW_MMU_TRANSLATION_FAULT); - } else { - hw_mmu_event_disable(resources->dmmu_base, - HW_MMU_ALL_INTERRUPTS); - } - return IRQ_HANDLED; -} - -int bridge_deh_create(struct deh_mgr **ret_deh, - struct dev_object *hdev_obj) -{ - int status; - struct deh_mgr *deh; - struct bridge_dev_context *hbridge_context = NULL; - - /* Message manager will be created when a file is loaded, since - * size of message buffer in shared memory is configurable in - * the base image. */ - /* Get Bridge context info. 
*/ - dev_get_bridge_context(hdev_obj, &hbridge_context); - /* Allocate IO manager object: */ - deh = kzalloc(sizeof(*deh), GFP_KERNEL); - if (!deh) { - status = -ENOMEM; - goto err; - } - - /* Create an NTFY object to manage notifications */ - deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); - if (!deh->ntfy_obj) { - status = -ENOMEM; - goto err; - } - ntfy_init(deh->ntfy_obj); - - /* Create a MMUfault DPC */ - tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); - - /* Fill in context structure */ - deh->bridge_context = hbridge_context; - - /* Install ISR function for DSP MMU fault */ - status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, - "DspBridge\tiommu fault", deh); - if (status < 0) - goto err; - - *ret_deh = deh; - return 0; - -err: - bridge_deh_destroy(deh); - *ret_deh = NULL; - return status; -} - -int bridge_deh_destroy(struct deh_mgr *deh) -{ - if (!deh) - return -EFAULT; - - /* If notification object exists, delete it */ - if (deh->ntfy_obj) { - ntfy_delete(deh->ntfy_obj); - kfree(deh->ntfy_obj); - } - /* Disable DSP MMU fault */ - free_irq(INT_DSP_MMU_IRQ, deh); - - /* Free DPC object */ - tasklet_kill(&deh->dpc_tasklet); - - /* Deallocate the DEH manager object */ - kfree(deh); - - return 0; -} - -int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, - u32 notify_type, - struct dsp_notification *hnotification) -{ - if (!deh) - return -EFAULT; - - if (event_mask) - return ntfy_register(deh->ntfy_obj, hnotification, - event_mask, notify_type); - else - return ntfy_unregister(deh->ntfy_obj, hnotification); -} - -#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE -static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) -{ - struct cfg_hostres *resources; - struct hw_mmu_map_attrs_t map_attrs = { - .endianism = HW_LITTLE_ENDIAN, - .element_size = HW_ELEM_SIZE16BIT, - .mixed_size = HW_MMU_CPUES, - }; - void *dummy_va_addr; - - resources = dev_context->resources; - dummy_va_addr = (void 
*)__get_free_page(GFP_ATOMIC); - - /* - * Before acking the MMU fault, let's make sure MMU can only - * access entry #0. Then add a new entry so that the DSP OS - * can continue in order to dump the stack. - */ - hw_mmu_twl_disable(resources->dmmu_base); - hw_mmu_tlb_flush_all(resources->dmmu_base); - - hw_mmu_tlb_add(resources->dmmu_base, - virt_to_phys(dummy_va_addr), fault_addr, - HW_PAGE_SIZE4KB, 1, - &map_attrs, HW_SET, HW_SET); - - dsp_clk_enable(DSP_CLK_GPT8); - - dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); - - /* Clear MMU interrupt */ - hw_mmu_event_ack(resources->dmmu_base, - HW_MMU_TRANSLATION_FAULT); - dump_dsp_stack(dev_context); - dsp_clk_disable(DSP_CLK_GPT8); - - hw_mmu_disable(resources->dmmu_base); - free_page((unsigned long)dummy_va_addr); -} -#endif - -static inline const char *event_to_string(int event) -{ - switch (event) { - case DSP_SYSERROR: return "DSP_SYSERROR"; break; - case DSP_MMUFAULT: return "DSP_MMUFAULT"; break; - case DSP_PWRERROR: return "DSP_PWRERROR"; break; - case DSP_WDTOVERFLOW: return "DSP_WDTOVERFLOW"; break; - default: return "unknown event"; break; - } -} - -void bridge_deh_notify(struct deh_mgr *deh, int event, int info) -{ - struct bridge_dev_context *dev_context; - const char *str = event_to_string(event); - - if (!deh) - return; - - dev_dbg(bridge, "%s: device exception", __func__); - dev_context = deh->bridge_context; - - switch (event) { - case DSP_SYSERROR: - dev_err(bridge, "%s: %s, info=0x%x", __func__, - str, info); -#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE - dump_dl_modules(dev_context); - dump_dsp_stack(dev_context); -#endif - break; - case DSP_MMUFAULT: - dev_err(bridge, "%s: %s, addr=0x%x", __func__, - str, fault_addr); -#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE - print_dsp_trace_buffer(dev_context); - dump_dl_modules(dev_context); - mmu_fault_print_stack(dev_context); -#endif - break; - default: - dev_err(bridge, "%s: %s", __func__, str); - break; - } - - /* Filter subsequent notifications when an error occurs 
 */
	if (dev_context->brd_state != BRD_ERROR) {
		/* First error only: later events are logged but not notified */
		ntfy_notify(deh->ntfy_obj, event);
#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
		bridge_recover_schedule();
#endif
	}

	/* Set the Board state as ERROR */
	dev_context->brd_state = BRD_ERROR;
	/* Disable all the clocks that were enabled by DSP */
	dsp_clock_disable_all(dev_context->dsp_per_clks);
	/*
	 * Avoid the subsequent WDT if it happens once,
	 * also if fatal error occurs.
	 */
	dsp_wdt_enable(false);
}
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
deleted file mode 100644
index c7ee467f0f12..000000000000
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ /dev/null
@@ -1,143 +0,0 @@
/*
 * wdt.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * IO dispatcher for a shared memory channel driver.
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

#include <dspbridge/dbdefs.h>
#include <dspbridge/dspdeh.h>
#include <dspbridge/dev.h>
#include <dspbridge/_chnl_sm.h>
#include <dspbridge/wdt.h>
#include <dspbridge/host_os.h>


/* OMAP3 WDT3 register window and its (virtual) interrupt line */
#define OMAP34XX_WDT3_BASE (0x49000000 + 0x30000)
#define INT_34XX_WDT3_IRQ (36 + NR_IRQS)

/* Driver-wide watchdog state: WDT3 mapping, clocks, tasklet, SM block */
static struct dsp_wdt_setting dsp_wdt;

/*
 * dsp_wdt_dpc() - tasklet body run after a WDT3 overflow interrupt.
 * Forwards the overflow to the DEH manager of the first device as a
 * DSP_WDTOVERFLOW event. @data is unused.
 */
void dsp_wdt_dpc(unsigned long data)
{
	struct deh_mgr *deh_mgr;
	dev_get_deh_mgr(dev_get_first(), &deh_mgr);
	if (deh_mgr)
		bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0);
}

/*
 * dsp_wdt_isr() - WDT3 interrupt handler.
 * Acks the interrupt by writing the status register value back to
 * itself, then defers the real work to dsp_wdt_dpc() via tasklet.
 */
irqreturn_t dsp_wdt_isr(int irq, void *data)
{
	u32 value;
	/* ack wdt3 interrupt */
	value = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
	__raw_writel(value, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);

	tasklet_schedule(&dsp_wdt.wdt3_tasklet);
	return IRQ_HANDLED;
}

/*
 * dsp_wdt_init() - map WDT3, acquire its clocks, install the ISR.
 * The IRQ is left disabled; dsp_wdt_enable(true) turns it on once the
 * DSP starts. Returns 0 or a negative errno.
 * NOTE(review): clk_get() failures are reported as -EFAULT rather than
 * a PTR_ERR-derived code, and the ioremap/tasklet are not undone on the
 * clock error paths -- kept as-is (callers pair this with dsp_wdt_exit()).
 */
int dsp_wdt_init(void)
{
	int ret = 0;

	dsp_wdt.sm_wdt = NULL;
	dsp_wdt.reg_base = ioremap(OMAP34XX_WDT3_BASE, SZ_4K);
	if (!dsp_wdt.reg_base)
		return -ENOMEM;

	tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);

	dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");

	if (!IS_ERR(dsp_wdt.fclk)) {
		clk_prepare(dsp_wdt.fclk);

		dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
		if (IS_ERR(dsp_wdt.iclk)) {
			clk_put(dsp_wdt.fclk);
			dsp_wdt.fclk = NULL;
			ret = -EFAULT;
		} else {
			clk_prepare(dsp_wdt.iclk);
		}
	} else
		ret = -EFAULT;

	if (!ret)
		ret = request_irq(INT_34XX_WDT3_IRQ, dsp_wdt_isr, 0,
				"dsp_wdt", &dsp_wdt);

	/* Disable at this moment, it will be enabled when DSP starts */
	if (!ret)
		disable_irq(INT_34XX_WDT3_IRQ);

	return ret;
}

/*
 * dsp_wdt_sm_set() - attach the shared-memory WDT block and set the
 * overflow timeout the DSP side reads from it.
 */
void dsp_wdt_sm_set(void *data)
{
	dsp_wdt.sm_wdt = data;
	dsp_wdt.sm_wdt->wdt_overflow = 5;	/* in seconds */
}


/*
 * dsp_wdt_exit() - undo dsp_wdt_init(): free the IRQ, kill the tasklet,
 * release the clocks and unmap the register window (tail continues on
 * the next hunk line).
 */
void dsp_wdt_exit(void)
{
	free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
	tasklet_kill(&dsp_wdt.wdt3_tasklet);

	if (dsp_wdt.fclk) {
		clk_unprepare(dsp_wdt.fclk);
		clk_put(dsp_wdt.fclk);
	}
	if (dsp_wdt.iclk) {
		clk_unprepare(dsp_wdt.iclk);
		clk_put(dsp_wdt.iclk);
	}

	dsp_wdt.fclk = NULL;
	dsp_wdt.iclk = NULL;
	dsp_wdt.sm_wdt = NULL;

	if (dsp_wdt.reg_base)
		iounmap(dsp_wdt.reg_base);
	dsp_wdt.reg_base = NULL;
}

/*
 * dsp_wdt_enable() - gate the DSP watchdog on or off.
 * @enable: true to start feeding clocks and unmask the WDT3 IRQ,
 *          false to mask the IRQ and stop the clocks.
 *
 * A function-local static latches the current state, so repeated calls
 * with the same value are no-ops; also bails out when the clocks were
 * never acquired by dsp_wdt_init().
 * NOTE(review): sm_wdt is dereferenced unchecked -- assumes
 * dsp_wdt_sm_set() ran before the first enable; confirm at callers.
 */
void dsp_wdt_enable(bool enable)
{
	u32 tmp;
	static bool wdt_enable;

	if (wdt_enable == enable || !dsp_wdt.fclk || !dsp_wdt.iclk)
		return;

	wdt_enable = enable;

	if (enable) {
		clk_enable(dsp_wdt.fclk);
		clk_enable(dsp_wdt.iclk);
		dsp_wdt.sm_wdt->wdt_setclocks = 1;
		/* ack any pending overflow before unmasking the IRQ */
		tmp = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
		__raw_writel(tmp, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
		enable_irq(INT_34XX_WDT3_IRQ);
	} else {
		disable_irq(INT_34XX_WDT3_IRQ);
		dsp_wdt.sm_wdt->wdt_setclocks = 0;
		clk_disable(dsp_wdt.iclk);
		clk_disable(dsp_wdt.fclk);
	}
}