Diffstat
 arch/blackfin/mach-common/smp.c | 38 ++++++++++++++++++++++++++++++--------
 1 file changed, 30 insertions(+), 8 deletions(-)
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 9f251406a76a..1fbd94c44457 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -40,6 +40,10 @@
*/
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
+#ifdef CONFIG_ICACHE_FLUSH_L1
+unsigned long blackfin_iflush_l1_entry[NR_CPUS];
+#endif
+
void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
*init_saved_dcplb_fault_addr_coreb;
@@ -108,6 +112,19 @@ static void ipi_flush_icache(void *info)
blackfin_dcache_invalidate_range((unsigned long)fdata,
(unsigned long)fdata + sizeof(*fdata));
+ /* Make sure all write buffers in the data side of the core
+ * are flushed before trying to invalidate the icache. This
+ * needs to be after the data flush and before the icache
+ * flush so that the SSYNC does the right thing in preventing
+ * the instruction prefetcher from hitting things in cached
+ * memory at the wrong time -- it runs much further ahead than
+ * the pipeline.
+ */
+ SSYNC();
+
+	/* ipi_flush_icache is invoked by generic flush_icache_range,
+ * so call blackfin arch icache flush directly here.
+ */
blackfin_icache_flush_range(fdata->start, fdata->end);
}
@@ -160,6 +177,9 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
while (msg_queue->count) {
msg = &msg_queue->ipi_message[msg_queue->head];
switch (msg->type) {
+ case BFIN_IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
case BFIN_IPI_CALL_FUNC:
spin_unlock_irqrestore(&msg_queue->lock, flags);
ipi_call_function(cpu, msg);
@@ -244,12 +264,13 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
{
cpumask_t callmap;
+ preempt_disable();
callmap = cpu_online_map;
cpu_clear(smp_processor_id(), callmap);
- if (cpus_empty(callmap))
- return 0;
+ if (!cpus_empty(callmap))
+ smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
- smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
+ preempt_enable();
return 0;
}
@@ -286,12 +307,13 @@ void smp_send_stop(void)
{
cpumask_t callmap;
+ preempt_disable();
callmap = cpu_online_map;
cpu_clear(smp_processor_id(), callmap);
- if (cpus_empty(callmap))
- return;
+ if (!cpus_empty(callmap))
+ smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
- smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+ preempt_enable();
return;
}
@@ -361,8 +383,6 @@ void __cpuinit secondary_start_kernel(void)
*/
init_exception_vectors();
- bfin_setup_caches(cpu);
-
local_irq_disable();
/* Attach the new idle task to the global mm. */
@@ -381,6 +401,8 @@ void __cpuinit secondary_start_kernel(void)
local_irq_enable();
+ bfin_setup_caches(cpu);
+
/*
* Calibrate loops per jiffy value.
* IRQs need to be enabled here - D-cache can be invalidated
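
For reference, below is a minimal user-space sketch of the pattern the smp_call_function() and smp_send_stop() hunks above introduce: snapshot the online-CPU mask with preemption disabled so the caller cannot migrate between reading smp_processor_id() and sending the IPI, and invert the cpus_empty() test so the function has a single exit path where preempt_enable() always runs. The helpers here (preempt_disable, cpu_clear, smp_send_message, and so on) are simplified stand-ins mimicking the kernel primitives of the same name, not the real API.

	#include <stdio.h>
	#include <stdbool.h>

	/* Simplified stand-ins for the kernel primitives used by the patch. */
	typedef unsigned long cpumask_t;
	static cpumask_t cpu_online_map = 0x3;   /* pretend CPUs 0 and 1 are online */
	static int current_cpu = 0;              /* pretend we run on CPU 0 */

	static void preempt_disable(void) { /* no-op in this sketch */ }
	static void preempt_enable(void)  { /* no-op in this sketch */ }
	static int  smp_processor_id(void) { return current_cpu; }
	static bool cpus_empty(cpumask_t m) { return m == 0; }
	static void cpu_clear(int cpu, cpumask_t *m) { *m &= ~(1UL << cpu); }

	static void smp_send_message(cpumask_t map, int type)
	{
		printf("sending IPI type %d to mask 0x%lx\n", type, map);
	}

	/* Pattern from the patch: take the online map snapshot with preemption
	 * disabled, drop the current CPU, and only send the IPI when another
	 * CPU is actually online.  Preemption is always re-enabled on exit. */
	int smp_call_function_sketch(int ipi_type)
	{
		cpumask_t callmap;

		preempt_disable();
		callmap = cpu_online_map;
		cpu_clear(smp_processor_id(), &callmap);
		if (!cpus_empty(callmap))
			smp_send_message(callmap, ipi_type);
		preempt_enable();
		return 0;
	}

	int main(void)
	{
		smp_call_function_sketch(1 /* hypothetical stand-in for BFIN_IPI_CALL_FUNC */);
		return 0;
	}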