author     Barry Song <barry.song@analog.com>       2009-12-07 10:05:58 +0000
committer  Mike Frysinger <vapier@gentoo.org>       2010-03-09 00:30:46 -0500
commit     e18e7dd33454f277b9438af66d25984362278021 (patch)
tree       3c809ab2ebf8ad6276570f7eab238bd45e7216e8  /arch/blackfin/include/asm/mmu_context.h
parent     Blackfin: flush caches on SMP when one core calls another via IPI (diff)
Blackfin: fix MPU page permission masks overflow when dealing with async memory
Attempting to use the MPU while doing XIP out of parallel flash hooked up to
the async memory bus would often result in random crashes as the MPU slowly
corrupted memory.

The fallout here is that the async banks gain MPU protection from user space
too, so any accesses have to go through the mmap() interface rather than just
using hardcoded pointers.

Signed-off-by: Barry Song <barry.song@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
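Since the patch forces user space to go through mmap() for async-bank
accesses, a minimal sketch of what such an access could look like is shown
below. It assumes the bank is exposed through /dev/mem; the 0x20000000 base
address and 64 KiB length are illustrative assumptions, not values taken from
this patch.

/*
 * Hedged sketch: mapping an async-bank region instead of dereferencing a
 * hardcoded pointer.  The base address and length below are assumptions for
 * illustration; the real values depend on the board's async memory layout.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const off_t async_base = 0x20000000;   /* assumed ASYNC_BANK0 physical base */
	const size_t len = 64 * 1024;          /* assumed window size */

	int fd = open("/dev/mem", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/* Map the flash window read-only instead of poking a raw pointer. */
	volatile uint8_t *flash = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, async_base);
	if (flash == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("first byte of async bank: 0x%02x\n", flash[0]);

	munmap((void *)flash, len);
	close(fd);
	return 0;
}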
Diffstat (limited to 'arch/blackfin/include/asm/mmu_context.h')
-rw-r--r--  arch/blackfin/include/asm/mmu_context.h | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h
index ae8ef4ffd806..7f363d7e43a5 100644
--- a/arch/blackfin/include/asm/mmu_context.h
+++ b/arch/blackfin/include/asm/mmu_context.h
@@ -13,6 +13,7 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/cplbinit.h>
+#include <asm/sections.h>
 
 /* Note: L1 stacks are CPU-private things, so we bluntly disable this
    feature in SMP mode, and use the per-CPU scratch SRAM bank only to
@@ -117,9 +118,16 @@ static inline void protect_page(struct mm_struct *mm, unsigned long addr,
 				unsigned long flags)
 {
 	unsigned long *mask = mm->context.page_rwx_mask;
-	unsigned long page = addr >> 12;
-	unsigned long idx = page >> 5;
-	unsigned long bit = 1 << (page & 31);
+	unsigned long page;
+	unsigned long idx;
+	unsigned long bit;
+
+	if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
+		page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
+	else
+		page = addr >> 12;
+	idx = page >> 5;
+	bit = 1 << (page & 31);
 
 	if (flags & VM_READ)
 		mask[idx] |= bit;
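The hunk above offsets async-bank addresses by (ASYNC_BANK0_BASE - _ramend)
before computing the page number, so their permission bits land just after
the last SDRAM page in mm->context.page_rwx_mask instead of indexing far past
the end of the bitmap and corrupting memory. The following standalone sketch
walks through that arithmetic with assumed example values for _ramend and
ASYNC_BANK0_BASE (the real values come from the kernel configuration and the
board); it also simplifies the patch's bank-range check to a single lower
bound.

/*
 * Hedged sketch of the page-index math, with assumed example values
 * (_ramend = 0x04000000, i.e. 64 MiB of SDRAM, ASYNC_BANK0_BASE = 0x20000000).
 * It shows why the pre-patch computation indexed far past the per-mm rwx
 * bitmap for async addresses, and how the offset keeps them in range.
 */
#include <stdio.h>

#define EXAMPLE_RAMEND      0x04000000UL  /* assumed end of SDRAM */
#define EXAMPLE_ASYNC_BASE  0x20000000UL  /* assumed ASYNC_BANK0_BASE */

static unsigned long old_page_index(unsigned long addr)
{
	return addr >> 12;                    /* pre-patch: raw 4 KiB page number */
}

static unsigned long new_page_index(unsigned long addr)
{
	/* Simplified: the patch also checks the upper bound of the async banks. */
	if (addr >= EXAMPLE_ASYNC_BASE)
		addr -= EXAMPLE_ASYNC_BASE - EXAMPLE_RAMEND;
	return addr >> 12;
}

int main(void)
{
	unsigned long ram_pages  = EXAMPLE_RAMEND >> 12;          /* pages the bitmap covers */
	unsigned long flash_addr = EXAMPLE_ASYNC_BASE + 0x1000;   /* an address in bank 0 */

	printf("RAM pages covered by bitmap: %lu\n", ram_pages);
	printf("old index for flash addr:    %lu (far outside the bitmap)\n",
	       old_page_index(flash_addr));
	printf("new index for flash addr:    %lu (just after the RAM pages)\n",
	       new_page_index(flash_addr));
	return 0;
}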