path: root/include/linux/mman.h
author	Dave Kleikamp <shaggy@linux.vnet.ibm.com>	2008-07-08 00:28:51 +1000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-07-09 16:30:45 +1000
commit	b845f313d78e4e259ec449909e3bbadf77b53a6d (patch)
tree	03239e77dbc43f627ce112963736c8b4c53117e6 /include/linux/mman.h
parent	powerpc: Implement task_pt_regs() accessor (diff)
mm: Allow architectures to define additional protection bits
This patch allows architectures to define functions to deal with
additional protection bits for mmap() and mprotect().

arch_calc_vm_prot_bits() maps additional protection bits to vm_flags
arch_vm_get_page_prot() maps additional vm_flags to the vma's vm_page_prot
arch_validate_prot() checks for valid values of the protection bits

Note: vm_get_page_prot() is now pretty ugly, but the generated code
should be identical for architectures that don't define additional
protection bits.

Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
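As an illustration of how an architecture could plug into these hooks, here is
a minimal sketch of an asm/mman.h override. PROT_FOO, VM_FOO and _PAGE_FOO are
hypothetical names standing in for an arch-specific protection bit, its
vm_flags bit and its PTE bit; they are not part of this patch.

/*
 * Hypothetical arch/<arch>/include/asm/mman.h -- illustrative only.
 * Maps a made-up PROT_FOO mmap/mprotect bit through the three hooks.
 */

/* Translate the extra PROT_ bit into the corresponding vm_flags bit. */
#define arch_calc_vm_prot_bits(prot) \
	((prot) & PROT_FOO ? VM_FOO : 0)

/* Translate the extra vm_flags bit into the matching page protection bit. */
#define arch_vm_get_page_prot(vm_flags) \
	__pgprot((vm_flags) & VM_FOO ? _PAGE_FOO : 0)

/* Accept the extra bit in addition to the generic PROT_ values. */
static inline int arch_validate_prot(unsigned long prot)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC |
			 PROT_SEM | PROT_FOO)) == 0;
}
#define arch_validate_prot arch_validate_prot

Architectures that do not provide these definitions fall through to the
generic defaults added below, which contribute nothing extra.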
Diffstat (limited to 'include/linux/mman.h')
-rw-r--r--	include/linux/mman.h	29
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/include/linux/mman.h b/include/linux/mman.h
index dab8892e6ff1..30d1073bac3b 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -34,6 +34,32 @@ static inline void vm_unacct_memory(long pages)
}
/*
+ * Allow architectures to handle additional protection bits
+ */
+
+#ifndef arch_calc_vm_prot_bits
+#define arch_calc_vm_prot_bits(prot) 0
+#endif
+
+#ifndef arch_vm_get_page_prot
+#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#endif
+
+#ifndef arch_validate_prot
+/*
+ * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
+ * already been masked out.
+ *
+ * Returns true if the prot flags are valid
+ */
+static inline int arch_validate_prot(unsigned long prot)
+{
+ return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
+}
+#define arch_validate_prot arch_validate_prot
+#endif
+
+/*
* Optimisation macro. It is equivalent to:
* (x & bit1) ? bit2 : 0
* but this version is faster.
@@ -51,7 +77,8 @@ calc_vm_prot_bits(unsigned long prot)
{
return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
_calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
- _calc_vm_trans(prot, PROT_EXEC, VM_EXEC );
+ _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
+ arch_calc_vm_prot_bits(prot);
}
/*
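For context on the "pretty ugly" note in the changelog: the companion change to
mm/mmap.c (outside this diffstat) ends up OR-ing the arch contribution into
vm_get_page_prot(), roughly as sketched below. On architectures that do not
override arch_vm_get_page_prot(), the hook collapses to __pgprot(0) and the
generated code is unchanged.

/* Simplified sketch of the corresponding mm/mmap.c change; not part of
 * the mman.h diff shown above, exact formatting may differ. */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}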