author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2016-04-29 23:25:30 +1000
committer Michael Ellerman <mpe@ellerman.id.au>               2016-05-01 18:32:21 +1000
commit    c7d54842deb1fa357cff75b988275a1c9f259140 (patch)
tree      cab739cf4650a0397c0057a2878b386d613370a0 /arch/powerpc/include/asm/book3s/64/hash.h
parent    powerpc/mm: Use pte_raw() in pte_same()/pmd_same() (diff)
powerpc/mm: Use _PAGE_READ to indicate Read access
This splits the _PAGE_RW bit into _PAGE_READ and _PAGE_WRITE. It also removes the dependency on _PAGE_USER for implying read only.

A few things to note: read is implied by write and execute permission, so we should always find _PAGE_READ set on a hash pte fault.

We still can't switch PROT_NONE to !(_PAGE_RWX), because automatic NUMA balancing depends on marking a prot-none pte _PAGE_WRITE (for details see commit b191f9b106ea, "mm: numa: preserve PTE write permissions across a NUMA hinting fault").

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Jeremy Kerr <jk@ozlabs.org>
Cc: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
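To make the new encoding concrete, here is a minimal user-space sketch (not kernel code; pte_perms_valid() and main() are purely illustrative helpers, and a plain unsigned long stands in for a pte) of how the composite _PAGE_RW/_PAGE_RWX masks are built from the split bits, and why a hash pte fault should always observe _PAGE_READ whenever write or execute is granted:

    /*
     * Minimal user-space sketch of the split permission bits introduced by
     * this patch.  The bit values mirror the hunk below; the helpers are
     * illustrative only and are not part of the kernel.
     */
    #include <assert.h>
    #include <stdio.h>

    #define _PAGE_EXEC  0x00001UL                   /* execute permission */
    #define _PAGE_WRITE 0x00002UL                   /* write access allowed */
    #define _PAGE_READ  0x00004UL                   /* read access allowed */
    #define _PAGE_RW    (_PAGE_READ | _PAGE_WRITE)
    #define _PAGE_RWX   (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)

    /* Write or execute permission is only granted together with read, so  */
    /* any pte that faults with W or X set must also carry _PAGE_READ.     */
    static int pte_perms_valid(unsigned long pte)
    {
        if (pte & (_PAGE_WRITE | _PAGE_EXEC))
            return (pte & _PAGE_READ) != 0;
        return 1;
    }

    int main(void)
    {
        assert(pte_perms_valid(_PAGE_RW));          /* read + write: ok */
        assert(pte_perms_valid(_PAGE_RWX));         /* read + write + exec: ok */
        assert(!pte_perms_valid(_PAGE_WRITE));      /* write without read: never issued */
        printf("write/exec always imply read\n");
        return 0;
    }

The one exception noted above is PROT_NONE under automatic NUMA balancing, which keeps _PAGE_WRITE on an otherwise inaccessible pte, so the hash fault path cannot simply test for !(_PAGE_RWX).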
Diffstat (limited to 'arch/powerpc/include/asm/book3s/64/hash.h')
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h  35
1 file changed, 21 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 22064bd329f2..fb76f9cf49c9 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -16,8 +16,10 @@
#define _PAGE_BIT_SWAP_TYPE 0
#define _PAGE_EXEC 0x00001 /* execute permission */
-#define _PAGE_RW 0x00002 /* read & write access allowed */
+#define _PAGE_WRITE 0x00002 /* write access allowed */
#define _PAGE_READ 0x00004 /* read access allowed */
+#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_USER 0x00008 /* page may be accessed by userspace */
#define _PAGE_GUARDED 0x00010 /* G: guarded (side-effect) page */
/* M (memory coherence) is always set in the HPTE, so we don't need it here */
@@ -145,8 +147,8 @@
*/
#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
_PAGE_WRITETHRU | _PAGE_4K_PFN | \
- _PAGE_USER | _PAGE_ACCESSED | \
- _PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_ACCESSED | _PAGE_READ |\
+ _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
_PAGE_SOFT_DIRTY)
/*
* We define 2 sets of base prot bits, one for basic pages (ie,
@@ -171,10 +173,12 @@
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
_PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER )
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER )
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
+ _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
+ _PAGE_EXEC)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
@@ -296,19 +300,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
return;
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
return;
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
}
/*
@@ -349,7 +353,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
__be64 old, tmp, val, mask;
- mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+ mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
_PAGE_EXEC | _PAGE_SOFT_DIRTY);
val = pte_raw(entry) & mask;
@@ -384,7 +388,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
/* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_WRITE);}
static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
@@ -445,7 +449,7 @@ static inline unsigned long pte_pfn(pte_t pte)
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_RW);
+ return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
static inline pte_t pte_mkclean(pte_t pte)
@@ -460,6 +464,9 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte)
{
+ /*
+ * write implies read, hence set both
+ */
return __pte(pte_val(pte) | _PAGE_RW);
}
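A quick illustration of the asymmetry in the last two hunks: pte_mkwrite() sets the composite _PAGE_RW (write implies read), while pte_wrprotect() clears only _PAGE_WRITE, so the page stays readable. The model below is a hedged user-space sketch with a plain unsigned long standing in for pte_t; mkwrite() and wrprotect() are stand-in names, not the kernel functions.

    /*
     * Illustrative model of pte_mkwrite()/pte_wrprotect() behaviour with the
     * split bits.  Assumed simplification: a pte is just an unsigned long.
     */
    #include <assert.h>

    #define _PAGE_WRITE 0x00002UL
    #define _PAGE_READ  0x00004UL
    #define _PAGE_RW    (_PAGE_READ | _PAGE_WRITE)

    /* write implies read, hence set both */
    static unsigned long mkwrite(unsigned long pte)   { return pte | _PAGE_RW; }
    /* dropping write permission must not drop read */
    static unsigned long wrprotect(unsigned long pte) { return pte & ~_PAGE_WRITE; }

    int main(void)
    {
        unsigned long pte = 0;

        pte = mkwrite(pte);
        assert(pte & _PAGE_READ);       /* read granted along with write */
        assert(pte & _PAGE_WRITE);

        pte = wrprotect(pte);
        assert(pte & _PAGE_READ);       /* still readable after write-protect */
        assert(!(pte & _PAGE_WRITE));
        return 0;
    }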