From eb9b2b69d3bdfe9cd98cd9b2c5715346a0f0140d Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 26 Nov 2010 17:39:28 +0000 Subject: ARM: pgtable: move pgprot functions to one place Rather than scattering them throughout the file, group them together. Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 43 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 22 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 53d1d5deb111..3a53d495874b 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -219,6 +219,27 @@ extern pgprot_t pgprot_kernel; #define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) #define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) +#define __pgprot_modify(prot,mask,bits) \ + __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + +#define pgprot_noncached(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) + +#define pgprot_writecombine(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) + +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#else +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) +#endif + #endif /* __ASSEMBLY__ */ /* @@ -322,28 +343,6 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); static inline pte_t pte_mkspecial(pte_t pte) { return pte; } -#define __pgprot_modify(prot,mask,bits) \ - __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) - -/* - * Mark the prot value as uncacheable and unbufferable. 
- */ -#define pgprot_noncached(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) -#define pgprot_writecombine(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) -#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE -#define pgprot_dmacoherent(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) -#define __HAVE_PHYS_MEM_ACCESS_PROT -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -#else -#define pgprot_dmacoherent(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) -#endif - #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_present(pmd) (pmd_val(pmd)) #define pmd_bad(pmd) (pmd_val(pmd) & 2) -- cgit v1.2.3-59-g8ed1b From 4eec4b1396ac6a6a602b4521d40e9cf596ab776d Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 26 Nov 2010 20:12:12 +0000 Subject: ARM: pgtable: group pgd functions and data together Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 44 ++++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 21 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 3a53d495874b..8ee9dce10b83 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -276,6 +276,29 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, extern struct page *empty_zero_page; #define ZERO_PAGE(vaddr) (empty_zero_page) + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +/* to find an entry in a page-table-directory */ +#define pgd_index(addr) ((addr) >> PGDIR_SHIFT) + +#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pgd is never bad, and a pmd always exists (as it's folded + * into the pgd entry) + */ +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_present(pgd) (1) +#define pgd_clear(pgdp) do { } while (0) +#define set_pgd(pgd,pgdp) do { } while (0) + + #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) #define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) @@ -382,25 +405,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) */ #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) -/* - * The "pgd_xxx()" functions here are trivial for a folded two-level - * setup: the pgd is never bad, and a pmd always exists (as it's folded - * into the pgd entry) - */ -#define pgd_none(pgd) (0) -#define pgd_bad(pgd) (0) -#define pgd_present(pgd) (1) -#define pgd_clear(pgdp) do { } while (0) -#define set_pgd(pgd,pgdp) do { } while (0) - -/* to find an entry in a page-table-directory */ -#define pgd_index(addr) ((addr) >> PGDIR_SHIFT) - -#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr)) - -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) - /* Find an entry in the second-level page table.. */ #define pmd_offset(dir, addr) ((pmd_t *)(dir)) @@ -414,8 +418,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) return pte; } -extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; - /* * Encode and decode a swap entry. 
Swap entries are stored in the Linux * page tables as follows: -- cgit v1.2.3-59-g8ed1b From b510b049b549500816280f7ceaa087cfefdec581 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 26 Nov 2010 20:35:25 +0000 Subject: ARM: pgtable: group pte functions together Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 110 +++++++++++++++++++---------------------- 1 file changed, 51 insertions(+), 59 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 8ee9dce10b83..0ebe1165ba0a 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -299,26 +299,66 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; #define set_pgd(pgd,pgdp) do { } while (0) -#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) -#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) +/* Find an entry in the second-level page table.. */ +#define pmd_offset(dir, addr) ((pmd_t *)(dir)) -#define pte_none(pte) (!pte_val(pte)) -#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) -#define pte_page(pte) (pfn_to_page(pte_pfn(pte))) -#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_present(pmd) (pmd_val(pmd)) +#define pmd_bad(pmd) (pmd_val(pmd) & 2) + +#define copy_pmd(pmdpd,pmdps) \ + do { \ + pmdpd[0] = pmdps[0]; \ + pmdpd[1] = pmdps[1]; \ + flush_pmd_entry(pmdpd); \ + } while (0) + +#define pmd_clear(pmdp) \ + do { \ + pmdp[0] = __pmd(0); \ + pmdp[1] = __pmd(0); \ + clean_pmd_entry(pmdp); \ + } while (0) + +static inline pte_t *pmd_page_vaddr(pmd_t pmd) +{ + unsigned long ptr; + + ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); + ptr += PTRS_PER_PTE * sizeof(void *); + + return __va(ptr); +} + +#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) + +/* we don't need complex calculations here as the pmd is folded into the pgd */ +#define pmd_addr_end(addr,end) (end) -#define pte_offset_map(dir,addr) (__pte_map(dir) + __pte_index(addr)) -#define pte_unmap(pte) __pte_unmap(pte) #ifndef CONFIG_HIGHPTE -#define __pte_map(dir) pmd_page_vaddr(*(dir)) +#define __pte_map(pmd) pmd_page_vaddr(*(pmd)) #define __pte_unmap(pte) do { } while (0) #else -#define __pte_map(dir) ((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE) +#define __pte_map(pmd) ((pte_t *)kmap_atomic(pmd_page(*(pmd))) + PTRS_PER_PTE) #define __pte_unmap(pte) kunmap_atomic((pte - PTRS_PER_PTE)) #endif +#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + +#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr)) + +#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr)) +#define pte_unmap(pte) __pte_unmap(pte) + +#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) +#define pfn_pte(pfn,prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) + +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) +#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) + #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) +#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) #if __LINUX_ARM_ARCH__ < 6 static inline void __sync_icache_dcache(pte_t pteval) @@ -339,10 +379,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, } } -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. 
- */ +#define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) #define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) @@ -366,51 +403,6 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); static inline pte_t pte_mkspecial(pte_t pte) { return pte; } -#define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_present(pmd) (pmd_val(pmd)) -#define pmd_bad(pmd) (pmd_val(pmd) & 2) - -#define copy_pmd(pmdpd,pmdps) \ - do { \ - pmdpd[0] = pmdps[0]; \ - pmdpd[1] = pmdps[1]; \ - flush_pmd_entry(pmdpd); \ - } while (0) - -#define pmd_clear(pmdp) \ - do { \ - pmdp[0] = __pmd(0); \ - pmdp[1] = __pmd(0); \ - clean_pmd_entry(pmdp); \ - } while (0) - -static inline pte_t *pmd_page_vaddr(pmd_t pmd) -{ - unsigned long ptr; - - ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); - ptr += PTRS_PER_PTE * sizeof(void *); - - return __va(ptr); -} - -#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) - -/* we don't need complex calculations here as the pmd is folded into the pgd */ -#define pmd_addr_end(addr,end) (end) - -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - */ -#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) - -/* Find an entry in the second-level page table.. */ -#define pmd_offset(dir, addr) ((pmd_t *)(dir)) - -/* Find an entry in the third-level page table.. */ -#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) - static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER; -- cgit v1.2.3-59-g8ed1b From 69529c0eb76469168f1dd5851f363dbab17ce8fd Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 16 Nov 2010 00:19:55 +0000 Subject: ARM: pgtable: directly pass pgd/pmd/pte to their error functions Rather than passing the pte value to __pte_error, pass the raw pte_t cookie instead. Do the same for pmd and pgd functions. 
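(As an illustration only, not part of the patch: a minimal user-space sketch of why passing the typed cookie is preferable. With the STRICT_MM_TYPECHECKS wrappers shown later in this series, each table entry is a distinct struct type, so a prototype taking unsigned long would silently accept any integer, while a pte_t parameter keeps the type check at the call site and unwraps with pte_val() only inside the helper. The helper name and the pte value below are made up for the example.)

/* Stand-alone model; compiles with any C99 compiler, not kernel code. */
#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;    /* distinct type per table level */
#define pte_val(x)      ((x).pte)
#define __pte(x)        ((pte_t) { (x) })

/* Takes the typed cookie, as the patched __pte_error() prototype does. */
static void pte_error(const char *file, int line, pte_t pte)
{
        printf("%s:%d: bad pte %08lx.\n", file, line, pte_val(pte));
}

int main(void)
{
        pte_t bad = __pte(0x12345043UL);        /* made-up value */

        pte_error(__FILE__, __LINE__, bad);     /* no pte_val() at the call site */
        /* pte_error(__FILE__, __LINE__, 0x12345043UL); would no longer compile */
        return 0;
}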
Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 12 ++++++------ arch/arm/kernel/traps.c | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 0ebe1165ba0a..d00ef7ecee64 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -112,13 +112,13 @@ #define LIBRARY_TEXT_START 0x0c000000 #ifndef __ASSEMBLY__ -extern void __pte_error(const char *file, int line, unsigned long val); -extern void __pmd_error(const char *file, int line, unsigned long val); -extern void __pgd_error(const char *file, int line, unsigned long val); +extern void __pte_error(const char *file, int line, pte_t); +extern void __pmd_error(const char *file, int line, pmd_t); +extern void __pgd_error(const char *file, int line, pgd_t); -#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) -#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) -#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) +#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte) +#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) +#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) #endif /* !__ASSEMBLY__ */ #define PMD_SIZE (1UL << PMD_SHIFT) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 446aee97436f..354cd4ce400a 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -708,19 +708,19 @@ void __readwrite_bug(const char *fn) } EXPORT_SYMBOL(__readwrite_bug); -void __pte_error(const char *file, int line, unsigned long val) +void __pte_error(const char *file, int line, pte_t pte) { - printk("%s:%d: bad pte %08lx.\n", file, line, val); + printk("%s:%d: bad pte %08lx.\n", file, line, pte_val(pte)); } -void __pmd_error(const char *file, int line, unsigned long val) +void __pmd_error(const char *file, int line, pmd_t pmd) { - printk("%s:%d: bad pmd %08lx.\n", file, line, val); + printk("%s:%d: bad pmd %08lx.\n", file, line, pmd_val(pmd)); } -void __pgd_error(const char *file, int line, unsigned long val) +void __pgd_error(const char *file, int line, pgd_t pgd) { - printk("%s:%d: bad pgd %08lx.\n", file, line, val); + printk("%s:%d: bad pgd %08lx.\n", file, line, pgd_val(pgd)); } asmlinkage void __div0(void) -- cgit v1.2.3-59-g8ed1b From b0d03745b18c39b8e86e70f7778f2093d2cd4ed7 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 21 Nov 2010 11:00:56 +0000 Subject: ARM: pgtable: get rid of get_pgd_slow()/free_pgd_slow() These old names are just aliases for pgd_alloc/pgd_free. Just use the new names. 
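(As an illustration only, not part of the patch: the function being renamed hands out the full 16 KB ARM first-level table -- see the "need to get a 16k page for level 1" comment just below. A stand-alone sketch of that size arithmetic follows, with unsigned int standing in for the 32-bit ARM word; only the order-2 free_pages() call is taken from the diff, the rest is illustrative.)

/* Stand-alone model, not kernel code. */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PTRS_PER_PGD    2048                      /* Linux view of the first level */
typedef struct { unsigned int pgd[2]; } pgd_t;    /* two 32-bit hardware entries */

int main(void)
{
        unsigned long bytes = PTRS_PER_PGD * sizeof(pgd_t);
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < bytes)      /* smallest power-of-two pages */
                order++;

        /* 2048 * 8 = 16384 bytes -> 4 pages -> order 2, which is why
         * arch/arm/mm/pgd.c frees with free_pages((unsigned long)pgd_base, 2). */
        printf("level-1 table: %lu bytes, page order %u\n", bytes, order);
        return 0;
}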
Signed-off-by: Russell King --- arch/arm/include/asm/pgalloc.h | 7 ++----- arch/arm/mm/pgd.c | 4 ++-- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index b12cc98bbe04..e9c39352cee1 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -30,11 +30,8 @@ #define pmd_free(mm, pmd) do { } while (0) #define pgd_populate(mm,pmd,pte) BUG() -extern pgd_t *get_pgd_slow(struct mm_struct *mm); -extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); - -#define pgd_alloc(mm) get_pgd_slow(mm) -#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) +extern pgd_t *pgd_alloc(struct mm_struct *mm); +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index 69bbfc6645a6..e3eda56f4788 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -22,7 +22,7 @@ /* * need to get a 16k page for level 1 */ -pgd_t *get_pgd_slow(struct mm_struct *mm) +pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *new_pgd, *init_pgd; pmd_t *new_pmd, *init_pmd; @@ -73,7 +73,7 @@ no_pgd: return NULL; } -void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd) +void pgd_free(struct mm_struct *mm, pgd_t *pgd) { pmd_t *pmd; pgtable_t pte; -- cgit v1.2.3-59-g8ed1b From 6e4beb5e682953212da48ebb9e5c90408b8d38ae Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 21 Nov 2010 23:48:55 +0000 Subject: ARM: pgtable: use pXd_none_or_clear_bad() in pgd_free() Remove knowledge of the 2-level wrapping in pgd_free(), and use the pXd_none_or_clear_bad() macros when checking the entries. Signed-off-by: Russell King --- arch/arm/mm/pgd.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index e3eda56f4788..d15785eb73a7 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -73,28 +73,29 @@ no_pgd: return NULL; } -void pgd_free(struct mm_struct *mm, pgd_t *pgd) +void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) { + pgd_t *pgd; pmd_t *pmd; pgtable_t pte; - if (!pgd) + if (!pgd_base) return; - /* pgd is always present and good */ - pmd = pmd_off(pgd, 0); - if (pmd_none(*pmd)) - goto free; - if (pmd_bad(*pmd)) { - pmd_ERROR(*pmd); - pmd_clear(pmd); - goto free; - } + pgd = pgd_base + pgd_index(0); + if (pgd_none_or_clear_bad(pgd)) + goto no_pgd; + + pmd = pmd_offset(pgd, 0); + if (pmd_none_or_clear_bad(pmd)) + goto no_pmd; pte = pmd_pgtable(*pmd); pmd_clear(pmd); pte_free(mm, pte); +no_pmd: + pgd_clear(pgd); pmd_free(mm, pmd); -free: - free_pages((unsigned long) pgd, 2); +no_pgd: + free_pages((unsigned long) pgd_base, 2); } -- cgit v1.2.3-59-g8ed1b From 97092e0c56830457af0639f6bd904537a150ea4a Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 16 Nov 2010 00:16:01 +0000 Subject: ARM: pgtable: use phys_addr_t for physical addresses Ensure that physical addresses are typed as phys_addr_t Signed-off-by: Russell King --- arch/arm/include/asm/pgalloc.h | 8 +++++--- arch/arm/include/asm/pgtable.h | 2 +- arch/arm/mm/mmu.c | 7 ++++--- 3 files changed, 10 insertions(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index e9c39352cee1..1f1064b519c0 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -103,8 +103,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) 
__free_page(pte); } -static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval) +static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, + unsigned long prot) { + unsigned long pmdval = pte | prot; pmdp[0] = __pmd(pmdval); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); flush_pmd_entry(pmdp); @@ -126,13 +128,13 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) * address of the PTE table */ pte_ptr -= PTRS_PER_PTE * sizeof(void *); - __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE); + __pmd_populate(pmdp, __pa(pte_ptr), _PAGE_KERNEL_TABLE); } static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) { - __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); } #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index d00ef7ecee64..30b3a07dd998 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -322,7 +322,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; static inline pte_t *pmd_page_vaddr(pmd_t pmd) { - unsigned long ptr; + phys_addr_t ptr; ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); ptr += PTRS_PER_PTE * sizeof(void *); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 72ad3e1f56cf..9568f8632ae3 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -535,7 +535,7 @@ static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned l { if (pmd_none(*pmd)) { pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); - __pmd_populate(pmd, __pa(pte) | prot); + __pmd_populate(pmd, __pa(pte), prot); } BUG_ON(pmd_bad(*pmd)); return pte_offset_kernel(pmd, addr); @@ -553,7 +553,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, - unsigned long end, unsigned long phys, + unsigned long end, phys_addr_t phys, const struct mem_type *type) { pmd_t *pmd = pmd_offset(pgd, addr); @@ -588,7 +588,8 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, static void __init create_36bit_mapping(struct map_desc *md, const struct mem_type *type) { - unsigned long phys, addr, length, end; + unsigned long addr, length, end; + phys_addr_t phys; pgd_t *pgd; addr = md->virtual; -- cgit v1.2.3-59-g8ed1b From f6e3354d02aa1f30672e3671098c12cb49c7da25 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 16 Nov 2010 00:22:09 +0000 Subject: ARM: pgtable: introduce pteval_t to represent a pte value This makes everywhere dealing with pte values use the same type. Signed-off-by: Russell King --- arch/arm/include/asm/page.h | 6 ++++-- arch/arm/include/asm/pgtable.h | 41 +++++++++++++++++++++-------------------- arch/arm/mm/fault-armv.c | 2 +- arch/arm/mm/mm.h | 2 +- arch/arm/mm/mmu.c | 2 +- 5 files changed, 28 insertions(+), 25 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index a485ac3c8696..f51a69595f6e 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -151,13 +151,15 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); +typedef unsigned long pteval_t; + #undef STRICT_MM_TYPECHECKS #ifdef STRICT_MM_TYPECHECKS /* * These are used to make use of C type-checking.. 
*/ -typedef struct { unsigned long pte; } pte_t; +typedef struct { pteval_t pte; } pte_t; typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pgd[2]; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; @@ -175,7 +177,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; /* * .. while these make it easier on the compiler */ -typedef unsigned long pte_t; +typedef pteval_t pte_t; typedef unsigned long pmd_t; typedef unsigned long pgd_t[2]; typedef unsigned long pgprot_t; diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 30b3a07dd998..50eb0b4278ec 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -10,6 +10,7 @@ #ifndef _ASMARM_PGTABLE_H #define _ASMARM_PGTABLE_H +#include #include #include @@ -161,30 +162,30 @@ extern void __pgd_error(const char *file, int line, pgd_t); * The PTE table pointer refers to the hardware entries; the "Linux" * entries are stored 1024 bytes below. */ -#define L_PTE_PRESENT (1 << 0) -#define L_PTE_YOUNG (1 << 1) -#define L_PTE_FILE (1 << 2) /* only when !PRESENT */ -#define L_PTE_DIRTY (1 << 6) -#define L_PTE_WRITE (1 << 7) -#define L_PTE_USER (1 << 8) -#define L_PTE_EXEC (1 << 9) -#define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */ +#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) +#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) +#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ +#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) +#define L_PTE_WRITE (_AT(pteval_t, 1) << 7) +#define L_PTE_USER (_AT(pteval_t, 1) << 8) +#define L_PTE_EXEC (_AT(pteval_t, 1) << 9) +#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ /* * These are the memory types, defined to be compatible with * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB */ -#define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */ -#define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */ -#define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */ -#define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */ -#define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */ -#define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */ -#define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */ -#define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */ -#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */ -#define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */ -#define L_PTE_MT_MASK (0x0f << 2) +#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ +#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ +#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ +#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ +#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ +#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ +#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ +#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ +#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ +#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ +#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) #ifndef __ASSEMBLY__ @@ -405,7 +406,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER; + const pteval_t mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm/mm/fault-armv.c 
b/arch/arm/mm/fault-armv.c index 83e59f870426..01210dba0221 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -26,7 +26,7 @@ #include "mm.h" -static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; +static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE; #if __LINUX_ARM_ARCH__ < 6 /* diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 6630620380a4..36960df5fb76 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -16,7 +16,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt) } struct mem_type { - unsigned int prot_pte; + pteval_t prot_pte; unsigned int prot_l1; unsigned int prot_sect; unsigned int domain; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9568f8632ae3..94ee0930d690 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -62,7 +62,7 @@ struct cachepolicy { const char policy[16]; unsigned int cr_mask; unsigned int pmd; - unsigned int pte; + pteval_t pte; }; static struct cachepolicy cache_policies[] __initdata = { -- cgit v1.2.3-59-g8ed1b From d30e45eeabefadc6039d7f876a59e5f5f6cb11c6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 16 Nov 2010 00:16:01 +0000 Subject: ARM: pgtable: switch order of Linux vs hardware page tables This switches the ordering of the Linux vs hardware page tables in each page, thereby eliminating some of the arithmetic in the page table walks. As we now place the Linux page table at the beginning of the page, we can deal with the offset in the pgt by simply masking it away, along with the other control bits. This also makes the arithmetic all be positive, rather than a mixture. Signed-off-by: Russell King --- arch/arm/include/asm/pgalloc.h | 39 +++++++++++++++++---------------------- arch/arm/include/asm/pgtable.h | 31 +++++++++++++++---------------- arch/arm/mm/fault.c | 2 +- arch/arm/mm/proc-macros.S | 10 +++++----- arch/arm/mm/proc-v7.S | 8 +++----- 5 files changed, 41 insertions(+), 49 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 1f1064b519c0..9763be04f77e 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -35,6 +35,11 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) +static inline void clean_pte_table(pte_t *pte) +{ + clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE); +} + /* * Allocate one PTE table. 
* @@ -42,14 +47,14 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); * into one table thus: * * +------------+ - * | h/w pt 0 | - * +------------+ - * | h/w pt 1 | - * +------------+ * | Linux pt 0 | * +------------+ * | Linux pt 1 | * +------------+ + * | h/w pt 0 | + * +------------+ + * | h/w pt 1 | + * +------------+ */ static inline pte_t * pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) @@ -57,10 +62,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_t *pte; pte = (pte_t *)__get_free_page(PGALLOC_GFP); - if (pte) { - clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE); - pte += PTRS_PER_PTE; - } + if (pte) + clean_pte_table(pte); return pte; } @@ -76,10 +79,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) pte = alloc_pages(PGALLOC_GFP, 0); #endif if (pte) { - if (!PageHighMem(pte)) { - void *page = page_address(pte); - clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE); - } + if (!PageHighMem(pte)) + clean_pte_table(page_address(pte)); pgtable_page_ctor(pte); } @@ -91,10 +92,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) */ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - if (pte) { - pte -= PTRS_PER_PTE; + if (pte) free_page((unsigned long)pte); - } } static inline void pte_free(struct mm_struct *mm, pgtable_t pte) @@ -106,7 +105,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, unsigned long prot) { - unsigned long pmdval = pte | prot; + unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot; pmdp[0] = __pmd(pmdval); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); flush_pmd_entry(pmdp); @@ -121,14 +120,10 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) { - unsigned long pte_ptr = (unsigned long)ptep; - /* - * The pmd must be loaded with the physical - * address of the PTE table + * The pmd must be loaded with the physical address of the PTE table */ - pte_ptr -= PTRS_PER_PTE * sizeof(void *); - __pmd_populate(pmdp, __pa(pte_ptr), _PAGE_KERNEL_TABLE); + __pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE); } static inline void diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 50eb0b4278ec..e582214b00df 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -55,7 +55,7 @@ * Therefore, we tweak the implementation slightly - we tell Linux that we * have 2048 entries in the first level, each of which is 8 bytes (iow, two * hardware pointers to the second level.) The second level contains two - * hardware PTE tables arranged contiguously, followed by Linux versions + * hardware PTE tables arranged contiguously, preceded by Linux versions * which contain the state information Linux needs. We, therefore, end up * with 512 entries in the "PTE" level. 
* @@ -63,15 +63,15 @@ * * pgd pte * | | - * +--------+ +0 - * | |-----> +------------+ +0 + * +--------+ + * | | +------------+ +0 + * +- - - - + | Linux pt 0 | + * | | +------------+ +1024 + * +--------+ +0 | Linux pt 1 | + * | |-----> +------------+ +2048 * +- - - - + +4 | h/w pt 0 | - * | |-----> +------------+ +1024 + * | |-----> +------------+ +3072 * +--------+ +8 | h/w pt 1 | - * | | +------------+ +2048 - * +- - - - + | Linux pt 0 | - * | | +------------+ +3072 - * +--------+ | Linux pt 1 | * | | +------------+ +4096 * * See L_PTE_xxx below for definitions of bits in the "Linux pt", and @@ -103,6 +103,10 @@ #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 2048 +#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) +#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) +#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) + /* * PMD_SHIFT determines the size of the area a second-level page table can map * PGDIR_SHIFT determines what a third-level page table entry can map @@ -323,12 +327,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; static inline pte_t *pmd_page_vaddr(pmd_t pmd) { - phys_addr_t ptr; - - ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); - ptr += PTRS_PER_PTE * sizeof(void *); - - return __va(ptr); + return __va(pmd_val(pmd) & PAGE_MASK); } #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) @@ -341,8 +340,8 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define __pte_map(pmd) pmd_page_vaddr(*(pmd)) #define __pte_unmap(pte) do { } while (0) #else -#define __pte_map(pmd) ((pte_t *)kmap_atomic(pmd_page(*(pmd))) + PTRS_PER_PTE) -#define __pte_unmap(pte) kunmap_atomic((pte - PTRS_PER_PTE)) +#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd))) +#define __pte_unmap(pte) kunmap_atomic(pte) #endif #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 1e21e125fe3a..f10f9bac2206 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -108,7 +108,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr) pte = pte_offset_map(pmd, addr); printk(", *pte=%08lx", pte_val(*pte)); - printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE])); + printk(", *ppte=%08lx", pte_val(pte[PTE_HWTABLE_PTRS])); pte_unmap(pte); } while(0); diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 7d63beaf9745..cbedf9c46b9d 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -121,7 +121,7 @@ .endm .macro armv6_set_pte_ext pfx - str r1, [r0], #-2048 @ linux version + str r1, [r0], #2048 @ linux version bic r3, r1, #0x000003fc bic r3, r3, #PTE_TYPE_MASK @@ -170,7 +170,7 @@ * 1111 0xff r/w r/w */ .macro armv3_set_pte_ext wc_disable=1 - str r1, [r0], #-2048 @ linux version + str r1, [r0], #2048 @ linux version eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY @@ -193,7 +193,7 @@ bicne r2, r2, #PTE_BUFFERABLE #endif .endif - str r2, [r0] @ hardware version + str r2, [r0] @ hardware version .endm @@ -213,7 +213,7 @@ * 1111 11 r/w r/w */ .macro xscale_set_pte_ext_prologue - str r1, [r0], #-2048 @ linux version + str r1, [r0] @ linux version eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY @@ -232,7 +232,7 @@ tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? movne r2, #0 @ no -> fault - str r2, [r0] @ hardware version + str r2, [r0, #2048]! 
@ hardware version mov ip, #0 mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line mcr p15, 0, ip, c7, c10, 4 @ data write barrier diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 53cbe2225153..89c31a6dae5c 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -124,15 +124,13 @@ ENDPROC(cpu_v7_switch_mm) * Set a level 2 translation table entry. * * - ptep - pointer to level 2 translation table entry - * (hardware version is stored at -1024 bytes) + * (hardware version is stored at +2048 bytes) * - pte - PTE value to store * - ext - value for extended PTE bits */ ENTRY(cpu_v7_set_pte_ext) #ifdef CONFIG_MMU - ARM( str r1, [r0], #-2048 ) @ linux version - THUMB( str r1, [r0] ) @ linux version - THUMB( sub r0, r0, #2048 ) + str r1, [r0] @ linux version bic r3, r1, #0x000003f0 bic r3, r3, #PTE_TYPE_MASK @@ -158,7 +156,7 @@ ENTRY(cpu_v7_set_pte_ext) tstne r1, #L_PTE_PRESENT moveq r3, #0 - str r3, [r0] + str r3, [r0, #2048]! mcr p15, 0, r0, c7, c10, 1 @ flush_pte #endif mov pc, lr -- cgit v1.2.3-59-g8ed1b From 26bbf0b57a0848932f725076bcb1245ca696e8d3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 21 Nov 2010 11:30:36 +0000 Subject: ARM: pgtable: remove L2 cache flushes for SMP page table bring-up The MMU is always configured to read page tables from the L2 cache so there's little point flushing them out of the L2 cache back to RAM. Remove these flushes. Signed-off-by: Russell King --- arch/arm/kernel/smp.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 8c1959590252..46313805f430 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -85,7 +85,6 @@ static inline void identity_mapping_add(pgd_t *pgd, unsigned long start, pmd[1] = __pmd(addr | prot); addr += SECTION_SIZE; flush_pmd_entry(pmd); - outer_clean_range(__pa(pmd), __pa(pmd + 1)); } } @@ -100,7 +99,6 @@ static inline void identity_mapping_del(pgd_t *pgd, unsigned long start, pmd[0] = __pmd(0); pmd[1] = __pmd(0); clean_pmd_entry(pmd); - outer_clean_range(__pa(pmd), __pa(pmd + 1)); } } -- cgit v1.2.3-59-g8ed1b From 614dd0585f376a25c638abbed9c5fbd21d7baece Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 21 Nov 2010 11:41:57 +0000 Subject: ARM: pgtable: collect up identity mapping functions We have two places where we create identity mappings - one when we bring secondary CPUs online, and one where we setup some mappings for soft- reboot. Combine these two into a single implementation. Also collect the identity mapping deletion function. 
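(As an illustration only, not part of the patch: a user-space model of the 1:1 mapping the combined helper establishes. Each first-level slot covers 2 MB and is filled with two 1 MB section entries whose base equals the virtual address itself, so the CPU keeps fetching from the same addresses while the MMU is switched on for a secondary core or off for a soft reboot. The prot value and the address range below are illustrative.)

/* Stand-alone model, not kernel code. */
#include <stdio.h>

#define SECTION_SIZE    0x00100000UL               /* 1 MB hardware section */
#define PGDIR_MASK      (~(2 * SECTION_SIZE - 1))  /* a pgd slot spans 2 MB */
#define PROT_SECT       0x402UL                    /* section type + AP write (illustrative) */

static void idmap_add(unsigned long addr, unsigned long end)
{
        for (addr &= PGDIR_MASK; addr < end; addr += SECTION_SIZE)
                /* section base == virtual address: the identity property */
                printf("va %08lx -> section entry %08lx\n", addr, addr | PROT_SECT);
}

int main(void)
{
        /* e.g. cover the region a secondary CPU executes from while it
         * enables its MMU (addresses made up for the example) */
        idmap_add(0x00200000UL, 0x00600000UL);
        return 0;
}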
Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 3 +++ arch/arm/kernel/smp.c | 34 ---------------------------- arch/arm/mm/Makefile | 4 ++-- arch/arm/mm/idmap.c | 51 ++++++++++++++++++++++++++++++++++++++++++ arch/arm/mm/mmu.c | 35 ----------------------------- 5 files changed, 56 insertions(+), 71 deletions(-) create mode 100644 arch/arm/mm/idmap.c (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index e582214b00df..1e31af232a36 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -474,6 +474,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define pgtable_cache_init() do { } while (0) +void identity_mapping_add(pgd_t *, unsigned long, unsigned long); +void identity_mapping_del(pgd_t *, unsigned long, unsigned long); + #endif /* !__ASSEMBLY__ */ #endif /* CONFIG_MMU */ diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 46313805f430..73cef403352a 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -68,40 +68,6 @@ enum ipi_msg_type { IPI_CPU_STOP, }; -static inline void identity_mapping_add(pgd_t *pgd, unsigned long start, - unsigned long end) -{ - unsigned long addr, prot; - pmd_t *pmd; - - prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; - if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) - prot |= PMD_BIT4; - - for (addr = start & PGDIR_MASK; addr < end;) { - pmd = pmd_offset(pgd + pgd_index(addr), addr); - pmd[0] = __pmd(addr | prot); - addr += SECTION_SIZE; - pmd[1] = __pmd(addr | prot); - addr += SECTION_SIZE; - flush_pmd_entry(pmd); - } -} - -static inline void identity_mapping_del(pgd_t *pgd, unsigned long start, - unsigned long end) -{ - unsigned long addr; - pmd_t *pmd; - - for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) { - pmd = pmd_offset(pgd + pgd_index(addr), addr); - pmd[0] = __pmd(0); - pmd[1] = __pmd(0); - clean_pmd_entry(pmd); - } -} - int __cpuinit __cpu_up(unsigned int cpu) { struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index d63b6c413758..00d74a04af3a 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -5,8 +5,8 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ iomap.o -obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \ - pgd.o mmu.o vmregion.o +obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ + mmap.o pgd.o mmu.o vmregion.o ifneq ($(CONFIG_MMU),y) obj-y += nommu.o diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c new file mode 100644 index 000000000000..034124d1272d --- /dev/null +++ b/arch/arm/mm/idmap.c @@ -0,0 +1,51 @@ +#include + +#include +#include +#include + +void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) +{ + unsigned long prot; + + prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; + if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) + prot |= PMD_BIT4; + + for (addr &= PGDIR_MASK; addr < end;) { + pmd_t *pmd = pmd_offset(pgd + pgd_index(addr), addr); + pmd[0] = __pmd(addr | prot); + addr += SECTION_SIZE; + pmd[1] = __pmd(addr | prot); + addr += SECTION_SIZE; + flush_pmd_entry(pmd); + } +} + +#ifdef CONFIG_SMP +void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) +{ + for (addr &= PGDIR_MASK; addr < end; addr += PGDIR_SIZE) { + pmd_t *pmd = pmd_offset(pgd + pgd_index(addr), addr); + pmd[0] = __pmd(0); + pmd[1] = __pmd(0); + clean_pmd_entry(pmd); + } +} +#endif + +/* + * In order to soft-boot, we need to insert a 1:1 mapping in place 
of + * the user-mode pages. This will then ensure that we have predictable + * results when turning the mmu off + */ +void setup_mm_for_reboot(char mode) +{ + /* + * We need to access to user-mode page tables here. For kernel threads + * we don't have any user-mode mappings so we use the context that we + * "borrowed". + */ + identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE); + local_flush_tlb_all(); +} diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 94ee0930d690..bd1a11e62f4e 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1045,38 +1045,3 @@ void __init paging_init(struct machine_desc *mdesc) empty_zero_page = virt_to_page(zero_page); __flush_dcache_page(NULL, empty_zero_page); } - -/* - * In order to soft-boot, we need to insert a 1:1 mapping in place of - * the user-mode pages. This will then ensure that we have predictable - * results when turning the mmu off - */ -void setup_mm_for_reboot(char mode) -{ - unsigned long base_pmdval; - pgd_t *pgd; - int i; - - /* - * We need to access to user-mode page tables here. For kernel threads - * we don't have any user-mode mappings so we use the context that we - * "borrowed". - */ - pgd = current->active_mm->pgd; - - base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; - if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) - base_pmdval |= PMD_BIT4; - - for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) { - unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval; - pmd_t *pmd; - - pmd = pmd_off(pgd, i << PGDIR_SHIFT); - pmd[0] = __pmd(pmdval); - pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1))); - flush_pmd_entry(pmd); - } - - local_flush_tlb_all(); -} -- cgit v1.2.3-59-g8ed1b From af3813d6a5bf8d0f71b23d3ce458fa5f9916c6b7 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 21 Nov 2010 11:48:16 +0000 Subject: ARM: pgtable: use conventional page table code for identity mappings Remove some knowledge of our 2-level page table layout from the identity mapping code - we assume that a step size of PGDIR_SIZE will allow us to step over all entries. While this is true today, it won't be true in the near future. 
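(As an illustration only, not part of the patch: a stand-alone sketch of the conventional walk the code switches to. pgd_addr_end() below follows the generic kernel idiom -- clamp each step to the next PGDIR_SIZE boundary or to the end of the range, whichever comes first -- so the loop never needs to know how many hardware entries sit behind one pgd slot. The shift value and the address range are illustrative.)

/* Stand-alone model, not kernel code. */
#include <stdio.h>

#define PGDIR_SHIFT     21
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE - 1))

/* Generic clamping idiom; the subtractions keep it safe if addr + PGDIR_SIZE wraps. */
static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;

        return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x00300000UL, end = 0x00a00000UL, next;

        do {
                next = pgd_addr_end(addr, end);
                /* here the real code would call idmap_add_pmd(pgd, addr, next, prot) */
                printf("pgd slot %lu covers [%08lx, %08lx)\n",
                       addr >> PGDIR_SHIFT, addr, next);
        } while (addr = next, addr != end);
        return 0;
}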
Signed-off-by: Russell King --- arch/arm/mm/idmap.c | 46 +++++++++++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 15 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 034124d1272d..57299446f787 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -4,33 +4,49 @@ #include #include +static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end, + unsigned long prot) +{ + pmd_t *pmd = pmd_offset(pgd, addr); + + addr = (addr & PMD_MASK) | prot; + pmd[0] = __pmd(addr); + addr += SECTION_SIZE; + pmd[1] = __pmd(addr); + flush_pmd_entry(pmd); +} + void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) { - unsigned long prot; + unsigned long prot, next; prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) prot |= PMD_BIT4; - for (addr &= PGDIR_MASK; addr < end;) { - pmd_t *pmd = pmd_offset(pgd + pgd_index(addr), addr); - pmd[0] = __pmd(addr | prot); - addr += SECTION_SIZE; - pmd[1] = __pmd(addr | prot); - addr += SECTION_SIZE; - flush_pmd_entry(pmd); - } + pgd += pgd_index(addr); + do { + next = pgd_addr_end(addr, end); + idmap_add_pmd(pgd, addr, next, prot); + } while (pgd++, addr = next, addr != end); } #ifdef CONFIG_SMP +static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end) +{ + pmd_t *pmd = pmd_offset(pgd, addr); + pmd_clear(pmd); +} + void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) { - for (addr &= PGDIR_MASK; addr < end; addr += PGDIR_SIZE) { - pmd_t *pmd = pmd_offset(pgd + pgd_index(addr), addr); - pmd[0] = __pmd(0); - pmd[1] = __pmd(0); - clean_pmd_entry(pmd); - } + unsigned long next; + + pgd += pgd_index(addr); + do { + next = pgd_addr_end(addr, end); + idmap_del_pmd(pgd, addr, next); + } while (pgd++, addr = next, addr != end); } #endif -- cgit v1.2.3-59-g8ed1b From e926f4495e202500a6265987277fab217e235f08 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 21 Nov 2010 11:55:37 +0000 Subject: ARM: pgtable: remove FIRST_USER_PGD_NR FIRST_USER_PGD_NR is now unnecessary, as this has been replaced by FIRST_USER_ADDRESS except in the architecture code. Fix up the last usage of FIRST_USER_PGD_NR, and remove the definition. Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 3 +-- arch/arm/mm/pgd.c | 8 +++----- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 1e31af232a36..4701a6b758b7 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -138,8 +138,7 @@ extern void __pgd_error(const char *file, int line, pgd_t); */ #define FIRST_USER_ADDRESS PAGE_SIZE -#define FIRST_USER_PGD_NR 1 -#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR) +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) /* * section address mask and size definitions. 
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index d15785eb73a7..93292a18cf77 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -17,8 +17,6 @@ #include "mm.h" -#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD) - /* * need to get a 16k page for level 1 */ @@ -32,14 +30,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm) if (!new_pgd) goto no_pgd; - memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t)); + memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); /* * Copy over the kernel and IO PGD entries */ init_pgd = pgd_offset_k(0); - memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR, - (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t)); + memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); -- cgit v1.2.3-59-g8ed1b From 9522d7e4cb5e0858122fc55d33a2c07728f0b10d Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 16 Nov 2010 00:23:31 +0000 Subject: ARM: pgtable: invert L_PTE_EXEC to L_PTE_XN The hardware page tables use an XN bit 'execute never'. Historically, we've had a Linux 'execute allow' bit, in the positive sense. Get rid of this artifact as future hardware will continue to have the XN sense. Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 44 +++++++++++++++++++++--------------------- arch/arm/mm/mmu.c | 15 +++++++------- arch/arm/mm/proc-macros.S | 6 +++--- arch/arm/mm/proc-v7.S | 4 ++-- 4 files changed, 34 insertions(+), 35 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 4701a6b758b7..f9b3f4bd7410 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -171,7 +171,7 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) #define L_PTE_WRITE (_AT(pteval_t, 1) << 7) #define L_PTE_USER (_AT(pteval_t, 1) << 8) -#define L_PTE_EXEC (_AT(pteval_t, 1) << 9) +#define L_PTE_XN (_AT(pteval_t, 1) << 9) #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ /* @@ -205,23 +205,23 @@ extern pgprot_t pgprot_kernel; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) -#define PAGE_NONE pgprot_user -#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE) -#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) -#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER) -#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) -#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER) -#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) -#define PAGE_KERNEL pgprot_kernel -#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC) - -#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT) -#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE) -#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) -#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) -#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) -#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) -#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) +#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN) +#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_XN) +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE) +#define PAGE_COPY _MOD_PROT(pgprot_user, 
L_PTE_USER | L_PTE_XN) +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) +#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) +#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) +#define PAGE_KERNEL_EXEC pgprot_kernel + +#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_XN) +#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_XN) +#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE) +#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) +#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) +#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) +#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) #define __pgprot_modify(prot,mask,bits) \ __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) @@ -234,14 +234,14 @@ extern pgprot_t pgprot_kernel; #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE #define pgprot_dmacoherent(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) #define __HAVE_PHYS_MEM_ACCESS_PROT struct file; extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot); #else #define pgprot_dmacoherent(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN) #endif #endif /* __ASSEMBLY__ */ @@ -383,7 +383,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, #define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) -#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC) +#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) #define pte_special(pte) (0) #define pte_present_user(pte) \ @@ -404,7 +404,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER; + const pteval_t mask = L_PTE_XN | L_PTE_WRITE | L_PTE_USER; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index bd1a11e62f4e..bd5a94b2d610 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -190,7 +190,7 @@ void adjust_cr(unsigned long mask, unsigned long set) } #endif -#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE +#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE|L_PTE_XN #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE static struct mem_type mem_types[] = { @@ -234,20 +234,19 @@ static struct mem_type mem_types[] = { .domain = DOMAIN_KERNEL, }, [MT_LOW_VECTORS] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_EXEC, + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_USER, }, [MT_HIGH_VECTORS] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_USER | L_PTE_EXEC, + L_PTE_USER, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_USER, }, [MT_MEMORY] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_WRITE | L_PTE_EXEC, + L_PTE_WRITE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, @@ -258,21 +257,21 @@ static struct mem_type mem_types[] = { }, [MT_MEMORY_NONCACHED] = { 
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, + L_PTE_WRITE| L_PTE_MT_BUFFERABLE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, [MT_MEMORY_DTCM] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_WRITE, + L_PTE_WRITE | L_PTE_XN, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_KERNEL, }, [MT_MEMORY_ITCM] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_WRITE | L_PTE_EXEC, + L_PTE_WRITE, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_KERNEL, }, diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index cbedf9c46b9d..4a7a9e142e85 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -81,7 +81,7 @@ #if L_PTE_SHARED != PTE_EXT_SHARED #error PTE shared bit mismatch #endif -#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\ +#if (L_PTE_XN+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\ L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED #error Invalid Linux PTE bit settings #endif @@ -141,8 +141,8 @@ tstne r3, #PTE_EXT_APX bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 - tst r1, #L_PTE_EXEC - orreq r3, r3, #PTE_EXT_XN + tst r1, #L_PTE_XN + orrne r3, r3, #PTE_EXT_XN orr r3, r3, r2 diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 89c31a6dae5c..4f5a594aa5fd 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -149,8 +149,8 @@ ENTRY(cpu_v7_set_pte_ext) tstne r3, #PTE_EXT_APX bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 - tst r1, #L_PTE_EXEC - orreq r3, r3, #PTE_EXT_XN + tst r1, #L_PTE_XN + orrne r3, r3, #PTE_EXT_XN tst r1, #L_PTE_YOUNG tstne r1, #L_PTE_PRESENT -- cgit v1.2.3-59-g8ed1b From 36bb94ba36f332de767cfaa3af6a5136435a3a9c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 16 Nov 2010 08:40:36 +0000 Subject: ARM: pgtable: provide RDONLY page table bit rather than WRITE bit Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 38 +++++++++++++++++++------------------- arch/arm/mm/mmu.c | 19 +++++++++---------- arch/arm/mm/proc-macros.S | 16 ++++++++-------- arch/arm/mm/proc-v7.S | 6 +++--- arch/arm/mm/proc-xscale.S | 4 ++-- 5 files changed, 41 insertions(+), 42 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index f9b3f4bd7410..ebcb6432f45f 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -169,7 +169,7 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ #define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) -#define L_PTE_WRITE (_AT(pteval_t, 1) << 7) +#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) #define L_PTE_USER (_AT(pteval_t, 1) << 8) #define L_PTE_XN (_AT(pteval_t, 1) << 9) #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ @@ -205,23 +205,23 @@ extern pgprot_t pgprot_kernel; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) -#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN) -#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_XN) -#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE) -#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) -#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) -#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) -#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) +#define 
PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY) +#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) +#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) +#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) #define PAGE_KERNEL_EXEC pgprot_kernel -#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_XN) -#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_XN) -#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE) -#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) -#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) -#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) -#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) +#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) +#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) +#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) +#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) #define __pgprot_modify(prot,mask,bits) \ __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) @@ -380,7 +380,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) +#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) #define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) @@ -393,8 +393,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } -PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE); -PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE); +PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY); +PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY); PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); @@ -404,7 +404,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = L_PTE_XN | L_PTE_WRITE | L_PTE_USER; + const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index bd5a94b2d610..546e44734db0 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -190,7 +190,7 @@ void adjust_cr(unsigned long mask, unsigned long set) } #endif -#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE|L_PTE_XN +#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE static struct mem_type mem_types[] = { @@ -234,19 +234,19 @@ static struct mem_type mem_types[] = { .domain 
= DOMAIN_KERNEL, }, [MT_LOW_VECTORS] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_RDONLY, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_USER, }, [MT_HIGH_VECTORS] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_USER, + L_PTE_USER | L_PTE_RDONLY, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_USER, }, [MT_MEMORY] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_WRITE, + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, @@ -257,21 +257,20 @@ static struct mem_type mem_types[] = { }, [MT_MEMORY_NONCACHED] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_WRITE| L_PTE_MT_BUFFERABLE, + L_PTE_MT_BUFFERABLE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, [MT_MEMORY_DTCM] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_WRITE | L_PTE_XN, + L_PTE_XN, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_KERNEL, }, [MT_MEMORY_ITCM] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_WRITE, + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_KERNEL, }, @@ -478,7 +477,7 @@ static void __init build_mem_type_table(void) pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | - L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot); + L_PTE_DIRTY | kern_pgprot); mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 4a7a9e142e85..f5ca6aaecdbd 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -81,7 +81,7 @@ #if L_PTE_SHARED != PTE_EXT_SHARED #error PTE shared bit mismatch #endif -#if (L_PTE_XN+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\ +#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED #error Invalid Linux PTE bit settings #endif @@ -132,9 +132,9 @@ and r2, r1, #L_PTE_MT_MASK ldr r2, [ip, r2] - tst r1, #L_PTE_WRITE - tstne r1, #L_PTE_DIRTY - orreq r3, r3, #PTE_EXT_APX + eor r1, r1, #L_PTE_DIRTY + tst r1, #L_PTE_DIRTY|L_PTE_RDONLY + orrne r3, r3, #PTE_EXT_APX tst r1, #L_PTE_USER orrne r3, r3, #PTE_EXT_AP1 @@ -172,7 +172,7 @@ .macro armv3_set_pte_ext wc_disable=1 str r1, [r0], #2048 @ linux version - eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY + eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits bic r2, r2, #PTE_TYPE_MASK @@ -181,7 +181,7 @@ tst r3, #L_PTE_USER @ user? orrne r2, r2, #PTE_SMALL_AP_URO_SRW - tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty? + tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? orreq r2, r2, #PTE_SMALL_AP_UNO_SRW tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? @@ -215,7 +215,7 @@ .macro xscale_set_pte_ext_prologue str r1, [r0] @ linux version - eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY + eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits orr r2, r2, #PTE_TYPE_EXT @ extended page @@ -223,7 +223,7 @@ tst r3, #L_PTE_USER @ user? orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w - tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty? 
+ tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w @ combined with user -> user r/w .endm diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 4f5a594aa5fd..210d051c54d7 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -140,9 +140,9 @@ ENTRY(cpu_v7_set_pte_ext) tst r1, #1 << 4 orrne r3, r3, #PTE_EXT_TEX(1) - tst r1, #L_PTE_WRITE - tstne r1, #L_PTE_DIRTY - orreq r3, r3, #PTE_EXT_APX + eor r1, r1, #L_PTE_DIRTY + tst r1, #L_PTE_RDONLY | L_PTE_DIRTY + orrne r3, r3, #PTE_EXT_APX tst r1, #L_PTE_USER orrne r3, r3, #PTE_EXT_AP1 diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 523408c0bb38..5a37c5e45c41 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -500,8 +500,8 @@ ENTRY(cpu_xscale_set_pte_ext) @ @ Erratum 40: must set memory to write-through for user read-only pages @ - and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2) - teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER + and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2) + teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY moveq r1, #L_PTE_MT_WRITETHROUGH and r1, r1, #L_PTE_MT_MASK -- cgit v1.2.3-59-g8ed1b
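(As an illustration only, not part of the series: a stand-alone model of what the last patch's inverted bit means for the hardware entry. The new set_pte_ext sequence -- eor r1, r1, #L_PTE_DIRTY; tst r1, #L_PTE_RDONLY | L_PTE_DIRTY; orrne r3, r3, #PTE_EXT_APX -- sets the hardware read-only bit unless the Linux pte is both writable (RDONLY clear) and dirty, so a clean writable page still faults on its first store and the kernel can mark it dirty. The bit positions are taken from the L_PTE_* definitions above; everything else is illustrative.)

/* Stand-alone model, not kernel code. */
#include <stdio.h>

#define L_PTE_DIRTY     (1UL << 6)
#define L_PTE_RDONLY    (1UL << 7)

/* Mirrors the eor/tst pair: non-zero result -> APX set -> hardware read-only. */
static int hw_read_only(unsigned long pte)
{
        pte ^= L_PTE_DIRTY;                     /* eor r1, r1, #L_PTE_DIRTY */
        return (pte & (L_PTE_DIRTY | L_PTE_RDONLY)) != 0;
}

int main(void)
{
        static const struct { unsigned long pte; const char *desc; } t[] = {
                { 0,                            "clean, writable " },
                { L_PTE_DIRTY,                  "dirty, writable " },
                { L_PTE_RDONLY,                 "clean, read-only" },
                { L_PTE_RDONLY | L_PTE_DIRTY,   "dirty, read-only" },
        };

        for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++)
                printf("%s -> hardware %s\n", t[i].desc,
                       hw_read_only(t[i].pte) ? "read-only" : "writable");
        return 0;
}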