/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros and functions to manipulate Meta page tables.
 */

#ifndef _METAG_PGTABLE_H
#define _METAG_PGTABLE_H

#include <asm/pgtable-bits.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define CONSISTENT_START	0xF7000000
#define CONSISTENT_END		0xF73FFFFF
#define VMALLOC_START		0xF8000000
#define VMALLOC_END		0xFFFEFFFF
#else
#define CONSISTENT_START	0x77000000
#define CONSISTENT_END		0x773FFFFF
#define VMALLOC_START		0x78000000
#define VMALLOC_END		0x7FFFFFFF
#endif

/*
 * The Linux memory management assumes a three-level page table setup. On
 * Meta, we use that, but "fold" the mid level into the top-level page
 * table.
 */

/* PGDIR_SHIFT determines the size of the area a second-level page table can
 * map. This is always 4MB.
 */

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
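/*
 * Worked example (illustrative): with PGDIR_SHIFT == 22, PGDIR_SIZE is
 * 1 << 22 == 0x00400000 (4MB) and PGDIR_MASK is 0xFFC00000, so an
 * address such as 0x40123456 rounds down to the 4MB boundary 0x40000000.
 */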

/*
 * Entries per page directory level: we use a two-level setup, so we
 * don't really have any PMD directory physically. First-level tables
 * always map 2GB (local or global) at a granularity of 4MB; second-level
 * tables map 4MB at a granularity between 4MB and 4kB (i.e. between 1
 * and 1024 entries).
 */
#define PTRS_PER_PTE	(PGDIR_SIZE/PAGE_SIZE)
#define HPTRS_PER_PTE	(PGDIR_SIZE/HPAGE_SIZE)
#define PTRS_PER_PGD	512
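/*
 * Worked example (assuming the common 4kB PAGE_SIZE configuration):
 * PTRS_PER_PTE is 4MB / 4kB == 1024 entries per second-level table, and
 * the 512 PTRS_PER_PGD entries of 4MB each cover 2GB in total. With,
 * say, a 2MB HPAGE_SIZE, HPTRS_PER_PTE would be only 2.
 */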

#define USER_PTRS_PER_PGD	256
#define FIRST_USER_ADDRESS	META_MEMORY_BASE
#define FIRST_USER_PGD_NR	pgd_index(FIRST_USER_ADDRESS)

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _PAGE_ACCESSED | _PAGE_CACHEABLE)
#define PAGE_SHARED_C	PAGE_SHARED
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_COPY_C	PAGE_COPY

#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_WRITE | \
				 _PAGE_CACHEABLE | _PAGE_KERNEL)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY_C
#define __P111	PAGE_COPY_C

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED_C
#define __S111	PAGE_SHARED_C
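/*
 * The __Pxwr/__Sxwr tables above translate a mapping's exec/write/read
 * bits into a pgprot. For example __P011 (private, read+write) uses
 * PAGE_COPY so that private writable mappings start out read-only and
 * are copied on write, while the shared equivalent __S011 gets a
 * genuinely writable PAGE_SHARED.
 */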

#ifndef __ASSEMBLY__

#include <asm/page.h>

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \
					!= (_PAGE_TABLE & ~_PAGE_SZ_MASK))
#define pmd_present(x)		(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x)		pfn_to_page(pte_pfn(x))
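/*
 * Illustrative round trip (assuming PAGE_SHIFT == 12): pfn_pte(0x1234,
 * PAGE_KERNEL) places the frame address 0x01234000 in the upper bits
 * with the protection flags below it, pte_pfn() recovers 0x1234, and
 * pte_page() yields the matching struct page.
 */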

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */

static inline int pte_write(pte_t pte)   { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)   { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)   { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return 0; }

static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)   { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)     { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)   { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)   { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)   { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_mkhuge(pte_t pte)    { return pte; }
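/*
 * Typical use (sketch only; the generic mm code normally does this for
 * us): a write fault marks the entry young and dirty before it is
 * written back:
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 */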

/*
 * Macros to make a page protection write-combining or uncacheable.
 */
#define pgprot_writecombine(prot)					\
	__pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0))

#define pgprot_noncached(prot)						\
	__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)
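/*
 * Example (sketch): a driver mapping device memory would typically hand
 * pgprot_noncached(vma->vm_page_prot) to remap_pfn_range(), or use
 * pgprot_writecombine() for framebuffer-style write-combined mappings.
 */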


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
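/*
 * Sketch of pte_modify() use, as mprotect() does it: the pfn and the
 * bits preserved by _PAGE_CHG_MASK are kept, everything else is
 * replaced by the new protection:
 *
 *	pte = pte_modify(pte, newprot);
 */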

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long paddr = pmd_val(pmd) & PAGE_MASK;
	if (!paddr)
		return 0;
	return (unsigned long)__va(paddr);
}

#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_page_shift(pmd)	(12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
					>> _PAGE_SZ_SHIFT))
#define pmd_num_ptrs(pmd)	(PGDIR_SIZE >> pmd_page_shift(pmd))
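/*
 * Worked example (assuming the hardware size field encodes
 * log2(page size / 4kB)): a size field of 0 gives pmd_page_shift() == 12
 * (4kB pages) and pmd_num_ptrs() == 4MB >> 12 == 1024, while a field of
 * 9 would give a shift of 21 (2MB pages) and only 2 entries in that
 * second-level table.
 */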

/*
 * Each pgd is only 2kB, mapping 2GB (local or global). If we're in global
 * space, drop the top bit before indexing the pgd.
 */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define pgd_index(address)	((((address) & ~0x80000000) >> PGDIR_SHIFT) \
							& (PTRS_PER_PGD-1))
#else
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#endif
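/*
 * Worked example: the local-space address 0x40123456 gives
 * pgd_index() == 0x40123456 >> 22 == 0x100. When the kernel lives in
 * global space (PAGE_OFFSET >= LINGLOBAL_BASE), the top bit of e.g.
 * 0xC0123456 is dropped first, so it selects the same slot: each pgd
 * only ever covers one 2GB half of the address space.
 */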

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* Find an entry in the second-level page table. */
#if !defined(CONFIG_HUGETLB_PAGE)
  /* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
# define pte_index(pmd, address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#else
  /* some pages are huge, so read 1st level pt to find out */
# define pte_index(pmd, address) \
	(((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
#endif
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address))
#define pte_offset_map(dir, address)		pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address)	pte_offset_kernel(dir, address)
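/*
 * Sketch of a full lookup through the folded levels, for some mm and
 * address addr (illustrative; real callers go through the generic mm
 * walkers and check pmd_none()/pmd_bad() first):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...					use *pte here
 *	pte_unmap(pte);
 */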

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Meta doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *pte)
{
}

/*
 * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e)).
 * Since _PAGE_PRESENT is bit 1, we can use the bits above it.
 */
#define __swp_type(x)			(((x).val >> 1) & 0xff)
#define __swp_offset(x)			((x).val >> 10)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | \
					 ((offset) << 10) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
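/*
 * Worked example: __swp_entry(5, 0x1234) packs type 5 into bits 1-8 and
 * offset 0x1234 into bits 10 and up, giving a pte value of
 * (5 << 1) | (0x1234 << 10) == 0x0048D00A, with the present bit left
 * clear so the entry never looks like a valid mapping.
 */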

#define kern_addr_valid(addr)	(1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
void paging_init(unsigned long mem_end);

#ifdef CONFIG_METAG_META12
/* This is a workaround for an issue in Meta 1 cores. These cores cache
 * invalid entries in the TLB, so we always need to flush whenever we add
 * a new pte. Unfortunately we can only flush the whole TLB, not shoot down
 * single entries, so this is sub-optimal. This implementation ensures that
 * we get a flush on the second attempt, so we may still get repeated
 * faults, but we don't overflow the kernel stack handling them.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
	}								  \
	flush_tlb_page(__vma, __address);				  \
	__changed;							  \
})
#endif

#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */
#endif /* _METAG_PGTABLE_H */