arch/powerpc/include/asm/book3s/64/tlbflush.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};

static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled() (and the implementations use
	 * early_cpu_has_feature() etc.) because those work early in boot,
	 * and this is the machine check path, which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}


#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		return radix__flush_hugetlb_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

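/*
 * The local_ variants below invalidate translations only on the executing
 * CPU; the flush_* variants further down (under CONFIG_SMP) also handle the
 * cross-CPU case, by broadcast tlbie or by IPIs, depending on the
 * implementation.
 */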
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_all_mm(mm);
	return hash__local_flush_all_mm(mm);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}

static inline void flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_all_mm(mm);
	return hash__flush_all_mm(mm);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)		local_flush_all_mm(mm)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions; see radix__ptep_set_access_flags().
	 */
}

static inline bool __pte_flags_need_flush(unsigned long oldval,
					  unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/*
	 * The return value of this function doesn't matter for hash:
	 * ptep_modify_prot_start() does a pte_update(), which performs or
	 * schedules any necessary hash table update and flush.
	 */
	if (!radix_enabled())
		return true;

	/*
	 * We do not expect kernel mappings, non-PTEs, or not-present PTEs.
	 */
	VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));

	/*
	 * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
	 *
	 * In theory, some changed software bits could be tolerated; in
	 * practice those should rarely if ever matter.
	 */

	if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
		return true;

	/*
	 * If any of the above was present in old but cleared in new, flush.
	 * With the exception of _PAGE_ACCESSED, don't worry about flushing
	 * if that was cleared (see the comment in ptep_clear_flush_young()).
	 */
	if ((delta & ~_PAGE_ACCESSED) & oldval)
		return true;
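
	/*
	 * Illustrative example: write-protecting a writable PTE changes
	 * _PAGE_WRITE, which is within the allowed set above, but the bit
	 * was set in oldval, so the check above fires and the entry is
	 * flushed. Merely setting _PAGE_DIRTY or _PAGE_ACCESSED on a fault
	 * changes bits that were clear in oldval, so no flush is needed.
	 */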

	return false;
}

static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
}
#define pte_needs_flush pte_needs_flush

static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

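/*
 * tlbie_capable: the platform can use the broadcast tlbie instruction at all.
 * tlbie_enabled: runtime switch (it can be turned off, e.g. for debugging or
 * benchmarking); when false, flush code is expected to fall back to tlbiel
 * plus IPIs rather than broadcast invalidations.
 */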
extern bool tlbie_capable;
extern bool tlbie_enabled;

static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}

#endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */