/* arch/powerpc/include/asm/pte-walk.h */
#ifndef _ASM_POWERPC_PTE_WALK_H
#define _ASM_POWERPC_PTE_WALK_H

#include <linux/sched.h>

/*
 * Low-level page-table walker. Don't call this directly: it performs no
 * locking/IRQ sanity checks. Use the find_*_pte() wrappers below, which
 * warn when the caller violates the lockless-walk rules.
 */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);

static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
				    bool *is_thp, unsigned *hshift)
{
	pte_t *ptep;

	/*
	 * A lockless walk is only safe with interrupts disabled; warn if a
	 * caller forgets.
	 */
	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	ptep = __find_linux_pte(pgdir, ea, is_thp, hshift);

	return ptep;
}

static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
{
	/* Walk the kernel (init_mm) page tables; THP reporting not needed. */
	return __find_linux_pte(init_mm.pgd, ea, NULL, hshift);
}
/*
 * Preferred interface for lockless lookups in the current task's address
 * space. Any other lockless page table lookup needs careful audit against
 * THP split.
 */
static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
					 bool *is_thp, unsigned *hshift)
{
	pte_t *ptep;

	/* Lockless walk requires interrupts off and the caller's own mm. */
	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	VM_WARN(pgdir != current->mm->pgd,
		"%s lock less page table lookup called on wrong mm\n", __func__);
	ptep = __find_linux_pte(pgdir, ea, is_thp, hshift);

	return ptep;
}

#endif /* _ASM_POWERPC_PTE_WALK_H */