/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_PGALLOC_H
#define _ASM_TILE_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

/* Bits for the size of the second-level page table. */
#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)

/* How big is a kernel L2 page table? */
#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)

/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for a user L2 page table? */
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
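
/*
 * Worked example (illustrative only; the real values come from the
 * hypervisor headers and the configured page sizes): assuming 64KB
 * small pages (PAGE_SHIFT == 16), 16MB huge pages (HPAGE_SHIFT == 24),
 * and 8-byte hypervisor PTEs, a kernel L2 table holds
 * 2^(24 - 16) == 256 entries, i.e. 2KB (shift 11).  Since that is
 * smaller than a page, user L2 tables are rounded up to a full page
 * and L2_USER_PGTABLE_ORDER evaluates to 0.
 */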

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_64BIT
	/* tilegx: pmd_t is directly PTE-compatible. */
	set_pte(pmdp, pmd);
#else
	/*
	 * tilepro: the pud and pmd levels are folded via the
	 * asm-generic nopmd/nopud headers, so unwrap down to the
	 * underlying pgd_t, which set_pte() can write directly.
	 */
	set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
#endif
}

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *ptep)
{
	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
			      __pgprot(_PAGE_PRESENT)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t page)
{
	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
			      __pgprot(_PAGE_PRESENT)));
}

/*
 * Allocate and free page tables.
 */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
				   int order);
extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
}
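
/*
 * Illustrative sketch (hypothetical caller, simplified from the
 * generic __pte_alloc() in mm/memory.c): this is roughly how the core
 * mm code pairs pte_alloc_one()/pte_free() with pmd_populate() when it
 * needs to install a freshly allocated L2 table under a pmd.  The new
 * table is discarded if another thread populated the pmd first.
 *
 *	pgtable_t new = pte_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	spin_lock(&mm->page_table_lock);
 *	if (likely(pmd_none(*pmd)))
 *		pmd_populate(mm, pmd, new);
 *	else
 *		pte_free(mm, new);
 *	spin_unlock(&mm->page_table_lock);
 */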

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address)));
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	pte_free(mm, virt_to_page(pte));
}

extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
			       unsigned long address, int order);
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	__pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
}

#define check_pgt_cache()	do { } while (0)

/*
 * Get the small-page pte_t lowmem entry for a given pfn.
 * This may or may not be in use, depending on whether the initial
 * huge-page entry for the page has already been shattered.
 */
pte_t *get_prealloc_pte(unsigned long pfn);

/* During init, we can shatter kernel huge pages if needed. */
void shatter_pmd(pmd_t *pmd);

/* After init, a more complex technique is required. */
void shatter_huge_page(unsigned long addr);
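
/*
 * Illustrative use (hypothetical caller; the real users live under
 * arch/tile/mm): to remap one small page that currently sits under a
 * kernel huge-page mapping, first shatter the huge page, then edit the
 * resulting small-page PTE.  "lookup_kernel_pte" below is an assumed
 * helper name, not part of this header:
 *
 *	shatter_huge_page(addr);
 *	pte_t *ptep = lookup_kernel_pte(addr);
 *	set_pte(ptep, pfn_pte(pte_pfn(*ptep), new_prot));
 */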

#ifdef __tilegx__

#define pud_populate(mm, pud, pmd) \
  pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))

/* Bits for the size of the L1 (intermediate) page table. */
#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)

/* How big is a kernel L1 page table? */
#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)

/* We currently allocate L1 page tables by page. */
#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for an L1 page table? */
#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
	return (pmd_t *)page_to_virt(p);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
}
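
/*
 * Illustrative sketch (simplified from the generic __pmd_alloc() in
 * mm/memory.c): on tilegx the mid-level table is installed much like
 * the bottom-level one above, pairing pmd_alloc_one()/pmd_free() with
 * pud_populate():
 *
 *	pmd_t *new = pmd_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	spin_lock(&mm->page_table_lock);
 *	if (pud_present(*pud))
 *		pmd_free(mm, new);
 *	else
 *		pud_populate(mm, pud, new);
 *	spin_unlock(&mm->page_table_lock);
 */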

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long address)
{
	__pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
			   L1_USER_PGTABLE_ORDER);
}

#endif /* __tilegx__ */

#endif /* _ASM_TILE_PGALLOC_H */