path: root/arch/csky/include/asm/pgalloc.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGALLOC_H
#define __ASM_CSKY_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

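/*
 * Point a pmd entry at a pte table: the kernel variant takes the table's
 * virtual address, the user variant takes its struct page.
 */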
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
					pte_t *pte)
{
	set_pmd(pmd, __pmd(__pa(pte)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
					pgtable_t pte)
{
	set_pmd(pmd, __pmd(__pa(page_address(pte))));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

extern void pgd_init(unsigned long *p);

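/*
 * Allocate a pte table for a kernel mapping.  Tables backing kernel-space
 * addresses (at or above 0x80000000) have every slot filled with the value
 * 0x1 instead of being zeroed.
 */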
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					unsigned long address)
{
	pte_t *pte;
	unsigned long *kaddr, i;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL,
					 PTE_ORDER);
	if (!pte)
		return NULL;

	kaddr = (unsigned long *)pte;
	if (address & 0x80000000)
		for (i = 0; i < (PAGE_SIZE/4); i++)
			*(kaddr + i) = 0x1;
	else
		clear_page(kaddr);

	return pte;
}

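/*
 * Allocate a pte table page for user mappings.  The page is initialized the
 * same way as in the kernel variant above, then registered with
 * pgtable_page_ctor() before being handed back.
 */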
static inline struct page *pte_alloc_one(struct mm_struct *mm,
						unsigned long address)
{
	struct page *pte;
	unsigned long *kaddr, i;

	pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER);
	if (pte) {
		kaddr = kmap_atomic(pte);
		if (address & 0x80000000) {
			for (i = 0; i < (PAGE_SIZE/4); i++)
				*(kaddr + i) = 0x1;
		} else
			clear_page(kaddr);
		kunmap_atomic(kaddr);
		if (!pgtable_page_ctor(pte)) {
			__free_pages(pte, PTE_ORDER);
			return NULL;
		}
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_pages(pte, PTE_ORDER);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

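/*
 * Allocate a new pgd: user entries are initialized by pgd_init(), and the
 * kernel part is copied from init_mm so kernel mappings stay shared.
 */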
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;
	pgd_t *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long *)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		/* prevent out of order execution */
		smp_mb();
#ifdef CONFIG_CPU_NEED_TLBSYNC
		dcache_wb_range((unsigned int)ret,
				(unsigned int)(ret + PTRS_PER_PGD));
#endif
	}

	return ret;
}

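/*
 * Free a pte table page through the mmu_gather: run the page-table
 * destructor first, then queue the page so it is freed after the TLB flush.
 */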
#define __pte_free_tlb(tlb, pte, address)		\
do {							\
	pgtable_page_dtor(pte);				\
	tlb_remove_page(tlb, pte);			\
} while (0)

#define check_pgt_cache()	do {} while (0)

extern void pagetable_init(void);
extern void pre_mmu_init(void);
extern void pre_trap_init(void);

#endif /* __ASM_CSKY_PGALLOC_H */