Diffstat (limited to 'arch/sh/mm/init.c')
-rw-r--r--  arch/sh/mm/init.c | 69
1 file changed, 47 insertions(+), 22 deletions(-)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d0e249100e98..3385b28acaac 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -47,7 +47,6 @@ static pte_t *__get_pte_phys(unsigned long addr)
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- pte_t *pte;
pgd = pgd_offset_k(addr);
if (pgd_none(*pgd)) {
@@ -67,8 +66,7 @@ static pte_t *__get_pte_phys(unsigned long addr)
return NULL;
}
- pte = pte_offset_kernel(pmd, addr);
- return pte;
+ return pte_offset_kernel(pmd, addr);
}
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
@@ -125,13 +123,45 @@ void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
clear_pte_phys(address, prot);
}
+static pmd_t * __init one_md_table_init(pud_t *pud)
+{
+ if (pud_none(*pud)) {
+ pmd_t *pmd;
+
+ pmd = alloc_bootmem_pages(PAGE_SIZE);
+ pud_populate(&init_mm, pud, pmd);
+ BUG_ON(pmd != pmd_offset(pud, 0));
+ }
+
+ return pmd_offset(pud, 0);
+}
+
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *pte;
+
+ pte = alloc_bootmem_pages(PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, pte);
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ }
+
+ return pte_offset_kernel(pmd, 0);
+}
+
+static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+ unsigned long vaddr, pte_t *lastpte)
+{
+ return pte;
+}
+
void __init page_table_range_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- pte_t *pte;
+ pte_t *pte = NULL;
int i, j, k;
unsigned long vaddr;
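
Note: the new one_md_table_init()/one_page_table_init() helpers factor out the allocate-on-first-use pattern that the loop below previously open-coded, and page_table_kmap_check() is a deliberate no-op stub that mirrors the helper of the same name used by the x86 page_table_range_init(). Purely as an illustration (not part of the patch, and assuming the common sh case where the pmd level is folded; the pmd += k adjustment in the next hunk covers 3-level configurations), this is how the helpers wire up the tables covering a single kernel virtual address:

/* Illustrative sketch only -- the real caller is page_table_range_init(). */
static void __init sketch_map_one(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);	/* top-level entry for vaddr */
	pud_t *pud = (pud_t *)pgd;		/* pud level is folded into the pgd on sh */
	pmd_t *pmd = one_md_table_init(pud);	/* allocate a pmd page if none yet */
	pte_t *pte = one_page_table_init(pmd);	/* allocate a pte page if none yet */

	/* pte now points at the first entry of the page table backing this
	 * pmd; actual mappings are installed later, e.g. via __set_fixmap(). */
	(void)pte;
}
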
@@ -144,19 +174,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
pud = (pud_t *)pgd;
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
-#ifdef __PAGETABLE_PMD_FOLDED
- pmd = (pmd_t *)pud;
-#else
- pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
- pud_populate(&init_mm, pud, pmd);
+ pmd = one_md_table_init(pud);
+#ifndef __PAGETABLE_PMD_FOLDED
pmd += k;
#endif
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, pte);
- BUG_ON(pte != pte_offset_kernel(pmd, 0));
- }
+ pte = page_table_kmap_check(one_page_table_init(pmd),
+ pmd, vaddr, pte);
vaddr += PMD_SIZE;
}
k = 0;
@@ -200,7 +224,6 @@ static void __init bootmem_init_one_node(unsigned int nid)
unsigned long total_pages, paddr;
unsigned long end_pfn;
struct pglist_data *p;
- int i;
p = NODE_DATA(nid);
@@ -226,11 +249,12 @@ static void __init bootmem_init_one_node(unsigned int nid)
* reservations in other nodes.
*/
if (nid == 0) {
+ struct memblock_region *reg;
+
/* Reserve the sections we're already using. */
- for (i = 0; i < memblock.reserved.cnt; i++)
- reserve_bootmem(memblock.reserved.region[i].base,
- memblock_size_bytes(&memblock.reserved, i),
- BOOTMEM_DEFAULT);
+ for_each_memblock(reserved, reg) {
+ reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+ }
}
sparse_memory_present_with_active_regions(nid);
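
The for_each_memblock() iterator replaces the manual index walk over memblock.reserved. As a rough sketch of how the iterator is shaped after the memblock rework this patch builds on (the authoritative definition lives in include/linux/memblock.h and may differ in detail):

#define for_each_memblock(memblock_type, region)				\
	for (region = memblock.memblock_type.regions;				\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)

So the hunk above is equivalent to the removed loop: it visits every reserved region once and hands its base and size straight to reserve_bootmem().
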
@@ -238,13 +262,14 @@ static void __init bootmem_init_one_node(unsigned int nid)
static void __init do_init_bootmem(void)
{
+ struct memblock_region *reg;
int i;
/* Add active regions with valid PFNs. */
- for (i = 0; i < memblock.memory.cnt; i++) {
+ for_each_memblock(memory, reg) {
unsigned long start_pfn, end_pfn;
- start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
- end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+ start_pfn = memblock_region_memory_base_pfn(reg);
+ end_pfn = memblock_region_memory_end_pfn(reg);
__add_active_range(0, start_pfn, end_pfn);
}
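
Similarly, the memory-region loop drops the open-coded shifts in favour of the memblock PFN accessors. A hedged sketch of what those accessors are assumed to do (see include/linux/memblock.h for the real definitions): the base PFN rounds the region start up to a page boundary and the end PFN rounds the region end down, so partial pages at either edge are not reported as usable memory.

static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);		/* first fully-covered page frame */
}

static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);	/* one past the last full frame */
}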