-rw-r--r--  arch/powerpc/kernel/prom.c            | 5
-rw-r--r--  arch/powerpc/kernel/rtas.c            | 4
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c  | 2
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c         | 4
-rw-r--r--  arch/powerpc/mm/pgtable_64.c          | 4
5 files changed, 6 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 099f27e6d1b0..6af05fc1dec9 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -696,10 +696,7 @@ void __init early_init_devtree(void *params)
reserve_crashkernel();
early_reserve_mem();
- /*
- * Ensure that total memory size is page-aligned, because otherwise
- * mark_bootmem() gets upset.
- */
+ /* Ensure that total memory size is page-aligned. */
limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
memblock_enforce_memory_limit(limit);
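
(Not part of the patch: a minimal sketch of the rounding the kept context line performs. ALIGN(x, a) rounds x up to the next multiple of the power-of-two a, so an unaligned memory limit becomes page-aligned before memblock_enforce_memory_limit() trims memblock. The stand-alone macro and the values below are illustrative only.)

/* Illustration only: mirrors the kernel's ALIGN() semantics with a
 * simplified user-space macro; page size and limit are made up. */
#include <stdio.h>

#define EXAMPLE_ALIGN(x, a)   (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned long long page_size = 64 * 1024;        /* 64K pages, common on ppc64 */
	unsigned long long mem_limit = 0x7fff1234ULL;    /* hypothetical unaligned limit */

	/* Rounded up to the next page boundary, like the limit handed to
	 * memblock_enforce_memory_limit() in early_init_devtree(). */
	printf("0x%llx -> 0x%llx\n", mem_limit,
	       EXAMPLE_ALIGN(mem_limit, page_size));
	return 0;
}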
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8b4c857c1421..4af905e81ab0 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1091,8 +1091,8 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
}
/*
- * Call early during boot, before mem init or bootmem, to retrieve the RTAS
- * informations from the device-tree and allocate the RMO buffer for userland
+ * Call early during boot, before mem init, to retrieve the RTAS
+ * information from the device-tree and allocate the RMO buffer for userland
* accesses.
*/
void __init rtas_initialize(void)
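
(Not part of the patch: a generic sketch of pulling a property out of a device-tree node, in the spirit of what rtas_initialize() does for the RTAS node; the function name is hypothetical and error handling is minimal.)

/* Illustration only: generic device-tree lookup pattern, not the actual
 * rtas_initialize() body.  "rtas-size" is a standard RTAS property. */
#include <linux/of.h>
#include <linux/errno.h>
#include <linux/init.h>

static int __init example_read_rtas_size(u32 *out_size)
{
	struct device_node *np;
	int ret;

	np = of_find_node_by_path("/rtas");
	if (!np)
		return -ENODEV;

	ret = of_property_read_u32(np, "rtas-size", out_size);
	of_node_put(np);
	return ret;
}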
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 4fdc27c80f4c..e64868c0b8de 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(kvm_release_hpt);
* kvm_cma_reserve() - reserve area for kvm hash pagetable
*
* This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
+ * called by arch specific code once the memblock allocator
* has been activated and all other subsystems have already allocated/reserved
* memory.
*/
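
(Not part of the patch: a minimal sketch of the general "carve memory out of memblock once it is up" pattern the comment refers to. kvm_cma_reserve() itself goes through CMA, and the fraction, alignment and names below are made up; memblock_phys_alloc() is the current interface, while trees of this patch's vintage used memblock_alloc() returning a physical address.)

/* Illustration only: generic early reservation from memblock, not the
 * CMA-backed path kvm_cma_reserve() uses.  All names are hypothetical. */
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/init.h>

#define EXAMPLE_HPT_FRACTION	16	/* reserve 1/16 of RAM, made up */

static phys_addr_t __init example_reserve_hpt(void)
{
	phys_addr_t size = memblock_phys_mem_size() / EXAMPLE_HPT_FRACTION;

	/* memblock must already be active, as the comment above says.
	 * Returns 0 if the allocation fails. */
	return memblock_phys_alloc(size, SZ_16M);
}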
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8aa04f03fd31..b460e723f0ec 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -276,7 +276,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages. This function is used in early
- * boot before the buddy or bootmem allocator is setup.
+ * boot before the buddy allocator is setup.
*/
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
@@ -399,7 +399,7 @@ void __init reserve_hugetlb_gpages(void)
#else /* !PPC_FSL_BOOK3E */
/* Build list of addresses of gigantic pages. This function is used in early
- * boot before the buddy or bootmem allocator is setup.
+ * boot before the buddy allocator is setup.
*/
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
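
(Not part of the patch: the general shape of the early "remember these gigantic pages" list that add_gpage() maintains, written as a self-contained sketch with hypothetical names and a made-up array size.)

/* Illustration only: simplified version of the gigantic-page bookkeeping
 * pattern; not the kernel's gpage_freearray code. */
#define EXAMPLE_MAX_GPAGES 128

static unsigned long long example_gpage_list[EXAMPLE_MAX_GPAGES];
static unsigned int example_nr_gpages;

static void example_add_gpage(unsigned long long addr,
			      unsigned long long page_size,
			      unsigned long number_of_pages)
{
	if (!addr)
		return;
	/* Record the start address of each gigantic page for later use
	 * by the hugetlb setup code. */
	while (number_of_pages > 0 && example_nr_gpages < EXAMPLE_MAX_GPAGES) {
		example_gpage_list[example_nr_gpages++] = addr;
		addr += page_size;
		number_of_pages--;
	}
}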
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index cdb19ab859d3..aa9173745cf4 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -109,10 +109,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
__pgprot(flags)));
} else {
#ifdef CONFIG_PPC_MMU_NOHASH
- /* Warning ! This will blow up if bootmem is not initialized
- * which our ppc64 code is keen to do that, we'll need to
- * fix it and/or be more careful
- */
pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
if (pgd_none(*pgdp)) {
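
(Not part of the patch: a read-only walk of the kernel page tables for a virtual address, in the four-level pgd/pud/pmd/pte layout this file used at the time; modern trees insert a p4d level. Unlike map_kernel_page(), it only looks a mapping up and allocates nothing.)

/* Illustration only: look up the kernel PTE for an effective address.
 * The function name is hypothetical. */
#include <linux/mm.h>

static pte_t *example_lookup_kernel_pte(unsigned long ea)
{
	pgd_t *pgdp = pgd_offset_k(ea);
	pud_t *pudp;
	pmd_t *pmdp;

	if (pgd_none(*pgdp))
		return NULL;
	pudp = pud_offset(pgdp, ea);
	if (pud_none(*pudp))
		return NULL;
	pmdp = pmd_offset(pudp, ea);
	if (pmd_none(*pmdp))
		return NULL;
	return pte_offset_kernel(pmdp, ea);
}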