author     Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>	2019-04-17 18:33:50 +0530
committer  Michael Ellerman <mpe@ellerman.id.au>	2019-04-21 23:12:39 +1000
commit     701101865f5d3e268281ce7a254eb4a97d16cbdc (patch)
tree       91f8229331ca06eb7448fe9bc22d3c69c8f2ff03 /arch/powerpc/mm/mmu_context_book3s64.c
parent     powerpc/mm: Move slb_addr_limit to early_init_mmu (diff)
powerpc/mm: Reduce memory usage for mm_context_t for radix
Currently, our mm_context_t on book3s64 includes all hash-specific context details, such as the slice mask and subpage protection details. We can skip allocating these with radix translation, which saves 8K per mm_context.

With the patch applied we have:

    sizeof(mm_context_t)           = 136
    sizeof(struct hash_mm_context) = 8288

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
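To make the idea concrete, below is a minimal, self-contained userspace C sketch of the allocate-on-demand pattern the patch applies. The struct contents, field sizes, and function names are illustrative stand-ins, not the kernel's actual definitions.

/*
 * Minimal userspace sketch (not kernel code) of the allocate-on-demand
 * pattern: keep the large hash-specific state behind a pointer and only
 * allocate it when hash translation is in use, so a radix mm pays for
 * one pointer instead of ~8K of embedded state.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct hash_mm_context {
	/* stand-in for slice masks, subpage protection, etc. */
	unsigned char slice_and_subpage_state[8192];
};

typedef struct {
	int id;
	/* NULL when the mm runs with radix translation */
	struct hash_mm_context *hash_context;
} mm_context_t;

static int init_new_context(mm_context_t *ctx, bool radix)
{
	ctx->id = 1;
	if (radix) {
		ctx->hash_context = NULL;	/* nothing extra to allocate */
		return 0;
	}
	ctx->hash_context = calloc(1, sizeof(*ctx->hash_context));
	if (!ctx->hash_context)
		return -1;			/* the kernel returns -ENOMEM here */
	return 0;
}

static void destroy_context(mm_context_t *ctx)
{
	free(ctx->hash_context);		/* free(NULL) is a no-op, like kfree(NULL) */
	ctx->hash_context = NULL;
}

int main(void)
{
	mm_context_t radix_mm, hash_mm;

	init_new_context(&radix_mm, true);
	init_new_context(&hash_mm, false);

	printf("sizeof(mm_context_t) sketch           = %zu\n", sizeof(mm_context_t));
	printf("sizeof(struct hash_mm_context) sketch = %zu\n", sizeof(struct hash_mm_context));

	destroy_context(&hash_mm);
	destroy_context(&radix_mm);
	return 0;
}

The trade-off is one extra allocation and pointer dereference on the hash path in exchange for radix tasks not carrying the hash-only state at all.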
Diffstat (limited to '')
-rw-r--r--	arch/powerpc/mm/mmu_context_book3s64.c	16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index f720c5cc0b5e..6eef5a36b2e9 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -63,6 +63,12 @@ static int hash__init_new_context(struct mm_struct *mm)
 	if (index < 0)
 		return index;
 
+	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), GFP_KERNEL);
+	if (!mm->context.hash_context) {
+		ida_free(&mmu_context_ida, index);
+		return -ENOMEM;
+	}
+
 	/*
 	 * The old code would re-promote on fork, we don't do that when using
 	 * slices as it could cause problem promoting slices that have been
@@ -77,8 +83,14 @@ static int hash__init_new_context(struct mm_struct *mm)
 	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
-	if (mm->context.id == 0)
+	if (mm->context.id == 0) {
+		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
 		slice_init_new_context_exec(mm);
+	} else {
+		/* This is fork. Copy hash_context details from current->mm */
+		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
+
+	}
 
 	subpage_prot_init_new_context(mm);
@@ -118,6 +130,7 @@ static int radix__init_new_context(struct mm_struct *mm)
 	asm volatile("ptesync;isync" : : : "memory");
 
 	mm->context.npu_context = NULL;
+	mm->context.hash_context = NULL;
 
 	return index;
 }
@@ -162,6 +175,7 @@ static void destroy_contexts(mm_context_t *ctx)
 		if (context_id)
 			ida_free(&mmu_context_ida, context_id);
 	}
+	kfree(ctx->hash_context);
 }
 
 static void pmd_frag_destroy(void *pmd_frag)