path: root/arch/powerpc/mm/subpage-prot.c
author     Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>  2019-04-17 18:33:51 +0530
committer  Michael Ellerman <mpe@ellerman.id.au>          2019-04-21 23:12:39 +1000
commit     ef629cc5bf0543eb57d6e344ba776880ac35fd00 (patch)
tree       3713d40f241169a87dae20faae74ee4e744701ca /arch/powerpc/mm/subpage-prot.c
parent     powerpc/mm: Reduce memory usage for mm_context_t for radix (diff)
download   linux-dev-ef629cc5bf0543eb57d6e344ba776880ac35fd00.tar.xz
           linux-dev-ef629cc5bf0543eb57d6e344ba776880ac35fd00.zip
powerpc/mm/hash: Reduce hash_mm_context size
Allocate the subpage-protection related variables only if the feature is actually used. This reduces the hash-related mm context struct by around 4K:

Before the patch: sizeof(struct hash_mm_context) = 8288
After the patch:  sizeof(struct hash_mm_context) = 4160

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
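The saving comes from no longer embedding the roughly 4K subpage protection table in every hash_mm_context, and instead keeping only a pointer that is allocated on first use of the subpage_prot() syscall. A minimal userspace sketch of that trade-off follows; the struct layouts, field names and array sizes are illustrative approximations, not the kernel's exact definitions.

/*
 * Sketch: embedded table vs. lazily allocated pointer.
 * Layouts approximate the kernel structures, for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct subpage_prot_table {
	unsigned long maxaddr;
	uint32_t **protptrs[512];	/* roughly 4K of pointers on 64-bit */
	uint32_t *low_prot[4];
};

struct hash_mm_context_embedded {	/* before the patch */
	struct subpage_prot_table spt;	/* every mm pays for this */
	/* ... other hash MMU state ... */
};

struct hash_mm_context_lazy {		/* after the patch */
	struct subpage_prot_table *spt;	/* NULL until subpage_prot() is used */
	/* ... other hash MMU state ... */
};

int main(void)
{
	printf("embedded table: %zu bytes in every context\n",
	       sizeof(struct hash_mm_context_embedded));
	printf("lazy pointer:   %zu bytes until the feature is used\n",
	       sizeof(struct hash_mm_context_lazy));

	/* First use: allocate and publish the table (kzalloc() in the kernel). */
	struct hash_mm_context_lazy ctx = { 0 };
	if (!ctx.spt)
		ctx.spt = calloc(1, sizeof(*ctx.spt));

	/* Teardown must now tolerate contexts that never used the feature. */
	free(ctx.spt);	/* free(NULL) is a no-op, like the !spt early return */
	return 0;
}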
Diffstat (limited to 'arch/powerpc/mm/subpage-prot.c')
-rw-r--r--  arch/powerpc/mm/subpage-prot.c  28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index c72252542210..c9dff4e1f295 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -29,6 +29,9 @@ void subpage_prot_free(struct mm_struct *mm)
 	unsigned long i, j, addr;
 	u32 **p;
 
+	if (!spt)
+		return;
+
 	for (i = 0; i < 4; ++i) {
 		if (spt->low_prot[i]) {
 			free_page((unsigned long)spt->low_prot[i]);
@@ -48,13 +51,7 @@ void subpage_prot_free(struct mm_struct *mm)
 		free_page((unsigned long)p);
 	}
 	spt->maxaddr = 0;
-}
-
-void subpage_prot_init_new_context(struct mm_struct *mm)
-{
-	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
-
-	memset(spt, 0, sizeof(*spt));
+	kfree(spt);
 }
 
 static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
@@ -99,6 +96,9 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
 	size_t nw;
 	unsigned long next, limit;
 
+	if (!spt)
+		return;
+
 	down_write(&mm->mmap_sem);
 	limit = addr + len;
 	if (limit > spt->maxaddr)
@@ -218,6 +218,20 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
 		return -EFAULT;
 
 	down_write(&mm->mmap_sem);
+
+	if (!spt) {
+		/*
+		 * Allocate subpage prot table if not already done.
+		 * Do this with mmap_sem held
+		 */
+		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
+		if (!spt) {
+			err = -ENOMEM;
+			goto out;
+		}
+		mm->context.hash_context->spt = spt;
+	}
+
 	subpage_mark_vma_nohuge(mm, addr, len);
 	for (limit = addr + len; addr < limit; addr = next) {
 		next = pmd_addr_end(addr, limit);
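For context on the syscall the last hunk touches, a hypothetical userspace caller might look like the sketch below; with this patch applied, the first such call in a process is what triggers the kzalloc() of the table. This is powerpc-only code and assumes a 64K-page hash MMU kernel; the 2-bits-per-4K-subpage map encoding and the bit ordering are assumptions based on the powerpc subpage-protection documentation, so treat it purely as illustration.

/* Hypothetical subpage_prot() caller; powerpc with 64K base pages only. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

int main(void)
{
	size_t len = 64 * 1024;			/* one 64K page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/*
	 * One u32 of protection bits per 64K page, 2 bits per 4K subpage;
	 * assumed encoding: 0 = full access, 3 = no access, subpage 0 in
	 * the top bits.
	 */
	unsigned int map[1] = { 0x3U << 30 };	/* deny the first 4K subpage */

	/* The first call in this mm now triggers the lazy table allocation. */
	long rc = syscall(__NR_subpage_prot, (unsigned long)p, len, map);
	printf("subpage_prot: %ld\n", rc);

	/* Passing a NULL map clears subpage protection over the range. */
	syscall(__NR_subpage_prot, (unsigned long)p, len, NULL);
	munmap(p, len);
	return 0;
}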