/*
 * This file contains common routines for dealing with free of page tables
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

/* Per-CPU batch of page tables waiting for a deferred (RCU) free. */
static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;

struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];	/* zero-length array; the entries fill
					 * the rest of the page this batch
					 * occupies */
};

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))

static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	/* Interrupt every other CPU so none can still be in the middle
	 * of walking the page table we are about to free. */
	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(pgf);
}

/* RCU callback: a grace period has elapsed, so no CPU can still be
 * traversing these page tables; free every queued entry and then the
 * batch page itself. */
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

/* Hand a full (or final) batch to RCU for freeing after a grace period. */
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	/* If no other CPU can be using this mm, the page table can be
	 * freed immediately; otherwise queue it on the per-CPU batch. */
	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/* Called at the end of a TLB flush sequence to push out any partially
 * filled batch left on this CPU. */
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;

	pte_free_submit(*batchp);
	*batchp = NULL;
}
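
#if 0	/* ------------------------------------------------------------------
 * A minimal, self-contained userspace sketch of the batching pattern used
 * above, guarded out of any real build: a page-sized allocation whose tail
 * is a flexible array of pending items, flushed to a deferred-free routine
 * once full.  All demo_* names are illustrative assumptions, not kernel
 * API, and a direct call stands in for the RCU grace period.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE	4096UL

struct demo_batch {
	unsigned int index;
	void *tables[];		/* C99 flexible array, like tables[0] above */
};

/* Same sizing idea as PTE_FREELIST_SIZE: however many pointers fit in
 * the remainder of one page after the batch header. */
#define DEMO_BATCH_SIZE \
	((DEMO_PAGE_SIZE - sizeof(struct demo_batch)) / sizeof(void *))

/* Stand-in for pte_free_submit(): "defer" by freeing immediately. */
static void demo_submit(struct demo_batch *batch)
{
	for (unsigned int i = 0; i < batch->index; i++)
		free(batch->tables[i]);
	free(batch);
}

int main(void)
{
	struct demo_batch *batch = malloc(DEMO_PAGE_SIZE);
	unsigned long n;

	if (!batch)
		return 1;
	batch->index = 0;

	/* Queue items, flushing whenever the page-sized batch fills,
	 * as pgtable_free_tlb() does. */
	for (n = 0; n < 1000; n++) {
		batch->tables[batch->index++] = malloc(64);
		if (batch->index == DEMO_BATCH_SIZE) {
			demo_submit(batch);
			batch = malloc(DEMO_PAGE_SIZE);
			if (!batch)
				return 1;
			batch->index = 0;
		}
	}
	demo_submit(batch);	/* flush the partial batch, like pte_free_finish() */

	printf("capacity: %lu entries per %lu-byte batch page\n",
	       (unsigned long)DEMO_BATCH_SIZE, DEMO_PAGE_SIZE);
	return 0;
}
#endif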