author     Thomas Gleixner <tglx@linutronix.de>  2008-03-09 13:14:37 +0100
committer  Ingo Molnar <mingo@elte.hu>            2008-03-11 17:11:55 +0100
commit     985a34bd75cc8c96e43f00dcdda7c3fdb51a3026 (patch)
tree       86418dbb69daebb1d96818318e45fa3e427834f6 /arch
parent     x86: ia32 syscall restart fix (diff)
x86: remove quicklists
quicklists cause a serious memory leak on 32-bit x86, as documented at:

    http://bugzilla.kernel.org/show_bug.cgi?id=9991

the reason is that the quicklist pool is a special-purpose cache that grows
out of proportion. It is not accounted for anywhere and users have no way to
even realize that it's the quicklists that are causing RAM usage spikes.

It was supposed to be a relatively small pool, but as demonstrated by
KOSAKI Motohiro, they can grow as large as:

    Quicklists:    1194304 kB

given how much trouble this code has caused historically, and given that
Andrew objected to its introduction on x86 (years ago), the best option at
this point is to remove them.

[ any performance benefits of caching constructed pgds should be implemented
  in a more generic way (possibly within the page allocator), while still
  allowing constructed pages to be allocated by other workloads. ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
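For context, the 32-bit code touched below was the last x86 user of the quicklist
helpers. The following is a sketch of the pre-patch pgd path, pieced together from
the lines the hunks below remove; the trailing return statement and the quicklist_*
behaviour described in the comments are recalled from the kernel of that era and
should be read as assumptions, not as part of this patch:

	/*
	 * Pre-patch arch/x86/mm/pgtable_32.c, reconstructed from the removed
	 * lines below.  Comments on quicklist behaviour are an assumption
	 * based on the generic quicklist code of that time, not quoted here.
	 */
	pgd_t *pgd_alloc(struct mm_struct *mm)
	{
		/* Take a page from per-CPU quicklist 0; pgd_ctor() only runs
		 * for pages that come fresh from the page allocator, recycled
		 * pages are assumed to be constructed already. */
		pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

		mm->pgd = pgd;	/* so that alloc_pd can use it */
		if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
			quicklist_free(0, pgd_dtor, pgd);
			pgd = NULL;
		}
		return pgd;	/* from context not shown in the hunk */
	}

	void pgd_free(struct mm_struct *mm, pgd_t *pgd)
	{
		pgd_mop_up_pmds(mm, pgd);
		/* The page is parked on the per-CPU list, still constructed;
		 * it is neither returned to the page allocator nor accounted
		 * as free anywhere visible to the user. */
		quicklist_free(0, pgd_dtor, pgd);
	}

	void check_pgt_cache(void)
	{
		/* The only point where cached pages go back to the system:
		 * trim the per-CPU list toward 25 pages, at most 16 per call.
		 * If this does not run often enough, the cache keeps growing
		 * unaccounted -- the leak the changelog describes. */
		quicklist_trim(0, pgd_dtor, 25, 16);
	}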
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/Kconfig           3
-rw-r--r--  arch/x86/mm/pgtable_32.c  18
2 files changed, 9 insertions(+), 12 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f41c9538ca30..237fc128143d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -66,9 +66,6 @@ config MMU
config ZONE_DMA
def_bool y
-config QUICKLIST
- def_bool X86_32
-
config SBUS
bool
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 73aba7125203..2f9e9afcb9f4 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -342,12 +342,16 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
+ pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- mm->pgd = pgd; /* so that alloc_pd can use it */
+ /* so that alloc_pd can use it */
+ mm->pgd = pgd;
+ if (pgd)
+ pgd_ctor(pgd);
if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
- quicklist_free(0, pgd_dtor, pgd);
+ pgd_dtor(pgd);
+ free_page((unsigned long)pgd);
pgd = NULL;
}
@@ -357,12 +361,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
pgd_mop_up_pmds(mm, pgd);
- quicklist_free(0, pgd_dtor, pgd);
-}
-
-void check_pgt_cache(void)
-{
- quicklist_trim(0, pgd_dtor, 25, 16);
+ pgd_dtor(pgd);
+ free_page((unsigned long)pgd);
}
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
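
Applied together, the two pgtable_32.c hunks leave the pgd path allocating and
freeing directly through the page allocator. Roughly, the result reads as follows
(reconstructed from the hunks above; the trailing return statement again comes
from context the hunks do not show):

	pgd_t *pgd_alloc(struct mm_struct *mm)
	{
		/* One zeroed page straight from the page allocator, no quicklist. */
		pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

		/* so that alloc_pd can use it */
		mm->pgd = pgd;
		if (pgd)
			pgd_ctor(pgd);	/* construct the fresh pgd explicitly */
		if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
			pgd_dtor(pgd);
			free_page((unsigned long)pgd);
			pgd = NULL;
		}
		return pgd;
	}

	void pgd_free(struct mm_struct *mm, pgd_t *pgd)
	{
		pgd_mop_up_pmds(mm, pgd);
		pgd_dtor(pgd);
		/* Straight back to the buddy allocator; check_pgt_cache() and
		 * the per-CPU cache it used to trim are gone entirely. */
		free_page((unsigned long)pgd);
	}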