author		art <art@openbsd.org>	2002-01-23 00:39:46 +0000
committer	art <art@openbsd.org>	2002-01-23 00:39:46 +0000
commit		c5790e3dc873e45d340e291e969620ee2c2b5833 (patch)
tree		2960fda2237ce91790ef97f7088167ac23b1b81e	/sys/kern/subr_pool.c
parent		open for writing only if needed so, accept multiple var assignments for -w; from meself and Vladimir Popov <jumbo@narod.ru> (diff)
Pool deals fairly well with physical memory shortage, but it doesn't deal
well (not at all) with shortages of the vm_map where the pages are mapped
(usually kmem_map). Try to deal with it:

 - Group all information about the backend allocator for a pool in a
   separate struct; the pool only keeps a pointer to that struct (see the
   sketch below).
 - Change the pool_init API to reflect that.
 - Link all pools allocating from the same allocator on a linked list.
 - Since an allocator is responsible for waiting for physical memory, it
   will only fail (waitok) when it runs out of its backing vm_map;
   carefully drain pools using the same allocator so that va space is
   freed. (See comments in the code for caveats and details.)
 - Change pool_reclaim to return whether it actually succeeded in freeing
   some memory, and use that information to make draining easier and more
   efficient.
 - Get rid of PR_URGENT; no one uses it.
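The matching sys/pool.h change is not shown in this diff. A minimal sketch of
the new backend-allocator struct, with the layout inferred from how the fields
are used in subr_pool.c below (so treat the exact ordering and the flag values
as assumptions):

struct pool_allocator {
	void		*(*pa_alloc)(struct pool *, int);  /* grab a page */
	void		(*pa_free)(struct pool *, void *); /* release a page */
	int		pa_pagesz;	/* 0 means PAGE_SIZE */

	/* The rest is internal, filled in lazily by pool_init(). */
	struct simplelock pa_slock;
	TAILQ_HEAD(, pool) pa_list;	/* all pools using this allocator */
	int		pa_flags;
#define	PA_INITIALIZED	0x01
#define	PA_WANT		0x02		/* wakeup needed when pages return */
	int		pa_pagemask;
	int		pa_pageshift;
};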
Diffstat (limited to 'sys/kern/subr_pool.c')
-rw-r--r--	sys/kern/subr_pool.c	308
1 file changed, 206 insertions(+), 102 deletions(-)
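Every pool_init caller changes with this commit: the pagesz, alloc, release
and mtype arguments collapse into a single struct pool_allocator pointer,
where NULL selects the default pool_allocator_kmem. For a hypothetical pool
"foopl" (not a caller touched in this file) the conversion looks like:

	/* Before: page size, backend hooks and malloc type passed directly. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
	    0, NULL, NULL, 0);

	/* After: NULL picks pool_allocator_kmem, the kmem_map-backed default. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);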
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index 18d1ca0a1a4..f8a42136896 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.19 2002/01/10 18:56:03 art Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.20 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -109,7 +109,7 @@ struct pool_item {
};
#define PR_HASH_INDEX(pp,addr) \
- (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
+ (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & (PR_HASHTABSIZE - 1))
#define POOL_NEEDS_CATCHUP(pp) \
((pp)->pr_nitems < (pp)->pr_minitems)
@@ -164,8 +164,9 @@ static void pool_cache_reclaim(struct pool_cache *);
static int pool_catchup(struct pool *);
static void pool_prime_page(struct pool *, caddr_t,
struct pool_item_header *);
-static void *pool_page_alloc(unsigned long, int, int);
-static void pool_page_free(void *, unsigned long, int);
+
+void *pool_allocator_alloc(struct pool *, int);
+void pool_allocator_free(struct pool *, void *);
static void pool_print1(struct pool *, const char *,
int (*)(const char *, ...));
@@ -339,7 +340,7 @@ pr_rmpage(struct pool *pp, struct pool_item_header *ph,
if (pq) {
TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
} else {
- (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, ph->ph_page);
if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
LIST_REMOVE(ph, ph_hashlist);
s = splhigh();
@@ -372,10 +373,7 @@ pr_rmpage(struct pool *pp, struct pool_item_header *ph,
*/
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
- const char *wchan, size_t pagesz,
- void *(*alloc)(unsigned long, int, int),
- void (*release)(void *, unsigned long, int),
- int mtype)
+ const char *wchan, struct pool_allocator *palloc)
{
int off, slack, i;
@@ -390,20 +388,19 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
/*
* Check arguments and construct default values.
*/
- if (!powerof2(pagesz))
- panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
-
- if (alloc == NULL && release == NULL) {
- alloc = pool_page_alloc;
- release = pool_page_free;
- pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */
- } else if ((alloc != NULL && release != NULL) == 0) {
- /* If you specifiy one, must specify both. */
- panic("pool_init: must specify alloc and release together");
+ if (palloc == NULL)
+ palloc = &pool_allocator_kmem;
+ if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+ if (palloc->pa_pagesz == 0)
+ palloc->pa_pagesz = PAGE_SIZE;
+
+ TAILQ_INIT(&palloc->pa_list);
+
+ simple_lock_init(&palloc->pa_slock);
+ palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
+ palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
+ palloc->pa_flags |= PA_INITIALIZED;
}
-
- if (pagesz == 0)
- pagesz = PAGE_SIZE;
if (align == 0)
align = ALIGN(1);
@@ -412,9 +409,11 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
size = sizeof(struct pool_item);
size = ALIGN(size);
- if (size > pagesz)
+#ifdef DIAGNOSTIC
+ if (size > palloc->pa_pagesz)
panic("pool_init: pool item size (%lu) too large",
(u_long)size);
+#endif
/*
* Initialize the pool structure.
@@ -431,12 +430,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pp->pr_size = size;
pp->pr_align = align;
pp->pr_wchan = wchan;
- pp->pr_mtype = mtype;
- pp->pr_alloc = alloc;
- pp->pr_free = release;
- pp->pr_pagesz = pagesz;
- pp->pr_pagemask = ~(pagesz - 1);
- pp->pr_pageshift = ffs(pagesz) - 1;
+ pp->pr_alloc = palloc;
pp->pr_nitems = 0;
pp->pr_nout = 0;
pp->pr_hardlimit = UINT_MAX;
@@ -456,15 +450,15 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
* with its header based on the page address.
* We use 1/16 of the page size as the threshold (XXX: tune)
*/
- if (pp->pr_size < pagesz/16) {
+ if (pp->pr_size < palloc->pa_pagesz/16) {
/* Use the end of the page for the page header */
pp->pr_roflags |= PR_PHINPAGE;
pp->pr_phoffset = off =
- pagesz - ALIGN(sizeof(struct pool_item_header));
+ palloc->pa_pagesz - ALIGN(sizeof(struct pool_item_header));
} else {
/* The page header will be taken from our page header pool */
pp->pr_phoffset = 0;
- off = pagesz;
+ off = palloc->pa_pagesz;
for (i = 0; i < PR_HASHTABSIZE; i++) {
LIST_INIT(&pp->pr_hashtab[i]);
}
@@ -520,15 +514,20 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
*/
if (phpool.pr_size == 0) {
pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
- 0, "phpool", 0, 0, 0, 0);
+ 0, "phpool", NULL);
pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
- 0, "pcgpool", 0, 0, 0, 0);
+ 0, "pcgpool", NULL);
}
/* Insert into the list of all pools. */
simple_lock(&pool_head_slock);
TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
simple_unlock(&pool_head_slock);
+
+ /* Insert into the list of pools using this allocator. */
+ simple_lock(&palloc->pa_slock);
+ TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
+ simple_unlock(&palloc->pa_slock);
}
/*
@@ -540,6 +539,13 @@ pool_destroy(struct pool *pp)
struct pool_item_header *ph;
struct pool_cache *pc;
+ /*
+ * Locking order: pool_allocator -> pool
+ */
+ simple_lock(&pp->pr_alloc->pa_slock);
+ TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
+ simple_unlock(&pp->pr_alloc->pa_slock);
+
/* Destroy all caches for this pool. */
while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
pool_cache_destroy(pc);
@@ -661,9 +667,6 @@ pool_get(struct pool *pp, int flags)
&pp->pr_hardlimit_ratecap))
log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
- if (flags & PR_URGENT)
- panic("pool_get: urgent");
-
pp->pr_nfail++;
pr_leave(pp);
@@ -694,7 +697,7 @@ pool_get(struct pool *pp, int flags)
*/
pr_leave(pp);
simple_unlock(&pp->pr_slock);
- v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
+ v = pool_allocator_alloc(pp, flags);
if (__predict_true(v != NULL))
ph = pool_alloc_item_header(pp, v, flags);
simple_lock(&pp->pr_slock);
@@ -702,7 +705,7 @@ pool_get(struct pool *pp, int flags)
if (__predict_false(v == NULL || ph == NULL)) {
if (v != NULL)
- (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, v);
/*
* We were unable to allocate a page or item
@@ -713,9 +716,6 @@ pool_get(struct pool *pp, int flags)
if (pp->pr_curpage != NULL)
goto startover;
- if (flags & PR_URGENT)
- panic("pool_get: urgent");
-
if ((flags & PR_WAITOK) == 0) {
pp->pr_nfail++;
pr_leave(pp);
@@ -726,15 +726,11 @@ pool_get(struct pool *pp, int flags)
/*
* Wait for items to be returned to this pool.
*
- * XXX: we actually want to wait just until
- * the page allocator has memory again. Depending
- * on this pool's usage, we might get stuck here
- * for a long time.
- *
* XXX: maybe we should wake up once a second and
* try again?
*/
pp->pr_flags |= PR_WANTED;
+ /* PA_WANT is already set on the allocator. */
pr_leave(pp);
ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
pr_enter(pp, file, line);
@@ -852,7 +848,7 @@ pool_do_put(struct pool *pp, void *v)
LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
- page = (caddr_t)((u_long)v & pp->pr_pagemask);
+ page = (caddr_t)((vaddr_t)v & pp->pr_alloc->pa_pagemask);
#ifdef DIAGNOSTIC
if (__predict_false(pp->pr_nout == 0)) {
@@ -1020,7 +1016,7 @@ pool_prime(struct pool *pp, int n)
while (newpages-- > 0) {
simple_unlock(&pp->pr_slock);
- cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+ cp = pool_allocator_alloc(pp, PR_NOWAIT);
if (__predict_true(cp != NULL))
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
simple_lock(&pp->pr_slock);
@@ -1028,7 +1024,7 @@ pool_prime(struct pool *pp, int n)
if (__predict_false(cp == NULL || ph == NULL)) {
error = ENOMEM;
if (cp != NULL)
- (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, cp);
break;
}
@@ -1058,8 +1054,10 @@ pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
unsigned int ioff = pp->pr_itemoffset;
int n;
- if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
+#ifdef DIAGNOSTIC
+ if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
+#endif
if ((pp->pr_roflags & PR_PHINPAGE) == 0)
LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
@@ -1154,13 +1152,13 @@ pool_catchup(struct pool *pp)
* the pool descriptor?
*/
simple_unlock(&pp->pr_slock);
- cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+ cp = pool_allocator_alloc(pp, PR_NOWAIT);
if (__predict_true(cp != NULL))
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
simple_lock(&pp->pr_slock);
if (__predict_false(cp == NULL || ph == NULL)) {
if (cp != NULL)
- (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, cp);
error = ENOMEM;
break;
}
@@ -1232,48 +1230,11 @@ pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
}
/*
- * Default page allocator.
- */
-static void *
-pool_page_alloc(unsigned long sz, int flags, int mtype)
-{
- boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
-
- return ((void *)uvm_km_alloc_poolpage(waitok));
-}
-
-static void
-pool_page_free(void *v, unsigned long sz, int mtype)
-{
-
- uvm_km_free_poolpage((vaddr_t)v);
-}
-
-/*
- * Alternate pool page allocator for pools that know they will
- * never be accessed in interrupt context.
- */
-void *
-pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
-{
- boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
-
- return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
- waitok));
-}
-
-void
-pool_page_free_nointr(void *v, unsigned long sz, int mtype)
-{
-
- uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
-}
-
-
-/*
* Release all complete pages that have not been used recently.
+ *
+ * Returns non-zero if any pages have been reclaimed.
*/
-void
+int
#ifdef POOL_DIAGNOSTIC
_pool_reclaim(struct pool *pp, const char *file, long line)
#else
@@ -1287,10 +1248,10 @@ pool_reclaim(struct pool *pp)
int s;
if (pp->pr_roflags & PR_STATIC)
- return;
+ return 0;
if (simple_lock_try(&pp->pr_slock) == 0)
- return;
+ return 0;
pr_enter(pp, file, line);
TAILQ_INIT(&pq);
@@ -1332,11 +1293,11 @@ pool_reclaim(struct pool *pp)
pr_leave(pp);
simple_unlock(&pp->pr_slock);
if (TAILQ_EMPTY(&pq)) {
- return;
+ return 0;
}
while ((ph = TAILQ_FIRST(&pq)) != NULL) {
TAILQ_REMOVE(&pq, ph, ph_pagelist);
- (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, ph->ph_page);
if (pp->pr_roflags & PR_PHINPAGE) {
continue;
}
@@ -1345,6 +1306,8 @@ pool_reclaim(struct pool *pp)
pool_put(&phpool, ph);
splx(s);
}
+
+ return 1;
}
@@ -1374,7 +1337,6 @@ pool_drain(void *arg)
splx(s);
}
-
/*
* Diagnostic helpers.
*/
@@ -1420,8 +1382,7 @@ pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
pp->pr_roflags);
- (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
- (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
+ (*pr)("\talloc %p\n", pp->pr_alloc);
(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
@@ -1502,7 +1463,7 @@ pool_chk(struct pool *pp, const char *label)
int n;
caddr_t page;
- page = (caddr_t)((u_long)ph & pp->pr_pagemask);
+ page = (caddr_t)((vaddr_t)ph & pp->pr_alloc->pa_pagemask);
if (page != ph->ph_page &&
(pp->pr_roflags & PR_PHINPAGE) != 0) {
if (label != NULL)
@@ -1531,7 +1492,7 @@ pool_chk(struct pool *pp, const char *label)
panic("pool");
}
#endif
- page = (caddr_t)((u_long)pi & pp->pr_pagemask);
+ page = (caddr_t)((vaddr_t)pi & pp->pr_alloc->pa_pagemask);
if (page == ph->ph_page)
continue;
@@ -1899,3 +1860,146 @@ sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
/* NOTREACHED */
return (0); /* XXX - Stupid gcc */
}
+
+/*
+ * Pool backend allocators.
+ *
+ * Each pool has a backend allocator that handles allocation, deallocation
+ * and any additional draining that might be needed.
+ *
+ * We provide two standard allocators.
+ * pool_allocator_kmem - the default used when no allocator is specified.
+ * pool_allocator_nointr - used for pools that will not be accessed in
+ * interrupt context.
+ */
+void *pool_page_alloc(struct pool *, int);
+void pool_page_free(struct pool *, void *);
+void *pool_page_alloc_nointr(struct pool *, int);
+void pool_page_free_nointr(struct pool *, void *);
+
+struct pool_allocator pool_allocator_kmem = {
+ pool_page_alloc, pool_page_free, 0,
+};
+struct pool_allocator pool_allocator_nointr = {
+ pool_page_alloc_nointr, pool_page_free_nointr, 0,
+};
+
+/*
+ * XXX - we have at least three different resources for the same allocation
+ * and each resource can be depleted. First we have the ready elements in
+ * the pool. Then we have the resource (typically a vm_map) for this
+ * allocator, then we have physical memory. Waiting for any of these can
+ * be unnecessary when any other is freed, but the kernel doesn't support
+ * sleeping on multiple addresses, so we have to fake it. The caller sleeps
+ * on the pool (so that we can be awakened when an item is returned to the
+ * pool), but we set PA_WANT on the allocator. When a page is returned to
+ * the allocator and PA_WANT is set, pool_allocator_free will wake up all
+ * sleeping pools belonging to this allocator. (XXX - thundering herd).
+ */
+
+void *
+pool_allocator_alloc(struct pool *org, int flags)
+{
+ struct pool_allocator *pa = org->pr_alloc;
+ struct pool *pp, *start;
+ int s, freed;
+ void *res;
+
+ do {
+ if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
+ return (res);
+ if ((flags & PR_WAITOK) == 0)
+ break;
+
+ /*
+ * Drain all pools, except 'org', that use this allocator.
+ * We do this to reclaim va space. pa_alloc is responsible
+ * for waiting for physical memory.
+ * XXX - we risk looping forever if someone calls
+ * pool_destroy on 'start'. But there is no other way to
+ * have potentially sleeping pool_reclaim, non-sleeping
+ * locks on pool_allocator and some stirring of drained
+ * pools in the allocator.
+ */
+ freed = 0;
+
+ s = splvm();
+ simple_lock(&pa->pa_slock);
+ pp = start = TAILQ_FIRST(&pa->pa_list);
+ do {
+ TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
+ TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
+ if (pp == org)
+ continue;
+ simple_unlock(&pa->pa_slock);
+ freed = pool_reclaim(pp);
+ simple_lock(&pa->pa_slock);
+ } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && !freed);
+
+ if (!freed) {
+ /*
+ * We set PA_WANT here, the caller will most likely
+ * sleep waiting for pages (if not, this won't hurt
+ * that much) and there is no way to set this in the
+ * caller without violating locking order.
+ */
+ pa->pa_flags |= PA_WANT;
+ }
+ simple_unlock(&pa->pa_slock);
+ splx(s);
+ } while (freed);
+ return (NULL);
+}
+
+void
+pool_allocator_free(struct pool *pp, void *v)
+{
+ struct pool_allocator *pa = pp->pr_alloc;
+
+ (*pa->pa_free)(pp, v);
+
+ simple_lock(&pa->pa_slock);
+ if ((pa->pa_flags & PA_WANT) == 0) {
+ simple_unlock(&pa->pa_slock);
+ return;
+ }
+
+ TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
+ simple_lock(&pp->pr_slock);
+ if ((pp->pr_flags & PR_WANTED) != 0) {
+ pp->pr_flags &= ~PR_WANTED;
+ wakeup(pp);
+ }
+ simple_unlock(&pp->pr_slock);
+ }
+ pa->pa_flags &= ~PA_WANT;
+ simple_unlock(&pa->pa_slock);
+}
+
+void *
+pool_page_alloc(struct pool *pp, int flags)
+{
+ boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+
+ return ((void *)uvm_km_alloc_poolpage(waitok));
+}
+
+void
+pool_page_free(struct pool *pp, void *v)
+{
+ uvm_km_free_poolpage((vaddr_t)v);
+}
+
+void *
+pool_page_alloc_nointr(struct pool *pp, int flags)
+{
+ boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+
+ return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
+ waitok));
+}
+
+void
+pool_page_free_nointr(struct pool *pp, void *v)
+{
+ uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
+}