author		2016-11-21 01:44:06 +0000
committer	2016-11-21 01:44:06 +0000
commit		ff77302977a2768263ba8b94c6733be6a51bdb42 (patch)
tree		0c7a2a6dec48e23f48af71506950b14115f0756c
parent		Remove some old code that isn't needed anymore. (diff)
let pool page allocators advertise what sizes they can provide.
to keep things concise i let the multi page allocators provide
multiple sizes of pages, but this feature was implicit inside
pool_init and only usable if the caller of pool_init did not specify
a page allocator.
callers of pool_init can now supply a page allocator that provides
multiple page sizes. pool_init will still try to fit 8 items onto a
page, but will scale its page size down until it fits into what the
allocator provides.
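
for example, a caller could now do something like this (a sketch only:
my_page_alloc, my_page_free, my_pool, and struct my_item are
hypothetical placeholders; the pa_pagesz encoding and the pool_init
signature come from this change):

	/* hypothetical backend providing aligned 4k, 8k, and 16k pages */
	void	*my_page_alloc(struct pool *, int, int *);
	void	 my_page_free(struct pool *, void *);

	struct pool_allocator my_allocator = {
		my_page_alloc,
		my_page_free,
		POOL_ALLOC_SIZES(4096, 16384, POOL_ALLOC_ALIGNED)
	};

	struct pool my_pool;

	/* e.g. from the subsystem's init path */
	pool_init(&my_pool, sizeof(struct my_item), 0, IPL_NONE, 0,
	    "myitems", &my_allocator);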
supported page sizes are specified as a bit field in the pa_pagesz
member of a pool_allocator. setting the low bit in that word indicates
that the pages can be aligned to their size.
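
as a worked example (sizes made up for illustration), an allocator
offering aligned 4k through 32k pages would set, using the
POOL_ALLOC_SIZES macro added in the diff below:

	POOL_ALLOC_SIZES(4096, 32768, POOL_ALLOC_ALIGNED)
	    = 0x8000 | ((0x8000 - 1) & ~(0x1000 - 1)) | 1UL
	    = 0x8000 | (0x7fff & ~0x0fff) | 1UL
	    = 0x8000 | 0x7000 | 1UL
	    = 0xf001	/* 32k, 16k, 8k, and 4k pages, aligned */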
-rw-r--r--	sys/kern/subr_pool.c	56
-rw-r--r--	sys/sys/pool.h		34
2 files changed, 72 insertions, 18 deletions
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index 3317cfad2fa..a5f0cfb577a 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: subr_pool.c,v 1.203 2016/11/07 23:45:27 dlg Exp $	*/
+/*	$OpenBSD: subr_pool.c,v 1.204 2016/11/21 01:44:06 dlg Exp $	*/
 /*	$NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $	*/
 
 /*-
@@ -170,7 +170,8 @@ void	pool_page_free(struct pool *, void *);
  */
 struct pool_allocator pool_allocator_single = {
 	pool_page_alloc,
-	pool_page_free
+	pool_page_free,
+	POOL_ALLOC_SIZE(PAGE_SIZE, POOL_ALLOC_ALIGNED)
 };
 
 void	*pool_multi_alloc(struct pool *, int, int *);
@@ -178,7 +179,8 @@ void	pool_multi_free(struct pool *, void *);
 
 struct pool_allocator pool_allocator_multi = {
 	pool_multi_alloc,
-	pool_multi_free
+	pool_multi_free,
+	POOL_ALLOC_SIZES(PAGE_SIZE, (1UL << 31), POOL_ALLOC_ALIGNED)
 };
 
 void	*pool_multi_alloc_ni(struct pool *, int, int *);
@@ -186,7 +188,8 @@ void	pool_multi_free_ni(struct pool *, void *);
 
 struct pool_allocator pool_allocator_multi_ni = {
 	pool_multi_alloc_ni,
-	pool_multi_free_ni
+	pool_multi_free_ni,
+	POOL_ALLOC_SIZES(PAGE_SIZE, (1UL << 31), POOL_ALLOC_ALIGNED)
 };
 
 #ifdef DDB
@@ -264,6 +267,7 @@ pool_init(struct pool *pp, size_t size, u_int align, int ipl, int flags,
 {
 	int off = 0, space;
 	unsigned int pgsize = PAGE_SIZE, items;
+	size_t pa_pagesz;
 #ifdef DIAGNOSTIC
 	struct pool *iter;
 #endif
@@ -276,17 +280,38 @@ pool_init(struct pool *pp, size_t size, u_int align, int ipl, int flags,
 
 	size = roundup(size, align);
 
-	if (palloc == NULL) {
-		while (size * 8 > pgsize)
-			pgsize <<= 1;
+	while (size * 8 > pgsize)
+		pgsize <<= 1;
 
+	if (palloc == NULL) {
 		if (pgsize > PAGE_SIZE) {
 			palloc = ISSET(flags, PR_WAITOK) ?
 			    &pool_allocator_multi_ni : &pool_allocator_multi;
 		} else
 			palloc = &pool_allocator_single;
-	} else
-		pgsize = palloc->pa_pagesz ? palloc->pa_pagesz : PAGE_SIZE;
+
+		pa_pagesz = palloc->pa_pagesz;
+	} else {
+		size_t pgsizes;
+
+		pa_pagesz = palloc->pa_pagesz;
+		if (pa_pagesz == 0)
+			pa_pagesz = POOL_ALLOC_DEFAULT;
+
+		pgsizes = pa_pagesz & ~POOL_ALLOC_ALIGNED;
+
+		/* make sure the allocator can fit at least one item */
+		if (size > pgsizes) {
+			panic("%s: pool %s item size 0x%zx > "
+			    "allocator %p sizes 0x%zx", __func__, wchan,
+			    size, palloc, pgsizes);
+		}
+
+		/* shrink pgsize until it fits into the range */
+		while (!ISSET(pgsizes, pgsize))
+			pgsize >>= 1;
+	}
+	KASSERT(ISSET(pa_pagesz, pgsize));
 
 	items = pgsize / size;
 
@@ -296,11 +321,14 @@ pool_init(struct pool *pp, size_t size, u_int align, int ipl, int flags,
 	 * go into an RB tree, so we can match a returned item with
 	 * its header based on the page address.
 	 */
-	if (pgsize - (size * items) > sizeof(struct pool_page_header)) {
-		off = pgsize - sizeof(struct pool_page_header);
-	} else if (sizeof(struct pool_page_header) * 2 >= size) {
-		off = pgsize - sizeof(struct pool_page_header);
-		items = off / size;
+	if (ISSET(pa_pagesz, POOL_ALLOC_ALIGNED)) {
+		if (pgsize - (size * items) >
+		    sizeof(struct pool_page_header)) {
+			off = pgsize - sizeof(struct pool_page_header);
+		} else if (sizeof(struct pool_page_header) * 2 >= size) {
+			off = pgsize - sizeof(struct pool_page_header);
+			items = off / size;
+		}
 	}
 
 	KASSERT(items > 0);
diff --git a/sys/sys/pool.h b/sys/sys/pool.h
index 6b08e2774af..ce0c881161c 100644
--- a/sys/sys/pool.h
+++ b/sys/sys/pool.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pool.h,v 1.67 2016/11/07 23:45:27 dlg Exp $	*/
+/*	$OpenBSD: pool.h,v 1.68 2016/11/21 01:44:06 dlg Exp $	*/
 /*	$NetBSD: pool.h,v 1.27 2001/06/06 22:00:17 rafal Exp $	*/
 
 /*-
@@ -77,11 +77,37 @@ struct pool_request;
 TAILQ_HEAD(pool_requests, pool_request);
 
 struct pool_allocator {
-	void *(*pa_alloc)(struct pool *, int, int *);
-	void  (*pa_free)(struct pool *, void *);
-	int   pa_pagesz;
+	void	*(*pa_alloc)(struct pool *, int, int *);
+	void	 (*pa_free)(struct pool *, void *);
+	size_t	   pa_pagesz;
 };
 
+/*
+ * The pa_pagesz member encodes the sizes of pages that can be
+ * provided by the allocator, and whether the allocations can be
+ * aligned to their size.
+ *
+ * Page sizes can only be powers of two. Each available page size is
+ * represented by its value set as a bit. e.g., to indicate that an
+ * allocator can provide 16k and 32k pages you initialise pa_pagesz
+ * to (32768 | 16384).
+ *
+ * If the allocator can provide aligned pages the low bit in pa_pagesz
+ * is set. The POOL_ALLOC_ALIGNED macro is provided as a convenience.
+ *
+ * If pa_pagesz is unset (i.e. 0), POOL_ALLOC_DEFAULT will be used
+ * instead.
+ */
+
+#define POOL_ALLOC_ALIGNED		1UL
+#define POOL_ALLOC_SIZE(_sz, _a)	((_sz) | (_a))
+#define POOL_ALLOC_SIZES(_min, _max, _a) \
+	((_max) | \
+	(((_max) - 1) & ~((_min) - 1)) | (_a))
+
+#define POOL_ALLOC_DEFAULT \
+	POOL_ALLOC_SIZE(PAGE_SIZE, POOL_ALLOC_ALIGNED)
+
 TAILQ_HEAD(pool_pagelist, pool_page_header);
 
 struct pool_cache_item;
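
to see the page size selection in isolation, here is a standalone
userland sketch (not part of the commit; the item size and the
allocator's size bits are made up for the example):

	#include <stdio.h>

	#define ISSET(t, f)	((t) & (f))

	int
	main(void)
	{
		size_t size = 5000;		/* rounded-up item size */
		size_t pgsize = 4096;		/* start at PAGE_SIZE */
		/* pretend the allocator provides 32k, 16k, and 4k pages */
		size_t pgsizes = 0x8000 | 0x4000 | 0x1000;

		/* grow the page until 8 items would fit */
		while (size * 8 > pgsize)
			pgsize <<= 1;

		/* shrink until the allocator provides this size */
		while (!ISSET(pgsizes, pgsize))
			pgsize >>= 1;

		/* prints "pgsize 32768, items 6" */
		printf("pgsize %zu, items %zu\n", pgsize, pgsize / size);
		return (0);
	}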