| author | 2017-06-16 01:55:45 +0000 |
|---|---|
| committer | 2017-06-16 01:55:45 +0000 |
| commit | 338a5c6849299c181ddfaa0bcbb0a7cf20fcfe17 (patch) |
| tree | 7fdc2628913153829869e33fbc07faedeba54606 /sys/kern/subr_pool.c |
| parent | split returning an item to the pool pages out of pool_put as pool_do_put. (diff) |
add garbage collection of unused lists of percpu cached items.
the cpu caches in pools amortise the cost of accessing global
structures by moving lists of items around instead of individual
items. excess lists of items are stored in the global pool struct,
but these idle lists never get returned to the system for use
elsewhere.
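
a minimal userland sketch of that amortisation, not the kernel code:
struct item and struct depot below are hypothetical stand-ins for
struct pool_cache_item and the pool's pr_cache_lists tailq, and the
locking is elided (in the kernel the depot is under pr_cache_mtx).
the point is that a cpu touches the shared depot once per list of
items rather than once per item.

```c
/* hypothetical sketch; not the kernel's types or names */
#include <stdio.h>
#include <stddef.h>

struct item {
	struct item *next;	/* link within one list of items */
	struct item *nextl;	/* links whole lists inside the depot */
};

struct depot {
	struct item *lists;	/* stack of idle lists (shared, locked) */
};

/* a cpu parks a whole full list: one depot access per 8 items */
void
depot_put_list(struct depot *d, struct item *list)
{
	list->nextl = d->lists;
	d->lists = list;
}

/* a cpu refills its local cache with a whole list, or NULL if empty */
struct item *
depot_get_list(struct depot *d)
{
	struct item *list = d->lists;

	if (list != NULL)
		d->lists = list->nextl;
	return (list);
}

int
main(void)
{
	struct depot d = { NULL };
	struct item items[8] = { 0 };
	size_t i;

	/* chain the items into one list, as a per-cpu cache would */
	for (i = 0; i + 1 < 8; i++)
		items[i].next = &items[i + 1];

	depot_put_list(&d, &items[0]);
	printf("got list back: %s\n",
	    depot_get_list(&d) == &items[0] ? "yes" : "no");
	return (0);
}
```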
this adds a timestamp to the global idle list, which is updated
when the idle list stops being empty. if the idle list hasn't been
empty for a while, it means the per cpu caches aren't using the idle
entries and they can be recovered. timestamping the idle list prevents
premature recovery of items that may be used again shortly. eg, rx
ring processing and replenishing from rate limited interrupts tends
to allocate and free items in large chunks, which the timestamping
smooths out.
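
the aging test can be sketched as below, following the logic added
in the diff; struct idle_depot and its fields are hypothetical
stand-ins for the pool's pr_cache_nlist and pr_cache_tick.

```c
#include <stdbool.h>

/* hypothetical stand-ins for pr_cache_nlist and pr_cache_tick */
struct idle_depot {
	int nlist;	/* number of idle lists parked globally */
	int stamp;	/* tick when the depot stopped being empty */
};

/*
 * freeing a list into an empty depot restarts the clock, so traffic
 * that drains and refills the depot keeps refreshing the stamp and
 * the gc leaves the lists alone.
 */
void
depot_free_list(struct idle_depot *d, int now)
{
	if (d->nlist == 0)
		d->stamp = now;
	d->nlist++;
}

/* trim only once the depot has been non-empty for wait_secs seconds */
bool
depot_gc_due(const struct idle_depot *d, int now, int hz, int wait_secs)
{
	return (d->nlist > 0 && (now - d->stamp) > hz * wait_secs);
}
```

in the kernel the test is (ticks - pp->pr_cache_tick) > (hz *
pool_wait_gc), and pool_cache_gc re-stamps the list when it trims,
so each gc pass recovers at most one list per pool.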
gc'ed lists are returned to the pool pages, which in turn get gc'ed
back to uvm.
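
a toy model of that two-stage path; ITEMS_PER_PAGE and both helpers
are made up, standing in for pool_cache_list_put returning a gc'ed
list's items to the pool pages (stage 1) and the existing page gc
handing idle pages back to uvm (stage 2).

```c
/* toy accounting model, not the kernel code */
#include <stdio.h>

#define ITEMS_PER_PAGE 4	/* pretend a page holds 4 items */

struct pool {
	int nfree;	/* free items on partially-used pages */
	int nidle;	/* pages whose items are all free */
};

/* stage 1: a gc'ed list of n items goes back onto the pool pages */
void
list_to_pages(struct pool *pp, int n)
{
	pp->nfree += n;
	while (pp->nfree >= ITEMS_PER_PAGE) {	/* a page fills with */
		pp->nfree -= ITEMS_PER_PAGE;	/* free items... */
		pp->nidle++;			/* ...and goes idle */
	}
}

/* stage 2: the page gc hands idle pages back to the system */
int
pages_to_uvm(struct pool *pp)
{
	int freed = pp->nidle;

	pp->nidle = 0;	/* stand-in for freeing each page to uvm */
	return (freed);
}

int
main(void)
{
	struct pool p = { 0, 0 };

	list_to_pages(&p, 8);	/* one recovered list of 8 items */
	printf("pages freed to uvm: %d\n", pages_to_uvm(&p));
	return (0);
}
```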
ok visa@
Diffstat (limited to 'sys/kern/subr_pool.c')
| -rw-r--r-- | sys/kern/subr_pool.c | 42 |
1 file changed, 40 insertions, 2 deletions
```diff
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index 975d0d55720..62ac8cc6564 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: subr_pool.c,v 1.213 2017/06/16 01:33:20 dlg Exp $	*/
+/*	$OpenBSD: subr_pool.c,v 1.214 2017/06/16 01:55:45 dlg Exp $	*/
 /*	$NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $	*/
 
 /*-
@@ -135,6 +135,7 @@ struct pool_cache {
 void	*pool_cache_get(struct pool *);
 void	 pool_cache_put(struct pool *, void *);
 void	 pool_cache_destroy(struct pool *);
+void	 pool_cache_gc(struct pool *);
 #endif
 void	 pool_cache_pool_info(struct pool *, struct kinfo_pool *);
 int	 pool_cache_info(struct pool *, void *, size_t *);
@@ -1476,6 +1477,11 @@ pool_gc_pages(void *null)
 	rw_enter_read(&pool_lock);
 	s = splvm(); /* XXX go to splvm until all pools _setipl properly */
 	SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist) {
+#ifdef MULTIPROCESSOR
+		if (pp->pr_cache != NULL)
+			pool_cache_gc(pp);
+#endif
+
 		if (pp->pr_nidle <= pp->pr_minpages || /* guess */
 		    !mtx_enter_try(&pp->pr_mtx)) /* try */
 			continue;
@@ -1642,8 +1648,10 @@ pool_cache_init(struct pool *pp)
 	arc4random_buf(pp->pr_cache_magic, sizeof(pp->pr_cache_magic));
 	TAILQ_INIT(&pp->pr_cache_lists);
 	pp->pr_cache_nlist = 0;
+	pp->pr_cache_tick = ticks;
 	pp->pr_cache_items = 8;
 	pp->pr_cache_contention = 0;
+	pp->pr_cache_ngc = 0;
 
 	CPUMEM_FOREACH(pc, &i, cm) {
 		pc->pc_actv = NULL;
@@ -1659,6 +1667,8 @@ pool_cache_init(struct pool *pp)
 		pc->pc_nout = 0;
 	}
 
+	membar_producer();
+
 	pp->pr_cache = cm;
 }
 
@@ -1740,6 +1750,9 @@ pool_cache_list_free(struct pool *pp, struct pool_cache *pc,
     struct pool_cache_item *ci)
 {
 	pool_list_enter(pp);
+	if (TAILQ_EMPTY(&pp->pr_cache_lists))
+		pp->pr_cache_tick = ticks;
+
 	TAILQ_INSERT_TAIL(&pp->pr_cache_lists, ci, ci_nextl);
 	pp->pr_cache_nlist++;
 
@@ -1893,8 +1906,10 @@ pool_cache_destroy(struct pool *pp)
 	struct cpumem_iter i;
 	struct cpumem *cm;
 
+	rw_enter_write(&pool_lock); /* serialise with the gc */
 	cm = pp->pr_cache;
 	pp->pr_cache = NULL; /* make pool_put avoid the cache */
+	rw_exit_write(&pool_lock);
 
 	CPUMEM_FOREACH(pc, &i, cm) {
 		pool_cache_list_put(pp, pc->pc_actv);
@@ -1909,6 +1924,29 @@ pool_cache_destroy(struct pool *pp)
 }
 
 void
+pool_cache_gc(struct pool *pp)
+{
+	if ((ticks - pp->pr_cache_tick) > (hz * pool_wait_gc) &&
+	    !TAILQ_EMPTY(&pp->pr_cache_lists) &&
+	    mtx_enter_try(&pp->pr_cache_mtx)) {
+		struct pool_cache_item *pl = NULL;
+
+		pl = TAILQ_FIRST(&pp->pr_cache_lists);
+		if (pl != NULL) {
+			TAILQ_REMOVE(&pp->pr_cache_lists, pl, ci_nextl);
+			pp->pr_cache_tick = ticks;
+			pp->pr_cache_nlist--;
+
+			pp->pr_cache_ngc++;
+		}
+
+		mtx_leave(&pp->pr_cache_mtx);
+
+		pool_cache_list_put(pp, pl);
+	}
+}
+
+void
 pool_cache_pool_info(struct pool *pp, struct kinfo_pool *pi)
 {
 	struct pool_cache *pc;
@@ -1955,7 +1993,7 @@ pool_cache_info(struct pool *pp, void *oldp, size_t *oldlenp)
 	memset(&kpc, 0, sizeof(kpc));	/* don't leak padding */
 
 	mtx_enter(&pp->pr_cache_mtx);
-	kpc.pr_ngc = 0; /* notyet */
+	kpc.pr_ngc = pp->pr_cache_ngc;
 	kpc.pr_len = pp->pr_cache_items;
 	kpc.pr_nlist = pp->pr_cache_nlist;
 	kpc.pr_contention = pp->pr_cache_contention;
```
