path: root/sys/kern/subr_pool.c
author     dlg <dlg@openbsd.org>    2017-06-19 23:57:12 +0000
committer  dlg <dlg@openbsd.org>    2017-06-19 23:57:12 +0000
commit     a11cecbbcfd10e2b38201d497c4c3514a5acf20d (patch)
tree       75439925a1edfdaa0bb4455a550fe3c9120c4bdc /sys/kern/subr_pool.c
parent     s/a active/an active/ (diff)
dynamically scale the size of the per cpu cache lists.
if the lock around the global depot of extra cache lists is contended a lot between gc task runs, consider growing the number of entries a free list can hold.

the size of the list is bounded by the number of pool items the current set of pages can represent, to avoid having cpus starve each other. i'm not sure this semantic is right (or the least worst), but we're putting it in now to see what happens. this also means reality matches the documentation i just committed in pool_cache_init.9.

tested by hrvoje popovski and amit kulkarni
ok visa@
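
For reference, a small userland sketch of the growth heuristic described above. The struct, fake_pool_gc() and NCPUS are stand-ins invented for illustration; they mirror the fields touched in the diff below, but this is not the pool(9) implementation itself.

/*
 * Sketch only: models how a per-cpu cache list length could be grown
 * when the depot lock saw a lot of contention since the last gc pass.
 */
#include <stdio.h>

#define NCPUS	8			/* stand-in for ncpusfound */

struct fake_pool {
	unsigned int npages;		/* pages currently owned by the pool */
	unsigned int itemsperpage;	/* items each page can hold */
	unsigned int cache_items;	/* current per-cpu free list length */
	unsigned int cache_contention;		/* running lock-miss counter */
	unsigned int cache_contention_prev;	/* counter at the previous gc */
};

static void
fake_pool_gc(struct fake_pool *pp)
{
	unsigned int contention = pp->cache_contention;

	/* only grow if the depot lock was missed "a lot" since the last gc */
	if ((contention - pp->cache_contention_prev) > 8 /* magic */) {
		unsigned int limit = pp->npages * pp->itemsperpage;
		unsigned int items = pp->cache_items + 8;
		unsigned int cache = NCPUS * items * 2;

		/* don't let the per-cpu caches hold more than the pool has */
		if (cache < limit)
			pp->cache_items = items;
	}
	pp->cache_contention_prev = contention;
}

int
main(void)
{
	struct fake_pool pp = { .npages = 64, .itemsperpage = 32, .cache_items = 8 };
	unsigned int c;

	/* simulate three gc passes with rising contention */
	for (c = 0; c <= 30; c += 15) {
		pp.cache_contention = c;
		fake_pool_gc(&pp);
		printf("contention=%u cache_items=%u\n", c, pp.cache_items);
	}
	return 0;
}

With these made-up numbers the list length grows from 8 to 16 to 24, and growth stops once NCPUS * items * 2 would reach npages * itemsperpage.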
Diffstat (limited to 'sys/kern/subr_pool.c')
-rw-r--r--   sys/kern/subr_pool.c   23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index 62ac8cc6564..72d73633dfc 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.214 2017/06/16 01:55:45 dlg Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.215 2017/06/19 23:57:12 dlg Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -1926,6 +1926,8 @@ pool_cache_destroy(struct pool *pp)
void
pool_cache_gc(struct pool *pp)
{
+ unsigned int contention;
+
if ((ticks - pp->pr_cache_tick) > (hz * pool_wait_gc) &&
!TAILQ_EMPTY(&pp->pr_cache_lists) &&
mtx_enter_try(&pp->pr_cache_mtx)) {
@@ -1944,6 +1946,25 @@ pool_cache_gc(struct pool *pp)
pool_cache_list_put(pp, pl);
}
+
+ /*
+ * if there's a lot of contention on the pr_cache_mtx then consider
+ * growing the length of the list to reduce the need to access the
+ * global pool.
+ */
+
+ contention = pp->pr_cache_contention;
+ if ((contention - pp->pr_cache_contention_prev) > 8 /* magic */) {
+ unsigned int limit = pp->pr_npages * pp->pr_itemsperpage;
+ unsigned int items = pp->pr_cache_items + 8;
+ unsigned int cache = ncpusfound * items * 2;
+
+ /* are there enough items around so every cpu can hold some? */
+
+ if (cache < limit)
+ pp->pr_cache_items = items;
+ }
+ pp->pr_cache_contention_prev = contention;
}
void
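
The hunks above only read pr_cache_contention; the code that increments it is outside this diff. Purely as a hedged illustration of how such a counter is typically fed (not taken from subr_pool.c), a lock wrapper can count trylock misses before falling back to a blocking acquire. A userland analogue:

#include <pthread.h>

/*
 * Illustration only: count how often a depot-style lock was busy.  The
 * counter is bumped while holding the lock, so no extra atomics are
 * needed here.
 */
static void
depot_enter(pthread_mutex_t *mtx, unsigned int *contention)
{
	if (pthread_mutex_trylock(mtx) != 0) {
		/* the lock was busy: take it the slow way and note the miss */
		pthread_mutex_lock(mtx);
		(*contention)++;
	}
}

A gc pass like the one in this diff can then compare the counter against the value it saw last time to decide whether to grow the per-cpu lists.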