author		2016-10-21 06:27:50 +0000
committer	2016-10-21 06:27:50 +0000
commit		04e271afd236a4e788ea5f35c3ff6ba63689e51a
tree		04b8e46a84a01d1ba20732f5ceb66f94b7a555eb
parent		vmm(4) for i386. Userland changes forthcoming. Note that for the time being,
add generalised access to per cpu data structures and counters.
both the cpumem and counters apis simply allocate memory for each cpu in
the system, which can be used for arbitrary per cpu data (via cpumem) or
for a versioned set of counters per cpu (via counters).
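
A minimal usage sketch of the counters side of the api, added here for
illustration only (it is not part of the commit): the "scope" subsystem and
its stat indices are hypothetical, but the counters_* calls are the ones
introduced below.

/*
 * Illustrative sketch only: "scope" and its stats are made up; the
 * counters_* functions are the ones added by this commit.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/percpu.h>

enum scope_stat {
	scope_packets,		/* packets seen */
	scope_bytes,		/* bytes seen */
	scope_nstats		/* number of counters */
};

struct cpumem *scope_counters;

void
scope_attach(void)
{
	/* one uint64_t per stat per cpu, plus a hidden generation number */
	scope_counters = counters_alloc(scope_nstats, M_DEVBUF);
}

void
scope_input(uint64_t len)
{
	struct counters_ref ref;
	uint64_t *counters;

	/*
	 * bump this cpu's counters; on MULTIPROCESSOR kernels the
	 * generation number is odd while the update is in progress.
	 */
	counters = counters_enter(&ref, scope_counters);
	counters[scope_packets]++;
	counters[scope_bytes] += len;
	counters_leave(&ref, scope_counters);
}

void
scope_read(uint64_t *out)
{
	/* sums the per cpu values into out[0..scope_nstats-1] */
	counters_read(scope_counters, out, scope_nstats);
}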
there is an alternate backend for uniprocessor systems that turns per cpu
data access into a direct access to a single allocation.
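
For illustration (again not part of the commit), a sketch of the cpumem side
under that model, with a hypothetical "scope" subsystem: on MULTIPROCESSOR
kernels CPUMEM_FOREACH walks one allocation per cpu, while with the
uniprocessor backend it visits the single allocation exactly once, so the
same code serves both configurations.

/* Illustrative sketch only: "scope" is a hypothetical subsystem. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/percpu.h>

struct scope_pcpu {
	uint64_t	drops;
};

struct cpumem *scope_pcpu_mem;

void
scope_pcpu_attach(void)
{
	struct cpumem_iter cmi;
	struct scope_pcpu *pc;

	scope_pcpu_mem = cpumem_malloc(sizeof(*pc), M_DEVBUF);

	/* walk every cpu's allocation; a single iteration on uniprocessor */
	CPUMEM_FOREACH(pc, &cmi, scope_pcpu_mem)
		memset(pc, 0, sizeof(*pc));
}

uint64_t
scope_pcpu_drops(void)
{
	struct cpumem_iter cmi;
	struct scope_pcpu *pc;
	uint64_t drops = 0;

	CPUMEM_FOREACH(pc, &cmi, scope_pcpu_mem)
		drops += pc->drops;

	return (drops);
}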
there is also support for percpu data structures that are available at
boot time, by providing an allocation for the boot cpu. after autoconf,
these allocations have to be resized to cover all the cpus that were
enumerated during boot.
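
A sketch of that boot-time pattern (illustrative only; "scope" and
scope_init_percpu() are hypothetical names): the static boot allocation
covers just the boot cpu, and once the other cpus have been enumerated it
is grown with counters_realloc().

/* Illustrative sketch only: "scope" is a hypothetical subsystem. */
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/percpu.h>

#define SCOPE_NSTATS	4

/* static per cpu memory that only covers the boot cpu */
COUNTERS_BOOT_MEMORY(scope_boot, SCOPE_NSTATS);
struct cpumem *scope_counters = COUNTERS_BOOT_INITIALIZER(scope_boot);

/* hypothetical hook, run after autoconf has enumerated all the cpus */
void
scope_init_percpu(void)
{
	scope_counters = counters_realloc(scope_counters, SCOPE_NSTATS,
	    M_DEVBUF);
}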
ok mpi@
-rw-r--r--	sys/conf/files			3
-rw-r--r--	sys/kern/init_main.c		6
-rw-r--r--	sys/kern/subr_percpu.c		325
-rw-r--r--	sys/sys/percpu.h		172
-rw-r--r--	sys/sys/srp.h			4

5 files changed, 507 insertions, 3 deletions
diff --git a/sys/conf/files b/sys/conf/files
index 3aff3e45aa5..c670e793e56 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,4 +1,4 @@
-#	$OpenBSD: files,v 1.632 2016/09/16 19:13:17 jasper Exp $
+#	$OpenBSD: files,v 1.633 2016/10/21 06:27:50 dlg Exp $
 #	$NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $
 #	@(#)files.newconf	7.5 (Berkeley) 5/10/93
 
@@ -691,6 +691,7 @@ file kern/subr_evcount.c
 file kern/subr_extent.c
 file kern/subr_hibernate.c		hibernate
 file kern/subr_log.c
+file kern/subr_percpu.c
 file kern/subr_poison.c			diagnostic
 file kern/subr_pool.c
 file kern/subr_tree.c
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index f781069a92e..d65e00cfa82 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: init_main.c,v 1.259 2016/09/22 12:55:24 mpi Exp $	*/
+/*	$OpenBSD: init_main.c,v 1.260 2016/10/21 06:27:50 dlg Exp $	*/
 /*	$NetBSD: init_main.c,v 1.84.4.1 1996/06/02 09:08:06 mrg Exp $	*/
 
 /*
@@ -146,6 +146,7 @@ void	kqueue_init(void);
 void	taskq_init(void);
 void	timeout_proc_init(void);
 void	pool_gc_pages(void *);
+void	percpu_init(void);
 
 extern char sigcode[], esigcode[], sigcoderet[];
 #ifdef SYSCALL_DEBUG
@@ -360,6 +361,9 @@ main(void *framep)
 	/* Configure virtual memory system, set vm rlimits. */
 	uvm_init_limits(p);
 
+	/* Per CPU memory allocation */
+	percpu_init();
+
 	/* Initialize the file systems. */
 #if defined(NFSSERVER) || defined(NFSCLIENT)
 	nfs_init();			/* initialize server/shared data */
diff --git a/sys/kern/subr_percpu.c b/sys/kern/subr_percpu.c
new file mode 100644
index 00000000000..ddd92c66a4c
--- /dev/null
+++ b/sys/kern/subr_percpu.c
@@ -0,0 +1,325 @@
+/*	$OpenBSD: subr_percpu.c,v 1.1 2016/10/21 06:27:50 dlg Exp $ */
+
+/*
+ * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/pool.h>
+#include <sys/malloc.h>
+#include <sys/types.h>
+
+#include <sys/percpu.h>
+
+#ifdef MULTIPROCESSOR
+struct pool cpumem_pl;
+
+void
+percpu_init(void)
+{
+	pool_init(&cpumem_pl, sizeof(struct cpumem) * ncpus, 0, IPL_NONE,
+	    PR_WAITOK, "percpumem", &pool_allocator_single);
+}
+
+struct cpumem *
+cpumem_get(struct pool *pp)
+{
+	struct cpumem *cm;
+	unsigned int cpu;
+
+	cm = pool_get(&cpumem_pl, PR_WAITOK);
+
+	for (cpu = 0; cpu < ncpus; cpu++)
+		cm[cpu].mem = pool_get(pp, PR_WAITOK | PR_ZERO);
+
+	return (cm);
+}
+
+void
+cpumem_put(struct pool *pp, struct cpumem *cm)
+{
+	unsigned int cpu;
+
+	for (cpu = 0; cpu < ncpus; cpu++)
+		pool_put(pp, cm[cpu].mem);
+
+	pool_put(&cpumem_pl, cm);
+}
+
+struct cpumem *
+cpumem_malloc(size_t sz, int type)
+{
+	struct cpumem *cm;
+	unsigned int cpu;
+
+	sz = roundup(sz, CACHELINESIZE);
+
+	cm = pool_get(&cpumem_pl, PR_WAITOK);
+
+	for (cpu = 0; cpu < ncpus; cpu++)
+		cm[cpu].mem = malloc(sz, type, M_WAITOK | M_ZERO);
+
+	return (cm);
+}
+
+struct cpumem *
+cpumem_realloc(struct cpumem *bootcm, size_t sz, int type)
+{
+	struct cpumem *cm;
+	unsigned int cpu;
+
+	sz = roundup(sz, CACHELINESIZE);
+
+	cm = pool_get(&cpumem_pl, PR_WAITOK);
+
+	cm[0].mem = bootcm[0].mem;
+	for (cpu = 1; cpu < ncpus; cpu++)
+		cm[cpu].mem = malloc(sz, type, M_WAITOK | M_ZERO);
+
+	return (cm);
+}
+
+void
+cpumem_free(struct cpumem *cm, int type, size_t sz)
+{
+	unsigned int cpu;
+
+	sz = roundup(sz, CACHELINESIZE);
+
+	for (cpu = 0; cpu < ncpus; cpu++)
+		free(cm[cpu].mem, type, sz);
+
+	pool_put(&cpumem_pl, cm);
+}
+
+void *
+cpumem_first(struct cpumem_iter *i, struct cpumem *cm)
+{
+	i->cpu = 0;
+
+	return (cm[0].mem);
+}
+
+void *
+cpumem_next(struct cpumem_iter *i, struct cpumem *cm)
+{
+	unsigned int cpu = ++i->cpu;
+
+	if (cpu >= ncpus)
+		return (NULL);
+
+	return (cm[cpu].mem);
+}
+
+struct cpumem *
+counters_alloc(unsigned int n, int type)
+{
+	struct cpumem *cm;
+	struct cpumem_iter cmi;
+	uint64_t *counters;
+	unsigned int i;
+
+	KASSERT(n > 0);
+
+	n++; /* add space for a generation number */
+	cm = cpumem_malloc(n * sizeof(uint64_t), type);
+
+	CPUMEM_FOREACH(counters, &cmi, cm) {
+		for (i = 0; i < n; i++)
+			counters[i] = 0;
+	}
+
+	return (cm);
+}
+
+struct cpumem *
+counters_realloc(struct cpumem *cm, unsigned int n, int type)
+{
+	n++; /* the generation number */
+	return (cpumem_realloc(cm, n * sizeof(uint64_t), type));
+}
+
+void
+counters_free(struct cpumem *cm, int type, unsigned int n)
+{
+	n++; /* generation number */
+	cpumem_free(cm, type, n * sizeof(uint64_t));
+}
+
+void
+counters_read(struct cpumem *cm, uint64_t *output, unsigned int n)
+{
+	struct cpumem_iter cmi;
+	uint64_t *gen, *counters, *temp;
+	uint64_t enter, leave;
+	unsigned int i;
+
+	for (i = 0; i < n; i++)
+		output[i] = 0;
+
+	temp = mallocarray(n, sizeof(uint64_t), M_TEMP, M_WAITOK);
+
+	gen = cpumem_first(&cmi, cm);
+	do {
+		counters = gen + 1;
+
+		enter = *gen;
+		for (;;) {
+			/* the generation number is odd during an update */
+			while (enter & 1) {
+				yield();
+				membar_consumer();
+				enter = *gen;
+			}
+
+			for (i = 0; i < n; i++)
+				temp[i] = counters[i];
+
+			membar_consumer();
+			leave = *gen;
+
+			if (enter == leave)
+				break;
+
+			enter = leave;
+		}
+
+		for (i = 0; i < n; i++)
+			output[i] += temp[i];
+
+		gen = cpumem_next(&cmi, cm);
+	} while (gen != NULL);
+
+	free(temp, M_TEMP, n * sizeof(uint64_t));
+}
+
+void
+counters_zero(struct cpumem *cm, unsigned int n)
+{
+	struct cpumem_iter cmi;
+	uint64_t *counters;
+	unsigned int i;
+
+	n++; /* zero the generation numbers too */
+
+	counters = cpumem_first(&cmi, cm);
+	do {
+		for (i = 0; i < n; i++)
+			counters[i] = 0;
+
+		counters = cpumem_next(&cmi, cm);
+	} while (counters != NULL);
+}
+
+#else /* MULTIPROCESSOR */
+
+/*
+ * Uniprocessor implementation of per-CPU data structures.
+ *
+ * UP percpu memory is a single memory allocation cast to/from the
+ * cpumem struct. It is not scaled up to the size of cacheline because
+ * there's no other cache to contend with.
+ */
+
+void
+percpu_init(void)
+{
+	/* nop */
+}
+
+struct cpumem *
+cpumem_get(struct pool *pp)
+{
+	return (pool_get(pp, PR_WAITOK));
+}
+
+void
+cpumem_put(struct pool *pp, struct cpumem *cm)
+{
+	pool_put(pp, cm);
+}
+
+struct cpumem *
+cpumem_malloc(size_t sz, int type)
+{
+	return (malloc(sz, type, M_WAITOK));
+}
+
+struct cpumem *
+cpumem_realloc(struct cpumem *cm, size_t sz, int type)
+{
+	return (cm);
+}
+
+void
+cpumem_free(struct cpumem *cm, int type, size_t sz)
+{
+	free(cm, type, sz);
+}
+
+struct cpumem *
+counters_alloc(unsigned int n, int type)
+{
+	KASSERT(n > 0);
+
+	return (cpumem_malloc(n * sizeof(uint64_t), type));
+}
+
+struct cpumem *
+counters_realloc(struct cpumem *cm, unsigned int n, int type)
+{
+	/* this is unecessary, but symmetrical */
+	return (cpumem_realloc(cm, n * sizeof(uint64_t), type));
+}
+
+void
+counters_free(struct cpumem *cm, int type, unsigned int n)
+{
+	cpumem_free(cm, type, n * sizeof(uint64_t));
+}
+
+void
+counters_read(struct cpumem *cm, uint64_t *output, unsigned int n)
+{
+	uint64_t *counters;
+	unsigned int i;
+	int s;
+
+	counters = (uint64_t *)cm;
+
+	s = splhigh();
+	for (i = 0; i < n; i++)
+		output[i] = counters[i];
+	splx(s);
+}
+
+void
+counters_zero(struct cpumem *cm, unsigned int n)
+{
+	uint64_t *counters;
+	unsigned int i;
+	int s;
+
+	counters = (uint64_t *)cm;
+
+	s = splhigh();
+	for (i = 0; i < n; i++)
+		counters[i] = 0;
+	splx(s);
+}
+
+#endif /* MULTIPROCESSOR */
diff --git a/sys/sys/percpu.h b/sys/sys/percpu.h
new file mode 100644
index 00000000000..d3b90b59027
--- /dev/null
+++ b/sys/sys/percpu.h
@@ -0,0 +1,172 @@
+/*	$OpenBSD: percpu.h,v 1.1 2016/10/21 06:27:50 dlg Exp $ */
+
+/*
+ * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SYS_PERCPU_H_
+#define _SYS_PERCPU_H_
+
+#ifndef CACHELINESIZE
+#define CACHELINESIZE 64
+#endif
+
+#ifndef __upunused /* this should go in param.h */
+#ifdef MULTIPROCESSOR
+#define __upunused
+#else
+#define __upunused __attribute__((__unused__))
+#endif
+#endif
+
+struct cpumem {
+	void		*mem;
+};
+
+struct cpumem_iter {
+	unsigned int	cpu;
+} __upunused;
+
+struct counters_ref {
+	uint64_t	 g;
+	uint64_t	*c;
+};
+
+#ifdef _KERNEL
+
+#include <sys/atomic.h>
+
+struct pool;
+
+struct cpumem	*cpumem_get(struct pool *);
+void		 cpumem_put(struct pool *, struct cpumem *);
+
+struct cpumem	*cpumem_malloc(size_t, int);
+struct cpumem	*cpumem_realloc(struct cpumem *, size_t, int);
+void		 cpumem_free(struct cpumem *, int, size_t);
+
+#ifdef MULTIPROCESSOR
+static inline void *
+cpumem_enter(struct cpumem *cm)
+{
+	return (cm[cpu_number()].mem);
+}
+
+static inline void
+cpumem_leave(struct cpumem *cm, void *mem)
+{
+	/* KDASSERT? */
+}
+
+void		*cpumem_first(struct cpumem_iter *, struct cpumem *);
+void		*cpumem_next(struct cpumem_iter *, struct cpumem *);
+
+#define CPUMEM_BOOT_MEMORY(_name, _sz)					\
+static struct {								\
+	unsigned char	mem[_sz];					\
+	struct cpumem	cpumem;						\
+} __aligned(CACHELINESIZE) _name##_boot_cpumem = {			\
+	.cpumem = { _name##_boot_cpumem.mem }				\
+}
+
+#define CPUMEM_BOOT_INITIALIZER(_name)					\
+	{ &_name##_boot_cpumem.cpumem }
+
+#else /* MULTIPROCESSOR */
+static inline void *
+cpumem_enter(struct cpumem *cm)
+{
+	return (cm);
+}
+
+static inline void
+cpumem_leave(struct cpumem *cm, void *mem)
+{
+	/* KDASSERT? */
+}
+
+static inline void *
+cpumem_first(struct cpumem_iter *i, struct cpumem *cm)
+{
+	return (cm);
+}
+
+static inline void *
+cpumem_next(struct cpumem_iter *i, struct cpumem *cm)
+{
+	return (NULL);
+}
+
+#define CPUMEM_BOOT_MEMORY(_name, _sz)					\
+static struct {								\
+	unsigned char	mem[_sz];					\
+} _name##_boot_cpumem
+
+#define CPUMEM_BOOT_INITIALIZER(_name)					\
+	{ (struct cpumem *)&_name##_boot_cpumem.mem }
+
+#endif /* MULTIPROCESSOR */
+
+#define CPUMEM_FOREACH(_var, _iter, _cpumem)				\
+	for ((_var) = cpumem_first((_iter), (_cpumem));			\
+	    (_var) != NULL;						\
+	    (_var) = cpumem_next((_iter), (_cpumem)))
+
+struct cpumem	*counters_alloc(unsigned int, int);
+struct cpumem	*counters_realloc(struct cpumem *, unsigned int, int);
+void		 counters_free(struct cpumem *, int, unsigned int);
+void		 counters_read(struct cpumem *, uint64_t *, unsigned int);
+void		 counters_zero(struct cpumem *, unsigned int);
+
+#ifdef MULTIPROCESSOR
+static inline uint64_t *
+counters_enter(struct counters_ref *ref, struct cpumem *cm)
+{
+	ref->c = cpumem_enter(cm);
+	ref->g = ++(*ref->c); /* make the generation number odd */
+	return (ref->c + 1);
+}
+
+static inline void
+counters_leave(struct counters_ref *ref, struct cpumem *cm)
+{
+	membar_producer();
+	(*ref->c) = ++ref->g; /* make the generation number even again */
+	cpumem_leave(cm, ref->c);
+}
+#define COUNTERS_BOOT_MEMORY(_name, _n)					\
+	CPUMEM_BOOT_MEMORY(_name, ((_n) + 1) * sizeof(uint64_t))
+#else
+static inline uint64_t *
+counters_enter(struct counters_ref *r, struct cpumem *cm)
+{
+	r->c = cpumem_enter(cm);
+	return (r->c);
+}
+
+static inline void
+counters_leave(struct counters_ref *r, struct cpumem *cm)
+{
+	cpumem_leave(cm, r->c);
+}
+
+#define COUNTERS_BOOT_MEMORY(_name, _n)					\
+	CPUMEM_BOOT_MEMORY(_name, (_n) * sizeof(uint64_t))
+#endif
+
+#define COUNTERS_BOOT_INITIALIZER(_name)	CPUMEM_BOOT_INITIALIZER(_name)
+
+#endif /* _KERNEL */
+#endif /* _SYS_PERCPU_H_ */
diff --git a/sys/sys/srp.h b/sys/sys/srp.h
index 61cedc1d799..cedbd20bd87 100644
--- a/sys/sys/srp.h
+++ b/sys/sys/srp.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: srp.h,v 1.11 2016/06/07 07:53:33 mpi Exp $ */
+/*	$OpenBSD: srp.h,v 1.12 2016/10/21 06:27:50 dlg Exp $ */
 
 /*
  * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org>
@@ -21,11 +21,13 @@
 
 #include <sys/refcnt.h>
 
+#ifndef __upunused
 #ifdef MULTIPROCESSOR
 #define __upunused
 #else
 #define __upunused __attribute__((__unused__))
 #endif
+#endif /* __upunused */
 
 struct srp {
 	void			*ref;