author     thib <thib@openbsd.org>  2010-06-27 03:03:48 +0000
committer  thib <thib@openbsd.org>  2010-06-27 03:03:48 +0000
commit     b426ab7bc6c256bfb7af9a9f082a20534b39a28b (patch)
tree       7f774065c2cbc7aa539230517b7ca590794564df /sys/kern/subr_pool.c
parent     Store the current working directory in the session, change the default-path (diff)
uvm constraints. Add two mandatory MD symbols: uvm_md_constraints, which
contains the constraints for DMA/memory allocation for each architecture,
and dma_constraints, which contains the range of addresses that are DMA
accessible by the system.

This is based on ariane@'s physcontig diff, with lots of bugfixes and the
following additions by myself:

Introduce a new function, pool_set_constraints(), which sets the address
range from which we allocate pages for the pool; this is now used for the
mbuf/mbuf cluster pools to keep them DMA accessible.

The !direct archs no longer stuff pages into the kernel object in
uvm_km_getpage_pla, but rather do a pmap_extract() in uvm_km_putpages.

Tested heavily by myself on i386, amd64 and sparc64; some tests on alpha
and SGI.

"commit it" beck, art, oga, deraadt; "i like the diff" deraadt
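For illustration only (not part of this commit): a minimal sketch of how a
subsystem could use the new pool_set_constraints() API to keep a pool's
pages DMA-accessible. The pool name, the init function, and the choice of
one physical segment are assumptions for this sketch; pool_init() and
dma_constraints are the interfaces described in the commit message.

/*
 * Hypothetical sketch: constrain an mbuf-cluster-style pool to the
 * DMA-reachable range.  "examplepool" and example_pool_init() are
 * made up for illustration; dma_constraints is the MD symbol
 * described above.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <uvm/uvm_extern.h>

struct pool examplepool;

void
example_pool_init(void)
{
	pool_init(&examplepool, MCLBYTES, 0, 0, 0, "examplepl", NULL);

	/*
	 * Only allocate pool pages from the range the MD code
	 * declared DMA-accessible, in a single physical segment
	 * (the segment count here is an assumption).
	 */
	pool_set_constraints(&examplepool, &dma_constraints, 1);
}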
Diffstat (limited to 'sys/kern/subr_pool.c')
-rw-r--r--  sys/kern/subr_pool.c  |  43
1 file changed, 36 insertions(+), 7 deletions(-)
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index ba56aaf367e..a8c7771c468 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.92 2010/06/17 16:11:20 miod Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.93 2010/06/27 03:03:48 thib Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -94,6 +94,12 @@ struct pool_item {
((pp)->pr_nitems < (pp)->pr_minitems)
/*
+ * Default constraint range for pools, covering the whole
+ * address space.
+ */
+struct uvm_constraint_range pool_full_range = { 0x0, (paddr_t)-1 };
+
+/*
* Every pool gets a unique serial number assigned to it. If this counter
* wraps, we're screwed, but we shouldn't create so many pools anyway.
*/
@@ -393,6 +399,10 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pool_setipl(&phpool, IPL_HIGH);
}
+ /* pglistalloc/constraint parameters */
+ pp->pr_crange = &pool_full_range;
+ pp->pr_pa_nsegs = 0;
+
/* Insert this into the list of all pools. */
TAILQ_INSERT_HEAD(&pool_head, pp, pr_poollist);
}
@@ -999,6 +1009,21 @@ done:
}
void
+pool_set_constraints(struct pool *pp, struct uvm_constraint_range *range,
+ int nsegs)
+{
+ /*
+ * Subsequent changes to the constraints are only
+ * allowed to make them _more_ strict.
+ */
+ KASSERT(pp->pr_crange->ucr_high >= range->ucr_high &&
+ pp->pr_crange->ucr_low <= range->ucr_low);
+
+ pp->pr_crange = range;
+ pp->pr_pa_nsegs = nsegs;
+}
+
+void
pool_set_ctordtor(struct pool *pp, int (*ctor)(void *, void *, int),
void (*dtor)(void *, void *), void *arg)
{
@@ -1452,15 +1477,15 @@ pool_allocator_free(struct pool *pp, void *v)
void *
pool_page_alloc(struct pool *pp, int flags, int *slowdown)
{
- boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+ int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
- return (uvm_km_getpage(waitok, slowdown));
+ return (uvm_km_getpage_pla(kfl, slowdown, pp->pr_crange->ucr_low,
+ pp->pr_crange->ucr_high, 0, 0));
}
void
pool_page_free(struct pool *pp, void *v)
{
-
uvm_km_putpage(v);
}
@@ -1472,7 +1497,9 @@ pool_large_alloc(struct pool *pp, int flags, int *slowdown)
int s;
s = splvm();
- va = uvm_km_kmemalloc(kmem_map, NULL, pp->pr_alloc->pa_pagesz, kfl);
+ va = uvm_km_kmemalloc_pla(kmem_map, NULL, pp->pr_alloc->pa_pagesz, kfl,
+ pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
+ 0, 0, pp->pr_pa_nsegs);
splx(s);
return ((void *)va);
@@ -1493,8 +1520,10 @@ pool_large_alloc_ni(struct pool *pp, int flags, int *slowdown)
{
int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
- return ((void *)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
- pp->pr_alloc->pa_pagesz, kfl));
+ return ((void *)uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object,
+ pp->pr_alloc->pa_pagesz, kfl,
+ pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
+ 0, 0, pp->pr_pa_nsegs));
}
void
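One consequence of the KASSERT in pool_set_constraints() above, sketched
here for illustration with made-up ranges: after pool_init() installs the
default full range, a pool's constraints may only be tightened, never
widened again.

/*
 * Hypothetical illustration of the monotonic-tightening rule
 * enforced by the KASSERT in pool_set_constraints().  The ranges
 * are made-up example values.
 */
#include <sys/pool.h>
#include <uvm/uvm_extern.h>

struct uvm_constraint_range example_wide   = { 0x0, 0xffffffff };  /* below 4GB */
struct uvm_constraint_range example_narrow = { 0x0, 0x00ffffff };  /* below 16MB */

void
example_tighten(struct pool *pp)
{
	/* OK: narrows the default full range installed by pool_init(). */
	pool_set_constraints(pp, &example_wide, 1);

	/* OK: narrows it further. */
	pool_set_constraints(pp, &example_narrow, 1);

	/*
	 * Widening back would trip the KASSERT:
	 *	pool_set_constraints(pp, &example_wide, 1);
	 */
}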