-rw-r--r--   sys/uvm/uvm_addr.c    6
-rw-r--r--   sys/uvm/uvm_amap.c   17
-rw-r--r--   sys/uvm/uvm_aobj.c    4
-rw-r--r--   sys/uvm/uvm_fault.c   6
-rw-r--r--   sys/uvm/uvm_map.c    12
-rw-r--r--   sys/uvm/uvm_page.c    6
-rw-r--r--   sys/uvm/uvm_page.h    6
7 files changed, 27 insertions, 30 deletions
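Every hunk below performs the same mechanical substitution: the non-standard __inline spelling is replaced with the standard C (C99) inline keyword, and a couple of redundant prototypes are dropped; the surrounding code is otherwise untouched. A minimal before/after sketch of the substitution follows (example_add is a hypothetical function, not one touched by this patch):

	#if 0	/* before: non-standard spelling, as removed by the hunks below */
	static __inline int
	example_add(int a, int b)
	{
		return (a + b);
	}
	#endif

	/* after: standard C99 spelling, as added by the hunks below */
	static inline int
	example_add(int a, int b)
	{
		return (a + b);
	}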
diff --git a/sys/uvm/uvm_addr.c b/sys/uvm/uvm_addr.c
index 7d3ac99b952..ca72453cfbb 100644
--- a/sys/uvm/uvm_addr.c
+++ b/sys/uvm/uvm_addr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_addr.c,v 1.28 2020/09/13 10:05:25 mpi Exp $ */
+/* $OpenBSD: uvm_addr.c,v 1.29 2020/09/22 14:31:08 mpi Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
@@ -186,7 +186,7 @@ uvm_addr_entrybyspace(struct uaddr_free_rbtree *free, vsize_t sz)
}
#endif /* !SMALL_KERNEL */
-static __inline vaddr_t
+static inline vaddr_t
uvm_addr_align_forward(vaddr_t addr, vaddr_t align, vaddr_t offset)
{
vaddr_t adjusted;
@@ -201,7 +201,7 @@ uvm_addr_align_forward(vaddr_t addr, vaddr_t align, vaddr_t offset)
return (adjusted < addr ? adjusted + align : adjusted);
}
-static __inline vaddr_t
+static inline vaddr_t
uvm_addr_align_backward(vaddr_t addr, vaddr_t align, vaddr_t offset)
{
vaddr_t adjusted;
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 93389432bd7..c3f56621473 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.82 2020/01/04 16:17:29 beck Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.83 2020/09/22 14:31:08 mpi Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -63,20 +63,20 @@ static char amap_small_pool_names[UVM_AMAP_CHUNK][9];
*/
static struct vm_amap *amap_alloc1(int, int, int);
-static __inline void amap_list_insert(struct vm_amap *);
-static __inline void amap_list_remove(struct vm_amap *);
+static inline void amap_list_insert(struct vm_amap *);
+static inline void amap_list_remove(struct vm_amap *);
struct vm_amap_chunk *amap_chunk_get(struct vm_amap *, int, int, int);
void amap_chunk_free(struct vm_amap *, struct vm_amap_chunk *);
void amap_wiperange_chunk(struct vm_amap *, struct vm_amap_chunk *, int, int);
-static __inline void
+static inline void
amap_list_insert(struct vm_amap *amap)
{
LIST_INSERT_HEAD(&amap_list, amap, am_list);
}
-static __inline void
+static inline void
amap_list_remove(struct vm_amap *amap)
{
LIST_REMOVE(amap, am_list);
@@ -190,13 +190,10 @@ amap_chunk_free(struct vm_amap *amap, struct vm_amap_chunk *chunk)
* here are some in-line functions to help us.
*/
-static __inline void pp_getreflen(int *, int, int *, int *);
-static __inline void pp_setreflen(int *, int, int, int);
-
/*
* pp_getreflen: get the reference and length for a specific offset
*/
-static __inline void
+static inline void
pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
{
@@ -212,7 +209,7 @@ pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
/*
* pp_setreflen: set the reference and length for a specific offset
*/
-static __inline void
+static inline void
pp_setreflen(int *ppref, int offset, int ref, int len)
{
if (len == 1) {
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index e0f56921fdd..31276a5a0da 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_aobj.c,v 1.86 2019/07/18 23:47:33 cheloha Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.87 2020/09/22 14:31:08 mpi Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -256,7 +256,7 @@ uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
/*
* uao_find_swslot: find the swap slot number for an aobj/pageidx
*/
-__inline static int
+inline static int
uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
{
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 54ab2926206..51be5f2af7f 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.99 2020/09/22 14:29:20 mpi Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.100 2020/09/22 14:31:08 mpi Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
@@ -159,7 +159,7 @@ static struct uvm_advice uvmadvice[MADV_MASK + 1];
* private prototypes
*/
static void uvmfault_amapcopy(struct uvm_faultinfo *);
-static __inline void uvmfault_anonflush(struct vm_anon **, int);
+static inline void uvmfault_anonflush(struct vm_anon **, int);
void uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t);
void uvmfault_update_stats(struct uvm_faultinfo *);
@@ -171,7 +171,7 @@ void uvmfault_update_stats(struct uvm_faultinfo *);
*
* => does not have to deactivate page if it is busy
*/
-static __inline void
+static inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
int lcv;
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index a7eac332bdd..7b2351faeeb 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.267 2020/09/14 20:31:09 kettenis Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.268 2020/09/22 14:31:08 mpi Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -167,7 +167,7 @@ boolean_t uvm_map_inentry_fix(struct proc *, struct p_inentry *,
* Tree management functions.
*/
-static __inline void uvm_mapent_copy(struct vm_map_entry*,
+static inline void uvm_mapent_copy(struct vm_map_entry*,
struct vm_map_entry*);
static inline int uvm_mapentry_addrcmp(const struct vm_map_entry*,
const struct vm_map_entry*);
@@ -361,7 +361,7 @@ uvm_mapentry_addrcmp(const struct vm_map_entry *e1,
/*
* Copy mapentry.
*/
-static __inline void
+static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{
caddr_t csrc, cdst;
@@ -563,7 +563,7 @@ uvm_map_entrybyaddr(struct uvm_map_addr *atree, vaddr_t addr)
* *head must be initialized to NULL before the first call to this macro.
* uvm_unmap_detach(*head, 0) will remove dead entries.
*/
-static __inline void
+static inline void
dead_entry_push(struct uvm_map_deadq *deadq, struct vm_map_entry *entry)
{
TAILQ_INSERT_TAIL(deadq, entry, dfree.deadq);
@@ -4812,8 +4812,8 @@ uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
/*
* Boundary fixer.
*/
-static __inline vaddr_t uvm_map_boundfix(vaddr_t, vaddr_t, vaddr_t);
-static __inline vaddr_t
+static inline vaddr_t uvm_map_boundfix(vaddr_t, vaddr_t, vaddr_t);
+static inline vaddr_t
uvm_map_boundfix(vaddr_t min, vaddr_t max, vaddr_t bound)
{
return (min < bound && max > bound) ? bound : max;
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 194150cf7fd..57111599b33 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.149 2019/11/29 18:32:40 kettenis Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.150 2020/09/22 14:31:08 mpi Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -129,7 +129,7 @@ static void uvm_pageremove(struct vm_page *);
* => call should have already set pg's object and offset pointers
* and bumped the version counter
*/
-__inline static void
+inline static void
uvm_pageinsert(struct vm_page *pg)
{
struct vm_page *dupe;
@@ -147,7 +147,7 @@ uvm_pageinsert(struct vm_page *pg)
*
* => caller must lock page queues
*/
-static __inline void
+static inline void
uvm_pageremove(struct vm_page *pg)
{
KASSERT(pg->pg_flags & PG_TABLED);
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index 415cae0a4cc..6cf40f30e2b 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.h,v 1.64 2019/11/29 18:32:40 kettenis Exp $ */
+/* $OpenBSD: uvm_page.h,v 1.65 2020/09/22 14:31:08 mpi Exp $ */
/* $NetBSD: uvm_page.h,v 1.19 2000/12/28 08:24:55 chs Exp $ */
/*
@@ -249,7 +249,7 @@ psize_t uvm_pagecount(struct uvm_constraint_range*);
/*
* vm_physseg_find: find vm_physseg structure that belongs to a PA
*/
-static __inline int
+static inline int
vm_physseg_find(paddr_t pframe, int *offp)
{
/* 'contig' case */
@@ -265,7 +265,7 @@ vm_physseg_find(paddr_t pframe, int *offp)
* PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
* back from an I/O mapping (ugh!). used in some MD code as well.
*/
-static __inline struct vm_page *
+static inline struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
paddr_t pf = atop(pa);