author    Laurent Ghigonis <laurent@p1sec.com>    2012-11-29 10:58:30 +0100
committer Laurent Ghigonis <laurent@p1sec.com>    2012-11-29 10:58:30 +0100
commit    b9decf4304d2204173a8d8a706e003cdde2b1bf7 (patch)
tree      d2a271e59d13cf1c01268931e79602f239a86ea4 /old/glougloud/external
parent    include event.h here, and also the default queue.h of the system instead of the (diff)
move glougloud to old
Diffstat (limited to 'old/glougloud/external')
-rw-r--r--  old/glougloud/external/README.txt       8
-rw-r--r--  old/glougloud/external/imsg-buffer.c  305
-rw-r--r--  old/glougloud/external/imsg.c         301
-rw-r--r--  old/glougloud/external/imsg.h         118
-rw-r--r--  old/glougloud/external/imsgev.c       170
-rw-r--r--  old/glougloud/external/imsgev.h        49
-rw-r--r--  old/glougloud/external/queue.h        568
7 files changed, 1519 insertions, 0 deletions
diff --git a/old/glougloud/external/README.txt b/old/glougloud/external/README.txt
new file mode 100644
index 0000000..7e7509f
--- /dev/null
+++ b/old/glougloud/external/README.txt
@@ -0,0 +1,8 @@
+- imsg framework, an IPC mechanism from OpenBSD -
+imsg-buffer.c
+imsg.c
+imsg.h
+imsgev.c
+imsgev.h
+- queue implementation, from OpenBSD -
+queue.h
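
The imsg framework imported above is OpenBSD's message-based IPC layer, used to pass structured messages and file descriptors between privilege-separated processes. A minimal sketch of the usual setup over a socketpair(2), for context only (setup_ipc() is a hypothetical helper, error handling is abridged, and none of this is part of the imported files):

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <err.h>
#include <unistd.h>

#include "imsg.h"

static struct imsgbuf ibuf;

static void
setup_ipc(void)
{
	int fds[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, fds) == -1)
		err(1, "socketpair");

	switch (fork()) {
	case -1:
		err(1, "fork");
	case 0:				/* child keeps fds[1] */
		close(fds[0]);
		imsg_init(&ibuf, fds[1]);
		break;
	default:			/* parent keeps fds[0] */
		close(fds[1]);
		imsg_init(&ibuf, fds[0]);
		break;
	}
	/* each process now composes and reads imsgs on its own ibuf */
}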
diff --git a/old/glougloud/external/imsg-buffer.c b/old/glougloud/external/imsg-buffer.c
new file mode 100644
index 0000000..9f04757
--- /dev/null
+++ b/old/glougloud/external/imsg-buffer.c
@@ -0,0 +1,305 @@
+/* $OpenBSD: imsg-buffer.c,v 1.2 2012/06/02 21:46:53 gilles Exp $ */
+
+/*
+ * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "imsg.h"
+
+int ibuf_realloc(struct ibuf *, size_t);
+void ibuf_enqueue(struct msgbuf *, struct ibuf *);
+void ibuf_dequeue(struct msgbuf *, struct ibuf *);
+
+struct ibuf *
+ibuf_open(size_t len)
+{
+ struct ibuf *buf;
+
+ if ((buf = calloc(1, sizeof(struct ibuf))) == NULL)
+ return (NULL);
+ if ((buf->buf = malloc(len)) == NULL) {
+ free(buf);
+ return (NULL);
+ }
+ buf->size = buf->max = len;
+ buf->fd = -1;
+
+ return (buf);
+}
+
+struct ibuf *
+ibuf_dynamic(size_t len, size_t max)
+{
+ struct ibuf *buf;
+
+ if (max < len)
+ return (NULL);
+
+ if ((buf = ibuf_open(len)) == NULL)
+ return (NULL);
+
+ if (max > 0)
+ buf->max = max;
+
+ return (buf);
+}
+
+int
+ibuf_realloc(struct ibuf *buf, size_t len)
+{
+ u_char *b;
+
+ /* on static buffers max is eq size and so the following fails */
+ if (buf->wpos + len > buf->max) {
+ errno = ENOMEM;
+ return (-1);
+ }
+
+ b = realloc(buf->buf, buf->wpos + len);
+ if (b == NULL)
+ return (-1);
+ buf->buf = b;
+ buf->size = buf->wpos + len;
+
+ return (0);
+}
+
+int
+ibuf_add(struct ibuf *buf, const void *data, size_t len)
+{
+ if (buf->wpos + len > buf->size)
+ if (ibuf_realloc(buf, len) == -1)
+ return (-1);
+
+ memcpy(buf->buf + buf->wpos, data, len);
+ buf->wpos += len;
+ return (0);
+}
+
+void *
+ibuf_reserve(struct ibuf *buf, size_t len)
+{
+ void *b;
+
+ if (buf->wpos + len > buf->size)
+ if (ibuf_realloc(buf, len) == -1)
+ return (NULL);
+
+ b = buf->buf + buf->wpos;
+ buf->wpos += len;
+ return (b);
+}
+
+void *
+ibuf_seek(struct ibuf *buf, size_t pos, size_t len)
+{
+ /* only allowed to seek in already written parts */
+ if (pos + len > buf->wpos)
+ return (NULL);
+
+ return (buf->buf + pos);
+}
+
+size_t
+ibuf_size(struct ibuf *buf)
+{
+ return (buf->wpos);
+}
+
+size_t
+ibuf_left(struct ibuf *buf)
+{
+ return (buf->max - buf->wpos);
+}
+
+void
+ibuf_close(struct msgbuf *msgbuf, struct ibuf *buf)
+{
+ ibuf_enqueue(msgbuf, buf);
+}
+
+int
+ibuf_write(struct msgbuf *msgbuf)
+{
+ struct iovec iov[IOV_MAX];
+ struct ibuf *buf;
+ unsigned int i = 0;
+ ssize_t n;
+
+ bzero(&iov, sizeof(iov));
+ TAILQ_FOREACH(buf, &msgbuf->bufs, entry) {
+ if (i >= IOV_MAX)
+ break;
+ iov[i].iov_base = buf->buf + buf->rpos;
+ iov[i].iov_len = buf->wpos - buf->rpos;
+ i++;
+ }
+
+again:
+ if ((n = writev(msgbuf->fd, iov, i)) == -1) {
+ if (errno == EAGAIN || errno == EINTR)
+ goto again;
+ if (errno == ENOBUFS)
+ errno = EAGAIN;
+ return (-1);
+ }
+
+ if (n == 0) { /* connection closed */
+ errno = 0;
+ return (0);
+ }
+
+ msgbuf_drain(msgbuf, n);
+
+ return (1);
+}
+
+void
+ibuf_free(struct ibuf *buf)
+{
+ free(buf->buf);
+ free(buf);
+}
+
+void
+msgbuf_init(struct msgbuf *msgbuf)
+{
+ msgbuf->queued = 0;
+ msgbuf->fd = -1;
+ TAILQ_INIT(&msgbuf->bufs);
+}
+
+void
+msgbuf_drain(struct msgbuf *msgbuf, size_t n)
+{
+ struct ibuf *buf, *next;
+
+ for (buf = TAILQ_FIRST(&msgbuf->bufs); buf != NULL && n > 0;
+ buf = next) {
+ next = TAILQ_NEXT(buf, entry);
+ if (buf->rpos + n >= buf->wpos) {
+ n -= buf->wpos - buf->rpos;
+ ibuf_dequeue(msgbuf, buf);
+ } else {
+ buf->rpos += n;
+ n = 0;
+ }
+ }
+}
+
+void
+msgbuf_clear(struct msgbuf *msgbuf)
+{
+ struct ibuf *buf;
+
+ while ((buf = TAILQ_FIRST(&msgbuf->bufs)) != NULL)
+ ibuf_dequeue(msgbuf, buf);
+}
+
+int
+msgbuf_write(struct msgbuf *msgbuf)
+{
+ struct iovec iov[IOV_MAX];
+ struct ibuf *buf;
+ unsigned int i = 0;
+ ssize_t n;
+ struct msghdr msg;
+ struct cmsghdr *cmsg;
+ union {
+ struct cmsghdr hdr;
+ char buf[CMSG_SPACE(sizeof(int))];
+ } cmsgbuf;
+
+ bzero(&iov, sizeof(iov));
+ bzero(&msg, sizeof(msg));
+ TAILQ_FOREACH(buf, &msgbuf->bufs, entry) {
+ if (i >= IOV_MAX)
+ break;
+ iov[i].iov_base = buf->buf + buf->rpos;
+ iov[i].iov_len = buf->wpos - buf->rpos;
+ i++;
+ if (buf->fd != -1)
+ break;
+ }
+
+ msg.msg_iov = iov;
+ msg.msg_iovlen = i;
+
+ if (buf != NULL && buf->fd != -1) {
+ msg.msg_control = (caddr_t)&cmsgbuf.buf;
+ msg.msg_controllen = sizeof(cmsgbuf.buf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ *(int *)CMSG_DATA(cmsg) = buf->fd;
+ }
+
+again:
+ if ((n = sendmsg(msgbuf->fd, &msg, 0)) == -1) {
+ if (errno == EAGAIN || errno == EINTR)
+ goto again;
+ if (errno == ENOBUFS)
+ errno = EAGAIN;
+ return (-1);
+ }
+
+ if (n == 0) { /* connection closed */
+ errno = 0;
+ return (0);
+ }
+
+ /*
+ * assumption: fd got sent if sendmsg sent anything
+ * this works because fds are passed one at a time
+ */
+ if (buf != NULL && buf->fd != -1) {
+ close(buf->fd);
+ buf->fd = -1;
+ }
+
+ msgbuf_drain(msgbuf, n);
+
+ return (1);
+}
+
+void
+ibuf_enqueue(struct msgbuf *msgbuf, struct ibuf *buf)
+{
+ TAILQ_INSERT_TAIL(&msgbuf->bufs, buf, entry);
+ msgbuf->queued++;
+}
+
+void
+ibuf_dequeue(struct msgbuf *msgbuf, struct ibuf *buf)
+{
+ TAILQ_REMOVE(&msgbuf->bufs, buf, entry);
+
+ if (buf->fd != -1)
+ close(buf->fd);
+
+ msgbuf->queued--;
+ ibuf_free(buf);
+}
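
For context, a sketch of how the ibuf/msgbuf primitives above fit together; callers normally go through imsg_compose()/imsg_flush() rather than using these directly. send_blob() is a hypothetical helper, not part of this file:

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/uio.h>

#include <errno.h>

#include "imsg.h"

static int
send_blob(struct msgbuf *w, const void *data, size_t len)
{
	struct ibuf *buf;

	if ((buf = ibuf_open(len)) == NULL)
		return (-1);
	if (ibuf_add(buf, data, len) == -1) {
		ibuf_free(buf);
		return (-1);
	}
	ibuf_close(w, buf);		/* append to the write queue */

	/* drain the queue; retry on EAGAIN, give up on close or error */
	while (w->queued)
		if (msgbuf_write(w) <= 0 && errno != EAGAIN)
			return (-1);
	return (0);
}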
diff --git a/old/glougloud/external/imsg.c b/old/glougloud/external/imsg.c
new file mode 100644
index 0000000..05e57c7
--- /dev/null
+++ b/old/glougloud/external/imsg.c
@@ -0,0 +1,301 @@
+/* $OpenBSD: imsg.c,v 1.2 2012/06/02 21:46:53 gilles Exp $ */
+
+/*
+ * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "imsg.h"
+
+int imsg_fd_overhead = 0;
+
+int imsg_get_fd(struct imsgbuf *);
+
+void
+imsg_init(struct imsgbuf *ibuf, int fd)
+{
+ msgbuf_init(&ibuf->w);
+ bzero(&ibuf->r, sizeof(ibuf->r));
+ ibuf->fd = fd;
+ ibuf->w.fd = fd;
+ ibuf->pid = getpid();
+ TAILQ_INIT(&ibuf->fds);
+}
+
+ssize_t
+imsg_read(struct imsgbuf *ibuf)
+{
+ struct msghdr msg;
+ struct cmsghdr *cmsg;
+ union {
+ struct cmsghdr hdr;
+ char buf[CMSG_SPACE(sizeof(int) * 1)];
+ } cmsgbuf;
+ struct iovec iov;
+ ssize_t n = -1;
+ int fd;
+ struct imsg_fd *ifd;
+
+ bzero(&msg, sizeof(msg));
+
+ iov.iov_base = ibuf->r.buf + ibuf->r.wpos;
+ iov.iov_len = sizeof(ibuf->r.buf) - ibuf->r.wpos;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = &cmsgbuf.buf;
+ msg.msg_controllen = sizeof(cmsgbuf.buf);
+
+ if ((ifd = calloc(1, sizeof(struct imsg_fd))) == NULL)
+ return (-1);
+
+again:
+ if (getdtablecount() + imsg_fd_overhead +
+ (CMSG_SPACE(sizeof(int))-CMSG_SPACE(0))/sizeof(int)
+ >= getdtablesize()) {
+		errno = EAGAIN;
+		goto fail;	/* free the pre-allocated ifd on the way out */
+ }
+
+ if ((n = recvmsg(ibuf->fd, &msg, 0)) == -1) {
+ if (errno == EMSGSIZE)
+ goto fail;
+ if (errno != EINTR && errno != EAGAIN)
+ goto fail;
+ goto again;
+ }
+
+ ibuf->r.wpos += n;
+
+ for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
+ cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SCM_RIGHTS) {
+ int i;
+ int j;
+
+ /*
+ * We only accept one file descriptor. Due to C
+ * padding rules, our control buffer might contain
+ * more than one fd, and we must close them.
+ */
+ j = ((char *)cmsg + cmsg->cmsg_len -
+ (char *)CMSG_DATA(cmsg)) / sizeof(int);
+ for (i = 0; i < j; i++) {
+ fd = ((int *)CMSG_DATA(cmsg))[i];
+ if (i == 0) {
+ ifd->fd = fd;
+ TAILQ_INSERT_TAIL(&ibuf->fds, ifd,
+ entry);
+ ifd = NULL;
+ } else
+ close(fd);
+ }
+ }
+ /* we do not handle other ctl data level */
+ }
+
+fail:
+ if (ifd)
+ free(ifd);
+ return (n);
+}
+
+ssize_t
+imsg_get(struct imsgbuf *ibuf, struct imsg *imsg)
+{
+ size_t av, left, datalen;
+
+ av = ibuf->r.wpos;
+
+ if (IMSG_HEADER_SIZE > av)
+ return (0);
+
+ memcpy(&imsg->hdr, ibuf->r.buf, sizeof(imsg->hdr));
+ if (imsg->hdr.len < IMSG_HEADER_SIZE ||
+ imsg->hdr.len > MAX_IMSGSIZE) {
+ errno = ERANGE;
+ return (-1);
+ }
+ if (imsg->hdr.len > av)
+ return (0);
+ datalen = imsg->hdr.len - IMSG_HEADER_SIZE;
+ ibuf->r.rptr = ibuf->r.buf + IMSG_HEADER_SIZE;
+ if ((imsg->data = malloc(datalen)) == NULL)
+ return (-1);
+
+ if (imsg->hdr.flags & IMSGF_HASFD)
+ imsg->fd = imsg_get_fd(ibuf);
+ else
+ imsg->fd = -1;
+
+ memcpy(imsg->data, ibuf->r.rptr, datalen);
+
+ if (imsg->hdr.len < av) {
+ left = av - imsg->hdr.len;
+ memmove(&ibuf->r.buf, ibuf->r.buf + imsg->hdr.len, left);
+ ibuf->r.wpos = left;
+ } else
+ ibuf->r.wpos = 0;
+
+ return (datalen + IMSG_HEADER_SIZE);
+}
+
+int
+imsg_compose(struct imsgbuf *ibuf, u_int32_t type, u_int32_t peerid,
+ pid_t pid, int fd, void *data, u_int16_t datalen)
+{
+ struct ibuf *wbuf;
+
+ if ((wbuf = imsg_create(ibuf, type, peerid, pid, datalen)) == NULL)
+ return (-1);
+
+ if (imsg_add(wbuf, data, datalen) == -1)
+ return (-1);
+
+ wbuf->fd = fd;
+
+ imsg_close(ibuf, wbuf);
+
+ return (1);
+}
+
+int
+imsg_composev(struct imsgbuf *ibuf, u_int32_t type, u_int32_t peerid,
+ pid_t pid, int fd, const struct iovec *iov, int iovcnt)
+{
+ struct ibuf *wbuf;
+ int i, datalen = 0;
+
+ for (i = 0; i < iovcnt; i++)
+ datalen += iov[i].iov_len;
+
+ if ((wbuf = imsg_create(ibuf, type, peerid, pid, datalen)) == NULL)
+ return (-1);
+
+ for (i = 0; i < iovcnt; i++)
+ if (imsg_add(wbuf, iov[i].iov_base, iov[i].iov_len) == -1)
+ return (-1);
+
+ wbuf->fd = fd;
+
+ imsg_close(ibuf, wbuf);
+
+ return (1);
+}
+
+/* ARGSUSED */
+struct ibuf *
+imsg_create(struct imsgbuf *ibuf, u_int32_t type, u_int32_t peerid,
+ pid_t pid, u_int16_t datalen)
+{
+ struct ibuf *wbuf;
+ struct imsg_hdr hdr;
+
+ datalen += IMSG_HEADER_SIZE;
+ if (datalen > MAX_IMSGSIZE) {
+ errno = ERANGE;
+ return (NULL);
+ }
+
+ hdr.type = type;
+ hdr.flags = 0;
+ hdr.peerid = peerid;
+ if ((hdr.pid = pid) == 0)
+ hdr.pid = ibuf->pid;
+ if ((wbuf = ibuf_dynamic(datalen, MAX_IMSGSIZE)) == NULL) {
+ return (NULL);
+ }
+ if (imsg_add(wbuf, &hdr, sizeof(hdr)) == -1)
+ return (NULL);
+
+ return (wbuf);
+}
+
+int
+imsg_add(struct ibuf *msg, void *data, u_int16_t datalen)
+{
+ if (datalen)
+ if (ibuf_add(msg, data, datalen) == -1) {
+ ibuf_free(msg);
+ return (-1);
+ }
+ return (datalen);
+}
+
+void
+imsg_close(struct imsgbuf *ibuf, struct ibuf *msg)
+{
+ struct imsg_hdr *hdr;
+
+ hdr = (struct imsg_hdr *)msg->buf;
+
+ hdr->flags &= ~IMSGF_HASFD;
+ if (msg->fd != -1)
+ hdr->flags |= IMSGF_HASFD;
+
+ hdr->len = (u_int16_t)msg->wpos;
+
+ ibuf_close(&ibuf->w, msg);
+}
+
+void
+imsg_free(struct imsg *imsg)
+{
+ free(imsg->data);
+}
+
+int
+imsg_get_fd(struct imsgbuf *ibuf)
+{
+ int fd;
+ struct imsg_fd *ifd;
+
+ if ((ifd = TAILQ_FIRST(&ibuf->fds)) == NULL)
+ return (-1);
+
+ fd = ifd->fd;
+ TAILQ_REMOVE(&ibuf->fds, ifd, entry);
+ free(ifd);
+
+ return (fd);
+}
+
+int
+imsg_flush(struct imsgbuf *ibuf)
+{
+ while (ibuf->w.queued)
+ if (msgbuf_write(&ibuf->w) < 0)
+ return (-1);
+ return (0);
+}
+
+void
+imsg_clear(struct imsgbuf *ibuf)
+{
+ int fd;
+
+ msgbuf_clear(&ibuf->w);
+ while ((fd = imsg_get_fd(ibuf)) != -1)
+ close(fd);
+}
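
For context, the receive loop that typically sits on top of imsg_read() and imsg_get() above, e.g. inside a poll or libevent read handler. handle_read() and the MSG_HELLO message type are hypothetical:

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/uio.h>

#include "imsg.h"

#define MSG_HELLO	1		/* hypothetical message type */

static void
handle_read(struct imsgbuf *ibuf)
{
	struct imsg imsg;
	ssize_t n;

	if ((n = imsg_read(ibuf)) == -1 || n == 0)
		return;			/* read error or peer closed */

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			return;		/* invalid header */
		if (n == 0)
			break;		/* no complete imsg buffered yet */
		switch (imsg.hdr.type) {
		case MSG_HELLO:
			/* payload: imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE bytes */
			break;
		}
		imsg_free(&imsg);
	}
}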
diff --git a/old/glougloud/external/imsg.h b/old/glougloud/external/imsg.h
new file mode 100644
index 0000000..25cc2c4
--- /dev/null
+++ b/old/glougloud/external/imsg.h
@@ -0,0 +1,118 @@
+/* $OpenBSD: imsg.h,v 1.2 2010/06/23 07:53:55 nicm Exp $ */
+
+/*
+ * Copyright (c) 2006, 2007 Pierre-Yves Ritschard <pyr@openbsd.org>
+ * Copyright (c) 2006, 2007, 2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _IMSG_H_
+#define _IMSG_H_
+
+/* compat hacks added by laurent */
+#if !defined(__OpenBSD__)
+#define IOV_MAX 1024
+#define getdtablecount(x) 3
+#endif
+
+#define IBUF_READ_SIZE 65535
+#define IMSG_HEADER_SIZE sizeof(struct imsg_hdr)
+#define MAX_IMSGSIZE 16384
+
+struct ibuf {
+ TAILQ_ENTRY(ibuf) entry;
+ u_char *buf;
+ size_t size;
+ size_t max;
+ size_t wpos;
+ size_t rpos;
+ int fd;
+};
+
+struct msgbuf {
+ TAILQ_HEAD(, ibuf) bufs;
+ u_int32_t queued;
+ int fd;
+};
+
+struct ibuf_read {
+ u_char buf[IBUF_READ_SIZE];
+ u_char *rptr;
+ size_t wpos;
+};
+
+struct imsg_fd {
+ TAILQ_ENTRY(imsg_fd) entry;
+ int fd;
+};
+
+struct imsgbuf {
+ TAILQ_HEAD(, imsg_fd) fds;
+ struct ibuf_read r;
+ struct msgbuf w;
+ int fd;
+ pid_t pid;
+};
+
+#define IMSGF_HASFD 1
+
+struct imsg_hdr {
+ u_int32_t type;
+ u_int16_t len;
+ u_int16_t flags;
+ u_int32_t peerid;
+ u_int32_t pid;
+};
+
+struct imsg {
+ struct imsg_hdr hdr;
+ int fd;
+ void *data;
+};
+
+
+/* buffer.c */
+struct ibuf *ibuf_open(size_t);
+struct ibuf *ibuf_dynamic(size_t, size_t);
+int ibuf_add(struct ibuf *, const void *, size_t);
+void *ibuf_reserve(struct ibuf *, size_t);
+void *ibuf_seek(struct ibuf *, size_t, size_t);
+size_t ibuf_size(struct ibuf *);
+size_t ibuf_left(struct ibuf *);
+void ibuf_close(struct msgbuf *, struct ibuf *);
+int ibuf_write(struct msgbuf *);
+void ibuf_free(struct ibuf *);
+void msgbuf_init(struct msgbuf *);
+void msgbuf_clear(struct msgbuf *);
+int msgbuf_write(struct msgbuf *);
+void msgbuf_drain(struct msgbuf *, size_t);
+
+/* imsg.c */
+void imsg_init(struct imsgbuf *, int);
+ssize_t imsg_read(struct imsgbuf *);
+ssize_t imsg_get(struct imsgbuf *, struct imsg *);
+int imsg_compose(struct imsgbuf *, u_int32_t, u_int32_t, pid_t,
+ int, void *, u_int16_t);
+int imsg_composev(struct imsgbuf *, u_int32_t, u_int32_t, pid_t,
+ int, const struct iovec *, int);
+struct ibuf *imsg_create(struct imsgbuf *, u_int32_t, u_int32_t, pid_t,
+ u_int16_t);
+int imsg_add(struct ibuf *, void *, u_int16_t);
+void imsg_close(struct imsgbuf *, struct ibuf *);
+void imsg_free(struct imsg *);
+int imsg_flush(struct imsgbuf *);
+void imsg_clear(struct imsgbuf *);
+
+#endif
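
For context, a sketch of the sending side of the API declared above; IMSG_PING and send_ping() are hypothetical names used only for illustration:

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/uio.h>

#include "imsg.h"

#define IMSG_PING	1		/* hypothetical message type */

static int
send_ping(struct imsgbuf *ibuf, u_int32_t seq)
{
	/* type, peerid, pid (0 = our pid), fd (-1 = none), data, len */
	if (imsg_compose(ibuf, IMSG_PING, 0, 0, -1, &seq, sizeof(seq)) == -1)
		return (-1);
	return (imsg_flush(ibuf));	/* block until the queue drains */
}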
diff --git a/old/glougloud/external/imsgev.c b/old/glougloud/external/imsgev.c
new file mode 100644
index 0000000..6b92d79
--- /dev/null
+++ b/old/glougloud/external/imsgev.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2009 Eric Faurot <eric@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "imsgev.h"
+
+void imsgev_add(struct imsgev *);
+void imsgev_dispatch(int, short, void *);
+void imsgev_disconnect(struct imsgev *, int);
+
+void
+imsgev_init(struct imsgev *iev, int fd, void *data,
+ void (*callback)(struct imsgev *, int, struct imsg *),
+ void (*needfd)(struct imsgev *))
+{
+ imsg_init(&iev->ibuf, fd);
+ iev->terminate = 0;
+
+ iev->data = data;
+ iev->handler = imsgev_dispatch;
+ iev->callback = callback;
+ iev->needfd = needfd;
+
+ iev->events = EV_READ;
+ event_set(&iev->ev, iev->ibuf.fd, iev->events, iev->handler, iev);
+ event_add(&iev->ev, NULL);
+}
+
+int
+imsgev_compose(struct imsgev *iev, u_int16_t type, u_int32_t peerid,
+ uint32_t pid, int fd, void *data, u_int16_t datalen)
+{
+ int r;
+
+ r = imsg_compose(&iev->ibuf, type, peerid, pid, fd, data, datalen);
+ if (r != -1)
+ imsgev_add(iev);
+
+ return (r);
+}
+
+void
+imsgev_close(struct imsgev *iev)
+{
+ iev->terminate = 1;
+ imsgev_add(iev);
+}
+
+void
+imsgev_clear(struct imsgev *iev)
+{
+ event_del(&iev->ev);
+ msgbuf_clear(&iev->ibuf.w);
+ close(iev->ibuf.fd);
+}
+
+void
+imsgev_add(struct imsgev *iev)
+{
+ short events = 0;
+
+ if (!iev->terminate)
+ events = EV_READ;
+ if (iev->ibuf.w.queued || iev->terminate)
+ events |= EV_WRITE;
+
+ /* optimization: skip event_{del/set/add} if already set */
+ if (events == iev->events)
+ return;
+
+ iev->events = events;
+ event_del(&iev->ev);
+ event_set(&iev->ev, iev->ibuf.fd, iev->events, iev->handler, iev);
+ event_add(&iev->ev, NULL);
+}
+
+void
+imsgev_dispatch(int fd, short ev, void *humppa)
+{
+ struct imsgev *iev = humppa;
+ struct imsgbuf *ibuf = &iev->ibuf;
+ struct imsg imsg;
+ ssize_t n;
+
+ iev->events = 0;
+
+ if (ev & EV_READ) {
+ if ((n = imsg_read(ibuf)) == -1) {
+ /* if we don't have enough fds, free one up and retry */
+ if (errno == EAGAIN) {
+ iev->needfd(iev);
+ n = imsg_read(ibuf);
+ }
+
+ if (n == -1) {
+ imsgev_disconnect(iev, IMSGEV_EREAD);
+ return;
+ }
+ }
+ if (n == 0) {
+ /*
+ * Connection is closed for reading, and we assume
+ * it is also closed for writing, so we error out
+ * if write data is pending.
+ */
+ imsgev_disconnect(iev,
+ (iev->ibuf.w.queued) ? IMSGEV_EWRITE : IMSGEV_DONE);
+ return;
+ }
+ }
+
+ if (ev & EV_WRITE) {
+ /*
+ * We wanted to write data out but the connection is either
+	 * closed, or some error occurred. Both cases are not recoverable
+ * from the imsg perspective, so we treat it as a WRITE error.
+ */
+ if ((n = msgbuf_write(&ibuf->w)) != 1) {
+ imsgev_disconnect(iev, IMSGEV_EWRITE);
+ return;
+ }
+ }
+
+ while (iev->terminate == 0) {
+ if ((n = imsg_get(ibuf, &imsg)) == -1) {
+ imsgev_disconnect(iev, IMSGEV_EIMSG);
+ return;
+ }
+ if (n == 0)
+ break;
+ iev->callback(iev, IMSGEV_IMSG, &imsg);
+ imsg_free(&imsg);
+ }
+
+ if (iev->terminate && iev->ibuf.w.queued == 0) {
+ imsgev_disconnect(iev, IMSGEV_DONE);
+ return;
+ }
+
+ imsgev_add(iev);
+}
+
+void
+imsgev_disconnect(struct imsgev *iev, int code)
+{
+ iev->callback(iev, code, NULL);
+}
diff --git a/old/glougloud/external/imsgev.h b/old/glougloud/external/imsgev.h
new file mode 100644
index 0000000..a0d0947
--- /dev/null
+++ b/old/glougloud/external/imsgev.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2009 Eric Faurot <eric@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __IMSGEV_H__
+#define __IMSGEV_H__
+
+#include <event.h>
+#include "imsg.h"
+
+#define IMSG_LEN(m) ((m)->hdr.len - IMSG_HEADER_SIZE)
+
+struct imsgev {
+ struct imsgbuf ibuf;
+ void (*handler)(int, short, void *);
+ struct event ev;
+ void *data;
+ short events;
+ int terminate;
+ void (*callback)(struct imsgev *, int, struct imsg *);
+ void (*needfd)(struct imsgev *);
+};
+
+#define IMSGEV_IMSG 0
+#define IMSGEV_DONE 1
+#define IMSGEV_EREAD 2
+#define IMSGEV_EWRITE 3
+#define IMSGEV_EIMSG 4
+
+void imsgev_init(struct imsgev *, int, void *, void (*)(struct imsgev *,
+ int, struct imsg *), void (*)(struct imsgev *));
+int imsgev_compose(struct imsgev *, u_int16_t, u_int32_t, u_int32_t, int,
+ void *, u_int16_t);
+void imsgev_close(struct imsgev *);
+void imsgev_clear(struct imsgev *);
+
+#endif /* __IMSGEV_H__ */
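
For context, a sketch of attaching a descriptor to this imsgev layer with libevent; on_imsg(), on_needfd() and start() are hypothetical, and real handlers depend on the daemon using them:

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/uio.h>

#include <event.h>

#include "imsgev.h"

static void
on_imsg(struct imsgev *iev, int code, struct imsg *imsg)
{
	if (code != IMSGEV_IMSG) {	/* IMSGEV_DONE or an error code */
		imsgev_clear(iev);
		return;
	}
	/* dispatch on imsg->hdr.type; payload length is IMSG_LEN(imsg) */
}

static void
on_needfd(struct imsgev *iev)
{
	/* close a spare descriptor so imsg_read() can retry */
}

static void
start(int fd)
{
	static struct imsgev iev;

	event_init();
	imsgev_init(&iev, fd, NULL, on_imsg, on_needfd);
	event_dispatch();		/* libevent main loop */
}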
diff --git a/old/glougloud/external/queue.h b/old/glougloud/external/queue.h
new file mode 100644
index 0000000..622301d
--- /dev/null
+++ b/old/glougloud/external/queue.h
@@ -0,0 +1,568 @@
+/* $OpenBSD: queue.h,v 1.36 2012/04/11 13:29:14 naddy Exp $ */
+/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC))
+#define _Q_INVALIDATE(a) (a) = ((void *)-1)
+#else
+#define _Q_INVALIDATE(a)
+#endif
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List access methods.
+ */
+#define SLIST_FIRST(head) ((head)->slh_first)
+#define SLIST_END(head) NULL
+#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head))
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_FOREACH(var, head, field) \
+ for((var) = SLIST_FIRST(head); \
+ (var) != SLIST_END(head); \
+ (var) = SLIST_NEXT(var, field))
+
+#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = SLIST_FIRST(head); \
+ (var) && ((tvar) = SLIST_NEXT(var, field), 1); \
+ (var) = (tvar))
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_INIT(head) { \
+ SLIST_FIRST(head) = SLIST_END(head); \
+}
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (0)
+
+#define SLIST_REMOVE_AFTER(elm, field) do { \
+ (elm)->field.sle_next = (elm)->field.sle_next->field.sle_next; \
+} while (0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (0)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->slh_first; \
+ \
+ while (curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = \
+ curelm->field.sle_next->field.sle_next; \
+ _Q_INVALIDATE((elm)->field.sle_next); \
+ } \
+} while (0)
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List access methods
+ */
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_END(head) NULL
+#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head))
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_FOREACH(var, head, field) \
+ for((var) = LIST_FIRST(head); \
+ (var)!= LIST_END(head); \
+ (var) = LIST_NEXT(var, field))
+
+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = LIST_FIRST(head); \
+ (var) && ((tvar) = LIST_NEXT(var, field), 1); \
+ (var) = (tvar))
+
+/*
+ * List functions.
+ */
+#define LIST_INIT(head) do { \
+ LIST_FIRST(head) = LIST_END(head); \
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (0)
+
+#define LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ _Q_INVALIDATE((elm)->field.le_prev); \
+ _Q_INVALIDATE((elm)->field.le_next); \
+} while (0)
+
+#define LIST_REPLACE(elm, elm2, field) do { \
+ if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
+ (elm2)->field.le_next->field.le_prev = \
+ &(elm2)->field.le_next; \
+ (elm2)->field.le_prev = (elm)->field.le_prev; \
+ *(elm2)->field.le_prev = (elm2); \
+ _Q_INVALIDATE((elm)->field.le_prev); \
+ _Q_INVALIDATE((elm)->field.le_next); \
+} while (0)
+
+/*
+ * Simple queue definitions.
+ */
+#define SIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define SIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define SIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define SIMPLEQ_END(head) NULL
+#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+#define SIMPLEQ_FOREACH(var, head, field) \
+ for((var) = SIMPLEQ_FIRST(head); \
+ (var) != SIMPLEQ_END(head); \
+ (var) = SIMPLEQ_NEXT(var, field))
+
+#define SIMPLEQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = SIMPLEQ_FIRST(head); \
+ (var) && ((tvar) = SIMPLEQ_NEXT(var, field), 1); \
+ (var) = (tvar))
+
+/*
+ * Simple queue functions.
+ */
+#define SIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (0)
+
+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (0)
+
+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (0)
+
+#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
+ == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/*
+ * tail queue access methods
+ */
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_END(head) NULL
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+/* XXX */
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define TAILQ_EMPTY(head) \
+ (TAILQ_FIRST(head) == TAILQ_END(head))
+
+#define TAILQ_FOREACH(var, head, field) \
+ for((var) = TAILQ_FIRST(head); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_NEXT(var, field))
+
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = TAILQ_FIRST(head); \
+ (var) != TAILQ_END(head) && \
+ ((tvar) = TAILQ_NEXT(var, field), 1); \
+ (var) = (tvar))
+
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for((var) = TAILQ_LAST(head, headname); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_PREV(var, headname, field))
+
+#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
+ for ((var) = TAILQ_LAST(head, headname); \
+ (var) != TAILQ_END(head) && \
+ ((tvar) = TAILQ_PREV(var, headname, field), 1); \
+ (var) = (tvar))
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+ _Q_INVALIDATE((elm)->field.tqe_prev); \
+ _Q_INVALIDATE((elm)->field.tqe_next); \
+} while (0)
+
+#define TAILQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
+ (elm2)->field.tqe_next->field.tqe_prev = \
+ &(elm2)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm2)->field.tqe_next; \
+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
+ *(elm2)->field.tqe_prev = (elm2); \
+ _Q_INVALIDATE((elm)->field.tqe_prev); \
+ _Q_INVALIDATE((elm)->field.tqe_next); \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head) \
+ { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue access methods
+ */
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define CIRCLEQ_END(head) ((void *)(head))
+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+#define CIRCLEQ_EMPTY(head) \
+ (CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))
+
+#define CIRCLEQ_FOREACH(var, head, field) \
+ for((var) = CIRCLEQ_FIRST(head); \
+ (var) != CIRCLEQ_END(head); \
+ (var) = CIRCLEQ_NEXT(var, field))
+
+#define CIRCLEQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = CIRCLEQ_FIRST(head); \
+ (var) != CIRCLEQ_END(head) && \
+ ((tvar) = CIRCLEQ_NEXT(var, field), 1); \
+ (var) = (tvar))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for((var) = CIRCLEQ_LAST(head); \
+ (var) != CIRCLEQ_END(head); \
+ (var) = CIRCLEQ_PREV(var, field))
+
+#define CIRCLEQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
+	for ((var) = CIRCLEQ_LAST(head);				\
+	    (var) != CIRCLEQ_END(head) &&				\
+	    ((tvar) = CIRCLEQ_PREV(var, field), 1);			\
+	    (var) = (tvar))
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = CIRCLEQ_END(head); \
+ (head)->cqh_last = CIRCLEQ_END(head); \
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = CIRCLEQ_END(head); \
+ if ((head)->cqh_last == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = CIRCLEQ_END(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (0)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+ _Q_INVALIDATE((elm)->field.cqe_prev); \
+ _Q_INVALIDATE((elm)->field.cqe_next); \
+} while (0)
+
+#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
+ CIRCLEQ_END(head)) \
+		(head)->cqh_last = (elm2);				\
+ else \
+ (elm2)->field.cqe_next->field.cqe_prev = (elm2); \
+ if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
+ CIRCLEQ_END(head)) \
+		(head)->cqh_first = (elm2);				\
+ else \
+ (elm2)->field.cqe_prev->field.cqe_next = (elm2); \
+ _Q_INVALIDATE((elm)->field.cqe_prev); \
+ _Q_INVALIDATE((elm)->field.cqe_next); \
+} while (0)
+
+#endif /* !_SYS_QUEUE_H_ */
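
For context, a minimal sketch exercising the TAILQ macros from this header, the queue flavour the imsg code above relies on; struct node and nodehead are hypothetical:

#include <stdio.h>
#include <stdlib.h>

#include "queue.h"

struct node {
	int			value;
	TAILQ_ENTRY(node)	entry;
};
TAILQ_HEAD(nodehead, node);

int
main(void)
{
	struct nodehead head;
	struct node *n, *tmp;
	int i;

	TAILQ_INIT(&head);
	for (i = 0; i < 3; i++) {
		if ((n = calloc(1, sizeof(*n))) == NULL)
			return (1);
		n->value = i;
		TAILQ_INSERT_TAIL(&head, n, entry);
	}
	/* the _SAFE variant allows freeing the current element */
	TAILQ_FOREACH_SAFE(n, &head, entry, tmp) {
		printf("%d\n", n->value);
		TAILQ_REMOVE(&head, n, entry);
		free(n);
	}
	return (0);
}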