path: root/usr.sbin/smtpd/scheduler_ramqueue.c
author     gilles <gilles@openbsd.org>  2012-01-31 21:05:26 +0000
committer  gilles <gilles@openbsd.org>  2012-01-31 21:05:26 +0000
commit     c6369e99a8383d98678892f00c656135ef4a49dc (patch)
tree       6cd87f8a0d93b43a06dcc40ad6bc5007b16807b9 /usr.sbin/smtpd/scheduler_ramqueue.c
parent     redirect stdout to /dev/null instead of closing it (diff)
download   wireguard-openbsd-c6369e99a8383d98678892f00c656135ef4a49dc.tar.xz
           wireguard-openbsd-c6369e99a8383d98678892f00c656135ef4a49dc.zip
Fix an issue observed this weekend while flooding ajacoutot@:

We keep track of available fds to prevent scheduling of messages if we know that we are going to fail. However, since the envelope is not removed from the scheduler, it gets rescheduled right away, leading to a busy loop in the scheduler. We now flag the mda/mta processes as BUSY and do not schedule envelopes that target a BUSY process.

Also fix a potential bug that could lead to a use-after-free when doing a batch/message/host traversal of schedulable envelopes.

While at it, fix a misuse of env->sc_opts as env->sc_flags; it was not really causing any issue since the misuse was consistent.
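The two mechanisms described above can be summarized in a short standalone sketch. The snippet below is illustrative only and does not use smtpd's real structures: the flag names, the struct batch and struct iter types, and the schedulable()/remove_envelope() helpers are hypothetical stand-ins. It shows (a) skipping envelopes whose destination process is paused or busy, and (b) clearing an iterator's pointer when the object it references is freed during removal, which is the pattern the diff below applies to batch, message, and host iterators.

/*
 * Minimal, self-contained sketch of the two ideas in this commit, using
 * hypothetical types rather than the actual smtpd structures.
 */
#include <stdio.h>
#include <stdlib.h>

#define FLAG_MDA_PAUSED 0x01
#define FLAG_MDA_BUSY   0x02
#define FLAG_MTA_PAUSED 0x04
#define FLAG_MTA_BUSY   0x08

enum dest { DEST_MDA, DEST_MTA };

struct batch {
        enum dest        type;
        int              n_envelopes;
};

struct iter {
        struct batch    *batch;         /* NULL once the batch is gone */
};

static unsigned int sc_flags;

/* Skip scheduling when the target process cannot make progress. */
static int
schedulable(const struct batch *b)
{
        if (b->type == DEST_MDA &&
            (sc_flags & (FLAG_MDA_PAUSED|FLAG_MDA_BUSY)))
                return 0;
        if (b->type == DEST_MTA &&
            (sc_flags & (FLAG_MTA_PAUSED|FLAG_MTA_BUSY)))
                return 0;
        return 1;
}

/*
 * Remove one envelope; if that empties the batch, free it and clear the
 * iterator's pointer so the caller cannot dereference freed memory.
 */
static void
remove_envelope(struct iter *it, struct batch *b)
{
        if (--b->n_envelopes == 0) {
                if (it != NULL && it->batch == b)
                        it->batch = NULL;       /* invalidate, don't dangle */
                free(b);
        }
}

int
main(void)
{
        struct batch    *b = calloc(1, sizeof(*b));
        struct iter      it;

        if (b == NULL)
                return 1;
        b->type = DEST_MTA;
        b->n_envelopes = 1;
        it.batch = b;

        sc_flags |= FLAG_MTA_BUSY;
        printf("schedulable: %d\n", schedulable(b));       /* 0: MTA is busy */

        remove_envelope(&it, b);
        printf("iterator valid: %d\n", it.batch != NULL);  /* 0: cleared */
        return 0;
}

Clearing the iterator at removal time keeps the fetch path cheap: as the added checks in the diff show, scheduler_ramqueue_fetch() only has to test the pointer for NULL before dereferencing it.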
Diffstat (limited to 'usr.sbin/smtpd/scheduler_ramqueue.c')
-rw-r--r--  usr.sbin/smtpd/scheduler_ramqueue.c | 40
1 file changed, 32 insertions(+), 8 deletions(-)
diff --git a/usr.sbin/smtpd/scheduler_ramqueue.c b/usr.sbin/smtpd/scheduler_ramqueue.c
index aeaeae3b954..c622ae4c576 100644
--- a/usr.sbin/smtpd/scheduler_ramqueue.c
+++ b/usr.sbin/smtpd/scheduler_ramqueue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: scheduler_ramqueue.c,v 1.3 2012/01/30 10:02:55 chl Exp $ */
+/* $OpenBSD: scheduler_ramqueue.c,v 1.4 2012/01/31 21:05:26 gilles Exp $ */
/*
* Copyright (c) 2012 Gilles Chehade <gilles@openbsd.org>
@@ -118,7 +118,7 @@ static void scheduler_ramqueue_init(void);
static int scheduler_ramqueue_setup(time_t, time_t);
static int scheduler_ramqueue_next(u_int64_t *, time_t *);
static void scheduler_ramqueue_insert(struct envelope *);
-static void scheduler_ramqueue_remove(u_int64_t);
+static void scheduler_ramqueue_remove(void *, u_int64_t);
static void *scheduler_ramqueue_host(char *);
static void *scheduler_ramqueue_message(u_int32_t);
static void *scheduler_ramqueue_batch(u_int64_t);
@@ -263,10 +263,10 @@ scheduler_ramqueue_next(u_int64_t *evpid, time_t *sched)
log_debug("scheduler_ramqueue: next");
TAILQ_FOREACH(rq_evp, &ramqueue.queue, queue_entry) {
if (rq_evp->rq_batch->type == D_MDA)
- if (env->sc_opts & SMTPD_MDA_PAUSED)
+ if (env->sc_flags & (SMTPD_MDA_PAUSED|SMTPD_MDA_BUSY))
continue;
if (rq_evp->rq_batch->type == D_MTA)
- if (env->sc_opts & SMTPD_MTA_PAUSED)
+ if (env->sc_flags & (SMTPD_MTA_PAUSED|SMTPD_MTA_BUSY))
continue;
if (evpid)
*evpid = rq_evp->evpid;
@@ -331,8 +331,9 @@ scheduler_ramqueue_insert(struct envelope *envelope)
}
static void
-scheduler_ramqueue_remove(u_int64_t evpid)
+scheduler_ramqueue_remove(void *hdl, u_int64_t evpid)
{
+ struct ramqueue_iter *iter = hdl;
struct ramqueue_batch *rq_batch;
struct ramqueue_message *rq_msg;
struct ramqueue_envelope *rq_evp;
@@ -349,17 +350,33 @@ scheduler_ramqueue_remove(u_int64_t evpid)
TAILQ_REMOVE(&ramqueue.queue, rq_evp, queue_entry);
stat_decrement(STATS_RAMQUEUE_ENVELOPE);
+
/* check if we are the last of a batch */
- if (TAILQ_FIRST(&rq_batch->envelope_queue) == NULL)
+ if (TAILQ_FIRST(&rq_batch->envelope_queue) == NULL) {
ramqueue_remove_batch(rq_host, rq_batch);
+ if (iter != NULL && iter->type == RAMQUEUE_ITER_BATCH) {
+ log_debug("scheduler_ramqueue_remove: batch removed");
+ iter->u.batch = NULL;
+ }
+ }
/* check if we are the last of a message */
- if (RB_ROOT(&rq_msg->evptree) == NULL)
+ if (RB_ROOT(&rq_msg->evptree) == NULL) {
ramqueue_remove_message(rq_msg);
+ if (iter != NULL && iter->type == RAMQUEUE_ITER_MESSAGE) {
+ log_debug("scheduler_ramqueue_remove: message removed");
+ iter->u.message = NULL;
+ }
+ }
/* check if we are the last of a host */
- if (TAILQ_FIRST(&rq_host->batch_queue) == NULL)
+ if (TAILQ_FIRST(&rq_host->batch_queue) == NULL) {
ramqueue_remove_host(rq_host);
+ if (iter != NULL && iter->type == RAMQUEUE_ITER_HOST) {
+ log_debug("scheduler_ramqueue_remove: host removed");
+ iter->u.host = NULL;
+ }
+ }
free(rq_evp);
}
@@ -422,6 +439,7 @@ scheduler_ramqueue_message(u_int32_t msgid)
iter->u.message = rq_msg;
return iter;
+
}
static void *
@@ -453,6 +471,8 @@ scheduler_ramqueue_fetch(void *hdl, u_int64_t *evpid)
switch (iter->type) {
case RAMQUEUE_ITER_HOST: {
+ if (iter->u.host == NULL)
+ return 0;
rq_batch = TAILQ_FIRST(&iter->u.host->batch_queue);
if (rq_batch == NULL)
break;
@@ -464,6 +484,8 @@ scheduler_ramqueue_fetch(void *hdl, u_int64_t *evpid)
}
case RAMQUEUE_ITER_BATCH:
+ if (iter->u.batch == NULL)
+ return 0;
rq_evp = TAILQ_FIRST(&iter->u.batch->envelope_queue);
if (rq_evp == NULL)
break;
@@ -471,6 +493,8 @@ scheduler_ramqueue_fetch(void *hdl, u_int64_t *evpid)
return 1;
case RAMQUEUE_ITER_MESSAGE:
+ if (iter->u.message == NULL)
+ return 0;
rq_evp = RB_ROOT(&iter->u.message->evptree);
if (rq_evp == NULL)
break;