commit da6e1a32fb8d7539a27f699c8671f64d7fefd0cc
tree   8faa7713ad1333b30d8ba668627251dbc1e0b4ad
parent [PATCH] knfsd: fix a race in closing NFSd connections
author    Neil Brown <neilb@suse.de>  2007-02-08 14:20:37 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-09 09:25:47 -0800
[PATCH] md: avoid possible BUG_ON in md bitmap handling
md/bitmap tracks how many active write requests are pending on the blocks
associated with each bit in the bitmap, so that it knows when it can clear
the bit (when the count hits zero).

The counter has 14 bits of space, so if there are ever more than 16383
pending writes, we cannot cope.

Currently the code just calls BUG_ON, as "all" drivers have request queue
limits much smaller than this.  However it seems that some don't.
Apparently some multipath configurations can allow more than 16383
concurrent write requests.

So, in this unlikely situation, instead of calling BUG_ON we now wait for
the count to drop down a bit.  This requires a new wait_queue_head, some
waiting code, and a wakeup call.

Tested by limiting the counter to 20 instead of 16383 (writes go a lot
slower in that case...).

Signed-off-by: Neil Brown <neilb@suse.de>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 drivers/md/bitmap.c         | 22 +++++++++++++++++++++-
 include/linux/raid/bitmap.h |  1 +
 2 files changed, 22 insertions(+), 1 deletion(-)
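The patch below replaces the BUG_ON with a sleep-until-woken scheme: bitmap_startwrite()
waits on the new overflow_wait queue whenever the 14-bit counter is saturated, and
bitmap_endwrite() wakes the waiters as soon as the counter drops away from COUNTER_MAX.
As a rough userspace analogue (not the kernel code itself), the sketch below expresses
the same idea with a pthread mutex and condition variable in place of bitmap->lock and
the wait_queue_head; the names struct write_counter, WRITE_COUNTER_MAX, start_write()
and end_write() are invented for illustration and do not exist in the kernel sources.

#include <pthread.h>

#define WRITE_COUNTER_MAX 16383                 /* analogue of the 14-bit COUNTER_MAX */

struct write_counter {
	pthread_mutex_t lock;                   /* stands in for bitmap->lock */
	pthread_cond_t  overflow_wait;          /* stands in for bitmap->overflow_wait */
	int             count;                  /* pending writes behind one bitmap bit */
};

static struct write_counter wc = {
	.lock          = PTHREAD_MUTEX_INITIALIZER,
	.overflow_wait = PTHREAD_COND_INITIALIZER,
	.count         = 0,
};

/* Analogue of the patched bitmap_startwrite(): block instead of hitting BUG_ON(). */
static void start_write(void)
{
	pthread_mutex_lock(&wc.lock);
	while (wc.count == WRITE_COUNTER_MAX)
		pthread_cond_wait(&wc.overflow_wait, &wc.lock);
	wc.count++;
	pthread_mutex_unlock(&wc.lock);
}

/* Analogue of the patched bitmap_endwrite(): wake waiters only when the counter
 * is dropping away from its maximum, so uncontended completions pay no wakeup cost. */
static void end_write(void)
{
	pthread_mutex_lock(&wc.lock);
	if (wc.count == WRITE_COUNTER_MAX)
		pthread_cond_broadcast(&wc.overflow_wait);
	wc.count--;
	pthread_mutex_unlock(&wc.lock);
}

The kernel version cannot simply loop around schedule() the way pthread_cond_wait()
loops internally: it has to drop a spinlock, kick the request queue via unplug_fn()
so the outstanding writes can actually complete, and then re-take the lock and re-test
the counter.  That is what the prepare_to_wait()/finish_wait() pair and the continue
in the first hunk below accomplish.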
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 11108165e264..059704fbb753 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1160,6 +1160,22 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			return 0;
 		}
 
+		if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
+			DEFINE_WAIT(__wait);
+			/* note that it is safe to do the prepare_to_wait
+			 * after the test as long as we do it before dropping
+			 * the spinlock.
+			 */
+			prepare_to_wait(&bitmap->overflow_wait, &__wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock_irq(&bitmap->lock);
+			bitmap->mddev->queue
+				->unplug_fn(bitmap->mddev->queue);
+			schedule();
+			finish_wait(&bitmap->overflow_wait, &__wait);
+			continue;
+		}
+
 		switch(*bmc) {
 		case 0:
 			bitmap_file_set_bit(bitmap, offset);
@@ -1169,7 +1185,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 		case 1:
 			*bmc = 2;
 		}
-		BUG_ON((*bmc & COUNTER_MAX) == COUNTER_MAX);
+
 		(*bmc)++;
 
 		spin_unlock_irq(&bitmap->lock);
@@ -1207,6 +1223,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 		if (!success && ! (*bmc & NEEDED_MASK))
 			*bmc |= NEEDED_MASK;
 
+		if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
+			wake_up(&bitmap->overflow_wait);
+
 		(*bmc)--;
 		if (*bmc <= 2) {
 			set_page_attr(bitmap,
@@ -1431,6 +1450,7 @@ int bitmap_create(mddev_t *mddev)
 	spin_lock_init(&bitmap->lock);
 	atomic_set(&bitmap->pending_writes, 0);
 	init_waitqueue_head(&bitmap->write_wait);
+	init_waitqueue_head(&bitmap->overflow_wait);
 
 	bitmap->mddev = mddev;
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
index ebd42a3710b4..6db9a4c15355 100644
--- a/include/linux/raid/bitmap.h
+++ b/include/linux/raid/bitmap.h
@@ -247,6 +247,7 @@ struct bitmap {
 	atomic_t pending_writes; /* pending writes to the bitmap file */
 	wait_queue_head_t write_wait;
+	wait_queue_head_t overflow_wait;
 
 };