author	Jens Axboe <jens.axboe@oracle.com>	2009-03-17 09:38:40 +0100
committer	Jens Axboe <jens.axboe@oracle.com>	2009-03-26 11:01:36 +0100
commit	a2a9537ac0b37a5da6fbe7e1e9cb06c524d2a9c4 (patch)
tree	798376ab3cb0fdbea5fc067562e2adfabdb4d9d6
parent	btrfs: get rid of current_is_pdflush() in btrfs_btree_balance_dirty (diff)
Get rid of pdflush_operation() in emergency sync and remount
Open-code a cheesy approach with kevent. The idea here is that we'll add some generic delayed work infrastructure, which probably won't be based on pdflush (or maybe it will, in which case we can just add it back). This is in preparation for getting rid of pdflush completely.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--	fs/super.c	11
-rw-r--r--	fs/sync.c	14
2 files changed, 22 insertions(+), 3 deletions(-)
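For reference, both call sites now open-code the same self-freeing work item pattern. A minimal sketch of that pattern follows; trigger_heavy_lifting(), heavy_lifting_work() and do_heavy_lifting() are hypothetical placeholder names, not part of this patch. The trigger allocates the work_struct with GFP_ATOMIC so it can be called from any context, and the handler frees the item once the deferred job is done.

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Runs later in process context via keventd; frees its own work item. */
static void heavy_lifting_work(struct work_struct *work)
{
	do_heavy_lifting();	/* placeholder for the deferred job */
	kfree(work);		/* allocated by the trigger below */
}

/* Callers may be in atomic context, hence GFP_ATOMIC; if the allocation
 * fails the request is simply dropped, matching the patch's behaviour. */
void trigger_heavy_lifting(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, heavy_lifting_work);
		schedule_work(work);
	}
}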
diff --git a/fs/super.c b/fs/super.c
index 6ce501447ada..dd4acb158b5e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -674,7 +674,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
return 0;
}
-static void do_emergency_remount(unsigned long foo)
+static void do_emergency_remount(struct work_struct *work)
{
struct super_block *sb;
@@ -697,12 +697,19 @@ static void do_emergency_remount(unsigned long foo)
spin_lock(&sb_lock);
}
spin_unlock(&sb_lock);
+ kfree(work);
printk("Emergency Remount complete\n");
}
void emergency_remount(void)
{
- pdflush_operation(do_emergency_remount, 0);
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, do_emergency_remount);
+ schedule_work(work);
+ }
}
/*
diff --git a/fs/sync.c b/fs/sync.c
index a16d53e5fe9d..ec95a69d17aa 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -42,9 +42,21 @@ SYSCALL_DEFINE0(sync)
return 0;
}
+static void do_sync_work(struct work_struct *work)
+{
+ do_sync(0);
+ kfree(work);
+}
+
void emergency_sync(void)
{
- pdflush_operation(do_sync, 0);
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, do_sync_work);
+ schedule_work(work);
+ }
}
/*