author     Andrew Morton <akpm@osdl.org>          2006-03-24 03:18:10 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-03-24 07:33:26 -0800
commit     fa5a734e406b53761fcc5ee22366006f71112c2d (patch)
tree       003a238b9207e38f747bfb119a30fb52f1cd5ae9
parent     [PATCH] HOTPLUG_CPU: avoid hitting too many cachelines in recalc_bh_state() (diff)
[PATCH] balance_dirty_pages_ratelimited: take nr_pages arg
Modify balance_dirty_pages_ratelimited() so that it can take a
number-of-pages-which-I-just-dirtied argument.  For msync().

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
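For context, the new _nr entry point lets a caller that has just dirtied a batch of
pages report the whole batch in one call rather than invoking the single-page wrapper
once per page. A minimal sketch of such a caller, assuming <linux/mm.h> and
<linux/writeback.h> are included; the helper name and the page array are hypothetical
and not part of this patch:

	/* Hypothetical caller: dirty nr pages, then report them as one batch. */
	static void example_dirty_pages(struct address_space *mapping,
					struct page **pages, unsigned long nr)
	{
		unsigned long i;

		for (i = 0; i < nr; i++)
			set_page_dirty(pages[i]);	/* mark each page dirty */

		/* One batched call replaces nr calls to the one-page wrapper. */
		balance_dirty_pages_ratelimited_nr(mapping, nr);
	}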
-rw-r--r--  include/linux/writeback.h | 10
-rw-r--r--  mm/page-writeback.c       | 24
2 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 609565961494..56f92fcbe94a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -99,7 +99,15 @@ int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
void page_writeback_init(void);
-void balance_dirty_pages_ratelimited(struct address_space *mapping);
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+ unsigned long nr_pages_dirtied);
+
+static inline void
+balance_dirty_pages_ratelimited(struct address_space *mapping)
+{
+ balance_dirty_pages_ratelimited_nr(mapping, 1);
+}
+
int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
int sync_page_range(struct inode *inode, struct address_space *mapping,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c1052ee79f01..c67ddc464721 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -256,8 +256,9 @@ static void balance_dirty_pages(struct address_space *mapping)
}
/**
- * balance_dirty_pages_ratelimited - balance dirty memory state
+ * balance_dirty_pages_ratelimited_nr - balance dirty memory state
* @mapping: address_space which was dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
*
* Processes which are dirtying memory should call in here once for each page
* which was newly dirtied. The function will periodically check the system's
@@ -268,10 +269,12 @@ static void balance_dirty_pages(struct address_space *mapping)
* limit we decrease the ratelimiting by a lot, to prevent individual processes
* from overshooting the limit by (ratelimit_pages) each.
*/
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+ unsigned long nr_pages_dirtied)
{
- static DEFINE_PER_CPU(int, ratelimits) = 0;
- long ratelimit;
+ static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
+ unsigned long ratelimit;
+ unsigned long *p;
ratelimit = ratelimit_pages;
if (dirty_exceeded)
@@ -281,15 +284,18 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
* Check the rate limiting. Also, we do not want to throttle real-time
* tasks in balance_dirty_pages(). Period.
*/
- if (get_cpu_var(ratelimits)++ >= ratelimit) {
- __get_cpu_var(ratelimits) = 0;
- put_cpu_var(ratelimits);
+ preempt_disable();
+ p = &__get_cpu_var(ratelimits);
+ *p += nr_pages_dirtied;
+ if (unlikely(*p >= ratelimit)) {
+ *p = 0;
+ preempt_enable();
balance_dirty_pages(mapping);
return;
}
- put_cpu_var(ratelimits);
+ preempt_enable();
}
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
void throttle_vm_writeout(void)
{
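
The page-writeback.c hunk replaces the get_cpu_var()/put_cpu_var() pair with an
explicit preempt_disable()/preempt_enable() section around the per-CPU counter, so
the counter can absorb nr_pages_dirtied in one step and balance_dirty_pages() runs
only when the accumulated total crosses the ratelimit. The same accumulate-and-trigger
pattern is sketched below as a self-contained userspace analogue that uses a
per-thread counter in place of a per-CPU one; the names RATELIMIT,
do_expensive_balance() and ratelimited_nr() are illustrative only, not kernel APIs.

	/*
	 * Userspace analogue of the accumulate-and-trigger pattern above:
	 * a per-thread counter absorbs cheap increments, and the expensive
	 * "balance" step runs only when the batch crosses the threshold.
	 */
	#include <stdio.h>

	#define RATELIMIT 32UL

	static _Thread_local unsigned long ratelimit_count;

	static void do_expensive_balance(void)
	{
		printf("balance triggered\n");	/* stands in for balance_dirty_pages() */
	}

	static void ratelimited_nr(unsigned long nr_dirtied)
	{
		ratelimit_count += nr_dirtied;	/* cheap: bump the local counter */
		if (ratelimit_count >= RATELIMIT) {
			ratelimit_count = 0;	/* reset before the slow path */
			do_expensive_balance();
		}
	}

	int main(void)
	{
		unsigned long i;

		for (i = 0; i < 100; i++)
			ratelimited_nr(1);	/* 100 single-page updates -> 3 triggers */
		return 0;
	}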