author    Tejun Heo <tj@kernel.org>  2012-04-01 14:38:44 -0700
committer Tejun Heo <tj@kernel.org>  2012-04-01 14:38:44 -0700
commit    629ed0b10209ffc4e1d439e5508d52d5e3a090b8 (patch)
tree      69caa493e4ea0714aff247c8415c4fef7ebfe996 /block/blk-cgroup.c
parent    cfq: collapse cfq.h into cfq-iosched.c (diff)
blkcg: move statistics update code to policies

As with the conf/stats file handling code, there is no reason for the stat update code to live in blkcg core with policies calling into it to update them. The current organization is both inflexible and complex.

This patch moves the stat update code into the specific policies. All blkiocg_update_*_stats() functions which deal with BLKIO_POLICY_PROP stats are collapsed into their cfq_blkiocg_update_*_stats() counterparts. blkiocg_update_dispatch_stats() is used by both policies and is duplicated as throtl_update_dispatch_stats() and cfq_blkiocg_update_dispatch_stats(); this duplication will be cleaned up later.

Signed-off-by: Tejun Heo <tj@kernel.org>
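For reference, a minimal sketch of what one collapsed counterpart plausibly looks like on the cfq-iosched.c side. The cfq_blkiocg_update_io_add_stats() name follows the cfq_blkiocg_update_*_stats() scheme named above, and the body simply mirrors the removed blkiocg_update_io_add_stats() shown in the diff below, so the actual patch may differ in detail:

/* hedged sketch, not the literal patch contents */
static void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg,
					    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	/* queue-depth accounting plus group wait/empty bookkeeping,
	 * now done by the policy itself instead of blkcg core */
	blkg_rwstat_add(&stats->queued, rw, 1);
	blkio_end_empty_time(stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}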
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  245
1 file changed, 0 insertions(+), 245 deletions(-)
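The dispatch-stats path is the one duplicated into both policies. Below is a hedged sketch of the throttle-side copy, assuming it keeps the per-cpu pattern of the removed blkiocg_update_dispatch_stats() verbatim: interrupts are disabled so two updates on the same CPU cannot interleave, since the 64-bit counters are not updated atomically on 32-bit machines.

/* hedged sketch, not the literal patch contents */
static void throtl_update_dispatch_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol,
					 uint64_t bytes, bool direction,
					 bool sync)
{
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* per-cpu stats may not be allocated yet; skip accounting */
	if (pd->stats_cpu == NULL)
		return;

	/* exclude a second update on this CPU, e.g. from an IRQ */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);
	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}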
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b963fb4b3995..821a0a393e85 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -63,251 +63,6 @@ struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-/* This should be called with the queue_lock held. */
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_policy_type *pol,
- struct blkio_group *curr_blkg)
-{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
-
- if (blkio_blkg_waiting(&pd->stats))
- return;
- if (blkg == curr_blkg)
- return;
- pd->stats.start_group_wait_time = sched_clock();
- blkio_mark_blkg_waiting(&pd->stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
-{
- unsigned long long now;
-
- if (!blkio_blkg_waiting(stats))
- return;
-
- now = sched_clock();
- if (time_after64(now, stats->start_group_wait_time))
- blkg_stat_add(&stats->group_wait_time,
- now - stats->start_group_wait_time);
- blkio_clear_blkg_waiting(stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void blkio_end_empty_time(struct blkio_group_stats *stats)
-{
- unsigned long long now;
-
- if (!blkio_blkg_empty(stats))
- return;
-
- now = sched_clock();
- if (time_after64(now, stats->start_empty_time))
- blkg_stat_add(&stats->empty_time,
- now - stats->start_empty_time);
- blkio_clear_blkg_empty(stats);
-}
-
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
- struct blkio_policy_type *pol)
-{
- struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
- lockdep_assert_held(blkg->q->queue_lock);
- BUG_ON(blkio_blkg_idling(stats));
-
- stats->start_idle_time = sched_clock();
- blkio_mark_blkg_idling(stats);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
-
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
- struct blkio_policy_type *pol)
-{
- struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- if (blkio_blkg_idling(stats)) {
- unsigned long long now = sched_clock();
-
- if (time_after64(now, stats->start_idle_time))
- blkg_stat_add(&stats->idle_time,
- now - stats->start_idle_time);
- blkio_clear_blkg_idling(stats);
- }
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
-
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
- struct blkio_policy_type *pol)
-{
- struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- blkg_stat_add(&stats->avg_queue_size_sum,
- blkg_rwstat_sum(&stats->queued));
- blkg_stat_add(&stats->avg_queue_size_samples, 1);
- blkio_update_group_wait_time(stats);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
-
-void blkiocg_set_start_empty_time(struct blkio_group *blkg,
- struct blkio_policy_type *pol)
-{
- struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- if (blkg_rwstat_sum(&stats->queued))
- return;
-
- /*
- * group is already marked empty. This can happen if cfqq got new
- * request in parent group and moved to this group while being added
- * to service tree. Just ignore the event and move on.
- */
- if (blkio_blkg_empty(stats))
- return;
-
- stats->start_empty_time = sched_clock();
- blkio_mark_blkg_empty(stats);
-}
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
-
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- struct blkio_policy_type *pol,
- unsigned long dequeue)
-{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- blkg_stat_add(&pd->stats.dequeue, dequeue);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
-#else
-static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_policy_type *pol,
- struct blkio_group *curr_blkg) { }
-static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
-#endif
-
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_policy_type *pol,
- struct blkio_group *curr_blkg, bool direction,
- bool sync)
-{
- struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
- int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- blkg_rwstat_add(&stats->queued, rw, 1);
- blkio_end_empty_time(stats);
- blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
-
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- struct blkio_policy_type *pol,
- bool direction, bool sync)
-{
- struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
- int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- blkg_rwstat_add(&stats->queued, rw, -1);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
-
-void blkiocg_update_timeslice_used(struct blkio_group *blkg,
- struct blkio_policy_type *pol,
- unsigned long time,
- unsigned long unaccounted_time)
-{
- struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- blkg_stat_add(&stats->time, time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
-#endif
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
-
-/*
- * should be called under rcu read lock or queue lock to make sure blkg pointer
- * is valid.
- */
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- struct blkio_policy_type *pol,
- uint64_t bytes, bool direction, bool sync)
-{
- int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- struct blkio_group_stats_cpu *stats_cpu;
- unsigned long flags;
-
- /* If per cpu stats are not allocated yet, don't do any accounting. */
- if (pd->stats_cpu == NULL)
- return;
-
- /*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
- */
- local_irq_save(flags);
-
- stats_cpu = this_cpu_ptr(pd->stats_cpu);
-
- blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
- blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
- blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
-
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
- struct blkio_policy_type *pol,
- uint64_t start_time,
- uint64_t io_start_time, bool direction,
- bool sync)
-{
- struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
- unsigned long long now = sched_clock();
- int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- if (time_after64(now, io_start_time))
- blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
- if (time_after64(io_start_time, start_time))
- blkg_rwstat_add(&stats->wait_time, rw,
- io_start_time - start_time);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
-
-/* Merged stats are per cpu. */
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- struct blkio_policy_type *pol,
- bool direction, bool sync)
-{
- struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
- int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- blkg_rwstat_add(&stats->merged, rw, 1);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
-
/*
* Worker for allocating per cpu stat for blk groups. This is scheduled on
* the system_nrt_wq once there are some groups on the alloc_list waiting