| author | 2014-03-12 11:40:15 -0700 |
|---|---|
| committer | 2014-03-12 11:40:15 -0700 |
| commit | 192c028b6ac972df25fd624f94a94d038fbdb66c (patch) |
| tree | 4dd9d13ffd239e4d7c61401f892989742c671fa8 /kernel/sched/deadline.c |
| parent | Merge tag 'for_3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/kishon/linux-phy into usb-next (diff) |
| parent | Linux 3.14-rc6 (diff) |
| download | linux-dev-192c028b6ac972df25fd624f94a94d038fbdb66c.tar.xz, linux-dev-192c028b6ac972df25fd624f94a94d038fbdb66c.zip |
Merge 3.14-rc6 into usb-next
We want the USB fixes in here as well.
Diffstat (limited to 'kernel/sched/deadline.c')
| -rw-r--r-- | kernel/sched/deadline.c | 10 |

1 file changed, 6 insertions, 4 deletions
```diff
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 15cbc17fbf84..6e79b3faa4cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 	return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
 		struct rt_rq *rt_rq = &rq->rt;
 
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
 		/*
 		 * We'll let actual RT tasks worry about the overflow here, we
-		 * have our own CBS to keep us inline -- see above.
+		 * have our own CBS to keep us inline; only account when RT
+		 * bandwidth is relevant.
 		 */
+		if (sched_rt_bandwidth_account(rt_rq))
+			rt_rq->rt_time += delta_exec;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
```
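The interesting part of this merge for kernel/sched/deadline.c is the new guard in update_curr_dl(): -deadline runtime is now charged to the RT runqueue only when sched_rt_bandwidth_account() reports that RT bandwidth accounting is currently relevant, presumably so that rt_time does not keep growing while nothing is draining it. The helper is only declared here (its definition is not part of this diff), so the following is a minimal userspace sketch of that call-or-skip decision, assuming a simplified runqueue model; the model_* names and fields are illustrative stand-ins, not the kernel's actual rt_rq/rt_bandwidth layout.

```c
/*
 * Standalone sketch (not kernel code) of the accounting decision the diff
 * introduces: -deadline runtime is charged to the RT runqueue only while
 * RT bandwidth enforcement is "live". Simplified, hypothetical model.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_rt_rq {
	unsigned long long rt_time;    /* runtime consumed this period */
	unsigned long long rt_runtime; /* runtime budget per period    */
	bool period_timer_active;      /* stands in for the hrtimer    */
};

/* Mirrors the presumed intent of sched_rt_bandwidth_account(): account only
 * while the period timer runs or budget remains, so rt_time cannot grow
 * without ever being reset. */
static bool model_rt_bandwidth_account(const struct model_rt_rq *rt_rq)
{
	return rt_rq->period_timer_active ||
	       rt_rq->rt_time < rt_rq->rt_runtime;
}

/* What update_curr_dl() now does with the time a -deadline task just ran. */
static void model_charge_dl_runtime(struct model_rt_rq *rt_rq,
				    unsigned long long delta_exec)
{
	if (model_rt_bandwidth_account(rt_rq))
		rt_rq->rt_time += delta_exec;
	/* else: enforcement is idle, charging would only accumulate */
}

int main(void)
{
	struct model_rt_rq rq = { .rt_time = 950000ULL,
				  .rt_runtime = 950000ULL,
				  .period_timer_active = false };

	model_charge_dl_runtime(&rq, 10000ULL);  /* skipped: budget spent */
	printf("rt_time = %llu\n", rq.rt_time);  /* still 950000 */

	rq.period_timer_active = true;
	model_charge_dl_runtime(&rq, 10000ULL);  /* accounted */
	printf("rt_time = %llu\n", rq.rt_time);  /* now 960000 */
	return 0;
}
```

In this toy model the first charge is skipped (budget already consumed, timer idle) and the second lands once the period timer is marked active; the real code makes the same choice, but under rt_runtime_lock and against the actual rt_bandwidth state in kernel/sched/rt.c.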
