Merge tag 'kvm-3.8-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[can-eth-gw-linux.git] / kernel / sched / core.c
index 80f80dfca70e17953fb3809e17a4686c51f63562..0533496b6228cfde3afe20051006683da12e0ca5 100644
@@ -923,6 +923,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                rq->skip_clock_update = 1;
 }
 
+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
+
+void register_task_migration_notifier(struct notifier_block *n)
+{
+       atomic_notifier_chain_register(&task_migration_notifier, n);
+}
+
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -953,8 +960,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
        trace_sched_migrate_task(p, new_cpu);
 
        if (task_cpu(p) != new_cpu) {
+               struct task_migration_notifier tmn;
+
+               if (p->sched_class->migrate_task_rq)
+                       p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+
+               tmn.task = p;
+               tmn.from_cpu = task_cpu(p);
+               tmn.to_cpu = new_cpu;
+
+               atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
        }
 
        __set_task_cpu(p, new_cpu);
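
For illustration only: a consumer of the new chain registers a notifier_block whose callback receives the struct task_migration_notifier filled in above. The sketch below is hypothetical (the demo_* names are not part of this commit, and it assumes the struct and register_task_migration_notifier() prototype are exported via <linux/sched.h> by this series); the callback is invoked from set_task_cpu(), i.e. in scheduler context, so it must stay short and non-blocking.

#include <linux/notifier.h>
#include <linux/sched.h>

/* Hypothetical consumer of the task migration notifier chain. */
static int demo_task_migrate(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct task_migration_notifier *tmn = data;

	/* tmn->task is moving from tmn->from_cpu to tmn->to_cpu. */
	pr_debug("task %d: CPU %d -> CPU %d\n",
		 task_pid_nr(tmn->task), tmn->from_cpu, tmn->to_cpu);
	return NOTIFY_OK;
}

static struct notifier_block demo_migration_nb = {
	.notifier_call = demo_task_migrate,
};

static int __init demo_init(void)
{
	register_task_migration_notifier(&demo_migration_nb);
	return 0;
}
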
@@ -1525,6 +1542,15 @@ static void __sched_fork(struct task_struct *p)
        p->se.vruntime                  = 0;
        INIT_LIST_HEAD(&p->se.group_node);
 
+/*
+ * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
+ * removed when useful for applications beyond shares distribution (e.g.
+ * load-balance).
+ */
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+       p->se.avg.runnable_avg_period = 0;
+       p->se.avg.runnable_avg_sum = 0;
+#endif
 #ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
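
The two avg fields initialised above feed the per-entity load tracking introduced elsewhere in this series: runnable time is accumulated in ~1024us periods, with older periods geometrically decayed so that a contribution halves after 32 periods. The toy user-space model below only illustrates that idea; the names and constants are simplified and it is not the kernel's implementation.

#include <stdio.h>

/* Decay factor y ~= 1002/1024, chosen so that y^32 ~= 0.5. */
#define DECAY_NUM 1002
#define DECAY_DEN 1024

struct toy_sched_avg {
	unsigned long runnable_avg_sum;    /* decayed runnable time */
	unsigned long runnable_avg_period; /* decayed elapsed time  */
};

/* Account one ~1024us period; 'runnable' says whether the task was runnable. */
static void toy_update_avg(struct toy_sched_avg *avg, int runnable)
{
	avg->runnable_avg_sum = avg->runnable_avg_sum * DECAY_NUM / DECAY_DEN
				+ (runnable ? 1024 : 0);
	avg->runnable_avg_period = avg->runnable_avg_period * DECAY_NUM / DECAY_DEN
				   + 1024;
}

int main(void)
{
	struct toy_sched_avg avg = { 0, 0 };
	int i;

	for (i = 0; i < 100; i++)	/* ~100ms runnable ... */
		toy_update_avg(&avg, 1);
	for (i = 0; i < 100; i++)	/* ... then ~100ms not runnable */
		toy_update_avg(&avg, 0);

	/* Recent history is mostly idle, so the tracked load is low. */
	printf("tracked load ~ %lu%%\n",
	       100 * avg.runnable_avg_sum / avg.runnable_avg_period);
	return 0;
}
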
@@ -7473,7 +7499,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
                            struct task_group, css);
 }
 
-static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
 {
        struct task_group *tg, *parent;
 
@@ -7490,7 +7516,7 @@ static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
        return &tg->css;
 }
 
-static void cpu_cgroup_destroy(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup *cgrp)
 {
        struct task_group *tg = cgroup_tg(cgrp);
 
@@ -7850,8 +7876,8 @@ static struct cftype cpu_files[] = {
 
 struct cgroup_subsys cpu_cgroup_subsys = {
        .name           = "cpu",
-       .create         = cpu_cgroup_create,
-       .destroy        = cpu_cgroup_destroy,
+       .css_alloc      = cpu_cgroup_css_alloc,
+       .css_free       = cpu_cgroup_css_free,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
@@ -7874,7 +7900,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 struct cpuacct root_cpuacct;
 
 /* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
 {
        struct cpuacct *ca;
 
@@ -7904,7 +7930,7 @@ out:
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup *cgrp)
+static void cpuacct_css_free(struct cgroup *cgrp)
 {
        struct cpuacct *ca = cgroup_ca(cgrp);
 
@@ -8075,8 +8101,8 @@ void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 
 struct cgroup_subsys cpuacct_subsys = {
        .name = "cpuacct",
-       .create = cpuacct_create,
-       .destroy = cpuacct_destroy,
+       .css_alloc = cpuacct_css_alloc,
+       .css_free = cpuacct_css_free,
        .subsys_id = cpuacct_subsys_id,
        .base_cftypes = files,
 };