rtime.felk.cvut.cz Git - can-eth-gw-linux.git/blobdiff - kernel/sched/core.c
Merge tag 'kvm-3.8-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[can-eth-gw-linux.git] / kernel / sched / core.c
index 00898f1fb69e9d83b9dda63a866d98f1d0746b6c..0533496b6228cfde3afe20051006683da12e0ca5 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/slab.h>
 #include <linux/init_task.h>
 #include <linux/binfmts.h>
+#include <linux/context_tracking.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -922,6 +923,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                rq->skip_clock_update = 1;
 }
 
+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
+
+/*
+ * register_task_migration_notifier - subscribe @n to task migration events.
+ *
+ * The chain is fired from set_task_cpu() (see atomic_notifier_call_chain()
+ * below) whenever a task moves to a different CPU, with a
+ * struct task_migration_notifier describing the task and the from/to CPUs.
+ * It is an atomic notifier chain, so registered callbacks run in atomic
+ * context and must not sleep.
+ */
+void register_task_migration_notifier(struct notifier_block *n)
+{
+       atomic_notifier_chain_register(&task_migration_notifier, n);
+}
+
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -952,8 +960,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
        trace_sched_migrate_task(p, new_cpu);
 
        if (task_cpu(p) != new_cpu) {
+               struct task_migration_notifier tmn;
+
+               if (p->sched_class->migrate_task_rq)
+                       p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+
+               tmn.task = p;
+               tmn.from_cpu = task_cpu(p);
+               tmn.to_cpu = new_cpu;
+
+               atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
        }
 
        __set_task_cpu(p, new_cpu);
@@ -1524,7 +1542,12 @@ static void __sched_fork(struct task_struct *p)
        p->se.vruntime                  = 0;
        INIT_LIST_HEAD(&p->se.group_node);
 
-#ifdef CONFIG_SMP
+/*
+ * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
+ * removed when useful for applications beyond shares distribution (e.g.
+ * load-balance).
+ */
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
        p->se.avg.runnable_avg_period = 0;
        p->se.avg.runnable_avg_sum = 0;
 #endif
@@ -1890,8 +1913,8 @@ context_switch(struct rq *rq, struct task_struct *prev,
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 #endif
 
+       context_tracking_task_switch(prev, next);
        /* Here we just switch the register state and the stack. */
-       rcu_switch(prev, next);
        switch_to(prev, next, prev);
 
        barrier();
@@ -2915,7 +2938,7 @@ asmlinkage void __sched schedule(void)
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_CONTEXT_TRACKING
 asmlinkage void __sched schedule_user(void)
 {
        /*
@@ -2924,9 +2947,9 @@ asmlinkage void __sched schedule_user(void)
         * we haven't yet exited the RCU idle mode. Do it here manually until
         * we find a better solution.
         */
-       rcu_user_exit();
+       user_exit();
        schedule();
-       rcu_user_enter();
+       user_enter();
 }
 #endif
 
@@ -3031,7 +3054,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
        /* Catch callers which need to be fixed */
        BUG_ON(ti->preempt_count || !irqs_disabled());
 
-       rcu_user_exit();
+       user_exit();
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                local_irq_enable();
@@ -4478,6 +4501,7 @@ static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 void sched_show_task(struct task_struct *p)
 {
        unsigned long free = 0;
+       int ppid;
        unsigned state;
 
        state = p->state ? __ffs(p->state) + 1 : 0;
@@ -4497,8 +4521,11 @@ void sched_show_task(struct task_struct *p)
 #ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
 #endif
+       rcu_read_lock();
+       ppid = task_pid_nr(rcu_dereference(p->real_parent));
+       rcu_read_unlock();
        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
-               task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
+               task_pid_nr(p), ppid,
                (unsigned long)task_thread_info(p)->flags);
 
        show_stack(p, NULL);
@@ -7472,7 +7499,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
                            struct task_group, css);
 }
 
-static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
 {
        struct task_group *tg, *parent;
 
@@ -7489,7 +7516,7 @@ static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
        return &tg->css;
 }
 
-static void cpu_cgroup_destroy(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup *cgrp)
 {
        struct task_group *tg = cgroup_tg(cgrp);
 
@@ -7849,8 +7876,8 @@ static struct cftype cpu_files[] = {
 
 struct cgroup_subsys cpu_cgroup_subsys = {
        .name           = "cpu",
-       .create         = cpu_cgroup_create,
-       .destroy        = cpu_cgroup_destroy,
+       .css_alloc      = cpu_cgroup_css_alloc,
+       .css_free       = cpu_cgroup_css_free,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
@@ -7873,7 +7900,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 struct cpuacct root_cpuacct;
 
 /* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
 {
        struct cpuacct *ca;
 
@@ -7903,7 +7930,7 @@ out:
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup *cgrp)
+static void cpuacct_css_free(struct cgroup *cgrp)
 {
        struct cpuacct *ca = cgroup_ca(cgrp);
 
@@ -8074,9 +8101,15 @@ void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 
 struct cgroup_subsys cpuacct_subsys = {
        .name = "cpuacct",
-       .create = cpuacct_create,
-       .destroy = cpuacct_destroy,
+       .css_alloc = cpuacct_css_alloc,
+       .css_free = cpuacct_css_free,
        .subsys_id = cpuacct_subsys_id,
        .base_cftypes = files,
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
+
+/*
+ * dump_cpu_task - print a header line and then the state, stack usage and
+ * backtrace of the task currently running on @cpu, via sched_show_task().
+ *
+ * NOTE(review): cpu_curr(@cpu) is sampled without pinning the remote task,
+ * so the dump is inherently racy against the task being switched out --
+ * acceptable for a diagnostic dump, but callers should not rely on it
+ * describing a stable snapshot.
+ */
+void dump_cpu_task(int cpu)
+{
+       pr_info("Task dump for CPU %d:\n", cpu);
+       sched_show_task(cpu_curr(cpu));
+}