INTERFACE:

#include "per_cpu_data.h"
EXTENSION class Sched_context
{
public:
  class Ready_queue : public Ready_queue_base
  {
  public:
    void set_current_sched(Sched_context *sched);
    void invalidate_sched() { activate(0); }
    bool deblock(Sched_context *sc, Sched_context *crs, bool lazy_q = false);
    void deblock(Sched_context *sc);

    void ready_enqueue(Sched_context *sc)
    {
      assert_kdb (cpu_lock.test());

      // Don't enqueue threads which are already enqueued
      if (EXPECT_FALSE (sc->in_ready_list()))
        return;

      enqueue(sc, sc == current_sched());
    }
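
    /*
     * Example (sketch, not part of the original file): a wakeup path on the
     * local CPU, holding the CPU lock, would make a context runnable via
     *
     *   Sched_context::rq.current().ready_enqueue(receiver->sched());
     *
     * where 'receiver' is a hypothetical Context being woken. The
     * in_ready_list() guard above makes a second call harmless.
     */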
    void ready_dequeue(Sched_context *sc)
    {
      assert_kdb (cpu_lock.test());

      // Don't dequeue threads which aren't enqueued
      if (EXPECT_FALSE (!sc->in_ready_list()))
        return;

      dequeue(sc);
    }
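
    /*
     * Example (sketch): the blocking side is symmetric. A context about to
     * block itself would, with the CPU lock held, remove its Sched_context
     * from the ready queue before switching away:
     *
     *   Sched_context::rq.current().ready_dequeue(current()->sched());
     *
     * 'current()' is assumed here to return the currently running Context.
     */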
    void switch_sched(Sched_context *from, Sched_context *to)
    {
      assert_kdb (cpu_lock.test());

      // If we're leaving the global timeslice, invalidate it. This causes
      // schedule() to select a new timeslice via set_current_sched().
      if (from == current_sched())
        invalidate_sched();

      if (from->in_ready_list())
        {
          dequeue(from);
          enqueue(to, false);
        }
    }
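
    /*
     * Example (sketch): switch_sched() is what a scheduling-parameter
     * change would use to atomically replace one Sched_context with
     * another for a queued thread, e.g. (hypothetical caller):
     *
     *   Sched_context::rq.current().switch_sched(thread->sched(), new_sc);
     *
     * If 'from' was queued, 'new_sc' is queued in its place; if 'from' was
     * the active global timeslice, it is invalidated so schedule() reloads
     * one via set_current_sched().
     */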
    // Context for which a schedule() is currently in progress on this CPU
    Context *schedule_in_progress;

    static Per_cpu<Ready_queue> rq;
  };
};
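
/*
 * Example (sketch): 'rq' is one Ready_queue instance per CPU. Code running
 * on the local CPU accesses its own queue via Per_cpu<>::current(); code
 * operating on another CPU's queue (with appropriate remote locking) uses
 * Per_cpu<>::cpu():
 *
 *   Sched_context::Ready_queue &q  = Sched_context::rq.current();
 *   Sched_context::Ready_queue &qr = Sched_context::rq.cpu(remote_cpu);
 *
 * 'remote_cpu' is a placeholder for a valid CPU number.
 */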
IMPLEMENTATION:

#include "kdb_ke.h"
#include "logdefs.h"
#include "timeout.h"
#include "timer.h"

DEFINE_PER_CPU Per_cpu<Sched_context::Ready_queue> Sched_context::rq;
/**
 * Set currently active global Sched_context.
 */
IMPLEMENT
void
Sched_context::Ready_queue::set_current_sched(Sched_context *sched)
{
  // Save remainder of previous timeslice or refresh it, unless it had
  // been invalidated
  Timeout * const tt = timeslice_timeout.current();
  Unsigned64 clock = Timer::system_clock();
  if (Sched_context *s = current_sched())
    {
      Signed64 left = tt->get_timeout(clock);
      if (left > 0)
        s->set_left(left);
      else
        s->replenish();
    }

  // Program new end-of-timeslice timeout
  tt->set(clock + sched->left(), current_cpu());

  // Make this timeslice current
  activate(sched);
  LOG_SCHED_LOAD(sched);
}
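
/*
 * Example (abridged sketch, not the literal schedule() code): schedule()
 * is the natural caller. After invalidate_sched() has cleared the active
 * timeslice, the next scheduling pass picks a context from the queue and
 * makes its timeslice current:
 *
 *   Sched_context *next = Sched_context::rq.current().next_to_run();
 *   Sched_context::rq.current().set_current_sched(next);
 */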
/**
 * Deblock the given scheduling context, i.e. insert it into the ready queue.
 *
 * \param sc  Sched_context to deblock; it must belong to the current CPU
 */
IMPLEMENT inline NEEDS["kdb_ke.h"]
void
Sched_context::Ready_queue::deblock(Sched_context *sc)
{
  assert_kdb(cpu_lock.test());

  Sched_context *cs = current_sched();
  if (sc != cs)
    deblock_refill(sc);

  ready_enqueue(sc);
}
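
/*
 * Example (sketch): the single-argument form is for callers that need no
 * preemption decision, e.g. simply making a context runnable again
 * (hypothetical caller):
 *
 *   Sched_context::rq.current().deblock(sc);
 *
 * Unless 'sc' is the currently active timeslice, its budget is refilled
 * via deblock_refill() before it is queued.
 */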
/**
 * Deblock the given scheduling context, i.e. insert it into the ready
 * queue, and check whether the current context should be preempted.
 *
 * \param sc      Sched_context to deblock; it must belong to the current CPU
 * \param crs     the Sched_context of the current context
 * \param lazy_q  queue lazily if applicable
 * \return true if a reschedule is necessary, false otherwise
 */
IMPLEMENT inline NEEDS["kdb_ke.h"]
bool
Sched_context::Ready_queue::deblock(Sched_context *sc, Sched_context *crs, bool lazy_q)
{
  assert_kdb(cpu_lock.test());
  Sched_context *cs = current_sched();
  bool res = true;
  if (sc == cs)
    {
      // Deblocking the active timeslice itself: only the current
      // context's scheduling parameters can beat it
      if (crs->dominates(sc))
        res = false;
    }
  else
    {
      deblock_refill(sc);

      if ((EXPECT_TRUE(cs != 0) && cs->dominates(sc)) || crs->dominates(sc))
        res = false;
    }

  if (res && lazy_q)
    return true;

  ready_enqueue(sc);
  return res;
}
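
/*
 * Example (sketch): a typical wakeup with preemption check. The return
 * value tells the caller whether the deblocked context beats both the
 * active timeslice and the current context, i.e. whether it must
 * reschedule ('t' and the call sites are illustrative, not literal):
 *
 *   if (Sched_context::rq.current().deblock(t->sched(), current()->sched(), true))
 *     current()->schedule();
 *
 * With lazy_q set, a winning context is not queued at all: the caller is
 * expected to switch to it right away instead of round-tripping it
 * through the ready queue.
 */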