3 * Timeslice infrastructure
6 INTERFACE [sched_fixed_prio]:
9 #include "member_offs.h"
12 #include "ready_queue_fp.h"
// Scheduling context of a thread under the fixed-priority scheduler.
// Carries the priority / time-slice state and the list linkage
// (cxx::D_list_item) used by the per-CPU ready queue.
// NOTE(review): this is a sampled view -- access specifiers and data
// members (e.g. _prio, _quantum, _left) are elided between these lines.
15 class Sched_context : public cxx::D_list_item
// Kernel debugger (Jdb) views need access to the internal list linkage.
18 friend class Jdb_list_timeouts;
19 friend class Jdb_thread_list;
22 friend struct Jdb_thread_list_policy;
// List type used by Ready_queue_fp to chain Sched_contexts per priority.
25 typedef cxx::Sd_list<Sched_context> Fp_list;
// Per-CPU ready queue: the generic fixed-prio queue extended with the
// currently active Sched_context and a schedule()-in-progress marker.
27 class Ready_queue : public Ready_queue_fp<Sched_context>
// Non-null while schedule() is running on this CPU (see
// schedule_in_progress()/reset_schedule_in_progress() below).
30 Context *schedule_in_progress;
// Make s the currently active scheduling context of this queue
// (activate(0) invalidates it -- see invalidate_sched()).
31 void activate(Sched_context *s)
32 { _current_sched = s; }
33 Sched_context *current_sched() const { return _current_sched; }
36 Sched_context *_current_sched;
// A Sched_context is embedded in its Context; recover the owner.
39 Context *context() const { return context_of(this); }
46 friend class Ready_queue_fp<Sched_context>;
50 IMPLEMENTATION [sched_fixed_prio]:
55 #include "std_macros.h"
// Construct a Sched_context with the system-wide default priority and a
// full default time slice (both the total quantum and the remaining time).
// (Constructor body is elided in this sampled view -- presumably empty.)
63 Sched_context::Sched_context()
64 : _prio(Config::Default_prio),
65 _quantum(Config::Default_time_slice),
66 _left(Config::Default_time_slice)
71 * Return priority of Sched_context
// Read-only accessor (body elided in this sampled view; presumably
// returns _prio -- confirm against the full source).
75 Sched_context::prio() const
81 * Set priority of Sched_context
// Mutator (body elided in this sampled view; presumably assigns _prio --
// confirm against the full source).
85 Sched_context::set_prio (unsigned short const prio)
91 * Return full time quantum of Sched_context
// Read-only accessor (body elided in this sampled view; presumably
// returns _quantum -- confirm against the full source).
95 Sched_context::quantum() const
101 * Set full time quantum of Sched_context
// Mutator (body elided in this sampled view; presumably assigns _quantum
// -- confirm against the full source).
105 Sched_context::set_quantum (Unsigned64 const quantum)
111 * Return remaining time quantum of Sched_context
// Read-only accessor (body elided in this sampled view; presumably
// returns _left -- confirm against the full source).
115 Sched_context::left() const
// Refill the remaining time slice back to the full quantum, i.e. reset
// left() to quantum(). Called when a fresh time slice is granted.
120 PUBLIC inline NEEDS[Sched_context::set_left, Sched_context::quantum]
122 Sched_context::replenish()
123 { set_left(quantum()); }
126 * Set remaining time quantum of Sched_context
// Mutator (body elided in this sampled view; presumably assigns _left --
// confirm against the full source).
130 Sched_context::set_left (Unsigned64 const left)
137 * Check if Context is in ready-list.
138 * @return 1 if thread is in ready-list, 0 otherwise
// Delegates to the intrusive list: a Sched_context is "ready" exactly
// when its D_list_item linkage is chained into an Fp_list.
142 Sched_context::in_ready_list() const
144 return Fp_list::in_list(this);
148 * Remove context from ready-list.
// Must be called with the CPU lock held (asserted below). A context that
// is not enqueued is silently ignored. Operates on the ready queue of the
// CPU this code is currently running on.
150 PUBLIC inline NEEDS ["cpu_lock.h", "kdb_ke.h", "std_macros.h"]
152 Sched_context::ready_dequeue()
154 assert_kdb (cpu_lock.test());
156 // Don't dequeue threads which aren't enqueued
157 if (EXPECT_FALSE (!in_ready_list()))
// (early-return statement elided in this sampled view)
160 unsigned cpu = current_cpu();
162 _ready_q.cpu(cpu).dequeue(this);
166 * Enqueue context in ready-list.
// Must be called with the CPU lock held (asserted below). A context that
// is already enqueued is silently ignored. The second enqueue() argument
// flags whether this context is the queue's currently active one --
// presumably this selects head vs. tail placement within its priority
// band; confirm against Ready_queue_fp::enqueue.
170 Sched_context::ready_enqueue(unsigned cpu)
172 assert_kdb(cpu_lock.test())
174 // Don't enqueue threads which are already enqueued
175 if (EXPECT_FALSE (in_ready_list()))
178 Ready_queue &rq = _ready_q.cpu(cpu);
180 rq.enqueue(this, this == rq.current_sched());
// Re-queue this context on the given CPU's ready queue; semantics are
// those of Ready_queue_fp::requeue (presumably moving it to the end of
// its priority band -- confirm against the queue implementation).
185 Sched_context::requeue(unsigned cpu)
187 _ready_q.cpu(cpu).requeue(this);
191 * Return if there is currently a schedule() in progress
// Returns the per-CPU schedule_in_progress marker (a Context pointer;
// non-null while schedule() is running on that CPU).
195 Sched_context::schedule_in_progress(unsigned cpu)
197 return _ready_q.cpu(cpu).schedule_in_progress;
// Clear the per-CPU schedule()-in-progress marker for the given CPU.
202 Sched_context::reset_schedule_in_progress(unsigned cpu)
203 { _ready_q.cpu(cpu).schedule_in_progress = 0; }
207 * Invalidate (expire) currently active global Sched_context.
// Resets the given CPU's active scheduling context to null, forcing the
// next schedule() to pick a context afresh.
211 Sched_context::invalidate_sched(unsigned cpu)
213 _ready_q.cpu(cpu).activate(0);
// True iff this context has a strictly higher priority than sc, i.e.
// would preempt it under fixed-priority scheduling.
218 Sched_context::dominates(Sched_context *sc)
219 { return prio() > sc->prio(); }
223 Sched_context::deblock_refill(unsigned)