3 #include "thread_object.h"
5 class Kernel_thread : public Thread_object
// The in-kernel boot thread: it runs the kernel bootstrap sequence and
// afterwards acts as the idle thread (do_idle() never returns).
// NOTE(review): the class body's access specifiers and braces are elided
// in this view.
9 * Frees the memory of the initcall sections.
11 * Virtually initcall sections are freed by not marking them
12 * reserved in the KIP. This method just invalidates the contents of
13 * the memory, by filling it with some invalid data and may be
16 void free_initcall_section();
// Entered via the asm-level symbol "call_bootstrap"; FIASCO_FASTCALL
// presumably selects a register calling convention — verify in config.
17 void bootstrap() asm ("call_bootstrap") FIASCO_FASTCALL;
18 void bootstrap_arch();
// The idle loop; declared noreturn, so it must never fall off its end.
20 void do_idle() __attribute__((noreturn));
34 #include "delayloop.h"
36 #include "helping_lock.h"
37 #include "kernel_task.h"
38 #include "per_cpu_data_alloc.h"
39 #include "processor.h"
42 #include "thread_state.h"
44 #include "timer_tick.h"
// Construct the kernel thread as a Thread_object of kernel type.
// NOTE(review): the constructor body is elided in this view.
49 Kernel_thread::Kernel_thread() : Thread_object(Thread::Kernel)
// Return the initial kernel stack pointer used to bootstrap this thread.
54 Kernel_thread::init_stack()
55 { return _kernel_sp; }
57 // the kernel bootstrap routine
60 Kernel_thread::bootstrap()
// One-shot kernel startup on the boot CPU.  The statement order matters:
// per-CPU data and scheduler state must be in place before the timer
// tick is enabled.
62 // Initializations done -- Helping_lock can now use helping lock
63 Helping_lock::threading_system_active = true;
65 // we need per CPU data for our never running dummy CPU too
66 // FIXME: we in fact need only the _pending_rqq lock
67 Per_cpu_data_alloc::alloc(Cpu::Invalid);
// Bind this thread to the boot CPU and mark it runnable.
69 set_cpu_of(this, Cpu::boot_cpu()->id());
72 state_change_dirty(0, Thread_ready); // Set myself ready
74 Timer::init_system_clock();
// Register this thread as the idle thread of the current run queue.
75 Sched_context::rq.current().set_idle(this->sched());
// Activate the kernel task (presumably switches to the kernel address
// space — verify against Kernel_task::make_current).
77 Kernel_task::kernel_task()->make_current();
79 // Setup initial timeslice
80 Sched_context::rq.current().set_current_sched(sched());
// Bring up the periodic timer tick; cpu(true) must be CPU 0 here.
82 Timer_tick::setup(cpu(true)); assert (cpu(true) == 0); // currently the boot cpu must be 0
83 Timer_tick::enable(cpu(true));
84 enable_tlb(cpu(true));
// Run constructors of late-initialized per-CPU objects for CPU 0.
88 Per_cpu_data::run_late_ctors(0);
92 printf("Calibrating timer loop... ");
93 // Init delay loop, needs working timer interrupt
102 * NEVER inline this function, because our caller is an initcall
104 IMPLEMENT FIASCO_NOINLINE FIASCO_NORETURN
// NOTE(review): the signature line of this function is elided in this
// view.  Per the comment above it must stay out of line: its caller
// lives in an initcall section that is invalidated right below, so
// inlining would execute freed code.
108 free_initcall_section();
110 // No initcalls after this point!
// Install this thread as the kernel context of the current CPU and
// report to RCU that the CPU is no longer idle.
112 kernel_context(cpu(), this);
114 Rcu::leave_idle(cpu());
115 // init_workload cannot be an initcall, because it fires up the userland
116 // applications which then have access to initcall frames as per kinfo page.
123 // ------------------------------------------------------------------------
124 IMPLEMENTATION [!arch_idle && !tickless_idle]:
// Simple idle operation: halt the CPU until the next interrupt when the
// platform's HLT is usable; otherwise fall through (busy idle).
126 PUBLIC inline NEEDS["processor.h"]
128 Kernel_thread::idle_op()
130 if (Config::hlt_works_ok)
131 Proc::halt(); // stop the CPU, waiting for an int
137 // ------------------------------------------------------------------------
138 IMPLEMENTATION [tickless_idle]:
140 #include <rcupdate.h>
142 EXTENSION class Kernel_thread
// Idle statistics, readable by the kernel debugger via the friend class.
145 friend class Jdb_idle_stats;
// Per-CPU count of idle_op() entries.
146 static Per_cpu<unsigned long> _idle_counter;
// Per-CPU count of entries that reached the deep (tick-off) idle path.
147 static Per_cpu<unsigned long> _deep_idle_counter;
151 DEFINE_PER_CPU Per_cpu<unsigned long> Kernel_thread::_idle_counter;
152 DEFINE_PER_CPU Per_cpu<unsigned long> Kernel_thread::_deep_idle_counter;
154 // template code for arch idle
157 Kernel_thread::idle_op()
// Tickless idle: count the entry, then — if nothing prevents it — stop
// the periodic tick, enter an architecture-specific low-power wait, and
// restore the tick on wakeup.
159 // this version must run with disabled IRQs and a wakeup must continue directly
160 // after the wait for event.
161 auto guard = lock_guard(cpu_lock);
162 unsigned cpu = this->cpu();
163 ++_idle_counter.cpu(cpu);
164 // 1. check for latency requirements that prevent low power modes
165 // 2. check for timeouts on this CPU ignore the idle thread's timeslice
166 // 3. check for RCU work on this cpu
// NOTE(review): the leading clauses of this condition are elided in
// this view; the visible clause skips deep idle while timeouts (other
// than the idle thread's own timeslice timeout) are pending.
168 && !Timeout_q::timeout_queue.cpu(cpu).have_timeouts(timeslice_timeout.cpu(cpu)))
170 ++_deep_idle_counter.cpu(cpu);
// Announce quiescence to RCU, then disable the tick before sleeping.
171 Rcu::enter_idle(cpu);
172 Timer_tick::disable(cpu);
176 // do everything to do to a deep sleep state:
180 arch_tickless_idle(cpu);
// Woken up: re-enable the periodic tick and rejoin RCU.
183 Rcu::leave_idle(cpu);
184 Timer_tick::enable(cpu);