1 IMPLEMENTATION [ia32,amd64,ux]:
7 #include "globals.h" // current()
9 #include "lock_guard.h"
11 #include "thread_state.h"
// NOTE(review): only the signatures of the next three members are visible
// in this chunk; their bodies are elided.
// Presumably copies user-level register state into this context's save
// area -- body not visible, confirm against the full source.
16 Context::spill_user_state()
// Presumably the inverse of spill_user_state() (restores user-level
// register state) -- body not visible, confirm.
21 Context::fill_user_state()
// Const accessor for this context's UTCB -- body not visible, confirm.
26 Context::access_utcb() const
// Return this context's vCPU state area (vcpu_state()).  The unnamed bool
// parameter (default false) is accepted but ignored by this implementation.
31 Context::access_vcpu(bool = false) const
32 { return vcpu_state(); }
34 /** Thread context switch-in.  Called on every re-activation of a thread
35     (switch_exec()).  This method is public only because it is called
36     from assembly code in switch_cpu(). */
40 Context::switchin_context(Context *from)
// Only the thread the CPU already regards as current may be switched in,
// and it must be in a ready state.
42 assert_kdb (this == current());
43 assert_kdb (state() & Thread_ready_mask);
45 // Set kernel-esp in case we want to return to the user.
46 // kmem::kernel_sp() returns a pointer to the kernel SP (in the
47 // TSS) the CPU uses when next switching from user to kernel mode.
48 // regs() + 1 returns a pointer to the end of our kernel stack.
49 Cpu::cpus.cpu(cpu()).kernel_sp() = reinterpret_cast<Address>(regs() + 1);
51 // switch to our page directory if necessary
52 vcpu_aware_space()->switchin_context(from->vcpu_aware_space());
54 // load new segment selectors
57 // update the global UTCB pointer to make the thread find its UTCB
59 Mem_layout::user_utcb_ptr(current_cpu()) = local_id();
62 //---------------------------------------------------------------------------
63 IMPLEMENTATION [ia32 || ux]:
65 PROTECTED inline NEEDS["cpu.h"]
// Load this context's saved data segment selectors (_es, _fs, _gs) into
// the CPU's %es, %fs and %gs registers.  ia32/ux variant; enclosing
// braces are elided in this chunk.
67 Context::load_segments()
69 Cpu::set_es((Unsigned32)_es);
70 Cpu::set_fs((Unsigned32)_fs);
71 Cpu::set_gs((Unsigned32)_gs);
74 PROTECTED inline NEEDS["cpu.h"]
// Counterpart of load_segments(); presumably stores the CPU's current
// %es/%fs/%gs selectors back into _es/_fs/_gs -- body not visible in
// this chunk, confirm against the full source.
76 Context::store_segments()
84 //---------------------------------------------------------------------------
85 IMPLEMENTATION [amd64]:
// NOTE(review): only the signatures of the amd64 variants of the segment
// hooks are visible in this chunk; their bodies (if any) are elided --
// confirm against the full source.
89 Context::load_segments()
94 Context::store_segments()
// Takes the context being switched away from; the parameter is unnamed
// (unused) in this signature.
99 Context::switch_gdt_user_entries(Context *)