diff --git a/l4/pkg/l4sys/include/thread.h b/l4/pkg/l4sys/include/thread.h
index c6b213aa82b7b8260ec4106e3f8a7e1c670b2eed..13b3dc5ab1f2a88003e70d0ab03e095a3f1f2ffa 100644
--- a/l4/pkg/l4sys/include/thread.h
+++ b/l4/pkg/l4sys/include/thread.h
@@ -166,7 +166,6 @@ l4_thread_ex_regs_ret_u(l4_cap_idx_t thread, l4_addr_t *ip, l4_addr_t *sp,
  * After this function, any of the following functions may be called in any order.
  * - l4_thread_control_pager()
  * - l4_thread_control_exc_handler()
- * - l4_thread_control_scheduler()
  * - l4_thread_control_bind()
  * - l4_thread_control_alien()
  * - l4_thread_control_ux_host_syscall() (Fiasco-UX only)
@@ -228,25 +227,6 @@ L4_INLINE void
 l4_thread_control_exc_handler_u(l4_cap_idx_t exc_handler,
                                 l4_utcb_t *utcb) L4_NOTHROW;
 
-/**
- * \brief Set the scheduler.
- * \ingroup l4_thread_control_api
- *
- * \param scheduler Capability selector invoked to send a scheduling IPC.
- *
- * \note The scheduler capability selector is interpreted in the task the
- *       thread is bound to (executes in).
- */
-L4_INLINE void
-l4_thread_control_scheduler(l4_cap_idx_t scheduler) L4_NOTHROW;
-
-/**
- * \internal
- * \ingroup l4_thread_control_api
- */
-L4_INLINE void
-l4_thread_control_scheduler_u(l4_cap_idx_t scheduler, l4_utcb_t *utcb) L4_NOTHROW;
-
 /**
  * \brief Bind the thread to a task.
  * \ingroup l4_thread_control_api
@@ -318,21 +298,7 @@ l4_thread_control_ux_host_syscall(int on) L4_NOTHROW;
 L4_INLINE void
 l4_thread_control_ux_host_syscall_u(l4_utcb_t *utcb, int on) L4_NOTHROW;
 
-/**
- * \brief Enable VCPU functionality for the thread.
- * \ingroup l4_thread_control_api
- * \param   on    Boolean value defining the state of the feature.
- *
- */
-L4_INLINE void
-l4_thread_control_vcpu_enable(int on) L4_NOTHROW;
 
-/**
- * \internal
- * \ingroup l4_thread_control_api
- */
-L4_INLINE void
-l4_thread_control_vcpu_enable_u(l4_utcb_t *utcb, int on) L4_NOTHROW;
 
 /**
  * \brief Commit the thread control parameters.
@@ -427,7 +393,9 @@ l4_thread_vcpu_resume_start_u(l4_utcb_t *utcb) L4_NOTHROW;
  *                  for the current thread.
  * \param tag       Tag to use, returned by l4_thread_vcpu_resume_start()
  *
- * \return System call result message tag.
+ * \return System call result message tag. In extended vCPU mode, with
+ * virtual interrupts cleared, a return code of 1 flags an incoming IPC
+ * message, whereas 0 indicates a VM exit (see the usage sketch below).
  *
  * To resume into another address space the capability to the target task
  * must be set in the vCPU-state (\see l4_vcpu_state_t). The task needs
@@ -449,6 +417,64 @@ l4_thread_vcpu_resume_commit_u(l4_cap_idx_t thread,
                                l4_msgtag_t tag, l4_utcb_t *utcb) L4_NOTHROW;
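+
+/*
+ * A minimal sketch of the resume sequence, assuming the calling thread
+ * already has the vCPU feature enabled; handle_ipc() and handle_vm_exit()
+ * are placeholder handlers and error handling is largely elided:
+ *
+ *   for (;;)
+ *     {
+ *       l4_msgtag_t tag = l4_thread_vcpu_resume_start();
+ *       // ... update the vCPU state, optionally add send items ...
+ *       tag = l4_thread_vcpu_resume_commit(L4_INVALID_CAP, tag); // current thread
+ *       long r = l4_error(tag);
+ *       if (r < 0)
+ *         break;             // resume failed
+ *       if (r == 1)
+ *         handle_ipc();      // incoming IPC message (extended vCPU mode)
+ *       else
+ *         handle_vm_exit();  // VM exit, inspect the vCPU state
+ *     }
+ */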
 
 
+/**
+ * \brief Enable or disable the vCPU feature for the thread.
+ * \ingroup l4_thread_api
+ *
+ * \param thread The thread for which the vCPU feature shall be enabled or
+ *               disabled.
+ * \param vcpu_state The virtual address where the kernel shall store the vCPU
+ *                   state in case of vCPU exits. The address must be a valid
+ *                   kernel-user-memory address.
+ * \return System call result message tag.
+ *
+ * This function enables the vCPU feature of the \a thread if \a vcpu_state
+ * is set to a valid kernel-user-memory address, or disables the vCPU feature
+ * if \a vcpu_state is 0.
+ *
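+ * A minimal usage sketch, where `thr` and `vcpu_mem` are placeholders for a
+ * valid thread capability and the address of a previously allocated
+ * kernel-user-memory area:
+ *
+ * \code
+ *   l4_msgtag_t t = l4_thread_vcpu_control(thr, vcpu_mem);
+ *   if (l4_error(t) < 0)
+ *     return -1;                      // enabling the vCPU feature failed
+ *   // ... operate in vCPU mode ...
+ *   l4_thread_vcpu_control(thr, 0);   // disable the vCPU feature again
+ * \endcode
+ *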
+ */
+L4_INLINE l4_msgtag_t
+l4_thread_vcpu_control(l4_cap_idx_t thread, l4_addr_t vcpu_state) L4_NOTHROW;
+
+/**
+ * \internal
+ * \ingroup l4_thread_api
+ */
+L4_INLINE l4_msgtag_t
+l4_thread_vcpu_control_u(l4_cap_idx_t thread, l4_addr_t vcpu_state,
+                         l4_utcb_t *utcb) L4_NOTHROW;
+
+/**
+ * \brief Enable or disable the extended vCPU feature for the thread.
+ * \ingroup l4_thread_api
+ *
+ * \param thread The thread for which the extended vCPU feature shall be
+ *               enabled or disabled.
+ * \param ext_vcpu_state The virtual address where the kernel shall store the
+ *                       vCPU state in case of vCPU exits. The address must be
+ *                       a valid kernel-user-memory address.
+ * \return System call result message tag.
+ *
+ * The extended vCPU feature allows the use of hardware-virtualization
+ * features such as Intel's VT or AMD's SVM.
+ *
+ * This function enables the extended vCPU feature of the \a thread
+ * if \a ext_vcpu_state is set to a valid kernel-user-memory address, or
+ * disables the extended vCPU feature if \a ext_vcpu_state is 0.
+ *
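+ * Usage mirrors l4_thread_vcpu_control(); a minimal sketch, where `thr` and
+ * `ext_vcpu_mem` are placeholders for the thread capability and a
+ * kernel-user-memory address:
+ *
+ * \code
+ *   if (l4_error(l4_thread_vcpu_control_ext(thr, ext_vcpu_mem)) < 0)
+ *     return -1;   // e.g. no hardware virtualization support available
+ * \endcode
+ *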
+ */
+L4_INLINE l4_msgtag_t
+l4_thread_vcpu_control_ext(l4_cap_idx_t thread, l4_addr_t ext_vcpu_state) L4_NOTHROW;
+
+/**
+ * \internal
+ * \ingroup l4_thread_api
+ */
+L4_INLINE l4_msgtag_t
+l4_thread_vcpu_control_ext_u(l4_cap_idx_t thread, l4_addr_t ext_vcpu_state,
+                             l4_utcb_t *utcb) L4_NOTHROW;
+
+
 /**
  * \brief Register an IRQ that will trigger upon deletion events.
  * \ingroup l4_thread_api
@@ -474,7 +500,7 @@ l4_thread_register_del_irq_u(l4_cap_idx_t thread, l4_cap_idx_t irq,
  * \ingroup l4_thread_api
  *
  * Add modification rules with l4_thread_modify_sender_add() and commit with
- * l4_thread_modify_sender_commit(). To not touch the UTCB between
+ * l4_thread_modify_sender_commit(). Do not touch the UTCB between
  * l4_thread_modify_sender_start() and l4_thread_modify_sender_commit().
  *
  * \see l4_thread_modify_sender_add
@@ -491,7 +517,7 @@ L4_INLINE l4_msgtag_t
 l4_thread_modify_sender_start_u(l4_utcb_t *u) L4_NOTHROW;
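+
+/*
+ * A minimal sketch of such a sequence; `thr` and the match/modify arguments
+ * are placeholder values, their exact meaning is given by the documentation
+ * of l4_thread_modify_sender_add() below:
+ *
+ *   l4_msgtag_t tag = l4_thread_modify_sender_start();
+ *   if (l4_thread_modify_sender_add(match_mask, match,
+ *                                   del_bits, add_bits, &tag) < 0)
+ *     return -1;                               // no room for another rule
+ *   l4_thread_modify_sender_commit(thr, tag);  // apply the rules
+ */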
 
 /**
- * \brief Add a modifition pattern to a sender modifiction sequence.
+ * \brief Add a modification pattern to a sender modification sequence.
  * \ingroup l4_thread_api
  *
  * \param tag        Tag received from l4_thread_modify_sender_start() or
@@ -533,7 +559,7 @@ l4_thread_modify_sender_add_u(l4_umword_t match_mask,
                               l4_msgtag_t *tag, l4_utcb_t *u) L4_NOTHROW;
 
 /**
- * \brief Apply (commit) a sender modifiction sequence.
+ * \brief Apply (commit) a sender modification sequence.
  * \ingroup l4_thread_api
  *
  * \see l4_thread_modify_sender_start
@@ -565,6 +591,8 @@ enum L4_thread_ops
   L4_THREAD_VCPU_RESUME_OP        = 4UL,    /**< VCPU resume */
   L4_THREAD_REGISTER_DELETE_IRQ   = 5UL,    /**< Register an IPC-gate deletion IRQ */
   L4_THREAD_MODIFY_SENDER         = 6UL,    /**< Modify all senders IDs that match the given pattern */
+  L4_THREAD_VCPU_CONTROL          = 7UL,    /**< Enable / disable VCPU feature */
+  L4_THREAD_VCPU_CONTROL_EXT      = L4_THREAD_VCPU_CONTROL | 0x10000, /**< Enable / disable extended vCPU feature */
   L4_THREAD_GDT_X86_OP            = 0x10UL, /**< Gdt */
   L4_THREAD_OPCODE_MASK           = 0xffff, /**< Mask for opcodes */
 };
@@ -583,8 +611,6 @@ enum L4_thread_control_flags
 {
   /** The pager will be given. */
   L4_THREAD_CONTROL_SET_PAGER       = 0x0010000,
-  /** The scheduler will be given. */
-  L4_THREAD_CONTROL_SET_SCHEDULER   = 0x0020000,
   /** The task to bind the thread to will be given. */
   L4_THREAD_CONTROL_BIND_TASK       = 0x0200000,
   /** Alien state of the thread is set. */
@@ -593,8 +619,6 @@ enum L4_thread_control_flags
   L4_THREAD_CONTROL_UX_NATIVE       = 0x0800000,
   /** The exception handler of the thread will be given. */
   L4_THREAD_CONTROL_SET_EXC_HANDLER = 0x1000000,
-  /** The vCPU functionality is set. */
-  L4_THREAD_CONTROL_VCPU_ENABLED    = 0x2000000,
 };
 
 /**
@@ -611,7 +635,6 @@ enum L4_thread_control_mr_indices
   L4_THREAD_CONTROL_MR_IDX_FLAGS       = 0, /**< \see #L4_thread_control_flags. */
   L4_THREAD_CONTROL_MR_IDX_PAGER       = 1, /**< Index for pager cap */
   L4_THREAD_CONTROL_MR_IDX_EXC_HANDLER = 2, /**< Index for exception handler */
-  L4_THREAD_CONTROL_MR_IDX_SCHEDULER   = 3, /**< Index for scheduler */
   L4_THREAD_CONTROL_MR_IDX_FLAG_VALS   = 4, /**< Index for feature values */
   L4_THREAD_CONTROL_MR_IDX_BIND_UTCB   = 5, /**< Index for UTCB address for bind */
   L4_THREAD_CONTROL_MR_IDX_BIND_TASK   = 6, /**< Index for task flex-page for bind */
@@ -684,14 +707,6 @@ l4_thread_control_exc_handler_u(l4_cap_idx_t exc_handler,
   v->mr[L4_THREAD_CONTROL_MR_IDX_EXC_HANDLER]  = exc_handler;
 }
 
-L4_INLINE void
-l4_thread_control_scheduler_u(l4_cap_idx_t scheduler, l4_utcb_t *utcb) L4_NOTHROW
-{
-  l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
-  v->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS]     |= L4_THREAD_CONTROL_SET_SCHEDULER;
-  v->mr[L4_THREAD_CONTROL_MR_IDX_SCHEDULER]  = scheduler;
-}
-
 L4_INLINE void
 l4_thread_control_bind_u(l4_utcb_t *thread_utcb, l4_cap_idx_t task,
                          l4_utcb_t *utcb) L4_NOTHROW
@@ -719,14 +734,6 @@ l4_thread_control_ux_host_syscall_u(l4_utcb_t *utcb, int on) L4_NOTHROW
   v->mr[L4_THREAD_CONTROL_MR_IDX_FLAG_VALS] |= on ? L4_THREAD_CONTROL_UX_NATIVE : 0;
 }
 
-L4_INLINE void
-l4_thread_control_vcpu_enable_u(l4_utcb_t *utcb, int on) L4_NOTHROW
-{
-  l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
-  v->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS]     |= L4_THREAD_CONTROL_VCPU_ENABLED;
-  v->mr[L4_THREAD_CONTROL_MR_IDX_FLAG_VALS] |= on ? L4_THREAD_CONTROL_VCPU_ENABLED : 0;
-}
-
 L4_INLINE l4_msgtag_t
 l4_thread_control_commit_u(l4_cap_idx_t thread, l4_utcb_t *utcb) L4_NOTHROW
 {
@@ -737,7 +744,6 @@ l4_thread_control_commit_u(l4_cap_idx_t thread, l4_utcb_t *utcb) L4_NOTHROW
 }
 
 
-
 L4_INLINE l4_msgtag_t
 l4_thread_yield(void) L4_NOTHROW
 {
@@ -810,12 +816,6 @@ l4_thread_control_exc_handler(l4_cap_idx_t exc_handler) L4_NOTHROW
   l4_thread_control_exc_handler_u(exc_handler, l4_utcb());
 }
 
-L4_INLINE void
-l4_thread_control_scheduler(l4_cap_idx_t scheduler) L4_NOTHROW
-{
-  l4_thread_control_scheduler_u(scheduler, l4_utcb());
-}
-
 
 L4_INLINE void
 l4_thread_control_bind(l4_utcb_t *thread_utcb, l4_cap_idx_t task) L4_NOTHROW
@@ -835,12 +835,6 @@ l4_thread_control_ux_host_syscall(int on) L4_NOTHROW
   l4_thread_control_ux_host_syscall_u(l4_utcb(), on);
 }
 
-L4_INLINE void
-l4_thread_control_vcpu_enable(int on) L4_NOTHROW
-{
-  l4_thread_control_vcpu_enable_u(l4_utcb(), on);
-}
-
 L4_INLINE l4_msgtag_t
 l4_thread_control_commit(l4_cap_idx_t thread) L4_NOTHROW
 {
@@ -898,6 +892,35 @@ l4_thread_register_del_irq(l4_cap_idx_t thread, l4_cap_idx_t irq) L4_NOTHROW
 }
 
 
+L4_INLINE l4_msgtag_t
+l4_thread_vcpu_control_u(l4_cap_idx_t thread, l4_addr_t vcpu_state,
+                         l4_utcb_t *utcb) L4_NOTHROW
+{
+  l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
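+  /* Opcode in MR0, vCPU-state address in MR1; sent as two untyped words. */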
+  v->mr[0] = L4_THREAD_VCPU_CONTROL;
+  v->mr[1] = vcpu_state;
+  return l4_ipc_call(thread, utcb, l4_msgtag(L4_PROTO_THREAD, 2, 0, 0), L4_IPC_NEVER);
+}
+
+L4_INLINE l4_msgtag_t
+l4_thread_vcpu_control(l4_cap_idx_t thread, l4_addr_t vcpu_state) L4_NOTHROW
+{ return l4_thread_vcpu_control_u(thread, vcpu_state, l4_utcb()); }
+
+
+L4_INLINE l4_msgtag_t
+l4_thread_vcpu_control_ext_u(l4_cap_idx_t thread, l4_addr_t ext_vcpu_state,
+                             l4_utcb_t *utcb) L4_NOTHROW
+{
+  l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
+  v->mr[0] = L4_THREAD_VCPU_CONTROL_EXT;
+  v->mr[1] = ext_vcpu_state;
+  return l4_ipc_call(thread, utcb, l4_msgtag(L4_PROTO_THREAD, 2, 0, 0), L4_IPC_NEVER);
+}
+
+L4_INLINE l4_msgtag_t
+l4_thread_vcpu_control_ext(l4_cap_idx_t thread, l4_addr_t ext_vcpu_state) L4_NOTHROW
+{ return l4_thread_vcpu_control_ext_u(thread, ext_vcpu_state, l4_utcb()); }
+
 L4_INLINE l4_msgtag_t
 l4_thread_modify_sender_start_u(l4_utcb_t *u) L4_NOTHROW
 {