2 * arch. independent L4 Types
9 #include "l4_buf_desc.h"
12 typedef Address Local_id;
17 * A reference to a kernel object (capability selector),
18 * as passed from user level.
20 * A capability selector contains an index into the capability table/object
21 * space of a task. The index is usually stored in the most significant bits
* of the binary representation. The twelve least significant bits are used
* to denote the type of operation that shall be invoked and also some flags for
24 * special capabilities, such as the invalid cap, the reply capability, or the
27 * Generally all operations on kernel objects are modelled as message passing
28 * primitives that consist of two phases, the send phase and the receive phase.
29 * However, both phases are optional and come in slightly different flavors.
30 * \see L4_obj_ref::Operation.
* The terms send and receive are from the invoker's point of view. This means,
* a client doing RPC needs a send for sending the requested operation and
* parameters and a receive to receive the return code of the RPC.
39 * Operation codes, stored in the four least significant bits of a capability
44 /// A no-op on the capability (undefined from user level).
48 * \deprecated Use #Ipc_call_ipc.
49 * Deprecated call code, do not use this operation code.
54 * Set this bit to include a send phase.
* In the case of a send phase, the message is sent to the object
57 * denoted by either the capability selector (cap()), the reply capability
58 * (if #Ipc_reply is also set), or to the thread itself (if the cap is the
59 * special self capability).
64 * Set this bit to include a receive phase.
66 * During the receive phase the caller waits for a message from either a
67 * specific sender (closed wait) or from any possible sender
68 * (#Ipc_open_wait) that has a capability to send messages to the invoker.
73 * Set this bit to denote an open-wait receive phase.
75 * An open wait means that the invoker shall wait for a message from any
76 * sender that has a capability to send messages to the invoker. In this
* case the index (cap()) in the capability selector is ignored for the
83 * Set this bit to make the send phase a reply.
85 * A reply operation uses the implicit reply capability that is stored
86 * in per thread storage and can be used only once. The reply capability
87 * also vanishes in the case of an abort due to the caller or a newly
88 * received call operation by the same thread.
94 * Denotes a wait operation. (#Ipc_recv | #Ipc_open_wait).
96 * The wait operation is usually used by servers to implement remote
99 Ipc_wait = Ipc_open_wait | Ipc_recv,
102 * Denotes a combination of a send and a wait operation (#Ipc_send |
103 * #Ipc_recv | #Ipc_open_wait).
105 * \note this is not used for usual RPC, see #Ipc_reply_and_wait for that.
107 Ipc_send_and_wait = Ipc_open_wait | Ipc_send | Ipc_recv,
110 * Denotes a reply and wait operation (#Ipc_send | #Ipc_reply | #Ipc_recv |
113 * This operation is usually used to send replies to RPC requests.
115 Ipc_reply_and_wait = Ipc_open_wait | Ipc_send | Ipc_recv | Ipc_reply,
118 * Denotes a call operation (#Ipc_send | #Ipc_recv).
120 * A call is usually used by a client to invoke an operation on a remote
121 * object and wait for a result. The call operation establishes the
122 * implicit reply capability for the partner thread (see #Ipc_reply)
123 * and enables the implementation of an object to respond to an invocation
124 * without knowledge of the invoker thread.
126 Ipc_call_ipc = Ipc_send | Ipc_recv,
130 * Special capability selectors (e.g., Invalid cap).
135 * Invalid capability selector.
137 Invalid = 1UL << 11UL,
140 * Bit that flags a capability selector as special.
142 Special_bit = 1UL << 11UL,
145 * Value for the self capability selector. This means, the invoking thread
148 Self = (~0UL) << 11UL,
151 * Mask for getting all bits of special capabilities.
153 Special_mask = (~0UL) << 11UL,
162 * Create a special capability selector from \a s.
163 * \param s which special cap selector shall be created
164 * (see L4_obj_ref::Special).
166 * Special capability selectors are the invalid capability and the self
167 * Capability. All special capability selectors must have the #Special_bit
170 L4_obj_ref(Special s = Invalid) : _raw(s) {}
* Create a capability selector from its binary representation.
174 * \param raw the raw binary representation of a capability selector. As
175 * passed from user land.
177 static L4_obj_ref from_raw(Mword raw) { return L4_obj_ref(true, raw); }
180 * Is the capability selector a valid capability (no special capability).
181 * \return true if the capability selector is a valid index into the
182 * capability table, or false if the selector is a special
185 bool valid() const { return !(_raw & Special_bit); }
188 * Is the capability selector a special capability (i.e., not an index
189 * into the capability table).
190 * \return true if the capability selector denotes a special capability
191 * (see L4_obj_ref::Special), or false if this capability is a
192 * valid index into a capability table.
195 bool special() const { return _raw & Special_bit; }
198 * Is this capability selector the special \a self capability.
199 * \return true if this capability is the special self capability for the
202 bool self() const { return special(); }
205 * Get the value of a special capability.
206 * \pre special() == true
207 * \return the value of a special capability selector, see
208 * L4_obj_ref::Special.
210 Special special_cap() const { return Special(_raw & Special_mask); }
211 //bool self() const { return (_raw & Invalid_mask) == Self; }
214 * Does the operation contain a receive phase?
215 * \return true if the operation encoded in the capability selector
216 * comprises a receive phase, see #L4_obj_ref::Ipc_recv.
218 unsigned have_recv() const { return _raw & Ipc_recv; }
221 * Get the index into the capability table.
222 * \pre valid() == true
223 * \return The index into the capability table stored in the capability
224 * selector (i.e., the most significant bits of the selector).
226 unsigned long cap() const { return _raw >> 12; }
229 * Get the operation stored in this selector (see L4_obj_ref::Operation).
230 * \return The operation encoded in the lower 4 bits of the capability
231 * selector, see L4_obj_ref::Operation.
233 Operation op() const { return Operation(_raw & 0xf); }
236 * Get the raw binary representation of this capability selector.
237 * \return the binary representation of this cap selector.
239 Mword raw() const { return _raw; }
242 * Create a valid capability selector for the shifted cap-table index
244 * \param cap the shifted (<< #Cap_shift) capability-table index.
245 * \param op the operation to be encoded in bits 0..3.
247 explicit L4_obj_ref(Mword cap, Operation op = None) : _raw(cap | op) {}
250 * Create a capability selector (index 0) with the given operation.
251 * \param op the operation to be encoded into the capability selector,
252 * see L4_obj_ref::Operation.
254 L4_obj_ref(Operation op) : _raw(op) {}
257 * Compare two capability selectors for equality.
* \param o the right hand side for the comparison.
259 * \note Capability selectors are compared by their binary representation.
261 bool operator == (L4_obj_ref const &o) const { return _raw == o._raw; }
264 L4_obj_ref(bool, Mword raw) : _raw(raw) {}
270 * Flags for unmap operations.
277 * Create a from binary representation.
278 * \param raw the binary representation, as passed from user level.
280 explicit L4_map_mask(Mword raw = 0) : _raw(raw) {}
283 * Get the flags for a full unmap.
284 * \return A L4_map_mask for doing a full unmap operation.
286 static L4_map_mask full() { return L4_map_mask(0xc0000002); }
289 * Get the raw binary representation for the map mask.
290 * \return the binary value of the flags.
292 Mword raw() const { return _raw; }
295 * Unmap from the calling Task too.
296 * \return true if the caller wishes to unmap from its own address space too.
298 Mword self_unmap() const { return _raw & 0x80000000; }
301 * Shall the unmap delete the object if allowed?
302 * \return true if the unmap operation shall also delete the kernel
303 * object if permitted to the caller.
305 Mword do_delete() const { return _raw & 0x40000000; }
313 * Description of a message to the kernel or other thread.
315 * A message tag determines the number of untyped message words (words()), the
316 * number of typed message items (items(), L4_msg_item), some flags, and a
317 * protocol ID. The number of typed and untyped items in the UTCB's message
318 * registers, as well as the flags, control the kernels message passing
319 * mechanism. The protocol ID is not interpreted by the message passing
* itself, however is interpreted by the receiving object itself. In thread-to-
* thread IPC all the contents besides the flags are copied from the sender to
322 * the receiver. The flags on the receiver side contain some information about
323 * the operation itself.
325 * The untyped message words are copied to the receiving object/thread
326 * uninterpreted. The typed items directly following the untyped words in
327 * the message registers are interpreted by the message passing and contain,
328 * for example, map items for memory or kernel objects (see L4_msg_item,
335 * Flags in the message tag.
337 * The input flags control the send phase of an IPC operation. Flags might
338 * have a different semantics in the returned message tag, the result of an
339 * IPC operation, see L4_msg_tag::Output_flags. However, the #Transfer_fpu
340 * and #Schedule flags are passed to the receiver.
345 * The sender is transferring the state of the floating-point unit (FPU)
346 * as part of the message.
347 * \note The receiver needs to agree with that by setting
348 * L4_buf_desc::Inherit_fpu in its buffer descriptor register (BDR).
349 * \note This flag is passed through to the receiver.
351 Transfer_fpu = 0x1000,
354 * The sender does not want to donate its remaining time-slice to the
355 * receiver (partner) thread.
356 * \note This flag is passed on to the receiver.
361 * The sender wants to propagate an incoming call operation to a different
363 * \note Not implemented in Fiasco.OC.
365 * Propagation means that the reply capability shall be passed on to the
366 * receiver of this message to enable a direct reply.
368 Propagate = 0x4000, // snd only flag
372 * Result flags for IPC operations.
374 * These flags are dedicated return values for an IPC operation.
379 * The IPC operation did not succeed, the detailed error code
380 * is in the error register in the UTCB.
385 * The IPC operation did cross CPU boundaries.
* Combination of flags that are not passed through.
392 Rcv_flags = Error | X_cpu,
396 * Protocol IDs that are defined by the kernel ABI.
398 * These protocol IDs are used for either kernel implemented
399 * objects, or used for kernel-synthesized requests to user
404 Label_none = 0, ///< No protocol, the default
406 * Value to allow the current system call for an alien thread.
408 * This value is used in the reply to an alien pre-syscall exception IPC.
410 Label_allow_syscall = 1,
412 Label_irq = -1L, ///< IRQ object protocol.
413 Label_page_fault = -2L, ///< Page fault messages use this protocol.
414 Label_preemption = -3L, ///< Preemption IPC protocol. \note unused.
415 Label_sys_exception = -4L, ///< Sys exception protocol. \note unused.
416 Label_exception = -5L, ///< Exception IPC protocol.
417 Label_sigma0 = -6L, ///< Protocol for sigma0 objects.
418 Label_io_page_fault = -8L, ///< Protocol for I/O-port page faults.
419 Label_kobject = -10L, ///< Control protocol iD for IPC gates (server
421 Label_task = -11L, ///< Protocol ID for task and VM objects.
422 Label_thread = -12L, ///< Protocol ID for thread objects.
423 Label_log = -13L, ///< Protocol ID for log / vcon objects.
424 Label_scheduler = -14L, ///< Protocol ID for scheduler objects.
425 Label_factory = -15L, ///< Protocol ID for factory objects.
426 Label_vm = -16L, ///< Protocol ID for VM objects (used for create
427 /// operations on a factory).
428 Label_semaphore = -20L, ///< Protocol ID for semaphore objects.
435 * L4 timeouts data type.
440 /// Typical timeout constants.
442 Never = 0, ///< Never timeout.
443 Zero = 0x400, ///< Zero timeout.
447 * Create the specified timeout.
448 * @param man mantissa of the send timeout.
449 * @param exp exponent of the send timeout
450 * (exp=0: infinite timeout,
451 * exp>0: t=2^(exp)*man,
452 * man=0 & exp!=0: t=0).
454 L4_timeout(Mword man, Mword exp);
455 L4_timeout(Mword man, Mword exp, bool clock);
* Create a timeout from its binary representation.
459 * @param t the binary timeout value.
461 L4_timeout(unsigned short t = 0);
464 * Get the binary representation of the timeout.
465 * @return The timeout as binary representation.
467 unsigned short raw() const;
470 * Get the receive exponent.
471 * @return The exponent of the receive timeout.
477 * Set the exponent of the receive timeout.
478 * @param er the exponent for the receive timeout (see L4_timeout()).
484 * Get the receive timout's mantissa.
485 * @return The mantissa of the receive timeout (see L4_timeout()).
491 * Set the mantissa of the receive timeout.
* @param mr the mantissa of the receive timeout (see L4_timeout()).
498 * Get the relative receive timeout in microseconds.
499 * @param clock Current value of kernel clock
500 * @return The receive timeout in micro seconds.
502 Unsigned64 microsecs_rel(Unsigned64 clock) const;
505 * Get the absolute receive timeout in microseconds.
506 * @param clock Current value of kernel clock
507 * @return The receive timeout in micro seconds.
509 Unsigned64 microsecs_abs(Utcb *u) const;
525 } __attribute__((packed));
527 struct L4_timeout_pair
532 L4_timeout_pair(L4_timeout const &rcv, L4_timeout const &snd)
533 : rcv(rcv), snd(snd) {}
535 L4_timeout_pair(unsigned long v) : rcv(v), snd(v >> 16) {}
537 Mword raw() const { return (Mword)rcv.raw() | (Mword)snd.raw() << 16; }
541 * This class contains constants for the message size for exception IPC.
543 * This information is architecture dependent, see #Msg_size.
545 class L4_exception_ipc
556 * Constants for error codes returned by kernel objects.
563 EPerm = 1, ///< Permission denied.
564 ENoent = 2, ///< Some object was not found.
565 ENomem = 12, ///< Out of memory.
566 EBusy = 16, ///< The object is busy, try again.
567 EExists = 17, ///< Some object does already exist.
568 ENodev = 19, ///< Objects of the specified type cannot be created.
569 EInval = 22, ///< Invalid parameters passed.
570 ERange = 34, ///< Parameter out of range
571 ENosys = 38, ///< No such operation.
572 EBadproto = 39, ///< Protocol not supported by object.
574 EAddrnotavail = 99, ///< The given address is not available.
579 class L4_cpu_set_descr
585 Mword offset() const { return (_w & 0x00ffffff) & (~0 << granularity()); }
586 Mword granularity() const { return (_w >> 24) & (MWORD_BITS-1) ; }
589 class L4_cpu_set : public L4_cpu_set_descr
595 bool contains(unsigned cpu) const
601 cpu >>= granularity();
602 if (cpu >= MWORD_BITS)
605 return _map & (1UL << cpu);
608 template<typename MAP>
609 Mword first(MAP const &bm, unsigned max) const
611 unsigned cpu = offset();
615 unsigned b = (cpu - offset()) >> granularity();
616 if (cpu >= max || b >= MWORD_BITS)
619 if (!(_map & (1UL << b)))
621 cpu += 1UL << granularity();
633 struct L4_sched_param
636 Smword sched_class; // legacy prio when positive
637 Mword length; // sizeof (...)
640 struct L4_sched_param_legacy
643 Smword prio; // must be positive, overlays with sched_class
648 //----------------------------------------------------------------------------
649 INTERFACE [ia32 || ux]:
651 EXTENSION class L4_exception_ipc
654 enum { Msg_size = 16 };
657 //----------------------------------------------------------------------------
660 EXTENSION class L4_exception_ipc
663 enum { Msg_size = 20 };
666 //----------------------------------------------------------------------------
669 EXTENSION class L4_exception_ipc
672 enum { Msg_size = 23 };
675 //----------------------------------------------------------------------------
678 EXTENSION class L4_exception_ipc
681 enum { Msg_size = 39 };
685 EXTENSION class L4_exception_ipc
688 enum { Msg_size = 12 }; // XXX whatever?
691 //----------------------------------------------------------------------------
695 * User-level Thread Control Block (UTCB).
697 * The UTCB is a virtual extension of the registers of a thread. A UTCB
698 * comprises three sets of registers: the message registers (MRs), the buffer
699 * registers (BRs and BDR), and the control registers (TCRs).
701 * The message registers (MRs) contain the contents of the messages that are
702 * sent to objects or received from objects. The message contents consist of
703 * untyped data and typed message items (see L4_msg_tag). The untyped must be
704 * stored in the first \a n message registers (\a n = L4_msg_tag::words()) and
705 * are transferred / copied uninterpreted to the receiving object (MRs of
706 * receiver thread or kernel object). The typed items follow starting at MR[\a
707 * n+1]. Each typed item is stored in two MRs and is interpreted by the kernel
708 * (see L4_msg_item, L4_fpage). The number of items is denoted by
709 * L4_msg_tag::items(). On the receiver side the typed items are translated
710 * into a format that is useful for the receiver and stored at into the same
711 * MRs in the receivers UTCB.
713 * The buffer registers (BRs and BDR) contain information that describe receive
714 * buffers for incoming typed items. The contents of these registers are not
715 * altered by the kernel. The buffer descriptor register (BDR, Utcb::buf_desc)
716 * contains information about the items in the buffer registers (BRs) and
717 * flags to enable FPU state transfer. The BRs contain a set of receive
718 * message items (L4_msg_item) that describe receive buffers, such as, virtual
719 * memory regions for incoming memory mappings or buffers for capabilities.
720 * The BRs are also used to store absolute 64bit timeout values for operations,
721 * The value of the timeout pair encodes the number of the BR if an absolute
724 * The thread control registers (TCRs) comprise an error code for errors during
725 * message passing and a set of user-level registers. The user-level registers
* are not used by the kernel and provide an anchor for thread-local storage.
730 /* must be 2^n bytes */
734 * Type for time values in the UTCB (size is fix 64bit).
* On 32-bit architectures this type uses two MRs; on 64-bit, one MR is used.
737 * This type is used for conversion of time values stored in MRs or BRs.
741 enum { Words = sizeof(Cpu_time)/sizeof(Mword) /**< Number of MRs used. */ };
742 Mword b[Words]; ///< The array of MRs to use.
743 Cpu_time t; ///< The time value itself.
748 Max_words = 63, ///< Number of MRs.
749 Max_buffers = 58 ///< Number of BRs.
752 /// The message registers (MRs).
753 Mword values[Max_words];
756 /// The buffer descriptor register (BDR).
757 L4_buf_desc buf_desc;
758 /// The buffer registers (BRs).
759 Mword buffers[Max_buffers];
761 /// The error code for IPC (TCR).
763 /// \deprecated transfer timeout is not used currently (TCR).
764 L4_timeout_pair xfer;
765 /// The user-level registers for TLS (TCR).
769 //----------------------------------------------------------------------------
775 * Receiver is ready to receive FPU contents?
776 * \return true if the receiver is ready to receive the state of the FPU as
778 * \see L4_buf_desc::Inherit_fpu, L4_buf_desc.
781 bool Utcb::inherit_fpu() const
782 { return buf_desc.flags() & L4_buf_desc::Inherit_fpu; }
786 * Create a message tag from its parts.
787 * \param words the number of untyped message words to transfer.
788 * \param items the number of typed message items, following the untyped words
789 * in the message registers. See L4_msg_item.
790 * \param flags the flags, see L4_msg_tag::Flags and L4_msg_tag::Output_flags.
791 * \param proto the protocol ID to use.
794 L4_msg_tag::L4_msg_tag(unsigned words, unsigned items, unsigned long flags,
796 : _tag((words & 0x3f) | ((items & 0x3f) << 6) | flags | (proto << 16))
800 * Create an uninitialized message tag.
801 * \note the value of the tag is unpredictable.
804 L4_msg_tag::L4_msg_tag()
808 * Create a message tag from another message tag, replacing
809 * the L4_msg_tag::Output_flags.
810 * \param o the message tag to copy.
811 * \param flags the output flags to set in the new tag.
812 * \pre (flags & ~Rcv_flags) == 0
815 L4_msg_tag::L4_msg_tag(L4_msg_tag const &o, Mword flags)
816 : _tag((o.raw() & ~Mword(Rcv_flags)) | flags)
820 * Create msg tag from the binary representation.
821 * \param raw the raw binary representation, as passed from user level.
823 PUBLIC explicit inline
824 L4_msg_tag::L4_msg_tag(Mword raw)
829 * Get the protocol ID.
830 * \return the protocol ID.
834 L4_msg_tag::proto() const
835 { return long(_tag) >> 16; }
838 * Get the binary representation.
839 * \return the binary value of the tag.
843 L4_msg_tag::raw() const
847 * Get the number of untyped words to deliver.
848 * \return number message registers that shall be transferred
849 * uninterpreted to the receiving object.
852 unsigned L4_msg_tag::words() const
853 { return _tag & 63; }
856 * Get the number of typed message items in the message.
857 * \return the number of typed items, directly following the
858 * untyped words in the message registers.
862 unsigned L4_msg_tag::items() const
863 { return (_tag >> 6) & 0x3f; }
866 * Get the flags of the tag.
867 * \return the flags of the message tag, note reserved bits might be
871 Mword L4_msg_tag::flags() const
876 * \return true if the sender wishes to transfer FPU contents.
877 * \see #Transfer_fpu.
880 bool L4_msg_tag::transfer_fpu() const
881 { return _tag & Transfer_fpu; }
884 * Do time-slice donation?
885 * \return true if the sender is willing to donate its remaining time-
886 * slice to the receiver.
890 bool L4_msg_tag::do_switch() const
891 { return !(_tag & Schedule); }
894 * Set the error flag to \a e.
895 * \param e the value of the error flag to be set.
898 void L4_msg_tag::set_error(bool e = true)
899 { if (e) _tag |= Error; else _tag &= ~Mword(Error); }
902 * Is there an error flagged?
903 * \return true if the error flag of the message tag is set.
906 bool L4_msg_tag::has_error() const
907 { return _tag & Error; }
909 // L4_timeout implementation
912 IMPLEMENT inline L4_timeout::L4_timeout(unsigned short t)
916 IMPLEMENT inline unsigned short L4_timeout::raw() const
920 Mword L4_timeout::abs_exp() const
921 { return (_t >> 11) & 0xf; }
924 bool L4_timeout::abs_clock() const
925 { return _t & Clock_mask; }
929 L4_timeout::microsecs_rel(Unsigned64 clock) const
934 return clock + ((Unsigned64)man() << exp());
937 IMPLEMENT inline NEEDS[<minmax.h>]
939 L4_timeout::microsecs_abs(Utcb *u) const
941 int idx = min<int>(_t & 0x3f, Utcb::Max_buffers);
942 Utcb::Time_val const *top
943 = reinterpret_cast<Utcb::Time_val const *>(&u->buffers[idx]);
949 L4_timeout::is_absolute() const
950 { return _t & Abs_mask; }
954 L4_timeout::microsecs(Unsigned64 clock, Utcb *u) const
957 return microsecs_abs(u);
959 return microsecs_rel(clock);
963 bool L4_timeout::is_never() const
967 bool L4_timeout::is_zero() const
968 { return _t == Zero; }
971 unsigned short L4_timeout::is_finite() const
976 // L4_timeout implementation
980 L4_timeout::L4_timeout(Mword man, Mword exp)
981 : _t (((man & Man_mask) | ((exp << Exp_shift) & Exp_mask)))
985 L4_timeout::L4_timeout(Mword man, Mword exp, bool clock)
986 : _t (((man & Man_mask) | ((exp << (Exp_shift+1)) & Exp_mask)
987 | (clock ? Clock_mask : 0) | Abs_mask))
990 IMPLEMENT inline Mword L4_timeout::exp() const
991 { return (_t & Exp_mask) >> Exp_shift; }
993 IMPLEMENT inline void L4_timeout::exp(Mword w)
994 { _t = (_t & ~Exp_mask) | ((w << Exp_shift) & Exp_mask); }
996 IMPLEMENT inline Mword L4_timeout::man() const
997 { return (_t & Man_mask) >> Man_shift; }
999 IMPLEMENT inline void L4_timeout::man (Mword w)
1000 { _t = (_t & ~Man_mask) | ((w << Man_shift) & Man_mask); }