3 #include "member_offs.h"
9 #define NO_INSTRUMENT __attribute__((no_instrument_function))
14 /** A lock that implements priority inheritance.
16 * The lock uses a validity checker (VALID) for doing an existence check
17 * before the lock is actually acquired. With this mechanism the lock itself
18 * may disappear while it is locked (see clear_no_switch_dirty() and
19 * switch_dirty()), even if it is under contention. When the lock no longer
20  * exists, VALID::valid(void const *lock) must return false, and true if it
21 * exists (see Switch_lock_valid). This mechanism is used in Thread_lock
22 * when thread control blocks are deallocated.
24 * The operations lock(), lock_dirty(), try_lock(), test(), test_and_set(),
25 * and test_and_set_dirty() may return #Invalid if the lock does
28 * The operations initialize(), lock_owner(), clear(), clear_dirty(), and
29  * clear_no_switch_dirty() must not be called on an invalid lock,
30 * thus the lock itself must be held for using these operations.
31 * (Except initialize(), which is only useful for locks that are always
34 * @param VALID must be set to a validity checker for the lock.
36  * The validity checker is used while acquiring the lock to test
37  * if the lock itself exists. We assume that a lock may disappear
38 * while we are blocked on it.
45 // Warning: This lock's member variables must not need a
46 // constructor. Switch_lock instances must assume
47 // zero-initialization or be initialized using the initialize()
49 // Reason: to avoid overwriting the lock in the thread-ctor
54 * @brief The result type of lock operations.
58 Not_locked, ///< The lock was formerly not aquired and -- we got it
59 Locked, ///< The lock was already aquired by ourselves
60 Invalid ///< The lock does not exist (is invalid)
64 * Stores the context of the lock for a later switch.
65 * (see clear_no_switch_dirty(), switch_dirty())
84 #include "lock_guard.h"
87 #include "processor.h"
91 // Switch_lock inlines
95 * Test if the lock is valid (uses the validity checker).
96 * @return true if the lock really exists, false if not.
101 Switch_lock::valid() const
102 { return (_lock_owner & 1) == 0; }
104 /** Initialize Switch_lock. Call this function if you cannot
105 guarantee that your Switch_lock instance is allocated from
106 zero-initialized memory. */
110 Switch_lock::initialize()
116 * @pre The lock must be valid (see valid()).
117 * @return current owner of the lock. 0 if there is no owner.
121 Context * NO_INSTRUMENT
122 Switch_lock::lock_owner() const
124 Lock_guard<Cpu_lock> guard(&cpu_lock);
125 return (Context*)(_lock_owner & ~1UL);
129 @return #Locked if lock is set, #Not_locked if not locked, and #Invalid if
130 the lock does not exist (see valid()).
134 Switch_lock::Status NO_INSTRUMENT
135 Switch_lock::test() const
137 Lock_guard<Cpu_lock> guard(&cpu_lock);
138 if (EXPECT_FALSE(!valid()))
140 return (_lock_owner & ~1UL) ? Locked : Not_locked;
143 /** Try to acquire the lock.
144 @return #Locked if successful: current context is now the owner of the lock.
145 #Not_locked if lock has previously been set. Returns Not_locked
146 even if the current context is already the lock owner.
147 The result is #Invalid if the lock does not exist (see valid()).
150 inline NEEDS["atomic.h"]
151 Switch_lock::Status NO_INSTRUMENT
152 Switch_lock::try_lock()
154 Lock_guard<Cpu_lock> guard(&cpu_lock);
156 if (EXPECT_FALSE(!valid()))
159 bool ret = cas(&_lock_owner, (Address)0, Address(current()));
162 current()->inc_lock_cnt(); // Do not lose this lock if current is deleted
164 return ret ? Locked : Not_locked;
167 /** Acquire the lock with priority inheritance.
168 * If the lock is occupied, enqueue in list of helpers and lend CPU
169 * to current lock owner until we are the lock owner.
170 * @return #Locked if the lock was already locked by the current context.
171 * #Not_locked if the current context got the lock (the usual case).
172 * #Invalid if the lock does not exist (see valid()).
175 Switch_lock::Status NO_INSTRUMENT
178 Lock_guard <Cpu_lock> guard(&cpu_lock);
182 /** Acquire the lock with priority inheritance.
183 * If the lock is occupied, enqueue in list of helpers and lend CPU
184 * to current lock owner until we are the lock owner (see lock()).
185 * @pre caller holds cpu lock
186 * @return #Locked if the lock was already locked by the current context.
187 * #Not_locked if the current context got the lock (the usual case).
188 * #Invalid if the lock does not exist (see valid()).
191 inline NEEDS["cpu.h","context.h", "processor.h", Switch_lock::set_lock_owner]
192 Switch_lock::Status NO_INSTRUMENT
193 Switch_lock::lock_dirty()
195 assert(cpu_lock.test());
200 // have we already the lock?
201 if ((_lock_owner & ~1UL) == Address(current()))
206 assert(cpu_lock.test());
208 // Help lock owner until lock becomes free
210 bool tmp = current()->switch_exec_locked(lock_owner(), Context::Helping);
219 set_lock_owner(current());
221 current()->inc_lock_cnt(); // Do not lose this lock if current is deleted
226 /** Acquire the lock with priority inheritance.
227 * @return #Locked if we owned the lock already. #Not_locked otherwise.
228 * #Invalid is returned if the lock does not exist (see valid()).
231 inline NEEDS["globals.h"]
232 Switch_lock::Status NO_INSTRUMENT
233 Switch_lock::test_and_set()
238 /** Acquire the lock with priority inheritance (see test_and_set()).
239 * @return #Locked if we owned the lock already. #Not_locked otherwise.
240 * #Invalid is returned if the lock does not exist (see valid()).
241 * @pre caller holds cpu lock
244 inline NEEDS["globals.h"]
245 Switch_lock::Status NO_INSTRUMENT
246 Switch_lock::test_and_set_dirty()
253 Switch_lock::set_lock_owner(Context *o)
255 _lock_owner = Address(o) | (_lock_owner & 1);
259 * Clear the lock, however do not switch to a potential helper yet.
260 * This function is used when the lock must be cleared and the object
261 * containing the lock will be deallocated atomically. We have to do the
262 * switch later using switch_dirty(Lock_context).
263 * @return the context for a later switch_dirty()
264 * @pre The lock must be valid (see valid()).
265  * @pre caller holds cpu lock
266  * @post switch_dirty() must be called in the same atomic section
269 inline NEEDS[Switch_lock::set_lock_owner]
270 Switch_lock::Lock_context NO_INSTRUMENT
271 Switch_lock::clear_no_switch_dirty()
274 c.owner = lock_owner();
276 c.owner->dec_lock_cnt();
281 * Do the switch part of clear() after a clear_no_switch_dirty().
282 * This function does not touch the lock itself (may be called on
284 * @param c the context returned by a former clear_no_switch_dirty().
285 * @pre must be called atomically with clear_no_switch_dirty(),
286 * under the same cpu lock
291 Switch_lock::switch_dirty(Lock_context const &c)
293 assert_kdb (current() == c.owner);
295 Context *h = c.owner->helper();
297 * If someone helped us by lending its time slice to us.
298 * Just switch back to the helper without changing its helping state.
300 bool need_sched = false;
302 need_sched = c.owner->switch_exec_locked(h, Context::Ignore_Helping);
305 * Someone apparently tries to delete us. Therefore we aren't
306 * allowed to continue to run and therefore let the scheduler
307 * pick the next thread to execute.
309 if (need_sched || (c.owner->lock_cnt() == 0 && c.owner->donatee()))
314 Return the CPU to helper if there is one, since it had to have a
315 higher priority to be able to help (priority may be its own, it
316 may run on a donated timeslice or round robin scheduling may have
317 selected a thread on the same priority level as me)
319 @pre The lock must be valid (see valid()).
325 Lock_guard<Cpu_lock> guard(&cpu_lock);
327 switch_dirty(clear_no_switch_dirty());
332 Switch_lock::set(Status s)
339 Return the CPU to helper if there is one, since it had to have a
340 higher priority to be able to help (priority may be its own, it
341 may run on a donated timeslice or round robin scheduling may have
342 selected a thread on the same priority level as me).
343 If _lock_owner is 0, then this is a no op
345 @pre The lock must be valid (see valid())
346 @pre caller holds cpu lock
351 Switch_lock::clear_dirty()
353 assert(cpu_lock.test());
355 switch_dirty(clear_no_switch_dirty());
360 Switch_lock::invalidate()
362 Lock_guard<Cpu_lock> guard(&cpu_lock);
368 Switch_lock::wait_free()
370 Lock_guard<Cpu_lock> guard(&cpu_lock);
374 // have we already the lock?
375 if ((_lock_owner & ~1UL) == (Address)current())
378 current()->dec_lock_cnt();
382 while (Address(_lock_owner) & ~1UL)
384 assert(cpu_lock.test());
386 // Help lock owner until lock becomes free
388 check (!current()->switch_exec_locked((Context*)(Address(_lock_owner & ~1UL)), Context::Helping));