#include "member_offs.h"
#define NO_INSTRUMENT __attribute__((no_instrument_function))
/** A lock that implements priority inheritance.
 * The lock uses a validity checker (VALID) for doing an existence check
 * before the lock is actually acquired. With this mechanism the lock itself
 * may disappear while it is locked (see clear_no_switch_dirty() and
 * switch_dirty()), even if it is under contention. When the lock no longer
 * exists, VALID::valid(void const *lock) must return false; while it
 * exists, it must return true (see Switch_lock_valid). This mechanism is
 * used in Thread_lock when thread control blocks are deallocated.
 * The operations lock(), lock_dirty(), try_lock(), test(), test_and_set(),
 * and test_and_set_dirty() may return #Invalid if the lock does not
 * exist (see valid()).
 * The operations initialize(), lock_owner(), clear(), clear_dirty(), and
 * clear_no_switch_dirty() must not be called on an invalid lock;
 * thus the lock itself must be held while using these operations.
 * (Except initialize(), which is only useful for locks that are always
 * valid.)
 * @param VALID must be set to a validity checker for the lock.
 *
 * The validity checker is used while acquiring the lock to test
 * whether the lock itself exists. We assume that a lock may disappear
 * while we are blocked on it.
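 *
 * Illustrative sketch (an assumption for clarity, not part of this
 * interface): a trivial validity checker in the spirit of
 * Switch_lock_valid might look like this:
 * @code
 * struct Always_valid
 * {
 *   // Pretend the lock always exists; a real checker would consult
 *   // the object embedding the lock.
 *   static bool valid(void const *) { return true; }
 * };
 * @endcode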
// Warning: This lock's member variables must not need a
// constructor. Switch_lock instances must assume
// zero-initialization or be initialized using the initialize()
// Reason: to avoid overwriting the lock in the thread-ctor
 * @brief The result type of lock operations.
    Not_locked, ///< The lock was not acquired before -- we got it
    Locked,     ///< The lock was already acquired by ourselves
    Invalid     ///< The lock does not exist (is invalid)
 * Stores the context of the lock for a later switch.
 * (see clear_no_switch_dirty(), switch_dirty())
#include "lock_guard.h"
#include "processor.h"
// Switch_lock inlines
 * Test if the lock is valid (uses the validity checker).
 * @return true if the lock really exists, false if not.
Switch_lock::valid() const
{ return (_lock_owner & 1) == 0; }
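// Note (inferred from the code in this file): _lock_owner stores the
// owner's Context pointer, which is assumed to be at least 2-byte
// aligned, so bit 0 is free to serve as the "invalid" tag bit:
// invalidate() sets it, valid() tests it, lock_owner() masks it off.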
/** Initialize Switch_lock. Call this function if you cannot
    guarantee that your Switch_lock instance is allocated from
    zero-initialized memory. */
Switch_lock::initialize()
 * @pre The lock must be valid (see valid()).
 * @return current owner of the lock. 0 if there is no owner.
Context * NO_INSTRUMENT
Switch_lock::lock_owner() const
  auto guard = lock_guard(cpu_lock);
  return (Context*)(_lock_owner & ~1UL);
    @return #Locked if the lock is set, #Not_locked if not locked, and
            #Invalid if the lock does not exist (see valid()).
Switch_lock::Status NO_INSTRUMENT
Switch_lock::test() const
  auto guard = lock_guard(cpu_lock);
  Address o = access_once(&_lock_owner);
  if (EXPECT_FALSE(o & 1))
  return o ? Locked : Not_locked;
/** Try to acquire the lock.
    @return #Locked if successful: current context is now the owner of the lock.
            #Not_locked if the lock was already set; Not_locked is returned
            even if the current context is already the lock owner.
            The result is #Invalid if the lock does not exist (see valid()).
150 inline NEEDS["atomic.h"]
151 Switch_lock::Status NO_INSTRUMENT
152 Switch_lock::try_lock()
154 auto guard = lock_guard(cpu_lock);
156 if (EXPECT_FALSE(!valid()))
159 Context *c = current();
160 bool ret = set_lock_owner(c);
163 c->inc_lock_cnt(); // Do not lose this lock if current is deleted
165 return ret ? Locked : Not_locked;
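// Usage sketch (illustrative assumption, not code from this file):
//
//   Switch_lock::Status s = obj->lock.try_lock();
//   if (s == Switch_lock::Locked)
//     {
//       /* ...critical section... */
//       obj->lock.clear();
//     }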
/** Acquire the lock with priority inheritance.
 * If the lock is occupied, enqueue in list of helpers and lend CPU
 * to current lock owner until we are the lock owner.
 * @return #Locked if the lock was already locked by the current context.
 *         #Not_locked if the current context got the lock (the usual case).
 *         #Invalid if the lock does not exist (see valid()).
Switch_lock::Status NO_INSTRUMENT
  auto guard = lock_guard(cpu_lock);
Switch_lock::help(Context *curr, Context *owner, Address owner_id)
  auto s = curr->switch_exec_helping(owner, Context::Helping, &_lock_owner,
  if (s == Context::Switch::Failed)
  if (curr->home_cpu() != current_cpu())
  Proc::preemption_point();
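// Explanatory note (inferred from the fragment above): help() donates
// the current context's CPU time to the lock owner through
// switch_exec_helping(). If that switch fails and the helper has
// meanwhile been migrated away from this CPU, it yields via
// Proc::preemption_point() so the caller's acquire loop can retry.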
/** Acquire the lock with priority inheritance.
 * If the lock is occupied, enqueue in list of helpers and lend CPU
 * to current lock owner until we are the lock owner (see lock()).
 * @pre caller holds cpu lock
 * @return #Locked if the lock was already locked by the current context.
 *         #Not_locked if the current context got the lock (the usual case).
 *         #Invalid if the lock does not exist (see valid()).
207 inline NEEDS["context.h", "processor.h", Switch_lock::set_lock_owner]
208 Switch_lock::Status NO_INSTRUMENT
209 Switch_lock::lock_dirty()
211 assert(cpu_lock.test());
213 Mword o = access_once(&_lock_owner);
214 if (EXPECT_FALSE(o & 1))
217 Context *c = current();
225 Mword o = access_once(&_lock_owner);
232 help(c, (Context *)o, o);
235 while (!set_lock_owner(c));
237 c->inc_lock_cnt(); // Do not lose this lock if current is deleted
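// Explanatory note: the (partially elided) do/while loop above keeps
// re-reading the current owner and lending it our CPU time via help()
// until set_lock_owner() finally succeeds. inc_lock_cnt() then marks
// the current context as a lock holder so it is not deleted while it
// still holds locks.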
/** Acquire the lock with priority inheritance.
 * @return #Locked if we owned the lock already. #Not_locked otherwise.
 *         #Invalid is returned if the lock does not exist (see valid()).
inline NEEDS["globals.h"]
Switch_lock::Status NO_INSTRUMENT
Switch_lock::test_and_set()
/** Acquire the lock with priority inheritance (see test_and_set()).
 * @return #Locked if we owned the lock already. #Not_locked otherwise.
 *         #Invalid is returned if the lock does not exist (see valid()).
 * @pre caller holds cpu lock
inline NEEDS["globals.h"]
Switch_lock::Status NO_INSTRUMENT
Switch_lock::test_and_set_dirty()
IMPLEMENTATION [!mp]:
Switch_lock::clear_lock_owner()
Switch_lock::set_lock_owner(Context *o)
  _lock_owner = Address(o) | (_lock_owner & 1);
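// Explanatory note for the !mp variant: with a single CPU the cpu lock
// already serializes all accesses, so a plain store suffices; OR-ing in
// (_lock_owner & 1) keeps the invalid tag bit intact, so an invalidated
// lock stays invalid when an owner is set.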
Switch_lock::clear_lock_owner()
  atomic_mp_and(&_lock_owner, 1);
Switch_lock::set_lock_owner(Context *o)
  bool have_no_locks = access_once(&o->_lock_cnt) < 1;
      assert (current_cpu() == o->home_cpu());
      if (EXPECT_FALSE(access_once(&o->_running_under_lock)))
      if (EXPECT_TRUE(mp_cas(&o->_running_under_lock, Mword(false), Mword(true))))
    assert (o->_running_under_lock);
  if (EXPECT_FALSE(!mp_cas(&_lock_owner, Mword(0), Address(o))))
      write_now(&o->_running_under_lock, Mword(false));
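// Explanatory note (inferred from the fragment above; several lines are
// elided): on MP the owner is published with an atomic
// mp_cas(&_lock_owner, 0, o). A context that holds no locks yet first
// claims its _running_under_lock flag with a CAS; if publishing the
// owner then fails because the lock is already taken, that flag is
// rolled back with write_now().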
 * Clear the lock, but do not switch to a potential helper yet.
 * This function is used when the lock must be cleared and the object
 * containing the lock will be deallocated atomically. We have to do the
 * switch later using switch_dirty(Lock_context).
 * @return the context for a later switch_dirty()
 * @pre The lock must be valid (see valid()).
 * @pre caller holds cpu lock
 * @post switch_dirty() must be called in the same atomic section
inline NEEDS[Switch_lock::clear_lock_owner]
Switch_lock::Lock_context NO_INSTRUMENT
Switch_lock::clear_no_switch_dirty()
  c.owner = lock_owner();
  c.owner->dec_lock_cnt();
 * Do the switch part of clear() after a clear_no_switch_dirty().
 * This function does not touch the lock itself (it may be called on
 * an invalid lock).
 * @param c the context returned by a former clear_no_switch_dirty().
 * @pre must be called atomically with clear_no_switch_dirty(),
 *      under the same cpu lock
Switch_lock::switch_dirty(Lock_context const &c)
  assert (current() == c.owner);
  Context *h = c.owner->helper();
   * If someone helped us by lending us their time slice,
   * just switch back to the helper without changing its helping state.
  if (   EXPECT_FALSE(h->home_cpu() != current_cpu())
      || EXPECT_FALSE((long)c.owner->switch_exec_locked(h, Context::Ignore_Helping)))
       * Someone is apparently trying to delete us, so we are not
       * allowed to continue running; let the scheduler pick the
       * next thread to execute.
      if (   c.owner->lock_cnt() == 0
          && (c.owner->home_cpu() != current_cpu() || c.owner->donatee()))
    Return the CPU to the helper if there is one, since it had to have a
    higher priority to be able to help (the priority may be its own, it
    may run on a donated timeslice, or round-robin scheduling may have
    selected a thread on the same priority level as ours).
    @pre The lock must be valid (see valid()).
  auto guard = lock_guard(cpu_lock);
  switch_dirty(clear_no_switch_dirty());
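// Explanatory note: clear() is the two dirty phases composed under the
// cpu-lock guard, which makes releasing the lock and switching to a
// potential helper one atomic section.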
Switch_lock::set(Status s)
    Return the CPU to the helper if there is one, since it had to have a
    higher priority to be able to help (the priority may be its own, it
    may run on a donated timeslice, or round-robin scheduling may have
    selected a thread on the same priority level as ours).
    If _lock_owner is 0, then this is a no-op.
    @pre The lock must be valid (see valid())
    @pre caller holds cpu lock
Switch_lock::clear_dirty()
  assert(cpu_lock.test());
  switch_dirty(clear_no_switch_dirty());
Switch_lock::invalidate()
  auto guard = lock_guard(cpu_lock);
  atomic_mp_or(&_lock_owner, 1);
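// Explanatory note: atomically setting bit 0 makes valid() return false
// from now on, so pending and future lock attempts observe #Invalid;
// the owner bits of _lock_owner stay untouched.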
Switch_lock::wait_free()
  auto guard = lock_guard(cpu_lock);
  Context *c = current();
  if ((_lock_owner & ~1UL) == (Address)c)
  assert(cpu_lock.test());
      Address _owner = access_once(&_lock_owner);
      Context *owner = (Context *)(_owner & ~1UL);
      help(c, owner, _owner);
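// Explanatory note (inferred from the fragment above; gaps elided):
// wait_free() runs under the cpu lock after invalidate(). Apart from
// the special case where the current context itself is the masked
// owner, it loops: it reads _lock_owner, masks off the invalid bit to
// recover the owning Context, and donates CPU time to that owner via
// help() until the lock is finally free.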