INTERFACE:

#include "kobject.h"
#include "mapping_tree.h"
#include "prio_list.h"
#include "slab_cache_anon.h"

class Ram_quota;

class U_semaphore : public Kobject
{
  friend class Jdb_semaphore;
  FIASCO_DECLARE_KOBJ();

private:
  typedef slab_cache_anon Allocator;

  Ram_quota *_q;
  Locked_prio_list _queue;
  bool _valid;

public:
  enum Result { Ok, Retry, Timeout, Invalid };
  virtual ~U_semaphore();
};
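
// The user-level semaphore this kobject operates on. A minimal sketch,
// assuming only the layout implied by the peek_user()/poke_user() accesses
// below (the authoritative definition lives in the user-visible L4 headers,
// not in this file):
//
//   struct L4_semaphore
//   {
//     Smword counter; // <= 0 while the semaphore is contended
//     Mword  flags;   // "queued" flag, nonzero while waiters may be queued
//   };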
IMPLEMENTATION:

#include <csetjmp>

#include "entry_frame.h"
#include "ipc_timeout.h"
#include "logdefs.h"
#include "mem_space.h"
#include "thread_state.h"
#include "timer.h"

FIASCO_DEFINE_KOBJ(U_semaphore);

PUBLIC
U_semaphore::U_semaphore(Ram_quota *q)
  : _q(q), _valid(true)
{}
PRIVATE inline NOEXPORT
void
U_semaphore::set_queued(L4_semaphore *sem, bool q)
{
  current()->mem_space()->poke_user(&(sem->flags), (Mword)q);
}
PRIVATE inline NOEXPORT
bool
U_semaphore::pagein_set_queued(Thread *c, L4_semaphore *sem, bool q)
{
  jmp_buf pf_recovery;
  int err;

  if (EXPECT_TRUE ((err = setjmp(pf_recovery)) == 0))
    {
      c->recover_jmp_buf(&pf_recovery);
      // we are preemptible here, in case of a page fault
      current()->mem_space()->poke_user(&(sem->flags), (Mword)q);
    }

  c->recover_jmp_buf(0);
  return err == 0;
}
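
// The setjmp/longjmp pair above is the usual page-fault recovery protocol:
// if the fault on the user address cannot be resolved, the fault handler
// longjmps back and setjmp returns nonzero. A false result therefore means
// the semaphore memory is unmappable and the object must be treated as
// invalid by the callers below.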
PRIVATE inline NOEXPORT
Smword
U_semaphore::add_counter(L4_semaphore *sem, long v)
{
  Smword cnt = current()->mem_space()->peek_user(&(sem->counter)) + v;
  current()->mem_space()->poke_user(&(sem->counter), cnt);
  return cnt;
}
PRIVATE inline NOEXPORT
bool
U_semaphore::valid_semaphore(L4_semaphore *s)
{
  if (EXPECT_FALSE(((unsigned long)s & (sizeof(L4_semaphore)-1)) != 0))
    return false;

  if (EXPECT_FALSE((unsigned long)s >= Mem_layout::User_max))
    return false;

  return true;
}
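
// The alignment test is not merely defensive: given that
// sizeof(L4_semaphore) is a power of two (which the mask test presumes),
// an aligned semaphore can never straddle a page boundary, so one
// successful poke_user() on sem->flags in pagein_set_queued() also
// guarantees that sem->counter is mapped writable.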
PUBLIC inline NOEXPORT
L4_msg_tag
U_semaphore::block_locked(L4_timeout const &to, L4_semaphore *sem, Utcb *u)
{
  if (EXPECT_FALSE(!valid_semaphore(sem)))
    return L4_msg_tag(0, 0, 0, Invalid);

  Thread *c = current_thread();
  if (EXPECT_FALSE (!pagein_set_queued(c, sem, true)))
    // an unhandled page fault means the semaphore is considered invalid
    return L4_msg_tag(0, 0, 0, Invalid);

  // *counter is now paged in writable
  if (add_counter(sem, 1) > 0)
    {
      // the semaphore is currently available, no need to block
      set_queued(sem, false);

      add_counter(sem, -1);
      return L4_msg_tag(0, 0, 0, Ok);
    }

  Unsigned64 t = 0;
  if (!to.is_never())
    {
      t = to.microsecs(Timer::system_clock(), u);
      if (!t) // zero timeout while the semaphore is taken
        return L4_msg_tag(0, 0, 0, Timeout);
    }

  c->wait_queue(&_queue);
  c->sender_enqueue(&_queue, c->sched_context()->prio());
  c->state_change_dirty(~Thread_ready, Thread_ipc_in_progress);

  IPC_timeout timeout;
  if (t)
    {
      timeout.set(t, c->cpu());
      c->set_timeout(&timeout);
    }

  c->schedule();

  // We get here by: (a) a wakeup, (b) a timeout, (c) wait_queue deletion,
  // or (d) ex_regs.

  // The wait_queue was destroyed
  if (EXPECT_FALSE(!_valid))
    return L4_msg_tag(0, 0, 0, Invalid);

  // Two cases are left:
  // 1. c is not in the queue: the wakeup has already occurred
  // 2. c is in the sender list and the timeout has hit: flag a timeout
  if (EXPECT_FALSE(c->in_sender_list() && timeout.has_hit()))
    {
      // The timeout really hit, so remove c from the queue
      c->sender_dequeue(&_queue);
      return L4_msg_tag(0, 0, 0, Timeout);
    }

  return L4_msg_tag(0, 0, 0, Retry);
}
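
// The Retry result is what makes the user-level fast path work: the user
// library decrements the counter atomically in user space and enters the
// kernel only on contention, so a woken waiter must re-attempt its atomic
// decrement rather than assume ownership. A hypothetical user-side loop
// (the names here are illustrative, not part of this kernel):
//
//   do
//     res = usem_down(sem_cap, sem, timeout); // ends up in block_locked()
//   while (res == U_semaphore::Retry);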
PUBLIC inline NOEXPORT
void
U_semaphore::wakeup_locked(L4_semaphore *sem, bool yield)
{
  if (EXPECT_FALSE(!valid_semaphore(sem)))
    return;

  Thread *c = current_thread();

  // basically make the queued flag writable
  if (EXPECT_FALSE (!pagein_set_queued(c, sem, true)))
    // semaphore is invalid
    return;

  Prio_list_elem *h = _queue.head();
  if (!h)
    {
      set_queued(sem, false); // queue is empty
      return;
    }

  Thread *w = static_cast<Thread*>(Sender::cast(h));
  w->sender_dequeue(&_queue);
  w->state_change_dirty(~Thread_ipc_in_progress, Thread_ready);

  if (!_queue.head())
    set_queued(sem, false); // dequeued the last thread

  // XXX: bad hack, need to sync the queue; for a remote thread we rely on
  // w having been marked ready above and do no local scheduling
  if (w->cpu() != current_cpu())
    return;

  if (c->schedule_in_progress())
    return;

  if (w->sched()->deblock(current_cpu(), current()->sched(), true))
    current()->switch_to_locked(w);
  else if (yield && w->sched()->prio() == current()->sched()->prio())
    {
      current()->switch_to_locked(w);
    }
  else
    {
      current()->switch_sched(current()->sched());
      current()->schedule();
    }
}
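
// Design note: deblock() already reports whether the woken thread should
// preempt the caller; the extra yield branch additionally hands the CPU
// over on a priority tie, which is what gives proto 1 in invoke() below
// its "wake up and yield" semantics while proto 2 skips it.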
PUBLIC
U_semaphore::~U_semaphore()
{
  // Mark the queue as dead so blocked threads return Invalid, then make
  // every waiter ready again.
  _valid = false;

  while (Prio_list_elem *h = _queue.head())
    {
      Thread *w = static_cast<Thread*>(Sender::cast(h));
      w->sender_dequeue(&_queue);
      w->state_change_safely(~Thread_ipc_in_progress, Thread_ready);
    }

  Lock_guard<Cpu_lock> guard(&cpu_lock);
  current()->schedule();
}
PUBLIC static
U_semaphore*
U_semaphore::alloc(Ram_quota *q)
{
  void *nq;
  if (q->alloc(sizeof(U_semaphore)) && (nq = allocator()->alloc()))
    return new (nq) U_semaphore(q);

  return 0;
}
PUBLIC inline
void *
U_semaphore::operator new (size_t, void *p)
{ return p; }
PUBLIC
void
U_semaphore::operator delete (void *_l)
{
  U_semaphore *l = reinterpret_cast<U_semaphore*>(_l);
  if (l->_q)
    l->_q->free(sizeof(U_semaphore));

  allocator()->free(l);
}
static Kmem_slab_t<U_semaphore> _usem_allocator("U_semaphore");

PRIVATE static
U_semaphore::Allocator *
U_semaphore::allocator()
{ return &_usem_allocator; }
PUBLIC
void
U_semaphore::invoke(L4_obj_ref, Mword, Syscall_frame *f, Utcb *u)
{
  LOG_TRACE("User semaphore", "sem", ::current(), __usem_fmt,
      Log_entry *le = tbe->payload<Log_entry>();
      le->tag = f->tag().raw();
      le->id = dbg_id();
      le->sem = u->values[0]);

  switch (f->tag().proto())
    {
    case 0: // Sys_u_lock_frame::Sem_sleep
      f->tag(block_locked(f->timeout().rcv, (L4_semaphore*)u->values[0], u));
      break;
    case 1: // Sys_u_lock_frame::Sem_wakeup, yielding to the woken thread
      wakeup_locked((L4_semaphore*)u->values[0], true);
      f->tag(L4_msg_tag(0,0,0,0));
      break;
    case 2: // Sys_u_lock_frame::Sem_wakeup, without yielding
      wakeup_locked((L4_semaphore*)u->values[0], false);
      f->tag(L4_msg_tag(0,0,0,0));
      break;
    default:
      f->tag(L4_msg_tag(0,0,0,-L4_err::EInval));
      break;
    }
}
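
// Invocation ABI as implemented above: the caller passes the user-virtual
// address of its L4_semaphore in UTCB value word 0. Protocol word 0 blocks
// (subject to the receive timeout), 1 wakes one waiter and may yield to it,
// 2 wakes one waiter without yielding; anything else returns
// -L4_err::EInval. The Result code travels back in the label field of the
// returned message tag.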
// -----------------------------------------------------------------------
INTERFACE [debug]:

#include "tb_entry.h"

EXTENSION class U_semaphore
{
public:
  struct Log_entry
  {
    Mword tag;
    Mword id;
    Mword sem;
  };

  static unsigned log_fmt(Tb_entry *, int maxlen, char *buf) asm ("__usem_fmt");
};
// -----------------------------------------------------------------------
IMPLEMENTATION [debug]:

PUBLIC static
unsigned
U_semaphore::log_fmt(Tb_entry *e, int maxlen, char *buf)
{
  Log_entry *le = e->payload<Log_entry>();
  char const *op;
  L4_msg_tag tag(le->tag);

  switch (tag.proto())
    {
    case 0: op = "block"; break;
    case 1: op = "signal"; break;
    default: op = "invalid"; break;
    }

  return snprintf(buf, maxlen, "sem=%lx op=%s usem=%lx", le->id, op, le->sem);
}
#if 0 // Promela model of the lock

#define MAX_THREADS    4
#define MAX_WQ_ENTRIES MAX_THREADS

/* ... */

bit thread_state[MAX_THREADS];
hidden byte loops[MAX_THREADS];
unsigned in_critical : 4 = 0;

typedef sem_t {
  short counter;
  bit queued;
  bit queue[MAX_WQ_ENTRIES];
}

sem_t sem; /* maybe move the initialization into init */

inline init_globals()
{
  /* ... */
  :: (temp >= MAX_WQ_ENTRIES) -> break;
  /* ... */
}

inline enqueue_thread(t)
{ /* ... */ }

inline dequeue_thread(t)
{ /* ... */ }

inline queue_head(head)
{
  do
  :: (sem.queue[local_temp]) -> head = local_temp; break;
  /* ... */
  :: (local_temp >= MAX_WQ_ENTRIES) -> head = local_temp; break;
  od
}

inline block(ret, thread)
{
  /* ... */
  :: (sem.counter > 0) -> sem.counter--; ret = LOCKED; break;
  /* ... */
  enqueue_thread(thread);
  thread_state[thread] = 0;
  /* ... */
  :: (thread_state[thread] == 1) -> skip
  /* ... */
  :: (sem.queue[thread] == 0) -> ret = RETRY; break;
  /* ... */
  dequeue_thread(thread);
  /* ... */
}

/* presumably the wakeup side: clear the queued flag and make one waiter
 * ready */
  /* ... */
  :: (pf == MAX_THREADS) -> sem.queued = 0; break;
  /* ... */
  thread_state[pf] = 1;
  /* ... */
  if :: (pf == MAX_THREADS) -> sem.queued = 0;
  /* ... */
  :: (sem.counter >= 0) -> break;
  /* ... */
  :: (ret == LOCKED || ret == ERROR) -> break;
  /* ... */
  :: else assert(false);
  /* ... */
  :: (!sem.queued) -> break;
  /* ... */
  :: (thread_state[0] == 0) -> thread_state[0] = 1;
  :: (thread_state[1] == 0) -> thread_state[1] = 1;
  :: (thread_state[2] == 0) -> thread_state[2] = 1;
  /* ... */

proctype Thread(byte thread)
{
  unsigned local_temp : 4;
  /* ... */
  :: (ret == ERROR) -> goto L1;
  /* ... */
  assert (in_critical <= MAX);
  /* ... */
  :: (loops[thread] == 0) -> break;
  /* ... */
}

init
{
  hidden byte threads = 0;
  /* ... */
  loops[threads] = LOOPS - 1;
  /* ... */
  :: (threads >= MAX_THREADS) -> break;
  /* ... */
}

/* ... */
  :: (in_critical == MAX) -> assert(false)
/* ... */

#endif
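
// The fragments above are meant for the Spin model checker; a typical run
// (assuming the model were extracted into, say, usem.pml; the file name is
// illustrative) would be:
//
//   spin -a usem.pml && cc -o pan pan.c && ./pan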