3 #include "mapping_tree.h"
9 #include "slab_cache_anon.h"
// Kernel object implementing an L4 user-level semaphore ("usem").
// The user side holds an L4_semaphore word (counter + queued flag) in its
// own address space; the kernel side keeps a priority-sorted wait queue of
// blocked threads.
// NOTE(review): this view of the file is elided -- braces, access
// specifiers and some members between the numbered lines are not visible.
13 class U_semaphore : public Kobject
// The kernel debugger's semaphore view needs access to internals.
15 friend class Jdb_semaphore;
17 FIASCO_DECLARE_KOBJ();
// Slab-cache type used by the class-specific operator new/delete.
20 typedef slab_cache_anon Allocator;
// Threads blocked on this semaphore, ordered by priority.
23 Locked_prio_list _queue;
// Result codes delivered to user space in the L4_msg_tag label:
// Ok      -- semaphore acquired
// Retry   -- woken up, caller must retry the down operation
// Timeout -- the timeout hit before a wakeup
// Invalid -- bad user pointer / unresolved page fault / object destroyed
26 enum Result { Ok, Retry, Timeout, Invalid };
29 virtual ~U_semaphore();
36 #include "entry_frame.h"
37 #include "ipc_timeout.h"
39 #include "mem_space.h"
40 #include "thread_state.h"
43 FIASCO_DEFINE_KOBJ(U_semaphore);
// Construct a user semaphore accounted against RAM quota q
// (body not visible in this elided view).
46 U_semaphore::U_semaphore(Ram_quota *q)
// Write the user-visible "queued" flag of the semaphore word.
// Caller must have made the page writable already (no page-fault
// recovery here; contrast with pagein_set_queued).
51 PRIVATE inline NOEXPORT
53 U_semaphore::set_queued(L4_semaphore *sem, bool q)
// poke_user stores through the current task's user address space.
55 current()->mem_space()->poke_user(&(sem->flags), (Mword)q);
// Write the "queued" flag with page-fault recovery: if touching the user
// word faults and the fault cannot be resolved, control returns via the
// setjmp below with err != 0 and the caller treats the semaphore as
// invalid. Also serves to page in the semaphore word writable.
59 PRIVATE inline NOEXPORT
61 U_semaphore::pagein_set_queued(Thread *c, L4_semaphore *sem, bool q)
// setjmp returns 0 on the direct path; non-zero when the page-fault
// handler longjmps back here on an unresolved fault.
65 if (EXPECT_TRUE ((err = setjmp(pf_recovery)) == 0))
// Arm the recovery buffer on the thread before touching user memory.
67 c->recover_jmp_buf(&pf_recovery);
68 // we are preemptible here, in case of a page fault
69 current()->mem_space()->poke_user(&(sem->flags), (Mword)q);
// Disarm recovery again on both the success and the fault path.
72 c->recover_jmp_buf(0);
// Add v to the user-space semaphore counter and return the new value.
// This is a non-atomic read-modify-write through peek_user/poke_user;
// presumably safe because callers run with preemption/CPU lock held and
// the counter page is already paged in -- TODO confirm against callers.
77 PRIVATE inline NOEXPORT
79 U_semaphore::add_counter(L4_semaphore *sem, long v)
81 Smword cnt = current()->mem_space()->peek_user(&(sem->counter)) + v;
82 current()->mem_space()->poke_user(&(sem->counter), cnt);
// Sanity-check a user-supplied semaphore pointer: it must be naturally
// aligned for L4_semaphore and lie below the user/kernel split.
87 PRIVATE inline NOEXPORT
89 U_semaphore::valid_semaphore(L4_semaphore *s)
// Reject misaligned pointers (alignment mask derived from the type size).
91 if (EXPECT_FALSE(((unsigned long)s & (sizeof(L4_semaphore)-1)) != 0))
// Reject pointers that reach into kernel space.
94 if (EXPECT_FALSE((unsigned long)s >= Mem_layout::User_max))
// Semaphore "down" path: try to take the semaphore; if unavailable, block
// the current thread on _queue until wakeup, timeout, or destruction.
// to  -- receive timeout controlling how long to block
// sem -- user-space semaphore word (validated below)
// u   -- caller's UTCB, used to resolve the timeout value
// Returns an L4_msg_tag whose label is one of enum Result.
102 U_semaphore::block_locked(L4_timeout const &to, L4_semaphore *sem, Utcb *u)
104 if (EXPECT_FALSE(!valid_semaphore(sem)))
105 return L4_msg_tag(0, 0, 0, Invalid);
107 Thread *c = current_thread();
// Set the queued flag (and page the word in writable) under fault recovery.
108 if (EXPECT_FALSE (!pagein_set_queued(c, sem, true)))
109 // unhandled page fault; semaphore is considered invalid
110 return L4_msg_tag(0, 0, 0, Invalid);
112 // *counter is now paged in writable
// Fast path: counter was negative-or-zero before our +1 means contended;
// a positive result means the semaphore was free -- undo and return Ok.
113 if (add_counter(sem, 1) > 0)
116 set_queued(sem, false);
118 add_counter(sem, -1);
119 return L4_msg_tag(0, 0, 0, Ok);
// Contended: compute the absolute wakeup time from the caller's timeout.
125 t = to.microsecs(Timer::system_clock(), u);
// Zero/expired timeout: report Timeout without blocking.
127 return L4_msg_tag(0, 0, 0, Timeout);
// Enqueue the caller by priority and put it into send-wait state.
130 c->wait_queue(&_queue);
131 c->sender_enqueue(&_queue, c->sched_context()->prio());
132 c->state_change_dirty(~Thread_ready, Thread_send_wait);
// Arm an IPC timeout on the caller's CPU.
137 timeout.set(t, c->cpu());
138 c->set_timeout(&timeout);
142 // We go here by: (a) a wakeup, (b) a timeout, (c) wait_queue delete,
144 c->state_del_dirty(~Thread_ipc_mask);
146 // The wait_queue was destroyed
147 if (EXPECT_FALSE(!_valid))
148 return L4_msg_tag(0, 0, 0, Invalid);
151 // 1. c is not in the queue, then the wakeup already occurred
152 // 2. c is in the sender list and the timeout has hit, so a timeout is flagged
153 if (EXPECT_FALSE(c->in_sender_list() && timeout.has_hit()))
155 // The timeout really hit so remove c from the queue
156 c->sender_dequeue(&_queue);
157 return L4_msg_tag(0, 0, 0, Timeout);
// Woken up normally: user space must retry the down operation.
160 return L4_msg_tag(0, 0, 0, Retry);
// Semaphore "up" path: wake the highest-priority waiter, if any.
// sem   -- user-space semaphore word (validated below)
// yield -- if true, also switch to the woken thread when it has the
//          same priority as the caller (directed yield)
166 U_semaphore::wakeup_locked(L4_semaphore *sem, bool yield)
168 if (EXPECT_FALSE(!valid_semaphore(sem)))
171 Thread *c = current_thread();
173 // basically make queued flag writable
174 if (EXPECT_FALSE (!pagein_set_queued(c, sem, true)))
175 // semaphore is invalid
// Pick the highest-priority blocked thread.
178 Prio_list_elem *h = _queue.head();
181 set_queued(sem, false); // queue is empty
// Dequeue the waiter and make it ready again.
185 Thread *w = static_cast<Thread*>(Sender::cast(h));
186 w->sender_dequeue(&_queue);
187 w->state_change_dirty(~Thread_ipc_mask, Thread_ready);
193 set_queued(sem, false); // dequeued the last thread
195 // XXX: bad hack, need to sync queue
// Cross-CPU wakeup: the woken thread lives on another CPU.
196 if (w->cpu() != current_cpu())
// Don't switch threads while a schedule is already in progress.
202 if (c->schedule_in_progress())
// Preempt ourselves if the woken thread wins the deblock decision...
205 if (w->sched()->deblock(current_cpu(), current()->sched(), true))
206 current()->switch_to_locked(w);
// ...or voluntarily yield to an equal-priority waiter when requested.
207 else if (yield && w->sched()->prio() == current()->sched()->prio())
209 current()->switch_to_locked(w);
// Otherwise requeue ourselves and reschedule.
212 current()->switch_sched(current()->sched());
213 current()->schedule();
// Destroy the semaphore: drain the wait queue, making every blocked
// thread ready again. Those threads observe the destruction (see the
// _valid check in block_locked) and return Invalid to user space.
222 U_semaphore::~U_semaphore()
226 while (Prio_list_elem *h = _queue.head())
228 Thread *w = static_cast<Thread*>(Sender::cast(h));
229 w->sender_dequeue(&_queue);
// state_change_safely: may be a cross-CPU state transition here.
230 w->state_change_safely(~Thread_ipc_mask, Thread_ready);
// Reschedule under the CPU lock so freshly woken threads can run.
235 Lock_guard<Cpu_lock> guard(&cpu_lock);
237 current()->schedule();
// Factory: charge sizeof(U_semaphore) against quota q, take a slab
// object, and placement-construct the semaphore in it.
// NOTE(review): if q->alloc succeeds but allocator()->alloc() fails the
// quota charge looks unreleased on this path -- the following lines are
// not visible in this elided view; verify the failure path frees it.
242 U_semaphore::alloc(Ram_quota *q)
245 if (q->alloc(sizeof(U_semaphore)) && (nq = allocator()->alloc()))
246 return new (nq) U_semaphore(q);
// Placement new: memory comes from the slab cache via alloc() above.
253 U_semaphore::operator new (size_t, void *p)
// Class-specific delete: return the quota charge taken in alloc() and
// give the slab object back to the cache.
258 U_semaphore::operator delete (void *_l)
260 U_semaphore *l = reinterpret_cast<U_semaphore*>(_l);
// _q is the Ram_quota the object was charged against at alloc time.
262 l->_q->free(sizeof(U_semaphore));
264 allocator()->free(l);
// File-local slab cache holding all U_semaphore objects.
268 static Kmem_slab_t<U_semaphore> _usem_allocator("U_semaphore");
// Accessor for the slab cache used by operator new/delete and alloc().
271 U_semaphore::Allocator *
272 U_semaphore::allocator()
273 { return &_usem_allocator; }
// Kobject invocation entry point: dispatch the user's semaphore syscall.
// The operation is encoded in the message tag's protocol field; the
// semaphore pointer arrives in UTCB value word 0.
278 U_semaphore::invoke(L4_obj_ref, Mword, Syscall_frame *f, Utcb *u)
280 //printf (" do it (%p)\n", l);
// Trace the invocation (compiled in only with the [debug] extension).
281 LOG_TRACE("User semaphore", "sem", ::current(), __usem_fmt,
282 Log_entry *le = tbe->payload<Log_entry>();
283 le->tag = f->tag().raw();
285 le->sem = u->values[0]);
287 switch (f->tag().proto())
289 case 0: //Sys_u_lock_frame::Sem_sleep:
290 //LOG_MSG_3VAL(this, "USBLOCK", regs->timeout().raw(), 0, 0);
// Down: may block with the caller's receive timeout.
291 f->tag(block_locked(f->timeout().rcv, (L4_semaphore*)u->values[0], u));
292 //LOG_MSG_3VAL(this, "USBLOCK+", res, 0, 0);
294 case 1: //Sys_u_lock_frame::Sem_wakeup:
295 //LOG_MSG(this, "USWAKE");
// Up with yield-to-equal-priority enabled.
296 wakeup_locked((L4_semaphore*)u->values[0], true);
297 f->tag(L4_msg_tag(0,0,0,0));
299 case 2: //Sys_u_lock_frame::Sem_wakeup:
300 //LOG_MSG(this, "USWAKE");
// Up without yielding.
301 wakeup_locked((L4_semaphore*)u->values[0], false);
302 f->tag(L4_msg_tag(0,0,0,0));
// Unknown protocol: reject with EInval.
308 f->tag(L4_msg_tag(0,0,0,-L4_err::EInval));
312 // -----------------------------------------------------------------------
// Debug-build extension: adds the trace-buffer formatter declared below.
315 EXTENSION class U_semaphore
// asm name binds the symbol referenced by LOG_TRACE in invoke().
325 static unsigned log_fmt(Tb_entry *, int, char *) asm ("__usem_fmt");
329 // -----------------------------------------------------------------------
330 IMPLEMENTATION [debug]:
// Format a trace-buffer entry written by invoke()'s LOG_TRACE into a
// human-readable line; returns the snprintf result.
334 U_semaphore::log_fmt(Tb_entry *e, int maxlen, char *buf)
336 Log_entry *le = e->payload<Log_entry>();
// Decode the operation from the recorded message tag's protocol field.
338 L4_msg_tag tag(le->tag);
342 case 0: op = "block"; break;
343 case 1: op = "signal"; break;
344 default: op = "invalid"; break;
346 return snprintf(buf, maxlen, "sem=%lx op=%s usem=%lx", le->id,
352 #if 0 // Promela model of the lock
// Dead text (preprocessed out): a SPIN/Promela model of the semaphore
// protocol above, kept for reference. MAX_THREADS processes contend on
// sem; block()/wakeup correspond to block_locked()/wakeup_locked().
353 #define MAX_THREADS 4
354 #define MAX_WQ_ENTRIES MAX_THREADS
362 bit thread_state[MAX_THREADS];
363 hidden byte loops[MAX_THREADS];
364 unsigned in_critical : 4 = 0;
371 bit queue[MAX_WQ_ENTRIES];
374 sem_t sem; /* maybe move init. to init*/
377 inline init_globals()
388 :: (temp >= MAX_WQ_ENTRIES) -> break;
395 inline enqueue_thread(t)
400 inline dequeue_thread(t)
405 inline queue_head(head)
411 :: (sem.queue[local_temp]) -> head = local_temp; break;
416 :: (local_temp >= MAX_WQ_ENTRIES) -> head = local_temp; break;
422 inline block(ret, thread)
429 :: (sem.counter > 0) -> sem.counter--; ret = LOCKED; break;
434 enqueue_thread(thread);
435 thread_state[thread] = 0;
438 :: (thread_state[thread] == 1) -> skip
442 :: (sem.queue[thread] == 0) -> ret = RETRY; break;
446 dequeue_thread(thread);
460 :: (pf == MAX_THREADS) -> sem.queued = 0; break;
465 thread_state[pf] = 1;
467 if :: (pf == MAX_THREADS) -> sem.queued = 0;
483 :: (sem.counter >= 0) -> break;
491 :: (ret == LOCKED || ret == ERROR) -> break;
493 :: else assert(false);
505 :: (!sem.queued) -> break;
521 :: (thread_state[0] == 0) -> thread_state[0] = 1;
522 :: (thread_state[1] == 0) -> thread_state[1] = 1;
523 :: (thread_state[2] == 0) -> thread_state[2] = 1;
528 proctype Thread(byte thread)
532 unsigned local_temp : 4;
539 :: (ret == ERROR) -> goto L1;
544 assert (in_critical <= MAX);
552 :: (loops[thread] == 0) -> break;
559 hidden byte threads = 0;
568 loops[threads] = LOOPS - 1;
572 :: (threads >= MAX_THREADS) -> break;
582 :: (in_critical == MAX) -> assert(false)