3 #include <l4/sys/types.h>
4 #include <l4/util/atomic.h>
/* Opaque forward-declared spinlock type; the struct layout (it has at least a
 * `value` member, see lock_li/unlock_li below) is defined elsewhere in the file. */
12 typedef struct spinlock spinlock_t;
/* Compiler-only memory barrier: the empty asm with a "memory" clobber stops the
 * compiler from caching or reordering memory accesses across this point. It emits
 * no CPU fence instruction.
 *
 * Fix: the original definition ended in a semicolon, so every use `fence();`
 * expanded to a double semicolon and `if (x) fence(); else …` failed to parse
 * (the classic macro-semicolon hazard). The semicolon belongs at the call site,
 * not in the macro body. */
15 #define fence() asm volatile ("" ::: "memory")
/* Atomic fetch-and-store (exchange): atomically stores `val` into *lock and
 * returns the previous contents. Uses x86 XCHGL, which carries an implicit LOCK
 * when one operand is memory, so no explicit lock prefix is needed.
 * NOTE(review): the function's opening brace, return statement and closing brace
 * are elided in this excerpt — confirm against the full source. */
18 inline static unsigned int
19 fas_uint(volatile unsigned int *lock, unsigned int val)
21 __asm__ __volatile__("xchgl %0, %1"
22 : "+m" (*lock), "+q" (val)
/* ud2 raises an invalid-opcode fault — presumably a deliberate trap on an
 * unreachable/fatal path; the enclosing function is not visible in this excerpt. */
32 asm volatile ("ud2" ::: "memory");
/* Fragment of an enum whose header is elided in this excerpt.
 * The first two look like sentinel values for a lock word; the distinctive
 * bit patterns (0xAAAAAAAA / 0xFFFFFFFF) make stale state easy to spot. */
45 lock_unlocking = 0xAAAAAAAA,
46 lock_unowned = 0xFFFFFFFF,
/* Fixed virtual address where the lock_info page is expected to be mapped;
 * get_lock_info() below simply casts this constant to a pointer. */
51 LOCK_INFO_ADDR = 0xA000,
/* Struct-field fragment; the enclosing struct declaration(s) are elided in this
 * excerpt. The trampoline array and replica_count presumably belong to the
 * top-level lock_info, while lockdesc…lock form one per-lock table row — verify
 * against the full source. */
55 unsigned char trampolines[NUM_TRAMPOLINES * TRAMPOLINE_SIZE];
57 volatile l4_addr_t lockdesc; // corresponding pthread_mutex_t ptr
58 volatile l4_addr_t owner; // lock owner
59 volatile l4_addr_t owner_epoch; // lock holder's epoch
60 volatile l4_addr_t wait_count; // count how many threads wait to acquire this lock
61 volatile l4_addr_t acq_count; // count how many threads acquired this lock
62 volatile l4_addr_t wake_count; // count how many threads should be unlocked
63 volatile spinlock_t lock; // internal: lock for this row
65 volatile l4_umword_t replica_count; // number of replicas running a.t.m.
68 /* Compile-time assertion: lock_info must fit into a page. */
/* !!(cond) is 1 when the condition holds, so the array size is 0 (a GNU
 * zero-length-array extension, accepted) — and -1, a hard compile error, when
 * it does not. C11 _Static_assert would express this directly if available.
 * NOTE(review): the `__`-prefixed identifier is reserved for the implementation;
 * consider renaming if this header is touched again. */
69 char __lock_info_size_valid[!!(sizeof(lock_info) <= L4_PAGESIZE)-1];
/* Returns the lock_info page, which lives at the fixed address LOCK_INFO_ADDR.
 * No allocation or lookup — just a cast of the well-known constant.
 * (Closing brace of the definition is elided in this excerpt.) */
71 lock_info* get_lock_info(void);
72 lock_info* get_lock_info(void) {
73 return (lock_info*)LOCK_INFO_ADDR;
/* Acquire the per-row spinlock of lock-table entry idx: spin on an atomic
 * exchange of 1 until the previous value was not 1 (i.e. the lock was free).
 * The loop body (any backoff/yield) and closing braces are elided in this
 * excerpt — verify against the full source. */
77 static inline void lock_li(volatile lock_info *li, unsigned idx)
79 volatile spinlock_t *lock = &li->locks[idx].lock;
80 while (fas_uint(&lock->value, 1) == 1) {
/* Release the per-row spinlock of lock-table entry idx with a plain store of 0.
 * NOTE(review): no barrier is visible before the store in this excerpt —
 * presumably a fence() in the elided lines orders prior writes before the
 * release; confirm against the full source. */
86 static inline void unlock_li(volatile lock_info* li, unsigned idx)
89 li->locks[idx].lock.value = 0;
/* Map a lock-operation ID to a human-readable name for tracing/debug output.
 * Unknown IDs map to "???". (Return type, switch header and closing braces are
 * elided in this excerpt.) */
95 lockID_to_str(unsigned id)
98 case mutex_init_id: return "mutex_init";
99 case mutex_lock_id: return "mutex_lock";
100 case mutex_unlock_id: return "mutex_unlock";
101 case pt_lock_id: return "__pthread_lock";
102 case pt_unlock_id: return "__pthread_unlock";
103 default: return "???";