3 #include <l4/util/atomic.h>
/* Simple spinlock; struct spinlock carries a `value` word (see lock_li()). */
typedef struct spinlock spinlock_t;
/*
 * Compiler barrier: forbids the compiler from reordering or caching memory
 * accesses across this point.  Emits no CPU fence instruction.
 *
 * Fixes vs. original: the trailing ';' inside the macro body produced a
 * spurious empty statement at every call site (`fence();;`) and broke
 * `if (c) fence(); else ...`.  Also use __asm__/__volatile__ so the macro
 * works under strict ISO modes (-std=c11), matching fas_uint() below.
 */
#define fence() __asm__ __volatile__("" ::: "memory")
/*
 * Atomic fetch-and-store: atomically exchanges *lock with val and yields the
 * previous contents of *lock back through the "+q" read-write operand.
 * On x86, xchgl with a memory operand is implicitly locked, so no explicit
 * LOCK prefix is needed.
 */
inline static unsigned int
fas_uint(volatile unsigned int *lock, unsigned int val)
__asm__ __volatile__("xchgl %0, %1"
                     : "+m" (*lock), "+q" (val)
/* ud2 raises an invalid-opcode exception (#UD) -- deliberate hard trap. */
asm volatile ("ud2" ::: "memory");
/* Sentinel values -- presumably stored in the per-lock owner field; confirm. */
lock_unlocking = 0xAAAAAAAA, /* NOTE(review): looks like "release in progress" -- confirm */
lock_unowned = 0xFFFFFFFF, /* NOTE(review): looks like "no current owner" -- confirm */
/* Fixed virtual address the lock_info page is mapped at (see get_lock_info()). */
LOCK_INFO_ADDR = 0xA000,
/* Presumably per-lock code stubs, one TRAMPOLINE_SIZE slot each -- TODO confirm. */
unsigned char trampolines[NUM_TRAMPOLINES * TRAMPOLINE_SIZE];
volatile l4_addr_t lockdesc; // corresponding pthread_mutex_t ptr
volatile l4_addr_t owner; // lock owner
volatile l4_addr_t owner_epoch; // lock holder's epoch
volatile l4_addr_t wait_count; // count how many threads wait to acquire this lock
volatile l4_addr_t acq_count; // count how many threads acquired this lock
volatile l4_addr_t wake_count; // count how many threads should be unlocked
volatile spinlock_t lock; // internal: lock for this row (taken via lock_li/unlock_li)
volatile l4_umword_t replica_count; // number of replicas running a.t.m.
67 /* Compile-time assertion: lock_info must fit into a page) */
68 char __lock_info_size_valid[!!(sizeof(lock_info) <= L4_PAGESIZE)-1];
lock_info* get_lock_info(void);
/* Return the lock_info table, which lives at the fixed address LOCK_INFO_ADDR. */
lock_info* get_lock_info(void) {
  return (lock_info*)LOCK_INFO_ADDR;
/*
 * Acquire the per-row spinlock of lock-table entry idx.
 * Spins on an atomic exchange: fas_uint() stores 1 and returns the old
 * value; a return of 1 means another thread holds the lock, so keep trying.
 */
static inline void lock_li(volatile lock_info *li, unsigned idx)
volatile spinlock_t *lock = &li->locks[idx].lock;
while (fas_uint(&lock->value, 1) == 1) {
/*
 * Release the per-row spinlock of entry idx with a plain store of 0.
 * NOTE(review): relies on x86 store ordering; confirm a compiler barrier
 * (fence()) precedes this store in the elided lines.
 */
static inline void unlock_li(volatile lock_info* li, unsigned idx)
li->locks[idx].lock.value = 0;
/* Map a lock-operation ID to a human-readable name (for tracing/debugging). */
lockID_to_str(unsigned id)
case mutex_init_id: return "mutex_init";
case mutex_lock_id: return "mutex_lock";
case mutex_unlock_id: return "mutex_unlock";
case pt_lock_id: return "__pthread_lock";
case pt_unlock_id: return "__pthread_unlock";
default: return "???"; /* unknown ID -- placeholder, never NULL */