l4/pkg/uclibc/lib/libpthread/src/l4.cc

#ifndef PT_EI
#define PT_EI inline
#endif

#include "internals.h"

#include <l4/sys/capability>
#include <l4/sys/thread>
#include <l4/re/env>
#include <l4/sys/factory>
#include <l4/re/util/cap_alloc>
#include <l4/sys/kdebug.h>
#include <l4/sys/scheduler>

#include <pthread-l4.h>
#include <errno.h>
#include "spinlock.h"

#include "l4.h"

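// Return the L4 thread capability stored in the pthread descriptor of
// 'thread_id', or L4_INVALID_CAP if the handle no longer refers to that
// thread.  The handle lock is held while the descriptor is read.
//
// Usage sketch (hypothetical caller):
//
//   l4_cap_idx_t c = pthread_getl4cap(tid);
//   if (!l4_is_invalid_cap(c))
//     { L4::Cap<L4::Thread> t(c); /* use t with any L4 thread API */ }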
l4_cap_idx_t pthread_getl4cap(pthread_t thread_id)
{
  volatile pthread_descr self = thread_self();
  pthread_handle handle = thread_handle(thread_id);
  pthread_descr th;

  __pthread_lock(handle_to_lock(handle), self);
  if (nonexisting_handle(handle, thread_id)) {
    __pthread_unlock(handle_to_lock(handle));
    return L4_INVALID_CAP;
  }
  l4_cap_idx_t c;
  th = handle_to_descr(handle);
  c = th->p_th_cap;
  __pthread_unlock(handle_to_lock(handle));
  return c;
}

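// Trampoline used by pthread_l4_for_each_thread(): unpack the user
// callback from the request argument and call it with the thread's id.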
static void cb(void *arg, pthread_descr th)
{
  void (*fn)(pthread_t) = (void (*)(pthread_t))arg;

  fn(th->p_tid);
}

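// Have the manager thread call 'fn' once for every thread it knows about.
// 'fn' travels in the request's argument slot and is invoked through the
// 'cb' trampoline above; the final argument to __pthread_send_manager_rq
// appears to request a synchronous (confirmed) request.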
void pthread_l4_for_each_thread(void (*fn)(pthread_t))
{
  struct pthread_request request;

  request.req_thread = thread_self();
  request.req_kind = REQ_FOR_EACH_THREAD;
  request.req_args.for_each.arg = (void *)fn;
  request.req_args.for_each.fn = cb;

  __pthread_send_manager_rq(&request, 1);
}

// This is a rather temporary solution; it will go away when UTCBs can be
// freely placed.
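//
// Free UTCBs are kept on a singly linked list threaded through user[0] of
// each UTCB's thread control registers (TCRs), with
// __pthread_first_free_handle as the list head.  The function below scans
// that list for 'num' UTCBs that are adjacent in memory (L4_UTCB_OFFSET
// apart) and unlinks the run it finds.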
l4_utcb_t *pthread_mgr_l4_reserve_consecutive_utcbs(unsigned num)
{
  l4_utcb_t *i = __pthread_first_free_handle;
  l4_utcb_t *prev = 0;

  while (i)
    {
      l4_utcb_t *s = (l4_utcb_t*)l4_utcb_tcr_u(i)->user[0];
      unsigned cnt = 1;

      while (   s
             && cnt < num
             && (unsigned long)i + cnt * L4_UTCB_OFFSET == (unsigned long)s)
        {
          s = (l4_utcb_t*)l4_utcb_tcr_u(s)->user[0];
          cnt++;
        }

      if (cnt == num)
        {
          // Unlink the run [i, i + num * L4_UTCB_OFFSET) from the free
          // list; 's' is the first free UTCB after the run.
          if (prev)
            l4_utcb_tcr_u(prev)->user[0] = (l4_umword_t)s;
          else
            __pthread_first_free_handle = s;

          return i;
        }

      // 'prev' must end up as the list element whose successor is 's';
      // that is the last UTCB of the run just scanned, not necessarily 'i'.
      prev = (l4_utcb_t*)((unsigned long)i + (cnt - 1) * L4_UTCB_OFFSET);
      i = s;
    }

  return 0;
}

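// Reserve 'num' consecutive UTCBs.  Before the manager thread exists we
// may walk the free list directly; once it is running, the request is
// forwarded to the manager, presumably so all free-list updates happen in
// a single thread.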
l4_utcb_t *pthread_l4_reserve_consecutive_utcbs(unsigned num)
{
  if (l4_is_invalid_cap(__pthread_manager_request))
    return pthread_mgr_l4_reserve_consecutive_utcbs(num);

  struct pthread_request request;

  request.req_thread = thread_self();
  request.req_kind = REQ_L4_RESERVE_CONSECUTIVE_UTCBS;
  request.req_args.l4_reserve_consecutive_utcbs.num = num;

  l4_utcb_t *u;
  request.req_args.l4_reserve_consecutive_utcbs.retutcbp = &u;

  __pthread_send_manager_rq(&request, 1);

  return u;
}

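// Fill in the pthread descriptor of the main thread: grab a fresh
// capability slot for its thread semaphore, remember the main-thread and
// semaphore capabilities, and set the default L4 scheduling policy and
// priority.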
int __pthread_l4_initialize_main_thread(pthread_descr th)
{
  L4Re::Env *env = const_cast<L4Re::Env*>(L4Re::Env::env());
  if (!env)
    return -L4_ENODEV;

  L4::Cap<Th_sem_cap> s(env->first_free_cap() << L4_CAP_SHIFT);
  if (!s.is_valid() || !s.cap())
    return -L4_ENOMEM;

  // needed by __alloc_thread_sem
  th->p_th_cap = env->main_thread().cap();

  int err = __alloc_thread_sem(th, s);
  if (err < 0)
    return err;

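  // Bump the environment's first-free-capability hint past the slot just
  // consumed (assuming L4_CAP_OFFSET is the stride of one capability slot,
  // this advances the index by one).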
  env->first_free_cap((s.cap() + L4_CAP_OFFSET) >> L4_CAP_SHIFT);

  th->p_thsem_cap = s.cap();

  th->p_sched_policy = SCHED_L4;
  th->p_priority = 0x10;

  th->p_lock = handle_to_lock(l4_utcb());
  th->p_tid  = l4_utcb();

  return 0;
}

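// Default L4 priorities used when mapping the POSIX scheduling policies;
// weak definitions so that a program (or another library) can override
// them at link time.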
int __attribute__((weak)) __pthread_sched_idle_prio    = 0x01;
int __attribute__((weak)) __pthread_sched_other_prio   = 0x02;
int __attribute__((weak)) __pthread_sched_rr_prio_min  = 0x40;
int __attribute__((weak)) __pthread_sched_rr_prio_max  = 0xf0;

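// Translate (policy, sched_priority) into an L4 priority, record the pair
// in the thread descriptor, and push the new priority to the kernel
// scheduler.  Fails with ESRCH for a stale handle and EINVAL for a
// combination that __pthread_l4_getprio rejects.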
int __pthread_setschedparam(pthread_t thread, int policy,
                            const struct sched_param *param) throw()
{
  pthread_handle handle = thread_handle(thread);
  pthread_descr th;

  __pthread_lock(handle_to_lock(handle), NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(handle_to_lock(handle));
    return ESRCH;
  }
  th = handle_to_descr(handle);
  int prio = __pthread_l4_getprio(policy, param->sched_priority);
  if (prio < 0)
    {
      __pthread_unlock(handle_to_lock(handle));
      return EINVAL;
    }

  th->p_sched_policy = policy;
  th->p_priority = param->sched_priority;

  {
    L4::Cap<L4::Thread> t(th->p_th_cap);
    l4_sched_param_t sp = l4_sched_param(prio, 0);
    L4Re::Env::env()->scheduler()->run_thread(t, sp);
  }
  __pthread_unlock(handle_to_lock(handle));

  if (!l4_is_invalid_cap(__pthread_manager_request))
    __pthread_manager_adjust_prio(prio);

  return 0;
}
strong_alias (__pthread_setschedparam, pthread_setschedparam)

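// Read back the scheduling policy and priority recorded in the thread
// descriptor, under the handle lock.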
int __pthread_getschedparam(pthread_t thread, int *policy,
                            struct sched_param *param) throw()
{
  pthread_handle handle = thread_handle(thread);
  int pol, prio;

  __pthread_lock(handle_to_lock(handle), NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(handle_to_lock(handle));
    return ESRCH;
  }

  pol = handle_to_descr(handle)->p_sched_policy;
  prio = handle_to_descr(handle)->p_priority;
  __pthread_unlock(handle_to_lock(handle));

  *policy = pol;
  param->sched_priority = prio;

  return 0;
}
strong_alias (__pthread_getschedparam, pthread_getschedparam)