/* can_quekern.c - CAN message queues functions for the Linux kernel
 * Linux CAN-bus device driver.
 * New CAN queues by Pavel Pisa - OCERA team member
 * email:pisa@cmp.felk.cvut.cz
 * This software is released under the GPL-License.
 * Version lincan-0.2  9 Jul 2003
 */

#include "../include/can.h"
#include "../include/can_sysdep.h"
#include "../include/can_queue.h"

//#define CAN_DEBUG

extern atomic_t edge_num_cnt;

#ifdef CAN_DEBUG
        #define DEBUGQUE(fmt,args...) printk(KERN_ERR "can_queue (debug): " fmt,\
        ##args)

#else
        #define DEBUGQUE(fmt,args...)
#endif

#define ERRMSGQUE(fmt,args...) printk(KERN_ERR "can_queue: " fmt,\
        ##args)

/*
 * Modifies Tx message processing
 *  0 .. local message processing disabled
 *  1 .. local messages disabled by default but can be enabled by canque_set_filt
 *  2 .. local messages enabled by default, can be disabled by canque_set_filt
 */
extern int processlocal;

void canque_dead_func(unsigned long data);

/* Support for dead ends structures left after client close */
spinlock_t canque_dead_func_lock;
LIST_HEAD(canque_dead_ends);
/* retrieved by list_entry(canque_dead_ends.next,struct canque_ends_t,dead_peers) */
LIST_HEAD(canque_dead_edges);
/* retrieved by list_entry(canque_dead_edges.next,struct canque_edge_t,inpeers) */
DECLARE_TASKLET(canque_dead_tl, canque_dead_func, 0);
/* activated by tasklet_schedule(&canque_dead_tl) */

static inline
struct canque_edge_t *canque_dead_edges_cut_first(void)
{
        unsigned long flags;
        struct canque_edge_t *edge;
        spin_lock_irqsave(&canque_dead_func_lock, flags);
        if(list_empty(&canque_dead_edges))
                edge=NULL;
        else{
                edge=list_entry(canque_dead_edges.next,struct canque_edge_t,inpeers);
                list_del(&edge->inpeers);
        }
        spin_unlock_irqrestore(&canque_dead_func_lock, flags);
        return edge;
}

void canque_dead_func(unsigned long data)
{
        unsigned long flags;
        struct canque_edge_t *qedge;
        struct canque_ends_t *qends;
        struct list_head *entry;
        int i;

        while((qedge=canque_dead_edges_cut_first())){
                DEBUGQUE("edge %d disposed\n",qedge->edge_num);
                kfree(qedge);
        }

        spin_lock_irqsave(&canque_dead_func_lock, flags);
        entry=canque_dead_ends.next;
        spin_unlock_irqrestore(&canque_dead_func_lock,flags);
        while(entry!=&canque_dead_ends){
                qends=list_entry(entry,struct canque_ends_t,dead_peers);
                entry=entry->next;
                if(!list_empty(&qends->inlist))
                        continue;
                if(!list_empty(&qends->idle))
                        continue;
                for(i=CANQUEUE_PRIO_NR;i--;)
                        if(!list_empty(&qends->active[i]))
                                break;
                if(i>=0)
                        continue;
                spin_lock_irqsave(&canque_dead_func_lock, flags);
                list_del(&qends->dead_peers);
                spin_unlock_irqrestore(&canque_dead_func_lock,flags);
                DEBUGQUE("ends structure disposed\n");
                kfree(qends);
        }

}


void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl)
{
        unsigned long flags;

        if(dead_fl) return;

        if(canqueue_disconnect_edge(edge)<0){
                ERRMSGQUE("canque_edge_do_dead: canqueue_disconnect_edge failed !!!\n");
                return;
        }

        spin_lock_irqsave(&canque_dead_func_lock, flags);
        list_add(&edge->inpeers,&canque_dead_edges);
        spin_unlock_irqrestore(&canque_dead_func_lock, flags);
        tasklet_schedule(&canque_dead_tl);
}



/*if(qends->ends_flags & CAN_ENDSF_DEAD){
        spin_lock_irqsave(&canque_dead_func_lock, flags);
        list_del(&qends->dead_peers);
        list_add(&qends->dead_peers,&canque_dead_ends);
        spin_unlock_irqrestore(&canque_dead_func_lock, flags);
        tasklet_schedule(&canque_dead_tl);
}*/

/**
 * canqueue_notify_kern - notification callback handler for Linux userspace clients
 * @qends: pointer to the callback side ends structure
 * @qedge: edge which invoked notification
 * @what: notification type
 */
void canqueue_notify_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge, int what)
{
        DEBUGQUE("canqueue_notify_kern for edge %d, use %d and event %d\n",
                        qedge->edge_num,(int)atomic_read(&qedge->edge_used),what);
        switch(what){
                case CANQUEUE_NOTIFY_EMPTY:
                        wake_up(&qends->endinfo.fileinfo.emptyq);
                        if(canque_fifo_test_and_clear_fl(&qedge->fifo, FREEONEMPTY))
                                canque_edge_decref(qedge);
                        break;
                case CANQUEUE_NOTIFY_SPACE:
                        wake_up(&qends->endinfo.fileinfo.writeq);
                        break;
                case CANQUEUE_NOTIFY_PROC:
                        wake_up(&qends->endinfo.fileinfo.readq);
                        break;
                case CANQUEUE_NOTIFY_NOUSR:
                        wake_up(&qends->endinfo.fileinfo.readq);
                        wake_up(&qends->endinfo.fileinfo.writeq);
                        wake_up(&qends->endinfo.fileinfo.emptyq);
                        break;
                case CANQUEUE_NOTIFY_DEAD_WANTED:
                case CANQUEUE_NOTIFY_DEAD:
                        if(canque_fifo_test_and_clear_fl(&qedge->fifo, READY))
                                canque_edge_decref(qedge);
                        break;
                case CANQUEUE_NOTIFY_ATTACH:
                        break;
        }
}
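
/* Illustrative sketch only: the generic queue layer delivers events by calling
 * the notify hook that canqueue_ends_init_kern() below installs, conceptually
 *
 *      if(qends->notify)
 *              qends->notify(qends, qedge, CANQUEUE_NOTIFY_PROC);
 *
 * so a task sleeping in canque_get_outslot_wait_kern() on
 * qends->endinfo.fileinfo.readq is woken when a slot becomes ready for
 * processing. The NULL guard and the exact call site are assumptions here,
 * not a quote of the queue core.
 */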

/**
 * canqueue_ends_init_kern - ends initialization specific to Linux userspace clients
 * @qends: pointer to the callback side ends structure
 */
int canqueue_ends_init_kern(struct canque_ends_t *qends)
{
        canqueue_ends_init_gen(qends);
        qends->context=NULL;
        init_waitqueue_head(&qends->endinfo.fileinfo.readq);
        init_waitqueue_head(&qends->endinfo.fileinfo.writeq);
        init_waitqueue_head(&qends->endinfo.fileinfo.emptyq);
        qends->notify=canqueue_notify_kern;
        DEBUGQUE("canqueue_ends_init_kern\n");
        return 0;
}
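
/* Usage sketch, illustrative only: a kernel-side client is expected to provide
 * the storage for the ends structure itself and to pair this initialization
 * with canqueue_ends_dispose_kern() defined later in this file. The allocation
 * style shown is an assumption, not a prescribed pattern.
 *
 *      struct canque_ends_t *qends;
 *
 *      qends = kmalloc(sizeof(struct canque_ends_t), GFP_KERNEL);
 *      if(qends == NULL)
 *              return -ENOMEM;
 *      canqueue_ends_init_kern(qends);
 *      ...
 *      canqueue_ends_dispose_kern(qends, 1);
 */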


/**
 * canque_get_inslot4id_wait_kern - find or wait for best outgoing edge and slot for given ID
 * @qends: ends structure belonging to calling communication object
 * @qedgep: place to store pointer to found edge
 * @slotp: place to store pointer to allocated slot
 * @cmd: command type for slot
 * @id: communication ID of message to send into edge
 * @prio: optional priority of message
 *
 * Same as canque_get_inslot4id(), except that it waits for a free slot
 * when the queue is full. The function is specific to Linux userspace clients.
 * Return Value: If there is no usable edge, a negative value is returned.
 */
int canque_get_inslot4id_wait_kern(struct canque_ends_t *qends,
        struct canque_edge_t **qedgep, struct canque_slot_t **slotp,
        int cmd, unsigned long id, int prio)
{
        int ret=-1;
        DEBUGQUE("canque_get_inslot4id_wait_kern for cmd %d, id %ld, prio %d\n",cmd,id,prio);
        wait_event_interruptible((qends->endinfo.fileinfo.writeq),
                (ret=canque_get_inslot4id(qends,qedgep,slotp,cmd,id,prio))!=-1);
        return ret;
}
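
/* Write-path sketch, illustrative only: it assumes canque_put_inslot() from
 * the generic queue code declared in can_queue.h, a canmsg_t message pointed
 * to by msg, and cmd 0; none of these details are mandated by this file.
 *
 *      struct canque_edge_t *qedge;
 *      struct canque_slot_t *slot;
 *
 *      if(canque_get_inslot4id_wait_kern(qends, &qedge, &slot, 0, msg->id, 0) < 0)
 *              return -EIO;
 *      slot->msg = *msg;
 *      canque_put_inslot(qends, qedge, slot);
 */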

/**
 * canque_get_outslot_wait_kern - receive or wait for ready slot for given ends
 * @qends: ends structure belonging to calling communication object
 * @qedgep: place to store pointer to found edge
 * @slotp: place to store pointer to received slot
 *
 * The same as canque_test_outslot(), except that it waits when there is
 * no ready slot for the given ends. The function is specific to Linux userspace clients.
 * Return Value: A negative value informs that there is no ready output
 *      slot for the given ends. A positive value is equal to the command
 *      used when the slot was allocated by the input side.
 */
int canque_get_outslot_wait_kern(struct canque_ends_t *qends,
        struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
{
        int ret=-1;
        DEBUGQUE("canque_get_outslot_wait_kern\n");
        wait_event_interruptible((qends->endinfo.fileinfo.readq),
                (ret=canque_test_outslot(qends,qedgep,slotp))!=-1);
        return ret;
}
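
/* Read-path sketch, illustrative only: canque_free_outslot() is assumed to be
 * the generic queue call that releases a processed slot, and the copy into
 * *msg assumes a canmsg_t payload kept in the slot.
 *
 *      struct canque_edge_t *qedge;
 *      struct canque_slot_t *slot;
 *
 *      if(canque_get_outslot_wait_kern(qends, &qedge, &slot) < 0)
 *              return -EIO;
 *      *msg = slot->msg;
 *      canque_free_outslot(qends, qedge, slot);
 */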

/**
 * canque_sync_wait_kern - wait until all slots of the edge are processed
 * @qends: ends structure belonging to calling communication object
 * @qedge: pointer to edge
 *
 * The function waits for the edge to reach the empty state.
 * Return Value: A positive value indicates that the edge empty state has been reached.
 *      A negative or zero value informs about an interrupted wait or other problem.
 */
int canque_sync_wait_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge)
{
        int ret=-1;
        DEBUGQUE("canque_sync_wait_kern\n");
        wait_event_interruptible((qends->endinfo.fileinfo.emptyq),
                (ret=canque_fifo_test_fl(&qedge->fifo,EMPTY)?1:0));
        return ret;
}


/**
 * canque_new_edge_kern - allocate new edge structure in the Linux kernel context
 * @slotsnr: required number of slots in the newly allocated edge structure
 *
 * Return Value: Returns pointer to allocated edge structure or %NULL if
 *      there is not enough memory to process operation.
 */
struct canque_edge_t *canque_new_edge_kern(int slotsnr)
{
        struct canque_edge_t *qedge;
        qedge = (struct canque_edge_t *)kmalloc(sizeof(struct canque_edge_t), GFP_KERNEL);
        if(qedge == NULL) return NULL;

        memset(qedge,0,sizeof(struct canque_edge_t));
        spin_lock_init(&qedge->fifo.fifo_lock);
        if(canque_fifo_init_slots(&qedge->fifo, slotsnr)<0){
                kfree(qedge);
                DEBUGQUE("canque_new_edge_kern failed\n");
                return NULL;
        }
        atomic_set(&qedge->edge_used,1);
        qedge->filtid = 0;
        qedge->filtmask = canque_filtid2internal(0l, (processlocal<2)? MSG_LOCAL:0);
        qedge->edge_prio = 0;
    #ifdef CAN_DEBUG
        /* not exactly clean, but enough for debugging */
        atomic_inc(&edge_num_cnt);
        qedge->edge_num=atomic_read(&edge_num_cnt);
    #endif /* CAN_DEBUG */
        DEBUGQUE("canque_new_edge_kern %d\n",qedge->edge_num);
        return qedge;
}
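
/* Creation sketch, illustrative only: a new edge is typically wired between
 * two ends structures. canqueue_connect_edge() is assumed here to be the
 * counterpart of canqueue_disconnect_edge() in the generic queue code, and the
 * slot count of 64 is an arbitrary example value.
 *
 *      struct canque_edge_t *qedge;
 *
 *      qedge = canque_new_edge_kern(64);
 *      if(qedge == NULL)
 *              return -ENOMEM;
 *      canqueue_connect_edge(qedge, source_ends, target_ends);
 */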

/**
 * canqueue_disconnect_edge_kern - disconnect edge from communicating entities with wait
 * @qends: ends structure belonging to calling communication object
 * @qedge: pointer to edge
 *
 * Same as canqueue_disconnect_edge(), but tries to wait for a state with zero
 * use counter.
 * Return Value: A negative value means that the edge is still used and cannot
 *      be disconnected yet. The operation has to be delayed.
 */
int canqueue_disconnect_edge_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge)
{
        canque_fifo_set_fl(&qedge->fifo,BLOCK);
        DEBUGQUE("canqueue_disconnect_edge_kern %d called\n",qedge->edge_num);
        if(!canque_fifo_test_and_set_fl(&qedge->fifo,DEAD)){
                canque_notify_bothends(qedge, CANQUEUE_NOTIFY_DEAD);

                if(atomic_read(&qedge->edge_used)>0)
                        atomic_dec(&qedge->edge_used);

                DEBUGQUE("canqueue_disconnect_edge_kern %d waiting\n",qedge->edge_num);
                wait_event((qends->endinfo.fileinfo.emptyq),
                        (canqueue_disconnect_edge(qedge)>=0));

                /*set_current_state(TASK_UNINTERRUPTIBLE);*/
                /*schedule_timeout(HZ);*/
                return 0;
        } else {
                DEBUGQUE("canqueue_disconnect_edge_kern cannot set DEAD\n");
                return -1;
        }
}


int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head *list)
{
        struct canque_edge_t *edge;
        unsigned long flags;
        for(;;){
                spin_lock_irqsave(&qends->ends_lock,flags);
                if(list_empty(list)){
                        spin_unlock_irqrestore(&qends->ends_lock,flags);
                        return 0;
                }
                if(list == &qends->inlist)
                        edge=list_entry(list->next,struct canque_edge_t,inpeers);
                else
                        edge=list_entry(list->next,struct canque_edge_t,outpeers);
                atomic_inc(&edge->edge_used);
                spin_unlock_irqrestore(&qends->ends_lock,flags);
                if(canqueue_disconnect_edge_kern(qends, edge)>=0) {
                        /* Free edge memory */
                        canque_fifo_done(&edge->fifo);
                        kfree(edge);
                }else{
                        canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
                        canque_edge_decref(edge);
                        DEBUGQUE("canqueue_disconnect_list_kern in troubles\n");
                        DEBUGQUE("the edge %d has usage count %d and flags %ld\n",edge->edge_num,atomic_read(&edge->edge_used),edge->fifo.fifo_flags);
                        return -1;
                }
        }
}

void canqueue_block_list(struct canque_ends_t *qends, struct list_head *list)
{
        struct canque_edge_t *edge;
        struct list_head *entry;

        /* has to be called with qends->ends_lock already locked */
        list_for_each(entry,list){
                if(list == &qends->inlist)
                        edge=list_entry(entry,struct canque_edge_t,inpeers);
                else
                        edge=list_entry(entry,struct canque_edge_t,outpeers);
                canque_fifo_set_fl(&edge->fifo,BLOCK);
        }
}

int canqueue_ends_sync_all_kern(struct canque_ends_t *qends)
{
        struct canque_edge_t *qedge;

        canque_for_each_inedge(qends, qedge){
                DEBUGQUE("canque_sync_wait_kern called for edge %d\n",qedge->edge_num);
                canque_sync_wait_kern(qends, qedge);
        }
        return 0;
}

int canqueue_ends_done_inends(struct canque_ends_t *qends, int send_rest)
{
        struct canque_edge_t *edge;

        canque_for_each_inedge(qends, edge){
                canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
                if(send_rest){
                        canque_edge_incref(edge);
                        if(!canque_fifo_test_and_set_fl(&edge->fifo, FREEONEMPTY)){
                                if(!canque_fifo_test_fl(&edge->fifo, EMPTY))
                                        continue;
                                if(!canque_fifo_test_and_clear_fl(&edge->fifo, FREEONEMPTY))
                                        continue;
                        }
                        canque_edge_decref(edge);
                }
        }
        return list_empty(&qends->inlist)?0:1;
}

/**
 * canqueue_ends_dispose_kern - finalizing of the ends structure for Linux kernel clients
 * @qends: pointer to ends structure
 * @sync: flag indicating that the user wants to wait for processing of all remaining
 *      messages
 *
 * Return Value: The function should be designed in such a way that it cannot fail.
 */
int canqueue_ends_dispose_kern(struct canque_ends_t *qends, int sync)
{
        unsigned long flags;
        int i;
        int delayed;

        DEBUGQUE("canqueue_ends_dispose_kern\n");
        spin_lock_irqsave(&qends->ends_lock,flags);
        canqueue_block_list(qends, &qends->idle);
        for(i=CANQUEUE_PRIO_NR;--i>=0;){
                canqueue_block_list(qends, &qends->active[i]);
        }
        canqueue_block_list(qends, &qends->idle);
        canqueue_block_list(qends, &qends->inlist);
        spin_unlock_irqrestore(&qends->ends_lock,flags);

        /* Wait for sending of all pending messages in the output FIFOs */
        if(sync)
                canqueue_ends_sync_all_kern(qends);

        /* Finish all outgoing edges listed in inends */
        delayed=canqueue_ends_done_inends(qends, 1);

        delayed|=canqueue_disconnect_list_kern(qends, &qends->idle);
        for(i=CANQUEUE_PRIO_NR;--i>=0;){
                delayed|=canqueue_disconnect_list_kern(qends, &qends->active[i]);
        }

        wake_up(&qends->endinfo.fileinfo.readq);
        wake_up(&qends->endinfo.fileinfo.writeq);
        wake_up(&qends->endinfo.fileinfo.emptyq);

        if(delayed){
                spin_lock_irqsave(&canque_dead_func_lock, flags);
                qends->ends_flags |= CAN_ENDSF_DEAD;
                list_add(&qends->dead_peers,&canque_dead_ends);
                spin_unlock_irqrestore(&canque_dead_func_lock, flags);
                tasklet_schedule(&canque_dead_tl);

                DEBUGQUE("canqueue_ends_dispose_kern delayed\n");
                return 1;
        }

        kfree(qends);
        DEBUGQUE("canqueue_ends_dispose_kern finished\n");
        return 0;
}
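
/* Teardown sketch, illustrative only: a typical close path of a kernel-side
 * client. With sync set to 1 the call waits for pending messages first; a
 * return value of 1 means only that the final kfree() of the ends structure
 * has been deferred to the canque_dead_tl tasklet, so the caller must not
 * touch qends afterwards in either case.
 *
 *      canqueue_ends_dispose_kern(qends, 1);
 *      qends = NULL;
 */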

void canqueue_kern_initialize(void)
{
}