]> rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/blob - drivers/staging/ozwpan/ozpd.c
staging: ozwpan: flush workqueue
[sojka/nv-tegra/linux-3.10.git] / drivers / staging / ozwpan / ozpd.c
1 /* -----------------------------------------------------------------------------
2  * Copyright (c) 2011 Ozmo Inc
3  * Released under the GNU General Public License Version 2 (GPLv2).
4  * -----------------------------------------------------------------------------
5  */
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/errno.h>
12 #include "ozprotocol.h"
13 #include "ozeltbuf.h"
14 #include "ozpd.h"
15 #include "ozproto.h"
16 #include "oztrace.h"
17 #include "ozcdev.h"
18 #include "ozusbsvc.h"
19 #include <asm/unaligned.h>
20 #include <linux/uaccess.h>
21 #include <net/psnap.h>
/*------------------------------------------------------------------------------
 */
/* Max number of recycled TX frames kept on the per-PD tx_pool. */
#define OZ_MAX_TX_POOL_SIZE     6
/* skb->priority for outgoing frames; presumably encodes the WMM voice
 * access category (AC_VO) — TODO confirm the 0x106 value against the
 * wireless stack's priority mapping.
 */
#define AC_VO   0x106
/*------------------------------------------------------------------------------
 */
/* Forward declarations of file-local helpers. */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
static void oz_pd_free(struct work_struct *work);
static void oz_pd_uevent_workitem(struct work_struct *work);
/*------------------------------------------------------------------------------
 * Counts the uncompleted isoc frames submitted to netcard.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
/* Application handler functions.
 * One entry per application id: init/term (module lifetime), start/stop
 * (per-PD), rx, heartbeat and farewell callbacks, plus the application id
 * itself.  Unused slots are wired to the oz_def_app_* no-op stubs.
 */
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
        /* USB service. */
        {oz_usb_init,
        oz_usb_term,
        oz_usb_start,
        oz_usb_stop,
        oz_usb_rx,
        oz_usb_heartbeat,
        oz_usb_farewell,
        OZ_APPID_USB},

        /* Unused slot. */
        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        NULL,
        NULL,
        OZ_APPID_UNUSED1},

        /* Unused slot. */
        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        NULL,
        NULL,
        OZ_APPID_UNUSED2},

        /* Serial (character device) service. */
        {oz_cdev_init,
        oz_cdev_term,
        oz_cdev_start,
        oz_cdev_stop,
        oz_cdev_rx,
        NULL,
        NULL,
        OZ_APPID_SERIAL},

        /* Unused slot. */
        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        NULL,
        NULL,
        OZ_APPID_UNUSED3},

        /* Unused slot. */
        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        NULL,
        NULL,
        OZ_APPID_UNUSED4},

        /* TFTP rides on the serial service; only rx is distinct. */
        {NULL,
        NULL,
        NULL,
        NULL,
        oz_cdev_rx,
        NULL,
        NULL,
        OZ_APPID_TFTP},
};
/*------------------------------------------------------------------------------
 * Context: process
 * Default (no-op) init handler used by unused application slots.
 */
static int oz_def_app_init(void)
{
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: process
 * Default (no-op) term handler used by unused application slots.
 */
static void oz_def_app_term(void)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq
 * Default (no-op) start handler used by unused application slots.
 */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq
 * Default (no-op) stop handler used by unused application slots.
 */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq
 * Default (no-op) rx handler used by unused application slots.
 */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 * Record the PD's new state.  NOTE(review): plain store with no lock or
 * barrier here — callers appear to serialise via the polling lock (see
 * oz_pd_stop); confirm before calling from new contexts.
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
        pd->state = state;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 * Take a reference on the PD; paired with oz_pd_put().
 */
void oz_pd_get(struct oz_pd *pd)
{
        atomic_inc(&pd->ref_count);
}
160 /*------------------------------------------------------------------------------
161  * Context: softirq or process
162  */
163 void oz_pd_put(struct oz_pd *pd)
164 {
165         if (atomic_dec_and_test(&pd->ref_count))
166                 oz_pd_destroy(pd);
167
168         WARN_ON(atomic_read(&pd->ref_count) < 0);
169 }
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 * Allocate and initialise a PD (peripheral device) structure for the
 * device with the given MAC address.  Returns NULL on allocation failure.
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
        struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
        if (pd) {
                int i;

                /* Starts with two references — presumably one for the PD
                 * list and one for the caller; TODO confirm against the
                 * callers in ozproto.c.
                 */
                atomic_set(&pd->ref_count, 2);
                for (i = 0; i < OZ_APPID_MAX; i++)
                        spin_lock_init(&pd->app_lock[i]);
                /* Sentinel value: no packet received yet. */
                pd->last_rx_pkt_num = 0xffffffff;
                oz_pd_set_state(pd, OZ_PD_S_IDLE);
                pd->max_tx_size = OZ_MAX_TX_SIZE;
                memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
                if (0 != oz_elt_buf_init(&pd->elt_buff)) {
                        kfree(pd);
                        return NULL;
                }
                spin_lock_init(&pd->tx_frame_lock);
                INIT_LIST_HEAD(&pd->tx_queue);
                INIT_LIST_HEAD(&pd->farewell_list);
                /* Empty queue: "last sent" points at the list head. */
                pd->last_sent_frame = &pd->tx_queue;
                spin_lock_init(&pd->stream_lock);
                INIT_LIST_HEAD(&pd->stream_list);
                tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
                                                        (unsigned long)pd);
                tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
                                                        (unsigned long)pd);
                hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                pd->heartbeat.function = oz_pd_heartbeat_event;
                pd->timeout.function = oz_pd_timeout_event;
                pd->reset_retry = 0;

                spin_lock_init(&pd->pd_destroy_lock);
                pd->pd_destroy_scheduled = false;
                INIT_WORK(&pd->workitem, oz_pd_free);
                INIT_WORK(&pd->uevent_workitem, oz_pd_uevent_workitem);
        }
        return pd;
}
/* Work-item handler that tears a PD down: kills the tasklets, drains the
 * uevent work, then frees streams, queued TX frames, element buffers,
 * farewells and the TX frame pool before releasing the netdev reference
 * and the PD itself.
 * Context: process (system workqueue).
 */
static void oz_pd_free(struct work_struct *work)
{
        struct oz_pd *pd;
        struct list_head *e;
        struct oz_tx_frame *f;
        struct oz_isoc_stream *st;
        struct oz_farewell *fwell;
        pd = container_of(work, struct oz_pd, workitem);
        oz_trace_msg(M, "Destroying PD:%p\n", pd);
        tasklet_kill(&pd->heartbeat_tasklet);
        tasklet_kill(&pd->timeout_tasklet);

        /* Finish scheduled uevent work, uevent might be rescheduled by
         * oz timeout tasklet again
         */
        cancel_work_sync(&pd->uevent_workitem);

        /* Delete any streams.
         */
        e = pd->stream_list.next;
        while (e != &pd->stream_list) {
                st = container_of(e, struct oz_isoc_stream, link);
                e = e->next;    /* advance before the node is freed */
                oz_isoc_stream_free(st);
        }
        /* Free any queued tx frames.
         */
        e = pd->tx_queue.next;
        while (e != &pd->tx_queue) {
                f = container_of(e, struct oz_tx_frame, link);
                e = e->next;    /* advance before the node is freed */
                if (f->skb != NULL)
                        kfree_skb(f->skb);
                oz_retire_frame(pd, f);
        }
        oz_elt_buf_term(&pd->elt_buff);
        /* Free any farewells.
         */
        e = pd->farewell_list.next;
        while (e != &pd->farewell_list) {
                fwell = container_of(e, struct oz_farewell, link);
                e = e->next;    /* advance before the node is freed */
                kfree(fwell);
        }
        /* Deallocate all frames in tx pool.
         * (tx_pool is a singly linked list threaded through link.next.)
         */
        while (pd->tx_pool) {
                e = pd->tx_pool;
                pd->tx_pool = e->next;
                kfree(container_of(e, struct oz_tx_frame, link));
        }
        if (pd->net_dev) {
                oz_trace_msg(M, "dev_put(%p)\n", pd->net_dev);
                dev_put(pd->net_dev);
        }

        kfree(pd);
}
271
272
273 /*------------------------------------------------------------------------------
274  * Context: softirq or Process
275  */
276 void oz_pd_destroy(struct oz_pd *pd)
277 {
278         int ret;
279
280         if (pd == NULL)
281                 return;
282         spin_lock_bh(&pd->pd_destroy_lock);
283         if (pd->pd_destroy_scheduled) {
284                 pr_info("%s: not rescheduling oz_pd_free\n", __func__);
285                 spin_unlock_bh(&pd->pd_destroy_lock);
286                 return;
287         }
288         pd->pd_destroy_scheduled = true;
289
290         if (hrtimer_active(&pd->timeout))
291                 hrtimer_cancel(&pd->timeout);
292         if (hrtimer_active(&pd->heartbeat))
293                 hrtimer_cancel(&pd->heartbeat);
294
295
296         ret = schedule_work(&pd->workitem);
297         if (!ret)
298                 pr_info("failed to schedule workitem\n");
299         spin_unlock_bh(&pd->pd_destroy_lock);
300 }
/*------------------------------------------------------------------------------
 * Context: process (workqueue)
 * Emit a KOBJ_CHANGE uevent carrying the PD's MAC address (ID_MAC=...),
 * then drop the reference taken by oz_pd_notify_uevent().
 */
static void oz_pd_uevent_workitem(struct work_struct *work)
{
        struct oz_pd *pd;
        char mac_buf[20];
        char *envp[2];

        pd = container_of(work, struct oz_pd, uevent_workitem);

        oz_trace_msg(D, "uevent ID_MAC:%pm\n", pd->mac_addr);
        snprintf(mac_buf, sizeof(mac_buf), "ID_MAC=%pm", pd->mac_addr);
        envp[0] = mac_buf;
        envp[1] = NULL;
        kobject_uevent_env(&g_oz_wpan_dev->kobj, KOBJ_CHANGE, envp);
        oz_pd_put(pd);
}
318 /*------------------------------------------------------------------------------
319  */
320 void oz_pd_notify_uevent(struct oz_pd *pd)
321 {
322         int ret;
323
324         oz_pd_get(pd);
325
326         ret = schedule_work(&pd->uevent_workitem);
327         if (!ret) {
328                 pr_info("%s: failed to schedule workitem\n", __func__);
329                 oz_pd_put(pd);
330         }
331 }
332
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 * Start (or resume) the application services selected in 'apps'.
 * Returns 0 on success, -1 as soon as one service's start handler fails
 * (remaining services are then not started).
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
        const struct oz_app_if *ai;
        int rc = 0;
        oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
        /* The TFTP service runs on top of the serial service. */
        if (apps & (1<<OZ_APPID_TFTP))
                apps |= 1<<OZ_APPID_SERIAL;
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (apps & (1<<ai->app_id)) {
                        if (ai->start && ai->start(pd, resume)) {
                                rc = -1;
                                break;
                        }
                        /* App bookkeeping is guarded by the polling lock. */
                        oz_polling_lock_bh();
                        pd->total_apps |= (1<<ai->app_id);
                        if (resume)
                                pd->paused_apps &= ~(1<<ai->app_id);
                        oz_polling_unlock_bh();
                }
        }
        return rc;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 * Stop (or pause) the application services selected in 'apps'.  The
 * paused/total bookkeeping is updated under the polling lock before each
 * service's stop handler runs outside it.
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
        const struct oz_app_if *ai;
        oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
        /* The TFTP service runs on top of the serial service. */
        if (apps & (1<<OZ_APPID_TFTP))
                apps |= 1<<OZ_APPID_SERIAL;
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (apps & (1<<ai->app_id)) {
                        oz_polling_lock_bh();
                        if (pause) {
                                pd->paused_apps |= (1<<ai->app_id);
                        } else {
                                pd->total_apps &= ~(1<<ai->app_id);
                                pd->paused_apps &= ~(1<<ai->app_id);
                        }
                        oz_polling_unlock_bh();
                        if (ai->stop)
                                ai->stop(pd, pause);
                }
        }
}
/*------------------------------------------------------------------------------
 * Context: softirq
 * Run the heartbeat handler of every selected application.  If none of
 * them asked for another beat, cancel the heartbeat timer.  In
 * ISOC_ANYTIME mode also pump out up to 8 pending isoc frames.
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
        const struct oz_app_if *ai;
        int more = 0;
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (ai->heartbeat && (apps & (1<<ai->app_id))) {
                        if (ai->heartbeat(pd))
                                more = 1;
                }
        }
        if ((!more) && (hrtimer_active(&pd->heartbeat)))
                hrtimer_cancel(&pd->heartbeat);
        if (pd->mode & OZ_F_ISOC_ANYTIME) {
                int count = 8;
                while (count-- && (oz_send_isoc_frame(pd) >= 0))
                        ;
        }
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 * Fully stop a PD: deliver pending farewells, stop all running services,
 * mark the PD stopped, unlink it from the global PD list and drop the
 * list's reference.
 */
void oz_pd_stop(struct oz_pd *pd)
{
        u16 stop_apps = 0;
        oz_trace_msg(M, "oz_pd_stop() State = 0x%x\n", pd->state);
        oz_polling_lock_bh();
        oz_pd_indicate_farewells(pd);
        /* Snapshot and clear the bookkeeping under the polling lock. */
        stop_apps = pd->total_apps;
        pd->total_apps = 0;
        pd->paused_apps = 0;
        oz_polling_unlock_bh();
        /* Service stop handlers run outside the polling lock. */
        oz_services_stop(pd, stop_apps, 0);
        oz_polling_lock_bh();
        oz_pd_set_state(pd, OZ_PD_S_STOPPED);
        /* Remove from PD list.*/
        list_del(&pd->link);


        oz_polling_unlock_bh();
        oz_trace_msg(M, "pd ref count = %d\n", atomic_read(&pd->ref_count));
        /* Drop the reference the PD list held. */
        oz_pd_put(pd);
}
/*------------------------------------------------------------------------------
 * Context: softirq
 * Handle the PD announcing sleep.  With a keep-alive and a session, the
 * PD is put to sleep (services paused, stop timer armed); otherwise it
 * is stopped outright.  Returns 1 if the PD was stopped, 0 otherwise.
 */
int oz_pd_sleep(struct oz_pd *pd)
{
        int do_stop = 0;
        u16 stop_apps = 0;
        oz_polling_lock_bh();
        if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
                /* Already asleep or stopped: nothing to do. */
                oz_polling_unlock_bh();
                return 0;
        }
        if (pd->keep_alive && pd->session_id) {
                if (pd->keep_alive >= OZ_KALIVE_INFINITE)
                        oz_pd_indicate_farewells(pd);
                oz_pd_set_state(pd, OZ_PD_S_SLEEP);
                oz_pd_notify_uevent(pd);
        } else {
                do_stop = 1;
        }
        stop_apps = pd->total_apps;
        oz_polling_unlock_bh();
        if (do_stop) {
                oz_pd_stop(pd);
        } else {
                /* Pause services and arm the keep-alive stop timer. */
                oz_services_stop(pd, stop_apps, 1);
                oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
        }
        return do_stop;
}
457 /*------------------------------------------------------------------------------
458  * Context: softirq
459  */
460 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
461 {
462         struct oz_tx_frame *f = NULL;
463         spin_lock_bh(&pd->tx_frame_lock);
464         if (pd->tx_pool) {
465                 f = container_of(pd->tx_pool, struct oz_tx_frame, link);
466                 pd->tx_pool = pd->tx_pool->next;
467                 pd->tx_pool_count--;
468         }
469         spin_unlock_bh(&pd->tx_frame_lock);
470         if (f == NULL)
471                 f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
472         if (f) {
473                 f->total_size = sizeof(struct oz_hdr);
474                 INIT_LIST_HEAD(&f->link);
475                 INIT_LIST_HEAD(&f->elt_list);
476         }
477         return f;
478 }
/*------------------------------------------------------------------------------
 * Context: softirq or process
 * Unlink a queued isoc frame and recycle it into the TX pool, or kfree
 * it when the pool is already full.
 * NOTE(review): tx_pool/tx_pool_count are modified here without taking
 * tx_frame_lock — the only visible caller, oz_send_next_queued_frame(),
 * already holds that lock; confirm before adding new callers.
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
        pd->nb_queued_isoc_frames--;
        list_del_init(&f->link);
        if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
                /* Chain onto the singly linked pool list via link.next. */
                f->link.next = pd->tx_pool;
                pd->tx_pool = &f->link;
                pd->tx_pool_count++;
        } else {
                kfree(f);
        }
}
494 /*------------------------------------------------------------------------------
495  * Context: softirq or process
496  */
497 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
498 {
499         spin_lock_bh(&pd->tx_frame_lock);
500         if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
501                 f->link.next = pd->tx_pool;
502                 pd->tx_pool = &f->link;
503                 pd->tx_pool_count++;
504                 f = NULL;
505         }
506         spin_unlock_bh(&pd->tx_frame_lock);
507         if (f)
508                 kfree(f);
509 }
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 * Set the MORE_DATA flag in an already built frame's oz header.
 */
void oz_set_more_bit(struct sk_buff *skb)
{
        struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
        oz_hdr->control |= OZ_F_MORE_DATA;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 * Stamp an outgoing frame with the last packet number received from the
 * PD (acknowledgement piggy-backed on outgoing traffic).
 */
void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
        struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
        oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}
/*------------------------------------------------------------------------------
 * Context: softirq
 * Build a TX frame — empty, or filled with buffered elements — and
 * append it to the PD's TX queue.  Returns 0 on success, -1 when the PD
 * is not in triggered mode, the queue is full, there is nothing to send,
 * or frame allocation fails.
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
        struct oz_tx_frame *f;
        if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
                return -1;
        if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
                return -1;
        if (!empty && !oz_are_elts_available(&pd->elt_buff))
                return -1;
        f = oz_tx_frame_alloc(pd);
        if (f == NULL)
                return -1;
        f->skb = NULL;
        f->hdr.control =
                (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
        /* Packet numbers are stored little-endian and may be unaligned. */
        ++pd->last_tx_pkt_num;
        put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
        if (empty == 0) {
                oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
                        pd->max_tx_size, &f->elt_list);
        }
        spin_lock(&pd->tx_frame_lock);
        list_add_tail(&f->link, &pd->tx_queue);
        pd->nb_queued_frames++;
        spin_unlock(&pd->tx_frame_lock);
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 * Build a transmit skb for frame 'f': allocate it, add the link-layer
 * header, then copy the oz header and every queued element into the
 * payload.  Returns NULL on allocation or hard-header failure.
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
        struct sk_buff *skb;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct list_head *e;
        /* Allocate skb with enough space for the lower layers as well
         * as the space we need.
         */
        skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        if (skb == NULL)
                return NULL;
        /* Reserve the head room for lower layers.
         */
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->dev = dev;
        skb->protocol = htons(OZ_ETHERTYPE);
        skb->priority = AC_VO;
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                dev->dev_addr, skb->len) < 0)
                goto fail;
        /* Push the tail to the end of the area we are going to copy to.
         */
        oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
        /* Refresh the piggy-backed acknowledgement at send time. */
        f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
        /* Copy the elements into the frame body.
         */
        elt = (struct oz_elt *)(oz_hdr+1);
        for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
                struct oz_elt_info *ei;
                ei = container_of(e, struct oz_elt_info, link);
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
        return skb;
fail:
        kfree_skb(skb);
        return NULL;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 * Complete a TX frame: run each element's completion callback, free the
 * element buffers, return the frame to the pool, and trim the element
 * pool if it has grown beyond its limit.
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
        struct list_head *e;
        struct oz_elt_info *ei;

        if (f == NULL) {
                pr_info("%s: oz_tx_frame is null\n", __func__);
                return;
        }
        e = f->elt_list.next;
        while (e != &f->elt_list) {
                ei = container_of(e, struct oz_elt_info, link);
                e = e->next;    /* advance before the node is freed */
                list_del_init(&ei->link);
                /* Completion callback runs without the buffer lock held. */
                if (ei->callback)
                        ei->callback(pd, ei->context);
                spin_lock_bh(&pd->elt_buff.lock);
                oz_elt_info_free(&pd->elt_buff, ei);
                spin_unlock_bh(&pd->elt_buff.lock);
        }
        oz_tx_frame_free(pd, f);
        if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
                oz_trim_elt_pool(&pd->elt_buff);
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 * Transmit the frame following last_sent_frame on the TX queue.
 * Returns 0 when a frame was handed to the network device, -1 otherwise.
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
        struct sk_buff *skb;
        struct oz_tx_frame *f;
        struct list_head *e;
        spin_lock(&pd->tx_frame_lock);
        e = pd->last_sent_frame->next;
        if (e == &pd->tx_queue) {
                /* Nothing beyond the last frame sent. */
                spin_unlock(&pd->tx_frame_lock);
                return -1;
        }
        f = container_of(e, struct oz_tx_frame, link);

        /* A pre-built skb marks this as a queued isoc frame. */
        if (f->skb != NULL) {
                skb = f->skb;
                oz_tx_isoc_free(pd, f);
                spin_unlock(&pd->tx_frame_lock);
                if (more_data)
                        oz_set_more_bit(skb);
                oz_set_last_pkt_nb(pd, skb);
                if ((int)atomic_read(&g_submitted_isoc) <
                                                        OZ_MAX_SUBMITTED_ISOC) {
                        oz_trace_skb(skb, 'T');
                        if (dev_queue_xmit(skb) < 0) {
                                return -1;
                        }
                        /* NOTE(review): incremented only after xmit, while
                         * the skb destructor (oz_isoc_destructor) decrements
                         * on free; a fast completion could decrement before
                         * this runs, skewing the counter — confirm whether
                         * the increment should precede dev_queue_xmit().
                         */
                        atomic_inc(&g_submitted_isoc);
                        return 0;
                } else {
                        /* Too many isoc frames in flight: drop this one. */
                        kfree_skb(skb);
                        return -1;
                }
        }

        pd->last_sent_frame = e;
        skb = oz_build_frame(pd, f);
        spin_unlock(&pd->tx_frame_lock);
        if (skb == 0)
                return -1;

        if (more_data)
                oz_set_more_bit(skb);
        oz_trace_skb(skb, 'T');
        if (dev_queue_xmit(skb) < 0)
                return -1;
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 * Build frames from buffered elements and transmit up to 'backlog' of
 * them, with mode-specific adjustments for isochronous traffic.  If
 * there is nothing to send, an empty frame is emitted to keep the
 * triggered exchange alive.
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
        /* Convert buffered elements into queued frames first. */
        while (oz_prepare_frame(pd, 0) >= 0)
                backlog++;

        switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {

                case OZ_F_ISOC_NO_ELTS: {
                        backlog += pd->nb_queued_isoc_frames;
                        if (backlog <= 0)
                                goto out;
                        if (backlog > OZ_MAX_SUBMITTED_ISOC)
                                backlog = OZ_MAX_SUBMITTED_ISOC;
                        break;
                }
                /* NOTE(review): OZ_NO_ELTS_ANYTIME is presumably the OR of
                 * both mask bits above — confirm against ozprotocol.h.
                 */
                case OZ_NO_ELTS_ANYTIME: {
                        if ((backlog <= 0) && (pd->isoc_sent == 0))
                                goto out;
                        break;
                }
                default: {
                        if (backlog <= 0)
                                goto out;
                        break;
                }
        }
        while (backlog--) {
                if (oz_send_next_queued_frame(pd, backlog) < 0)
                        break;
        }
        return;

out:    oz_prepare_frame(pd, 1);
        oz_send_next_queued_frame(pd, 0);
}
/*------------------------------------------------------------------------------
 * Context: softirq
 * Build and transmit one isoc frame from buffered isoc elements.
 * Returns 0 when there was nothing to send or the frame went out,
 * -1 on allocation or header failure.
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
        struct sk_buff *skb;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct list_head *e;
        struct list_head list;
        int total_size = sizeof(struct oz_hdr);
        INIT_LIST_HEAD(&list);

        oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
                pd->max_tx_size, &list);
        if (list.next == &list)
                return 0;       /* no isoc elements buffered */
        skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        if (skb == NULL) {
                oz_elt_info_free_chain(&pd->elt_buff, &list);
                return -1;
        }
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->dev = dev;
        skb->protocol = htons(OZ_ETHERTYPE);
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                dev->dev_addr, skb->len) < 0) {
                kfree_skb(skb);
                return -1;
        }
        oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
        oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
        oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        elt = (struct oz_elt *)(oz_hdr+1);

        /* Copy the selected elements into the frame body. */
        for (e = list.next; e != &list; e = e->next) {
                struct oz_elt_info *ei;
                ei = container_of(e, struct oz_elt_info, link);
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
        oz_trace_skb(skb, 'T');
        /* Best effort: the dev_queue_xmit() return value is ignored. */
        dev_queue_xmit(skb);
        oz_elt_info_free_chain(&pd->elt_buff, &list);
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 * Retire every queued TX frame acknowledged by the peer's last packet
 * number 'lpn'.  The acknowledged span is detached from tx_queue under
 * tx_frame_lock and retired after the lock is dropped.
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
        struct list_head *e;
        struct oz_tx_frame *f;
        struct list_head *first = NULL;
        struct list_head *last = NULL;
        u8 diff;
        u32 pkt_num;

        spin_lock(&pd->tx_frame_lock);
        e = pd->tx_queue.next;
        while (e != &pd->tx_queue) {
                f = container_of(e, struct oz_tx_frame, link);
                pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
                /* Modular distance from this frame's number to lpn; stop
                 * at frames newer than lpn (more than half a cycle ahead)
                 * or at packet number 0.
                 */
                diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
                if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
                        break;
                if (first == NULL)
                        first = e;
                last = e;
                e = e->next;
                pd->nb_queued_frames--;
        }
        if (first) {
                /* Unlink the [first..last] span; it becomes a NULL-
                 * terminated singly linked chain via ->next.
                 */
                last->next->prev = &pd->tx_queue;
                pd->tx_queue.next = last->next;
                last->next = NULL;
        }
        pd->last_sent_frame = &pd->tx_queue;
        spin_unlock(&pd->tx_frame_lock);
        /* Run callbacks and free the frames outside the lock. */
        while (first) {
                f = container_of(first, struct oz_tx_frame, link);
                first = first->next;
                oz_retire_frame(pd, f);
        }
}
803 /*------------------------------------------------------------------------------
804  * Precondition: stream_lock must be held.
805  * Context: softirq
806  */
807 static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
808 {
809         struct list_head *e;
810         struct oz_isoc_stream *st;
811         list_for_each(e, &pd->stream_list) {
812                 st = container_of(e, struct oz_isoc_stream, link);
813                 if (st->ep_num == ep_num)
814                         return st;
815         }
816         return NULL;
817 }
818 /*------------------------------------------------------------------------------
819  * Context: softirq
820  */
821 int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
822 {
823         struct oz_isoc_stream *st =
824                 kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
825         if (!st)
826                 return -ENOMEM;
827         st->ep_num = ep_num;
828         spin_lock_bh(&pd->stream_lock);
829         if (!pd_stream_find(pd, ep_num)) {
830                 list_add(&st->link, &pd->stream_list);
831                 st = NULL;
832         }
833         spin_unlock_bh(&pd->stream_lock);
834         if (st)
835                 kfree(st);
836         return 0;
837 }
838 /*------------------------------------------------------------------------------
839  * Context: softirq or process
840  */
841 static void oz_isoc_stream_free(struct oz_isoc_stream *st)
842 {
843         if (st->skb)
844                 kfree_skb(st->skb);
845         kfree(st);
846 }
847 /*------------------------------------------------------------------------------
848  * Context: softirq
849  */
850 int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
851 {
852         struct oz_isoc_stream *st;
853         spin_lock_bh(&pd->stream_lock);
854         st = pd_stream_find(pd, ep_num);
855         if (st)
856                 list_del(&st->link);
857         spin_unlock_bh(&pd->stream_lock);
858         if (st)
859                 oz_isoc_stream_free(st);
860         return 0;
861 }
/*------------------------------------------------------------------------------
 * skb destructor for isochronous frames transmitted in ANYTIME mode.
 * Balances the atomic_inc of g_submitted_isoc performed just before
 * dev_queue_xmit() in oz_send_isoc_unit(), so the in-flight count drops
 * when the network stack finally frees the skb.
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}
/*------------------------------------------------------------------------------
 * Append one isochronous data unit (len bytes) for endpoint ep_num to the
 * stream's accumulation skb.  Units are batched until either pd->ms_per_isoc
 * units have accumulated or the next unit would not fit within
 * pd->max_tx_size; the completed frame is then either queued on pd->tx_queue
 * (non-ANYTIME mode) or transmitted immediately (ANYTIME mode).
 * Returns 0 on success/benign drop, -1 on transmit/header failure.
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;
	/* Detach the stream's in-progress frame state under the lock so we
	 * can build on it without holding stream_lock. */
	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;
	if (!skb) {
		/* Allocate enough space for max size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* For audio packet set priority to AC_VO */
		skb->priority = AC_VO;
		/* Headers are filled in later (just before xmit); reserve
		 * their space now and keep a pointer to it. */
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
	/* Keep accumulating while we are under the per-frame unit budget and
	 * another unit of this size would still fit. */
	if ((++nb_units < pd->ms_per_isoc)
		&& ((pd->max_tx_size - size) > len)) {
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;
		/* NOTE(review): st is dereferenced here after stream_lock was
		 * dropped above; this relies on the stream not being deleted
		 * concurrently on this path — confirm. */
		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		/* Fill in the header space reserved at allocation time. */
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				dev->dev_addr, skb->len) < 0)
			goto out;

		skb->destructor = oz_isoc_destructor;
		/*Queue for Xmit if mode is not ANYTIME*/
		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;
			struct list_head *e;
			struct oz_tx_frame *f;
			/* Too many isoc frames backlogged: drop the oldest
			 * queued isoc frame to bound latency. */
			if (nb >= pd->isoc_latency) {
				spin_lock(&pd->tx_frame_lock);
				list_for_each(e, &pd->tx_queue) {
					f = container_of(e, struct oz_tx_frame,
									link);
					if (f->skb != NULL) {
						oz_tx_isoc_free(pd, f);
						break;
					}
				}
				spin_unlock(&pd->tx_frame_lock);
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			return 0;
		}
		/*In ANYTIME mode Xmit unit immediately*/
		/* g_submitted_isoc caps in-flight frames; the matching dec is
		 * in oz_isoc_destructor when the skb is freed. */
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			oz_trace_skb(skb, 'T');
			if (dev_queue_xmit(skb) < 0) {
				return -1;
			} else
				return 0;
		}

/* Fall through: too many submitted isoc frames — drop this one.
 * NOTE(review): this label sits inside the else block; the trailing
 * "return 0" after the closing brace below is unreachable dead code. */
out:	kfree_skb(skb);
	return -1;

	}
	return 0;
}
985 /*------------------------------------------------------------------------------
986  * Context: process
987  */
988 void oz_apps_init(void)
989 {
990         int i;
991         for (i = 0; i < OZ_APPID_MAX; i++)
992                 if (g_app_if[i].init)
993                         g_app_if[i].init();
994 }
995 /*------------------------------------------------------------------------------
996  * Context: process
997  */
998 void oz_apps_term(void)
999 {
1000         int i;
1001         /* Terminate all the apps. */
1002         for (i = 0; i < OZ_APPID_MAX; i++)
1003                 if (g_app_if[i].term)
1004                         g_app_if[i].term();
1005 }
1006 /*------------------------------------------------------------------------------
1007  * Context: softirq-serialized
1008  */
1009 void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
1010 {
1011         const struct oz_app_if *ai;
1012         if (app_id == 0 || app_id > OZ_APPID_MAX)
1013                 return;
1014         ai = &g_app_if[app_id-1];
1015         ai->rx(pd, elt);
1016 }
1017 /*------------------------------------------------------------------------------
1018  * Context: softirq or process
1019  */
1020 void oz_pd_indicate_farewells(struct oz_pd *pd)
1021 {
1022         struct oz_farewell *f;
1023         const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
1024         list_for_each_entry(f, &pd->farewell_list, link) {
1025                 if (ai->farewell)
1026                         ai->farewell(pd, f->ep_num, f->report, f->len);
1027         }
1028 }