/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "oztrace.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
/*------------------------------------------------------------------------------
 */
#define OZ_MAX_TX_POOL_SIZE	6
/*------------------------------------------------------------------------------
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
static void oz_pd_free(struct work_struct *work);
static void oz_pd_uevent_workitem(struct work_struct *work);
/*------------------------------------------------------------------------------
 * Counts the uncompleted isoc frames submitted to netcard.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
/* Application handler functions.
 */
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
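	/*
	 * One entry per application id: init, term, start, stop, rx,
	 * heartbeat and farewell handlers plus the application id itself.
	 * Applications that need no handler of their own use the
	 * oz_def_app_* stubs defined below.
	 */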
/*------------------------------------------------------------------------------
 */
static int oz_def_app_init(void)
{
	return 0;
}
/*------------------------------------------------------------------------------
 */
static void oz_def_app_term(void)
{
}
/*------------------------------------------------------------------------------
 */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
	return 0;
}
/*------------------------------------------------------------------------------
 */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
/*------------------------------------------------------------------------------
 */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);

	WARN_ON(atomic_read(&pd->ref_count) < 0);
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
	struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
	int i;

	if (pd == NULL)
		return NULL;
	atomic_set(&pd->ref_count, 2);
	for (i = 0; i < OZ_APPID_MAX; i++)
		spin_lock_init(&pd->app_lock[i]);
	pd->last_rx_pkt_num = 0xffffffff;
	oz_pd_set_state(pd, OZ_PD_S_IDLE);
	pd->max_tx_size = OZ_MAX_TX_SIZE;
	memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
	if (0 != oz_elt_buf_init(&pd->elt_buff)) {
		kfree(pd);
		return NULL;
	}
	spin_lock_init(&pd->tx_frame_lock);
	INIT_LIST_HEAD(&pd->tx_queue);
	INIT_LIST_HEAD(&pd->farewell_list);
	pd->last_sent_frame = &pd->tx_queue;
	spin_lock_init(&pd->stream_lock);
	INIT_LIST_HEAD(&pd->stream_list);
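	/* Heartbeats and PD timeouts are driven by the hrtimers below; their
	 * expiry handlers (oz_pd_heartbeat_event/oz_pd_timeout_event) hand
	 * the real work off to the corresponding tasklets.
	 */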
	tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
			(unsigned long)pd);
	tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
			(unsigned long)pd);
	hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pd->heartbeat.function = oz_pd_heartbeat_event;
	pd->timeout.function = oz_pd_timeout_event;
	spin_lock_init(&pd->pd_destroy_lock);
	pd->pd_destroy_scheduled = false;
	INIT_WORK(&pd->workitem, oz_pd_free);
	INIT_WORK(&pd->uevent_workitem, oz_pd_uevent_workitem);
	return pd;
}
static void oz_pd_free(struct work_struct *work)
{
	struct oz_pd *pd;
	struct list_head *e;
	struct oz_tx_frame *f;
	struct oz_isoc_stream *st;
	struct oz_farewell *fwell;

	pd = container_of(work, struct oz_pd, workitem);
	oz_trace_msg(M, "Destroying PD:%p\n", pd);
	tasklet_kill(&pd->heartbeat_tasklet);
	tasklet_kill(&pd->timeout_tasklet);
	/* Finish scheduled uevent work; the uevent might be rescheduled by
	 * the oz timeout tasklet again.
	 */
	cancel_work_sync(&pd->uevent_workitem);
	/* Delete any streams.
	 */
	e = pd->stream_list.next;
	while (e != &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		e = e->next;
		oz_isoc_stream_free(st);
	}
	/* Free any queued tx frames.
	 */
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		e = e->next;
		if (f->skb != NULL)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}
	oz_elt_buf_term(&pd->elt_buff);
	/* Free any farewells.
	 */
	e = pd->farewell_list.next;
	while (e != &pd->farewell_list) {
		fwell = container_of(e, struct oz_farewell, link);
		e = e->next;
		kfree(fwell);
	}
	/* Deallocate all frames in tx pool.
	 */
	while (pd->tx_pool) {
		e = pd->tx_pool;
		pd->tx_pool = e->next;
		kfree(container_of(e, struct oz_tx_frame, link));
	}
	oz_trace_msg(M, "dev_put(%p)\n", pd->net_dev);
	dev_put(pd->net_dev);
	kfree(pd);
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
	int ret;

	spin_lock_bh(&pd->pd_destroy_lock);
	if (pd->pd_destroy_scheduled) {
		pr_info("%s: not rescheduling oz_pd_free\n", __func__);
		spin_unlock_bh(&pd->pd_destroy_lock);
		return;
	}
	pd->pd_destroy_scheduled = true;
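	/* Make sure the heartbeat and timeout timers cannot fire once the
	 * teardown work has been scheduled.
	 */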
	if (hrtimer_active(&pd->timeout))
		hrtimer_cancel(&pd->timeout);
	if (hrtimer_active(&pd->heartbeat))
		hrtimer_cancel(&pd->heartbeat);

	ret = schedule_work(&pd->workitem);
	if (!ret)
		pr_info("failed to schedule workitem\n");
	spin_unlock_bh(&pd->pd_destroy_lock);
}
/*------------------------------------------------------------------------------
 */
static void oz_pd_uevent_workitem(struct work_struct *work)
{
	struct oz_pd *pd;
	char mac_buf[32];
	char *envp[2];

	pd = container_of(work, struct oz_pd, uevent_workitem);

	oz_trace_msg(D, "uevent ID_MAC:%pm\n", pd->mac_addr);
	snprintf(mac_buf, sizeof(mac_buf), "ID_MAC=%pm", pd->mac_addr);
	envp[0] = mac_buf;
	envp[1] = NULL;
	kobject_uevent_env(&g_oz_wpan_dev->kobj, KOBJ_CHANGE, envp);
}
/*------------------------------------------------------------------------------
 */
void oz_pd_notify_uevent(struct oz_pd *pd)
{
	int ret;

	ret = schedule_work(&pd->uevent_workitem);
	if (!ret)
		pr_info("%s: failed to schedule workitem\n", __func__);
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
	const struct oz_app_if *ai;
	int rc = 0;

	oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
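	/* The TFTP application is tied to the serial application: starting
	 * (or, in oz_services_stop(), stopping) TFTP implicitly starts or
	 * stops the serial service as well.
	 */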
	if (apps & (1<<OZ_APPID_TFTP))
		apps |= 1<<OZ_APPID_SERIAL;
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			if (ai->start && ai->start(pd, resume)) {
				rc = -1;
				break;
			}
			oz_polling_lock_bh();
			pd->total_apps |= (1<<ai->app_id);
			if (resume)
				pd->paused_apps &= ~(1<<ai->app_id);
			oz_polling_unlock_bh();
		}
	}
	return rc;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	const struct oz_app_if *ai;

	oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
	if (apps & (1<<OZ_APPID_TFTP))
		apps |= 1<<OZ_APPID_SERIAL;
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			oz_polling_lock_bh();
			if (pause) {
				pd->paused_apps |= (1<<ai->app_id);
			} else {
				pd->total_apps &= ~(1<<ai->app_id);
				pd->paused_apps &= ~(1<<ai->app_id);
			}
			oz_polling_unlock_bh();
			if (ai->stop)
				ai->stop(pd, pause);
		}
	}
}
/*------------------------------------------------------------------------------
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	const struct oz_app_if *ai;
	int more = 0;

	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (ai->heartbeat && (apps & (1<<ai->app_id))) {
			if (ai->heartbeat(pd))
				more = 1;
		}
	}
	if ((!more) && (hrtimer_active(&pd->heartbeat)))
		hrtimer_cancel(&pd->heartbeat);
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		int count = 8;

		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps;

	oz_trace_msg(M, "oz_pd_stop() State = 0x%x\n", pd->state);
	oz_polling_lock_bh();
	oz_pd_indicate_farewells(pd);
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	oz_polling_unlock_bh();
	oz_services_stop(pd, stop_apps, 0);
	oz_polling_lock_bh();
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from PD list.*/
	list_del(&pd->link);
	oz_polling_unlock_bh();
	oz_trace_msg(M, "pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_pd_put(pd);
}
/*------------------------------------------------------------------------------
 */
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps;

	oz_polling_lock_bh();
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		oz_polling_unlock_bh();
		return 0;
	}
	if (pd->keep_alive && pd->session_id) {
		if (pd->keep_alive >= OZ_KALIVE_INFINITE)
			oz_pd_indicate_farewells(pd);
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
		oz_pd_notify_uevent(pd);
	} else {
		do_stop = 1;
	}
	stop_apps = pd->total_apps;
	oz_polling_unlock_bh();
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
	}
	return do_stop;
}
/*------------------------------------------------------------------------------
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f = NULL;

	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool) {
		f = container_of(pd->tx_pool, struct oz_tx_frame, link);
		pd->tx_pool = pd->tx_pool->next;
		pd->tx_pool_count--;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	if (f == NULL)
		f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
	if (f) {
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
	} else {
		kfree(f);
	}
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
		f = NULL;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	kfree(f);
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
void oz_set_more_bit(struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->control |= OZ_F_MORE_DATA;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}
/*------------------------------------------------------------------------------
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;

	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;
	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0) {
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
			pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;

	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* Reserve the head room for lower layers.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	skb->priority = AC_VO;
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Push the tail to the end of the area we are going to copy to.
	 */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body.
	 */
	elt = (struct oz_elt *)(oz_hdr+1);
	for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
		struct oz_elt_info *ei;
		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;
fail:
	kfree_skb(skb);
	return NULL;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct list_head *e;
	struct oz_elt_info *ei;

	if (f == NULL) {
		pr_info("%s: oz_tx_frame is null\n", __func__);
		return;
	}
	e = f->elt_list.next;
	while (e != &f->elt_list) {
		ei = container_of(e, struct oz_elt_info, link);
		e = e->next;
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
	if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
		oz_trim_elt_pool(&pd->elt_buff);
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;

	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	f = container_of(e, struct oz_tx_frame, link);
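	/* A non-NULL skb means this queue entry is a pre-built isoc frame;
	 * hand the skb straight to the network device instead of building a
	 * frame from buffered elements.
	 */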
	if (f->skb != NULL) {
		skb = f->skb;
		oz_tx_isoc_free(pd, f);
		spin_unlock(&pd->tx_frame_lock);
		if (more_data)
			oz_set_more_bit(skb);
		oz_set_last_pkt_nb(pd, skb);
		if ((int)atomic_read(&g_submitted_isoc) <
						OZ_MAX_SUBMITTED_ISOC) {
			oz_trace_skb(skb, 'T');
			if (dev_queue_xmit(skb) < 0) {
				return -1;
			}
			atomic_inc(&g_submitted_isoc);
			return 0;
		} else {
			kfree_skb(skb);
			return -1;
		}
	}
	pd->last_sent_frame = e;
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	if (skb == NULL)
		return -1;
	if (more_data)
		oz_set_more_bit(skb);
	oz_trace_skb(skb, 'T');
	if (dev_queue_xmit(skb) < 0)
		return -1;
	return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;
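	/* How many frames we may push in one go depends on the PD's isoc
	 * mode.
	 */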
	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
	case OZ_F_ISOC_NO_ELTS: {
		backlog += pd->nb_queued_isoc_frames;
		if (backlog <= 0)
			goto out;
		if (backlog > OZ_MAX_SUBMITTED_ISOC)
			backlog = OZ_MAX_SUBMITTED_ISOC;
		break;
	}
	case OZ_NO_ELTS_ANYTIME: {
		if ((backlog <= 0) && (pd->isoc_sent == 0))
			goto out;
		break;
	}
	default:
		if (backlog <= 0)
			goto out;
		break;
	}
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;
out:	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}
/*------------------------------------------------------------------------------
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	struct list_head list;
	int total_size = sizeof(struct oz_hdr);

	INIT_LIST_HEAD(&list);

	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
		pd->max_tx_size, &list);
	if (list.next == &list)
		return 0;
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	for (e = list.next; e != &list; e = e->next) {
		struct oz_elt_info *ei;
		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	oz_trace_skb(skb, 'T');
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct list_head *first = NULL;
	struct list_head *last = NULL;
	u8 diff;
	u32 pkt_num;

	spin_lock(&pd->tx_frame_lock);
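	/* Unlink every frame from the head of the tx queue whose packet
	 * number has been acknowledged by lpn (allowing for wrap-around of
	 * the packet-number field); the detached frames are retired below,
	 * outside the tx_frame_lock.
	 */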
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		if (first == NULL)
			first = e;
		last = e;
		e = e->next;
		pd->nb_queued_frames--;
	}
	if (first) {
		last->next->prev = &pd->tx_queue;
		pd->tx_queue.next = last->next;
		last->next = NULL;
	}
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);
	while (first) {
		f = container_of(first, struct oz_tx_frame, link);
		first = first->next;
		oz_retire_frame(pd, f);
	}
}
/*------------------------------------------------------------------------------
 * Precondition: stream_lock must be held.
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
	struct list_head *e;
	struct oz_isoc_stream *st;

	list_for_each(e, &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		if (st->ep_num == ep_num)
			return st;
	}
	return NULL;
}
/*------------------------------------------------------------------------------
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st =
		kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);

	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;
	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);
	kfree(st);
	return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	if (st->skb)
		kfree_skb(st->skb);
	kfree(st);
}
/*------------------------------------------------------------------------------
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);
	if (st)
		oz_isoc_stream_free(st);
	return 0;
}
/*------------------------------------------------------------------------------
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}
/*------------------------------------------------------------------------------
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;
	if (!skb) {
		/* Allocate enough space for max size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* For audio packet set priority to AC_VO */
		skb->priority = AC_VO;
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
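	/* Accumulate isoc units in the same skb until pd->ms_per_isoc units
	 * have been gathered or the frame would no longer fit another unit;
	 * only then is the frame actually sent.
	 */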
	if ((++nb_units < pd->ms_per_isoc)
		&& ((pd->max_tx_size - size) > len)) {
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;

		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				dev->dev_addr, skb->len) < 0)
			goto out;

		skb->destructor = oz_isoc_destructor;
		/*Queue for Xmit if mode is not ANYTIME*/
		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;
			struct list_head *e;
			struct oz_tx_frame *f;

			if (nb >= pd->isoc_latency) {
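				/* More isoc frames are already queued than
				 * the configured latency allows: drop the
				 * oldest queued isoc frame to make room for
				 * this one.
				 */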
				spin_lock(&pd->tx_frame_lock);
				list_for_each(e, &pd->tx_queue) {
					f = container_of(e, struct oz_tx_frame,
								link);
					if (f->skb != NULL) {
						oz_tx_isoc_free(pd, f);
						break;
					}
				}
				spin_unlock(&pd->tx_frame_lock);
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			return 0;
		}
		/*In ANYTIME mode Xmit unit immediately*/
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			oz_trace_skb(skb, 'T');
			if (dev_queue_xmit(skb) < 0) {
				return -1;
			}
			return 0;
		}
out:		kfree_skb(skb);
		return -1;
	}
	return 0;
}
/*------------------------------------------------------------------------------
 */
void oz_apps_init(void)
{
	int i;

	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].init)
			g_app_if[i].init();
}
/*------------------------------------------------------------------------------
 */
void oz_apps_term(void)
{
	int i;

	/* Terminate all the apps. */
	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].term)
			g_app_if[i].term();
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
	const struct oz_app_if *ai;

	if (app_id == 0 || app_id > OZ_APPID_MAX)
		return;
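	/* Application ids are 1-based on the wire, so index the handler table
	 * with app_id - 1.
	 */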
	ai = &g_app_if[app_id-1];
	ai->rx(pd, elt);
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];

	list_for_each_entry(f, &pd->farewell_list, link) {
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
	}
}