All rx skbs should be consumed or dropped; if left
unconsumed they can leave the netdev in a stale state.
Check net state before submitting tx skb.
Rearrange the oz_remove_binding() function to make sure PDs
are stopped before the net interface packet_type callbacks are removed.
Remove old WAR to recover from enumeration failure.
Bug
1522708
Change-Id: Id13e837c9ed6941f03404762203ad2841e9af24f
Signed-off-by: Vinayak Pane <vpane@nvidia.com>
(cherry picked from commit
c2022c5288c83ad3dc31987157bf152f16d85fb4)
Reviewed-on: http://git-master/r/440892
(cherry picked from commit
523f9fe8e6bdbb147a1f186fb7aca74bd49da9a1)
Reviewed-on: http://git-master/r/554511
GVS: Gerrit_Virtual_Submit
Reviewed-by: Robert Shih <rshih@nvidia.com>
Tested-by: Robert Shih <rshih@nvidia.com>
Reviewed-by: Todd Poynter <tpoynter@nvidia.com>
ozhcd->ports[port_id-1].bus_addr = 0;
hpd = oz_claim_hpd(&ozhcd->ports[port_id-1]);
if (hpd != NULL) {
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
- struct oz_pd *pd = (struct oz_pd *)usb_ctx->pd;
-
- /* FIXME: Hack to reset network interface
- * Unrecoverable situation occured.
- */
- if (pd && ((++pd->reset_retry > 2) ||
- (!(pd->state & OZ_PD_S_CONNECTED)))) {
- char *_net_dev = "";
-
- pr_info("OZ: tear down network interface\n");
- spin_lock_bh(&port->port_lock);
- port->flags |= OZ_PORT_F_CHANGED;
- spin_unlock_bh(&port->port_lock);
- oz_usb_put(hpd);
-
- oz_pd_stop(pd);
- msleep(10);
- oz_protocol_term();
- msleep(100);
- oz_protocol_init(_net_dev);
- break;
- }
oz_usb_reset_device(hpd);
oz_usb_put(hpd);
}
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
static void oz_pd_free(struct work_struct *work);
static void oz_pd_uevent_workitem(struct work_struct *work);
+extern struct completion oz_pd_done;
/*------------------------------------------------------------------------------
* Counts the uncompleted isoc frames submitted to netcard.
*/
hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pd->heartbeat.function = oz_pd_heartbeat_event;
pd->timeout.function = oz_pd_timeout_event;
- pd->reset_retry = 0;
spin_lock_init(&pd->pd_destroy_lock);
pd->pd_destroy_scheduled = false;
}
kfree(pd);
+ complete(&oz_pd_done);
}
oz_pd_set_state(pd, OZ_PD_S_STOPPED);
if (hrtimer_active(&pd->timeout)) {
- pr_info("hrtimer timeout active\n");
+ oz_trace_msg(M, "hrtimer timeout active\n");
hrtimer_cancel(&pd->timeout);
}
if (hrtimer_active(&pd->heartbeat)) {
- pr_info("hrtimer heartbeat active\n");
+ oz_trace_msg(M, "hrtimer heartbeat active\n");
hrtimer_cancel(&pd->heartbeat);
}
/* connect_req will restart timers */
oz_set_last_pkt_nb(pd, skb);
if ((int)atomic_read(&g_submitted_isoc) <
OZ_MAX_SUBMITTED_ISOC) {
+ if (!netif_running(skb->dev)) {
+ kfree_skb(skb);
+ return -1;
+ }
+
oz_trace_skb(skb, 'T');
if (dev_queue_xmit(skb) < 0) {
return -1;
if (more_data)
oz_set_more_bit(skb);
+
+ if (!netif_running(skb->dev)) {
+ kfree_skb(skb);
+ return -1;
+ }
+
oz_trace_skb(skb, 'T');
if (dev_queue_xmit(skb) < 0)
return -1;
memcpy(elt, ei->data, ei->length);
elt = oz_next_elt(elt);
}
- oz_trace_skb(skb, 'T');
- dev_queue_xmit(skb);
+
+ if (netif_running(skb->dev)) {
+ oz_trace_skb(skb, 'T');
+ dev_queue_xmit(skb);
+ } else
+ kfree_skb(skb);
oz_elt_info_free_chain(&pd->elt_buff, &list);
return 0;
}
if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
atomic_inc(&g_submitted_isoc);
oz_trace_skb(skb, 'T');
+
+ if (!netif_running(skb->dev))
+ goto out;
+
if (dev_queue_xmit(skb) < 0) {
return -1;
} else
struct work_struct uevent_workitem;
spinlock_t pd_destroy_lock;
bool pd_destroy_scheduled;
- unsigned int reset_retry;
u8 up_audio_buf;
};
static LIST_HEAD(g_binding);
static DEFINE_SPINLOCK(g_binding_lock);
static struct sk_buff_head g_rx_queue;
+struct completion oz_pd_done;
static u8 g_session_id;
static u16 g_apps = 0x1;
static int g_processing_rx;
body->session_id = pd->session_id;
put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
}
+ if (!netif_running(dev)) {
+ kfree_skb(skb);
+ return;
+ }
+
oz_trace_skb(skb, 'T');
dev_queue_xmit(skb);
return;
spin_unlock_bh(&g_polling_lock);
switch (type) {
case OZ_TIMER_TOUT:
- oz_trace_msg(M, "OZ_TIMER_TOUT:\n");
+ oz_trace_msg(D, "OZ_TIMER_TOUT:\n");
oz_pd_sleep(pd);
break;
case OZ_TIMER_STOP:
- oz_trace_msg(M, "OZ_TIMER_STOP:\n");
+ oz_trace_msg(D, "OZ_TIMER_STOP:\n");
oz_pd_stop(pd);
break;
}
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb == NULL)
return 0;
+
+ if (unlikely(!dev || !netif_running(dev))) {
+ oz_trace_msg(M, "%s: netdev stopped, drop pkt\n", __func__);
+ kfree_skb(skb);
+ g_processing_rx = 0;
+ return 0;
+ }
+
spin_lock_bh(&g_rx_queue.lock);
if (g_processing_rx) {
/* We already hold the lock so use __ variant.
do {
spin_unlock_bh(&g_rx_queue.lock);
+ if (unlikely(!dev || !netif_running(dev))) {
+ kfree_skb(skb);
+ skb_queue_purge(&g_rx_queue);
+ g_processing_rx = 0;
+ return 0;
+ }
+
oz_rx_frame(skb);
spin_lock_bh(&g_rx_queue.lock);
if (skb_queue_empty(&g_rx_queue)) {
}
spin_unlock_bh(&g_polling_lock);
while (!list_empty(&h)) {
+ INIT_COMPLETION(oz_pd_done);
pd = list_first_entry(&h, struct oz_pd, link);
oz_pd_stop(pd);
oz_pd_put(pd);
+ /* wait for PD to get destroyed */
+ if (pd)
+ wait_for_completion_timeout(&oz_pd_done,
+ msecs_to_jiffies(50));
}
}
/*------------------------------------------------------------------------------
}
spin_unlock_bh(&g_binding_lock);
if (found) {
+ pd_stop_all_for_device(binding->ptype.dev);
+
+ /* purge pending rx skb */
+ skb_queue_purge(&g_rx_queue);
+ WARN_ON(!skb_queue_empty(&g_rx_queue));
+
dev_remove_pack(&binding->ptype);
if (binding->ptype.dev) {
oz_trace_msg(M, "%s: dev_put(%s)\n", __func__,
binding->name);
dev_put(binding->ptype.dev);
- pd_stop_all_for_device(binding->ptype.dev);
}
kfree(binding);
+ g_processing_rx = 0;
}
}
/*------------------------------------------------------------------------------
int oz_protocol_init(char *devs)
{
skb_queue_head_init(&g_rx_queue);
+ init_completion(&oz_pd_done);
if (devs && (devs[0] == '*')) {
return -1;
} else {