rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
staging: ozwpan: protect oz_pd_destroy
author Vinayak Pane <vpane@nvidia.com>
Wed, 18 Jun 2014 01:45:14 +0000 (18:45 -0700)
committer Todd Poynter <tpoynter@nvidia.com>
Sun, 22 Jun 2014 02:51:26 +0000 (19:51 -0700)
oz_pd_destroy() is called multiple times when the network-down
notifier is triggered. The destroy work item is scheduled from the
tasklet and also from the oz_binding_remove() function. Protect it by
ensuring the work is scheduled strictly once.
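In outline, the fix is a schedule-once guard: an atomic flag in struct oz_pd is raised the first time the destroy path runs, so any later caller returns before queueing the free work again, and INIT_WORK() is done once at allocation time instead of in the destroy path. Below is a minimal sketch of that pattern, not the driver's code; the struct name, function names and allocation flags are illustrative (in the real patch the flag is pd_destroy_scheduled and the free handler is oz_pd_free(), as the diff below shows).

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_pd {
	atomic_t destroy_scheduled;	/* 0 = free work not queued yet */
	struct work_struct workitem;	/* queued at most once by my_pd_destroy() */
};

static void my_pd_free(struct work_struct *work)
{
	struct my_pd *pd = container_of(work, struct my_pd, workitem);

	/* ... release whatever the descriptor holds ... */
	atomic_set(&pd->destroy_scheduled, 0);
	kfree(pd);
}

static struct my_pd *my_pd_alloc(gfp_t flags)
{
	struct my_pd *pd = kzalloc(sizeof(*pd), flags);

	if (pd) {
		atomic_set(&pd->destroy_scheduled, 0);
		/* Initialise the work item once, here, not on every destroy. */
		INIT_WORK(&pd->workitem, my_pd_free);
	}
	return pd;
}

static void my_pd_destroy(struct my_pd *pd)
{
	/* A second (or later) caller finds the flag raised and backs off. */
	if (atomic_read(&pd->destroy_scheduled) > 0) {
		pr_info("%s: free work already scheduled\n", __func__);
		return;
	}
	atomic_inc(&pd->destroy_scheduled);

	if (!schedule_work(&pd->workitem))
		pr_info("%s: failed to schedule workitem\n", __func__);
}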

Bug 1522180
Bug 1522708

Change-Id: I47c92a5e7ef1067d5dc4cdf67653a785eff34bca
Signed-off-by: Vinayak Pane <vpane@nvidia.com>
Reviewed-on: http://git-master/r/426904
Reviewed-by: Bibhay Ranjan <bibhayr@nvidia.com>
Tested-by: Bibhay Ranjan <bibhayr@nvidia.com>
Reviewed-by: Mitch Luban <mluban@nvidia.com>
GVS: Gerrit_Virtual_Submit

drivers/staging/ozwpan/ozpd.c
drivers/staging/ozwpan/ozpd.h

diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index def50af7813213551cc3ad2cac492ca6ab5be88e..b3140fe26fc58e03bcbbb56ada9b8c5b662355f5 100644
@@ -39,6 +39,7 @@ static void oz_def_app_term(void);
 static int oz_def_app_start(struct oz_pd *pd, int resume);
 static void oz_def_app_stop(struct oz_pd *pd, int pause);
 static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
+static void oz_pd_free(struct work_struct *work);
 /*------------------------------------------------------------------------------
  * Counts the uncompleted isoc frames submitted to netcard.
  */
@@ -160,8 +161,12 @@ void oz_pd_get(struct oz_pd *pd)
  */
 void oz_pd_put(struct oz_pd *pd)
 {
-       if (atomic_dec_and_test(&pd->ref_count))
-               oz_pd_destroy(pd);
+       atomic_dec(&pd->ref_count);
+
+       if (atomic_read(&pd->ref_count) > 0 || atomic_read(&pd->ref_count) < 0)
+               return;
+
+       oz_pd_destroy(pd);
 }
 /*------------------------------------------------------------------------------
  * Context: softirq-serialized
@@ -197,6 +202,9 @@ struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
                hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                pd->heartbeat.function = oz_pd_heartbeat_event;
                pd->timeout.function = oz_pd_timeout_event;
+               atomic_set(&pd->pd_destroy_scheduled, 0);
+               memset(&pd->workitem, 0, sizeof(pd->workitem));
+               INIT_WORK(&pd->workitem, oz_pd_free);
        }
        return pd;
 }
@@ -249,6 +257,7 @@ static void oz_pd_free(struct work_struct *work)
                oz_trace_msg(M, "dev_put(%p)\n", pd->net_dev);
                dev_put(pd->net_dev);
        }
+       atomic_set(&pd->pd_destroy_scheduled, 0);
        kfree(pd);
 }
 
@@ -260,13 +269,17 @@ void oz_pd_destroy(struct oz_pd *pd)
 {
        int ret;
 
+       if (atomic_read(&pd->pd_destroy_scheduled) > 0) {
+               pr_info("%s: not rescheduling oz_pd_free\n", __func__);
+               return;
+       }
+       atomic_inc(&pd->pd_destroy_scheduled);
+
        if (hrtimer_active(&pd->timeout))
                hrtimer_cancel(&pd->timeout);
        if (hrtimer_active(&pd->heartbeat))
                hrtimer_cancel(&pd->heartbeat);
 
-       memset(&pd->workitem, 0, sizeof(pd->workitem));
-       INIT_WORK(&pd->workitem, oz_pd_free);
        ret = schedule_work(&pd->workitem);
        if (!ret)
                pr_info("failed to schedule workitem\n");
@@ -578,6 +591,11 @@ static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
 {
        struct list_head *e;
        struct oz_elt_info *ei;
+
+       if (f == NULL) {
+               pr_info("%s: oz_tx_frame is null\n", __func__);
+               return;
+       }
        e = f->elt_list.next;
        while (e != &f->elt_list) {
                ei = container_of(e, struct oz_elt_info, link);
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
index 5f5e7241ff47978274a4ff413338cb8cae6a0ec8..f21c830db56464dff628b9e57e3fbecb97df4460 100644
@@ -110,6 +110,7 @@ struct oz_pd {
        unsigned long tasklet_sched;
        struct work_struct workitem;
        struct work_struct uevent_workitem;
+       atomic_t        pd_destroy_scheduled;
        u8      up_audio_buf;
 };