rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
use skbufhead with raw lock
author: Thomas Gleixner <tglx@linutronix.de>
Tue, 12 Jul 2011 13:38:34 +0000 (15:38 +0200)
committer: Michal Sojka <sojka@merica.cz>
Sun, 13 Sep 2015 07:47:31 +0000 (09:47 +0200)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/netdevice.h
include/linux/skbuff.h
net/core/dev.c

index 278738873703c119dfa322c13b2755e2176a61f5..5e38fcf82cbe8fdbd93ab67382a380f760dbb86b 100644 (file)
@@ -2471,6 +2471,7 @@ struct softnet_data {
        unsigned int            dropped;
        struct sk_buff_head     input_pkt_queue;
        struct napi_struct      backlog;
+       struct sk_buff_head     tofree_queue;
 
 };
 
index bdccc4b46f57352ce8250872292820919d344fd7..e31236ea71c45b51e8220296ef004cfcb441764e 100644 (file)
@@ -180,6 +180,7 @@ struct sk_buff_head {
 
        __u32           qlen;
        spinlock_t      lock;
+       raw_spinlock_t  raw_lock;
 };
 
 struct sk_buff;
@@ -1332,6 +1333,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
        __skb_queue_head_init(list);
 }
 
+static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
+{
+       raw_spin_lock_init(&list->raw_lock);
+       __skb_queue_head_init(list);
+}
+
 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
                struct lock_class_key *class)
 {
index a24886bdaf2263f13a5225432cd0c7e9f3ec54f4..16533c57c7bbd2585ec817b88f5aeeabc1beda7c 100644 (file)
@@ -205,14 +205,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 static inline void rps_lock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-       spin_lock(&sd->input_pkt_queue.lock);
+       raw_spin_lock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
 static inline void rps_unlock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-       spin_unlock(&sd->input_pkt_queue.lock);
+       raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
@@ -3852,7 +3852,7 @@ static void flush_backlog(void *arg)
        skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
                if (skb->dev == dev) {
                        __skb_unlink(skb, &sd->input_pkt_queue);
-                       kfree_skb(skb);
+                       __skb_queue_tail(&sd->tofree_queue, skb);
                        input_queue_head_incr(sd);
                }
        }
@@ -3861,10 +3861,13 @@ static void flush_backlog(void *arg)
        skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
                if (skb->dev == dev) {
                        __skb_unlink(skb, &sd->process_queue);
-                       kfree_skb(skb);
+                       __skb_queue_tail(&sd->tofree_queue, skb);
                        input_queue_head_incr(sd);
                }
        }
+
+       if (!skb_queue_empty(&sd->tofree_queue))
+               raise_softirq_irqoff(NET_RX_SOFTIRQ);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -7139,6 +7142,9 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                netif_rx_ni(skb);
                input_queue_head_incr(oldsd);
        }
+       while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
+               kfree_skb(skb);
+       }
 
        return NOTIFY_OK;
 }
@@ -7440,8 +7446,9 @@ static int __init net_dev_init(void)
        for_each_possible_cpu(i) {
                struct softnet_data *sd = &per_cpu(softnet_data, i);
 
-               skb_queue_head_init(&sd->input_pkt_queue);
-               skb_queue_head_init(&sd->process_queue);
+               skb_queue_head_init_raw(&sd->input_pkt_queue);
+               skb_queue_head_init_raw(&sd->process_queue);
+               skb_queue_head_init_raw(&sd->tofree_queue);
                INIT_LIST_HEAD(&sd->poll_list);
                sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS