Apply preempt_rt patch-4.9-rt1.patch.xz
[zynq/linux.git] net/core/dev.c
index 6666b28b6815e1665218af967bc9e3adbb87b86f..b285d038af78d2a88d4315fcdb632574b7c97f9e 100644
@@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPUS;
 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
 
 static seqcount_t devnet_rename_seq;
+static DEFINE_MUTEX(devnet_rename_mutex);
 
 static inline void dev_base_seq_inc(struct net *net)
 {
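
A seqcount alone is not RT-safe here: if the writer (dev_change_name() below) is preempted inside the write section, a higher-priority reader spinning on read_seqcount_retry() live-locks, and cond_resched() does not help the highest-priority runnable task. Pairing the seqcount with devnet_rename_mutex lets readers block on the mutex, with priority inheritance boosting the preempted writer; the hunks below convert both the read and the write side to this scheme.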
@@ -211,14 +212,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 static inline void rps_lock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-       spin_lock(&sd->input_pkt_queue.lock);
+       raw_spin_lock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
 static inline void rps_unlock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-       spin_unlock(&sd->input_pkt_queue.lock);
+       raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
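
On PREEMPT_RT, spinlock_t becomes a sleeping lock, but rps_lock() is taken with interrupts disabled (see process_backlog() and enqueue_to_backlog()), so the backlog queues must keep a true spinlock. The queue head therefore carries a raw lock. A hedged sketch of the companion skbuff change this relies on; the raw_lock and skb_queue_head_init_raw() names come from this diff, the exact layout is an assumption:

	/* sketch: struct sk_buff_head grows a raw lock (placement assumed) */
	struct sk_buff_head {
		struct sk_buff	*next;
		struct sk_buff	*prev;
		__u32		qlen;
		spinlock_t	lock;		/* sleeps on PREEMPT_RT */
		raw_spinlock_t	raw_lock;	/* still busy-waits on RT */
	};

	static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
	{
		raw_spin_lock_init(&list->raw_lock);
		__skb_queue_head_init(list);
	}

net_dev_init() at the end of this diff switches input_pkt_queue, process_queue and the new tofree_queue over to skb_queue_head_init_raw().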
 
@@ -888,7 +889,8 @@ retry:
        strcpy(name, dev->name);
        rcu_read_unlock();
        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
-               cond_resched();
+               mutex_lock(&devnet_rename_mutex);
+               mutex_unlock(&devnet_rename_mutex);
                goto retry;
        }
 
@@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *dev, const char *newname)
        if (dev->flags & IFF_UP)
                return -EBUSY;
 
-       write_seqcount_begin(&devnet_rename_seq);
+       mutex_lock(&devnet_rename_mutex);
+       __raw_write_seqcount_begin(&devnet_rename_seq);
 
-       if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
-               write_seqcount_end(&devnet_rename_seq);
-               return 0;
-       }
+       if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
+               goto outunlock;
 
        memcpy(oldname, dev->name, IFNAMSIZ);
 
        err = dev_get_valid_name(net, dev, newname);
-       if (err < 0) {
-               write_seqcount_end(&devnet_rename_seq);
-               return err;
-       }
+       if (err < 0)
+               goto outunlock;
 
        if (oldname[0] && !strchr(oldname, '%'))
                netdev_info(dev, "renamed from %s\n", oldname);
@@ -1183,11 +1182,12 @@ rollback:
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                dev->name_assign_type = old_assign_type;
-               write_seqcount_end(&devnet_rename_seq);
-               return ret;
+               err = ret;
+               goto outunlock;
        }
 
-       write_seqcount_end(&devnet_rename_seq);
+       __raw_write_seqcount_end(&devnet_rename_seq);
+       mutex_unlock(&devnet_rename_mutex);
 
        netdev_adjacent_rename_links(dev, oldname);
 
@@ -1208,7 +1208,8 @@ rollback:
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
-                       write_seqcount_begin(&devnet_rename_seq);
+                       mutex_lock(&devnet_rename_mutex);
+                       __raw_write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        memcpy(oldname, newname, IFNAMSIZ);
                        dev->name_assign_type = old_assign_type;
@@ -1221,6 +1222,11 @@ rollback:
        }
 
        return err;
+
+outunlock:
+       __raw_write_seqcount_end(&devnet_rename_seq);
+       mutex_unlock(&devnet_rename_mutex);
+       return err;
 }
 
 /**
@@ -2263,6 +2269,7 @@ static void __netif_reschedule(struct Qdisc *q)
        sd->output_queue_tailp = &q->next_sched;
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 
 void __netif_schedule(struct Qdisc *q)
@@ -2344,6 +2351,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
        __this_cpu_write(softnet_data.completion_queue, skb);
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__dev_kfree_skb_irq);
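
raise_softirq_irqoff() may wake the softirq thread on RT, but local_irq_restore() performs no preemption check, so the wakeup could sit until the next scheduling point. preempt_check_resched_rt() closes that window; the same call is added after every local_irq_restore()/local_irq_enable() in this file that may have raised a softirq (the backlog drop path, net_rps_action_and_irq_enable(), __napi_schedule() and dev_cpu_callback() below). A hedged sketch of the helper as the -rt series defines it in linux/preempt.h; the exact spelling may differ:

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define preempt_check_resched_rt()	preempt_check_resched()
	#else
	# define preempt_check_resched_rt()	barrier()
	#endif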
 
@@ -3078,7 +3086,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
         * This permits qdisc->running owner to get the lock more
         * often and dequeue packets faster.
         */
+#ifdef CONFIG_PREEMPT_RT_FULL
+       contended = true;
+#else
        contended = qdisc_is_running(q);
+#endif
        if (unlikely(contended))
                spin_lock(&q->busylock);
 
@@ -3141,8 +3153,10 @@ static void skb_update_prio(struct sk_buff *skb)
 #define skb_update_prio(skb)
 #endif
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 DEFINE_PER_CPU(int, xmit_recursion);
 EXPORT_SYMBOL(xmit_recursion);
+#endif
 
 /**
  *     dev_loopback_xmit - loop back @skb
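
A per-CPU recursion counter assumes the sender cannot migrate between the inc and the dec, which no longer holds once transmission is preemptible. The xmit_rec_*() helpers used in __dev_queue_xmit() below keep the per-CPU counter on mainline and move it into the task on RT. A hedged sketch; the task_struct field is an assumption:

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* counter lives in task_struct (field name assumed) */
	static inline int  xmit_rec_read(void) { return current->xmit_recursion; }
	static inline void xmit_rec_inc(void)  { current->xmit_recursion++; }
	static inline void xmit_rec_dec(void)  { current->xmit_recursion--; }
	#else
	DECLARE_PER_CPU(int, xmit_recursion);
	static inline int  xmit_rec_read(void) { return __this_cpu_read(xmit_recursion); }
	static inline void xmit_rec_inc(void)  { __this_cpu_inc(xmit_recursion); }
	static inline void xmit_rec_dec(void)  { __this_cpu_dec(xmit_recursion); }
	#endif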
@@ -3376,8 +3390,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
                int cpu = smp_processor_id(); /* ok because BHs are off */
 
                if (txq->xmit_lock_owner != cpu) {
-                       if (unlikely(__this_cpu_read(xmit_recursion) >
-                                    XMIT_RECURSION_LIMIT))
+                       if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
                                goto recursion_alert;
 
                        skb = validate_xmit_skb(skb, dev);
@@ -3387,9 +3400,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
                        HARD_TX_LOCK(dev, txq, cpu);
 
                        if (!netif_xmit_stopped(txq)) {
-                               __this_cpu_inc(xmit_recursion);
+                               xmit_rec_inc();
                                skb = dev_hard_start_xmit(skb, dev, txq, &rc);
-                               __this_cpu_dec(xmit_recursion);
+                               xmit_rec_dec();
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
                                        goto out;
@@ -3763,6 +3776,7 @@ drop:
        rps_unlock(sd);
 
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 
        atomic_long_inc(&skb->dev->rx_dropped);
        kfree_skb(skb);
@@ -3781,7 +3795,7 @@ static int netif_rx_internal(struct sk_buff *skb)
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu;
 
-               preempt_disable();
+               migrate_disable();
                rcu_read_lock();
 
                cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -3791,13 +3805,13 @@ static int netif_rx_internal(struct sk_buff *skb)
                ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 
                rcu_read_unlock();
-               preempt_enable();
+               migrate_enable();
        } else
 #endif
        {
                unsigned int qtail;
-               ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
-               put_cpu();
+               ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
+               put_cpu_light();
        }
        return ret;
 }
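
preempt_disable() would make the RPS lookup an atomic section, which on RT forbids the sleeping locks taken inside enqueue_to_backlog(). migrate_disable() only pins the task to its CPU, keeping smp_processor_id() and per-CPU data stable while remaining preemptible. get_cpu_light()/put_cpu_light() are the matching helpers for the non-RPS branch; a hedged sketch of how the -rt series defines them:

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
	# define put_cpu_light()	migrate_enable()
	#else
	# define get_cpu_light()	get_cpu()	/* preempt_disable() + cpu id */
	# define put_cpu_light()	put_cpu()
	#endif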
@@ -3831,11 +3845,9 @@ int netif_rx_ni(struct sk_buff *skb)
 
        trace_netif_rx_ni_entry(skb);
 
-       preempt_disable();
+       local_bh_disable();
        err = netif_rx_internal(skb);
-       if (local_softirq_pending())
-               do_softirq();
-       preempt_enable();
+       local_bh_enable();
 
        return err;
 }
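
The open-coded preempt_disable()/do_softirq() pair existed only to make sure a softirq raised by netif_rx_internal() runs promptly. local_bh_disable()/local_bh_enable() states that intent directly, since local_bh_enable() runs any pending softirqs itself, and unlike preempt_disable() it stays valid on RT, where bh-disabled sections are preemptible.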
@@ -4314,7 +4326,7 @@ static void flush_backlog(struct work_struct *work)
        skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
                if (skb->dev->reg_state == NETREG_UNREGISTERING) {
                        __skb_unlink(skb, &sd->input_pkt_queue);
-                       kfree_skb(skb);
+                       __skb_queue_tail(&sd->tofree_queue, skb);
                        input_queue_head_incr(sd);
                }
        }
@@ -4324,11 +4336,14 @@ static void flush_backlog(struct work_struct *work)
        skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
                if (skb->dev->reg_state == NETREG_UNREGISTERING) {
                        __skb_unlink(skb, &sd->process_queue);
-                       kfree_skb(skb);
+                       __skb_queue_tail(&sd->tofree_queue, skb);
                        input_queue_head_incr(sd);
                }
        }
+       if (!skb_queue_empty(&sd->tofree_queue))
+               raise_softirq_irqoff(NET_RX_SOFTIRQ);
        local_bh_enable();
+
 }
 
 static void flush_all_backlogs(void)
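
The input_pkt_queue walk runs under the now-raw rps_lock(), and freeing an skb can take sleeping locks on RT, so kfree_skb() is not allowed there. Both walks instead park the skbs on a per-CPU tofree_queue and raise NET_RX_SOFTIRQ so that net_rx_action() (below) frees them in softirq context. A hedged sketch of the companion softnet_data change; the field name comes from this diff, its placement is an assumption:

	struct softnet_data {
		/* ... existing members ... */
		struct sk_buff_head	input_pkt_queue;
		struct sk_buff_head	process_queue;
		struct sk_buff_head	tofree_queue;	/* drained by net_rx_action() */
		/* ... */
	};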
@@ -4807,6 +4822,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
                sd->rps_ipi_list = NULL;
 
                local_irq_enable();
+               preempt_check_resched_rt();
 
                /* Send pending IPI's to kick RPS processing on remote cpus. */
                while (remsd) {
@@ -4820,6 +4836,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
        } else
 #endif
                local_irq_enable();
+       preempt_check_resched_rt();
 }
 
 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
@@ -4849,7 +4866,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
        while (again) {
                struct sk_buff *skb;
 
+               local_irq_disable();
                while ((skb = __skb_dequeue(&sd->process_queue))) {
+                       local_irq_enable();
                        rcu_read_lock();
                        __netif_receive_skb(skb);
                        rcu_read_unlock();
@@ -4857,9 +4876,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
                        if (++work >= quota)
                                return work;
 
+                       local_irq_disable();
                }
 
-               local_irq_disable();
                rps_lock(sd);
                if (skb_queue_empty(&sd->input_pkt_queue)) {
                        /*
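
With the backlog queues switched to the raw lock, the dequeue from process_queue is apparently no longer sufficiently serialized by BH protection alone on RT, so the patch keeps interrupts disabled across each __skb_dequeue() and re-enables them around __netif_receive_skb(), leaving the actual packet processing preemptible.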
@@ -4897,9 +4916,11 @@ void __napi_schedule(struct napi_struct *n)
        local_irq_save(flags);
        ____napi_schedule(this_cpu_ptr(&softnet_data), n);
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__napi_schedule);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * __napi_schedule_irqoff - schedule for receive
  * @n: entry to schedule
@@ -4911,6 +4932,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
        ____napi_schedule(this_cpu_ptr(&softnet_data), n);
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
+#endif
 
 void __napi_complete(struct napi_struct *n)
 {
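
On RT, interrupt handlers run as threads with hardware interrupts enabled, so callers of the _irqoff variant cannot guarantee its "IRQs already off" precondition and ____napi_schedule() would race. The variant is compiled out here; presumably a header maps it back to the safe path, roughly:

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* sketch: callers may not really have IRQs off on RT */
	static inline void __napi_schedule_irqoff(struct napi_struct *n)
	{
		__napi_schedule(n);
	}
	#endif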
@@ -5200,13 +5222,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
        struct softnet_data *sd = this_cpu_ptr(&softnet_data);
        unsigned long time_limit = jiffies + 2;
        int budget = netdev_budget;
+       struct sk_buff_head tofree_q;
+       struct sk_buff *skb;
        LIST_HEAD(list);
        LIST_HEAD(repoll);
 
+       __skb_queue_head_init(&tofree_q);
+
        local_irq_disable();
+       skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
        list_splice_init(&sd->poll_list, &list);
        local_irq_enable();
 
+       while ((skb = __skb_dequeue(&tofree_q)))
+               kfree_skb(skb);
+
        for (;;) {
                struct napi_struct *n;
 
@@ -5237,7 +5267,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
        list_splice_tail(&repoll, &list);
        list_splice(&list, &sd->poll_list);
        if (!list_empty(&sd->poll_list))
-               __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+               __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
 
        net_rps_action_and_irq_enable(sd);
 }
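
Two RT details in net_rx_action(): the skbs parked on tofree_queue by flush_backlog() are spliced out with interrupts disabled and freed here, in softirq context where freeing is safe, and leftover polling work is re-raised with __raise_softirq_irqoff_ksoft(), which hands NET_RX_SOFTIRQ to the ksoftirqd thread instead of re-running it inline, so a sustained packet flood is throttled at ksoftirqd priority rather than starving RT tasks.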
@@ -7998,16 +8028,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_enable();
+       preempt_check_resched_rt();
 
        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->process_queue))) {
                netif_rx_ni(skb);
                input_queue_head_incr(oldsd);
        }
-       while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+       while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
                netif_rx_ni(skb);
                input_queue_head_incr(oldsd);
        }
+       while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
+               kfree_skb(skb);
+       }
 
        return NOTIFY_OK;
 }
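
The source CPU is offline, so nothing races with this drain: plain __skb_dequeue() is correct and also avoids skb_dequeue() taking the queue's spinlock_t, which is no longer the lock that protects these queues (the raw lock is). The dead CPU's tofree_queue must be drained here as well, since its net_rx_action() will never run again.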
@@ -8312,8 +8346,9 @@ static int __init net_dev_init(void)
 
                INIT_WORK(flush, flush_backlog);
 
-               skb_queue_head_init(&sd->input_pkt_queue);
-               skb_queue_head_init(&sd->process_queue);
+               skb_queue_head_init_raw(&sd->input_pkt_queue);
+               skb_queue_head_init_raw(&sd->process_queue);
+               skb_queue_head_init_raw(&sd->tofree_queue);
                INIT_LIST_HEAD(&sd->poll_list);
                sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS