netpoll: take rcu_read_lock_bh() in netpoll_send_skb_on_dev()
author     Amerigo Wang <amwang@redhat.com>
           Fri, 10 Aug 2012 01:24:42 +0000 (01:24 +0000)
committer  David S. Miller <davem@davemloft.net>
           Tue, 14 Aug 2012 21:33:31 +0000 (14:33 -0700)
This patch fixes several problems in the call path of
netpoll_send_skb_on_dev():

1. Disable IRQs before calling netpoll_send_skb_on_dev().

2. All the callees of netpoll_send_skb_on_dev() should use
   rcu_dereference_bh() to dereference ->npinfo (see the sketch
   after this list).

3. Rename arp_reply() to netpoll_arp_reply(); the former name is too generic.
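
For context, a minimal sketch of the RCU-bh pairing behind points 1
and 2 (illustrative only; the writer line is the existing publication
in __netpoll_setup() and is not touched by this diff). Disabling hard
IRQs also keeps softirqs off the local CPU, so an IRQs-off region
counts as an RCU-bh read-side critical section, which is what makes
rcu_dereference_bh() legal in the callees:

	/* writer side (assumed, from __netpoll_setup()): publish npinfo
	 * so RCU-bh readers see either NULL or a fully initialized
	 * structure, never a half-built one */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	/* reader side (the calling convention this patch enforces) */
	unsigned long flags;

	local_irq_save(flags);	/* IRQs off => softirqs off => RCU-bh read side */
	npinfo = rcu_dereference_bh(np->dev->npinfo);
	/* ... use npinfo for the transmit path ... */
	local_irq_restore(flags);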

Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/netpoll.h
net/core/netpoll.c

diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 2d178baa49dfcbd5299bb8bba6d0f9e2e410544a..61aee86cf21dd22ecbd171dd9c54f86ca576f486 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -57,7 +57,10 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                             struct net_device *dev);
 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
+       unsigned long flags;
+       local_irq_save(flags);
        netpoll_send_skb_on_dev(np, skb, np->dev);
+       local_irq_restore(flags);
 }
 
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d055bb01328b2d44fc49324d97ae91a05a59f638..174346ac15a0a9edd79b4dc6d8dc071eca40694f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -54,7 +54,7 @@ static atomic_t trapped;
         MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void arp_reply(struct sk_buff *skb);
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -170,7 +170,8 @@ static void poll_napi(struct net_device *dev)
        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
-                       budget = poll_one_napi(dev->npinfo, napi, budget);
+                       budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
+                                              napi, budget);
                        spin_unlock(&napi->poll_lock);
 
                        if (!budget)
@@ -185,13 +186,14 @@ static void service_arp_queue(struct netpoll_info *npi)
                struct sk_buff *skb;
 
                while ((skb = skb_dequeue(&npi->arp_tx)))
-                       arp_reply(skb);
+                       netpoll_arp_reply(skb, npi);
        }
 }
 
 static void netpoll_poll_dev(struct net_device *dev)
 {
        const struct net_device_ops *ops;
+       struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
 
        if (!dev || !netif_running(dev))
                return;
@@ -206,17 +208,18 @@ static void netpoll_poll_dev(struct net_device *dev)
        poll_napi(dev);
 
        if (dev->flags & IFF_SLAVE) {
-               if (dev->npinfo) {
+               if (ni) {
                        struct net_device *bond_dev = dev->master;
                        struct sk_buff *skb;
-                       while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+                       struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
+                       while ((skb = skb_dequeue(&ni->arp_tx))) {
                                skb->dev = bond_dev;
-                               skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+                               skb_queue_tail(&bond_ni->arp_tx, skb);
                        }
                }
        }
 
-       service_arp_queue(dev->npinfo);
+       service_arp_queue(ni);
 
        zap_completion_queue();
 }
@@ -302,6 +305,7 @@ static int netpoll_owner_active(struct net_device *dev)
        return 0;
 }
 
+/* call with IRQ disabled */
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                             struct net_device *dev)
 {
@@ -309,8 +313,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        unsigned long tries;
        const struct net_device_ops *ops = dev->netdev_ops;
        /* It is up to the caller to keep npinfo alive. */
-       struct netpoll_info *npinfo = np->dev->npinfo;
+       struct netpoll_info *npinfo;
+
+       WARN_ON_ONCE(!irqs_disabled());
 
+       npinfo = rcu_dereference_bh(np->dev->npinfo);
        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                __kfree_skb(skb);
                return;
@@ -319,11 +326,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
-               unsigned long flags;
 
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
-               local_irq_save(flags);
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
@@ -347,10 +352,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                }
 
                WARN_ONCE(!irqs_disabled(),
-                       "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+                       "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
                        dev->name, ops->ndo_start_xmit);
 
-               local_irq_restore(flags);
        }
 
        if (status != NETDEV_TX_OK) {
@@ -423,9 +427,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
-static void arp_reply(struct sk_buff *skb)
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
-       struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;