rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
net: sched: Enhanced Tegra Network Device Queue packet scheduling algorithm
author: Michael Hsu <mhsu@nvidia.com>
Tue, 12 May 2015 00:54:19 +0000 (17:54 -0700)
committer: mobile promotions <svcmobile_promotions@nvidia.com>
Thu, 5 Nov 2015 01:14:00 +0000 (17:14 -0800)
Tegra devices reserve highest priority queue for real-time network
devices, such as audio stream to a network (wifi) gaming controller.

Also control TCP RX by adding a divisor to the TCP window (causing the
TCP sender to limit the traffic it transmits, thereby limiting
TCP RX).

Enhancements:
- added tcp window maximum value parameter
  (can be used instead of the tcp window divisor for limiting TCP RX)
- updated the TX bandwidth calculation algorithm

Bug 1689909

Change-Id: I19856c1233f4db9e47c7452cb8da4a607fab42c5
Signed-off-by: Michael Hsu <mhsu@nvidia.com>
Reviewed-on: http://git-master/r/819323
Reviewed-by: Nagaraj Annaiah <nannaiah@nvidia.com>
Reviewed-by: Ashutosh Jha <ajha@nvidia.com>
net/ipv4/tcp_output.c
net/sched/sch_mq.c
net/sched/sch_tegra.c

index 6a40f8e13497dcbcb5f2e59eeaf347df782e7485..d4ee7b9f6df8641afff4a82a11f40e990bcf7add 100644 (file)
@@ -45,7 +45,9 @@
 /* allow Tegra qdisc to restrict tcp rx datarate */
 #ifdef CONFIG_NET_SCH_TEGRA
 uint tcp_window_divisor = 1;
+uint tcp_window_max = 0;
 module_param(tcp_window_divisor, uint, 0644);
+module_param(tcp_window_max, uint, 0644);
 #endif
 
 /* People can turn this off for buggy TCP's found in printers etc. */
@@ -291,6 +293,14 @@ static u16 tcp_select_window(struct sock *sk)
        else
                new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
 
+#ifdef CONFIG_NET_SCH_TEGRA
+       if ((tcp_window_max > 0) && (new_win > tcp_window_max)) {
+               pr_debug("%s: tcp_window_max %u: new_win %d -> %d\n",
+                       __func__, tcp_window_max, new_win, tcp_window_max);
+               new_win = min(new_win, tcp_window_max);
+       }
+#endif
+
        /* RFC1323 scaling applied */
        new_win >>= tp->rx_opt.rcv_wscale;
 
index 35f78e0355d964efc680c2b5aa12e289782ca392..4ede9bd3ac6c29558f5b67429f9b56f888905fdc 100644 (file)
@@ -56,9 +56,11 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
                return -ENOMEM;
 
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-               dev_queue = netdev_get_tx_queue(dev, ntx);
 #ifdef CONFIG_NET_SCH_TEGRA
                extern struct Qdisc_ops sch_tegra_pfifo_fast_ops;
+#endif
+               dev_queue = netdev_get_tx_queue(dev, ntx);
+#ifdef CONFIG_NET_SCH_TEGRA
                qdisc = qdisc_create_dflt(dev_queue, &sch_tegra_pfifo_fast_ops,
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(ntx + 1)));
index d3c87876a20c198c6c896102ef734964d97827cc..e6cee52674822c3240fd6485f5f18bb82fc01eb6 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/moduleparam.h>
 #include <linux/atomic.h>
+#include <linux/spinlock.h>
 #include <net/pkt_sched.h>
 #include <net/sch_generic.h>
 
@@ -45,7 +46,7 @@
 static int sch_tegra_debug;
 module_param(sch_tegra_debug, int, 0644);
 
-static int sch_tegra_enable = 1;
+static int sch_tegra_enable = 0;
 module_param(sch_tegra_enable, int, 0644);
 
 static unsigned long sch_tegra_pfifo_fast_dequeue_bits;
@@ -91,33 +92,48 @@ struct sch_tegra_pfifo_fast_priv {
        struct sk_buff_head q_highest_prio;
 };
 
+static DEFINE_SPINLOCK(sch_tegra_datarate_lock);
+
 static struct sk_buff *
 sch_tegra_pfifo_fast_dequeue_datarate(struct sk_buff *skb)
 {
+       unsigned long bits = skb ? skb->len * 8 : 0;
+       unsigned long flags;
        unsigned long delta;
 
-       if (skb)
-               sch_tegra_pfifo_fast_dequeue_bits += skb->len * 8;
+       spin_lock_irqsave(&sch_tegra_datarate_lock, flags);
+       if (ULONG_MAX - bits < sch_tegra_pfifo_fast_dequeue_bits) {
+               sch_tegra_pfifo_fast_dequeue_bits
+                       = bits;
+               sch_tegra_pfifo_fast_dequeue_jiffies0
+                       = jiffies;
+               sch_tegra_pfifo_fast_dequeue_jiffies1
+                       = sch_tegra_pfifo_fast_dequeue_jiffies0;
+               goto unlock;
+       }
+       sch_tegra_pfifo_fast_dequeue_bits += bits;
        if (!sch_tegra_pfifo_fast_dequeue_jiffies0)
                sch_tegra_pfifo_fast_dequeue_jiffies0 = jiffies;
        sch_tegra_pfifo_fast_dequeue_jiffies1 = jiffies;
        delta = sch_tegra_pfifo_fast_dequeue_jiffies1
                - sch_tegra_pfifo_fast_dequeue_jiffies0;
-       if (delta < 10)
-               return skb;
+       if (delta < msecs_to_jiffies(100))
+               goto unlock;
        if ((delta > msecs_to_jiffies(1000)) ||
                (sch_tegra_pfifo_fast_dequeue_bits / (delta + 1)
                        > ULONG_MAX / HZ)) {
                sch_tegra_pfifo_fast_dequeue_bits
-                       /= (delta + 1);
+                       = bits;
                sch_tegra_pfifo_fast_dequeue_jiffies0
                        = jiffies;
                sch_tegra_pfifo_fast_dequeue_jiffies1
                        = sch_tegra_pfifo_fast_dequeue_jiffies0;
-               delta = 0;
+               goto unlock;
        }
        sch_tegra_pfifo_fast_dequeue_bits_per_sec
                = sch_tegra_pfifo_fast_dequeue_bits / (delta + 1) * HZ;
+unlock:
+       spin_unlock_irqrestore(&sch_tegra_datarate_lock, flags);
 
        return skb;
 }