/* include/net/gen_stats.h — generic network statistics helpers.
 * Source tree: zynq/linux with preempt_rt patch-4.9-rt1 applied
 * (origin: rtime.felk.cvut.cz gitweb).
 */
1 #ifndef __NET_GEN_STATS_H
2 #define __NET_GEN_STATS_H
3
4 #include <linux/gen_stats.h>
5 #include <linux/socket.h>
6 #include <linux/rtnetlink.h>
7 #include <linux/pkt_sched.h>
8 #include <net/net_seq_lock.h>
9
/*
 * Per-CPU basic byte/packet counters.  The u64_stats_sync member allows
 * the 64-bit counters in @bstats to be read consistently on 32-bit
 * architectures while writers update them without a lock.
 */
struct gnet_stats_basic_cpu {
	struct gnet_stats_basic_packed bstats;
	struct u64_stats_sync syncp;
};
14
/*
 * State carried across one statistics dump into a netlink message.
 * Filled in by gnet_stats_start_copy*() and consumed by the
 * gnet_stats_copy_*() helpers and gnet_stats_finish_copy().
 */
struct gnet_dump {
	spinlock_t *      lock;	/* lock protecting the stats being copied */
	struct sk_buff *  skb;	/* netlink message being filled */
	struct nlattr *   tail;	/* nested stats attribute being built */

	/* Backward compatibility */
	int               compat_tc_stats;	/* legacy tc_stats attr type (0 = none) */
	int               compat_xstats;	/* legacy xstats attr type (0 = none) */
	int               padattr;		/* pad attribute type for 64-bit alignment */
	void *            xstats;		/* buffered xstats payload for compat emit */
	int               xstats_len;		/* length of @xstats in bytes */
	struct tc_stats   tc_stats;		/* accumulated legacy stats;
						 * NOTE(review): presumably flushed as a
						 * single attr by gnet_stats_finish_copy()
						 * — confirm against net/core/gen_stats.c
						 */
};
28
/*
 * Begin a statistics dump into @skb, initializing @d.
 * Returns 0 on success or a negative errno-style code on failure
 * (convention inferred from kernel style — confirm in gen_stats.c).
 *
 * The _compat variant additionally records legacy attribute types
 * (@tc_stats_type, @xstats_type) so older userspace keeps working.
 * @padattr is the attribute used for 64-bit payload padding.
 */
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
			  struct gnet_dump *d, int padattr);

int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
				 int tc_stats_type, int xstats_type,
				 spinlock_t *lock, struct gnet_dump *d,
				 int padattr);
36
/*
 * Helpers that append individual statistics blocks to a dump started
 * with gnet_stats_start_copy*().  Each takes both a per-CPU variant
 * (@cpu / @cpu_q, may be NULL) and a plain variant (@b / @q) — the
 * implementation picks whichever the caller provides.
 *
 * @running is a net_seqlock_t (preempt_rt abstraction over the qdisc
 * running seqcount) used to read @b consistently against writers.
 */
int gnet_stats_copy_basic(net_seqlock_t *running,
			  struct gnet_dump *d,
			  struct gnet_stats_basic_cpu __percpu *cpu,
			  struct gnet_stats_basic_packed *b);
/* Copy basic stats into @bstats without touching the dump state. */
void __gnet_stats_copy_basic(net_seqlock_t *running,
			     struct gnet_stats_basic_packed *bstats,
			     struct gnet_stats_basic_cpu __percpu *cpu,
			     struct gnet_stats_basic_packed *b);
/* Append rate-estimator stats @r associated with counter set @b. */
int gnet_stats_copy_rate_est(struct gnet_dump *d,
			     const struct gnet_stats_basic_packed *b,
			     struct gnet_stats_rate_est64 *r);
/* Append queue stats; @qlen is supplied separately by the caller. */
int gnet_stats_copy_queue(struct gnet_dump *d,
			  struct gnet_stats_queue __percpu *cpu_q,
			  struct gnet_stats_queue *q, __u32 qlen);
/* Append @len bytes of application-specific stats from @st. */
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

/* Finalize the dump: close nested attributes and emit compat attrs. */
int gnet_stats_finish_copy(struct gnet_dump *d);
54
/*
 * Traffic rate estimator API.  An estimator periodically samples the
 * byte/packet counters in @bstats (or the per-CPU @cpu_bstats) and
 * maintains an EWMA rate in @rate_est.  @stats_lock protects the
 * counters; @running is the (rt-patched) qdisc running sequence used
 * for consistent reads; @opt carries the netlink configuration.
 *
 * gen_replace_estimator() atomically swaps the configuration of an
 * existing estimator — NOTE(review): atomicity is implemented in
 * net/core/gen_estimator.c, not visible here; confirm there.
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
		      struct gnet_stats_rate_est64 *rate_est,
		      spinlock_t *stats_lock,
		      net_seqlock_t *running, struct nlattr *opt);
/* Remove the estimator attached to @bstats/@rate_est, if any. */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est64 *rate_est);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			  struct gnet_stats_rate_est64 *rate_est,
			  spinlock_t *stats_lock,
			  net_seqlock_t *running, struct nlattr *opt);
/* Return true if an estimator is currently attached to this pair. */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est64 *rate_est);
69 #endif