/* PRIO section */
#define TCQ_PRIO_BANDS 16
+#define TCQ_MIN_PRIO_BANDS 2
struct tc_prio_qopt
{
unsigned char Scell_log; /* cell size for idle damping */
unsigned char flags;
#define TC_RED_ECN 1
+#define TC_RED_HARDDROP 2
};
struct tc_red_xstats
#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
-#define TCA_SET_OFF TCA_GRED_PARMS
struct tc_gred_qopt
{
- __u32 limit; /* HARD maximal queue length (bytes)
-*/
- __u32 qth_min; /* Min average length threshold (bytes)
-*/
- __u32 qth_max; /* Max average length threshold (bytes)
-*/
- __u32 DP; /* upto 2^32 DPs */
- __u32 backlog;
- __u32 qave;
- __u32 forced;
- __u32 early;
- __u32 other;
- __u32 pdrop;
-
- unsigned char Wlog; /* log(W) */
- unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
- unsigned char Scell_log; /* cell size for idle damping */
- __u8 prio; /* prio of this VQ */
- __u32 packets;
- __u32 bytesin;
+ __u32 limit; /* HARD maximal queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ __u32 DP; /* upto 2^32 DPs */
+ __u32 backlog;
+ __u32 qave;
+ __u32 forced;
+ __u32 early;
+ __u32 other;
+ __u32 pdrop;
+ __u8 Wlog; /* log(W) */
+ __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
+ __u8 Scell_log; /* cell size for idle damping */
+ __u8 prio; /* prio of this VQ */
+ __u32 packets;
+ __u32 bytesin;
};
+
/* gred setup */
struct tc_gred_sopt
{
- __u32 DPs;
- __u32 def_DP;
- __u8 grio;
- __u8 pad1;
- __u16 pad2;
+ __u32 DPs;
+ __u32 def_DP;
+ __u8 grio;
+ __u8 flags;
+ __u16 pad1;
};
/* HTB section */
TCA_NETEM_CORR,
TCA_NETEM_DELAY_DIST,
TCA_NETEM_REORDER,
+ TCA_NETEM_CORRUPT,
__TCA_NETEM_MAX,
};
__u32 correlation;
};
+struct tc_netem_corrupt
+{
+ __u32 probability;
+ __u32 correlation;
+};
+
#define NETEM_DIST_SCALE 8192
#endif
" [ delay TIME [ JITTER [CORRELATION]]]\n" \
" [ distribution {uniform|normal|pareto|paretonormal} ]\n" \
" [ drop PERCENT [CORRELATION]] \n" \
+" [ corrupt PERCENT [CORRELATION]] \n" \
" [ duplicate PERCENT [CORRELATION]]\n" \
" [ reorder PRECENT [CORRELATION] [ gap DISTANCE ]]\n");
}
struct tc_netem_qopt opt;
struct tc_netem_corr cor;
struct tc_netem_reorder reorder;
- __s16 dist_data[MAXDIST];
+ struct tc_netem_corrupt corrupt;
+ __s16 *dist_data = NULL;
memset(&opt, 0, sizeof(opt));
opt.limit = 1000;
memset(&cor, 0, sizeof(cor));
memset(&reorder, 0, sizeof(reorder));
+ memset(&corrupt, 0, sizeof(corrupt));
while (argc > 0) {
if (matches(*argv, "limit") == 0) {
return -1;
}
}
+ } else if (matches(*argv, "corrupt") == 0) {
+ NEXT_ARG();
+ if (get_percent(&corrupt.probability, *argv)) {
+ explain1("corrupt");
+ return -1;
+ }
+ if (NEXT_IS_NUMBER()) {
+ NEXT_ARG();
+ if (get_percent(&corrupt.correlation, *argv)) {
+ explain1("corrupt");
+ return -1;
+ }
+ }
} else if (matches(*argv, "gap") == 0) {
NEXT_ARG();
if (get_u32(&opt.gap, *argv, 0)) {
}
} else if (matches(*argv, "distribution") == 0) {
NEXT_ARG();
+ /* allocate MAXDIST __s16 entries, not MAXDIST bytes: get_distribution()
+  * may fill up to MAXDIST elements, and the table is later sent as
+  * dist_size*sizeof(dist_data[0]) bytes */
+ dist_data = alloca(MAXDIST * sizeof(*dist_data));
dist_size = get_distribution(*argv, dist_data);
if (dist_size < 0)
return -1;
return -1;
}
- if (dist_size > 0 && (opt.latency == 0 || opt.jitter == 0)) {
+ if (dist_data && (opt.latency == 0 || opt.jitter == 0)) {
fprintf(stderr, "distribution specified but no latency and jitter values\n");
explain();
return -1;
}
- addattr_l(n, 1024, TCA_OPTIONS, &opt, sizeof(opt));
- addattr_l(n, 1024, TCA_NETEM_CORR, &cor, sizeof(cor));
- addattr_l(n, 1024, TCA_NETEM_REORDER, &reorder, sizeof(reorder));
+ if (addattr_l(n, TCA_BUF_MAX, TCA_OPTIONS, &opt, sizeof(opt)) < 0)
+ return -1;
- if (dist_size > 0) {
- addattr_l(n, 32768, TCA_NETEM_DELAY_DIST,
- dist_data, dist_size*sizeof(dist_data[0]));
+ if (cor.delay_corr || cor.loss_corr || cor.dup_corr) {
+ if (addattr_l(n, TCA_BUF_MAX, TCA_NETEM_CORR, &cor, sizeof(cor)) < 0)
+ return -1;
+ }
+
+ if (reorder.probability) {
+ if (addattr_l(n, TCA_BUF_MAX, TCA_NETEM_REORDER, &reorder, sizeof(reorder)) < 0)
+ return -1;
+ }
+
+ if (corrupt.probability) {
+ if (addattr_l(n, TCA_BUF_MAX, TCA_NETEM_CORRUPT, &corrupt, sizeof(corrupt)) < 0)
+ return -1;
+ }
+
+ if (dist_data) {
+ if (addattr_l(n, 32768, TCA_NETEM_DELAY_DIST,
+ dist_data, dist_size*sizeof(dist_data[0])) < 0)
+ return -1;
}
tail->rta_len = (void *) NLMSG_TAIL(n) - (void *) tail;
return 0;
{
const struct tc_netem_corr *cor = NULL;
const struct tc_netem_reorder *reorder = NULL;
+ const struct tc_netem_corrupt *corrupt = NULL;
struct tc_netem_qopt qopt;
int len = RTA_PAYLOAD(opt) - sizeof(qopt);
SPRINT_BUF(b1);
return -1;
reorder = RTA_DATA(tb[TCA_NETEM_REORDER]);
}
+ if (tb[TCA_NETEM_CORRUPT]) {
+ if (RTA_PAYLOAD(tb[TCA_NETEM_CORRUPT]) < sizeof(*corrupt))
+ return -1;
+ /* read from the CORRUPT attribute, not REORDER (copy-paste bug):
+  * the payload just validated above is the one that must be used */
+ corrupt = RTA_DATA(tb[TCA_NETEM_CORRUPT]);
+ }
}
fprintf(f, "limit %d", qopt.limit);
sprint_percent(reorder->correlation, b1));
}
+ if (corrupt && corrupt->probability) {
+ fprintf(f, " corrupt %s",
+ sprint_percent(corrupt->probability, b1));
+ if (corrupt->correlation)
+ fprintf(f, " %s",
+ sprint_percent(corrupt->correlation, b1));
+ }
+
if (qopt.gap)
fprintf(f, " gap %lu", (unsigned long)qopt.gap);