// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model (a 2-state Markov chain) and a more
	general 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
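/* Example configuration via iproute2's tc (illustrative commands; the
 * option syntax belongs to the tc front-end, not to this file):
 *
 *	# 100ms delay, 10ms jitter, 25% correlation between successive samples
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# add 0.5% random loss on top of the delay
 *	tc qdisc change dev eth0 root netem delay 100ms 10ms 25% loss 0.5%
 */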
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	struct reciprocal_value cell_size_reciprocal;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};
/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
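/* A worked example of the fixed-point blend above (illustrative numbers,
 * not from the original source): with rho = 0x80000000 (~50% correlation),
 * (1ull<<32) - rho is also ~2^31, so answer ~= value/2 + last/2; each new
 * sample is an equal mix of fresh randomness and the previous output.
 * rho == 0 degenerates to pure prandom_u32(), while rho near ~0U makes
 * each sample track the previous one almost exactly.
 */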
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}
		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
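/* User space reaches the 4-state generator through tc's "loss state"
 * option (illustrative; argument order per tc-netem(8) is
 * p13 [p31 [p32 [p23 p14]]], each percentage converted to a u32 fraction
 * of 2^32 before it lands in a1..a5 above):
 *
 *	tc qdisc add dev eth0 root netem loss state 1% 10% 1% 20% 0.1%
 */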
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
		break;
	}

	return false;
}
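/* In GE terms: a1 = p (good->bad transition), a2 = r (bad->good transition),
 * a3 = h (chance a packet still gets through while in the bad state) and
 * a4 = 1-k (loss probability in the good state); dropping the trailing
 * parameters gives the Gilbert and Simple Gilbert special cases.  A
 * hypothetical bursty-loss profile via tc (see tc-netem(8), "loss gemodel"):
 *
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 */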
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
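/* The two-part computation above is just mu + sigma * t / NETEM_DIST_SCALE
 * split to avoid overflow, with the low half rounded.  Worked example
 * (illustrative numbers): sigma = 20000, t = 4096, NETEM_DIST_SCALE = 8192:
 *   low  = (20000 % 8192) * 4096 = 14811136; rounded and divided: 1808
 *   high = (20000 / 8192) * 4096 = 2 * 4096  = 8192
 *   result = mu + 1808 + 8192 = mu + 10000 = mu + sigma/2,
 * as expected for a table entry sitting at half of +NETEM_DIST_SCALE.
 */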
static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
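/* Worked example (illustrative): with rate = 125000 bytes/sec (1 Mbit/s)
 * and no cell framing, a 1500-byte packet occupies the emulated link for
 * 1500 * 1e9 / 125000 = 12,000,000 ns = 12 ms.  With cell_size = 53 and
 * cell_overhead = 5 (ATM-like framing), the same packet is billed as
 * ceil(1500/53) = 29 cells of 58 bytes = 1682 bytes before the division.
 */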
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}
/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 * when we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return rc_drop;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		unsigned int len, last_len;
		int nb = 0;

		len = skb->len;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}
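/* Reordering example (illustrative tc invocation):
 *
 *	tc qdisc add dev eth0 root netem delay 10ms reorder 25% 50% gap 5
 *
 * Every 5th packet (the gap) becomes a candidate: with probability 25%
 * (correlated at 50%) it is sent immediately via __qdisc_enqueue_head()
 * while the others take the 10ms tfifo path, so the candidate overtakes
 * up to four delayed packets.
 */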
/* Delay the next round with a new future slot with a
 * correct number of bytes and packets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
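/* Slots model links that deliver in bursts (e.g. WiFi aggregation or
 * scheduled mobile uplinks): packets are held until the next slot opens,
 * then at most max_packets/max_bytes drain.  Illustrative tc syntax per
 * tc-netem(8) ("slot MIN_DELAY [MAX_DELAY] [packets P] [bytes B]"):
 *
 *	tc qdisc add dev eth0 root netem slot 800us 10ms packets 32
 */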
static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}
static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
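/* The table itself is produced in user space: iproute2 ships pre-computed
 * inverse-CDF tables (normal, pareto, paretonormal, experimental) that tc
 * loads from its lib directory (commonly /usr/lib/tc) and passes down as
 * the TCA_NETEM_DELAY_DIST blob, e.g.:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 */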
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
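/* Example of the rate/cell knobs from user space (illustrative values; the
 * tc-netem(8) form is rate RATE [PACKETOVERHEAD [CELLSIZE [CELLOVERHEAD]]]):
 *
 *	tc qdisc add dev eth0 root netem rate 5mbit 20 100 5
 *
 * bills each packet 20 extra bytes, rounds it up to 100-byte cells, and
 * charges 5 bytes of per-cell framing before computing transit time.
 */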
static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
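/* Why the custom parser: netem's TCA_OPTIONS payload is a legacy fixed
 * struct tc_netem_qopt immediately followed by optional netlink attributes,
 * so parsing must first skip NLA_ALIGN(sizeof(struct tc_netem_qopt)) bytes
 * instead of using a plain nested parse.
 */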
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case
	 * q->clg and q->loss_model were modified
	 * in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}
static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");