4 #include <linux/rhashtable.h>
7 struct rhashtable rhashtable ____cacheline_aligned_in_smp;
9 /* Keep atomic mem on separate cachelines in structs that include it */
10 atomic_t mem ____cacheline_aligned_in_smp;
20 * fragment queue flags
22 * @INET_FRAG_FIRST_IN: first fragment has arrived
23 * @INET_FRAG_LAST_IN: final fragment has arrived
24 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
27 INET_FRAG_FIRST_IN = BIT(0),
28 INET_FRAG_LAST_IN = BIT(1),
29 INET_FRAG_COMPLETE = BIT(2),
32 struct frag_v4_compare_key {
41 struct frag_v6_compare_key {
42 struct in6_addr saddr;
43 struct in6_addr daddr;
50 * struct inet_frag_queue - fragment queue
53 * @key: keys identifying this frag.
54 * @timer: queue expiration timer
55 * @lock: spinlock protecting this frag
56 * @refcnt: reference count of the queue
57 * @fragments: received fragments head
58 * @fragments_tail: received fragments tail
59 * @stamp: timestamp of the last received fragment
60 * @len: total length of the original datagram
61 * @meat: length of received fragments so far
62 * @flags: fragment queue flags
63 * @max_size: maximum received fragment size
64 * @net: namespace that this frag belongs to
65 * @rcu: rcu head for freeing deferral
67 struct inet_frag_queue {
68 struct rhash_head node;
70 struct frag_v4_compare_key v4;
71 struct frag_v6_compare_key v6;
73 struct timer_list timer;
76 struct sk_buff *fragments;
77 struct sk_buff *fragments_tail;
83 struct netns_frags *net;
90 void (*constructor)(struct inet_frag_queue *q,
92 void (*destructor)(struct inet_frag_queue *);
93 void (*frag_expire)(unsigned long data);
94 struct kmem_cache *frags_cachep;
95 const char *frags_cache_name;
96 struct rhashtable_params rhash_params;
99 int inet_frags_init(struct inet_frags *);
100 void inet_frags_fini(struct inet_frags *);
102 static inline int inet_frags_init_net(struct netns_frags *nf)
104 atomic_set(&nf->mem, 0);
105 return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
107 void inet_frags_exit_net(struct netns_frags *nf);
109 void inet_frag_kill(struct inet_frag_queue *q);
110 void inet_frag_destroy(struct inet_frag_queue *q);
111 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
112 void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
115 static inline void inet_frag_put(struct inet_frag_queue *q)
117 if (atomic_dec_and_test(&q->refcnt))
118 inet_frag_destroy(q);
121 static inline bool inet_frag_evicting(struct inet_frag_queue *q)
126 /* Memory Tracking Functions. */
128 static inline int frag_mem_limit(struct netns_frags *nf)
130 return atomic_read(&nf->mem);
133 static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
135 atomic_sub(i, &nf->mem);
138 static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
140 atomic_add(i, &nf->mem);
143 static inline int sum_frag_mem_limit(struct netns_frags *nf)
145 return atomic_read(&nf->mem);
148 /* RFC 3168 support :
149 * We want to check ECN values of all fragments, to detect invalid combinations.
150 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
152 #define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */
153 #define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */
154 #define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */
155 #define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */
157 extern const u8 ip_frag_ecn_table[16];