 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN	((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX	((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100
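
/* Packets queued on a policy (because states are still being negotiated)
 * are retried by a hold timer that backs off exponentially between these
 * two bounds; see xfrm_policy_queue_process() below, which doubles
 * pq->timeout until it hits XFRM_QUEUE_TMO_MAX and then gives up.
 */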

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(unsigned long arg);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}
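
/* Selector fields are wildcarded by zero values/masks: a zero sel->proto
 * matches any protocol, a zero sel->ifindex matches any device, and the
 * port tests are of the form "(flow_port ^ sel_port) & port_mask", so a
 * zero port mask ignores the ports entirely.
 */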

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	rcu_read_unlock();
}

static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
						  int tos, int oif,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))
		flo = NULL;
	else
		xfrm_pol_hold(pol);

	return flo;
}

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return !pol->walk.dead;
}

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
}

static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
};
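
/* Flow cache contract for policies: ->get takes a reference on a live
 * object (returning NULL for a dead one, which forces a fresh lookup),
 * ->check revalidates a cached object, and ->delete drops the cache's
 * reference.
 */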

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		setup_timer(&policy->timer, xfrm_policy_timer,
			    (unsigned long)policy);
		setup_timer(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, (unsigned long)policy);
		policy->flo.ops = &xfrm_policy_fc_ops;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: all descendant resources must have been released
 * by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources and announce the
 * entry dead. The rule must already have been unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}
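
/* Note the refcounting dance above: a pending timer holds its own
 * reference on the policy, so whenever del_timer() reports that it
 * cancelled a pending timer, that timer's reference is dropped here
 * explicitly with xfrm_pol_put().
 */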

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return net->xfrm.policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del(&pol->bydst);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(&pol->bydst);
			hlist_add_behind(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
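
/* Hash tables grow in powers of two: e.g. an old hmask of 15 (16 buckets)
 * yields ((15 + 1) << 1) - 1 = 31, i.e. 32 buckets.
 */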

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	read_lock_bh(&net->xfrm.xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	write_lock_bh(&net->xfrm.xfrm_policy_lock);

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--)
			INIT_HLIST_HEAD(odst + i);
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead ||
		    xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family,
					  xfrm_policy_id2dir(policy->index));
		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind(&policy->bydst, newpos);
		else
			hlist_add_head(&policy->bydst, chain);
	}

	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}
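
/* Requeueing moves any packets parked on the old policy's hold queue over
 * to its replacement and immediately re-arms the new policy's hold timer,
 * so traffic queued while states were being negotiated survives a policy
 * replacement.
 */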

static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
				   struct xfrm_policy *pol)
{
	u32 mark = policy->mark.v & policy->mark.m;

	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
		return true;

	if ((mark & pol->mark.m) == pol->mark.v &&
	    policy->priority == pol->priority)
		return true;

	return false;
}
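
/* Insertion below keeps each hash chain sorted by priority. An existing
 * policy is replaced (delpol) only when type, selector, mark and security
 * context all match; its queued packets are migrated with
 * xfrm_policy_requeue() before the old entry is killed.
 */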

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *newpos;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_behind(&policy->bydst, newpos);
	else
		hlist_add_head(&policy->bydst, chain);
	__xfrm_policy_link(policy, dir);
	atomic_inc(&net->xfrm.flow_cache_genid);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = 0;
	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0, task_valid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
								 task_valid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

	again1:
		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&net->xfrm.xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, task_valid);

			xfrm_policy_kill(pol);

			write_lock_bh(&net->xfrm.xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1, task_valid);
				xfrm_policy_kill(pol);

				write_lock_bh(&net->xfrm.xfrm_policy_lock);
				goto again2;
			}
		}

	}
	if (!cnt)
		err = -ESRCH;
out:
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
	list_del(&walk->walk.all);
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		if ((pol->priority >= priority) && ret)
			break;

		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}

static int flow_to_policy_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;

	switch (dir) {
	default:
	case FLOW_DIR_IN:
		return XFRM_POLICY_IN;
	case FLOW_DIR_OUT:
		return XFRM_POLICY_OUT;
	case FLOW_DIR_FWD:
		return XFRM_POLICY_FWD;
	}
}
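
/* When the XFRM_POLICY_* and FLOW_DIR_* constants line up, which the
 * compiler can verify at build time, both direction converters
 * (flow_to_policy_dir() above and policy_to_flow_dir() below) collapse to
 * an identity mapping and the switch becomes dead code.
 */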

static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
	struct xfrm_policy *pol;

	if (old_obj)
		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* Resolver returns two references:
	 * one for cache and one for caller of flow_cache_lookup() */
	xfrm_pol_hold(pol);

	return &pol->flo;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
						 const struct flowi *fl, u16 family)
{
	struct xfrm_policy *pol;
	struct net *net = sock_net(sk);

	read_lock_bh(&net->xfrm.xfrm_policy_lock);
	pol = rcu_dereference(sk->sk_policy[dir]);
	if (pol != NULL) {
		bool match;
		int err = 0;

		if (pol->family != family) {
			pol = NULL;
			goto out;
		}

		match = xfrm_selector_match(&pol->selector, fl, family);
		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
out:
	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (list_empty(&pol->walk.all))
		return NULL;

	/* Socket policies are not hashed. */
	if (!hlist_unhashed(&pol->bydst)) {
		hlist_del(&pol->bydst);
		hlist_del(&pol->byidx);
	}

	list_del_init(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
}

static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = sock_net(sk);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
		xfrm_sk_policy_link(pol, dir);
	}
	rcu_assign_pointer(sk->sk_policy[dir], pol);
	if (old_pol) {
		if (pol)
			xfrm_policy_requeue(old_pol, pol);

		/* Unlinking always succeeds. This is the only function
		 * allowed to delete or replace socket policy.
		 */
		xfrm_sk_policy_unlink(old_pol, dir);
	}
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (old_pol)
		xfrm_policy_kill(old_pol);
	return 0;
}

static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
	struct net *net = xp_net(old);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;	/* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		newp->family = old->family;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_sk_policy_link(newp, dir);
		write_unlock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}
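
/* clone_policy() backs socket-policy inheritance (see
 * __xfrm_sk_clone_policy() below): the copy is linked as a socket policy
 * and the allocation's extra reference is dropped once it is linked.
 */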

int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
	const struct xfrm_policy *p;
	struct xfrm_policy *np;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < 2; i++) {
		p = rcu_dereference(osk->sk_policy[i]);
		if (p) {
			np = clone_policy(p, i);
			if (unlikely(!np)) {
				ret = -ENOMEM;
				break;
			}
			rcu_assign_pointer(sk->sk_policy[i], np);
		}
	}
	rcu_read_unlock();
	return ret;
}

static int
xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
	       xfrm_address_t *remote, unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, oif, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, fl->flowi_oif,
						       &tmp, remote,
						       tmpl->encap_family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		} else if (error == -ESRCH) {
			error = -EAGAIN;
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

 fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;

}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it has xfrms we were not
		 * able to build bundle as template resolution failed.
		 * It means we need to try again resolving. */
		if (xdst->num_xfrms > 0)
			return NULL;
	} else if (dst->flags & DST_XFRM_QUEUE) {
		return NULL;
	} else {
		/* Real bundle */
		if (stale_bundle(dst))
			return NULL;
	}

	dst_hold(dst);
	return flo;
}

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (!xdst->route)
		return 0;
	if (stale_bundle(dst))
		return 0;

	return 1;
}

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,
};

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
	} else
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
					      &saddr, &daddr, family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}
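
/* The finished bundle is a chain of xfrm_dst's linked through ->child,
 * from dst0 (the outermost transform) down to the real route:
 * dst0 -> ... -> dstN -> route. Every level's ->path points at the
 * terminating route (set up via xfrm_init_path()), which is what
 * stale_bundle() later revalidates.
 */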

static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;

}

static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}

static void xfrm_policy_queue_process(unsigned long arg)
{
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
	struct net *net = xp_net(pol);
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(dst->path);
	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(skb_dst(skb)->path);
		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		nf_reset(skb);
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		dst_output(net, skb->sk, skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	skb_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}

static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;

	if (unlikely(skb_fclone_busy(sk, skb))) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}
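
/* Packets parked here are retried by xfrm_policy_queue_process() once the
 * required states appear; the hold queue is capped at XFRM_MAX_QUEUE_LEN
 * and anything beyond that is dropped with -EAGAIN.
 */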

static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;

	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
	dst1->child = dst;
	dst1->path = dst;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}

static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
{
	struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst, *new_xdst;
	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

	/* Check if the policies from old bundle are usable */
	xdst = NULL;
	if (oldflo) {
		xdst = container_of(oldflo, struct xfrm_dst, flo);
		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		pol_dead = 0;
		for (i = 0; i < num_pols; i++) {
			pols[i] = xdst->pols[i];
			pol_dead |= pols[i]->walk.dead;
		}
		if (pol_dead) {
			dst_free(&xdst->u.dst);
			xdst = NULL;
			num_pols = 0;
			num_xfrms = 0;
			oldflo = NULL;
		}
	}

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	if (xdst == NULL) {
		num_pols = 1;
		pols[0] = __xfrm_policy_lookup(net, fl, family,
					       flow_to_policy_dir(dir));
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto inc_error;
		if (num_pols == 0)
			return NULL;
		if (num_xfrms <= 0)
			goto make_dummy_bundle;
	}

	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
						  xflo->dst_orig);
	if (IS_ERR(new_xdst)) {
		err = PTR_ERR(new_xdst);
		if (err != -EAGAIN)
			goto error;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		dst_hold(&xdst->u.dst);
		return oldflo;
	} else if (new_xdst == NULL) {
		num_xfrms = 0;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		xdst->num_xfrms = 0;
		dst_hold(&xdst->u.dst);
		return oldflo;
	}

	/* Kill the previous bundle */
	if (xdst) {
		/* The policies were stolen for newly generated bundle */
		xdst->num_pols = 0;
		dst_free(&xdst->u.dst);
	}

	/* Flow cache does not have reference, it dst_free()'s,
	 * but we do need to return one reference for original caller */
	dst_hold(&new_xdst->u.dst);
	return &new_xdst->flo;

make_dummy_bundle:
	/* We found policies, but there were no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build a template (no xfrm_states). */
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	dst_hold(&xdst->u.dst);
	return &xdst->flo;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	if (xdst != NULL)
		dst_free(&xdst->u.dst);
	else
		xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}

static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	xfrm_policy_put_afinfo(afinfo);

	return ret;
}

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      const struct sock *sk, int flags)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct flow_cache_object *flo;
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	dst = NULL;
	xdst = NULL;
	route = NULL;

	sk = sk_const_to_full_sk(sk);
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			dst_hold(&xdst->u.dst);
			xdst->u.dst.flags |= DST_NOCACHE;
			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;
		xflo.flags = flags;

		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		flo = flow_cache_lookup(net, fl, family, dir,
					xfrm_bundle_lookup, &xflo);
		if (flo == NULL)
			goto nopol;
		if (IS_ERR(flo)) {
			err = PTR_ERR(flo);
			goto dropdst;
		}
		xdst = container_of(flo, struct xfrm_dst, flo);

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error.*/
		if (net->xfrm.sysctl_larval_drop) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
			err = -EREMOTE;
			goto error;
		}

		err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
		dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);

/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
 * Otherwise we may send out blackholed packets.
 */
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl,
				    const struct sock *sk, int flags)
{
	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
					    flags | XFRM_LOOKUP_QUEUE |
					    XFRM_LOOKUP_KEEP_DST_REF);

	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
		return make_blackhole(net, dst_orig->ops->family, dst_orig);

	if (IS_ERR(dst))
		dst_release(dst_orig);

	return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);
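
/* With sysctl_larval_drop set, xfrm_lookup() fails a larval (state-less)
 * lookup with -EREMOTE, which xfrm_lookup_route() converts into a
 * blackhole route above, so such packets are discarded instead of queued.
 */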

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * Zero or a positive value is returned when validation succeeds (either a
 * bypass because of an optional transport-mode template, or the next index
 * of the secpath state matched with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len-1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	sk = sk_to_full_sk(sk);
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol) {
		struct flow_cache_object *flo;

		flo = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup, NULL);
		if (IS_ERR_OR_NULL(flo))
			pol = ERR_CAST(flo);
		else
			pol = container_of(flo, struct xfrm_policy, flo);
	}

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to
	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
	 * this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

void xfrm_garbage_collect(struct net *net)
{
	flow_cache_flush(net);
}
EXPORT_SYMBOL(xfrm_garbage_collect);

static void xfrm_garbage_collect_deferred(struct net *net)
{
	flow_cache_flush_deferred(net);
}

static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	} while ((dst = dst->next));
}
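
/* At every level the effective MTU is the smaller of the child MTU
 * (reduced by this transform's overhead via xfrm_state_mtu()) and the
 * route MTU; both are cached so xfrm_bundle_ok() can cheaply notice when
 * either changes.
 */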

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(dst->path);
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(dst->path);
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	return dst->path->ops->neigh_lookup(dst, skb, daddr);
}
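
/* Register an address family's policy hooks.  Any dst_ops methods the
 * caller leaves NULL are filled in with the generic xfrm handlers
 * defined above, so a family only needs to override what it must.
 */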
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
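
/* Typical caller, as a sketch modelled on net/ipv4/xfrm4_policy.c (the
 * field list is illustrative, not exhaustive):
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &xfrm4_dst_ops,
 *		.dst_lookup	= xfrm4_dst_lookup,
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 *
 * Unregistering reverses this and clears the defaults installed above.
 */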
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
					 NULL);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);
	if (!err) {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		synchronize_rcu();

		dst_ops->kmem_cachep = NULL;
		dst_ops->check = NULL;
		dst_ops->negative_advice = NULL;
		dst_ops->link_failure = NULL;
		afinfo->garbage_collect = NULL;
	}
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
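
/* On NETDEV_DOWN, flush the flow cache so no cached bundle keeps a
 * reference to a route over the dying device.
 */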
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_DOWN:
		xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;
	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
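
/* Per-namespace policy setup: allocate the by-index and per-direction
 * by-destination hash tables (8 buckets initially; grown later by
 * xfrm_hash_resize()) and hook up the resize/rebuild work items.
 */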
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}
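
/* Per-namespace bring-up: subsystems are initialized in dependency
 * order, and the gotos below unwind exactly the stages that completed
 * when a later stage fails.
 */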
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	rwlock_init(&net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	rv = flow_cache_init(net);
	if (rv < 0)
		goto out;

	return 0;

out:
	xfrm_sysctl_fini(net);
out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	flow_cache_fini(net);
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}
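
/* Audit helpers: record a policy's security context and selector
 * addresses in SPD-add/SPD-delete audit messages.
 */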
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
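/* IPsec migration (PF_KEY MIGRATE, used e.g. by Mobile IPv6): move the
 * states and policy templates that match a selector from their old
 * address pair to a new one without tearing down the SAs.
 */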
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		if ((pol->priority >= priority) && ret)
			break;

		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	read_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}
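
/* Does template @t cover migrate entry @m?  Mode, protocol and (when
 * m->reqid is set) reqid must agree; tunnel and BEET templates must
 * also match on the old endpoint addresses.
 */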
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
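
/* Validate a migrate request: every entry must actually change the
 * endpoints, must not migrate to a wildcard address, and must not be
 * duplicated within the request.
 */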
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
				    m[i].old_family) &&
		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
				    m[i].old_family))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
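
/* Perform a migration: validate the request, find the matching policy,
 * clone each matching state at the new endpoints, rewrite the policy
 * templates, delete the old states and announce the result via
 * km_migrate().  A failure after states were cloned is unwound in
 * restore_state.
 */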
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif