1 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
13 /* A BPF sock_map is used to store sock objects. This is primarily used
14 * for doing socket redirect with BPF helper routines.
16 * A sock map may have BPF programs attached to it; currently a program
17 * used to parse packets and a program to provide a verdict and redirect
18 * decision on the packet are supported. Any programs attached to a sock
19 * map are inherited by sock objects when they are added to the map. If
20 * no BPF programs are attached, the sock object may only be used for sock
23 * A sock object may be in multiple maps, but can only inherit a single
24 * parse or verdict program. If adding a sock object to a map would result
25 * in having multiple parsing programs, the update will return an EBUSY error.
27 * For reference, this program is similar to devmap used in the XDP context;
28 * reviewing these together may be useful. For an example, please review
29 * ./samples/bpf/sockmap/.
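 *
 * As a rough illustration (not part of this file), a minimal parser and
 * stream verdict program pair attached to a sockmap might look like the
 * sketch below; the map name "sock_map" and the fixed key 0 are arbitrary
 * choices for the example:
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type		= BPF_MAP_TYPE_SOCKMAP,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(int),
 *		.max_entries	= 2,
 *	};
 *
 *	SEC("sk_skb/stream_parser")
 *	int bpf_prog_parser(struct __sk_buff *skb)
 *	{
 *		return skb->len;
 *	}
 *
 *	SEC("sk_skb/stream_verdict")
 *	int bpf_prog_verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 *
 * The programs are attached to the map with BPF_PROG_ATTACH using the
 * BPF_SK_SKB_STREAM_PARSER and BPF_SK_SKB_STREAM_VERDICT attach types,
 * and sockets are added with bpf_map_update_elem() using the socket fd
 * as the value.
 */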
31 #include <linux/bpf.h>
33 #include <linux/filter.h>
34 #include <linux/errno.h>
35 #include <linux/file.h>
36 #include <linux/kernel.h>
37 #include <linux/net.h>
38 #include <linux/skbuff.h>
39 #include <linux/workqueue.h>
40 #include <linux/list.h>
42 #include <net/strparser.h>
44 #include <linux/ptr_ring.h>
45 #include <net/inet_common.h>
46 #include <linux/sched/signal.h>
48 #define SOCK_CREATE_FLAG_MASK \
49 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
51 struct bpf_sock_progs {
52 struct bpf_prog *bpf_tx_msg;
53 struct bpf_prog *bpf_parse;
54 struct bpf_prog *bpf_verdict;
59 struct sock **sock_map;
60 struct bpf_sock_progs progs;
64 struct hlist_head head;
70 struct bucket *buckets;
74 struct bpf_sock_progs progs;
79 struct hlist_node hash_node;
85 enum smap_psock_state {
89 struct smap_psock_map_entry {
90 struct list_head list;
92 struct htab_elem *hash_link;
93 struct bpf_htab *htab;
100 /* datapath variables */
101 struct sk_buff_head rxqueue;
104 /* datapath error path cache across tx work invocations */
107 struct sk_buff *save_skb;
109 /* datapath variables for tx_msg ULP */
110 struct sock *sk_redir;
115 struct sk_msg_buff *cork;
116 struct list_head ingress;
118 struct strparser strp;
119 struct bpf_prog *bpf_tx_msg;
120 struct bpf_prog *bpf_parse;
121 struct bpf_prog *bpf_verdict;
122 struct list_head maps;
124 /* Back reference used when sock callbacks trigger sockmap operations */
128 struct work_struct tx_work;
129 struct work_struct gc_work;
131 struct proto *sk_proto;
132 void (*save_close)(struct sock *sk, long timeout);
133 void (*save_data_ready)(struct sock *sk);
134 void (*save_write_space)(struct sock *sk);
137 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
138 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
139 int nonblock, int flags, int *addr_len);
140 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
141 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
142 int offset, size_t size, int flags);
144 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
146 return rcu_dereference_sk_user_data(sk);
149 static bool bpf_tcp_stream_read(const struct sock *sk)
151 struct smap_psock *psock;
155 psock = smap_psock_sk(sk);
156 if (unlikely(!psock))
158 empty = list_empty(&psock->ingress);
164 static struct proto tcp_bpf_proto;
165 static int bpf_tcp_init(struct sock *sk)
167 struct smap_psock *psock;
170 psock = smap_psock_sk(sk);
171 if (unlikely(!psock)) {
176 if (unlikely(psock->sk_proto)) {
181 psock->save_close = sk->sk_prot->close;
182 psock->sk_proto = sk->sk_prot;
184 if (psock->bpf_tx_msg) {
185 tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
186 tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
187 tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
188 tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
191 sk->sk_prot = &tcp_bpf_proto;
196 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
197 static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
199 static void bpf_tcp_release(struct sock *sk)
201 struct smap_psock *psock;
204 psock = smap_psock_sk(sk);
205 if (unlikely(!psock))
209 free_start_sg(psock->sock, psock->cork);
214 if (psock->sk_proto) {
215 sk->sk_prot = psock->sk_proto;
216 psock->sk_proto = NULL;
222 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
224 atomic_dec(&htab->count);
228 static void bpf_tcp_close(struct sock *sk, long timeout)
230 void (*close_fun)(struct sock *sk, long timeout);
231 struct smap_psock_map_entry *e, *tmp;
232 struct sk_msg_buff *md, *mtmp;
233 struct smap_psock *psock;
237 psock = smap_psock_sk(sk);
238 if (unlikely(!psock)) {
240 return sk->sk_prot->close(sk, timeout);
243 /* The psock may be destroyed anytime after exiting the RCU critical
244 * section, so by the time we use close_fun the psock may no longer
245 * be valid. However, bpf_tcp_close is called with the sock lock
246 * held, so the close hook and sk are still valid.
248 close_fun = psock->save_close;
250 write_lock_bh(&sk->sk_callback_lock);
252 free_start_sg(psock->sock, psock->cork);
257 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
259 free_start_sg(psock->sock, md);
263 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
265 osk = cmpxchg(e->entry, sk, NULL);
268 smap_release_sock(psock, sk);
271 hlist_del_rcu(&e->hash_link->hash_node);
272 smap_release_sock(psock, e->hash_link->sk);
273 free_htab_elem(e->htab, e->hash_link);
276 write_unlock_bh(&sk->sk_callback_lock);
278 close_fun(sk, timeout);
288 static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
291 .user_visible = false,
293 .init = bpf_tcp_init,
294 .release = bpf_tcp_release,
297 static int memcopy_from_iter(struct sock *sk,
298 struct sk_msg_buff *md,
299 struct iov_iter *from, int bytes)
301 struct scatterlist *sg = md->sg_data;
302 int i = md->sg_curr, rc = -ENOSPC;
308 if (md->sg_copybreak >= sg[i].length) {
309 md->sg_copybreak = 0;
311 if (++i == MAX_SKB_FRAGS)
318 copy = sg[i].length - md->sg_copybreak;
319 to = sg_virt(&sg[i]) + md->sg_copybreak;
320 md->sg_copybreak += copy;
322 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
323 rc = copy_from_iter_nocache(to, copy, from);
325 rc = copy_from_iter(to, copy, from);
336 md->sg_copybreak = 0;
337 if (++i == MAX_SKB_FRAGS)
339 } while (i != md->sg_end);
345 static int bpf_tcp_push(struct sock *sk, int apply_bytes,
346 struct sk_msg_buff *md,
347 int flags, bool uncharge)
349 bool apply = apply_bytes;
350 struct scatterlist *sg;
356 sg = md->sg_data + md->sg_start;
357 size = (apply && apply_bytes < sg->length) ?
358 apply_bytes : sg->length;
361 tcp_rate_check_app_limited(sk);
364 ret = do_tcp_sendpages(sk, p, offset, size, flags);
375 sk_mem_uncharge(sk, ret);
387 sk_mem_uncharge(sk, ret);
392 if (md->sg_start == MAX_SKB_FRAGS)
394 sg_init_table(sg, 1);
396 if (md->sg_start == md->sg_end)
400 if (apply && !apply_bytes)
406 static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
408 struct scatterlist *sg = md->sg_data + md->sg_start;
410 if (md->sg_copy[md->sg_start]) {
411 md->data = md->data_end = 0;
413 md->data = sg_virt(sg);
414 md->data_end = md->data + sg->length;
418 static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
420 struct scatterlist *sg = md->sg_data;
421 int i = md->sg_start;
424 int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;
426 sk_mem_uncharge(sk, uncharge);
431 if (i == MAX_SKB_FRAGS)
433 } while (i != md->sg_end);
436 static void free_bytes_sg(struct sock *sk, int bytes,
437 struct sk_msg_buff *md, bool charge)
439 struct scatterlist *sg = md->sg_data;
440 int i = md->sg_start, free;
442 while (bytes && sg[i].length) {
445 sg[i].length -= bytes;
446 sg[i].offset += bytes;
448 sk_mem_uncharge(sk, bytes);
453 sk_mem_uncharge(sk, sg[i].length);
454 put_page(sg_page(&sg[i]));
455 bytes -= sg[i].length;
461 if (i == MAX_SKB_FRAGS)
467 static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
469 struct scatterlist *sg = md->sg_data;
470 int i = start, free = 0;
472 while (sg[i].length) {
473 free += sg[i].length;
474 sk_mem_uncharge(sk, sg[i].length);
475 put_page(sg_page(&sg[i]));
481 if (i == MAX_SKB_FRAGS)
488 static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
490 int free = free_sg(sk, md->sg_start, md);
492 md->sg_start = md->sg_end;
496 static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
498 return free_sg(sk, md->sg_curr, md);
501 static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
503 return ((_rc == SK_PASS) ?
504 (md->sk_redir ? __SK_REDIRECT : __SK_PASS) :
508 static unsigned int smap_do_tx_msg(struct sock *sk,
509 struct smap_psock *psock,
510 struct sk_msg_buff *md)
512 struct bpf_prog *prog;
513 unsigned int rc, _rc;
518 /* If the policy was removed mid-send then default to 'accept' */
519 prog = READ_ONCE(psock->bpf_tx_msg);
520 if (unlikely(!prog)) {
525 bpf_compute_data_pointers_sg(md);
526 rc = (*prog->bpf_func)(md, prog->insnsi);
527 psock->apply_bytes = md->apply_bytes;
529 /* Moving return codes from UAPI namespace into internal namespace */
530 _rc = bpf_map_msg_verdict(rc, md);
532 /* The psock has a refcount on the sock but not on the map and, because
533 * we need to drop the RCU read lock here, it's possible the map could be
534 * removed between here and when we need it to execute the sock
535 * redirect. So do the map lookup now for future use.
537 if (_rc == __SK_REDIRECT) {
539 sock_put(psock->sk_redir);
540 psock->sk_redir = do_msg_redirect_map(md);
541 if (!psock->sk_redir) {
545 sock_hold(psock->sk_redir);
554 static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
555 struct smap_psock *psock,
556 struct sk_msg_buff *md, int flags)
558 bool apply = apply_bytes;
559 size_t size, copied = 0;
560 struct sk_msg_buff *r;
563 r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
568 r->sg_start = md->sg_start;
572 size = (apply && apply_bytes < md->sg_data[i].length) ?
573 apply_bytes : md->sg_data[i].length;
575 if (!sk_wmem_schedule(sk, size)) {
581 sk_mem_charge(sk, size);
582 r->sg_data[i] = md->sg_data[i];
583 r->sg_data[i].length = size;
584 md->sg_data[i].length -= size;
585 md->sg_data[i].offset += size;
588 if (md->sg_data[i].length) {
589 get_page(sg_page(&r->sg_data[i]));
590 r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
593 if (i == MAX_SKB_FRAGS)
603 } while (i != md->sg_end);
608 list_add_tail(&r->list, &psock->ingress);
609 sk->sk_data_ready(sk);
611 free_start_sg(sk, r);
619 static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
620 struct sk_msg_buff *md,
623 bool ingress = !!(md->flags & BPF_F_INGRESS);
624 struct smap_psock *psock;
625 struct scatterlist *sg;
631 psock = smap_psock_sk(sk);
632 if (unlikely(!psock))
635 if (!refcount_inc_not_zero(&psock->refcnt))
641 err = bpf_tcp_ingress(sk, send, psock, md, flags);
644 err = bpf_tcp_push(sk, send, md, flags, false);
647 smap_release_sock(psock, sk);
654 free_bytes_sg(NULL, send, md, false);
658 static inline void bpf_md_init(struct smap_psock *psock)
660 if (!psock->apply_bytes) {
661 psock->eval = __SK_NONE;
662 if (psock->sk_redir) {
663 sock_put(psock->sk_redir);
664 psock->sk_redir = NULL;
669 static void apply_bytes_dec(struct smap_psock *psock, int i)
671 if (psock->apply_bytes) {
672 if (psock->apply_bytes < i)
673 psock->apply_bytes = 0;
675 psock->apply_bytes -= i;
679 static int bpf_exec_tx_verdict(struct smap_psock *psock,
680 struct sk_msg_buff *m,
682 int *copied, int flags)
684 bool cork = false, enospc = (m->sg_start == m->sg_end);
690 if (psock->eval == __SK_NONE)
691 psock->eval = smap_do_tx_msg(sk, psock, m);
694 m->cork_bytes > psock->sg_size && !enospc) {
695 psock->cork_bytes = m->cork_bytes - psock->sg_size;
697 psock->cork = kcalloc(1,
698 sizeof(struct sk_msg_buff),
699 GFP_ATOMIC | __GFP_NOWARN);
706 memcpy(psock->cork, m, sizeof(*m));
710 send = psock->sg_size;
711 if (psock->apply_bytes && psock->apply_bytes < send)
712 send = psock->apply_bytes;
714 switch (psock->eval) {
716 err = bpf_tcp_push(sk, send, m, flags, true);
718 *copied -= free_start_sg(sk, m);
722 apply_bytes_dec(psock, send);
723 psock->sg_size -= send;
726 redir = psock->sk_redir;
727 apply_bytes_dec(psock, send);
734 return_mem_sg(sk, send, m);
737 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
740 if (unlikely(err < 0)) {
741 free_start_sg(sk, m);
746 psock->sg_size -= send;
750 free_start_sg(sk, m);
759 free_bytes_sg(sk, send, m, true);
760 apply_bytes_dec(psock, send);
762 psock->sg_size -= send;
770 m->sg_data[m->sg_start].page_link &&
771 m->sg_data[m->sg_start].length)
779 static int bpf_wait_data(struct sock *sk,
780 struct smap_psock *psk, int flags,
781 long timeo, int *err)
785 DEFINE_WAIT_FUNC(wait, woken_wake_function);
787 add_wait_queue(sk_sleep(sk), &wait);
788 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
789 rc = sk_wait_event(sk, &timeo,
790 !list_empty(&psk->ingress) ||
791 !skb_queue_empty(&sk->sk_receive_queue),
793 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
794 remove_wait_queue(sk_sleep(sk), &wait);
799 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
800 int nonblock, int flags, int *addr_len)
802 struct iov_iter *iter = &msg->msg_iter;
803 struct smap_psock *psock;
806 if (unlikely(flags & MSG_ERRQUEUE))
807 return inet_recv_error(sk, msg, len, addr_len);
810 psock = smap_psock_sk(sk);
811 if (unlikely(!psock))
814 if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
818 if (!skb_queue_empty(&sk->sk_receive_queue))
819 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
823 while (copied != len) {
824 struct scatterlist *sg;
825 struct sk_msg_buff *md;
828 md = list_first_entry_or_null(&psock->ingress,
829 struct sk_msg_buff, list);
837 sg = &md->sg_data[i];
841 if (copied + copy > len)
844 n = copy_page_to_iter(page, sg->offset, copy, iter);
848 smap_release_sock(psock, sk);
855 sk_mem_uncharge(sk, copy);
859 if (i == MAX_SKB_FRAGS)
866 } while (i != md->sg_end);
869 if (!sg->length && md->sg_start == md->sg_end) {
872 consume_skb(md->skb);
882 timeo = sock_rcvtimeo(sk, nonblock);
883 data = bpf_wait_data(sk, psock, flags, timeo, &err);
886 if (!skb_queue_empty(&sk->sk_receive_queue)) {
888 smap_release_sock(psock, sk);
889 copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
900 smap_release_sock(psock, sk);
904 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
908 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
910 int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
911 struct sk_msg_buff md = {0};
912 unsigned int sg_copy = 0;
913 struct smap_psock *psock;
914 int copied = 0, err = 0;
915 struct scatterlist *sg;
918 /* It's possible a sock event or user removed the psock _but_ the ops
919 * have not been reprogrammed yet, so we get here. In this case fall back
920 * to tcp_sendmsg. Note this only works because we _only_ ever allow
921 * a single ULP; there is no hierarchy here.
924 psock = smap_psock_sk(sk);
925 if (unlikely(!psock)) {
927 return tcp_sendmsg(sk, msg, size);
930 /* Increment the psock refcnt to ensure it's not released while sending a
931 * message. This is required because sk lookup and BPF programs are used in
932 * separate RCU critical sections. It's OK if we lose the map entry
933 * but we can't lose the sock reference.
935 if (!refcount_inc_not_zero(&psock->refcnt)) {
937 return tcp_sendmsg(sk, msg, size);
941 sg_init_marker(sg, MAX_SKB_FRAGS);
945 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
947 while (msg_data_left(msg)) {
948 struct sk_msg_buff *m;
957 copy = msg_data_left(msg);
958 if (!sk_stream_memory_free(sk))
959 goto wait_for_sndbuf;
961 m = psock->cork_bytes ? psock->cork : &md;
962 m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
963 err = sk_alloc_sg(sk, copy, m->sg_data,
964 m->sg_start, &m->sg_end, &sg_copy,
968 goto wait_for_memory;
973 err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
979 psock->sg_size += copy;
983 /* When bytes are being corked, skip running the BPF program and
984 * applying the verdict unless there is no more buffer space. In
985 * the ENOSPC case simply run the BPF program with the currently
986 * accumulated data. We don't have much choice at this point; we
987 * could try extending the page frags or chaining complex frags,
988 * but even in these cases we will _eventually_ hit an OOM
989 * scenario. More complex recovery schemes may be implemented in
990 * the future, but BPF programs must handle the case where
991 * apply_cork requests are not honored. The canonical check is the
992 * data length; see the sketch after this function.
994 if (psock->cork_bytes) {
995 if (copy > psock->cork_bytes)
996 psock->cork_bytes = 0;
998 psock->cork_bytes -= copy;
1000 if (psock->cork_bytes && !enospc)
1003 /* All cork bytes accounted for, re-run the filter */
1004 psock->eval = __SK_NONE;
1005 psock->cork_bytes = 0;
1008 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1009 if (unlikely(err < 0))
1013 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1015 err = sk_stream_wait_memory(sk, &timeo);
1021 err = sk_stream_error(sk, msg->msg_flags, err);
1024 smap_release_sock(psock, sk);
1025 return copied ? copied : err;
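/* A rough illustration (not part of this file) of an SK_MSG program that
 * requests corking and, as noted above, verifies how much data is
 * actually present before applying its verdict. The map name "sock_map"
 * and the 512-byte threshold are arbitrary assumptions for the sketch:
 *
 *	SEC("sk_msg")
 *	int bpf_prog_tx(struct sk_msg_md *msg)
 *	{
 *		void *data_end = (void *)(long)msg->data_end;
 *		void *data = (void *)(long)msg->data;
 *
 *		if (data + 512 > data_end) {
 *			bpf_msg_cork_bytes(msg, 512);
 *			return SK_PASS;
 *		}
 *		bpf_msg_apply_bytes(msg, 512);
 *		return bpf_msg_redirect_map(msg, &sock_map, 0, 0);
 *	}
 */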
1028 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
1029 int offset, size_t size, int flags)
1031 struct sk_msg_buff md = {0}, *m = NULL;
1032 int err = 0, copied = 0;
1033 struct smap_psock *psock;
1034 struct scatterlist *sg;
1035 bool enospc = false;
1038 psock = smap_psock_sk(sk);
1039 if (unlikely(!psock))
1042 if (!refcount_inc_not_zero(&psock->refcnt))
1048 if (psock->cork_bytes) {
1050 sg = &m->sg_data[m->sg_end];
1054 sg_init_marker(sg, MAX_SKB_FRAGS);
1057 /* Catch case where ring is full and sendpage is stalled. */
1058 if (unlikely(m->sg_end == m->sg_start &&
1059 m->sg_data[m->sg_end].length))
1062 psock->sg_size += size;
1063 sg_set_page(sg, page, size, offset);
1065 m->sg_copy[m->sg_end] = true;
1066 sk_mem_charge(sk, size);
1070 if (m->sg_end == MAX_SKB_FRAGS)
1073 if (m->sg_end == m->sg_start)
1076 if (psock->cork_bytes) {
1077 if (size > psock->cork_bytes)
1078 psock->cork_bytes = 0;
1080 psock->cork_bytes -= size;
1082 if (psock->cork_bytes && !enospc)
1085 /* All cork bytes accounted for, re-run the filter */
1086 psock->eval = __SK_NONE;
1087 psock->cork_bytes = 0;
1090 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1093 smap_release_sock(psock, sk);
1094 return copied ? copied : err;
1097 return tcp_sendpage(sk, page, offset, size, flags);
1100 static void bpf_tcp_msg_add(struct smap_psock *psock,
1102 struct bpf_prog *tx_msg)
1104 struct bpf_prog *orig_tx_msg;
1106 orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
1108 bpf_prog_put(orig_tx_msg);
1111 static int bpf_tcp_ulp_register(void)
1113 tcp_bpf_proto = tcp_prot;
1114 tcp_bpf_proto.close = bpf_tcp_close;
1115 /* Once the BPF TX ULP is registered it is never unregistered. It
1116 * will be in the ULP list for the lifetime of the system. Registering
1117 * it multiple times is not a problem.
1119 return tcp_register_ulp(&bpf_tcp_ulp_ops);
1122 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
1124 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
1127 if (unlikely(!prog))
1131 /* We need to ensure that BPF metadata for maps is also cleared
1132 * when we orphan the skb so that we don't have the possibility
1133 * of referencing a stale map.
1135 TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
1136 skb->sk = psock->sock;
1137 bpf_compute_data_pointers(skb);
1139 rc = (*prog->bpf_func)(skb, prog->insnsi);
1143 /* Moving return codes from UAPI namespace into internal namespace */
1144 return rc == SK_PASS ?
1145 (TCP_SKB_CB(skb)->bpf.sk_redir ? __SK_REDIRECT : __SK_PASS) :
1149 static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
1151 struct sock *sk = psock->sock;
1152 int copied = 0, num_sg;
1153 struct sk_msg_buff *r;
1155 r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
1159 if (!sk_rmem_schedule(sk, skb, skb->len)) {
1164 sg_init_table(r->sg_data, MAX_SKB_FRAGS);
1165 num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
1166 if (unlikely(num_sg < 0)) {
1170 sk_mem_charge(sk, skb->len);
1173 r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
1175 list_add_tail(&r->list, &psock->ingress);
1176 sk->sk_data_ready(sk);
1180 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
1182 struct smap_psock *peer;
1187 rc = smap_verdict_func(psock, skb);
1190 sk = do_sk_redirect_map(skb);
1196 peer = smap_psock_sk(sk);
1197 in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1199 if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
1200 !test_bit(SMAP_TX_RUNNING, &peer->state))) {
1205 if (!in && sock_writeable(sk)) {
1206 skb_set_owner_w(skb, sk);
1207 skb_queue_tail(&peer->rxqueue, skb);
1208 schedule_work(&peer->tx_work);
1211 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
1212 skb_queue_tail(&peer->rxqueue, skb);
1213 schedule_work(&peer->tx_work);
1216 /* Fall through and free skb otherwise */
1223 static void smap_report_sk_error(struct smap_psock *psock, int err)
1225 struct sock *sk = psock->sock;
1228 sk->sk_error_report(sk);
1231 static void smap_read_sock_strparser(struct strparser *strp,
1232 struct sk_buff *skb)
1234 struct smap_psock *psock;
1237 psock = container_of(strp, struct smap_psock, strp);
1238 smap_do_verdict(psock, skb);
1242 /* Called with lock held on socket */
1243 static void smap_data_ready(struct sock *sk)
1245 struct smap_psock *psock;
1248 psock = smap_psock_sk(sk);
1249 if (likely(psock)) {
1250 write_lock_bh(&sk->sk_callback_lock);
1251 strp_data_ready(&psock->strp);
1252 write_unlock_bh(&sk->sk_callback_lock);
1257 static void smap_tx_work(struct work_struct *w)
1259 struct smap_psock *psock;
1260 struct sk_buff *skb;
1263 psock = container_of(w, struct smap_psock, tx_work);
1265 /* lock sock to avoid losing sk_socket at some point during loop */
1266 lock_sock(psock->sock);
1267 if (psock->save_skb) {
1268 skb = psock->save_skb;
1269 rem = psock->save_rem;
1270 off = psock->save_off;
1271 psock->save_skb = NULL;
1275 while ((skb = skb_dequeue(&psock->rxqueue))) {
1281 flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1283 if (likely(psock->sock->sk_socket)) {
1285 n = smap_do_ingress(psock, skb);
1287 n = skb_send_sock_locked(psock->sock,
1295 /* Retry when space is available */
1296 psock->save_skb = skb;
1297 psock->save_rem = rem;
1298 psock->save_off = off;
1301 /* Hard errors break pipe and stop xmit */
1302 smap_report_sk_error(psock, n ? -n : EPIPE);
1303 clear_bit(SMAP_TX_RUNNING, &psock->state);
1315 release_sock(psock->sock);
1318 static void smap_write_space(struct sock *sk)
1320 struct smap_psock *psock;
1323 psock = smap_psock_sk(sk);
1324 if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
1325 schedule_work(&psock->tx_work);
1329 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
1331 if (!psock->strp_enabled)
1333 sk->sk_data_ready = psock->save_data_ready;
1334 sk->sk_write_space = psock->save_write_space;
1335 psock->save_data_ready = NULL;
1336 psock->save_write_space = NULL;
1337 strp_stop(&psock->strp);
1338 psock->strp_enabled = false;
1341 static void smap_destroy_psock(struct rcu_head *rcu)
1343 struct smap_psock *psock = container_of(rcu,
1344 struct smap_psock, rcu);
1346 /* Now that a grace period has passed there is no longer
1347 * any reference to this sock in the sockmap, so we can
1348 * destroy the psock, strparser, and BPF programs. But,
1349 * because we use workqueue sync operations, we cannot
1350 * do it in RCU context.
1352 schedule_work(&psock->gc_work);
1355 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
1357 if (refcount_dec_and_test(&psock->refcnt)) {
1358 tcp_cleanup_ulp(sock);
1359 smap_stop_sock(psock, sock);
1360 clear_bit(SMAP_TX_RUNNING, &psock->state);
1361 rcu_assign_sk_user_data(sock, NULL);
1362 call_rcu_sched(&psock->rcu, smap_destroy_psock);
1366 static int smap_parse_func_strparser(struct strparser *strp,
1367 struct sk_buff *skb)
1369 struct smap_psock *psock;
1370 struct bpf_prog *prog;
1374 psock = container_of(strp, struct smap_psock, strp);
1375 prog = READ_ONCE(psock->bpf_parse);
1377 if (unlikely(!prog)) {
1382 /* Attach the socket for the BPF program to use if needed. We can do
1383 * this because strparser clones the skb before handing it to an upper
1384 * layer, meaning skb_orphan has been called. We NULL sk on the
1385 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
1386 * later, and because we are not charging the memory of this skb to
1389 skb->sk = psock->sock;
1390 bpf_compute_data_pointers(skb);
1391 rc = (*prog->bpf_func)(skb, prog->insnsi);
1397 static int smap_read_sock_done(struct strparser *strp, int err)
1402 static int smap_init_sock(struct smap_psock *psock,
1405 static const struct strp_callbacks cb = {
1406 .rcv_msg = smap_read_sock_strparser,
1407 .parse_msg = smap_parse_func_strparser,
1408 .read_sock_done = smap_read_sock_done,
1411 return strp_init(&psock->strp, sk, &cb);
1414 static void smap_init_progs(struct smap_psock *psock,
1415 struct bpf_prog *verdict,
1416 struct bpf_prog *parse)
1418 struct bpf_prog *orig_parse, *orig_verdict;
1420 orig_parse = xchg(&psock->bpf_parse, parse);
1421 orig_verdict = xchg(&psock->bpf_verdict, verdict);
1424 bpf_prog_put(orig_verdict);
1426 bpf_prog_put(orig_parse);
1429 static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
1431 if (sk->sk_data_ready == smap_data_ready)
1433 psock->save_data_ready = sk->sk_data_ready;
1434 psock->save_write_space = sk->sk_write_space;
1435 sk->sk_data_ready = smap_data_ready;
1436 sk->sk_write_space = smap_write_space;
1437 psock->strp_enabled = true;
1440 static void sock_map_remove_complete(struct bpf_stab *stab)
1442 bpf_map_area_free(stab->sock_map);
1446 static void smap_gc_work(struct work_struct *w)
1448 struct smap_psock_map_entry *e, *tmp;
1449 struct sk_msg_buff *md, *mtmp;
1450 struct smap_psock *psock;
1452 psock = container_of(w, struct smap_psock, gc_work);
1454 /* no callback lock needed because we already detached sockmap ops */
1455 if (psock->strp_enabled)
1456 strp_done(&psock->strp);
1458 cancel_work_sync(&psock->tx_work);
1459 __skb_queue_purge(&psock->rxqueue);
1461 /* At this point all strparser and xmit work must be complete */
1462 if (psock->bpf_parse)
1463 bpf_prog_put(psock->bpf_parse);
1464 if (psock->bpf_verdict)
1465 bpf_prog_put(psock->bpf_verdict);
1466 if (psock->bpf_tx_msg)
1467 bpf_prog_put(psock->bpf_tx_msg);
1470 free_start_sg(psock->sock, psock->cork);
1474 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
1475 list_del(&md->list);
1476 free_start_sg(psock->sock, md);
1480 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1485 if (psock->sk_redir)
1486 sock_put(psock->sk_redir);
1488 sock_put(psock->sock);
1492 static struct smap_psock *smap_init_psock(struct sock *sock, int node)
1494 struct smap_psock *psock;
1496 psock = kzalloc_node(sizeof(struct smap_psock),
1497 GFP_ATOMIC | __GFP_NOWARN,
1500 return ERR_PTR(-ENOMEM);
1502 psock->eval = __SK_NONE;
1504 skb_queue_head_init(&psock->rxqueue);
1505 INIT_WORK(&psock->tx_work, smap_tx_work);
1506 INIT_WORK(&psock->gc_work, smap_gc_work);
1507 INIT_LIST_HEAD(&psock->maps);
1508 INIT_LIST_HEAD(&psock->ingress);
1509 refcount_set(&psock->refcnt, 1);
1511 rcu_assign_sk_user_data(sock, psock);
1516 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
1518 struct bpf_stab *stab;
1522 if (!capable(CAP_NET_ADMIN))
1523 return ERR_PTR(-EPERM);
1525 /* check sanity of attributes */
1526 if (attr->max_entries == 0 || attr->key_size != 4 ||
1527 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1528 return ERR_PTR(-EINVAL);
1530 err = bpf_tcp_ulp_register();
1531 if (err && err != -EEXIST)
1532 return ERR_PTR(err);
1534 stab = kzalloc(sizeof(*stab), GFP_USER);
1536 return ERR_PTR(-ENOMEM);
1538 bpf_map_init_from_attr(&stab->map, attr);
1540 /* make sure page count doesn't overflow */
1541 cost = (u64) stab->map.max_entries * sizeof(struct sock *);
1543 if (cost >= U32_MAX - PAGE_SIZE)
1546 stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
1548 /* if map size is larger than memlock limit, reject it early */
1549 err = bpf_map_precharge_memlock(stab->map.pages);
1554 stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
1555 sizeof(struct sock *),
1556 stab->map.numa_node);
1557 if (!stab->sock_map)
1563 return ERR_PTR(err);
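/* A user space sketch (assuming the libbpf wrappers of this era) of
 * creating a sockmap that satisfies the attribute checks above, i.e.
 * 4-byte keys and 4-byte values holding socket fds:
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
 *				    sizeof(int), sizeof(int),
 *				    1024, 0);
 *
 * The 1024 max_entries value is an arbitrary choice for the example.
 */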
1566 static void smap_list_remove(struct smap_psock *psock,
1567 struct sock **entry,
1568 struct htab_elem *hash_link)
1570 struct smap_psock_map_entry *e, *tmp;
1572 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1573 if (e->entry == entry || e->hash_link == hash_link) {
1580 static void sock_map_free(struct bpf_map *map)
1582 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1587 /* At this point no update, lookup or delete operations can happen.
1588 * However, be aware we can still get socket state event updates
1589 * and data ready callbacks that reference the psock from sk_user_data.
1590 * Also psock worker threads are still in-flight. So smap_release_sock
1591 * will only free the psock after cancel_sync on the worker threads
1592 * and a grace period expires, to ensure the psock is really safe to remove.
1595 for (i = 0; i < stab->map.max_entries; i++) {
1596 struct smap_psock *psock;
1599 sock = xchg(&stab->sock_map[i], NULL);
1603 write_lock_bh(&sock->sk_callback_lock);
1604 psock = smap_psock_sk(sock);
1605 /* This check handles a racing sock event that can get the
1606 * sk_callback_lock before this case but after the xchg happens,
1607 * causing the refcnt to hit zero and the sock user data (psock)
1608 * to be NULL and queued for garbage collection.
1610 if (likely(psock)) {
1611 smap_list_remove(psock, &stab->sock_map[i], NULL);
1612 smap_release_sock(psock, sock);
1614 write_unlock_bh(&sock->sk_callback_lock);
1618 sock_map_remove_complete(stab);
1621 static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
1623 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1624 u32 i = key ? *(u32 *)key : U32_MAX;
1625 u32 *next = (u32 *)next_key;
1627 if (i >= stab->map.max_entries) {
1632 if (i == stab->map.max_entries - 1)
1639 struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
1641 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1643 if (key >= map->max_entries)
1646 return READ_ONCE(stab->sock_map[key]);
1649 static int sock_map_delete_elem(struct bpf_map *map, void *key)
1651 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1652 struct smap_psock *psock;
1653 int k = *(u32 *)key;
1656 if (k >= map->max_entries)
1659 sock = xchg(&stab->sock_map[k], NULL);
1663 write_lock_bh(&sock->sk_callback_lock);
1664 psock = smap_psock_sk(sock);
1668 if (psock->bpf_parse)
1669 smap_stop_sock(psock, sock);
1670 smap_list_remove(psock, &stab->sock_map[k], NULL);
1671 smap_release_sock(psock, sock);
1673 write_unlock_bh(&sock->sk_callback_lock);
1677 /* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
1678 * done inside rcu critical sections. This ensures on updates that the psock
1679 * will not be released via smap_release_sock() until concurrent updates/deletes
1680 * complete. All operations operate on sock_map using cmpxchg and xchg
1681 * operations to ensure we do not get stale references. Any reads into the
1682 * map must be done with READ_ONCE() because of this.
1684 * A psock is destroyed via call_rcu and after any worker threads are cancelled
1685 * and synced, so we are certain all references from the update/lookup/delete
1686 * operations as well as references in the data path are no longer in use.
1688 * Psocks may exist in multiple maps, but only a single set of parse/verdict
1689 * programs may be inherited from the maps it belongs to. A reference count
1690 * is kept with the total number of references to the psock from all maps. The
1691 * psock will not be released until this reaches zero. The psock and sock
1692 * user data use the sk_callback_lock to protect critical data structures
1693 * from concurrent access. This prevents two updates from modifying
1694 * the user data in sock at the same time, and the lock is required anyway
1695 * for modifying callbacks; we simply increase its scope slightly.
1698 * - psock must always be read inside RCU critical section
1699 * - sk_user_data must only be modified inside sk_callback_lock and read
1700 * inside RCU critical section.
1701 * - psock->maps list must only be read & modified inside sk_callback_lock
1702 * - sock_map must use READ_ONCE and (cmp)xchg operations
1703 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
1706 static int __sock_map_ctx_update_elem(struct bpf_map *map,
1707 struct bpf_sock_progs *progs,
1709 struct sock **map_link,
1712 struct bpf_prog *verdict, *parse, *tx_msg;
1713 struct smap_psock_map_entry *e = NULL;
1714 struct smap_psock *psock;
1718 /* 1. If the sock map has BPF programs, those will be inherited by the
1719 * sock being added. If the sock is already attached to BPF programs,
1720 * this results in an error.
1722 verdict = READ_ONCE(progs->bpf_verdict);
1723 parse = READ_ONCE(progs->bpf_parse);
1724 tx_msg = READ_ONCE(progs->bpf_tx_msg);
1726 if (parse && verdict) {
1727 /* The bpf prog refcnt may be zero if a concurrent attach operation
1728 * removes the program after the above READ_ONCE() but before
1729 * we increment the refcnt. If this is the case, abort with an
1732 verdict = bpf_prog_inc_not_zero(verdict);
1733 if (IS_ERR(verdict))
1734 return PTR_ERR(verdict);
1736 parse = bpf_prog_inc_not_zero(parse);
1737 if (IS_ERR(parse)) {
1738 bpf_prog_put(verdict);
1739 return PTR_ERR(parse);
1744 tx_msg = bpf_prog_inc_not_zero(tx_msg);
1745 if (IS_ERR(tx_msg)) {
1746 if (parse && verdict) {
1747 bpf_prog_put(parse);
1748 bpf_prog_put(verdict);
1750 return PTR_ERR(tx_msg);
1754 write_lock_bh(&sock->sk_callback_lock);
1755 psock = smap_psock_sk(sock);
1757 /* 2. Do not allow inheriting programs if a psock exists and has
1758 * already inherited programs. This would create confusion about
1759 * which parser/verdict program is running. If no psock exists,
1760 * create one. This is done inside sk_callback_lock to ensure a
1761 * concurrent create doesn't update the user data.
1764 if (READ_ONCE(psock->bpf_parse) && parse) {
1768 if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
1772 if (!refcount_inc_not_zero(&psock->refcnt)) {
1777 psock = smap_init_psock(sock, map->numa_node);
1778 if (IS_ERR(psock)) {
1779 err = PTR_ERR(psock);
1783 set_bit(SMAP_TX_RUNNING, &psock->state);
1788 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
1795 /* 3. At this point we have a reference to a valid psock that is
1796 * running. Attach any BPF programs needed.
1799 bpf_tcp_msg_add(psock, sock, tx_msg);
1801 err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
1806 if (parse && verdict && !psock->strp_enabled) {
1807 err = smap_init_sock(psock, sock);
1810 smap_init_progs(psock, verdict, parse);
1811 smap_start_sock(psock, sock);
1814 /* 4. Place psock in sockmap for use and stop any programs on
1815 * the old sock, assuming it's not the same sock we are replacing
1816 * it with. Because we can only have a single set of programs, if
1817 * old_sock has a strp we can stop it.
1820 e->entry = map_link;
1821 list_add_tail(&e->list, &psock->maps);
1823 write_unlock_bh(&sock->sk_callback_lock);
1827 smap_release_sock(psock, sock);
1829 if (parse && verdict) {
1830 bpf_prog_put(parse);
1831 bpf_prog_put(verdict);
1834 bpf_prog_put(tx_msg);
1835 write_unlock_bh(&sock->sk_callback_lock);
1840 static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
1841 struct bpf_map *map,
1842 void *key, u64 flags)
1844 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1845 struct bpf_sock_progs *progs = &stab->progs;
1846 struct sock *osock, *sock;
1847 u32 i = *(u32 *)key;
1850 if (unlikely(flags > BPF_EXIST))
1853 if (unlikely(i >= stab->map.max_entries))
1856 sock = READ_ONCE(stab->sock_map[i]);
1857 if (flags == BPF_EXIST && !sock)
1859 else if (flags == BPF_NOEXIST && sock)
1863 err = __sock_map_ctx_update_elem(map, progs, sock, &stab->sock_map[i],
1868 osock = xchg(&stab->sock_map[i], sock);
1870 struct smap_psock *opsock = smap_psock_sk(osock);
1872 write_lock_bh(&osock->sk_callback_lock);
1873 smap_list_remove(opsock, &stab->sock_map[i], NULL);
1874 smap_release_sock(opsock, osock);
1875 write_unlock_bh(&osock->sk_callback_lock);
1881 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
1883 struct bpf_sock_progs *progs;
1884 struct bpf_prog *orig;
1886 if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
1887 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1889 progs = &stab->progs;
1890 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1891 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1893 progs = &htab->progs;
1899 case BPF_SK_MSG_VERDICT:
1900 orig = xchg(&progs->bpf_tx_msg, prog);
1902 case BPF_SK_SKB_STREAM_PARSER:
1903 orig = xchg(&progs->bpf_parse, prog);
1905 case BPF_SK_SKB_STREAM_VERDICT:
1906 orig = xchg(&progs->bpf_verdict, prog);
1918 static void *sock_map_lookup(struct bpf_map *map, void *key)
1923 static int sock_map_update_elem(struct bpf_map *map,
1924 void *key, void *value, u64 flags)
1926 struct bpf_sock_ops_kern skops;
1927 u32 fd = *(u32 *)value;
1928 struct socket *socket;
1931 socket = sockfd_lookup(fd, &err);
1935 skops.sk = socket->sk;
1941 if (skops.sk->sk_type != SOCK_STREAM ||
1942 skops.sk->sk_protocol != IPPROTO_TCP) {
1947 err = sock_map_ctx_update_elem(&skops, map, key, flags);
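/* From user space the path above is reached through the normal map
 * update syscall. A rough sketch (assuming the libbpf wrappers and an
 * already established TCP socket "sock_fd"; the names are illustrative):
 *
 *	int key = 0;
 *
 *	err = bpf_prog_attach(verdict_prog_fd, map_fd,
 *			      BPF_SK_SKB_STREAM_VERDICT, 0);
 *	if (!err)
 *		err = bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 *
 * Only SOCK_STREAM/IPPROTO_TCP sockets are accepted, per the check
 * above.
 */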
1952 static void sock_map_release(struct bpf_map *map)
1954 struct bpf_sock_progs *progs;
1955 struct bpf_prog *orig;
1957 if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
1958 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1960 progs = &stab->progs;
1962 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1964 progs = &htab->progs;
1967 orig = xchg(&progs->bpf_parse, NULL);
1970 orig = xchg(&progs->bpf_verdict, NULL);
1974 orig = xchg(&progs->bpf_tx_msg, NULL);
1979 static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1981 struct bpf_htab *htab;
1985 if (!capable(CAP_NET_ADMIN))
1986 return ERR_PTR(-EPERM);
1988 /* check sanity of attributes */
1989 if (attr->max_entries == 0 || attr->value_size != 4 ||
1990 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1991 return ERR_PTR(-EINVAL);
1993 if (attr->key_size > MAX_BPF_STACK)
1994 /* eBPF programs initialize keys on the stack, so they cannot be
1995 * larger than the max stack size
1997 return ERR_PTR(-E2BIG);
1999 err = bpf_tcp_ulp_register();
2000 if (err && err != -EEXIST)
2001 return ERR_PTR(err);
2003 htab = kzalloc(sizeof(*htab), GFP_USER);
2005 return ERR_PTR(-ENOMEM);
2007 bpf_map_init_from_attr(&htab->map, attr);
2009 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
2010 htab->elem_size = sizeof(struct htab_elem) +
2011 round_up(htab->map.key_size, 8);
2013 if (htab->n_buckets == 0 ||
2014 htab->n_buckets > U32_MAX / sizeof(struct bucket))
2017 cost = (u64) htab->n_buckets * sizeof(struct bucket) +
2018 (u64) htab->elem_size * htab->map.max_entries;
2020 if (cost >= U32_MAX - PAGE_SIZE)
2023 htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
2024 err = bpf_map_precharge_memlock(htab->map.pages);
2029 htab->buckets = bpf_map_area_alloc(
2030 htab->n_buckets * sizeof(struct bucket),
2031 htab->map.numa_node);
2035 for (i = 0; i < htab->n_buckets; i++) {
2036 INIT_HLIST_HEAD(&htab->buckets[i].head);
2037 raw_spin_lock_init(&htab->buckets[i].lock);
2043 return ERR_PTR(err);
2046 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
2048 return &htab->buckets[hash & (htab->n_buckets - 1)];
2051 static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
2053 return &__select_bucket(htab, hash)->head;
2056 static void sock_hash_free(struct bpf_map *map)
2058 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2063 /* At this point no update, lookup or delete operations can happen.
2064 * However, be aware we can still get socket state event updates
2065 * and data ready callbacks that reference the psock from sk_user_data.
2066 * Also psock worker threads are still in-flight. So smap_release_sock
2067 * will only free the psock after cancel_sync on the worker threads
2068 * and a grace period expires, to ensure the psock is really safe to remove.
2071 for (i = 0; i < htab->n_buckets; i++) {
2072 struct hlist_head *head = select_bucket(htab, i);
2073 struct hlist_node *n;
2074 struct htab_elem *l;
2076 hlist_for_each_entry_safe(l, n, head, hash_node) {
2077 struct sock *sock = l->sk;
2078 struct smap_psock *psock;
2080 hlist_del_rcu(&l->hash_node);
2081 write_lock_bh(&sock->sk_callback_lock);
2082 psock = smap_psock_sk(sock);
2083 /* This check handles a racing sock event that can get
2084 * the sk_callback_lock before this case but after the xchg,
2085 * causing the refcnt to hit zero and the sock user data
2086 * (psock) to be NULL and queued for garbage collection.
2088 if (likely(psock)) {
2089 smap_list_remove(psock, NULL, l);
2090 smap_release_sock(psock, sock);
2092 write_unlock_bh(&sock->sk_callback_lock);
2097 bpf_map_area_free(htab->buckets);
2101 static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
2102 void *key, u32 key_size, u32 hash,
2104 struct htab_elem *old_elem)
2106 struct htab_elem *l_new;
2108 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
2110 atomic_dec(&htab->count);
2111 return ERR_PTR(-E2BIG);
2114 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
2115 htab->map.numa_node);
2117 return ERR_PTR(-ENOMEM);
2119 memcpy(l_new->key, key, key_size);
2125 static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
2126 u32 hash, void *key, u32 key_size)
2128 struct htab_elem *l;
2130 hlist_for_each_entry_rcu(l, head, hash_node) {
2131 if (l->hash == hash && !memcmp(&l->key, key, key_size))
2138 static inline u32 htab_map_hash(const void *key, u32 key_len)
2140 return jhash(key, key_len, 0);
2143 static int sock_hash_get_next_key(struct bpf_map *map,
2144 void *key, void *next_key)
2146 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2147 struct htab_elem *l, *next_l;
2148 struct hlist_head *h;
2152 WARN_ON_ONCE(!rcu_read_lock_held());
2154 key_size = map->key_size;
2156 goto find_first_elem;
2157 hash = htab_map_hash(key, key_size);
2158 h = select_bucket(htab, hash);
2160 l = lookup_elem_raw(h, hash, key, key_size);
2162 goto find_first_elem;
2163 next_l = hlist_entry_safe(
2164 rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
2165 struct htab_elem, hash_node);
2167 memcpy(next_key, next_l->key, key_size);
2171 /* no more elements in this hash list, go to the next bucket */
2172 i = hash & (htab->n_buckets - 1);
2176 /* iterate over buckets */
2177 for (; i < htab->n_buckets; i++) {
2178 h = select_bucket(htab, i);
2180 /* pick first element in the bucket */
2181 next_l = hlist_entry_safe(
2182 rcu_dereference_raw(hlist_first_rcu(h)),
2183 struct htab_elem, hash_node);
2185 /* if it's not empty, just return it */
2186 memcpy(next_key, next_l->key, key_size);
2191 /* iterated over all buckets and all elements */
2195 static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
2196 struct bpf_map *map,
2197 void *key, u64 map_flags)
2199 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2200 struct bpf_sock_progs *progs = &htab->progs;
2201 struct htab_elem *l_new = NULL, *l_old;
2202 struct smap_psock_map_entry *e = NULL;
2203 struct hlist_head *head;
2204 struct smap_psock *psock;
2212 if (sock->sk_type != SOCK_STREAM ||
2213 sock->sk_protocol != IPPROTO_TCP)
2216 if (unlikely(map_flags > BPF_EXIST))
2219 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
2223 WARN_ON_ONCE(!rcu_read_lock_held());
2224 key_size = map->key_size;
2225 hash = htab_map_hash(key, key_size);
2226 b = __select_bucket(htab, hash);
2229 err = __sock_map_ctx_update_elem(map, progs, sock, NULL, key);
2233 /* bpf_map_update_elem() can be called in_irq() */
2234 raw_spin_lock_bh(&b->lock);
2235 l_old = lookup_elem_raw(head, hash, key, key_size);
2236 if (l_old && map_flags == BPF_NOEXIST) {
2240 if (!l_old && map_flags == BPF_EXIST) {
2245 l_new = alloc_sock_hash_elem(htab, key, key_size, hash, sock, l_old);
2246 if (IS_ERR(l_new)) {
2247 err = PTR_ERR(l_new);
2251 psock = smap_psock_sk(sock);
2252 if (unlikely(!psock)) {
2257 e->hash_link = l_new;
2258 e->htab = container_of(map, struct bpf_htab, map);
2259 list_add_tail(&e->list, &psock->maps);
2261 /* add new element to the head of the list, so that
2262 * concurrent search will find it before old elem
2264 hlist_add_head_rcu(&l_new->hash_node, head);
2266 psock = smap_psock_sk(l_old->sk);
2268 hlist_del_rcu(&l_old->hash_node);
2269 smap_list_remove(psock, NULL, l_old);
2270 smap_release_sock(psock, l_old->sk);
2271 free_htab_elem(htab, l_old);
2273 raw_spin_unlock_bh(&b->lock);
2276 raw_spin_unlock_bh(&b->lock);
2279 psock = smap_psock_sk(sock);
2281 smap_release_sock(psock, sock);
2285 static int sock_hash_update_elem(struct bpf_map *map,
2286 void *key, void *value, u64 flags)
2288 struct bpf_sock_ops_kern skops;
2289 u32 fd = *(u32 *)value;
2290 struct socket *socket;
2293 socket = sockfd_lookup(fd, &err);
2297 skops.sk = socket->sk;
2303 err = sock_hash_ctx_update_elem(&skops, map, key, flags);
2308 static int sock_hash_delete_elem(struct bpf_map *map, void *key)
2310 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2311 struct hlist_head *head;
2313 struct htab_elem *l;
2317 key_size = map->key_size;
2318 hash = htab_map_hash(key, key_size);
2319 b = __select_bucket(htab, hash);
2322 raw_spin_lock_bh(&b->lock);
2323 l = lookup_elem_raw(head, hash, key, key_size);
2325 struct sock *sock = l->sk;
2326 struct smap_psock *psock;
2328 hlist_del_rcu(&l->hash_node);
2329 write_lock_bh(&sock->sk_callback_lock);
2330 psock = smap_psock_sk(sock);
2331 /* This check handles a racing sock event that can get the
2332 * sk_callback_lock before this case but after the xchg happens,
2333 * causing the refcnt to hit zero and the sock user data (psock)
2334 * to be NULL and queued for garbage collection.
2336 if (likely(psock)) {
2337 smap_list_remove(psock, NULL, l);
2338 smap_release_sock(psock, sock);
2340 write_unlock_bh(&sock->sk_callback_lock);
2341 free_htab_elem(htab, l);
2344 raw_spin_unlock_bh(&b->lock);
2348 struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
2350 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2351 struct hlist_head *head;
2352 struct htab_elem *l;
2357 key_size = map->key_size;
2358 hash = htab_map_hash(key, key_size);
2359 b = __select_bucket(htab, hash);
2362 raw_spin_lock_bh(&b->lock);
2363 l = lookup_elem_raw(head, hash, key, key_size);
2364 sk = l ? l->sk : NULL;
2365 raw_spin_unlock_bh(&b->lock);
2369 const struct bpf_map_ops sock_map_ops = {
2370 .map_alloc = sock_map_alloc,
2371 .map_free = sock_map_free,
2372 .map_lookup_elem = sock_map_lookup,
2373 .map_get_next_key = sock_map_get_next_key,
2374 .map_update_elem = sock_map_update_elem,
2375 .map_delete_elem = sock_map_delete_elem,
2376 .map_release_uref = sock_map_release,
2379 const struct bpf_map_ops sock_hash_ops = {
2380 .map_alloc = sock_hash_alloc,
2381 .map_free = sock_hash_free,
2382 .map_lookup_elem = sock_map_lookup,
2383 .map_get_next_key = sock_hash_get_next_key,
2384 .map_update_elem = sock_hash_update_elem,
2385 .map_delete_elem = sock_hash_delete_elem,
2388 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
2389 struct bpf_map *, map, void *, key, u64, flags)
2391 WARN_ON_ONCE(!rcu_read_lock_held());
2392 return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
2395 const struct bpf_func_proto bpf_sock_map_update_proto = {
2396 .func = bpf_sock_map_update,
2399 .ret_type = RET_INTEGER,
2400 .arg1_type = ARG_PTR_TO_CTX,
2401 .arg2_type = ARG_CONST_MAP_PTR,
2402 .arg3_type = ARG_PTR_TO_MAP_KEY,
2403 .arg4_type = ARG_ANYTHING,
2406 BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
2407 struct bpf_map *, map, void *, key, u64, flags)
2409 WARN_ON_ONCE(!rcu_read_lock_held());
2410 return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
2413 const struct bpf_func_proto bpf_sock_hash_update_proto = {
2414 .func = bpf_sock_hash_update,
2417 .ret_type = RET_INTEGER,
2418 .arg1_type = ARG_PTR_TO_CTX,
2419 .arg2_type = ARG_CONST_MAP_PTR,
2420 .arg3_type = ARG_PTR_TO_MAP_KEY,
2421 .arg4_type = ARG_ANYTHING,
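/* For reference (not part of this file), the helpers above are intended
 * to be called from a sock_ops program so that sockets can insert
 * themselves into a sockmap or sockhash as connections are established.
 * A rough sketch, with the map name and key being arbitrary assumptions:
 *
 *	SEC("sockops")
 *	int bpf_sockops(struct bpf_sock_ops *skops)
 *	{
 *		__u32 key = 0;
 *
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *		case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *			bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 *			break;
 *		}
 *		return 0;
 *	}
 */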