2 * Testsuite for eBPF verifier
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 * Copyright (c) 2017 Facebook
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of version 2 of the GNU General Public
9 * License as published by the Free Software Foundation.
13 #include <asm/types.h>
14 #include <linux/types.h>
26 #include <sys/capability.h>
28 #include <linux/unistd.h>
29 #include <linux/filter.h>
30 #include <linux/bpf_perf_event.h>
31 #include <linux/bpf.h>
32 #include <linux/if_ether.h>
37 # include "autoconf.h"
39 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
40 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
43 #include "bpf_rlimit.h"
46 #include "../../../include/linux/filter.h"
48 #define MAX_INSNS BPF_MAXINSNS
51 #define POINTER_VALUE 0xcafe4all
52 #define TEST_DATA_LEN 64
54 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
55 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
57 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
58 static bool unpriv_disabled = false;
62 struct bpf_insn insns[MAX_INSNS];
63 int fixup_map1[MAX_FIXUPS];
64 int fixup_map2[MAX_FIXUPS];
65 int fixup_map3[MAX_FIXUPS];
66 int fixup_map4[MAX_FIXUPS];
67 int fixup_prog1[MAX_FIXUPS];
68 int fixup_prog2[MAX_FIXUPS];
69 int fixup_map_in_map[MAX_FIXUPS];
70 int fixup_cgroup_storage[MAX_FIXUPS];
72 const char *errstr_unpriv;
73 uint32_t retval, retval_unpriv;
78 } result, result_unpriv;
79 enum bpf_prog_type prog_type;
81 __u8 data[TEST_DATA_LEN];
82 void (*fill_helper)(struct bpf_test *self);
85 /* Note we want this to be 64 bit aligned so that the end of our array is
86 * actually the end of the structure.
88 #define MAX_ENTRIES 11
/*
 * Generate a BPF_MAXINSNS-long program into self->insns: a block of
 * {BPF_LD_ABS byte load, skb_vlan_push helper call} repeated PUSH_CNT
 * times, then the mirror block using skb_vlan_pop.  Each load/call is
 * followed by a conditional jump that bails out to the exit instruction
 * at insn[len - 1] when the loaded byte is not 0x34 or the helper
 * returned non-zero.
 *
 * NOTE(review): this chunk is missing the function's original brace and
 * declaration lines (i, j, PUSH_CNT) — presumably `int i = 0, j;` and a
 * PUSH_CNT macro defined just above; confirm against the full file.
 * Code lines below are kept byte-identical.
 */
100 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
102 /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
104 unsigned int len = BPF_MAXINSNS;
105 struct bpf_insn *insn = self->insns;
/* Preserve ctx (R1) in R6 across helper calls, which clobber R1-R5. */
108 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
110 for (j = 0; j < PUSH_CNT; j++) {
111 insn[i++] = BPF_LD_ABS(BPF_B, 0);
/* Jump to the exit insn at len - 1 unless byte 0 of the packet is 0x34. */
112 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
/* Restore ctx into R1 and set up vlan_push(ctx, proto=1, vlan_tci=2). */
114 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
115 insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
116 insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
117 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
118 BPF_FUNC_skb_vlan_push),
/* Bail out to the exit insn if the helper returned non-zero. */
119 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
/* Second phase: same load/check pattern, but popping the VLAN tags. */
123 for (j = 0; j < PUSH_CNT; j++) {
124 insn[i++] = BPF_LD_ABS(BPF_B, 0);
125 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
127 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
128 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
129 BPF_FUNC_skb_vlan_pop),
130 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
/* Pad the remainder with R0 = 0xbef so the program is exactly len insns. */
136 for (; i < len - 1; i++)
137 insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
138 insn[len - 1] = BPF_EXIT_INSN();
/*
 * Generate a program whose first conditional jump lands just before the
 * exit, skipping over a large gap, to exercise verifier/JIT handling of
 * jumps around a BPF_LD_ABS instruction.
 *
 * NOTE(review): the function's opening brace, the declaration of `i`,
 * the gap-filling loop, and the closing brace are missing from this
 * chunk — presumably the gap between the two visible LD_ABS insns is
 * padded in the omitted lines; confirm against the full file.
 * Code lines below are kept byte-identical.
 */
141 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
143 struct bpf_insn *insn = self->insns;
144 unsigned int len = BPF_MAXINSNS;
/* Save ctx (R1) in R6; LD_ABS implicitly uses the skb in ctx. */
147 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
148 insn[i++] = BPF_LD_ABS(BPF_B, 0);
/* If the loaded byte equals 10, jump over everything up to insn[len - 2]. */
149 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
/* Jump target region: a second byte load followed by the exit. */
152 insn[i++] = BPF_LD_ABS(BPF_B, 1);
153 insn[i] = BPF_EXIT_INSN();
/*
 * Generate a program of semi-random BPF_LD_IMM64 loads XORed into R0,
 * then fold the high 32 bits of R0 into the low 32 bits before exit.
 * self->retval is used as the target instruction count on entry and is
 * overwritten with the expected 32-bit result on return.
 *
 * NOTE(review): the function's braces, the declaration of `i`, the
 * declaration/accumulation of `res` (presumably `uint64_t res` XORed
 * with each `val`), and the copy of tmp[] into insn[] are missing from
 * this chunk — confirm against the full file.  Code lines below are
 * kept byte-identical.
 */
156 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
158 struct bpf_insn *insn = self->insns;
/* Start the accumulator at zero. */
162 insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
/* self->retval here is the requested program length, not yet the result. */
163 while (i < self->retval) {
164 uint64_t val = bpf_semi_rand_get();
/* BPF_LD_IMM64 occupies two insn slots, hence the two-element tmp[]. */
165 struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
170 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
/* Fold R0's upper 32 bits into the lower 32: R0 ^= (R0 >> 32). */
172 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
173 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
174 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
175 insn[i] = BPF_EXIT_INSN();
/* Publish the expected (truncated) result for the test harness. */
177 self->retval = (uint32_t)res;
180 static struct bpf_test tests[] = {
184 BPF_MOV64_IMM(BPF_REG_1, 1),
185 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
186 BPF_MOV64_IMM(BPF_REG_2, 3),
187 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
189 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
190 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
197 "DIV32 by 0, zero check 1",
199 BPF_MOV32_IMM(BPF_REG_0, 42),
200 BPF_MOV32_IMM(BPF_REG_1, 0),
201 BPF_MOV32_IMM(BPF_REG_2, 1),
202 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
209 "DIV32 by 0, zero check 2",
211 BPF_MOV32_IMM(BPF_REG_0, 42),
212 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
213 BPF_MOV32_IMM(BPF_REG_2, 1),
214 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
221 "DIV64 by 0, zero check",
223 BPF_MOV32_IMM(BPF_REG_0, 42),
224 BPF_MOV32_IMM(BPF_REG_1, 0),
225 BPF_MOV32_IMM(BPF_REG_2, 1),
226 BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
233 "MOD32 by 0, zero check 1",
235 BPF_MOV32_IMM(BPF_REG_0, 42),
236 BPF_MOV32_IMM(BPF_REG_1, 0),
237 BPF_MOV32_IMM(BPF_REG_2, 1),
238 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
245 "MOD32 by 0, zero check 2",
247 BPF_MOV32_IMM(BPF_REG_0, 42),
248 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
249 BPF_MOV32_IMM(BPF_REG_2, 1),
250 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
257 "MOD64 by 0, zero check",
259 BPF_MOV32_IMM(BPF_REG_0, 42),
260 BPF_MOV32_IMM(BPF_REG_1, 0),
261 BPF_MOV32_IMM(BPF_REG_2, 1),
262 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
269 "DIV32 by 0, zero check ok, cls",
271 BPF_MOV32_IMM(BPF_REG_0, 42),
272 BPF_MOV32_IMM(BPF_REG_1, 2),
273 BPF_MOV32_IMM(BPF_REG_2, 16),
274 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
275 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
278 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
283 "DIV32 by 0, zero check 1, cls",
285 BPF_MOV32_IMM(BPF_REG_1, 0),
286 BPF_MOV32_IMM(BPF_REG_0, 1),
287 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
290 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
295 "DIV32 by 0, zero check 2, cls",
297 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
298 BPF_MOV32_IMM(BPF_REG_0, 1),
299 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
302 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
307 "DIV64 by 0, zero check, cls",
309 BPF_MOV32_IMM(BPF_REG_1, 0),
310 BPF_MOV32_IMM(BPF_REG_0, 1),
311 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
314 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
319 "MOD32 by 0, zero check ok, cls",
321 BPF_MOV32_IMM(BPF_REG_0, 42),
322 BPF_MOV32_IMM(BPF_REG_1, 3),
323 BPF_MOV32_IMM(BPF_REG_2, 5),
324 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
325 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
328 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
333 "MOD32 by 0, zero check 1, cls",
335 BPF_MOV32_IMM(BPF_REG_1, 0),
336 BPF_MOV32_IMM(BPF_REG_0, 1),
337 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
340 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
345 "MOD32 by 0, zero check 2, cls",
347 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
348 BPF_MOV32_IMM(BPF_REG_0, 1),
349 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
352 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
357 "MOD64 by 0, zero check 1, cls",
359 BPF_MOV32_IMM(BPF_REG_1, 0),
360 BPF_MOV32_IMM(BPF_REG_0, 2),
361 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
364 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
369 "MOD64 by 0, zero check 2, cls",
371 BPF_MOV32_IMM(BPF_REG_1, 0),
372 BPF_MOV32_IMM(BPF_REG_0, -1),
373 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
376 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
380 /* Just make sure that JITs used udiv/umod as otherwise we get
381 * an exception from INT_MIN/-1 overflow similarly as with div
385 "DIV32 overflow, check 1",
387 BPF_MOV32_IMM(BPF_REG_1, -1),
388 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
389 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
392 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
397 "DIV32 overflow, check 2",
399 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
400 BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
403 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
408 "DIV64 overflow, check 1",
410 BPF_MOV64_IMM(BPF_REG_1, -1),
411 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
412 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
415 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
420 "DIV64 overflow, check 2",
422 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
423 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
426 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
431 "MOD32 overflow, check 1",
433 BPF_MOV32_IMM(BPF_REG_1, -1),
434 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
435 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
438 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
443 "MOD32 overflow, check 2",
445 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
446 BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
454 "MOD64 overflow, check 1",
456 BPF_MOV64_IMM(BPF_REG_1, -1),
457 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
458 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
459 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
460 BPF_MOV32_IMM(BPF_REG_0, 0),
461 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
462 BPF_MOV32_IMM(BPF_REG_0, 1),
465 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
470 "MOD64 overflow, check 2",
472 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
473 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
474 BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
475 BPF_MOV32_IMM(BPF_REG_0, 0),
476 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
477 BPF_MOV32_IMM(BPF_REG_0, 1),
480 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
485 "xor32 zero extend check",
487 BPF_MOV32_IMM(BPF_REG_2, -1),
488 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
489 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
490 BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
491 BPF_MOV32_IMM(BPF_REG_0, 2),
492 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
493 BPF_MOV32_IMM(BPF_REG_0, 1),
496 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
504 .errstr = "unknown opcode 00",
512 .errstr = "R0 !read_ok",
521 .errstr = "unreachable",
527 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
528 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
531 .errstr = "unreachable",
537 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
540 .errstr = "jump out of range",
544 "out of range jump2",
546 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
549 .errstr = "jump out of range",
555 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
556 BPF_LD_IMM64(BPF_REG_0, 0),
557 BPF_LD_IMM64(BPF_REG_0, 0),
558 BPF_LD_IMM64(BPF_REG_0, 1),
559 BPF_LD_IMM64(BPF_REG_0, 1),
560 BPF_MOV64_IMM(BPF_REG_0, 2),
563 .errstr = "invalid BPF_LD_IMM insn",
564 .errstr_unpriv = "R1 pointer comparison",
570 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
571 BPF_LD_IMM64(BPF_REG_0, 0),
572 BPF_LD_IMM64(BPF_REG_0, 0),
573 BPF_LD_IMM64(BPF_REG_0, 1),
574 BPF_LD_IMM64(BPF_REG_0, 1),
577 .errstr = "invalid BPF_LD_IMM insn",
578 .errstr_unpriv = "R1 pointer comparison",
584 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
585 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
586 BPF_LD_IMM64(BPF_REG_0, 0),
587 BPF_LD_IMM64(BPF_REG_0, 0),
588 BPF_LD_IMM64(BPF_REG_0, 1),
589 BPF_LD_IMM64(BPF_REG_0, 1),
592 .errstr = "invalid bpf_ld_imm64 insn",
598 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
601 .errstr = "invalid bpf_ld_imm64 insn",
607 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
609 .errstr = "invalid bpf_ld_imm64 insn",
615 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
616 BPF_RAW_INSN(0, 0, 0, 0, 0),
624 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
625 BPF_RAW_INSN(0, 0, 0, 0, 1),
634 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
635 BPF_RAW_INSN(0, 0, 0, 0, 1),
638 .errstr = "uses reserved fields",
644 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
645 BPF_RAW_INSN(0, 0, 0, 1, 1),
648 .errstr = "invalid bpf_ld_imm64 insn",
654 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
655 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
658 .errstr = "invalid bpf_ld_imm64 insn",
664 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
665 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
668 .errstr = "invalid bpf_ld_imm64 insn",
674 BPF_MOV64_IMM(BPF_REG_1, 0),
675 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
676 BPF_RAW_INSN(0, 0, 0, 0, 1),
679 .errstr = "not pointing to valid bpf_map",
685 BPF_MOV64_IMM(BPF_REG_1, 0),
686 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
687 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
690 .errstr = "invalid bpf_ld_imm64 insn",
696 BPF_MOV64_IMM(BPF_REG_0, 1),
697 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
701 .errstr = "unknown opcode c4",
706 BPF_MOV64_IMM(BPF_REG_0, 1),
707 BPF_MOV64_IMM(BPF_REG_1, 5),
708 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
712 .errstr = "unknown opcode cc",
717 BPF_MOV64_IMM(BPF_REG_0, 1),
718 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
726 BPF_MOV64_IMM(BPF_REG_0, 1),
727 BPF_MOV64_IMM(BPF_REG_1, 5),
728 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
736 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
738 .errstr = "not an exit",
744 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
747 .errstr = "back-edge",
753 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
754 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
755 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
756 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
759 .errstr = "back-edge",
765 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
766 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
767 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
768 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
771 .errstr = "back-edge",
775 "read uninitialized register",
777 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
780 .errstr = "R2 !read_ok",
784 "read invalid register",
786 BPF_MOV64_REG(BPF_REG_0, -1),
789 .errstr = "R15 is invalid",
793 "program doesn't init R0 before exit",
795 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
798 .errstr = "R0 !read_ok",
802 "program doesn't init R0 before exit in all branches",
804 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
805 BPF_MOV64_IMM(BPF_REG_0, 1),
806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
809 .errstr = "R0 !read_ok",
810 .errstr_unpriv = "R1 pointer comparison",
814 "stack out of bounds",
816 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
819 .errstr = "invalid stack",
823 "invalid call insn1",
825 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
828 .errstr = "unknown opcode 8d",
832 "invalid call insn2",
834 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
837 .errstr = "BPF_CALL uses reserved",
841 "invalid function call",
843 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
846 .errstr = "invalid func unknown#1234567",
850 "uninitialized stack1",
852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
854 BPF_LD_MAP_FD(BPF_REG_1, 0),
855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
856 BPF_FUNC_map_lookup_elem),
860 .errstr = "invalid indirect read from stack",
864 "uninitialized stack2",
866 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
867 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
870 .errstr = "invalid read from stack",
874 "invalid fp arithmetic",
875 /* If this gets ever changed, make sure JITs can deal with it. */
877 BPF_MOV64_IMM(BPF_REG_0, 0),
878 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
879 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
880 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
883 .errstr = "R1 subtraction from stack pointer",
887 "non-invalid fp arithmetic",
889 BPF_MOV64_IMM(BPF_REG_0, 0),
890 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
896 "invalid argument register",
898 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
899 BPF_FUNC_get_cgroup_classid),
900 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
901 BPF_FUNC_get_cgroup_classid),
904 .errstr = "R1 !read_ok",
906 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
909 "non-invalid argument register",
911 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
912 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
913 BPF_FUNC_get_cgroup_classid),
914 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
915 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
916 BPF_FUNC_get_cgroup_classid),
920 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
923 "check valid spill/fill",
925 /* spill R1(ctx) into stack */
926 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
927 /* fill it back into R2 */
928 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
929 /* should be able to access R0 = *(R2 + 8) */
930 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
931 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
934 .errstr_unpriv = "R0 leaks addr",
936 .result_unpriv = REJECT,
937 .retval = POINTER_VALUE,
940 "check valid spill/fill, skb mark",
942 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
943 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
944 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
945 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
946 offsetof(struct __sk_buff, mark)),
950 .result_unpriv = ACCEPT,
953 "check corrupted spill/fill",
955 /* spill R1(ctx) into stack */
956 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
957 /* mess up with R1 pointer on stack */
958 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
959 /* fill back into R0 should fail */
960 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
963 .errstr_unpriv = "attempt to corrupt spilled",
964 .errstr = "corrupted spill",
968 "invalid src register in STX",
970 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
973 .errstr = "R15 is invalid",
977 "invalid dst register in STX",
979 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
982 .errstr = "R14 is invalid",
986 "invalid dst register in ST",
988 BPF_ST_MEM(BPF_B, 14, -1, -1),
991 .errstr = "R14 is invalid",
995 "invalid src register in LDX",
997 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1000 .errstr = "R12 is invalid",
1004 "invalid dst register in LDX",
1006 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1009 .errstr = "R11 is invalid",
1015 BPF_RAW_INSN(0, 0, 0, 0, 0),
1018 .errstr = "unknown opcode 00",
1024 BPF_RAW_INSN(1, 0, 0, 0, 0),
1027 .errstr = "BPF_LDX uses reserved fields",
1033 BPF_RAW_INSN(-1, 0, 0, 0, 0),
1036 .errstr = "unknown opcode ff",
1042 BPF_RAW_INSN(-1, -1, -1, -1, -1),
1045 .errstr = "unknown opcode ff",
1051 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1054 .errstr = "BPF_ALU uses reserved fields",
1058 "misaligned read from stack",
1060 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1061 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1064 .errstr = "misaligned stack access",
1068 "invalid map_fd for function call",
1070 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1071 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1073 BPF_LD_MAP_FD(BPF_REG_1, 0),
1074 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1075 BPF_FUNC_map_delete_elem),
1078 .errstr = "fd 0 is not pointing to valid bpf_map",
1082 "don't check return value before access",
1084 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1085 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1087 BPF_LD_MAP_FD(BPF_REG_1, 0),
1088 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1089 BPF_FUNC_map_lookup_elem),
1090 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1093 .fixup_map1 = { 3 },
1094 .errstr = "R0 invalid mem access 'map_value_or_null'",
1098 "access memory with incorrect alignment",
1100 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1101 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1103 BPF_LD_MAP_FD(BPF_REG_1, 0),
1104 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1105 BPF_FUNC_map_lookup_elem),
1106 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1107 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1110 .fixup_map1 = { 3 },
1111 .errstr = "misaligned value access",
1113 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1116 "sometimes access memory with incorrect alignment",
1118 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1119 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1121 BPF_LD_MAP_FD(BPF_REG_1, 0),
1122 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1123 BPF_FUNC_map_lookup_elem),
1124 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1125 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1127 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1130 .fixup_map1 = { 3 },
1131 .errstr = "R0 invalid mem access",
1132 .errstr_unpriv = "R0 leaks addr",
1134 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1139 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1140 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1141 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1142 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1143 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1144 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1145 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1146 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1148 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1149 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1150 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1152 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1153 BPF_MOV64_IMM(BPF_REG_0, 0),
1156 .errstr_unpriv = "R1 pointer comparison",
1157 .result_unpriv = REJECT,
1163 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1164 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1165 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1166 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1168 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1169 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1171 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1172 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1173 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1174 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1175 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1177 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1178 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1179 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1180 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1181 BPF_MOV64_IMM(BPF_REG_0, 0),
1184 .errstr_unpriv = "R1 pointer comparison",
1185 .result_unpriv = REJECT,
1191 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1193 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1195 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1196 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1197 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1199 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1200 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1201 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1202 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1203 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1204 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1205 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1206 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1207 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1209 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1211 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1213 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1215 BPF_LD_MAP_FD(BPF_REG_1, 0),
1216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1217 BPF_FUNC_map_delete_elem),
1220 .fixup_map1 = { 24 },
1221 .errstr_unpriv = "R1 pointer comparison",
1222 .result_unpriv = REJECT,
1229 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1230 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1231 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1232 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1233 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1234 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1235 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1236 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1237 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1238 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1239 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1240 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1241 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1242 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1243 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1244 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1245 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1246 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1247 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1248 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1249 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1250 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1251 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1253 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1254 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1255 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1256 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1257 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1258 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1259 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1260 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1262 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1263 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1266 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1267 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1268 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1269 BPF_MOV64_IMM(BPF_REG_0, 0),
1272 .errstr_unpriv = "R1 pointer comparison",
1273 .result_unpriv = REJECT,
1279 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1280 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1281 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1282 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1283 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1284 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1285 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1286 BPF_MOV64_IMM(BPF_REG_0, 0),
1287 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1288 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1289 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1290 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1291 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1292 BPF_MOV64_IMM(BPF_REG_0, 0),
1293 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1294 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1295 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1296 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1297 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1298 BPF_MOV64_IMM(BPF_REG_0, 0),
1299 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1300 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1301 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1302 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1303 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1304 BPF_MOV64_IMM(BPF_REG_0, 0),
1305 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1306 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1307 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1308 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1309 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1310 BPF_MOV64_IMM(BPF_REG_0, 0),
1313 .errstr_unpriv = "R1 pointer comparison",
1314 .result_unpriv = REJECT,
1318 "access skb fields ok",
1320 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1321 offsetof(struct __sk_buff, len)),
1322 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1323 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1324 offsetof(struct __sk_buff, mark)),
1325 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1326 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1327 offsetof(struct __sk_buff, pkt_type)),
1328 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1329 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1330 offsetof(struct __sk_buff, queue_mapping)),
1331 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1332 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1333 offsetof(struct __sk_buff, protocol)),
1334 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1335 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1336 offsetof(struct __sk_buff, vlan_present)),
1337 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1338 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1339 offsetof(struct __sk_buff, vlan_tci)),
1340 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1341 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1342 offsetof(struct __sk_buff, napi_id)),
1343 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1349 "access skb fields bad1",
1351 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1354 .errstr = "invalid bpf_context access",
1358 "access skb fields bad2",
1360 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1361 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1362 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1364 BPF_LD_MAP_FD(BPF_REG_1, 0),
1365 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1366 BPF_FUNC_map_lookup_elem),
1367 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1369 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1370 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1371 offsetof(struct __sk_buff, pkt_type)),
1374 .fixup_map1 = { 4 },
1375 .errstr = "different pointers",
1376 .errstr_unpriv = "R1 pointer comparison",
1380 "access skb fields bad3",
1382 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1383 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1384 offsetof(struct __sk_buff, pkt_type)),
1386 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1387 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1389 BPF_LD_MAP_FD(BPF_REG_1, 0),
1390 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1391 BPF_FUNC_map_lookup_elem),
1392 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1394 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1395 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1397 .fixup_map1 = { 6 },
1398 .errstr = "different pointers",
1399 .errstr_unpriv = "R1 pointer comparison",
1403 "access skb fields bad4",
1405 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1406 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1407 offsetof(struct __sk_buff, len)),
1408 BPF_MOV64_IMM(BPF_REG_0, 0),
1410 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1411 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1413 BPF_LD_MAP_FD(BPF_REG_1, 0),
1414 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1415 BPF_FUNC_map_lookup_elem),
1416 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1418 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1419 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1421 .fixup_map1 = { 7 },
1422 .errstr = "different pointers",
1423 .errstr_unpriv = "R1 pointer comparison",
1427 "invalid access __sk_buff family",
1429 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1430 offsetof(struct __sk_buff, family)),
1433 .errstr = "invalid bpf_context access",
1437 "invalid access __sk_buff remote_ip4",
1439 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1440 offsetof(struct __sk_buff, remote_ip4)),
1443 .errstr = "invalid bpf_context access",
1447 "invalid access __sk_buff local_ip4",
1449 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1450 offsetof(struct __sk_buff, local_ip4)),
1453 .errstr = "invalid bpf_context access",
1457 "invalid access __sk_buff remote_ip6",
1459 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1460 offsetof(struct __sk_buff, remote_ip6)),
1463 .errstr = "invalid bpf_context access",
1467 "invalid access __sk_buff local_ip6",
1469 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1470 offsetof(struct __sk_buff, local_ip6)),
1473 .errstr = "invalid bpf_context access",
1477 "invalid access __sk_buff remote_port",
1479 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1480 offsetof(struct __sk_buff, remote_port)),
1483 .errstr = "invalid bpf_context access",
1487 "invalid access __sk_buff remote_port",
1489 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1490 offsetof(struct __sk_buff, local_port)),
1493 .errstr = "invalid bpf_context access",
1497 "valid access __sk_buff family",
1499 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1500 offsetof(struct __sk_buff, family)),
1504 .prog_type = BPF_PROG_TYPE_SK_SKB,
1507 "valid access __sk_buff remote_ip4",
1509 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1510 offsetof(struct __sk_buff, remote_ip4)),
1514 .prog_type = BPF_PROG_TYPE_SK_SKB,
1517 "valid access __sk_buff local_ip4",
1519 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1520 offsetof(struct __sk_buff, local_ip4)),
1524 .prog_type = BPF_PROG_TYPE_SK_SKB,
1527 "valid access __sk_buff remote_ip6",
1529 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1530 offsetof(struct __sk_buff, remote_ip6[0])),
1531 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1532 offsetof(struct __sk_buff, remote_ip6[1])),
1533 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1534 offsetof(struct __sk_buff, remote_ip6[2])),
1535 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1536 offsetof(struct __sk_buff, remote_ip6[3])),
1540 .prog_type = BPF_PROG_TYPE_SK_SKB,
1543 "valid access __sk_buff local_ip6",
1545 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1546 offsetof(struct __sk_buff, local_ip6[0])),
1547 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1548 offsetof(struct __sk_buff, local_ip6[1])),
1549 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1550 offsetof(struct __sk_buff, local_ip6[2])),
1551 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1552 offsetof(struct __sk_buff, local_ip6[3])),
1556 .prog_type = BPF_PROG_TYPE_SK_SKB,
1559 "valid access __sk_buff remote_port",
1561 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1562 offsetof(struct __sk_buff, remote_port)),
1566 .prog_type = BPF_PROG_TYPE_SK_SKB,
/* NOTE(review): this entry loads local_port (see the offsetof below)
 * yet is named "remote_port" — same copy-paste pattern as the
 * "invalid access" pair earlier in this array; the name string should
 * likely read "local_port". Verify against upstream before changing,
 * as the name is emitted in test results.
 */
1569 "valid access __sk_buff remote_port",
1571 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1572 offsetof(struct __sk_buff, local_port)),
1576 .prog_type = BPF_PROG_TYPE_SK_SKB,
1579 "invalid access of tc_classid for SK_SKB",
1581 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1582 offsetof(struct __sk_buff, tc_classid)),
1586 .prog_type = BPF_PROG_TYPE_SK_SKB,
1587 .errstr = "invalid bpf_context access",
1590 "invalid access of skb->mark for SK_SKB",
1592 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1593 offsetof(struct __sk_buff, mark)),
1597 .prog_type = BPF_PROG_TYPE_SK_SKB,
1598 .errstr = "invalid bpf_context access",
1601 "check skb->mark is not writeable by SK_SKB",
1603 BPF_MOV64_IMM(BPF_REG_0, 0),
1604 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1605 offsetof(struct __sk_buff, mark)),
1609 .prog_type = BPF_PROG_TYPE_SK_SKB,
1610 .errstr = "invalid bpf_context access",
1613 "check skb->tc_index is writeable by SK_SKB",
1615 BPF_MOV64_IMM(BPF_REG_0, 0),
1616 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1617 offsetof(struct __sk_buff, tc_index)),
1621 .prog_type = BPF_PROG_TYPE_SK_SKB,
1624 "check skb->priority is writeable by SK_SKB",
1626 BPF_MOV64_IMM(BPF_REG_0, 0),
1627 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1628 offsetof(struct __sk_buff, priority)),
1632 .prog_type = BPF_PROG_TYPE_SK_SKB,
1635 "direct packet read for SK_SKB",
1637 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1638 offsetof(struct __sk_buff, data)),
1639 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1640 offsetof(struct __sk_buff, data_end)),
1641 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1643 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1644 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1645 BPF_MOV64_IMM(BPF_REG_0, 0),
1649 .prog_type = BPF_PROG_TYPE_SK_SKB,
1652 "direct packet write for SK_SKB",
1654 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1655 offsetof(struct __sk_buff, data)),
1656 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1657 offsetof(struct __sk_buff, data_end)),
1658 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1660 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1661 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1662 BPF_MOV64_IMM(BPF_REG_0, 0),
1666 .prog_type = BPF_PROG_TYPE_SK_SKB,
1669 "overlapping checks for direct packet access SK_SKB",
1671 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1672 offsetof(struct __sk_buff, data)),
1673 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1674 offsetof(struct __sk_buff, data_end)),
1675 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1677 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1678 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1680 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1681 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1682 BPF_MOV64_IMM(BPF_REG_0, 0),
1686 .prog_type = BPF_PROG_TYPE_SK_SKB,
1689 "valid access family in SK_MSG",
1691 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1692 offsetof(struct sk_msg_md, family)),
1696 .prog_type = BPF_PROG_TYPE_SK_MSG,
1699 "valid access remote_ip4 in SK_MSG",
1701 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1702 offsetof(struct sk_msg_md, remote_ip4)),
1706 .prog_type = BPF_PROG_TYPE_SK_MSG,
1709 "valid access local_ip4 in SK_MSG",
1711 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1712 offsetof(struct sk_msg_md, local_ip4)),
1716 .prog_type = BPF_PROG_TYPE_SK_MSG,
1719 "valid access remote_port in SK_MSG",
1721 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1722 offsetof(struct sk_msg_md, remote_port)),
1726 .prog_type = BPF_PROG_TYPE_SK_MSG,
1729 "valid access local_port in SK_MSG",
1731 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1732 offsetof(struct sk_msg_md, local_port)),
1736 .prog_type = BPF_PROG_TYPE_SK_MSG,
1739 "valid access remote_ip6 in SK_MSG",
1741 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1742 offsetof(struct sk_msg_md, remote_ip6[0])),
1743 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1744 offsetof(struct sk_msg_md, remote_ip6[1])),
1745 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1746 offsetof(struct sk_msg_md, remote_ip6[2])),
1747 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1748 offsetof(struct sk_msg_md, remote_ip6[3])),
/* NOTE(review): likely bug — this test reads struct sk_msg_md fields
 * but runs with prog_type SK_SKB, whose context is __sk_buff, so the
 * offsets are checked against the wrong context struct. Every other
 * SK_MSG field test in this array uses BPF_PROG_TYPE_SK_MSG; this
 * should presumably match. Confirm against upstream before fixing.
 */
1752 .prog_type = BPF_PROG_TYPE_SK_SKB,
1755 "valid access local_ip6 in SK_MSG",
1757 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1758 offsetof(struct sk_msg_md, local_ip6[0])),
1759 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1760 offsetof(struct sk_msg_md, local_ip6[1])),
1761 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1762 offsetof(struct sk_msg_md, local_ip6[2])),
1763 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1764 offsetof(struct sk_msg_md, local_ip6[3])),
/* NOTE(review): likely bug — sk_msg_md field accesses paired with
 * prog_type SK_SKB (a __sk_buff-context program type). Sibling SK_MSG
 * tests in this array all use BPF_PROG_TYPE_SK_MSG; this entry should
 * presumably do the same. Confirm against upstream before fixing.
 */
1768 .prog_type = BPF_PROG_TYPE_SK_SKB,
1771 "invalid 64B read of family in SK_MSG",
1773 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1774 offsetof(struct sk_msg_md, family)),
1777 .errstr = "invalid bpf_context access",
1779 .prog_type = BPF_PROG_TYPE_SK_MSG,
1782 "invalid read past end of SK_MSG",
1784 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1785 offsetof(struct sk_msg_md, local_port) + 4),
1788 .errstr = "R0 !read_ok",
1790 .prog_type = BPF_PROG_TYPE_SK_MSG,
1793 "invalid read offset in SK_MSG",
1795 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1796 offsetof(struct sk_msg_md, family) + 1),
1799 .errstr = "invalid bpf_context access",
1801 .prog_type = BPF_PROG_TYPE_SK_MSG,
1804 "direct packet read for SK_MSG",
1806 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1807 offsetof(struct sk_msg_md, data)),
1808 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1809 offsetof(struct sk_msg_md, data_end)),
1810 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1811 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1812 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1813 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1814 BPF_MOV64_IMM(BPF_REG_0, 0),
1818 .prog_type = BPF_PROG_TYPE_SK_MSG,
1821 "direct packet write for SK_MSG",
1823 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1824 offsetof(struct sk_msg_md, data)),
1825 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1826 offsetof(struct sk_msg_md, data_end)),
1827 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1829 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1830 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1831 BPF_MOV64_IMM(BPF_REG_0, 0),
1835 .prog_type = BPF_PROG_TYPE_SK_MSG,
1838 "overlapping checks for direct packet access SK_MSG",
1840 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1841 offsetof(struct sk_msg_md, data)),
1842 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1843 offsetof(struct sk_msg_md, data_end)),
1844 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1846 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1847 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1849 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1850 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1851 BPF_MOV64_IMM(BPF_REG_0, 0),
1855 .prog_type = BPF_PROG_TYPE_SK_MSG,
1858 "check skb->mark is not writeable by sockets",
1860 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1861 offsetof(struct __sk_buff, mark)),
1864 .errstr = "invalid bpf_context access",
1865 .errstr_unpriv = "R1 leaks addr",
1869 "check skb->tc_index is not writeable by sockets",
1871 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1872 offsetof(struct __sk_buff, tc_index)),
1875 .errstr = "invalid bpf_context access",
1876 .errstr_unpriv = "R1 leaks addr",
1880 "check cb access: byte",
1882 BPF_MOV64_IMM(BPF_REG_0, 0),
1883 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1884 offsetof(struct __sk_buff, cb[0])),
1885 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1886 offsetof(struct __sk_buff, cb[0]) + 1),
1887 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1888 offsetof(struct __sk_buff, cb[0]) + 2),
1889 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1890 offsetof(struct __sk_buff, cb[0]) + 3),
1891 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1892 offsetof(struct __sk_buff, cb[1])),
1893 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1894 offsetof(struct __sk_buff, cb[1]) + 1),
1895 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1896 offsetof(struct __sk_buff, cb[1]) + 2),
1897 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1898 offsetof(struct __sk_buff, cb[1]) + 3),
1899 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1900 offsetof(struct __sk_buff, cb[2])),
1901 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1902 offsetof(struct __sk_buff, cb[2]) + 1),
1903 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1904 offsetof(struct __sk_buff, cb[2]) + 2),
1905 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1906 offsetof(struct __sk_buff, cb[2]) + 3),
1907 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1908 offsetof(struct __sk_buff, cb[3])),
1909 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1910 offsetof(struct __sk_buff, cb[3]) + 1),
1911 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1912 offsetof(struct __sk_buff, cb[3]) + 2),
1913 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1914 offsetof(struct __sk_buff, cb[3]) + 3),
1915 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1916 offsetof(struct __sk_buff, cb[4])),
1917 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1918 offsetof(struct __sk_buff, cb[4]) + 1),
1919 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1920 offsetof(struct __sk_buff, cb[4]) + 2),
1921 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1922 offsetof(struct __sk_buff, cb[4]) + 3),
1923 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1924 offsetof(struct __sk_buff, cb[0])),
1925 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1926 offsetof(struct __sk_buff, cb[0]) + 1),
1927 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1928 offsetof(struct __sk_buff, cb[0]) + 2),
1929 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1930 offsetof(struct __sk_buff, cb[0]) + 3),
1931 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1932 offsetof(struct __sk_buff, cb[1])),
1933 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1934 offsetof(struct __sk_buff, cb[1]) + 1),
1935 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1936 offsetof(struct __sk_buff, cb[1]) + 2),
1937 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1938 offsetof(struct __sk_buff, cb[1]) + 3),
1939 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1940 offsetof(struct __sk_buff, cb[2])),
1941 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1942 offsetof(struct __sk_buff, cb[2]) + 1),
1943 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1944 offsetof(struct __sk_buff, cb[2]) + 2),
1945 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1946 offsetof(struct __sk_buff, cb[2]) + 3),
1947 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1948 offsetof(struct __sk_buff, cb[3])),
1949 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1950 offsetof(struct __sk_buff, cb[3]) + 1),
1951 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1952 offsetof(struct __sk_buff, cb[3]) + 2),
1953 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1954 offsetof(struct __sk_buff, cb[3]) + 3),
1955 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1956 offsetof(struct __sk_buff, cb[4])),
1957 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1958 offsetof(struct __sk_buff, cb[4]) + 1),
1959 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1960 offsetof(struct __sk_buff, cb[4]) + 2),
1961 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1962 offsetof(struct __sk_buff, cb[4]) + 3),
1968 "__sk_buff->hash, offset 0, byte store not permitted",
1970 BPF_MOV64_IMM(BPF_REG_0, 0),
1971 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1972 offsetof(struct __sk_buff, hash)),
1975 .errstr = "invalid bpf_context access",
1979 "__sk_buff->tc_index, offset 3, byte store not permitted",
1981 BPF_MOV64_IMM(BPF_REG_0, 0),
1982 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1983 offsetof(struct __sk_buff, tc_index) + 3),
1986 .errstr = "invalid bpf_context access",
1990 "check skb->hash byte load permitted",
1992 BPF_MOV64_IMM(BPF_REG_0, 0),
1993 #if __BYTE_ORDER == __LITTLE_ENDIAN
1994 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1995 offsetof(struct __sk_buff, hash)),
1997 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1998 offsetof(struct __sk_buff, hash) + 3),
2005 "check skb->hash byte load permitted 1",
2007 BPF_MOV64_IMM(BPF_REG_0, 0),
2008 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2009 offsetof(struct __sk_buff, hash) + 1),
2015 "check skb->hash byte load permitted 2",
2017 BPF_MOV64_IMM(BPF_REG_0, 0),
2018 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2019 offsetof(struct __sk_buff, hash) + 2),
2025 "check skb->hash byte load permitted 3",
2027 BPF_MOV64_IMM(BPF_REG_0, 0),
2028 #if __BYTE_ORDER == __LITTLE_ENDIAN
2029 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2030 offsetof(struct __sk_buff, hash) + 3),
2032 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2033 offsetof(struct __sk_buff, hash)),
2040 "check cb access: byte, wrong type",
2042 BPF_MOV64_IMM(BPF_REG_0, 0),
2043 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2044 offsetof(struct __sk_buff, cb[0])),
2047 .errstr = "invalid bpf_context access",
2049 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2052 "check cb access: half",
2054 BPF_MOV64_IMM(BPF_REG_0, 0),
2055 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2056 offsetof(struct __sk_buff, cb[0])),
2057 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2058 offsetof(struct __sk_buff, cb[0]) + 2),
2059 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2060 offsetof(struct __sk_buff, cb[1])),
2061 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2062 offsetof(struct __sk_buff, cb[1]) + 2),
2063 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2064 offsetof(struct __sk_buff, cb[2])),
2065 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2066 offsetof(struct __sk_buff, cb[2]) + 2),
2067 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2068 offsetof(struct __sk_buff, cb[3])),
2069 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2070 offsetof(struct __sk_buff, cb[3]) + 2),
2071 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2072 offsetof(struct __sk_buff, cb[4])),
2073 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2074 offsetof(struct __sk_buff, cb[4]) + 2),
2075 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2076 offsetof(struct __sk_buff, cb[0])),
2077 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2078 offsetof(struct __sk_buff, cb[0]) + 2),
2079 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2080 offsetof(struct __sk_buff, cb[1])),
2081 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2082 offsetof(struct __sk_buff, cb[1]) + 2),
2083 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2084 offsetof(struct __sk_buff, cb[2])),
2085 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2086 offsetof(struct __sk_buff, cb[2]) + 2),
2087 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2088 offsetof(struct __sk_buff, cb[3])),
2089 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2090 offsetof(struct __sk_buff, cb[3]) + 2),
2091 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2092 offsetof(struct __sk_buff, cb[4])),
2093 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2094 offsetof(struct __sk_buff, cb[4]) + 2),
2100 "check cb access: half, unaligned",
2102 BPF_MOV64_IMM(BPF_REG_0, 0),
2103 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2104 offsetof(struct __sk_buff, cb[0]) + 1),
2107 .errstr = "misaligned context access",
2109 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2112 "check __sk_buff->hash, offset 0, half store not permitted",
2114 BPF_MOV64_IMM(BPF_REG_0, 0),
2115 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2116 offsetof(struct __sk_buff, hash)),
2119 .errstr = "invalid bpf_context access",
2123 "check __sk_buff->tc_index, offset 2, half store not permitted",
2125 BPF_MOV64_IMM(BPF_REG_0, 0),
2126 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2127 offsetof(struct __sk_buff, tc_index) + 2),
2130 .errstr = "invalid bpf_context access",
2134 "check skb->hash half load permitted",
2136 BPF_MOV64_IMM(BPF_REG_0, 0),
2137 #if __BYTE_ORDER == __LITTLE_ENDIAN
2138 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2139 offsetof(struct __sk_buff, hash)),
2141 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2142 offsetof(struct __sk_buff, hash) + 2),
2149 "check skb->hash half load permitted 2",
2151 BPF_MOV64_IMM(BPF_REG_0, 0),
2152 #if __BYTE_ORDER == __LITTLE_ENDIAN
2153 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2154 offsetof(struct __sk_buff, hash) + 2),
2156 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2157 offsetof(struct __sk_buff, hash)),
2164 "check skb->hash half load not permitted, unaligned 1",
2166 BPF_MOV64_IMM(BPF_REG_0, 0),
2167 #if __BYTE_ORDER == __LITTLE_ENDIAN
2168 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2169 offsetof(struct __sk_buff, hash) + 1),
2171 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2172 offsetof(struct __sk_buff, hash) + 3),
2176 .errstr = "invalid bpf_context access",
2180 "check skb->hash half load not permitted, unaligned 3",
2182 BPF_MOV64_IMM(BPF_REG_0, 0),
2183 #if __BYTE_ORDER == __LITTLE_ENDIAN
2184 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2185 offsetof(struct __sk_buff, hash) + 3),
2187 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2188 offsetof(struct __sk_buff, hash) + 1),
2192 .errstr = "invalid bpf_context access",
2196 "check cb access: half, wrong type",
2198 BPF_MOV64_IMM(BPF_REG_0, 0),
2199 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2200 offsetof(struct __sk_buff, cb[0])),
2203 .errstr = "invalid bpf_context access",
2205 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2208 "check cb access: word",
2210 BPF_MOV64_IMM(BPF_REG_0, 0),
2211 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2212 offsetof(struct __sk_buff, cb[0])),
2213 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2214 offsetof(struct __sk_buff, cb[1])),
2215 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2216 offsetof(struct __sk_buff, cb[2])),
2217 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2218 offsetof(struct __sk_buff, cb[3])),
2219 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2220 offsetof(struct __sk_buff, cb[4])),
2221 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2222 offsetof(struct __sk_buff, cb[0])),
2223 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2224 offsetof(struct __sk_buff, cb[1])),
2225 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2226 offsetof(struct __sk_buff, cb[2])),
2227 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2228 offsetof(struct __sk_buff, cb[3])),
2229 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2230 offsetof(struct __sk_buff, cb[4])),
2236 "check cb access: word, unaligned 1",
2238 BPF_MOV64_IMM(BPF_REG_0, 0),
2239 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2240 offsetof(struct __sk_buff, cb[0]) + 2),
2243 .errstr = "misaligned context access",
2245 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2248 "check cb access: word, unaligned 2",
2250 BPF_MOV64_IMM(BPF_REG_0, 0),
2251 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2252 offsetof(struct __sk_buff, cb[4]) + 1),
2255 .errstr = "misaligned context access",
2257 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2260 "check cb access: word, unaligned 3",
2262 BPF_MOV64_IMM(BPF_REG_0, 0),
2263 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2264 offsetof(struct __sk_buff, cb[4]) + 2),
2267 .errstr = "misaligned context access",
2269 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2272 "check cb access: word, unaligned 4",
2274 BPF_MOV64_IMM(BPF_REG_0, 0),
2275 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2276 offsetof(struct __sk_buff, cb[4]) + 3),
2279 .errstr = "misaligned context access",
2281 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2284 "check cb access: double",
2286 BPF_MOV64_IMM(BPF_REG_0, 0),
2287 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2288 offsetof(struct __sk_buff, cb[0])),
2289 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2290 offsetof(struct __sk_buff, cb[2])),
2291 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2292 offsetof(struct __sk_buff, cb[0])),
2293 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2294 offsetof(struct __sk_buff, cb[2])),
2300 "check cb access: double, unaligned 1",
2302 BPF_MOV64_IMM(BPF_REG_0, 0),
2303 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2304 offsetof(struct __sk_buff, cb[1])),
2307 .errstr = "misaligned context access",
2309 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2312 "check cb access: double, unaligned 2",
2314 BPF_MOV64_IMM(BPF_REG_0, 0),
2315 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2316 offsetof(struct __sk_buff, cb[3])),
2319 .errstr = "misaligned context access",
2321 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2324 "check cb access: double, oob 1",
2326 BPF_MOV64_IMM(BPF_REG_0, 0),
2327 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2328 offsetof(struct __sk_buff, cb[4])),
2331 .errstr = "invalid bpf_context access",
2335 "check cb access: double, oob 2",
2337 BPF_MOV64_IMM(BPF_REG_0, 0),
2338 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2339 offsetof(struct __sk_buff, cb[4])),
2342 .errstr = "invalid bpf_context access",
2346 "check __sk_buff->ifindex dw store not permitted",
2348 BPF_MOV64_IMM(BPF_REG_0, 0),
2349 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2350 offsetof(struct __sk_buff, ifindex)),
2353 .errstr = "invalid bpf_context access",
2357 "check __sk_buff->ifindex dw load not permitted",
2359 BPF_MOV64_IMM(BPF_REG_0, 0),
2360 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2361 offsetof(struct __sk_buff, ifindex)),
2364 .errstr = "invalid bpf_context access",
2368 "check cb access: double, wrong type",
2370 BPF_MOV64_IMM(BPF_REG_0, 0),
2371 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2372 offsetof(struct __sk_buff, cb[0])),
2375 .errstr = "invalid bpf_context access",
2377 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2380 "check out of range skb->cb access",
2382 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2383 offsetof(struct __sk_buff, cb[0]) + 256),
2386 .errstr = "invalid bpf_context access",
2387 .errstr_unpriv = "",
2389 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2392 "write skb fields from socket prog",
2394 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2395 offsetof(struct __sk_buff, cb[4])),
2396 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2397 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2398 offsetof(struct __sk_buff, mark)),
2399 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2400 offsetof(struct __sk_buff, tc_index)),
2401 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2402 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2403 offsetof(struct __sk_buff, cb[0])),
2404 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2405 offsetof(struct __sk_buff, cb[2])),
2409 .errstr_unpriv = "R1 leaks addr",
2410 .result_unpriv = REJECT,
2413 "write skb fields from tc_cls_act prog",
2415 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2416 offsetof(struct __sk_buff, cb[0])),
2417 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2418 offsetof(struct __sk_buff, mark)),
2419 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2420 offsetof(struct __sk_buff, tc_index)),
2421 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2422 offsetof(struct __sk_buff, tc_index)),
2423 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2424 offsetof(struct __sk_buff, cb[3])),
2427 .errstr_unpriv = "",
2428 .result_unpriv = REJECT,
2430 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2433 "PTR_TO_STACK store/load",
2435 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2437 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2438 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2442 .retval = 0xfaceb00c,
2445 "PTR_TO_STACK store/load - bad alignment on off",
2447 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2449 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2450 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2454 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2457 "PTR_TO_STACK store/load - bad alignment on reg",
2459 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2461 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2462 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2466 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2469 "PTR_TO_STACK store/load - out of bounds low",
2471 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2473 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2474 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2478 .errstr = "invalid stack off=-79992 size=8",
2479 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2482 "PTR_TO_STACK store/load - out of bounds high",
2484 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2486 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2487 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2491 .errstr = "invalid stack off=0 size=8",
2494 "unpriv: return pointer",
2496 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2500 .result_unpriv = REJECT,
2501 .errstr_unpriv = "R0 leaks addr",
2502 .retval = POINTER_VALUE,
2505 "unpriv: add const to pointer",
2507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2508 BPF_MOV64_IMM(BPF_REG_0, 0),
2514 "unpriv: add pointer to pointer",
2516 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2517 BPF_MOV64_IMM(BPF_REG_0, 0),
2521 .errstr = "R1 pointer += pointer",
2524 "unpriv: neg pointer",
2526 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2527 BPF_MOV64_IMM(BPF_REG_0, 0),
2531 .result_unpriv = REJECT,
2532 .errstr_unpriv = "R1 pointer arithmetic",
2535 "unpriv: cmp pointer with const",
2537 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2538 BPF_MOV64_IMM(BPF_REG_0, 0),
2542 .result_unpriv = REJECT,
2543 .errstr_unpriv = "R1 pointer comparison",
2546 "unpriv: cmp pointer with pointer",
2548 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2549 BPF_MOV64_IMM(BPF_REG_0, 0),
2553 .result_unpriv = REJECT,
2554 .errstr_unpriv = "R10 pointer comparison",
2557 "unpriv: check that printk is disallowed",
2559 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2560 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2562 BPF_MOV64_IMM(BPF_REG_2, 8),
2563 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2564 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2565 BPF_FUNC_trace_printk),
2566 BPF_MOV64_IMM(BPF_REG_0, 0),
2569 .errstr_unpriv = "unknown func bpf_trace_printk#6",
2570 .result_unpriv = REJECT,
2574 "unpriv: pass pointer to helper function",
2576 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2577 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2579 BPF_LD_MAP_FD(BPF_REG_1, 0),
2580 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2581 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2582 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2583 BPF_FUNC_map_update_elem),
2584 BPF_MOV64_IMM(BPF_REG_0, 0),
2587 .fixup_map1 = { 3 },
2588 .errstr_unpriv = "R4 leaks addr",
2589 .result_unpriv = REJECT,
2593 "unpriv: indirectly pass pointer on stack to helper function",
2595 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2596 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2598 BPF_LD_MAP_FD(BPF_REG_1, 0),
2599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2600 BPF_FUNC_map_lookup_elem),
2601 BPF_MOV64_IMM(BPF_REG_0, 0),
2604 .fixup_map1 = { 3 },
2605 .errstr = "invalid indirect read from stack off -8+0 size 8",
2609 "unpriv: mangle pointer on stack 1",
2611 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2612 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2613 BPF_MOV64_IMM(BPF_REG_0, 0),
2616 .errstr_unpriv = "attempt to corrupt spilled",
2617 .result_unpriv = REJECT,
2621 "unpriv: mangle pointer on stack 2",
2623 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2624 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2625 BPF_MOV64_IMM(BPF_REG_0, 0),
2628 .errstr_unpriv = "attempt to corrupt spilled",
2629 .result_unpriv = REJECT,
2633 "unpriv: read pointer from stack in small chunks",
2635 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2636 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2637 BPF_MOV64_IMM(BPF_REG_0, 0),
2640 .errstr = "invalid size",
2644 "unpriv: write pointer into ctx",
2646 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2647 BPF_MOV64_IMM(BPF_REG_0, 0),
2650 .errstr_unpriv = "R1 leaks addr",
2651 .result_unpriv = REJECT,
2652 .errstr = "invalid bpf_context access",
2656 "unpriv: spill/fill of ctx",
2658 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2660 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2661 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2662 BPF_MOV64_IMM(BPF_REG_0, 0),
2668 "unpriv: spill/fill of ctx 2",
2670 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2672 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2673 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2675 BPF_FUNC_get_hash_recalc),
2676 BPF_MOV64_IMM(BPF_REG_0, 0),
2680 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2683 "unpriv: spill/fill of ctx 3",
2685 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2687 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2688 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2689 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2690 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2691 BPF_FUNC_get_hash_recalc),
2695 .errstr = "R1 type=fp expected=ctx",
2696 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2699 "unpriv: spill/fill of ctx 4",
2701 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2703 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2704 BPF_MOV64_IMM(BPF_REG_0, 1),
2705 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2707 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2708 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2709 BPF_FUNC_get_hash_recalc),
2713 .errstr = "R1 type=inv expected=ctx",
2714 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2717 "unpriv: spill/fill of different pointers stx",
2719 BPF_MOV64_IMM(BPF_REG_3, 42),
2720 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2722 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2723 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2725 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2726 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2727 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2728 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2729 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2730 offsetof(struct __sk_buff, mark)),
2731 BPF_MOV64_IMM(BPF_REG_0, 0),
2735 .errstr = "same insn cannot be used with different pointers",
2736 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2739 "unpriv: spill/fill of different pointers ldx",
2741 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2743 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2744 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2746 -(__s32)offsetof(struct bpf_perf_event_data,
2747 sample_period) - 8),
2748 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2749 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2750 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2751 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2752 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2753 offsetof(struct bpf_perf_event_data,
2755 BPF_MOV64_IMM(BPF_REG_0, 0),
2759 .errstr = "same insn cannot be used with different pointers",
2760 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2763 "unpriv: write pointer into map elem value",
2765 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2766 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2768 BPF_LD_MAP_FD(BPF_REG_1, 0),
2769 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2770 BPF_FUNC_map_lookup_elem),
2771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2772 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2775 .fixup_map1 = { 3 },
2776 .errstr_unpriv = "R0 leaks addr",
2777 .result_unpriv = REJECT,
2781 "alu32: mov u32 const",
2783 BPF_MOV32_IMM(BPF_REG_7, 0),
2784 BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
2785 BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
2786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2787 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
2794 "unpriv: partial copy of pointer",
2796 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2797 BPF_MOV64_IMM(BPF_REG_0, 0),
2800 .errstr_unpriv = "R10 partial copy",
2801 .result_unpriv = REJECT,
2805 "unpriv: pass pointer to tail_call",
2807 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2808 BPF_LD_MAP_FD(BPF_REG_2, 0),
2809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2810 BPF_FUNC_tail_call),
2811 BPF_MOV64_IMM(BPF_REG_0, 0),
2814 .fixup_prog1 = { 1 },
2815 .errstr_unpriv = "R3 leaks addr into helper",
2816 .result_unpriv = REJECT,
2820 "unpriv: cmp map pointer with zero",
2822 BPF_MOV64_IMM(BPF_REG_1, 0),
2823 BPF_LD_MAP_FD(BPF_REG_1, 0),
2824 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2825 BPF_MOV64_IMM(BPF_REG_0, 0),
2828 .fixup_map1 = { 1 },
2829 .errstr_unpriv = "R1 pointer comparison",
2830 .result_unpriv = REJECT,
2834 "unpriv: write into frame pointer",
2836 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2837 BPF_MOV64_IMM(BPF_REG_0, 0),
2840 .errstr = "frame pointer is read only",
2844 "unpriv: spill/fill frame pointer",
2846 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2847 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2848 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2849 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2850 BPF_MOV64_IMM(BPF_REG_0, 0),
2853 .errstr = "frame pointer is read only",
2857 "unpriv: cmp of frame pointer",
2859 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2860 BPF_MOV64_IMM(BPF_REG_0, 0),
2863 .errstr_unpriv = "R10 pointer comparison",
2864 .result_unpriv = REJECT,
2868 "unpriv: adding of fp, reg",
2870 BPF_MOV64_IMM(BPF_REG_0, 0),
2871 BPF_MOV64_IMM(BPF_REG_1, 0),
2872 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2873 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2876 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2877 .result_unpriv = REJECT,
2881 "unpriv: adding of fp, imm",
2883 BPF_MOV64_IMM(BPF_REG_0, 0),
2884 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
2886 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2889 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2890 .result_unpriv = REJECT,
2894 "unpriv: cmp of stack pointer",
2896 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2898 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2899 BPF_MOV64_IMM(BPF_REG_0, 0),
2902 .errstr_unpriv = "R2 pointer comparison",
2903 .result_unpriv = REJECT,
2907 "runtime/jit: tail_call within bounds, prog once",
2909 BPF_MOV64_IMM(BPF_REG_3, 0),
2910 BPF_LD_MAP_FD(BPF_REG_2, 0),
2911 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2912 BPF_FUNC_tail_call),
2913 BPF_MOV64_IMM(BPF_REG_0, 1),
2916 .fixup_prog1 = { 1 },
2921 "runtime/jit: tail_call within bounds, prog loop",
2923 BPF_MOV64_IMM(BPF_REG_3, 1),
2924 BPF_LD_MAP_FD(BPF_REG_2, 0),
2925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2926 BPF_FUNC_tail_call),
2927 BPF_MOV64_IMM(BPF_REG_0, 1),
2930 .fixup_prog1 = { 1 },
2935 "runtime/jit: tail_call within bounds, no prog",
2937 BPF_MOV64_IMM(BPF_REG_3, 2),
2938 BPF_LD_MAP_FD(BPF_REG_2, 0),
2939 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2940 BPF_FUNC_tail_call),
2941 BPF_MOV64_IMM(BPF_REG_0, 1),
2944 .fixup_prog1 = { 1 },
2949 "runtime/jit: tail_call out of bounds",
2951 BPF_MOV64_IMM(BPF_REG_3, 256),
2952 BPF_LD_MAP_FD(BPF_REG_2, 0),
2953 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2954 BPF_FUNC_tail_call),
2955 BPF_MOV64_IMM(BPF_REG_0, 2),
2958 .fixup_prog1 = { 1 },
2963 "runtime/jit: pass negative index to tail_call",
2965 BPF_MOV64_IMM(BPF_REG_3, -1),
2966 BPF_LD_MAP_FD(BPF_REG_2, 0),
2967 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2968 BPF_FUNC_tail_call),
2969 BPF_MOV64_IMM(BPF_REG_0, 2),
2972 .fixup_prog1 = { 1 },
2977 "runtime/jit: pass > 32bit index to tail_call",
2979 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
2980 BPF_LD_MAP_FD(BPF_REG_2, 0),
2981 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2982 BPF_FUNC_tail_call),
2983 BPF_MOV64_IMM(BPF_REG_0, 2),
2986 .fixup_prog1 = { 2 },
2989 /* Verifier rewrite for unpriv skips tail call here. */
2993 "stack pointer arithmetic",
2995 BPF_MOV64_IMM(BPF_REG_1, 4),
2996 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2997 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3000 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3001 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3002 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3003 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3005 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3006 BPF_MOV64_IMM(BPF_REG_0, 0),
3012 "raw_stack: no skb_load_bytes",
3014 BPF_MOV64_IMM(BPF_REG_2, 4),
3015 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3016 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3017 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3018 BPF_MOV64_IMM(BPF_REG_4, 8),
3019 /* Call to skb_load_bytes() omitted. */
3020 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3024 .errstr = "invalid read from stack off -8+0 size 8",
3025 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3028 "raw_stack: skb_load_bytes, negative len",
3030 BPF_MOV64_IMM(BPF_REG_2, 4),
3031 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3033 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3034 BPF_MOV64_IMM(BPF_REG_4, -8),
3035 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3036 BPF_FUNC_skb_load_bytes),
3037 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3041 .errstr = "R4 min value is negative",
3042 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3045 "raw_stack: skb_load_bytes, negative len 2",
3047 BPF_MOV64_IMM(BPF_REG_2, 4),
3048 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3050 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3051 BPF_MOV64_IMM(BPF_REG_4, ~0),
3052 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3053 BPF_FUNC_skb_load_bytes),
3054 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3058 .errstr = "R4 min value is negative",
3059 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3062 "raw_stack: skb_load_bytes, zero len",
3064 BPF_MOV64_IMM(BPF_REG_2, 4),
3065 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3067 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3068 BPF_MOV64_IMM(BPF_REG_4, 0),
3069 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3070 BPF_FUNC_skb_load_bytes),
3071 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3075 .errstr = "invalid stack type R3",
3076 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3079 "raw_stack: skb_load_bytes, no init",
3081 BPF_MOV64_IMM(BPF_REG_2, 4),
3082 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3084 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3085 BPF_MOV64_IMM(BPF_REG_4, 8),
3086 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3087 BPF_FUNC_skb_load_bytes),
3088 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3092 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3095 "raw_stack: skb_load_bytes, init",
3097 BPF_MOV64_IMM(BPF_REG_2, 4),
3098 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3100 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3101 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3102 BPF_MOV64_IMM(BPF_REG_4, 8),
3103 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3104 BPF_FUNC_skb_load_bytes),
3105 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3109 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3112 "raw_stack: skb_load_bytes, spilled regs around bounds",
3114 BPF_MOV64_IMM(BPF_REG_2, 4),
3115 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3116 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3117 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3118 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3119 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3120 BPF_MOV64_IMM(BPF_REG_4, 8),
3121 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3122 BPF_FUNC_skb_load_bytes),
3123 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3124 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3125 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3126 offsetof(struct __sk_buff, mark)),
3127 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3128 offsetof(struct __sk_buff, priority)),
3129 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3133 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3136 "raw_stack: skb_load_bytes, spilled regs corruption",
3138 BPF_MOV64_IMM(BPF_REG_2, 4),
3139 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3141 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3142 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3143 BPF_MOV64_IMM(BPF_REG_4, 8),
3144 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3145 BPF_FUNC_skb_load_bytes),
3146 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3147 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3148 offsetof(struct __sk_buff, mark)),
3152 .errstr = "R0 invalid mem access 'inv'",
3153 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3156 "raw_stack: skb_load_bytes, spilled regs corruption 2",
3158 BPF_MOV64_IMM(BPF_REG_2, 4),
3159 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3161 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3162 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3163 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3164 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3165 BPF_MOV64_IMM(BPF_REG_4, 8),
3166 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3167 BPF_FUNC_skb_load_bytes),
3168 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3169 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3170 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3171 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3172 offsetof(struct __sk_buff, mark)),
3173 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3174 offsetof(struct __sk_buff, priority)),
3175 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3176 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3177 offsetof(struct __sk_buff, pkt_type)),
3178 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3182 .errstr = "R3 invalid mem access 'inv'",
3183 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3186 "raw_stack: skb_load_bytes, spilled regs + data",
3188 BPF_MOV64_IMM(BPF_REG_2, 4),
3189 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3191 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3192 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3193 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3194 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3195 BPF_MOV64_IMM(BPF_REG_4, 8),
3196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3197 BPF_FUNC_skb_load_bytes),
3198 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3199 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3200 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3201 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3202 offsetof(struct __sk_buff, mark)),
3203 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3204 offsetof(struct __sk_buff, priority)),
3205 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3206 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3210 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3213 "raw_stack: skb_load_bytes, invalid access 1",
3215 BPF_MOV64_IMM(BPF_REG_2, 4),
3216 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3218 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3219 BPF_MOV64_IMM(BPF_REG_4, 8),
3220 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3221 BPF_FUNC_skb_load_bytes),
3222 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3226 .errstr = "invalid stack type R3 off=-513 access_size=8",
3227 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3230 "raw_stack: skb_load_bytes, invalid access 2",
3232 BPF_MOV64_IMM(BPF_REG_2, 4),
3233 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3235 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3236 BPF_MOV64_IMM(BPF_REG_4, 8),
3237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3238 BPF_FUNC_skb_load_bytes),
3239 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3243 .errstr = "invalid stack type R3 off=-1 access_size=8",
3244 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3247 "raw_stack: skb_load_bytes, invalid access 3",
3249 BPF_MOV64_IMM(BPF_REG_2, 4),
3250 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3252 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3253 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3254 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3255 BPF_FUNC_skb_load_bytes),
3256 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3260 .errstr = "R4 min value is negative",
3261 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3264 "raw_stack: skb_load_bytes, invalid access 4",
3266 BPF_MOV64_IMM(BPF_REG_2, 4),
3267 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3268 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3269 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3270 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3271 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3272 BPF_FUNC_skb_load_bytes),
3273 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3277 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3278 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3281 "raw_stack: skb_load_bytes, invalid access 5",
3283 BPF_MOV64_IMM(BPF_REG_2, 4),
3284 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3285 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3286 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3287 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3288 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3289 BPF_FUNC_skb_load_bytes),
3290 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3294 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3295 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3298 "raw_stack: skb_load_bytes, invalid access 6",
3300 BPF_MOV64_IMM(BPF_REG_2, 4),
3301 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3302 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3303 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3304 BPF_MOV64_IMM(BPF_REG_4, 0),
3305 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3306 BPF_FUNC_skb_load_bytes),
3307 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3311 .errstr = "invalid stack type R3 off=-512 access_size=0",
3312 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3315 "raw_stack: skb_load_bytes, large access",
3317 BPF_MOV64_IMM(BPF_REG_2, 4),
3318 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3320 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3321 BPF_MOV64_IMM(BPF_REG_4, 512),
3322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3323 BPF_FUNC_skb_load_bytes),
3324 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3328 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3331 "context stores via ST",
3333 BPF_MOV64_IMM(BPF_REG_0, 0),
3334 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3337 .errstr = "BPF_ST stores into R1 context is not allowed",
3339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3342 "context stores via XADD",
3344 BPF_MOV64_IMM(BPF_REG_0, 0),
3345 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3346 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3349 .errstr = "BPF_XADD stores into R1 context is not allowed",
3351 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3354 "direct packet access: test1",
3356 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3357 offsetof(struct __sk_buff, data)),
3358 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3359 offsetof(struct __sk_buff, data_end)),
3360 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3362 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3363 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3364 BPF_MOV64_IMM(BPF_REG_0, 0),
3368 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3371 "direct packet access: test2",
3373 BPF_MOV64_IMM(BPF_REG_0, 1),
3374 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3375 offsetof(struct __sk_buff, data_end)),
3376 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3377 offsetof(struct __sk_buff, data)),
3378 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3380 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3381 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3382 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3383 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3384 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3385 offsetof(struct __sk_buff, data)),
3386 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3387 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3388 offsetof(struct __sk_buff, len)),
3389 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3390 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3391 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3392 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3394 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3395 offsetof(struct __sk_buff, data_end)),
3396 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3397 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3398 BPF_MOV64_IMM(BPF_REG_0, 0),
3402 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3405 "direct packet access: test3",
3407 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3408 offsetof(struct __sk_buff, data)),
3409 BPF_MOV64_IMM(BPF_REG_0, 0),
3412 .errstr = "invalid bpf_context access off=76",
3414 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3417 "direct packet access: test4 (write)",
3419 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3420 offsetof(struct __sk_buff, data)),
3421 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3422 offsetof(struct __sk_buff, data_end)),
3423 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3425 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3426 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3427 BPF_MOV64_IMM(BPF_REG_0, 0),
3431 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3434 "direct packet access: test5 (pkt_end >= reg, good access)",
3436 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3437 offsetof(struct __sk_buff, data)),
3438 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3439 offsetof(struct __sk_buff, data_end)),
3440 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3441 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3442 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3443 BPF_MOV64_IMM(BPF_REG_0, 1),
3445 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3446 BPF_MOV64_IMM(BPF_REG_0, 0),
3450 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3453 "direct packet access: test6 (pkt_end >= reg, bad access)",
3455 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3456 offsetof(struct __sk_buff, data)),
3457 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3458 offsetof(struct __sk_buff, data_end)),
3459 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3461 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3462 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3463 BPF_MOV64_IMM(BPF_REG_0, 1),
3465 BPF_MOV64_IMM(BPF_REG_0, 0),
3468 .errstr = "invalid access to packet",
3470 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3473 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3475 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3476 offsetof(struct __sk_buff, data)),
3477 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3478 offsetof(struct __sk_buff, data_end)),
3479 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3481 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3482 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3483 BPF_MOV64_IMM(BPF_REG_0, 1),
3485 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3486 BPF_MOV64_IMM(BPF_REG_0, 0),
3489 .errstr = "invalid access to packet",
3491 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3494 "direct packet access: test8 (double test, variant 1)",
3496 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3497 offsetof(struct __sk_buff, data)),
3498 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3499 offsetof(struct __sk_buff, data_end)),
3500 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3502 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3503 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3504 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3505 BPF_MOV64_IMM(BPF_REG_0, 1),
3507 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3508 BPF_MOV64_IMM(BPF_REG_0, 0),
3512 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3515 "direct packet access: test9 (double test, variant 2)",
3517 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3518 offsetof(struct __sk_buff, data)),
3519 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3520 offsetof(struct __sk_buff, data_end)),
3521 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3523 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3524 BPF_MOV64_IMM(BPF_REG_0, 1),
3526 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3527 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3528 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3529 BPF_MOV64_IMM(BPF_REG_0, 0),
3533 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3536 "direct packet access: test10 (write invalid)",
3538 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3539 offsetof(struct __sk_buff, data)),
3540 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3541 offsetof(struct __sk_buff, data_end)),
3542 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3544 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3545 BPF_MOV64_IMM(BPF_REG_0, 0),
3547 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3548 BPF_MOV64_IMM(BPF_REG_0, 0),
3551 .errstr = "invalid access to packet",
3553 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3556 "direct packet access: test11 (shift, good access)",
3558 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3559 offsetof(struct __sk_buff, data)),
3560 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3561 offsetof(struct __sk_buff, data_end)),
3562 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3563 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3564 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3565 BPF_MOV64_IMM(BPF_REG_3, 144),
3566 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3568 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3569 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3570 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3571 BPF_MOV64_IMM(BPF_REG_0, 1),
3573 BPF_MOV64_IMM(BPF_REG_0, 0),
3577 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3581 "direct packet access: test12 (and, good access)",
3583 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3584 offsetof(struct __sk_buff, data)),
3585 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3586 offsetof(struct __sk_buff, data_end)),
3587 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3589 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3590 BPF_MOV64_IMM(BPF_REG_3, 144),
3591 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3593 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3594 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3595 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3596 BPF_MOV64_IMM(BPF_REG_0, 1),
3598 BPF_MOV64_IMM(BPF_REG_0, 0),
3602 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3606 "direct packet access: test13 (branches, good access)",
3608 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3609 offsetof(struct __sk_buff, data)),
3610 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3611 offsetof(struct __sk_buff, data_end)),
3612 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3614 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3615 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3616 offsetof(struct __sk_buff, mark)),
3617 BPF_MOV64_IMM(BPF_REG_4, 1),
3618 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3619 BPF_MOV64_IMM(BPF_REG_3, 14),
3620 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3621 BPF_MOV64_IMM(BPF_REG_3, 24),
3622 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3624 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3625 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3626 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3627 BPF_MOV64_IMM(BPF_REG_0, 1),
3629 BPF_MOV64_IMM(BPF_REG_0, 0),
3633 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3637 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3639 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3640 offsetof(struct __sk_buff, data)),
3641 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3642 offsetof(struct __sk_buff, data_end)),
3643 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3645 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3646 BPF_MOV64_IMM(BPF_REG_5, 12),
3647 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3648 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3649 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3650 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3651 BPF_MOV64_IMM(BPF_REG_0, 1),
3653 BPF_MOV64_IMM(BPF_REG_0, 0),
3657 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3661 "direct packet access: test15 (spill with xadd)",
3663 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3664 offsetof(struct __sk_buff, data)),
3665 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3666 offsetof(struct __sk_buff, data_end)),
3667 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3669 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3670 BPF_MOV64_IMM(BPF_REG_5, 4096),
3671 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3673 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3674 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3675 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3676 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3677 BPF_MOV64_IMM(BPF_REG_0, 0),
3680 .errstr = "R2 invalid mem access 'inv'",
3682 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3685 "direct packet access: test16 (arith on data_end)",
3687 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3688 offsetof(struct __sk_buff, data)),
3689 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3690 offsetof(struct __sk_buff, data_end)),
3691 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3692 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3693 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3694 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3695 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3696 BPF_MOV64_IMM(BPF_REG_0, 0),
3699 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
3701 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3704 "direct packet access: test17 (pruning, alignment)",
3706 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3707 offsetof(struct __sk_buff, data)),
3708 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3709 offsetof(struct __sk_buff, data_end)),
3710 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3711 offsetof(struct __sk_buff, mark)),
3712 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3714 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3715 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3716 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3717 BPF_MOV64_IMM(BPF_REG_0, 0),
3719 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3722 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3724 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3725 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3728 "direct packet access: test18 (imm += pkt_ptr, 1)",
3730 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3731 offsetof(struct __sk_buff, data)),
3732 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3733 offsetof(struct __sk_buff, data_end)),
3734 BPF_MOV64_IMM(BPF_REG_0, 8),
3735 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3736 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3737 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3738 BPF_MOV64_IMM(BPF_REG_0, 0),
3742 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3745 "direct packet access: test19 (imm += pkt_ptr, 2)",
3747 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3748 offsetof(struct __sk_buff, data)),
3749 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3750 offsetof(struct __sk_buff, data_end)),
3751 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3753 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3754 BPF_MOV64_IMM(BPF_REG_4, 4),
3755 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3756 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3757 BPF_MOV64_IMM(BPF_REG_0, 0),
3761 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3764 "direct packet access: test20 (x += pkt_ptr, 1)",
3766 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3767 offsetof(struct __sk_buff, data)),
3768 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3769 offsetof(struct __sk_buff, data_end)),
3770 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3771 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3772 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3773 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3774 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3775 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3776 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3778 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3779 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3780 BPF_MOV64_IMM(BPF_REG_0, 0),
3783 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3787 "direct packet access: test21 (x += pkt_ptr, 2)",
3789 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3790 offsetof(struct __sk_buff, data)),
3791 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3792 offsetof(struct __sk_buff, data_end)),
3793 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3794 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3795 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3796 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3797 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3798 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3799 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3800 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3801 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3803 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3804 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3805 BPF_MOV64_IMM(BPF_REG_0, 0),
3808 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3812 "direct packet access: test22 (x += pkt_ptr, 3)",
3814 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3815 offsetof(struct __sk_buff, data)),
3816 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3817 offsetof(struct __sk_buff, data_end)),
3818 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3820 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3821 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3822 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3823 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3824 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3825 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3826 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3827 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3828 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3829 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3830 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3832 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3833 BPF_MOV64_IMM(BPF_REG_2, 1),
3834 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3835 BPF_MOV64_IMM(BPF_REG_0, 0),
3838 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3842 "direct packet access: test23 (x += pkt_ptr, 4)",
3844 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3845 offsetof(struct __sk_buff, data)),
3846 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3847 offsetof(struct __sk_buff, data_end)),
3848 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3849 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3850 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3851 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3852 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3853 BPF_MOV64_IMM(BPF_REG_0, 31),
3854 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3855 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3856 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3857 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3858 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3859 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3860 BPF_MOV64_IMM(BPF_REG_0, 0),
3863 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3865 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3868 "direct packet access: test24 (x += pkt_ptr, 5)",
3870 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3871 offsetof(struct __sk_buff, data)),
3872 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3873 offsetof(struct __sk_buff, data_end)),
3874 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3875 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3876 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3877 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3878 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3879 BPF_MOV64_IMM(BPF_REG_0, 64),
3880 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3881 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3882 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3884 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3885 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3886 BPF_MOV64_IMM(BPF_REG_0, 0),
3889 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3893 "direct packet access: test25 (marking on <, good access)",
3895 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3896 offsetof(struct __sk_buff, data)),
3897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3898 offsetof(struct __sk_buff, data_end)),
3899 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3901 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3902 BPF_MOV64_IMM(BPF_REG_0, 0),
3904 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3905 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3908 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3911 "direct packet access: test26 (marking on <, bad access)",
3913 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3914 offsetof(struct __sk_buff, data)),
3915 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3916 offsetof(struct __sk_buff, data_end)),
3917 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3919 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3920 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3921 BPF_MOV64_IMM(BPF_REG_0, 0),
3923 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3926 .errstr = "invalid access to packet",
3927 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3930 "direct packet access: test27 (marking on <=, good access)",
3932 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3933 offsetof(struct __sk_buff, data)),
3934 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3935 offsetof(struct __sk_buff, data_end)),
3936 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3938 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3939 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3940 BPF_MOV64_IMM(BPF_REG_0, 1),
3944 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3948 "direct packet access: test28 (marking on <=, bad access)",
3950 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3951 offsetof(struct __sk_buff, data)),
3952 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3953 offsetof(struct __sk_buff, data_end)),
3954 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3956 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3957 BPF_MOV64_IMM(BPF_REG_0, 1),
3959 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3960 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3963 .errstr = "invalid access to packet",
3964 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3967 "helper access to packet: test1, valid packet_ptr range",
3969 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3970 offsetof(struct xdp_md, data)),
3971 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3972 offsetof(struct xdp_md, data_end)),
3973 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3975 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3976 BPF_LD_MAP_FD(BPF_REG_1, 0),
3977 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3978 BPF_MOV64_IMM(BPF_REG_4, 0),
3979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3980 BPF_FUNC_map_update_elem),
3981 BPF_MOV64_IMM(BPF_REG_0, 0),
3984 .fixup_map1 = { 5 },
3985 .result_unpriv = ACCEPT,
3987 .prog_type = BPF_PROG_TYPE_XDP,
3990 "helper access to packet: test2, unchecked packet_ptr",
3992 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3993 offsetof(struct xdp_md, data)),
3994 BPF_LD_MAP_FD(BPF_REG_1, 0),
3995 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3996 BPF_FUNC_map_lookup_elem),
3997 BPF_MOV64_IMM(BPF_REG_0, 0),
4000 .fixup_map1 = { 1 },
4002 .errstr = "invalid access to packet",
4003 .prog_type = BPF_PROG_TYPE_XDP,
4006 "helper access to packet: test3, variable add",
4008 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4009 offsetof(struct xdp_md, data)),
4010 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4011 offsetof(struct xdp_md, data_end)),
4012 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4014 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4015 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4016 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4017 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4018 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4019 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4020 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4021 BPF_LD_MAP_FD(BPF_REG_1, 0),
4022 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4023 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4024 BPF_FUNC_map_lookup_elem),
4025 BPF_MOV64_IMM(BPF_REG_0, 0),
4028 .fixup_map1 = { 11 },
4030 .prog_type = BPF_PROG_TYPE_XDP,
4033 "helper access to packet: test4, packet_ptr with bad range",
4035 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4036 offsetof(struct xdp_md, data)),
4037 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4038 offsetof(struct xdp_md, data_end)),
4039 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4041 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4042 BPF_MOV64_IMM(BPF_REG_0, 0),
4044 BPF_LD_MAP_FD(BPF_REG_1, 0),
4045 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4046 BPF_FUNC_map_lookup_elem),
4047 BPF_MOV64_IMM(BPF_REG_0, 0),
4050 .fixup_map1 = { 7 },
4052 .errstr = "invalid access to packet",
4053 .prog_type = BPF_PROG_TYPE_XDP,
4056 "helper access to packet: test5, packet_ptr with too short range",
4058 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4059 offsetof(struct xdp_md, data)),
4060 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4061 offsetof(struct xdp_md, data_end)),
4062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4063 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4064 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4065 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4066 BPF_LD_MAP_FD(BPF_REG_1, 0),
4067 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4068 BPF_FUNC_map_lookup_elem),
4069 BPF_MOV64_IMM(BPF_REG_0, 0),
4072 .fixup_map1 = { 6 },
4074 .errstr = "invalid access to packet",
4075 .prog_type = BPF_PROG_TYPE_XDP,
4078 "helper access to packet: test6, cls valid packet_ptr range",
4080 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4081 offsetof(struct __sk_buff, data)),
4082 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4083 offsetof(struct __sk_buff, data_end)),
4084 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4086 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4087 BPF_LD_MAP_FD(BPF_REG_1, 0),
4088 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4089 BPF_MOV64_IMM(BPF_REG_4, 0),
4090 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4091 BPF_FUNC_map_update_elem),
4092 BPF_MOV64_IMM(BPF_REG_0, 0),
4095 .fixup_map1 = { 5 },
4097 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4100 "helper access to packet: test7, cls unchecked packet_ptr",
4102 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4103 offsetof(struct __sk_buff, data)),
4104 BPF_LD_MAP_FD(BPF_REG_1, 0),
4105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4106 BPF_FUNC_map_lookup_elem),
4107 BPF_MOV64_IMM(BPF_REG_0, 0),
4110 .fixup_map1 = { 1 },
4112 .errstr = "invalid access to packet",
4113 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4116 "helper access to packet: test8, cls variable add",
4118 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4119 offsetof(struct __sk_buff, data)),
4120 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4121 offsetof(struct __sk_buff, data_end)),
4122 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4124 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4125 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4126 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4127 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4128 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4129 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4130 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4131 BPF_LD_MAP_FD(BPF_REG_1, 0),
4132 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4133 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4134 BPF_FUNC_map_lookup_elem),
4135 BPF_MOV64_IMM(BPF_REG_0, 0),
4138 .fixup_map1 = { 11 },
4140 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4143 "helper access to packet: test9, cls packet_ptr with bad range",
4145 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4146 offsetof(struct __sk_buff, data)),
4147 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4148 offsetof(struct __sk_buff, data_end)),
4149 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4150 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4151 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4152 BPF_MOV64_IMM(BPF_REG_0, 0),
4154 BPF_LD_MAP_FD(BPF_REG_1, 0),
4155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4156 BPF_FUNC_map_lookup_elem),
4157 BPF_MOV64_IMM(BPF_REG_0, 0),
4160 .fixup_map1 = { 7 },
4162 .errstr = "invalid access to packet",
4163 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4166 "helper access to packet: test10, cls packet_ptr with too short range",
4168 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4169 offsetof(struct __sk_buff, data)),
4170 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4171 offsetof(struct __sk_buff, data_end)),
4172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4173 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4175 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4176 BPF_LD_MAP_FD(BPF_REG_1, 0),
4177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4178 BPF_FUNC_map_lookup_elem),
4179 BPF_MOV64_IMM(BPF_REG_0, 0),
4182 .fixup_map1 = { 6 },
4184 .errstr = "invalid access to packet",
4185 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4188 "helper access to packet: test11, cls unsuitable helper 1",
4190 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4191 offsetof(struct __sk_buff, data)),
4192 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4193 offsetof(struct __sk_buff, data_end)),
4194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4195 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4197 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4198 BPF_MOV64_IMM(BPF_REG_2, 0),
4199 BPF_MOV64_IMM(BPF_REG_4, 42),
4200 BPF_MOV64_IMM(BPF_REG_5, 0),
4201 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4202 BPF_FUNC_skb_store_bytes),
4203 BPF_MOV64_IMM(BPF_REG_0, 0),
4207 .errstr = "helper access to the packet",
4208 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4211 "helper access to packet: test12, cls unsuitable helper 2",
4213 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4214 offsetof(struct __sk_buff, data)),
4215 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4216 offsetof(struct __sk_buff, data_end)),
4217 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4219 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4220 BPF_MOV64_IMM(BPF_REG_2, 0),
4221 BPF_MOV64_IMM(BPF_REG_4, 4),
4222 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4223 BPF_FUNC_skb_load_bytes),
4224 BPF_MOV64_IMM(BPF_REG_0, 0),
4228 .errstr = "helper access to the packet",
4229 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4232 "helper access to packet: test13, cls helper ok",
4234 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4235 offsetof(struct __sk_buff, data)),
4236 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4237 offsetof(struct __sk_buff, data_end)),
4238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4239 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4241 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4242 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4243 BPF_MOV64_IMM(BPF_REG_2, 4),
4244 BPF_MOV64_IMM(BPF_REG_3, 0),
4245 BPF_MOV64_IMM(BPF_REG_4, 0),
4246 BPF_MOV64_IMM(BPF_REG_5, 0),
4247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4248 BPF_FUNC_csum_diff),
4249 BPF_MOV64_IMM(BPF_REG_0, 0),
4253 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4256 "helper access to packet: test14, cls helper ok sub",
4258 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4259 offsetof(struct __sk_buff, data)),
4260 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4261 offsetof(struct __sk_buff, data_end)),
4262 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4263 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4265 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4266 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4267 BPF_MOV64_IMM(BPF_REG_2, 4),
4268 BPF_MOV64_IMM(BPF_REG_3, 0),
4269 BPF_MOV64_IMM(BPF_REG_4, 0),
4270 BPF_MOV64_IMM(BPF_REG_5, 0),
4271 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4272 BPF_FUNC_csum_diff),
4273 BPF_MOV64_IMM(BPF_REG_0, 0),
4277 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4280 "helper access to packet: test15, cls helper fail sub",
4282 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4283 offsetof(struct __sk_buff, data)),
4284 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4285 offsetof(struct __sk_buff, data_end)),
4286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4287 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4289 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4290 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4291 BPF_MOV64_IMM(BPF_REG_2, 4),
4292 BPF_MOV64_IMM(BPF_REG_3, 0),
4293 BPF_MOV64_IMM(BPF_REG_4, 0),
4294 BPF_MOV64_IMM(BPF_REG_5, 0),
4295 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4296 BPF_FUNC_csum_diff),
4297 BPF_MOV64_IMM(BPF_REG_0, 0),
4301 .errstr = "invalid access to packet",
4302 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4305 "helper access to packet: test16, cls helper fail range 1",
4307 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4308 offsetof(struct __sk_buff, data)),
4309 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4310 offsetof(struct __sk_buff, data_end)),
4311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4312 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4314 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4315 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4316 BPF_MOV64_IMM(BPF_REG_2, 8),
4317 BPF_MOV64_IMM(BPF_REG_3, 0),
4318 BPF_MOV64_IMM(BPF_REG_4, 0),
4319 BPF_MOV64_IMM(BPF_REG_5, 0),
4320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4321 BPF_FUNC_csum_diff),
4322 BPF_MOV64_IMM(BPF_REG_0, 0),
4326 .errstr = "invalid access to packet",
4327 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4330 "helper access to packet: test17, cls helper fail range 2",
4332 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4333 offsetof(struct __sk_buff, data)),
4334 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4335 offsetof(struct __sk_buff, data_end)),
4336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4337 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4339 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4340 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4341 BPF_MOV64_IMM(BPF_REG_2, -9),
4342 BPF_MOV64_IMM(BPF_REG_3, 0),
4343 BPF_MOV64_IMM(BPF_REG_4, 0),
4344 BPF_MOV64_IMM(BPF_REG_5, 0),
4345 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4346 BPF_FUNC_csum_diff),
4347 BPF_MOV64_IMM(BPF_REG_0, 0),
4351 .errstr = "R2 min value is negative",
4352 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4355 "helper access to packet: test18, cls helper fail range 3",
4357 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4358 offsetof(struct __sk_buff, data)),
4359 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4360 offsetof(struct __sk_buff, data_end)),
4361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4362 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4364 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4365 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4366 BPF_MOV64_IMM(BPF_REG_2, ~0),
4367 BPF_MOV64_IMM(BPF_REG_3, 0),
4368 BPF_MOV64_IMM(BPF_REG_4, 0),
4369 BPF_MOV64_IMM(BPF_REG_5, 0),
4370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4371 BPF_FUNC_csum_diff),
4372 BPF_MOV64_IMM(BPF_REG_0, 0),
4376 .errstr = "R2 min value is negative",
4377 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4380 "helper access to packet: test19, cls helper range zero",
4382 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4383 offsetof(struct __sk_buff, data)),
4384 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4385 offsetof(struct __sk_buff, data_end)),
4386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4387 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4389 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4390 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4391 BPF_MOV64_IMM(BPF_REG_2, 0),
4392 BPF_MOV64_IMM(BPF_REG_3, 0),
4393 BPF_MOV64_IMM(BPF_REG_4, 0),
4394 BPF_MOV64_IMM(BPF_REG_5, 0),
4395 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4396 BPF_FUNC_csum_diff),
4397 BPF_MOV64_IMM(BPF_REG_0, 0),
4401 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4404 "helper access to packet: test20, pkt end as input",
4406 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4407 offsetof(struct __sk_buff, data)),
4408 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4409 offsetof(struct __sk_buff, data_end)),
4410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4413 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4414 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4415 BPF_MOV64_IMM(BPF_REG_2, 4),
4416 BPF_MOV64_IMM(BPF_REG_3, 0),
4417 BPF_MOV64_IMM(BPF_REG_4, 0),
4418 BPF_MOV64_IMM(BPF_REG_5, 0),
4419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4420 BPF_FUNC_csum_diff),
4421 BPF_MOV64_IMM(BPF_REG_0, 0),
4425 .errstr = "R1 type=pkt_end expected=fp",
4426 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4429 "helper access to packet: test21, wrong reg",
4431 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4432 offsetof(struct __sk_buff, data)),
4433 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4434 offsetof(struct __sk_buff, data_end)),
4435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4436 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4438 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4439 BPF_MOV64_IMM(BPF_REG_2, 4),
4440 BPF_MOV64_IMM(BPF_REG_3, 0),
4441 BPF_MOV64_IMM(BPF_REG_4, 0),
4442 BPF_MOV64_IMM(BPF_REG_5, 0),
4443 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4444 BPF_FUNC_csum_diff),
4445 BPF_MOV64_IMM(BPF_REG_0, 0),
4449 .errstr = "invalid access to packet",
4450 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4453 "valid map access into an array with a constant",
4455 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4456 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4458 BPF_LD_MAP_FD(BPF_REG_1, 0),
4459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4460 BPF_FUNC_map_lookup_elem),
4461 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4462 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4463 offsetof(struct test_val, foo)),
4466 .fixup_map2 = { 3 },
4467 .errstr_unpriv = "R0 leaks addr",
4468 .result_unpriv = REJECT,
4472 "valid map access into an array with a register",
4474 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4475 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4477 BPF_LD_MAP_FD(BPF_REG_1, 0),
4478 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4479 BPF_FUNC_map_lookup_elem),
4480 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4481 BPF_MOV64_IMM(BPF_REG_1, 4),
4482 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4483 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4484 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4485 offsetof(struct test_val, foo)),
4488 .fixup_map2 = { 3 },
4489 .errstr_unpriv = "R0 leaks addr",
4490 .result_unpriv = REJECT,
4492 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4495 "valid map access into an array with a variable",
4497 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4498 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4499 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4500 BPF_LD_MAP_FD(BPF_REG_1, 0),
4501 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4502 BPF_FUNC_map_lookup_elem),
4503 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4504 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4505 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4506 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4507 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4508 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4509 offsetof(struct test_val, foo)),
4512 .fixup_map2 = { 3 },
4513 .errstr_unpriv = "R0 leaks addr",
4514 .result_unpriv = REJECT,
4516 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4519 "valid map access into an array with a signed variable",
4521 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4522 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4524 BPF_LD_MAP_FD(BPF_REG_1, 0),
4525 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4526 BPF_FUNC_map_lookup_elem),
4527 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4528 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4529 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4530 BPF_MOV32_IMM(BPF_REG_1, 0),
4531 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4532 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4533 BPF_MOV32_IMM(BPF_REG_1, 0),
4534 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4535 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4536 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4537 offsetof(struct test_val, foo)),
4540 .fixup_map2 = { 3 },
4541 .errstr_unpriv = "R0 leaks addr",
4542 .result_unpriv = REJECT,
4544 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4547 "invalid map access into an array with a constant",
4549 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4550 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4552 BPF_LD_MAP_FD(BPF_REG_1, 0),
4553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4554 BPF_FUNC_map_lookup_elem),
4555 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4556 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4557 offsetof(struct test_val, foo)),
4560 .fixup_map2 = { 3 },
4561 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4565 "invalid map access into an array with a register",
4567 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4570 BPF_LD_MAP_FD(BPF_REG_1, 0),
4571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4572 BPF_FUNC_map_lookup_elem),
4573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4574 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4575 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4576 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4577 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4578 offsetof(struct test_val, foo)),
4581 .fixup_map2 = { 3 },
4582 .errstr = "R0 min value is outside of the array range",
4584 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4587 "invalid map access into an array with a variable",
4589 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4590 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4592 BPF_LD_MAP_FD(BPF_REG_1, 0),
4593 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4594 BPF_FUNC_map_lookup_elem),
4595 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4596 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4597 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4598 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4599 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4600 offsetof(struct test_val, foo)),
4603 .fixup_map2 = { 3 },
4604 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4606 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4609 "invalid map access into an array with no floor check",
4611 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4612 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4614 BPF_LD_MAP_FD(BPF_REG_1, 0),
4615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4616 BPF_FUNC_map_lookup_elem),
4617 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4618 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4619 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4620 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4621 BPF_MOV32_IMM(BPF_REG_1, 0),
4622 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4623 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4624 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4625 offsetof(struct test_val, foo)),
4628 .fixup_map2 = { 3 },
4629 .errstr_unpriv = "R0 leaks addr",
4630 .errstr = "R0 unbounded memory access",
4631 .result_unpriv = REJECT,
4633 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4636 "invalid map access into an array with a invalid max check",
4638 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4639 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4641 BPF_LD_MAP_FD(BPF_REG_1, 0),
4642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4643 BPF_FUNC_map_lookup_elem),
4644 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4645 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4646 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4647 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4648 BPF_MOV32_IMM(BPF_REG_1, 0),
4649 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4650 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4651 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4652 offsetof(struct test_val, foo)),
4655 .fixup_map2 = { 3 },
4656 .errstr_unpriv = "R0 leaks addr",
4657 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
4658 .result_unpriv = REJECT,
4660 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4663 "invalid map access into an array with a invalid max check",
4665 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4666 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4667 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4668 BPF_LD_MAP_FD(BPF_REG_1, 0),
4669 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4670 BPF_FUNC_map_lookup_elem),
4671 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4672 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4673 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4674 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4676 BPF_LD_MAP_FD(BPF_REG_1, 0),
4677 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4678 BPF_FUNC_map_lookup_elem),
4679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4680 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4681 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4682 offsetof(struct test_val, foo)),
4685 .fixup_map2 = { 3, 11 },
4686 .errstr = "R0 pointer += pointer",
4688 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4691 "valid cgroup storage access",
4693 BPF_MOV64_IMM(BPF_REG_2, 0),
4694 BPF_LD_MAP_FD(BPF_REG_1, 0),
4695 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4696 BPF_FUNC_get_local_storage),
4697 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4698 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4699 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4702 .fixup_cgroup_storage = { 1 },
4704 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4707 "invalid cgroup storage access 1",
4709 BPF_MOV64_IMM(BPF_REG_2, 0),
4710 BPF_LD_MAP_FD(BPF_REG_1, 0),
4711 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4712 BPF_FUNC_get_local_storage),
4713 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4714 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4715 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4718 .fixup_map1 = { 1 },
4720 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
4721 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4724 "invalid cgroup storage access 2",
4726 BPF_MOV64_IMM(BPF_REG_2, 0),
4727 BPF_LD_MAP_FD(BPF_REG_1, 1),
4728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4729 BPF_FUNC_get_local_storage),
4730 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4734 .errstr = "fd 1 is not pointing to valid bpf_map",
4735 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4738 "invalid per-cgroup storage access 3",
4740 BPF_MOV64_IMM(BPF_REG_2, 0),
4741 BPF_LD_MAP_FD(BPF_REG_1, 0),
4742 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4743 BPF_FUNC_get_local_storage),
4744 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
4745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4746 BPF_MOV64_IMM(BPF_REG_0, 0),
4749 .fixup_cgroup_storage = { 1 },
4751 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
4752 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4755 "invalid cgroup storage access 4",
4757 BPF_MOV64_IMM(BPF_REG_2, 0),
4758 BPF_LD_MAP_FD(BPF_REG_1, 0),
4759 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4760 BPF_FUNC_get_local_storage),
4761 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
4762 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4763 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4766 .fixup_cgroup_storage = { 1 },
4768 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
4769 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4772 "invalid cgroup storage access 5",
4774 BPF_MOV64_IMM(BPF_REG_2, 7),
4775 BPF_LD_MAP_FD(BPF_REG_1, 0),
4776 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4777 BPF_FUNC_get_local_storage),
4778 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4779 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4780 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4783 .fixup_cgroup_storage = { 1 },
4785 .errstr = "get_local_storage() doesn't support non-zero flags",
4786 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4789 "invalid cgroup storage access 6",
4791 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
4792 BPF_LD_MAP_FD(BPF_REG_1, 0),
4793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4794 BPF_FUNC_get_local_storage),
4795 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4796 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4797 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4800 .fixup_cgroup_storage = { 1 },
4802 .errstr = "get_local_storage() doesn't support non-zero flags",
4803 .errstr_unpriv = "R2 leaks addr into helper function",
4804 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4807 "multiple registers share map_lookup_elem result",
4809 BPF_MOV64_IMM(BPF_REG_1, 10),
4810 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4811 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4812 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4813 BPF_LD_MAP_FD(BPF_REG_1, 0),
4814 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4815 BPF_FUNC_map_lookup_elem),
4816 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4817 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4818 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4821 .fixup_map1 = { 4 },
4823 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4826 "alu ops on ptr_to_map_value_or_null, 1",
4828 BPF_MOV64_IMM(BPF_REG_1, 10),
4829 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4830 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4832 BPF_LD_MAP_FD(BPF_REG_1, 0),
4833 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4834 BPF_FUNC_map_lookup_elem),
4835 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4838 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4839 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4842 .fixup_map1 = { 4 },
4843 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4845 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4848 "alu ops on ptr_to_map_value_or_null, 2",
4850 BPF_MOV64_IMM(BPF_REG_1, 10),
4851 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4854 BPF_LD_MAP_FD(BPF_REG_1, 0),
4855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4856 BPF_FUNC_map_lookup_elem),
4857 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4858 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4859 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4860 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4863 .fixup_map1 = { 4 },
4864 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4866 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4869 "alu ops on ptr_to_map_value_or_null, 3",
4871 BPF_MOV64_IMM(BPF_REG_1, 10),
4872 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4875 BPF_LD_MAP_FD(BPF_REG_1, 0),
4876 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4877 BPF_FUNC_map_lookup_elem),
4878 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4879 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4880 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4881 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4884 .fixup_map1 = { 4 },
4885 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4887 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4890 "invalid memory access with multiple map_lookup_elem calls",
4892 BPF_MOV64_IMM(BPF_REG_1, 10),
4893 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4894 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4896 BPF_LD_MAP_FD(BPF_REG_1, 0),
4897 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4898 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4900 BPF_FUNC_map_lookup_elem),
4901 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4902 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4903 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4905 BPF_FUNC_map_lookup_elem),
4906 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4907 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4910 .fixup_map1 = { 4 },
4912 .errstr = "R4 !read_ok",
4913 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4916 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4918 BPF_MOV64_IMM(BPF_REG_1, 10),
4919 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4922 BPF_LD_MAP_FD(BPF_REG_1, 0),
4923 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4924 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4926 BPF_FUNC_map_lookup_elem),
4927 BPF_MOV64_IMM(BPF_REG_2, 10),
4928 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4929 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4930 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4931 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4932 BPF_FUNC_map_lookup_elem),
4933 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4934 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4935 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4938 .fixup_map1 = { 4 },
4940 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4943 "invalid map access from else condition",
4945 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4946 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4948 BPF_LD_MAP_FD(BPF_REG_1, 0),
4949 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4950 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4951 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4952 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4954 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4955 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4956 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4959 .fixup_map2 = { 3 },
4960 .errstr = "R0 unbounded memory access",
4962 .errstr_unpriv = "R0 leaks addr",
4963 .result_unpriv = REJECT,
4964 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4967 "constant register |= constant should keep constant type",
4969 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4971 BPF_MOV64_IMM(BPF_REG_2, 34),
4972 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4973 BPF_MOV64_IMM(BPF_REG_3, 0),
4974 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4978 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4981 "constant register |= constant should not bypass stack boundary checks",
4983 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4985 BPF_MOV64_IMM(BPF_REG_2, 34),
4986 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4987 BPF_MOV64_IMM(BPF_REG_3, 0),
4988 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4991 .errstr = "invalid stack type R1 off=-48 access_size=58",
4993 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4996 "constant register |= constant register should keep constant type",
4998 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5000 BPF_MOV64_IMM(BPF_REG_2, 34),
5001 BPF_MOV64_IMM(BPF_REG_4, 13),
5002 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5003 BPF_MOV64_IMM(BPF_REG_3, 0),
5004 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5008 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5011 "constant register |= constant register should not bypass stack boundary checks",
5013 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5015 BPF_MOV64_IMM(BPF_REG_2, 34),
5016 BPF_MOV64_IMM(BPF_REG_4, 24),
5017 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5018 BPF_MOV64_IMM(BPF_REG_3, 0),
5019 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5022 .errstr = "invalid stack type R1 off=-48 access_size=58",
5024 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5027 "invalid direct packet write for LWT_IN",
5029 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5030 offsetof(struct __sk_buff, data)),
5031 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5032 offsetof(struct __sk_buff, data_end)),
5033 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5035 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5036 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5037 BPF_MOV64_IMM(BPF_REG_0, 0),
5040 .errstr = "cannot write into packet",
5042 .prog_type = BPF_PROG_TYPE_LWT_IN,
5045 "invalid direct packet write for LWT_OUT",
5047 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5048 offsetof(struct __sk_buff, data)),
5049 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5050 offsetof(struct __sk_buff, data_end)),
5051 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5053 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5054 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5055 BPF_MOV64_IMM(BPF_REG_0, 0),
5058 .errstr = "cannot write into packet",
5060 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5063 "direct packet write for LWT_XMIT",
5065 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5066 offsetof(struct __sk_buff, data)),
5067 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5068 offsetof(struct __sk_buff, data_end)),
5069 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5071 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5072 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5073 BPF_MOV64_IMM(BPF_REG_0, 0),
5077 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5080 "direct packet read for LWT_IN",
5082 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5083 offsetof(struct __sk_buff, data)),
5084 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5085 offsetof(struct __sk_buff, data_end)),
5086 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5087 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5088 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5089 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5090 BPF_MOV64_IMM(BPF_REG_0, 0),
5094 .prog_type = BPF_PROG_TYPE_LWT_IN,
5097 "direct packet read for LWT_OUT",
5099 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5100 offsetof(struct __sk_buff, data)),
5101 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5102 offsetof(struct __sk_buff, data_end)),
5103 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5105 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5106 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5107 BPF_MOV64_IMM(BPF_REG_0, 0),
5111 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5114 "direct packet read for LWT_XMIT",
5116 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5117 offsetof(struct __sk_buff, data)),
5118 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5119 offsetof(struct __sk_buff, data_end)),
5120 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5121 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5122 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5123 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5124 BPF_MOV64_IMM(BPF_REG_0, 0),
5128 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5131 "overlapping checks for direct packet access",
5133 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5134 offsetof(struct __sk_buff, data)),
5135 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5136 offsetof(struct __sk_buff, data_end)),
5137 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5139 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5140 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5141 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5142 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5143 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5144 BPF_MOV64_IMM(BPF_REG_0, 0),
5148 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5151 "make headroom for LWT_XMIT",
5153 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5154 BPF_MOV64_IMM(BPF_REG_2, 34),
5155 BPF_MOV64_IMM(BPF_REG_3, 0),
5156 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5157 /* split for s390 to succeed */
5158 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5159 BPF_MOV64_IMM(BPF_REG_2, 42),
5160 BPF_MOV64_IMM(BPF_REG_3, 0),
5161 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5162 BPF_MOV64_IMM(BPF_REG_0, 0),
5166 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5169 "invalid access of tc_classid for LWT_IN",
5171 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5172 offsetof(struct __sk_buff, tc_classid)),
5176 .errstr = "invalid bpf_context access",
5179 "invalid access of tc_classid for LWT_OUT",
5181 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5182 offsetof(struct __sk_buff, tc_classid)),
5186 .errstr = "invalid bpf_context access",
5189 "invalid access of tc_classid for LWT_XMIT",
5191 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5192 offsetof(struct __sk_buff, tc_classid)),
5196 .errstr = "invalid bpf_context access",
5199 "leak pointer into ctx 1",
5201 BPF_MOV64_IMM(BPF_REG_0, 0),
5202 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5203 offsetof(struct __sk_buff, cb[0])),
5204 BPF_LD_MAP_FD(BPF_REG_2, 0),
5205 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5206 offsetof(struct __sk_buff, cb[0])),
5209 .fixup_map1 = { 2 },
5210 .errstr_unpriv = "R2 leaks addr into mem",
5211 .result_unpriv = REJECT,
5213 .errstr = "BPF_XADD stores into R1 context is not allowed",
5216 "leak pointer into ctx 2",
5218 BPF_MOV64_IMM(BPF_REG_0, 0),
5219 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5220 offsetof(struct __sk_buff, cb[0])),
5221 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5222 offsetof(struct __sk_buff, cb[0])),
5225 .errstr_unpriv = "R10 leaks addr into mem",
5226 .result_unpriv = REJECT,
5228 .errstr = "BPF_XADD stores into R1 context is not allowed",
5231 "leak pointer into ctx 3",
5233 BPF_MOV64_IMM(BPF_REG_0, 0),
5234 BPF_LD_MAP_FD(BPF_REG_2, 0),
5235 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5236 offsetof(struct __sk_buff, cb[0])),
5239 .fixup_map1 = { 1 },
5240 .errstr_unpriv = "R2 leaks addr into ctx",
5241 .result_unpriv = REJECT,
5245 "leak pointer into map val",
5247 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5248 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5249 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5250 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5251 BPF_LD_MAP_FD(BPF_REG_1, 0),
5252 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5253 BPF_FUNC_map_lookup_elem),
5254 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5255 BPF_MOV64_IMM(BPF_REG_3, 0),
5256 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5257 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5258 BPF_MOV64_IMM(BPF_REG_0, 0),
5261 .fixup_map1 = { 4 },
5262 .errstr_unpriv = "R6 leaks addr into mem",
5263 .result_unpriv = REJECT,
5267 "helper access to map: full range",
5269 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5270 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5271 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5272 BPF_LD_MAP_FD(BPF_REG_1, 0),
5273 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5274 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5275 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5276 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5277 BPF_MOV64_IMM(BPF_REG_3, 0),
5278 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5281 .fixup_map2 = { 3 },
5283 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5286 "helper access to map: partial range",
5288 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5290 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5291 BPF_LD_MAP_FD(BPF_REG_1, 0),
5292 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5293 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5294 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5295 BPF_MOV64_IMM(BPF_REG_2, 8),
5296 BPF_MOV64_IMM(BPF_REG_3, 0),
5297 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5300 .fixup_map2 = { 3 },
5302 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5305 "helper access to map: empty range",
5307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5309 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5310 BPF_LD_MAP_FD(BPF_REG_1, 0),
5311 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5312 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5313 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5314 BPF_MOV64_IMM(BPF_REG_2, 0),
5315 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5318 .fixup_map2 = { 3 },
5319 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
5321 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5324 "helper access to map: out-of-bound range",
5326 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5328 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5329 BPF_LD_MAP_FD(BPF_REG_1, 0),
5330 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5331 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5332 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5333 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5334 BPF_MOV64_IMM(BPF_REG_3, 0),
5335 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5338 .fixup_map2 = { 3 },
5339 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
5341 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5344 "helper access to map: negative range",
5346 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5348 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5349 BPF_LD_MAP_FD(BPF_REG_1, 0),
5350 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5351 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5352 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5353 BPF_MOV64_IMM(BPF_REG_2, -8),
5354 BPF_MOV64_IMM(BPF_REG_3, 0),
5355 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5358 .fixup_map2 = { 3 },
5359 .errstr = "R2 min value is negative",
5361 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5364 "helper access to adjusted map (via const imm): full range",
5366 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5368 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5369 BPF_LD_MAP_FD(BPF_REG_1, 0),
5370 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5371 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5372 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5374 offsetof(struct test_val, foo)),
5375 BPF_MOV64_IMM(BPF_REG_2,
5376 sizeof(struct test_val) -
5377 offsetof(struct test_val, foo)),
5378 BPF_MOV64_IMM(BPF_REG_3, 0),
5379 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5382 .fixup_map2 = { 3 },
5384 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5387 "helper access to adjusted map (via const imm): partial range",
5389 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5391 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5392 BPF_LD_MAP_FD(BPF_REG_1, 0),
5393 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5394 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5395 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5397 offsetof(struct test_val, foo)),
5398 BPF_MOV64_IMM(BPF_REG_2, 8),
5399 BPF_MOV64_IMM(BPF_REG_3, 0),
5400 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5403 .fixup_map2 = { 3 },
5405 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5408 "helper access to adjusted map (via const imm): empty range",
5410 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5412 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5413 BPF_LD_MAP_FD(BPF_REG_1, 0),
5414 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5415 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5416 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5418 offsetof(struct test_val, foo)),
5419 BPF_MOV64_IMM(BPF_REG_2, 0),
5420 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5423 .fixup_map2 = { 3 },
5424 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
5426 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5429 "helper access to adjusted map (via const imm): out-of-bound range",
5431 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5433 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5434 BPF_LD_MAP_FD(BPF_REG_1, 0),
5435 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5436 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5439 offsetof(struct test_val, foo)),
5440 BPF_MOV64_IMM(BPF_REG_2,
5441 sizeof(struct test_val) -
5442 offsetof(struct test_val, foo) + 8),
5443 BPF_MOV64_IMM(BPF_REG_3, 0),
5444 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5447 .fixup_map2 = { 3 },
5448 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5450 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5453 "helper access to adjusted map (via const imm): negative range (> adjustment)",
5455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5457 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5458 BPF_LD_MAP_FD(BPF_REG_1, 0),
5459 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5460 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5461 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5463 offsetof(struct test_val, foo)),
5464 BPF_MOV64_IMM(BPF_REG_2, -8),
5465 BPF_MOV64_IMM(BPF_REG_3, 0),
5466 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5469 .fixup_map2 = { 3 },
5470 .errstr = "R2 min value is negative",
5472 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5475 "helper access to adjusted map (via const imm): negative range (< adjustment)",
5477 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5479 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5480 BPF_LD_MAP_FD(BPF_REG_1, 0),
5481 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5482 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5483 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5485 offsetof(struct test_val, foo)),
5486 BPF_MOV64_IMM(BPF_REG_2, -1),
5487 BPF_MOV64_IMM(BPF_REG_3, 0),
5488 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5491 .fixup_map2 = { 3 },
5492 .errstr = "R2 min value is negative",
5494 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5497 "helper access to adjusted map (via const reg): full range",
5499 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5500 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5501 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5502 BPF_LD_MAP_FD(BPF_REG_1, 0),
5503 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5504 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5505 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5506 BPF_MOV64_IMM(BPF_REG_3,
5507 offsetof(struct test_val, foo)),
5508 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5509 BPF_MOV64_IMM(BPF_REG_2,
5510 sizeof(struct test_val) -
5511 offsetof(struct test_val, foo)),
5512 BPF_MOV64_IMM(BPF_REG_3, 0),
5513 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5516 .fixup_map2 = { 3 },
5518 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5521 "helper access to adjusted map (via const reg): partial range",
5523 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5525 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5526 BPF_LD_MAP_FD(BPF_REG_1, 0),
5527 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5528 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5529 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5530 BPF_MOV64_IMM(BPF_REG_3,
5531 offsetof(struct test_val, foo)),
5532 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5533 BPF_MOV64_IMM(BPF_REG_2, 8),
5534 BPF_MOV64_IMM(BPF_REG_3, 0),
5535 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5538 .fixup_map2 = { 3 },
5540 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5543 "helper access to adjusted map (via const reg): empty range",
5545 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5547 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5548 BPF_LD_MAP_FD(BPF_REG_1, 0),
5549 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5550 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5551 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5552 BPF_MOV64_IMM(BPF_REG_3, 0),
5553 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5554 BPF_MOV64_IMM(BPF_REG_2, 0),
5555 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5558 .fixup_map2 = { 3 },
5559 .errstr = "R1 min value is outside of the array range",
5561 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5564 "helper access to adjusted map (via const reg): out-of-bound range",
5566 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5568 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5569 BPF_LD_MAP_FD(BPF_REG_1, 0),
5570 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5571 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5572 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5573 BPF_MOV64_IMM(BPF_REG_3,
5574 offsetof(struct test_val, foo)),
5575 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5576 BPF_MOV64_IMM(BPF_REG_2,
5577 sizeof(struct test_val) -
5578 offsetof(struct test_val, foo) + 8),
5579 BPF_MOV64_IMM(BPF_REG_3, 0),
5580 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5583 .fixup_map2 = { 3 },
5584 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5586 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5589 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5591 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5593 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5594 BPF_LD_MAP_FD(BPF_REG_1, 0),
5595 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5597 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5598 BPF_MOV64_IMM(BPF_REG_3,
5599 offsetof(struct test_val, foo)),
5600 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5601 BPF_MOV64_IMM(BPF_REG_2, -8),
5602 BPF_MOV64_IMM(BPF_REG_3, 0),
5603 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5606 .fixup_map2 = { 3 },
5607 .errstr = "R2 min value is negative",
5609 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5612 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5614 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5616 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5617 BPF_LD_MAP_FD(BPF_REG_1, 0),
5618 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5619 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5620 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5621 BPF_MOV64_IMM(BPF_REG_3,
5622 offsetof(struct test_val, foo)),
5623 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5624 BPF_MOV64_IMM(BPF_REG_2, -1),
5625 BPF_MOV64_IMM(BPF_REG_3, 0),
5626 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5629 .fixup_map2 = { 3 },
5630 .errstr = "R2 min value is negative",
5632 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5635 "helper access to adjusted map (via variable): full range",
5637 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5639 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5640 BPF_LD_MAP_FD(BPF_REG_1, 0),
5641 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5642 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5643 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5644 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5645 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5646 offsetof(struct test_val, foo), 4),
5647 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5648 BPF_MOV64_IMM(BPF_REG_2,
5649 sizeof(struct test_val) -
5650 offsetof(struct test_val, foo)),
5651 BPF_MOV64_IMM(BPF_REG_3, 0),
5652 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5655 .fixup_map2 = { 3 },
5657 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5660 "helper access to adjusted map (via variable): partial range",
5662 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5664 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5665 BPF_LD_MAP_FD(BPF_REG_1, 0),
5666 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5667 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5668 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5669 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5670 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5671 offsetof(struct test_val, foo), 4),
5672 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5673 BPF_MOV64_IMM(BPF_REG_2, 8),
5674 BPF_MOV64_IMM(BPF_REG_3, 0),
5675 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5678 .fixup_map2 = { 3 },
5680 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5683 "helper access to adjusted map (via variable): empty range",
5685 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5687 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5688 BPF_LD_MAP_FD(BPF_REG_1, 0),
5689 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5690 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5692 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5693 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5694 offsetof(struct test_val, foo), 3),
5695 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5696 BPF_MOV64_IMM(BPF_REG_2, 0),
5697 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5700 .fixup_map2 = { 3 },
5701 .errstr = "R1 min value is outside of the array range",
5703 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5706 "helper access to adjusted map (via variable): no max check",
5708 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5709 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5710 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5711 BPF_LD_MAP_FD(BPF_REG_1, 0),
5712 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5713 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5714 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5715 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5716 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5717 BPF_MOV64_IMM(BPF_REG_2, 1),
5718 BPF_MOV64_IMM(BPF_REG_3, 0),
5719 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5722 .fixup_map2 = { 3 },
5723 .errstr = "R1 unbounded memory access",
5725 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5728 "helper access to adjusted map (via variable): wrong max check",
5730 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5731 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5732 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5733 BPF_LD_MAP_FD(BPF_REG_1, 0),
5734 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5735 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5736 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5737 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5738 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5739 offsetof(struct test_val, foo), 4),
5740 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5741 BPF_MOV64_IMM(BPF_REG_2,
5742 sizeof(struct test_val) -
5743 offsetof(struct test_val, foo) + 1),
5744 BPF_MOV64_IMM(BPF_REG_3, 0),
5745 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5748 .fixup_map2 = { 3 },
5749 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5751 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5754 "helper access to map: bounds check using <, good access",
5756 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5758 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5759 BPF_LD_MAP_FD(BPF_REG_1, 0),
5760 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5761 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5762 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5763 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5764 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5765 BPF_MOV64_IMM(BPF_REG_0, 0),
5767 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5768 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5769 BPF_MOV64_IMM(BPF_REG_0, 0),
5772 .fixup_map2 = { 3 },
5774 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5777 "helper access to map: bounds check using <, bad access",
5779 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5781 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5782 BPF_LD_MAP_FD(BPF_REG_1, 0),
5783 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5785 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5786 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5787 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5788 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5789 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5790 BPF_MOV64_IMM(BPF_REG_0, 0),
5792 BPF_MOV64_IMM(BPF_REG_0, 0),
5795 .fixup_map2 = { 3 },
5797 .errstr = "R1 unbounded memory access",
5798 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5801 "helper access to map: bounds check using <=, good access",
5803 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5805 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5806 BPF_LD_MAP_FD(BPF_REG_1, 0),
5807 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5808 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5809 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5810 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5811 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5812 BPF_MOV64_IMM(BPF_REG_0, 0),
5814 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5815 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5816 BPF_MOV64_IMM(BPF_REG_0, 0),
5819 .fixup_map2 = { 3 },
5821 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5824 "helper access to map: bounds check using <=, bad access",
5826 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5828 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5829 BPF_LD_MAP_FD(BPF_REG_1, 0),
5830 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5833 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5834 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5835 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5836 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5837 BPF_MOV64_IMM(BPF_REG_0, 0),
5839 BPF_MOV64_IMM(BPF_REG_0, 0),
5842 .fixup_map2 = { 3 },
5844 .errstr = "R1 unbounded memory access",
5845 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5848 "helper access to map: bounds check using s<, good access",
5850 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5851 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5852 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5853 BPF_LD_MAP_FD(BPF_REG_1, 0),
5854 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5855 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5856 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5857 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5858 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5859 BPF_MOV64_IMM(BPF_REG_0, 0),
5861 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5862 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5863 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5864 BPF_MOV64_IMM(BPF_REG_0, 0),
5867 .fixup_map2 = { 3 },
5869 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5872 "helper access to map: bounds check using s<, good access 2",
5874 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5876 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5877 BPF_LD_MAP_FD(BPF_REG_1, 0),
5878 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5879 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5880 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5881 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5882 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5883 BPF_MOV64_IMM(BPF_REG_0, 0),
5885 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5886 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5887 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5888 BPF_MOV64_IMM(BPF_REG_0, 0),
5891 .fixup_map2 = { 3 },
5893 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5896 "helper access to map: bounds check using s<, bad access",
5898 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5900 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5901 BPF_LD_MAP_FD(BPF_REG_1, 0),
5902 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5903 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5904 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5905 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5906 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5907 BPF_MOV64_IMM(BPF_REG_0, 0),
5909 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5910 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5911 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5912 BPF_MOV64_IMM(BPF_REG_0, 0),
5915 .fixup_map2 = { 3 },
5917 .errstr = "R1 min value is negative",
5918 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5921 "helper access to map: bounds check using s<=, good access",
5923 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5925 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5926 BPF_LD_MAP_FD(BPF_REG_1, 0),
5927 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5928 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5929 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5930 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5931 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5932 BPF_MOV64_IMM(BPF_REG_0, 0),
5934 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5935 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5936 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5937 BPF_MOV64_IMM(BPF_REG_0, 0),
5940 .fixup_map2 = { 3 },
5942 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5945 "helper access to map: bounds check using s<=, good access 2",
5947 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5949 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5950 BPF_LD_MAP_FD(BPF_REG_1, 0),
5951 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5952 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5953 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5954 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5955 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5956 BPF_MOV64_IMM(BPF_REG_0, 0),
5958 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5959 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5960 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5961 BPF_MOV64_IMM(BPF_REG_0, 0),
5964 .fixup_map2 = { 3 },
5966 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5969 "helper access to map: bounds check using s<=, bad access",
5971 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5972 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5973 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5974 BPF_LD_MAP_FD(BPF_REG_1, 0),
5975 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5976 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5977 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5978 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5979 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5980 BPF_MOV64_IMM(BPF_REG_0, 0),
5982 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5983 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5984 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5985 BPF_MOV64_IMM(BPF_REG_0, 0),
5988 .fixup_map2 = { 3 },
5990 .errstr = "R1 min value is negative",
5991 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5994 "map lookup helper access to map",
5996 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5998 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5999 BPF_LD_MAP_FD(BPF_REG_1, 0),
6000 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6001 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6003 BPF_LD_MAP_FD(BPF_REG_1, 0),
6004 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6007 .fixup_map3 = { 3, 8 },
6009 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6012 "map update helper access to map",
6014 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6015 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6016 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6017 BPF_LD_MAP_FD(BPF_REG_1, 0),
6018 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6019 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6020 BPF_MOV64_IMM(BPF_REG_4, 0),
6021 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6022 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6023 BPF_LD_MAP_FD(BPF_REG_1, 0),
6024 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6027 .fixup_map3 = { 3, 10 },
6029 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6032 "map update helper access to map: wrong size",
6034 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6036 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6037 BPF_LD_MAP_FD(BPF_REG_1, 0),
6038 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6039 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6040 BPF_MOV64_IMM(BPF_REG_4, 0),
6041 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6042 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6043 BPF_LD_MAP_FD(BPF_REG_1, 0),
6044 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6047 .fixup_map1 = { 3 },
6048 .fixup_map3 = { 10 },
6050 .errstr = "invalid access to map value, value_size=8 off=0 size=16",
6051 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6054 "map helper access to adjusted map (via const imm)",
6056 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6058 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6059 BPF_LD_MAP_FD(BPF_REG_1, 0),
6060 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6061 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6062 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6064 offsetof(struct other_val, bar)),
6065 BPF_LD_MAP_FD(BPF_REG_1, 0),
6066 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6069 .fixup_map3 = { 3, 9 },
6071 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6074 "map helper access to adjusted map (via const imm): out-of-bound 1",
6076 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6078 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6079 BPF_LD_MAP_FD(BPF_REG_1, 0),
6080 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6081 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6084 sizeof(struct other_val) - 4),
6085 BPF_LD_MAP_FD(BPF_REG_1, 0),
6086 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6089 .fixup_map3 = { 3, 9 },
6091 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6092 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6095 "map helper access to adjusted map (via const imm): out-of-bound 2",
6097 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6099 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6100 BPF_LD_MAP_FD(BPF_REG_1, 0),
6101 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6102 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6105 BPF_LD_MAP_FD(BPF_REG_1, 0),
6106 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6109 .fixup_map3 = { 3, 9 },
6111 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6112 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6115 "map helper access to adjusted map (via const reg)",
6117 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6118 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6119 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6120 BPF_LD_MAP_FD(BPF_REG_1, 0),
6121 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6122 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6123 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6124 BPF_MOV64_IMM(BPF_REG_3,
6125 offsetof(struct other_val, bar)),
6126 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6127 BPF_LD_MAP_FD(BPF_REG_1, 0),
6128 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6131 .fixup_map3 = { 3, 10 },
6133 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6136 "map helper access to adjusted map (via const reg): out-of-bound 1",
6138 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6139 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6140 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6141 BPF_LD_MAP_FD(BPF_REG_1, 0),
6142 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6143 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6144 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6145 BPF_MOV64_IMM(BPF_REG_3,
6146 sizeof(struct other_val) - 4),
6147 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6148 BPF_LD_MAP_FD(BPF_REG_1, 0),
6149 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6152 .fixup_map3 = { 3, 10 },
6154 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6155 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6158 "map helper access to adjusted map (via const reg): out-of-bound 2",
6160 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6162 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6163 BPF_LD_MAP_FD(BPF_REG_1, 0),
6164 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6166 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6167 BPF_MOV64_IMM(BPF_REG_3, -4),
6168 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6169 BPF_LD_MAP_FD(BPF_REG_1, 0),
6170 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6173 .fixup_map3 = { 3, 10 },
6175 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6176 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6179 "map helper access to adjusted map (via variable)",
6181 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6182 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6183 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6184 BPF_LD_MAP_FD(BPF_REG_1, 0),
6185 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6186 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6188 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6189 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6190 offsetof(struct other_val, bar), 4),
6191 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6192 BPF_LD_MAP_FD(BPF_REG_1, 0),
6193 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6196 .fixup_map3 = { 3, 11 },
6198 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6201 "map helper access to adjusted map (via variable): no max check",
6203 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6205 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6206 BPF_LD_MAP_FD(BPF_REG_1, 0),
6207 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6210 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6211 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6212 BPF_LD_MAP_FD(BPF_REG_1, 0),
6213 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6216 .fixup_map3 = { 3, 10 },
6218 .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
6219 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6222 "map helper access to adjusted map (via variable): wrong max check",
6224 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6226 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6227 BPF_LD_MAP_FD(BPF_REG_1, 0),
6228 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6229 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6230 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6231 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6232 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6233 offsetof(struct other_val, bar) + 1, 4),
6234 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6235 BPF_LD_MAP_FD(BPF_REG_1, 0),
6236 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6239 .fixup_map3 = { 3, 11 },
6241 .errstr = "invalid access to map value, value_size=16 off=9 size=8",
6242 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6245 "map element value is preserved across register spilling",
6247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6249 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6250 BPF_LD_MAP_FD(BPF_REG_1, 0),
6251 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6253 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6254 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6255 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6256 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6257 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6258 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6261 .fixup_map2 = { 3 },
6262 .errstr_unpriv = "R0 leaks addr",
6264 .result_unpriv = REJECT,
6267 "map element value or null is marked on register spilling",
6269 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6270 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6271 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6272 BPF_LD_MAP_FD(BPF_REG_1, 0),
6273 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6274 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6275 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
6276 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6277 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6278 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6279 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6282 .fixup_map2 = { 3 },
6283 .errstr_unpriv = "R0 leaks addr",
6285 .result_unpriv = REJECT,
6288 "map element value store of cleared call register",
6290 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6292 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6293 BPF_LD_MAP_FD(BPF_REG_1, 0),
6294 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6295 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
6296 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
6299 .fixup_map2 = { 3 },
6300 .errstr_unpriv = "R1 !read_ok",
6301 .errstr = "R1 !read_ok",
6303 .result_unpriv = REJECT,
6306 "map element value with unaligned store",
6308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6309 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6310 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6311 BPF_LD_MAP_FD(BPF_REG_1, 0),
6312 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6313 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
6314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6315 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6316 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
6317 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
6318 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6319 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
6320 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
6321 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
6322 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
6323 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
6324 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
6325 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
6326 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
6327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
6328 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
6329 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
6330 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
6333 .fixup_map2 = { 3 },
6334 .errstr_unpriv = "R0 leaks addr",
6336 .result_unpriv = REJECT,
6337 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6340 "map element value with unaligned load",
6342 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6344 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6345 BPF_LD_MAP_FD(BPF_REG_1, 0),
6346 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6347 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6348 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6349 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
6350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6351 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6352 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
6353 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6354 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
6355 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
6356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
6357 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6358 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
6361 .fixup_map2 = { 3 },
6362 .errstr_unpriv = "R0 leaks addr",
6364 .result_unpriv = REJECT,
6365 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6368 "map element value illegal alu op, 1",
6370 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6372 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6373 BPF_LD_MAP_FD(BPF_REG_1, 0),
6374 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6375 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6376 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
6377 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6380 .fixup_map2 = { 3 },
6381 .errstr = "R0 bitwise operator &= on pointer",
6385 "map element value illegal alu op, 2",
6387 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6389 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6390 BPF_LD_MAP_FD(BPF_REG_1, 0),
6391 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6393 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
6394 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6397 .fixup_map2 = { 3 },
6398 .errstr = "R0 32-bit pointer arithmetic prohibited",
6402 "map element value illegal alu op, 3",
6404 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6406 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6407 BPF_LD_MAP_FD(BPF_REG_1, 0),
6408 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6409 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6410 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
6411 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6414 .fixup_map2 = { 3 },
6415 .errstr = "R0 pointer arithmetic with /= operator",
6419 "map element value illegal alu op, 4",
6421 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6422 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6423 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6424 BPF_LD_MAP_FD(BPF_REG_1, 0),
6425 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6426 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6427 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
6428 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6431 .fixup_map2 = { 3 },
6432 .errstr_unpriv = "R0 pointer arithmetic prohibited",
6433 .errstr = "invalid mem access 'inv'",
6435 .result_unpriv = REJECT,
6438 "map element value illegal alu op, 5",
6440 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6441 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6442 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6443 BPF_LD_MAP_FD(BPF_REG_1, 0),
6444 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6445 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6446 BPF_MOV64_IMM(BPF_REG_3, 4096),
6447 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6449 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6450 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
6451 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
6452 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6455 .fixup_map2 = { 3 },
6456 .errstr = "R0 invalid mem access 'inv'",
6460 "map element value is preserved across register spilling",
6462 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6464 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6465 BPF_LD_MAP_FD(BPF_REG_1, 0),
6466 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6467 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6468 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
6469 offsetof(struct test_val, foo)),
6470 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6471 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6473 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6474 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6475 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6478 .fixup_map2 = { 3 },
6479 .errstr_unpriv = "R0 leaks addr",
6481 .result_unpriv = REJECT,
6482 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6485 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
6487 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6489 BPF_MOV64_IMM(BPF_REG_0, 0),
6490 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6491 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6492 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6493 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6494 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6495 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6496 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6497 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6498 BPF_MOV64_IMM(BPF_REG_2, 16),
6499 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6500 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6501 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6502 BPF_MOV64_IMM(BPF_REG_4, 0),
6503 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6504 BPF_MOV64_IMM(BPF_REG_3, 0),
6505 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6506 BPF_MOV64_IMM(BPF_REG_0, 0),
6510 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6513 "helper access to variable memory: stack, bitwise AND, zero included",
6515 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6517 BPF_MOV64_IMM(BPF_REG_2, 16),
6518 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6519 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6520 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6521 BPF_MOV64_IMM(BPF_REG_3, 0),
6522 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6525 .errstr = "invalid indirect read from stack off -64+0 size 64",
6527 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6530 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
6532 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6534 BPF_MOV64_IMM(BPF_REG_2, 16),
6535 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6536 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6537 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
6538 BPF_MOV64_IMM(BPF_REG_4, 0),
6539 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6540 BPF_MOV64_IMM(BPF_REG_3, 0),
6541 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6542 BPF_MOV64_IMM(BPF_REG_0, 0),
6545 .errstr = "invalid stack type R1 off=-64 access_size=65",
6547 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6550 "helper access to variable memory: stack, JMP, correct bounds",
6552 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6554 BPF_MOV64_IMM(BPF_REG_0, 0),
6555 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6556 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6557 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6558 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6559 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6560 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6561 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6562 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6563 BPF_MOV64_IMM(BPF_REG_2, 16),
6564 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6565 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6566 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
6567 BPF_MOV64_IMM(BPF_REG_4, 0),
6568 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6569 BPF_MOV64_IMM(BPF_REG_3, 0),
6570 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6571 BPF_MOV64_IMM(BPF_REG_0, 0),
6575 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6578 "helper access to variable memory: stack, JMP (signed), correct bounds",
6580 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6582 BPF_MOV64_IMM(BPF_REG_0, 0),
6583 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6584 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6585 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6586 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6587 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6588 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6589 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6590 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6591 BPF_MOV64_IMM(BPF_REG_2, 16),
6592 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6593 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6594 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
6595 BPF_MOV64_IMM(BPF_REG_4, 0),
6596 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6597 BPF_MOV64_IMM(BPF_REG_3, 0),
6598 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6599 BPF_MOV64_IMM(BPF_REG_0, 0),
6603 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6606 "helper access to variable memory: stack, JMP, bounds + offset",
6608 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6610 BPF_MOV64_IMM(BPF_REG_2, 16),
6611 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6612 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6613 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
6614 BPF_MOV64_IMM(BPF_REG_4, 0),
6615 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
6616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6617 BPF_MOV64_IMM(BPF_REG_3, 0),
6618 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6619 BPF_MOV64_IMM(BPF_REG_0, 0),
6622 .errstr = "invalid stack type R1 off=-64 access_size=65",
6624 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6627 "helper access to variable memory: stack, JMP, wrong max",
6629 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6630 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6631 BPF_MOV64_IMM(BPF_REG_2, 16),
6632 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6633 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6634 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
6635 BPF_MOV64_IMM(BPF_REG_4, 0),
6636 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6637 BPF_MOV64_IMM(BPF_REG_3, 0),
6638 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6639 BPF_MOV64_IMM(BPF_REG_0, 0),
6642 .errstr = "invalid stack type R1 off=-64 access_size=65",
6644 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6647 "helper access to variable memory: stack, JMP, no max check",
6649 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6650 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6651 BPF_MOV64_IMM(BPF_REG_2, 16),
6652 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6653 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6654 BPF_MOV64_IMM(BPF_REG_4, 0),
6655 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6656 BPF_MOV64_IMM(BPF_REG_3, 0),
6657 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6658 BPF_MOV64_IMM(BPF_REG_0, 0),
6661 /* because max wasn't checked, signed min is negative */
6662 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
6664 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6667 "helper access to variable memory: stack, JMP, no min check",
6669 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6671 BPF_MOV64_IMM(BPF_REG_2, 16),
6672 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6673 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6674 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
6675 BPF_MOV64_IMM(BPF_REG_3, 0),
6676 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6677 BPF_MOV64_IMM(BPF_REG_0, 0),
6680 .errstr = "invalid indirect read from stack off -64+0 size 64",
6682 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6685 "helper access to variable memory: stack, JMP (signed), no min check",
6687 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6689 BPF_MOV64_IMM(BPF_REG_2, 16),
6690 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6691 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6692 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
6693 BPF_MOV64_IMM(BPF_REG_3, 0),
6694 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6695 BPF_MOV64_IMM(BPF_REG_0, 0),
6698 .errstr = "R2 min value is negative",
6700 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6703 "helper access to variable memory: map, JMP, correct bounds",
6705 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6707 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6708 BPF_LD_MAP_FD(BPF_REG_1, 0),
6709 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6711 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6712 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6713 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6714 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6715 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6716 sizeof(struct test_val), 4),
6717 BPF_MOV64_IMM(BPF_REG_4, 0),
6718 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6719 BPF_MOV64_IMM(BPF_REG_3, 0),
6720 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6721 BPF_MOV64_IMM(BPF_REG_0, 0),
6724 .fixup_map2 = { 3 },
6726 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6729 "helper access to variable memory: map, JMP, wrong max",
6731 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6733 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6734 BPF_LD_MAP_FD(BPF_REG_1, 0),
6735 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6736 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6737 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6738 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6739 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6740 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6741 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6742 sizeof(struct test_val) + 1, 4),
6743 BPF_MOV64_IMM(BPF_REG_4, 0),
6744 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6745 BPF_MOV64_IMM(BPF_REG_3, 0),
6746 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6747 BPF_MOV64_IMM(BPF_REG_0, 0),
6750 .fixup_map2 = { 3 },
6751 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
6753 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6756 "helper access to variable memory: map adjusted, JMP, correct bounds",
6758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6760 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6761 BPF_LD_MAP_FD(BPF_REG_1, 0),
6762 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6763 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6766 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6767 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6768 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6769 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6770 sizeof(struct test_val) - 20, 4),
6771 BPF_MOV64_IMM(BPF_REG_4, 0),
6772 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6773 BPF_MOV64_IMM(BPF_REG_3, 0),
6774 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6775 BPF_MOV64_IMM(BPF_REG_0, 0),
6778 .fixup_map2 = { 3 },
6780 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6783 "helper access to variable memory: map adjusted, JMP, wrong max",
6785 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6786 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6787 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6788 BPF_LD_MAP_FD(BPF_REG_1, 0),
6789 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6791 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6792 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6793 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6794 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6795 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6796 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6797 sizeof(struct test_val) - 19, 4),
6798 BPF_MOV64_IMM(BPF_REG_4, 0),
6799 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6800 BPF_MOV64_IMM(BPF_REG_3, 0),
6801 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6802 BPF_MOV64_IMM(BPF_REG_0, 0),
6805 .fixup_map2 = { 3 },
6806 .errstr = "R1 min value is outside of the array range",
6808 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6811 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6813 BPF_MOV64_IMM(BPF_REG_1, 0),
6814 BPF_MOV64_IMM(BPF_REG_2, 0),
6815 BPF_MOV64_IMM(BPF_REG_3, 0),
6816 BPF_MOV64_IMM(BPF_REG_4, 0),
6817 BPF_MOV64_IMM(BPF_REG_5, 0),
6818 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6822 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6825 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6827 BPF_MOV64_IMM(BPF_REG_1, 0),
6828 BPF_MOV64_IMM(BPF_REG_2, 1),
6829 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6830 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6831 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6832 BPF_MOV64_IMM(BPF_REG_3, 0),
6833 BPF_MOV64_IMM(BPF_REG_4, 0),
6834 BPF_MOV64_IMM(BPF_REG_5, 0),
6835 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6838 .errstr = "R1 type=inv expected=fp",
6840 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6843 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6845 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6846 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6847 BPF_MOV64_IMM(BPF_REG_2, 0),
6848 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6849 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
6850 BPF_MOV64_IMM(BPF_REG_3, 0),
6851 BPF_MOV64_IMM(BPF_REG_4, 0),
6852 BPF_MOV64_IMM(BPF_REG_5, 0),
6853 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6857 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6860 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6862 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6863 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6865 BPF_LD_MAP_FD(BPF_REG_1, 0),
6866 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6867 BPF_FUNC_map_lookup_elem),
6868 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6869 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6870 BPF_MOV64_IMM(BPF_REG_2, 0),
6871 BPF_MOV64_IMM(BPF_REG_3, 0),
6872 BPF_MOV64_IMM(BPF_REG_4, 0),
6873 BPF_MOV64_IMM(BPF_REG_5, 0),
6874 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6877 .fixup_map1 = { 3 },
6879 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6882 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6884 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6885 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6887 BPF_LD_MAP_FD(BPF_REG_1, 0),
6888 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6889 BPF_FUNC_map_lookup_elem),
6890 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6891 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6892 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
6893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6895 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6896 BPF_MOV64_IMM(BPF_REG_3, 0),
6897 BPF_MOV64_IMM(BPF_REG_4, 0),
6898 BPF_MOV64_IMM(BPF_REG_5, 0),
6899 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6902 .fixup_map1 = { 3 },
6904 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6907 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6909 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6912 BPF_LD_MAP_FD(BPF_REG_1, 0),
6913 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6914 BPF_FUNC_map_lookup_elem),
6915 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6916 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6917 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6918 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6919 BPF_MOV64_IMM(BPF_REG_3, 0),
6920 BPF_MOV64_IMM(BPF_REG_4, 0),
6921 BPF_MOV64_IMM(BPF_REG_5, 0),
6922 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6925 .fixup_map1 = { 3 },
6927 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6930 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
6932 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6933 offsetof(struct __sk_buff, data)),
6934 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6935 offsetof(struct __sk_buff, data_end)),
6936 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
6937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6938 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
6939 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6940 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
6941 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6942 BPF_MOV64_IMM(BPF_REG_3, 0),
6943 BPF_MOV64_IMM(BPF_REG_4, 0),
6944 BPF_MOV64_IMM(BPF_REG_5, 0),
6945 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6949 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6950 .retval = 0 /* csum_diff of 64-byte packet */,
6953 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6955 BPF_MOV64_IMM(BPF_REG_1, 0),
6956 BPF_MOV64_IMM(BPF_REG_2, 0),
6957 BPF_MOV64_IMM(BPF_REG_3, 0),
6958 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6961 .errstr = "R1 type=inv expected=fp",
6963 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6966 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6968 BPF_MOV64_IMM(BPF_REG_1, 0),
6969 BPF_MOV64_IMM(BPF_REG_2, 1),
6970 BPF_MOV64_IMM(BPF_REG_3, 0),
6971 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6974 .errstr = "R1 type=inv expected=fp",
6976 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6979 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6983 BPF_MOV64_IMM(BPF_REG_2, 0),
6984 BPF_MOV64_IMM(BPF_REG_3, 0),
6985 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6989 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6992 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6994 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6995 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6996 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6997 BPF_LD_MAP_FD(BPF_REG_1, 0),
6998 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6999 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7000 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7001 BPF_MOV64_IMM(BPF_REG_2, 0),
7002 BPF_MOV64_IMM(BPF_REG_3, 0),
7003 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7006 .fixup_map1 = { 3 },
7008 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7011 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7013 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7014 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7015 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7016 BPF_LD_MAP_FD(BPF_REG_1, 0),
7017 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7019 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7020 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7021 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7023 BPF_MOV64_IMM(BPF_REG_3, 0),
7024 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7027 .fixup_map1 = { 3 },
7029 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7032 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7034 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7035 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7037 BPF_LD_MAP_FD(BPF_REG_1, 0),
7038 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7039 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7040 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7041 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7042 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7043 BPF_MOV64_IMM(BPF_REG_3, 0),
7044 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7047 .fixup_map1 = { 3 },
7049 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7052 "helper access to variable memory: 8 bytes leak",
7054 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7056 BPF_MOV64_IMM(BPF_REG_0, 0),
7057 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7058 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7059 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7060 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7061 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7062 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7063 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7064 BPF_MOV64_IMM(BPF_REG_2, 1),
7065 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7066 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7067 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7068 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7069 BPF_MOV64_IMM(BPF_REG_3, 0),
7070 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7071 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7074 .errstr = "invalid indirect read from stack off -64+32 size 64",
7076 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7079 "helper access to variable memory: 8 bytes no leak (init memory)",
7081 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7082 BPF_MOV64_IMM(BPF_REG_0, 0),
7083 BPF_MOV64_IMM(BPF_REG_0, 0),
7084 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7085 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7086 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7087 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7088 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7089 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7090 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7091 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7092 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7093 BPF_MOV64_IMM(BPF_REG_2, 0),
7094 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7095 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7096 BPF_MOV64_IMM(BPF_REG_3, 0),
7097 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7098 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7102 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7105 "invalid and of negative number",
7107 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7108 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7110 BPF_LD_MAP_FD(BPF_REG_1, 0),
7111 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7112 BPF_FUNC_map_lookup_elem),
7113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7114 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7115 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7116 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7117 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7118 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7119 offsetof(struct test_val, foo)),
7122 .fixup_map2 = { 3 },
7123 .errstr = "R0 max value is outside of the array range",
7125 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7128 "invalid range check",
7130 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7133 BPF_LD_MAP_FD(BPF_REG_1, 0),
7134 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7135 BPF_FUNC_map_lookup_elem),
7136 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7137 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7138 BPF_MOV64_IMM(BPF_REG_9, 1),
7139 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7140 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7141 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7142 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7143 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7144 BPF_MOV32_IMM(BPF_REG_3, 1),
7145 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7146 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7147 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7148 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7149 BPF_MOV64_REG(BPF_REG_0, 0),
7152 .fixup_map2 = { 3 },
7153 .errstr = "R0 max value is outside of the array range",
7155 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7158 "map in map access",
7160 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7161 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7163 BPF_LD_MAP_FD(BPF_REG_1, 0),
7164 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7165 BPF_FUNC_map_lookup_elem),
7166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7167 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7168 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7169 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7170 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7171 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7172 BPF_FUNC_map_lookup_elem),
7173 BPF_MOV64_IMM(BPF_REG_0, 0),
7176 .fixup_map_in_map = { 3 },
7180 "invalid inner map pointer",
7182 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7185 BPF_LD_MAP_FD(BPF_REG_1, 0),
7186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7187 BPF_FUNC_map_lookup_elem),
7188 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7189 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7190 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7192 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7193 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7194 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7195 BPF_FUNC_map_lookup_elem),
7196 BPF_MOV64_IMM(BPF_REG_0, 0),
7199 .fixup_map_in_map = { 3 },
7200 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
7204 "forgot null checking on the inner map pointer",
7206 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7209 BPF_LD_MAP_FD(BPF_REG_1, 0),
7210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7211 BPF_FUNC_map_lookup_elem),
7212 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7215 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7217 BPF_FUNC_map_lookup_elem),
7218 BPF_MOV64_IMM(BPF_REG_0, 0),
7221 .fixup_map_in_map = { 3 },
7222 .errstr = "R1 type=map_value_or_null expected=map_ptr",
7226 "ld_abs: check calling conv, r1",
7228 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7229 BPF_MOV64_IMM(BPF_REG_1, 0),
7230 BPF_LD_ABS(BPF_W, -0x200000),
7231 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7234 .errstr = "R1 !read_ok",
7238 "ld_abs: check calling conv, r2",
7240 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7241 BPF_MOV64_IMM(BPF_REG_2, 0),
7242 BPF_LD_ABS(BPF_W, -0x200000),
7243 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7246 .errstr = "R2 !read_ok",
7250 "ld_abs: check calling conv, r3",
7252 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7253 BPF_MOV64_IMM(BPF_REG_3, 0),
7254 BPF_LD_ABS(BPF_W, -0x200000),
7255 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7258 .errstr = "R3 !read_ok",
7262 "ld_abs: check calling conv, r4",
7264 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7265 BPF_MOV64_IMM(BPF_REG_4, 0),
7266 BPF_LD_ABS(BPF_W, -0x200000),
7267 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7270 .errstr = "R4 !read_ok",
7274 "ld_abs: check calling conv, r5",
7276 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7277 BPF_MOV64_IMM(BPF_REG_5, 0),
7278 BPF_LD_ABS(BPF_W, -0x200000),
7279 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7282 .errstr = "R5 !read_ok",
7286 "ld_abs: check calling conv, r7",
7288 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7289 BPF_MOV64_IMM(BPF_REG_7, 0),
7290 BPF_LD_ABS(BPF_W, -0x200000),
7291 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7297 "ld_abs: tests on r6 and skb data reload helper",
7299 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7300 BPF_LD_ABS(BPF_B, 0),
7301 BPF_LD_ABS(BPF_H, 0),
7302 BPF_LD_ABS(BPF_W, 0),
7303 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
7304 BPF_MOV64_IMM(BPF_REG_6, 0),
7305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
7306 BPF_MOV64_IMM(BPF_REG_2, 1),
7307 BPF_MOV64_IMM(BPF_REG_3, 2),
7308 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7309 BPF_FUNC_skb_vlan_push),
7310 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
7311 BPF_LD_ABS(BPF_B, 0),
7312 BPF_LD_ABS(BPF_H, 0),
7313 BPF_LD_ABS(BPF_W, 0),
7314 BPF_MOV64_IMM(BPF_REG_0, 42),
7317 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7319 .retval = 42 /* ultimate return value */,
7322 "ld_ind: check calling conv, r1",
7324 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7325 BPF_MOV64_IMM(BPF_REG_1, 1),
7326 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
7327 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7330 .errstr = "R1 !read_ok",
7334 "ld_ind: check calling conv, r2",
7336 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7337 BPF_MOV64_IMM(BPF_REG_2, 1),
7338 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
7339 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7342 .errstr = "R2 !read_ok",
7346 "ld_ind: check calling conv, r3",
7348 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7349 BPF_MOV64_IMM(BPF_REG_3, 1),
7350 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
7351 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7354 .errstr = "R3 !read_ok",
7358 "ld_ind: check calling conv, r4",
7360 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7361 BPF_MOV64_IMM(BPF_REG_4, 1),
7362 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
7363 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7366 .errstr = "R4 !read_ok",
7370 "ld_ind: check calling conv, r5",
7372 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7373 BPF_MOV64_IMM(BPF_REG_5, 1),
7374 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
7375 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7378 .errstr = "R5 !read_ok",
7382 "ld_ind: check calling conv, r7",
7384 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7385 BPF_MOV64_IMM(BPF_REG_7, 1),
7386 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
7387 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7394 "check bpf_perf_event_data->sample_period byte load permitted",
7396 BPF_MOV64_IMM(BPF_REG_0, 0),
7397 #if __BYTE_ORDER == __LITTLE_ENDIAN
7398 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7399 offsetof(struct bpf_perf_event_data, sample_period)),
7401 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7402 offsetof(struct bpf_perf_event_data, sample_period) + 7),
7407 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7410 "check bpf_perf_event_data->sample_period half load permitted",
7412 BPF_MOV64_IMM(BPF_REG_0, 0),
7413 #if __BYTE_ORDER == __LITTLE_ENDIAN
7414 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7415 offsetof(struct bpf_perf_event_data, sample_period)),
7417 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7418 offsetof(struct bpf_perf_event_data, sample_period) + 6),
7423 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7426 "check bpf_perf_event_data->sample_period word load permitted",
7428 BPF_MOV64_IMM(BPF_REG_0, 0),
7429 #if __BYTE_ORDER == __LITTLE_ENDIAN
7430 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7431 offsetof(struct bpf_perf_event_data, sample_period)),
7433 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7434 offsetof(struct bpf_perf_event_data, sample_period) + 4),
7439 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7442 "check bpf_perf_event_data->sample_period dword load permitted",
7444 BPF_MOV64_IMM(BPF_REG_0, 0),
7445 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
7446 offsetof(struct bpf_perf_event_data, sample_period)),
7450 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7453 "check skb->data half load not permitted",
7455 BPF_MOV64_IMM(BPF_REG_0, 0),
7456 #if __BYTE_ORDER == __LITTLE_ENDIAN
7457 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7458 offsetof(struct __sk_buff, data)),
7460 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7461 offsetof(struct __sk_buff, data) + 2),
7466 .errstr = "invalid bpf_context access",
7469 "check skb->tc_classid half load not permitted for lwt prog",
7471 BPF_MOV64_IMM(BPF_REG_0, 0),
7472 #if __BYTE_ORDER == __LITTLE_ENDIAN
7473 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7474 offsetof(struct __sk_buff, tc_classid)),
7476 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7477 offsetof(struct __sk_buff, tc_classid) + 2),
7482 .errstr = "invalid bpf_context access",
7483 .prog_type = BPF_PROG_TYPE_LWT_IN,
7486 "bounds checks mixing signed and unsigned, positive bounds",
7488 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7489 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7491 BPF_LD_MAP_FD(BPF_REG_1, 0),
7492 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7493 BPF_FUNC_map_lookup_elem),
7494 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7495 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7496 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7497 BPF_MOV64_IMM(BPF_REG_2, 2),
7498 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
7499 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
7500 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7501 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7502 BPF_MOV64_IMM(BPF_REG_0, 0),
7505 .fixup_map1 = { 3 },
7506 .errstr = "unbounded min value",
7510 "bounds checks mixing signed and unsigned",
7512 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7513 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7515 BPF_LD_MAP_FD(BPF_REG_1, 0),
7516 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7517 BPF_FUNC_map_lookup_elem),
7518 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7519 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7520 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7521 BPF_MOV64_IMM(BPF_REG_2, -1),
7522 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7523 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7524 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7525 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7526 BPF_MOV64_IMM(BPF_REG_0, 0),
7529 .fixup_map1 = { 3 },
7530 .errstr = "unbounded min value",
7534 "bounds checks mixing signed and unsigned, variant 2",
7536 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7537 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7539 BPF_LD_MAP_FD(BPF_REG_1, 0),
7540 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7541 BPF_FUNC_map_lookup_elem),
7542 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7543 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7544 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7545 BPF_MOV64_IMM(BPF_REG_2, -1),
7546 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7547 BPF_MOV64_IMM(BPF_REG_8, 0),
7548 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
7549 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7550 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7551 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7552 BPF_MOV64_IMM(BPF_REG_0, 0),
7555 .fixup_map1 = { 3 },
7556 .errstr = "unbounded min value",
7560 "bounds checks mixing signed and unsigned, variant 3",
7562 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7563 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7565 BPF_LD_MAP_FD(BPF_REG_1, 0),
7566 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7567 BPF_FUNC_map_lookup_elem),
7568 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7569 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7570 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7571 BPF_MOV64_IMM(BPF_REG_2, -1),
7572 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
7573 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
7574 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7575 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7576 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7577 BPF_MOV64_IMM(BPF_REG_0, 0),
7580 .fixup_map1 = { 3 },
7581 .errstr = "unbounded min value",
7585 "bounds checks mixing signed and unsigned, variant 4",
7587 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7588 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7590 BPF_LD_MAP_FD(BPF_REG_1, 0),
7591 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7592 BPF_FUNC_map_lookup_elem),
7593 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7594 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7595 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7596 BPF_MOV64_IMM(BPF_REG_2, 1),
7597 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
7598 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7599 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7600 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7601 BPF_MOV64_IMM(BPF_REG_0, 0),
7604 .fixup_map1 = { 3 },
7608 "bounds checks mixing signed and unsigned, variant 5",
7610 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7611 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7613 BPF_LD_MAP_FD(BPF_REG_1, 0),
7614 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7615 BPF_FUNC_map_lookup_elem),
7616 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7617 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7618 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7619 BPF_MOV64_IMM(BPF_REG_2, -1),
7620 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7621 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
7622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
7623 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7624 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7625 BPF_MOV64_IMM(BPF_REG_0, 0),
7628 .fixup_map1 = { 3 },
7629 .errstr = "unbounded min value",
7633 "bounds checks mixing signed and unsigned, variant 6",
7635 BPF_MOV64_IMM(BPF_REG_2, 0),
7636 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
7637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
7638 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7639 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
7640 BPF_MOV64_IMM(BPF_REG_6, -1),
7641 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
7642 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
7643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7644 BPF_MOV64_IMM(BPF_REG_5, 0),
7645 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
7646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7647 BPF_FUNC_skb_load_bytes),
7648 BPF_MOV64_IMM(BPF_REG_0, 0),
7651 .errstr = "R4 min value is negative, either use unsigned",
7655 "bounds checks mixing signed and unsigned, variant 7",
7657 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7658 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7660 BPF_LD_MAP_FD(BPF_REG_1, 0),
7661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7662 BPF_FUNC_map_lookup_elem),
7663 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7664 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7665 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7666 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
7667 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7668 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7669 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7670 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7671 BPF_MOV64_IMM(BPF_REG_0, 0),
7674 .fixup_map1 = { 3 },
7678 "bounds checks mixing signed and unsigned, variant 8",
7680 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7681 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7683 BPF_LD_MAP_FD(BPF_REG_1, 0),
7684 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7685 BPF_FUNC_map_lookup_elem),
7686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7687 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7688 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7689 BPF_MOV64_IMM(BPF_REG_2, -1),
7690 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7691 BPF_MOV64_IMM(BPF_REG_0, 0),
7693 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7694 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7695 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7696 BPF_MOV64_IMM(BPF_REG_0, 0),
7699 .fixup_map1 = { 3 },
7700 .errstr = "unbounded min value",
7704 "bounds checks mixing signed and unsigned, variant 9",
7706 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7709 BPF_LD_MAP_FD(BPF_REG_1, 0),
7710 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7711 BPF_FUNC_map_lookup_elem),
7712 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7713 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7714 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7715 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
7716 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7717 BPF_MOV64_IMM(BPF_REG_0, 0),
7719 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7720 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7721 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7722 BPF_MOV64_IMM(BPF_REG_0, 0),
7725 .fixup_map1 = { 3 },
7729 "bounds checks mixing signed and unsigned, variant 10",
7731 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7734 BPF_LD_MAP_FD(BPF_REG_1, 0),
7735 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7736 BPF_FUNC_map_lookup_elem),
7737 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7738 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7739 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7740 BPF_MOV64_IMM(BPF_REG_2, 0),
7741 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7742 BPF_MOV64_IMM(BPF_REG_0, 0),
7744 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7745 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7746 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7747 BPF_MOV64_IMM(BPF_REG_0, 0),
7750 .fixup_map1 = { 3 },
7751 .errstr = "unbounded min value",
7755 "bounds checks mixing signed and unsigned, variant 11",
7757 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7760 BPF_LD_MAP_FD(BPF_REG_1, 0),
7761 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7762 BPF_FUNC_map_lookup_elem),
7763 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7764 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7765 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7766 BPF_MOV64_IMM(BPF_REG_2, -1),
7767 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7769 BPF_MOV64_IMM(BPF_REG_0, 0),
7771 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7772 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7773 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7774 BPF_MOV64_IMM(BPF_REG_0, 0),
7777 .fixup_map1 = { 3 },
7778 .errstr = "unbounded min value",
7782 "bounds checks mixing signed and unsigned, variant 12",
7784 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7785 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7786 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7787 BPF_LD_MAP_FD(BPF_REG_1, 0),
7788 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7789 BPF_FUNC_map_lookup_elem),
7790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7791 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7792 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7793 BPF_MOV64_IMM(BPF_REG_2, -6),
7794 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7795 BPF_MOV64_IMM(BPF_REG_0, 0),
7797 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7798 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7799 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7800 BPF_MOV64_IMM(BPF_REG_0, 0),
7803 .fixup_map1 = { 3 },
7804 .errstr = "unbounded min value",
7808 "bounds checks mixing signed and unsigned, variant 13",
7810 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7811 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7812 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7813 BPF_LD_MAP_FD(BPF_REG_1, 0),
7814 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7815 BPF_FUNC_map_lookup_elem),
7816 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7817 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7818 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7819 BPF_MOV64_IMM(BPF_REG_2, 2),
7820 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7821 BPF_MOV64_IMM(BPF_REG_7, 1),
7822 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
7823 BPF_MOV64_IMM(BPF_REG_0, 0),
7825 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
7826 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
7827 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
7828 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7829 BPF_MOV64_IMM(BPF_REG_0, 0),
7832 .fixup_map1 = { 3 },
7833 .errstr = "unbounded min value",
7837 "bounds checks mixing signed and unsigned, variant 14",
7839 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
7840 offsetof(struct __sk_buff, mark)),
7841 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7842 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7844 BPF_LD_MAP_FD(BPF_REG_1, 0),
7845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7846 BPF_FUNC_map_lookup_elem),
7847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7848 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7849 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7850 BPF_MOV64_IMM(BPF_REG_2, -1),
7851 BPF_MOV64_IMM(BPF_REG_8, 2),
7852 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
7853 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
7854 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7855 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7856 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7857 BPF_MOV64_IMM(BPF_REG_0, 0),
7859 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
7860 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
7862 .fixup_map1 = { 4 },
7863 .errstr = "unbounded min value",
7867 "bounds checks mixing signed and unsigned, variant 15",
7869 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7870 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7872 BPF_LD_MAP_FD(BPF_REG_1, 0),
7873 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7874 BPF_FUNC_map_lookup_elem),
7875 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7876 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7877 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7878 BPF_MOV64_IMM(BPF_REG_2, -6),
7879 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7880 BPF_MOV64_IMM(BPF_REG_0, 0),
7882 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7883 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
7884 BPF_MOV64_IMM(BPF_REG_0, 0),
7886 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7887 BPF_MOV64_IMM(BPF_REG_0, 0),
7890 .fixup_map1 = { 3 },
7891 .errstr = "unbounded min value",
7893 .result_unpriv = REJECT,
7896 "subtraction bounds (map value) variant 1",
7898 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7901 BPF_LD_MAP_FD(BPF_REG_1, 0),
7902 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7903 BPF_FUNC_map_lookup_elem),
7904 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7905 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7906 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
7907 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7908 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
7909 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7910 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
7911 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7912 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7914 BPF_MOV64_IMM(BPF_REG_0, 0),
7917 .fixup_map1 = { 3 },
7918 .errstr = "R0 max value is outside of the array range",
7922 "subtraction bounds (map value) variant 2",
7924 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7925 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7927 BPF_LD_MAP_FD(BPF_REG_1, 0),
7928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7929 BPF_FUNC_map_lookup_elem),
7930 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7931 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7932 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
7933 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7934 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
7935 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7936 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7937 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7939 BPF_MOV64_IMM(BPF_REG_0, 0),
7942 .fixup_map1 = { 3 },
7943 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7944 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
7948 "bounds check based on zero-extended MOV",
7950 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7951 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7953 BPF_LD_MAP_FD(BPF_REG_1, 0),
7954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7955 BPF_FUNC_map_lookup_elem),
7956 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7957 /* r2 = 0x0000'0000'ffff'ffff */
7958 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
7960 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7962 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7963 /* access at offset 0 */
7964 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7966 BPF_MOV64_IMM(BPF_REG_0, 0),
7969 .fixup_map1 = { 3 },
7973 "bounds check based on sign-extended MOV. test1",
7975 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7976 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7978 BPF_LD_MAP_FD(BPF_REG_1, 0),
7979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7980 BPF_FUNC_map_lookup_elem),
7981 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7982 /* r2 = 0xffff'ffff'ffff'ffff */
7983 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7984 /* r2 = 0xffff'ffff */
7985 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7986 /* r0 = <oob pointer> */
7987 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7988 /* access to OOB pointer */
7989 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7991 BPF_MOV64_IMM(BPF_REG_0, 0),
7994 .fixup_map1 = { 3 },
7995 .errstr = "map_value pointer and 4294967295",
7999 "bounds check based on sign-extended MOV. test2",
8001 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8004 BPF_LD_MAP_FD(BPF_REG_1, 0),
8005 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8006 BPF_FUNC_map_lookup_elem),
8007 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8008 /* r2 = 0xffff'ffff'ffff'ffff */
8009 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8010 /* r2 = 0xfff'ffff */
8011 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8012 /* r0 = <oob pointer> */
8013 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8014 /* access to OOB pointer */
8015 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8017 BPF_MOV64_IMM(BPF_REG_0, 0),
8020 .fixup_map1 = { 3 },
8021 .errstr = "R0 min value is outside of the array range",
8025 "bounds check based on reg_off + var_off + insn_off. test1",
8027 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8028 offsetof(struct __sk_buff, mark)),
8029 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8030 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8032 BPF_LD_MAP_FD(BPF_REG_1, 0),
8033 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8034 BPF_FUNC_map_lookup_elem),
8035 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8036 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8037 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8038 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8040 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8041 BPF_MOV64_IMM(BPF_REG_0, 0),
8044 .fixup_map1 = { 4 },
8045 .errstr = "value_size=8 off=1073741825",
8047 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8050 "bounds check based on reg_off + var_off + insn_off. test2",
8052 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8053 offsetof(struct __sk_buff, mark)),
8054 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8055 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8057 BPF_LD_MAP_FD(BPF_REG_1, 0),
8058 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8059 BPF_FUNC_map_lookup_elem),
8060 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8061 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8063 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8064 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8065 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8066 BPF_MOV64_IMM(BPF_REG_0, 0),
8069 .fixup_map1 = { 4 },
8070 .errstr = "value 1073741823",
8072 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8075 "bounds check after truncation of non-boundary-crossing range",
8077 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8078 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8079 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8080 BPF_LD_MAP_FD(BPF_REG_1, 0),
8081 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8082 BPF_FUNC_map_lookup_elem),
8083 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8084 /* r1 = [0x00, 0xff] */
8085 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8086 BPF_MOV64_IMM(BPF_REG_2, 1),
8087 /* r2 = 0x10'0000'0000 */
8088 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8089 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8090 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8091 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8092 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8093 /* r1 = [0x00, 0xff] */
8094 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8096 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8098 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8099 /* access at offset 0 */
8100 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8102 BPF_MOV64_IMM(BPF_REG_0, 0),
8105 .fixup_map1 = { 3 },
8109 "bounds check after truncation of boundary-crossing range (1)",
8111 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8112 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8114 BPF_LD_MAP_FD(BPF_REG_1, 0),
8115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8116 BPF_FUNC_map_lookup_elem),
8117 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8118 /* r1 = [0x00, 0xff] */
8119 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8121 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8123 /* r1 = [0xffff'ff80, 0xffff'ffff] or
8124 * [0x0000'0000, 0x0000'007f]
8126 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8127 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8128 /* r1 = [0x00, 0xff] or
8129 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8131 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8133 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8135 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8136 /* no-op or OOB pointer computation */
8137 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8138 /* potentially OOB access */
8139 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8141 BPF_MOV64_IMM(BPF_REG_0, 0),
8144 .fixup_map1 = { 3 },
8145 /* not actually fully unbounded, but the bound is very high */
8146 .errstr = "R0 unbounded memory access",
8150 "bounds check after truncation of boundary-crossing range (2)",
8152 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8153 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8155 BPF_LD_MAP_FD(BPF_REG_1, 0),
8156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8157 BPF_FUNC_map_lookup_elem),
8158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8159 /* r1 = [0x00, 0xff] */
8160 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8162 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8163 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8164 /* r1 = [0xffff'ff80, 0xffff'ffff] or
8165 * [0x0000'0000, 0x0000'007f]
8166 * difference to previous test: truncation via MOV32
8169 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8170 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8171 /* r1 = [0x00, 0xff] or
8172 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8174 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8176 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8178 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8179 /* no-op or OOB pointer computation */
8180 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8181 /* potentially OOB access */
8182 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8184 BPF_MOV64_IMM(BPF_REG_0, 0),
8187 .fixup_map1 = { 3 },
8188 /* not actually fully unbounded, but the bound is very high */
8189 .errstr = "R0 unbounded memory access",
8193 "bounds check after wrapping 32-bit addition",
8195 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8196 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8197 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8198 BPF_LD_MAP_FD(BPF_REG_1, 0),
8199 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8200 BPF_FUNC_map_lookup_elem),
8201 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8202 /* r1 = 0x7fff'ffff */
8203 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
8204 /* r1 = 0xffff'fffe */
8205 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8207 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
8209 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8210 /* access at offset 0 */
8211 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8213 BPF_MOV64_IMM(BPF_REG_0, 0),
8216 .fixup_map1 = { 3 },
8220 "bounds check after shift with oversized count operand",
8222 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8223 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8224 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8225 BPF_LD_MAP_FD(BPF_REG_1, 0),
8226 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8227 BPF_FUNC_map_lookup_elem),
8228 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8229 BPF_MOV64_IMM(BPF_REG_2, 32),
8230 BPF_MOV64_IMM(BPF_REG_1, 1),
8231 /* r1 = (u32)1 << (u32)32 = ? */
8232 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
8233 /* r1 = [0x0000, 0xffff] */
8234 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
8235 /* computes unknown pointer, potentially OOB */
8236 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8237 /* potentially OOB access */
8238 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8240 BPF_MOV64_IMM(BPF_REG_0, 0),
8243 .fixup_map1 = { 3 },
8244 .errstr = "R0 max value is outside of the array range",
8248 "bounds check after right shift of maybe-negative number",
8250 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8251 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8253 BPF_LD_MAP_FD(BPF_REG_1, 0),
8254 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8255 BPF_FUNC_map_lookup_elem),
8256 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8257 /* r1 = [0x00, 0xff] */
8258 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8259 /* r1 = [-0x01, 0xfe] */
8260 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
8261 /* r1 = 0 or 0xff'ffff'ffff'ffff */
8262 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8263 /* r1 = 0 or 0xffff'ffff'ffff */
8264 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8265 /* computes unknown pointer, potentially OOB */
8266 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8267 /* potentially OOB access */
8268 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8270 BPF_MOV64_IMM(BPF_REG_0, 0),
8273 .fixup_map1 = { 3 },
8274 .errstr = "R0 unbounded memory access",
8278 "bounds check map access with off+size signed 32bit overflow. test1",
8280 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8281 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8283 BPF_LD_MAP_FD(BPF_REG_1, 0),
8284 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8285 BPF_FUNC_map_lookup_elem),
8286 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
8289 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8293 .fixup_map1 = { 3 },
8294 .errstr = "map_value pointer and 2147483646",
8298 "bounds check map access with off+size signed 32bit overflow. test2",
8300 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8301 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8302 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8303 BPF_LD_MAP_FD(BPF_REG_1, 0),
8304 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8305 BPF_FUNC_map_lookup_elem),
8306 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8309 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8311 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8315 .fixup_map1 = { 3 },
8316 .errstr = "pointer offset 1073741822",
8317 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
8321 "bounds check map access with off+size signed 32bit overflow. test3",
8323 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8324 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8326 BPF_LD_MAP_FD(BPF_REG_1, 0),
8327 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8328 BPF_FUNC_map_lookup_elem),
8329 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8331 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8332 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8333 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8337 .fixup_map1 = { 3 },
8338 .errstr = "pointer offset -1073741822",
8339 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
8343 "bounds check map access with off+size signed 32bit overflow. test4",
8345 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8346 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8348 BPF_LD_MAP_FD(BPF_REG_1, 0),
8349 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8350 BPF_FUNC_map_lookup_elem),
8351 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8353 BPF_MOV64_IMM(BPF_REG_1, 1000000),
8354 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
8355 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8356 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8360 .fixup_map1 = { 3 },
8361 .errstr = "map_value pointer and 1000000000000",
8365 "pointer/scalar confusion in state equality check (way 1)",
8367 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8368 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8369 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8370 BPF_LD_MAP_FD(BPF_REG_1, 0),
8371 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8372 BPF_FUNC_map_lookup_elem),
8373 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8374 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8376 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8380 .fixup_map1 = { 3 },
8382 .retval = POINTER_VALUE,
8383 .result_unpriv = REJECT,
8384 .errstr_unpriv = "R0 leaks addr as return value"
8387 "pointer/scalar confusion in state equality check (way 2)",
8389 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8390 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8392 BPF_LD_MAP_FD(BPF_REG_1, 0),
8393 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8394 BPF_FUNC_map_lookup_elem),
8395 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
8396 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8398 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8401 .fixup_map1 = { 3 },
8403 .retval = POINTER_VALUE,
8404 .result_unpriv = REJECT,
8405 .errstr_unpriv = "R0 leaks addr as return value"
8408 "variable-offset ctx access",
8410 /* Get an unknown value */
8411 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8412 /* Make it small and 4-byte aligned */
8413 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8414 /* add it to skb. We now have either &skb->len or
8415 * &skb->pkt_type, but we don't know which
8417 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8418 /* dereference it */
8419 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8422 .errstr = "variable ctx access var_off=(0x0; 0x4)",
8424 .prog_type = BPF_PROG_TYPE_LWT_IN,
8427 "variable-offset stack access",
8429 /* Fill the top 8 bytes of the stack */
8430 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8431 /* Get an unknown value */
8432 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8433 /* Make it small and 4-byte aligned */
8434 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8435 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8436 /* add it to fp. We now have either fp-4 or fp-8, but
8437 * we don't know which
8439 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8440 /* dereference it */
8441 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
8444 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
8446 .prog_type = BPF_PROG_TYPE_LWT_IN,
8449 "indirect variable-offset stack access",
8451 /* Fill the top 8 bytes of the stack */
8452 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8453 /* Get an unknown value */
8454 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8455 /* Make it small and 4-byte aligned */
8456 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8457 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8458 /* add it to fp. We now have either fp-4 or fp-8, but
8459 * we don't know which
8461 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8462 /* dereference it indirectly */
8463 BPF_LD_MAP_FD(BPF_REG_1, 0),
8464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8465 BPF_FUNC_map_lookup_elem),
8466 BPF_MOV64_IMM(BPF_REG_0, 0),
8469 .fixup_map1 = { 5 },
8470 .errstr = "variable stack read R2",
8472 .prog_type = BPF_PROG_TYPE_LWT_IN,
8475 "direct stack access with 32-bit wraparound. test1",
8477 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8480 BPF_MOV32_IMM(BPF_REG_0, 0),
8481 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8484 .errstr = "fp pointer and 2147483647",
8488 "direct stack access with 32-bit wraparound. test2",
8490 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8492 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8493 BPF_MOV32_IMM(BPF_REG_0, 0),
8494 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8497 .errstr = "fp pointer and 1073741823",
8501 "direct stack access with 32-bit wraparound. test3",
8503 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8506 BPF_MOV32_IMM(BPF_REG_0, 0),
8507 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8510 .errstr = "fp pointer offset 1073741822",
8511 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
8515 "liveness pruning and write screening",
8517 /* Get an unknown value */
8518 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8519 /* branch conditions teach us nothing about R2 */
8520 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8521 BPF_MOV64_IMM(BPF_REG_0, 0),
8522 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8523 BPF_MOV64_IMM(BPF_REG_0, 0),
8526 .errstr = "R0 !read_ok",
8528 .prog_type = BPF_PROG_TYPE_LWT_IN,
8531 "varlen_map_value_access pruning",
8533 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8534 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8536 BPF_LD_MAP_FD(BPF_REG_1, 0),
8537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8538 BPF_FUNC_map_lookup_elem),
8539 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8540 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
8541 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
8542 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
8543 BPF_MOV32_IMM(BPF_REG_1, 0),
8544 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
8545 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8546 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
8547 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
8548 offsetof(struct test_val, foo)),
8551 .fixup_map2 = { 3 },
8552 .errstr_unpriv = "R0 leaks addr",
8553 .errstr = "R0 unbounded memory access",
8554 .result_unpriv = REJECT,
8556 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8559 "invalid 64-bit BPF_END",
8561 BPF_MOV32_IMM(BPF_REG_0, 0),
8563 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
8564 .dst_reg = BPF_REG_0,
8571 .errstr = "unknown opcode d7",
8575 "XDP, using ifindex from netdev",
8577 BPF_MOV64_IMM(BPF_REG_0, 0),
8578 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8579 offsetof(struct xdp_md, ingress_ifindex)),
8580 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
8581 BPF_MOV64_IMM(BPF_REG_0, 1),
8585 .prog_type = BPF_PROG_TYPE_XDP,
8589 "meta access, test1",
8591 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8592 offsetof(struct xdp_md, data_meta)),
8593 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8594 offsetof(struct xdp_md, data)),
8595 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8597 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8598 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8599 BPF_MOV64_IMM(BPF_REG_0, 0),
8603 .prog_type = BPF_PROG_TYPE_XDP,
8606 "meta access, test2",
8608 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8609 offsetof(struct xdp_md, data_meta)),
8610 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8611 offsetof(struct xdp_md, data)),
8612 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8613 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
8614 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8616 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8617 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8618 BPF_MOV64_IMM(BPF_REG_0, 0),
8622 .errstr = "invalid access to packet, off=-8",
8623 .prog_type = BPF_PROG_TYPE_XDP,
8626 "meta access, test3",
8628 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8629 offsetof(struct xdp_md, data_meta)),
8630 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8631 offsetof(struct xdp_md, data_end)),
8632 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8633 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8634 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8635 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8636 BPF_MOV64_IMM(BPF_REG_0, 0),
8640 .errstr = "invalid access to packet",
8641 .prog_type = BPF_PROG_TYPE_XDP,
8644 "meta access, test4",
8646 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8647 offsetof(struct xdp_md, data_meta)),
8648 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8649 offsetof(struct xdp_md, data_end)),
8650 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8651 offsetof(struct xdp_md, data)),
8652 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8654 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8655 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8656 BPF_MOV64_IMM(BPF_REG_0, 0),
8660 .errstr = "invalid access to packet",
8661 .prog_type = BPF_PROG_TYPE_XDP,
8664 "meta access, test5",
8666 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8667 offsetof(struct xdp_md, data_meta)),
8668 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8669 offsetof(struct xdp_md, data)),
8670 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8672 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
8673 BPF_MOV64_IMM(BPF_REG_2, -8),
8674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8675 BPF_FUNC_xdp_adjust_meta),
8676 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8677 BPF_MOV64_IMM(BPF_REG_0, 0),
8681 .errstr = "R3 !read_ok",
8682 .prog_type = BPF_PROG_TYPE_XDP,
8685 "meta access, test6",
8687 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8688 offsetof(struct xdp_md, data_meta)),
8689 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8690 offsetof(struct xdp_md, data)),
8691 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8692 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8693 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8695 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
8696 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8697 BPF_MOV64_IMM(BPF_REG_0, 0),
8701 .errstr = "invalid access to packet",
8702 .prog_type = BPF_PROG_TYPE_XDP,
8705 "meta access, test7",
8707 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8708 offsetof(struct xdp_md, data_meta)),
8709 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8710 offsetof(struct xdp_md, data)),
8711 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8712 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8713 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8715 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8716 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8717 BPF_MOV64_IMM(BPF_REG_0, 0),
8721 .prog_type = BPF_PROG_TYPE_XDP,
8724 "meta access, test8",
8726 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8727 offsetof(struct xdp_md, data_meta)),
8728 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8729 offsetof(struct xdp_md, data)),
8730 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8731 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8732 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8733 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8734 BPF_MOV64_IMM(BPF_REG_0, 0),
8738 .prog_type = BPF_PROG_TYPE_XDP,
8741 "meta access, test9",
8743 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8744 offsetof(struct xdp_md, data_meta)),
8745 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8746 offsetof(struct xdp_md, data)),
8747 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8749 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8750 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8751 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8752 BPF_MOV64_IMM(BPF_REG_0, 0),
8756 .errstr = "invalid access to packet",
8757 .prog_type = BPF_PROG_TYPE_XDP,
8760 "meta access, test10",
8762 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8763 offsetof(struct xdp_md, data_meta)),
8764 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8765 offsetof(struct xdp_md, data)),
8766 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8767 offsetof(struct xdp_md, data_end)),
8768 BPF_MOV64_IMM(BPF_REG_5, 42),
8769 BPF_MOV64_IMM(BPF_REG_6, 24),
8770 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8771 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8772 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8773 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8774 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
8775 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8776 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8778 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
8779 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
8780 BPF_MOV64_IMM(BPF_REG_0, 0),
8784 .errstr = "invalid access to packet",
8785 .prog_type = BPF_PROG_TYPE_XDP,
8788 "meta access, test11",
8790 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8791 offsetof(struct xdp_md, data_meta)),
8792 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8793 offsetof(struct xdp_md, data)),
8794 BPF_MOV64_IMM(BPF_REG_5, 42),
8795 BPF_MOV64_IMM(BPF_REG_6, 24),
8796 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8797 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8798 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8799 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8800 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
8801 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8802 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8803 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8804 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
8805 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
8806 BPF_MOV64_IMM(BPF_REG_0, 0),
8810 .prog_type = BPF_PROG_TYPE_XDP,
8813 "meta access, test12",
8815 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8816 offsetof(struct xdp_md, data_meta)),
8817 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8818 offsetof(struct xdp_md, data)),
8819 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8820 offsetof(struct xdp_md, data_end)),
8821 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8823 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
8824 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8825 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8827 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
8828 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8829 BPF_MOV64_IMM(BPF_REG_0, 0),
8833 .prog_type = BPF_PROG_TYPE_XDP,
8836 "arithmetic ops make PTR_TO_CTX unusable",
8838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
8839 offsetof(struct __sk_buff, data) -
8840 offsetof(struct __sk_buff, mark)),
8841 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8842 offsetof(struct __sk_buff, mark)),
8845 .errstr = "dereference of modified ctx ptr",
8847 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8850 "pkt_end - pkt_start is allowed",
8852 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8853 offsetof(struct __sk_buff, data_end)),
8854 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8855 offsetof(struct __sk_buff, data)),
8856 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
8860 .retval = TEST_DATA_LEN,
8861 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8864 "XDP pkt read, pkt_end mangling, bad access 1",
8866 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8867 offsetof(struct xdp_md, data)),
8868 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8869 offsetof(struct xdp_md, data_end)),
8870 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
8873 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8874 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8875 BPF_MOV64_IMM(BPF_REG_0, 0),
8878 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8880 .prog_type = BPF_PROG_TYPE_XDP,
8883 "XDP pkt read, pkt_end mangling, bad access 2",
8885 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8886 offsetof(struct xdp_md, data)),
8887 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8888 offsetof(struct xdp_md, data_end)),
8889 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8891 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
8892 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8893 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8894 BPF_MOV64_IMM(BPF_REG_0, 0),
8897 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8899 .prog_type = BPF_PROG_TYPE_XDP,
8902 "XDP pkt read, pkt_data' > pkt_end, good access",
8904 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8905 offsetof(struct xdp_md, data)),
8906 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8907 offsetof(struct xdp_md, data_end)),
8908 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8909 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8910 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8911 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8912 BPF_MOV64_IMM(BPF_REG_0, 0),
8916 .prog_type = BPF_PROG_TYPE_XDP,
8919 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
8921 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8922 offsetof(struct xdp_md, data)),
8923 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8924 offsetof(struct xdp_md, data_end)),
8925 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8927 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8928 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8929 BPF_MOV64_IMM(BPF_REG_0, 0),
8932 .errstr = "R1 offset is outside of the packet",
8934 .prog_type = BPF_PROG_TYPE_XDP,
8935 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8938 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
8940 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8941 offsetof(struct xdp_md, data)),
8942 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8943 offsetof(struct xdp_md, data_end)),
8944 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8946 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8947 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8948 BPF_MOV64_IMM(BPF_REG_0, 0),
8951 .errstr = "R1 offset is outside of the packet",
8953 .prog_type = BPF_PROG_TYPE_XDP,
8956 "XDP pkt read, pkt_end > pkt_data', good access",
8958 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8959 offsetof(struct xdp_md, data)),
8960 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8961 offsetof(struct xdp_md, data_end)),
8962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8964 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8965 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8966 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8967 BPF_MOV64_IMM(BPF_REG_0, 0),
8971 .prog_type = BPF_PROG_TYPE_XDP,
8972 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8975 "XDP pkt read, pkt_end > pkt_data', bad access 1",
8977 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8978 offsetof(struct xdp_md, data)),
8979 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8980 offsetof(struct xdp_md, data_end)),
8981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8983 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8984 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8985 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8986 BPF_MOV64_IMM(BPF_REG_0, 0),
8989 .errstr = "R1 offset is outside of the packet",
8991 .prog_type = BPF_PROG_TYPE_XDP,
8994 "XDP pkt read, pkt_end > pkt_data', bad access 2",
8996 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8997 offsetof(struct xdp_md, data)),
8998 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8999 offsetof(struct xdp_md, data_end)),
9000 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9002 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9003 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9004 BPF_MOV64_IMM(BPF_REG_0, 0),
9007 .errstr = "R1 offset is outside of the packet",
9009 .prog_type = BPF_PROG_TYPE_XDP,
9012 "XDP pkt read, pkt_data' < pkt_end, good access",
9014 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9015 offsetof(struct xdp_md, data)),
9016 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9017 offsetof(struct xdp_md, data_end)),
9018 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9019 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9020 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9021 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9022 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9023 BPF_MOV64_IMM(BPF_REG_0, 0),
9027 .prog_type = BPF_PROG_TYPE_XDP,
9028 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9031 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
9033 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9034 offsetof(struct xdp_md, data)),
9035 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9036 offsetof(struct xdp_md, data_end)),
9037 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9039 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9040 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9041 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9042 BPF_MOV64_IMM(BPF_REG_0, 0),
9045 .errstr = "R1 offset is outside of the packet",
9047 .prog_type = BPF_PROG_TYPE_XDP,
9050 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
9052 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9053 offsetof(struct xdp_md, data)),
9054 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9055 offsetof(struct xdp_md, data_end)),
9056 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9058 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9059 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9060 BPF_MOV64_IMM(BPF_REG_0, 0),
9063 .errstr = "R1 offset is outside of the packet",
9065 .prog_type = BPF_PROG_TYPE_XDP,
9068 "XDP pkt read, pkt_end < pkt_data', good access",
9070 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9071 offsetof(struct xdp_md, data)),
9072 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9073 offsetof(struct xdp_md, data_end)),
9074 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9076 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9077 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9078 BPF_MOV64_IMM(BPF_REG_0, 0),
9082 .prog_type = BPF_PROG_TYPE_XDP,
9085 "XDP pkt read, pkt_end < pkt_data', bad access 1",
9087 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9088 offsetof(struct xdp_md, data)),
9089 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9090 offsetof(struct xdp_md, data_end)),
9091 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9092 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9093 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9094 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9095 BPF_MOV64_IMM(BPF_REG_0, 0),
9098 .errstr = "R1 offset is outside of the packet",
9100 .prog_type = BPF_PROG_TYPE_XDP,
9101 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9104 "XDP pkt read, pkt_end < pkt_data', bad access 2",
9106 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9107 offsetof(struct xdp_md, data)),
9108 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9109 offsetof(struct xdp_md, data_end)),
9110 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9112 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9113 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9114 BPF_MOV64_IMM(BPF_REG_0, 0),
9117 .errstr = "R1 offset is outside of the packet",
9119 .prog_type = BPF_PROG_TYPE_XDP,
9122 "XDP pkt read, pkt_data' >= pkt_end, good access",
9124 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9125 offsetof(struct xdp_md, data)),
9126 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9127 offsetof(struct xdp_md, data_end)),
9128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9129 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9130 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9131 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9132 BPF_MOV64_IMM(BPF_REG_0, 0),
9136 .prog_type = BPF_PROG_TYPE_XDP,
9137 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9140 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9142 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9143 offsetof(struct xdp_md, data)),
9144 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9145 offsetof(struct xdp_md, data_end)),
9146 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9148 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9149 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9150 BPF_MOV64_IMM(BPF_REG_0, 0),
9153 .errstr = "R1 offset is outside of the packet",
9155 .prog_type = BPF_PROG_TYPE_XDP,
9158 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9160 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9161 offsetof(struct xdp_md, data)),
9162 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9163 offsetof(struct xdp_md, data_end)),
9164 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9166 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9167 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9168 BPF_MOV64_IMM(BPF_REG_0, 0),
9171 .errstr = "R1 offset is outside of the packet",
9173 .prog_type = BPF_PROG_TYPE_XDP,
9174 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9177 "XDP pkt read, pkt_end >= pkt_data', good access",
9179 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9180 offsetof(struct xdp_md, data)),
9181 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9182 offsetof(struct xdp_md, data_end)),
9183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9185 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9186 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9187 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9188 BPF_MOV64_IMM(BPF_REG_0, 0),
9192 .prog_type = BPF_PROG_TYPE_XDP,
9195 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
9197 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9198 offsetof(struct xdp_md, data)),
9199 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9200 offsetof(struct xdp_md, data_end)),
9201 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9202 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9203 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9204 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9205 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9206 BPF_MOV64_IMM(BPF_REG_0, 0),
9209 .errstr = "R1 offset is outside of the packet",
9211 .prog_type = BPF_PROG_TYPE_XDP,
9212 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9215 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
9217 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9218 offsetof(struct xdp_md, data)),
9219 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9220 offsetof(struct xdp_md, data_end)),
9221 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9223 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9224 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9225 BPF_MOV64_IMM(BPF_REG_0, 0),
9228 .errstr = "R1 offset is outside of the packet",
9230 .prog_type = BPF_PROG_TYPE_XDP,
9233 "XDP pkt read, pkt_data' <= pkt_end, good access",
9235 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9236 offsetof(struct xdp_md, data)),
9237 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9238 offsetof(struct xdp_md, data_end)),
9239 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9241 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9242 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9243 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9244 BPF_MOV64_IMM(BPF_REG_0, 0),
9248 .prog_type = BPF_PROG_TYPE_XDP,
9251 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
9253 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9254 offsetof(struct xdp_md, data)),
9255 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9256 offsetof(struct xdp_md, data_end)),
9257 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9258 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9259 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9260 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9261 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9262 BPF_MOV64_IMM(BPF_REG_0, 0),
9265 .errstr = "R1 offset is outside of the packet",
9267 .prog_type = BPF_PROG_TYPE_XDP,
9268 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9271 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
9273 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9274 offsetof(struct xdp_md, data)),
9275 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9276 offsetof(struct xdp_md, data_end)),
9277 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9279 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9280 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9281 BPF_MOV64_IMM(BPF_REG_0, 0),
9284 .errstr = "R1 offset is outside of the packet",
9286 .prog_type = BPF_PROG_TYPE_XDP,
9289 "XDP pkt read, pkt_end <= pkt_data', good access",
9291 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9292 offsetof(struct xdp_md, data)),
9293 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9294 offsetof(struct xdp_md, data_end)),
9295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9297 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9298 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9299 BPF_MOV64_IMM(BPF_REG_0, 0),
9303 .prog_type = BPF_PROG_TYPE_XDP,
9304 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9307 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
9309 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9310 offsetof(struct xdp_md, data)),
9311 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9312 offsetof(struct xdp_md, data_end)),
9313 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9315 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9316 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9317 BPF_MOV64_IMM(BPF_REG_0, 0),
9320 .errstr = "R1 offset is outside of the packet",
9322 .prog_type = BPF_PROG_TYPE_XDP,
9325 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
9327 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9328 offsetof(struct xdp_md, data)),
9329 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9330 offsetof(struct xdp_md, data_end)),
9331 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9333 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9334 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9335 BPF_MOV64_IMM(BPF_REG_0, 0),
9338 .errstr = "R1 offset is outside of the packet",
9340 .prog_type = BPF_PROG_TYPE_XDP,
9341 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9344 "XDP pkt read, pkt_meta' > pkt_data, good access",
9346 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9347 offsetof(struct xdp_md, data_meta)),
9348 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9349 offsetof(struct xdp_md, data)),
9350 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9351 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9352 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9353 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9354 BPF_MOV64_IMM(BPF_REG_0, 0),
9358 .prog_type = BPF_PROG_TYPE_XDP,
9361 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
9363 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9364 offsetof(struct xdp_md, data_meta)),
9365 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9366 offsetof(struct xdp_md, data)),
9367 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9369 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9370 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9371 BPF_MOV64_IMM(BPF_REG_0, 0),
9374 .errstr = "R1 offset is outside of the packet",
9376 .prog_type = BPF_PROG_TYPE_XDP,
9377 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9380 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
9382 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9383 offsetof(struct xdp_md, data_meta)),
9384 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9385 offsetof(struct xdp_md, data)),
9386 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9388 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9389 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9390 BPF_MOV64_IMM(BPF_REG_0, 0),
9393 .errstr = "R1 offset is outside of the packet",
9395 .prog_type = BPF_PROG_TYPE_XDP,
9398 "XDP pkt read, pkt_data > pkt_meta', good access",
9400 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9401 offsetof(struct xdp_md, data_meta)),
9402 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9403 offsetof(struct xdp_md, data)),
9404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9406 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9407 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9408 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9409 BPF_MOV64_IMM(BPF_REG_0, 0),
9413 .prog_type = BPF_PROG_TYPE_XDP,
9414 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9417 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
9419 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9420 offsetof(struct xdp_md, data_meta)),
9421 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9422 offsetof(struct xdp_md, data)),
9423 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9425 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9426 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9427 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9428 BPF_MOV64_IMM(BPF_REG_0, 0),
9431 .errstr = "R1 offset is outside of the packet",
9433 .prog_type = BPF_PROG_TYPE_XDP,
9436 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
9438 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9439 offsetof(struct xdp_md, data_meta)),
9440 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9441 offsetof(struct xdp_md, data)),
9442 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9443 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9444 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9445 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9446 BPF_MOV64_IMM(BPF_REG_0, 0),
9449 .errstr = "R1 offset is outside of the packet",
9451 .prog_type = BPF_PROG_TYPE_XDP,
9454 "XDP pkt read, pkt_meta' < pkt_data, good access",
9456 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9457 offsetof(struct xdp_md, data_meta)),
9458 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9459 offsetof(struct xdp_md, data)),
9460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9462 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9463 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9464 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9465 BPF_MOV64_IMM(BPF_REG_0, 0),
9469 .prog_type = BPF_PROG_TYPE_XDP,
9470 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9473 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
9475 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9476 offsetof(struct xdp_md, data_meta)),
9477 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9478 offsetof(struct xdp_md, data)),
9479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9481 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9482 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9483 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9484 BPF_MOV64_IMM(BPF_REG_0, 0),
9487 .errstr = "R1 offset is outside of the packet",
9489 .prog_type = BPF_PROG_TYPE_XDP,
9492 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
9494 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9495 offsetof(struct xdp_md, data_meta)),
9496 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9497 offsetof(struct xdp_md, data)),
9498 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9499 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9500 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9501 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9502 BPF_MOV64_IMM(BPF_REG_0, 0),
9505 .errstr = "R1 offset is outside of the packet",
9507 .prog_type = BPF_PROG_TYPE_XDP,
9510 "XDP pkt read, pkt_data < pkt_meta', good access",
9512 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9513 offsetof(struct xdp_md, data_meta)),
9514 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9515 offsetof(struct xdp_md, data)),
9516 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9518 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9519 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9520 BPF_MOV64_IMM(BPF_REG_0, 0),
9524 .prog_type = BPF_PROG_TYPE_XDP,
9527 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
9529 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9530 offsetof(struct xdp_md, data_meta)),
9531 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9532 offsetof(struct xdp_md, data)),
9533 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9535 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9536 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9537 BPF_MOV64_IMM(BPF_REG_0, 0),
9540 .errstr = "R1 offset is outside of the packet",
9542 .prog_type = BPF_PROG_TYPE_XDP,
9543 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9546 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
9548 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9549 offsetof(struct xdp_md, data_meta)),
9550 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9551 offsetof(struct xdp_md, data)),
9552 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9554 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9555 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9556 BPF_MOV64_IMM(BPF_REG_0, 0),
9559 .errstr = "R1 offset is outside of the packet",
9561 .prog_type = BPF_PROG_TYPE_XDP,
9564 "XDP pkt read, pkt_meta' >= pkt_data, good access",
9566 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9567 offsetof(struct xdp_md, data_meta)),
9568 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9569 offsetof(struct xdp_md, data)),
9570 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9572 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9573 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9574 BPF_MOV64_IMM(BPF_REG_0, 0),
9578 .prog_type = BPF_PROG_TYPE_XDP,
9579 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9582 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
9584 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9585 offsetof(struct xdp_md, data_meta)),
9586 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9587 offsetof(struct xdp_md, data)),
9588 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9590 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9591 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9592 BPF_MOV64_IMM(BPF_REG_0, 0),
9595 .errstr = "R1 offset is outside of the packet",
9597 .prog_type = BPF_PROG_TYPE_XDP,
9600 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
9602 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9603 offsetof(struct xdp_md, data_meta)),
9604 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9605 offsetof(struct xdp_md, data)),
9606 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9608 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9609 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9610 BPF_MOV64_IMM(BPF_REG_0, 0),
9613 .errstr = "R1 offset is outside of the packet",
9615 .prog_type = BPF_PROG_TYPE_XDP,
9616 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9619 "XDP pkt read, pkt_data >= pkt_meta', good access",
9621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9622 offsetof(struct xdp_md, data_meta)),
9623 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9624 offsetof(struct xdp_md, data)),
9625 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9627 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9628 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9629 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9630 BPF_MOV64_IMM(BPF_REG_0, 0),
9634 .prog_type = BPF_PROG_TYPE_XDP,
9637 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
9639 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9640 offsetof(struct xdp_md, data_meta)),
9641 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9642 offsetof(struct xdp_md, data)),
9643 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9645 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9646 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9647 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9648 BPF_MOV64_IMM(BPF_REG_0, 0),
9651 .errstr = "R1 offset is outside of the packet",
9653 .prog_type = BPF_PROG_TYPE_XDP,
9654 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9657 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
9659 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9660 offsetof(struct xdp_md, data_meta)),
9661 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9662 offsetof(struct xdp_md, data)),
9663 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9665 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9666 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9667 BPF_MOV64_IMM(BPF_REG_0, 0),
9670 .errstr = "R1 offset is outside of the packet",
9672 .prog_type = BPF_PROG_TYPE_XDP,
9675 "XDP pkt read, pkt_meta' <= pkt_data, good access",
9677 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9678 offsetof(struct xdp_md, data_meta)),
9679 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9680 offsetof(struct xdp_md, data)),
9681 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9683 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9684 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9686 BPF_MOV64_IMM(BPF_REG_0, 0),
9690 .prog_type = BPF_PROG_TYPE_XDP,
9693 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
9695 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9696 offsetof(struct xdp_md, data_meta)),
9697 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9698 offsetof(struct xdp_md, data)),
9699 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9701 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9702 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9703 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9704 BPF_MOV64_IMM(BPF_REG_0, 0),
9707 .errstr = "R1 offset is outside of the packet",
9709 .prog_type = BPF_PROG_TYPE_XDP,
9710 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9713 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
9715 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9716 offsetof(struct xdp_md, data_meta)),
9717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9718 offsetof(struct xdp_md, data)),
9719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9721 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9722 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9723 BPF_MOV64_IMM(BPF_REG_0, 0),
9726 .errstr = "R1 offset is outside of the packet",
9728 .prog_type = BPF_PROG_TYPE_XDP,
9731 "XDP pkt read, pkt_data <= pkt_meta', good access",
9733 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9734 offsetof(struct xdp_md, data_meta)),
9735 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9736 offsetof(struct xdp_md, data)),
9737 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9739 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9740 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9741 BPF_MOV64_IMM(BPF_REG_0, 0),
9745 .prog_type = BPF_PROG_TYPE_XDP,
9746 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9749 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
9751 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9752 offsetof(struct xdp_md, data_meta)),
9753 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9754 offsetof(struct xdp_md, data)),
9755 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9757 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9758 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9759 BPF_MOV64_IMM(BPF_REG_0, 0),
9762 .errstr = "R1 offset is outside of the packet",
9764 .prog_type = BPF_PROG_TYPE_XDP,
9767 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
9769 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9770 offsetof(struct xdp_md, data_meta)),
9771 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9772 offsetof(struct xdp_md, data)),
9773 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9775 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9776 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9777 BPF_MOV64_IMM(BPF_REG_0, 0),
9780 .errstr = "R1 offset is outside of the packet",
9782 .prog_type = BPF_PROG_TYPE_XDP,
9783 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9786 "check deducing bounds from const, 1",
9788 BPF_MOV64_IMM(BPF_REG_0, 1),
9789 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
9790 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9793 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9794 .errstr = "R0 tried to subtract pointer from scalar",
9798 "check deducing bounds from const, 2",
9800 BPF_MOV64_IMM(BPF_REG_0, 1),
9801 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
9803 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
9805 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9808 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9809 .result_unpriv = REJECT,
9814 "check deducing bounds from const, 3",
9816 BPF_MOV64_IMM(BPF_REG_0, 0),
9817 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9818 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9821 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9822 .errstr = "R0 tried to subtract pointer from scalar",
9826 "check deducing bounds from const, 4",
9828 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9829 BPF_MOV64_IMM(BPF_REG_0, 0),
9830 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
9832 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9834 BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
9837 .errstr_unpriv = "R6 has pointer with unsupported alu operation",
9838 .result_unpriv = REJECT,
9842 "check deducing bounds from const, 5",
9844 BPF_MOV64_IMM(BPF_REG_0, 0),
9845 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
9846 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9849 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9850 .errstr = "R0 tried to subtract pointer from scalar",
9854 "check deducing bounds from const, 6",
9856 BPF_MOV64_IMM(BPF_REG_0, 0),
9857 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9859 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9862 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9863 .errstr = "R0 tried to subtract pointer from scalar",
9867 "check deducing bounds from const, 7",
9869 BPF_MOV64_IMM(BPF_REG_0, ~0),
9870 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9871 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9872 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9873 offsetof(struct __sk_buff, mark)),
9876 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9877 .errstr = "dereference of modified ctx ptr",
9881 "check deducing bounds from const, 8",
9883 BPF_MOV64_IMM(BPF_REG_0, ~0),
9884 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9885 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
9886 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9887 offsetof(struct __sk_buff, mark)),
9890 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9891 .errstr = "dereference of modified ctx ptr",
9895 "check deducing bounds from const, 9",
9897 BPF_MOV64_IMM(BPF_REG_0, 0),
9898 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9899 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9902 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9903 .errstr = "R0 tried to subtract pointer from scalar",
9907 "check deducing bounds from const, 10",
9909 BPF_MOV64_IMM(BPF_REG_0, 0),
9910 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9911 /* Marks reg as unknown. */
9912 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
9913 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9916 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
9920 "bpf_exit with invalid return code. test1",
9922 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9925 .errstr = "R0 has value (0x0; 0xffffffff)",
9927 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9930 "bpf_exit with invalid return code. test2",
9932 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9933 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
9937 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9940 "bpf_exit with invalid return code. test3",
9942 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9943 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
9946 .errstr = "R0 has value (0x0; 0x3)",
9948 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9951 "bpf_exit with invalid return code. test4",
9953 BPF_MOV64_IMM(BPF_REG_0, 1),
9957 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9960 "bpf_exit with invalid return code. test5",
9962 BPF_MOV64_IMM(BPF_REG_0, 2),
9965 .errstr = "R0 has value (0x2; 0x0)",
9967 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9970 "bpf_exit with invalid return code. test6",
9972 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9975 .errstr = "R0 is not a known value (ctx)",
9977 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9980 "bpf_exit with invalid return code. test7",
9982 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9983 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
9984 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
9987 .errstr = "R0 has unknown scalar value",
9989 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9992 "calls: basic sanity",
9994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9995 BPF_MOV64_IMM(BPF_REG_0, 1),
9997 BPF_MOV64_IMM(BPF_REG_0, 2),
10000 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10004 "calls: not on unpriviledged",
10006 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10007 BPF_MOV64_IMM(BPF_REG_0, 1),
10009 BPF_MOV64_IMM(BPF_REG_0, 2),
10012 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10013 .result_unpriv = REJECT,
10018 "calls: div by 0 in subprog",
10020 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10021 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10022 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10023 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10024 offsetof(struct __sk_buff, data_end)),
10025 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10027 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10028 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10029 BPF_MOV64_IMM(BPF_REG_0, 1),
10031 BPF_MOV32_IMM(BPF_REG_2, 0),
10032 BPF_MOV32_IMM(BPF_REG_3, 1),
10033 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10034 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10035 offsetof(struct __sk_buff, data)),
10038 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10043 "calls: multiple ret types in subprog 1",
10045 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10046 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10047 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10048 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10049 offsetof(struct __sk_buff, data_end)),
10050 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10052 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10053 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10054 BPF_MOV64_IMM(BPF_REG_0, 1),
10056 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10057 offsetof(struct __sk_buff, data)),
10058 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10059 BPF_MOV32_IMM(BPF_REG_0, 42),
10062 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10064 .errstr = "R0 invalid mem access 'inv'",
10067 "calls: multiple ret types in subprog 2",
10069 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10070 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10071 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10072 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10073 offsetof(struct __sk_buff, data_end)),
10074 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10076 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10077 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10078 BPF_MOV64_IMM(BPF_REG_0, 1),
10080 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10081 offsetof(struct __sk_buff, data)),
10082 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10083 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10084 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10085 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10087 BPF_LD_MAP_FD(BPF_REG_1, 0),
10088 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10089 BPF_FUNC_map_lookup_elem),
10090 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10091 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10092 offsetof(struct __sk_buff, data)),
10093 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10096 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10097 .fixup_map1 = { 16 },
10099 .errstr = "R0 min value is outside of the array range",
10102 "calls: overlapping caller/callee",
10104 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10105 BPF_MOV64_IMM(BPF_REG_0, 1),
10108 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10109 .errstr = "last insn is not an exit or jmp",
10113 "calls: wrong recursive calls",
10115 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10116 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10117 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10118 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10119 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10120 BPF_MOV64_IMM(BPF_REG_0, 1),
10123 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10124 .errstr = "jump out of range",
10128 "calls: wrong src reg",
10130 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10131 BPF_MOV64_IMM(BPF_REG_0, 1),
10134 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10135 .errstr = "BPF_CALL uses reserved fields",
10139 "calls: wrong off value",
10141 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10142 BPF_MOV64_IMM(BPF_REG_0, 1),
10144 BPF_MOV64_IMM(BPF_REG_0, 2),
10147 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10148 .errstr = "BPF_CALL uses reserved fields",
10152 "calls: jump back loop",
10154 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10155 BPF_MOV64_IMM(BPF_REG_0, 1),
10158 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10159 .errstr = "back-edge from insn 0 to 0",
10163 "calls: conditional call",
10165 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10166 offsetof(struct __sk_buff, mark)),
10167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10168 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10169 BPF_MOV64_IMM(BPF_REG_0, 1),
10171 BPF_MOV64_IMM(BPF_REG_0, 2),
10174 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10175 .errstr = "jump out of range",
10179 "calls: conditional call 2",
10181 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10182 offsetof(struct __sk_buff, mark)),
10183 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10184 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10185 BPF_MOV64_IMM(BPF_REG_0, 1),
10187 BPF_MOV64_IMM(BPF_REG_0, 2),
10189 BPF_MOV64_IMM(BPF_REG_0, 3),
10192 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10196 "calls: conditional call 3",
10198 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10199 offsetof(struct __sk_buff, mark)),
10200 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10201 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10202 BPF_MOV64_IMM(BPF_REG_0, 1),
10204 BPF_MOV64_IMM(BPF_REG_0, 1),
10205 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10206 BPF_MOV64_IMM(BPF_REG_0, 3),
10207 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10209 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10210 .errstr = "back-edge from insn",
10214 "calls: conditional call 4",
10216 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10217 offsetof(struct __sk_buff, mark)),
10218 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10219 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10220 BPF_MOV64_IMM(BPF_REG_0, 1),
10222 BPF_MOV64_IMM(BPF_REG_0, 1),
10223 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
10224 BPF_MOV64_IMM(BPF_REG_0, 3),
10227 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10231 "calls: conditional call 5",
10233 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10234 offsetof(struct __sk_buff, mark)),
10235 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10237 BPF_MOV64_IMM(BPF_REG_0, 1),
10239 BPF_MOV64_IMM(BPF_REG_0, 1),
10240 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10241 BPF_MOV64_IMM(BPF_REG_0, 3),
10244 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10245 .errstr = "back-edge from insn",
10249 "calls: conditional call 6",
10251 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
10254 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10255 offsetof(struct __sk_buff, mark)),
10258 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10259 .errstr = "back-edge from insn",
10263 "calls: using r0 returned by callee",
10265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10267 BPF_MOV64_IMM(BPF_REG_0, 2),
10270 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10274 "calls: using uninit r0 from callee",
10276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10280 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10281 .errstr = "!read_ok",
10285 "calls: callee is using r1",
10287 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10289 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10290 offsetof(struct __sk_buff, len)),
10293 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
10295 .retval = TEST_DATA_LEN,
10298 "calls: callee using args1",
10300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10302 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10305 .errstr_unpriv = "allowed for root only",
10306 .result_unpriv = REJECT,
10308 .retval = POINTER_VALUE,
10311 "calls: callee using wrong args2",
10313 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10315 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10318 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10319 .errstr = "R2 !read_ok",
10323 "calls: callee using two args",
10325 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10326 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
10327 offsetof(struct __sk_buff, len)),
10328 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
10329 offsetof(struct __sk_buff, len)),
10330 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10332 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10333 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
10336 .errstr_unpriv = "allowed for root only",
10337 .result_unpriv = REJECT,
10339 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
10342 "calls: callee changing pkt pointers",
10344 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
10345 offsetof(struct xdp_md, data)),
10346 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
10347 offsetof(struct xdp_md, data_end)),
10348 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
10349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
10350 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
10351 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10352 /* clear_all_pkt_pointers() has to walk all frames
10353 * to make sure that pkt pointers in the caller
10354 * are cleared when callee is calling a helper that
10355 * adjusts packet size
10357 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10358 BPF_MOV32_IMM(BPF_REG_0, 0),
10360 BPF_MOV64_IMM(BPF_REG_2, 0),
10361 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10362 BPF_FUNC_xdp_adjust_head),
10366 .errstr = "R6 invalid mem access 'inv'",
10367 .prog_type = BPF_PROG_TYPE_XDP,
10370 "calls: two calls with args",
10372 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10374 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10376 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10377 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10378 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10379 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10380 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10382 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10383 offsetof(struct __sk_buff, len)),
10386 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10388 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
10391 "calls: calls with stack arith",
10393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10395 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10398 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10401 BPF_MOV64_IMM(BPF_REG_0, 42),
10402 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10405 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10410 "calls: calls with misaligned stack access",
10412 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10414 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
10417 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10419 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10420 BPF_MOV64_IMM(BPF_REG_0, 42),
10421 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10424 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10425 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
10426 .errstr = "misaligned stack access",
10430 "calls: calls control flow, jump test",
10432 BPF_MOV64_IMM(BPF_REG_0, 42),
10433 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10434 BPF_MOV64_IMM(BPF_REG_0, 43),
10435 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10436 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10439 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10444 "calls: calls control flow, jump test 2",
10446 BPF_MOV64_IMM(BPF_REG_0, 42),
10447 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10448 BPF_MOV64_IMM(BPF_REG_0, 43),
10449 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10450 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10453 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10454 .errstr = "jump out of range from insn 1 to 4",
10458 "calls: two calls with bad jump",
10460 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10462 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10463 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10464 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10465 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10466 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10467 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10468 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10470 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10471 offsetof(struct __sk_buff, len)),
10472 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
10475 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10476 .errstr = "jump out of range from insn 11 to 9",
10480 "calls: recursive call. test1",
10482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10484 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10487 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10488 .errstr = "back-edge",
10492 "calls: recursive call. test2",
10494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10496 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10499 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10500 .errstr = "back-edge",
10504 "calls: unreachable code",
10506 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10508 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10510 BPF_MOV64_IMM(BPF_REG_0, 0),
10512 BPF_MOV64_IMM(BPF_REG_0, 0),
10515 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10516 .errstr = "unreachable insn 6",
10520 "calls: invalid call",
10522 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10524 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
10527 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10528 .errstr = "invalid destination",
10532 "calls: invalid call 2",
10534 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10536 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
10539 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10540 .errstr = "invalid destination",
10544 "calls: jumping across function bodies. test1",
10546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10547 BPF_MOV64_IMM(BPF_REG_0, 0),
10549 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
10552 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10553 .errstr = "jump out of range",
10557 "calls: jumping across function bodies. test2",
10559 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
10560 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10561 BPF_MOV64_IMM(BPF_REG_0, 0),
10565 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10566 .errstr = "jump out of range",
10570 "calls: call without exit",
10572 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10574 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10576 BPF_MOV64_IMM(BPF_REG_0, 0),
10577 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
10579 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10580 .errstr = "not an exit",
10584 "calls: call into middle of ld_imm64",
10586 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10587 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10588 BPF_MOV64_IMM(BPF_REG_0, 0),
10590 BPF_LD_IMM64(BPF_REG_0, 0),
10593 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10594 .errstr = "last insn",
10598 "calls: call into middle of other call",
10600 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10601 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10602 BPF_MOV64_IMM(BPF_REG_0, 0),
10604 BPF_MOV64_IMM(BPF_REG_0, 0),
10605 BPF_MOV64_IMM(BPF_REG_0, 0),
10608 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10609 .errstr = "last insn",
10613 "calls: ld_abs with changing ctx data in callee",
10615 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10616 BPF_LD_ABS(BPF_B, 0),
10617 BPF_LD_ABS(BPF_H, 0),
10618 BPF_LD_ABS(BPF_W, 0),
10619 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
10620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10621 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
10622 BPF_LD_ABS(BPF_B, 0),
10623 BPF_LD_ABS(BPF_H, 0),
10624 BPF_LD_ABS(BPF_W, 0),
10626 BPF_MOV64_IMM(BPF_REG_2, 1),
10627 BPF_MOV64_IMM(BPF_REG_3, 2),
10628 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10629 BPF_FUNC_skb_vlan_push),
10632 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10633 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
10637 "calls: two calls with bad fallthrough",
10639 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10641 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10643 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10644 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10645 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10646 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10647 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10648 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
10649 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10650 offsetof(struct __sk_buff, len)),
10653 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10654 .errstr = "not an exit",
10658 "calls: two calls with stack read",
10660 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10661 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10665 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10667 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10668 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10669 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10670 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10671 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10673 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10676 .prog_type = BPF_PROG_TYPE_XDP,
10680 "calls: two calls with stack write",
10683 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10684 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10686 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10688 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10689 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10693 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10694 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10695 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
10696 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
10697 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10699 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
10700 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
10701 /* write into stack frame of main prog */
10702 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10706 /* read from stack frame of main prog */
10707 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10710 .prog_type = BPF_PROG_TYPE_XDP,
10714 "calls: stack overflow using two frames (pre-call access)",
10717 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10718 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
10722 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10723 BPF_MOV64_IMM(BPF_REG_0, 0),
10726 .prog_type = BPF_PROG_TYPE_XDP,
10727 .errstr = "combined stack size",
10731 "calls: stack overflow using two frames (post-call access)",
10734 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
10735 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10739 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10740 BPF_MOV64_IMM(BPF_REG_0, 0),
10743 .prog_type = BPF_PROG_TYPE_XDP,
10744 .errstr = "combined stack size",
10748 "calls: stack depth check using three frames. test1",
10751 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10752 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10753 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10754 BPF_MOV64_IMM(BPF_REG_0, 0),
10757 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10760 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10761 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10764 .prog_type = BPF_PROG_TYPE_XDP,
10765 /* stack_main=32, stack_A=256, stack_B=64
10766 * and max(main+A, main+A+B) < 512
10771 "calls: stack depth check using three frames. test2",
10774 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10775 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10776 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10777 BPF_MOV64_IMM(BPF_REG_0, 0),
10780 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10783 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10784 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10787 .prog_type = BPF_PROG_TYPE_XDP,
10788 /* stack_main=32, stack_A=64, stack_B=256
10789 * and max(main+A, main+A+B) < 512
10794 "calls: stack depth check using three frames. test3",
10797 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10798 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10799 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10800 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
10801 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
10802 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10803 BPF_MOV64_IMM(BPF_REG_0, 0),
10806 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
10808 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
10809 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10811 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
10812 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
10813 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10816 .prog_type = BPF_PROG_TYPE_XDP,
10817 /* stack_main=64, stack_A=224, stack_B=256
10818 * and max(main+A, main+A+B) > 512
10820 .errstr = "combined stack",
10824 "calls: stack depth check using three frames. test4",
10825 /* void main(void) {
10830 * void func1(int alloc_or_recurse) {
10831 * if (alloc_or_recurse) {
10832 * frame_pointer[-300] = 1;
10834 * func2(alloc_or_recurse);
10837 * void func2(int alloc_or_recurse) {
10838 * if (alloc_or_recurse) {
10839 * frame_pointer[-300] = 1;
10845 BPF_MOV64_IMM(BPF_REG_1, 0),
10846 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10847 BPF_MOV64_IMM(BPF_REG_1, 1),
10848 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10849 BPF_MOV64_IMM(BPF_REG_1, 1),
10850 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
10851 BPF_MOV64_IMM(BPF_REG_0, 0),
10854 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
10855 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10857 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10860 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10861 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10864 .prog_type = BPF_PROG_TYPE_XDP,
10866 .errstr = "combined stack",
10869 "calls: stack depth check using three frames. test5",
10872 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
10875 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10878 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
10881 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
10884 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
10887 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
10890 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
10893 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
10896 BPF_MOV64_IMM(BPF_REG_0, 0),
10899 .prog_type = BPF_PROG_TYPE_XDP,
10900 .errstr = "call stack",
10904 "calls: spill into caller stack frame",
10906 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10909 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10911 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
10912 BPF_MOV64_IMM(BPF_REG_0, 0),
10915 .prog_type = BPF_PROG_TYPE_XDP,
10916 .errstr = "cannot spill",
10920 "calls: write into caller stack frame",
10922 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10924 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10926 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10928 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
10929 BPF_MOV64_IMM(BPF_REG_0, 0),
10932 .prog_type = BPF_PROG_TYPE_XDP,
10937 "calls: write into callee stack frame",
10939 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10940 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
10942 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
10946 .prog_type = BPF_PROG_TYPE_XDP,
10947 .errstr = "cannot return stack pointer",
10951 "calls: two calls with stack write and void return",
10954 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10955 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10957 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10959 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10960 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10964 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10965 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10966 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10967 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10968 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10972 /* write into stack frame of main prog */
10973 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
10974 BPF_EXIT_INSN(), /* void return */
10976 .prog_type = BPF_PROG_TYPE_XDP,
10980 "calls: ambiguous return value",
10982 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10983 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10984 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10985 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10986 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10987 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10989 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10990 BPF_MOV64_IMM(BPF_REG_0, 0),
10993 .errstr_unpriv = "allowed for root only",
10994 .result_unpriv = REJECT,
10995 .errstr = "R0 !read_ok",
10999 "calls: two calls that return map_value",
11002 /* pass fp-16, fp-8 into a function */
11003 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11005 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11006 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11007 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11009 /* fetch map_value_ptr from the stack of this function */
11010 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11011 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11012 /* write into map value */
11013 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11014 /* fetch second map_value_ptr from the stack */
11015 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11016 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11017 /* write into map value */
11018 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11019 BPF_MOV64_IMM(BPF_REG_0, 0),
11023 /* call 3rd function twice */
11024 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11025 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11026 /* first time with fp-8 */
11027 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11028 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11029 /* second time with fp-16 */
11030 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11034 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11035 /* lookup from map */
11036 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11037 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11039 BPF_LD_MAP_FD(BPF_REG_1, 0),
11040 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11041 BPF_FUNC_map_lookup_elem),
11042 /* write map_value_ptr into stack frame of main prog */
11043 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11044 BPF_MOV64_IMM(BPF_REG_0, 0),
11045 BPF_EXIT_INSN(), /* return 0 */
11047 .prog_type = BPF_PROG_TYPE_XDP,
11048 .fixup_map1 = { 23 },
11052 "calls: two calls that return map_value with bool condition",
11055 /* pass fp-16, fp-8 into a function */
11056 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11058 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11059 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11060 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11061 BPF_MOV64_IMM(BPF_REG_0, 0),
11065 /* call 3rd function twice */
11066 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11067 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11068 /* first time with fp-8 */
11069 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11070 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11071 /* fetch map_value_ptr from the stack of this function */
11072 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11073 /* write into map value */
11074 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11075 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11076 /* second time with fp-16 */
11077 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11078 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11079 /* fetch second map_value_ptr from the stack */
11080 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11081 /* write into map value */
11082 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11086 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11087 /* lookup from map */
11088 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11089 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11091 BPF_LD_MAP_FD(BPF_REG_1, 0),
11092 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11093 BPF_FUNC_map_lookup_elem),
11094 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11095 BPF_MOV64_IMM(BPF_REG_0, 0),
11096 BPF_EXIT_INSN(), /* return 0 */
11097 /* write map_value_ptr into stack frame of main prog */
11098 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11099 BPF_MOV64_IMM(BPF_REG_0, 1),
11100 BPF_EXIT_INSN(), /* return 1 */
11102 .prog_type = BPF_PROG_TYPE_XDP,
11103 .fixup_map1 = { 23 },
11107 "calls: two calls that return map_value with incorrect bool check",
11110 /* pass fp-16, fp-8 into a function */
11111 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11113 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11116 BPF_MOV64_IMM(BPF_REG_0, 0),
11120 /* call 3rd function twice */
11121 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11122 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11123 /* first time with fp-8 */
11124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11125 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11126 /* fetch map_value_ptr from the stack of this function */
11127 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11128 /* write into map value */
11129 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11130 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11131 /* second time with fp-16 */
11132 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11133 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11134 /* fetch second map_value_ptr from the stack */
11135 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11136 /* write into map value */
11137 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11141 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11142 /* lookup from map */
11143 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11144 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11146 BPF_LD_MAP_FD(BPF_REG_1, 0),
11147 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11148 BPF_FUNC_map_lookup_elem),
11149 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11150 BPF_MOV64_IMM(BPF_REG_0, 0),
11151 BPF_EXIT_INSN(), /* return 0 */
11152 /* write map_value_ptr into stack frame of main prog */
11153 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11154 BPF_MOV64_IMM(BPF_REG_0, 1),
11155 BPF_EXIT_INSN(), /* return 1 */
11157 .prog_type = BPF_PROG_TYPE_XDP,
11158 .fixup_map1 = { 23 },
11160 .errstr = "invalid read from stack off -16+0 size 8",
11163 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11166 /* pass fp-16, fp-8 into a function */
11167 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11169 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11171 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11172 BPF_MOV64_IMM(BPF_REG_0, 0),
11176 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11177 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11178 /* 1st lookup from map */
11179 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11180 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11182 BPF_LD_MAP_FD(BPF_REG_1, 0),
11183 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11184 BPF_FUNC_map_lookup_elem),
11185 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11186 BPF_MOV64_IMM(BPF_REG_8, 0),
11187 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11188 /* write map_value_ptr into stack frame of main prog at fp-8 */
11189 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11190 BPF_MOV64_IMM(BPF_REG_8, 1),
11192 /* 2nd lookup from map */
11193 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11195 BPF_LD_MAP_FD(BPF_REG_1, 0),
11196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11197 BPF_FUNC_map_lookup_elem),
11198 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11199 BPF_MOV64_IMM(BPF_REG_9, 0),
11200 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11201 /* write map_value_ptr into stack frame of main prog at fp-16 */
11202 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11203 BPF_MOV64_IMM(BPF_REG_9, 1),
11205 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11206 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11208 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11209 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11214 /* if arg2 == 1 do *arg1 = 0 */
11215 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11216 /* fetch map_value_ptr from the stack of this function */
11217 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11218 /* write into map value */
11219 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11221 /* if arg4 == 1 do *arg3 = 0 */
11222 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11223 /* fetch map_value_ptr from the stack of this function */
11224 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11225 /* write into map value */
11226 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11229 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11230 .fixup_map1 = { 12, 22 },
11232 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11235 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
11238 /* pass fp-16, fp-8 into a function */
11239 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11241 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11243 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11244 BPF_MOV64_IMM(BPF_REG_0, 0),
11248 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11249 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11250 /* 1st lookup from map */
11251 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11252 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11253 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11254 BPF_LD_MAP_FD(BPF_REG_1, 0),
11255 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11256 BPF_FUNC_map_lookup_elem),
11257 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11258 BPF_MOV64_IMM(BPF_REG_8, 0),
11259 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11260 /* write map_value_ptr into stack frame of main prog at fp-8 */
11261 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11262 BPF_MOV64_IMM(BPF_REG_8, 1),
11264 /* 2nd lookup from map */
11265 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11267 BPF_LD_MAP_FD(BPF_REG_1, 0),
11268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11269 BPF_FUNC_map_lookup_elem),
11270 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11271 BPF_MOV64_IMM(BPF_REG_9, 0),
11272 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11273 /* write map_value_ptr into stack frame of main prog at fp-16 */
11274 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11275 BPF_MOV64_IMM(BPF_REG_9, 1),
11277 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11279 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11280 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11281 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11282 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11286 /* if arg2 == 1 do *arg1 = 0 */
11287 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11288 /* fetch map_value_ptr from the stack of this function */
11289 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11290 /* write into map value */
11291 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11293 /* if arg4 == 1 do *arg3 = 0 */
11294 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11295 /* fetch map_value_ptr from the stack of this function */
11296 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11297 /* write into map value */
11298 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11301 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11302 .fixup_map1 = { 12, 22 },
11306 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
11309 /* pass fp-16, fp-8 into a function */
11310 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11312 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11314 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
11315 BPF_MOV64_IMM(BPF_REG_0, 0),
11319 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11320 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11321 /* 1st lookup from map */
11322 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
11323 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11325 BPF_LD_MAP_FD(BPF_REG_1, 0),
11326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11327 BPF_FUNC_map_lookup_elem),
11328 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11329 BPF_MOV64_IMM(BPF_REG_8, 0),
11330 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11331 /* write map_value_ptr into stack frame of main prog at fp-8 */
11332 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11333 BPF_MOV64_IMM(BPF_REG_8, 1),
11335 /* 2nd lookup from map */
11336 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11337 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11338 BPF_LD_MAP_FD(BPF_REG_1, 0),
11339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11340 BPF_FUNC_map_lookup_elem),
11341 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11342 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
11343 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11344 /* write map_value_ptr into stack frame of main prog at fp-16 */
11345 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11346 BPF_MOV64_IMM(BPF_REG_9, 1),
11348 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11349 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
11350 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11351 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11352 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11353 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
11354 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
11357 /* if arg2 == 1 do *arg1 = 0 */
11358 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11359 /* fetch map_value_ptr from the stack of this function */
11360 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11361 /* write into map value */
11362 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11364 /* if arg4 == 1 do *arg3 = 0 */
11365 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11366 /* fetch map_value_ptr from the stack of this function */
11367 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11368 /* write into map value */
11369 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11370 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
11372 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11373 .fixup_map1 = { 12, 22 },
11375 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11378 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
11381 /* pass fp-16, fp-8 into a function */
11382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11384 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11385 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11386 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11387 BPF_MOV64_IMM(BPF_REG_0, 0),
11391 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11392 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11393 /* 1st lookup from map */
11394 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11395 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11397 BPF_LD_MAP_FD(BPF_REG_1, 0),
11398 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11399 BPF_FUNC_map_lookup_elem),
11400 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11401 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11402 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11403 BPF_MOV64_IMM(BPF_REG_8, 0),
11404 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11405 BPF_MOV64_IMM(BPF_REG_8, 1),
11407 /* 2nd lookup from map */
11408 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11410 BPF_LD_MAP_FD(BPF_REG_1, 0),
11411 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11412 BPF_FUNC_map_lookup_elem),
11413 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11414 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11415 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11416 BPF_MOV64_IMM(BPF_REG_9, 0),
11417 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11418 BPF_MOV64_IMM(BPF_REG_9, 1),
11420 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11421 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11422 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11423 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11424 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11429 /* if arg2 == 1 do *arg1 = 0 */
11430 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11431 /* fetch map_value_ptr from the stack of this function */
11432 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11433 /* write into map value */
11434 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11436 /* if arg4 == 1 do *arg3 = 0 */
11437 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11438 /* fetch map_value_ptr from the stack of this function */
11439 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11440 /* write into map value */
11441 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11444 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11445 .fixup_map1 = { 12, 22 },
11449 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
11452 /* pass fp-16, fp-8 into a function */
11453 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11457 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11458 BPF_MOV64_IMM(BPF_REG_0, 0),
11462 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11463 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11464 /* 1st lookup from map */
11465 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11466 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11467 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11468 BPF_LD_MAP_FD(BPF_REG_1, 0),
11469 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11470 BPF_FUNC_map_lookup_elem),
11471 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11472 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11473 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11474 BPF_MOV64_IMM(BPF_REG_8, 0),
11475 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11476 BPF_MOV64_IMM(BPF_REG_8, 1),
11478 /* 2nd lookup from map */
11479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11481 BPF_LD_MAP_FD(BPF_REG_1, 0),
11482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11483 BPF_FUNC_map_lookup_elem),
11484 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11485 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11486 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11487 BPF_MOV64_IMM(BPF_REG_9, 0),
11488 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11489 BPF_MOV64_IMM(BPF_REG_9, 1),
11491 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11493 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11494 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11495 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11496 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11500 /* if arg2 == 1 do *arg1 = 0 */
11501 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11502 /* fetch map_value_ptr from the stack of this function */
11503 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11504 /* write into map value */
11505 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11507 /* if arg4 == 0 do *arg3 = 0 */
11508 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
11509 /* fetch map_value_ptr from the stack of this function */
11510 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11511 /* write into map value */
11512 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11515 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11516 .fixup_map1 = { 12, 22 },
11518 .errstr = "R0 invalid mem access 'inv'",
11521 "calls: pkt_ptr spill into caller stack",
11523 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11525 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11529 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11530 offsetof(struct __sk_buff, data)),
11531 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11532 offsetof(struct __sk_buff, data_end)),
11533 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11535 /* spill unchecked pkt_ptr into stack of caller */
11536 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11537 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11538 /* now the pkt range is verified, read pkt_ptr from stack */
11539 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11540 /* write 4 bytes into packet */
11541 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11545 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11546 .retval = POINTER_VALUE,
11549 "calls: pkt_ptr spill into caller stack 2",
11551 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11554 /* Marking is still kept, but not in all cases safe. */
11555 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11556 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11560 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11561 offsetof(struct __sk_buff, data)),
11562 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11563 offsetof(struct __sk_buff, data_end)),
11564 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11565 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11566 /* spill unchecked pkt_ptr into stack of caller */
11567 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11568 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11569 /* now the pkt range is verified, read pkt_ptr from stack */
11570 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11571 /* write 4 bytes into packet */
11572 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11575 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11576 .errstr = "invalid access to packet",
11580 "calls: pkt_ptr spill into caller stack 3",
11582 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11584 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11586 /* Marking is still kept and safe here. */
11587 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11588 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11592 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11593 offsetof(struct __sk_buff, data)),
11594 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11595 offsetof(struct __sk_buff, data_end)),
11596 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11598 /* spill unchecked pkt_ptr into stack of caller */
11599 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11600 BPF_MOV64_IMM(BPF_REG_5, 0),
11601 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11602 BPF_MOV64_IMM(BPF_REG_5, 1),
11603 /* now the pkt range is verified, read pkt_ptr from stack */
11604 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11605 /* write 4 bytes into packet */
11606 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11607 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11610 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11615 "calls: pkt_ptr spill into caller stack 4",
11617 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11621 /* Check marking propagated. */
11622 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11623 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11627 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11628 offsetof(struct __sk_buff, data)),
11629 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11630 offsetof(struct __sk_buff, data_end)),
11631 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11633 /* spill unchecked pkt_ptr into stack of caller */
11634 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11635 BPF_MOV64_IMM(BPF_REG_5, 0),
11636 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11637 BPF_MOV64_IMM(BPF_REG_5, 1),
11638 /* don't read back pkt_ptr from stack here */
11639 /* write 4 bytes into packet */
11640 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11641 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11644 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11649 "calls: pkt_ptr spill into caller stack 5",
11651 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11653 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
11654 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11655 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11656 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11660 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11661 offsetof(struct __sk_buff, data)),
11662 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11663 offsetof(struct __sk_buff, data_end)),
11664 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11665 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11666 BPF_MOV64_IMM(BPF_REG_5, 0),
11667 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11668 /* spill checked pkt_ptr into stack of caller */
11669 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11670 BPF_MOV64_IMM(BPF_REG_5, 1),
11671 /* don't read back pkt_ptr from stack here */
11672 /* write 4 bytes into packet */
11673 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11674 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11677 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11678 .errstr = "same insn cannot be used with different",
11682 "calls: pkt_ptr spill into caller stack 6",
11684 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11685 offsetof(struct __sk_buff, data_end)),
11686 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11688 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11689 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11690 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11691 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11695 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11696 offsetof(struct __sk_buff, data)),
11697 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11698 offsetof(struct __sk_buff, data_end)),
11699 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11701 BPF_MOV64_IMM(BPF_REG_5, 0),
11702 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11703 /* spill checked pkt_ptr into stack of caller */
11704 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11705 BPF_MOV64_IMM(BPF_REG_5, 1),
11706 /* don't read back pkt_ptr from stack here */
11707 /* write 4 bytes into packet */
11708 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11709 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11712 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11713 .errstr = "R4 invalid mem access",
11717 "calls: pkt_ptr spill into caller stack 7",
11719 BPF_MOV64_IMM(BPF_REG_2, 0),
11720 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11722 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11724 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11725 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11729 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11730 offsetof(struct __sk_buff, data)),
11731 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11732 offsetof(struct __sk_buff, data_end)),
11733 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11735 BPF_MOV64_IMM(BPF_REG_5, 0),
11736 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11737 /* spill checked pkt_ptr into stack of caller */
11738 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11739 BPF_MOV64_IMM(BPF_REG_5, 1),
11740 /* don't read back pkt_ptr from stack here */
11741 /* write 4 bytes into packet */
11742 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11743 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11746 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11747 .errstr = "R4 invalid mem access",
11751 "calls: pkt_ptr spill into caller stack 8",
11753 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11754 offsetof(struct __sk_buff, data)),
11755 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11756 offsetof(struct __sk_buff, data_end)),
11757 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11759 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11761 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11763 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11765 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11766 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11770 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11771 offsetof(struct __sk_buff, data)),
11772 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11773 offsetof(struct __sk_buff, data_end)),
11774 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11775 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11776 BPF_MOV64_IMM(BPF_REG_5, 0),
11777 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11778 /* spill checked pkt_ptr into stack of caller */
11779 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11780 BPF_MOV64_IMM(BPF_REG_5, 1),
11781 /* don't read back pkt_ptr from stack here */
11782 /* write 4 bytes into packet */
11783 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11784 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11787 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11791 "calls: pkt_ptr spill into caller stack 9",
11793 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11794 offsetof(struct __sk_buff, data)),
11795 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11796 offsetof(struct __sk_buff, data_end)),
11797 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11799 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11801 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11803 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11804 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11805 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11806 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11810 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11811 offsetof(struct __sk_buff, data)),
11812 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11813 offsetof(struct __sk_buff, data_end)),
11814 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11816 BPF_MOV64_IMM(BPF_REG_5, 0),
11817 /* spill unchecked pkt_ptr into stack of caller */
11818 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11819 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11820 BPF_MOV64_IMM(BPF_REG_5, 1),
11821 /* don't read back pkt_ptr from stack here */
11822 /* write 4 bytes into packet */
11823 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11824 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11827 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11828 .errstr = "invalid access to packet",
11832 "calls: caller stack init to zero or map_value_or_null",
11834 BPF_MOV64_IMM(BPF_REG_0, 0),
11835 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
11836 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11838 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11839 /* fetch map_value_or_null or const_zero from stack */
11840 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11842 /* store into map_value */
11843 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
11847 /* if (ctx == 0) return; */
11848 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
11849 /* else bpf_map_lookup() and *(fp - 8) = r0 */
11850 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
11851 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11853 BPF_LD_MAP_FD(BPF_REG_1, 0),
11854 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11856 BPF_FUNC_map_lookup_elem),
11857 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11858 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11861 .fixup_map1 = { 13 },
11863 .prog_type = BPF_PROG_TYPE_XDP,
11866 "calls: stack init to zero and pruning",
11868 /* first make allocated_stack 16 byte */
11869 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
11870 /* now fork the execution such that the false branch
11871 * of JGT insn will be verified second and it skips zero
11872 * init of fp-8 stack slot. If stack liveness marking
11873 * is missing live_read marks from call map_lookup
11874 * processing then pruning will incorrectly assume
11875 * that fp-8 stack slot was unused in the fall-through
11876 * branch and will accept the program incorrectly
11878 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
11879 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11880 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
11881 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11883 BPF_LD_MAP_FD(BPF_REG_1, 0),
11884 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11885 BPF_FUNC_map_lookup_elem),
11888 .fixup_map2 = { 6 },
11889 .errstr = "invalid indirect read from stack off -8+0 size 8",
11891 .prog_type = BPF_PROG_TYPE_XDP,
11894 "calls: two calls returning different map pointers for lookup (hash, array)",
11897 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
11899 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11901 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11902 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11903 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11904 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11905 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11906 BPF_FUNC_map_lookup_elem),
11907 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11908 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
11909 offsetof(struct test_val, foo)),
11910 BPF_MOV64_IMM(BPF_REG_0, 1),
11913 BPF_LD_MAP_FD(BPF_REG_0, 0),
11916 BPF_LD_MAP_FD(BPF_REG_0, 0),
11919 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11920 .fixup_map2 = { 13 },
11921 .fixup_map4 = { 16 },
11926 "calls: two calls returning different map pointers for lookup (hash, map in map)",
11929 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
11931 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11933 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11934 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11935 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11936 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11937 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11938 BPF_FUNC_map_lookup_elem),
11939 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11940 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
11941 offsetof(struct test_val, foo)),
11942 BPF_MOV64_IMM(BPF_REG_0, 1),
11945 BPF_LD_MAP_FD(BPF_REG_0, 0),
11948 BPF_LD_MAP_FD(BPF_REG_0, 0),
11951 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11952 .fixup_map_in_map = { 16 },
11953 .fixup_map4 = { 13 },
11955 .errstr = "R0 invalid mem access 'map_ptr'",
11958 "cond: two branches returning different map pointers for lookup (tail, tail)",
11960 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
11961 offsetof(struct __sk_buff, mark)),
11962 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
11963 BPF_LD_MAP_FD(BPF_REG_2, 0),
11964 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11965 BPF_LD_MAP_FD(BPF_REG_2, 0),
11966 BPF_MOV64_IMM(BPF_REG_3, 7),
11967 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11968 BPF_FUNC_tail_call),
11969 BPF_MOV64_IMM(BPF_REG_0, 1),
11972 .fixup_prog1 = { 5 },
11973 .fixup_prog2 = { 2 },
11974 .result_unpriv = REJECT,
11975 .errstr_unpriv = "tail_call abusing map_ptr",
11980 "cond: two branches returning same map pointers for lookup (tail, tail)",
11982 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
11983 offsetof(struct __sk_buff, mark)),
11984 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
11985 BPF_LD_MAP_FD(BPF_REG_2, 0),
11986 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11987 BPF_LD_MAP_FD(BPF_REG_2, 0),
11988 BPF_MOV64_IMM(BPF_REG_3, 7),
11989 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11990 BPF_FUNC_tail_call),
11991 BPF_MOV64_IMM(BPF_REG_0, 1),
11994 .fixup_prog2 = { 2, 5 },
11995 .result_unpriv = ACCEPT,
12000 "search pruning: all branches should be verified (nop operation)",
12002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12004 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12005 BPF_LD_MAP_FD(BPF_REG_1, 0),
12006 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12007 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12008 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12009 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12010 BPF_MOV64_IMM(BPF_REG_4, 0),
12012 BPF_MOV64_IMM(BPF_REG_4, 1),
12013 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12014 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12015 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12016 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12017 BPF_MOV64_IMM(BPF_REG_6, 0),
12018 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12021 .fixup_map1 = { 3 },
12022 .errstr = "R6 invalid mem access 'inv'",
12024 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12027 "search pruning: all branches should be verified (invalid stack access)",
12029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12031 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12032 BPF_LD_MAP_FD(BPF_REG_1, 0),
12033 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12035 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12036 BPF_MOV64_IMM(BPF_REG_4, 0),
12037 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12038 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12040 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12041 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12042 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12045 .fixup_map1 = { 3 },
12046 .errstr = "invalid read from stack off -16+0 size 8",
12048 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12051 "jit: lsh, rsh, arsh by 1",
12053 BPF_MOV64_IMM(BPF_REG_0, 1),
12054 BPF_MOV64_IMM(BPF_REG_1, 0xff),
12055 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12056 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12057 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12059 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12060 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12061 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12063 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12064 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12066 BPF_MOV64_IMM(BPF_REG_0, 2),
12073 "jit: mov32 for ldimm64, 1",
12075 BPF_MOV64_IMM(BPF_REG_0, 2),
12076 BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12077 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12078 BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12079 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12080 BPF_MOV64_IMM(BPF_REG_0, 1),
12087 "jit: mov32 for ldimm64, 2",
12089 BPF_MOV64_IMM(BPF_REG_0, 1),
12090 BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12091 BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12092 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12093 BPF_MOV64_IMM(BPF_REG_0, 2),
12100 "jit: various mul tests",
12102 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12103 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12104 BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12105 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12106 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12107 BPF_MOV64_IMM(BPF_REG_0, 1),
12109 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12110 BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12111 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12112 BPF_MOV64_IMM(BPF_REG_0, 1),
12114 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12115 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12116 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12117 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12118 BPF_MOV64_IMM(BPF_REG_0, 1),
12120 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12121 BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12122 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12123 BPF_MOV64_IMM(BPF_REG_0, 1),
12125 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12126 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12127 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12128 BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12129 BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12130 BPF_MOV64_IMM(BPF_REG_0, 1),
12132 BPF_MOV64_IMM(BPF_REG_0, 2),
12139 "xadd/w check unaligned stack",
12141 BPF_MOV64_IMM(BPF_REG_0, 1),
12142 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12143 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12144 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12148 .errstr = "misaligned stack access off",
12149 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12152 "xadd/w check unaligned map",
12154 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12155 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12156 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12157 BPF_LD_MAP_FD(BPF_REG_1, 0),
12158 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12159 BPF_FUNC_map_lookup_elem),
12160 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12162 BPF_MOV64_IMM(BPF_REG_1, 1),
12163 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
12164 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
12167 .fixup_map1 = { 3 },
12169 .errstr = "misaligned value access off",
12170 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12173 "xadd/w check unaligned pkt",
12175 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12176 offsetof(struct xdp_md, data)),
12177 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12178 offsetof(struct xdp_md, data_end)),
12179 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
12180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
12181 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
12182 BPF_MOV64_IMM(BPF_REG_0, 99),
12183 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
12184 BPF_MOV64_IMM(BPF_REG_0, 1),
12185 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12186 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
12187 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
12188 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
12189 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
12193 .errstr = "BPF_XADD stores into R2 packet",
12194 .prog_type = BPF_PROG_TYPE_XDP,
12197 "xadd/w check whether src/dst got mangled, 1",
12199 BPF_MOV64_IMM(BPF_REG_0, 1),
12200 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12201 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12202 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12203 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12204 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12205 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12206 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12207 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12209 BPF_MOV64_IMM(BPF_REG_0, 42),
12213 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12217 "xadd/w check whether src/dst got mangled, 2",
12219 BPF_MOV64_IMM(BPF_REG_0, 1),
12220 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12221 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12222 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12223 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12224 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12225 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12226 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12227 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
12229 BPF_MOV64_IMM(BPF_REG_0, 42),
12233 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12237 "bpf_get_stack return R0 within range",
12239 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12240 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12241 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12243 BPF_LD_MAP_FD(BPF_REG_1, 0),
12244 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12245 BPF_FUNC_map_lookup_elem),
12246 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
12247 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12248 BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
12249 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12250 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12251 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
12252 BPF_MOV64_IMM(BPF_REG_4, 256),
12253 BPF_EMIT_CALL(BPF_FUNC_get_stack),
12254 BPF_MOV64_IMM(BPF_REG_1, 0),
12255 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
12256 BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
12257 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
12258 BPF_JMP_REG(BPF_JSLT, BPF_REG_8, BPF_REG_1, 16),
12259 BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
12260 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12261 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
12262 BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
12263 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
12264 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
12265 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
12266 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
12267 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12268 BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
12269 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
12270 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
12271 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12272 BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
12273 BPF_MOV64_IMM(BPF_REG_4, 0),
12274 BPF_EMIT_CALL(BPF_FUNC_get_stack),
12277 .fixup_map2 = { 4 },
12279 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12282 "ld_abs: invalid op 1",
12284 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12285 BPF_LD_ABS(BPF_DW, 0),
12288 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12290 .errstr = "unknown opcode",
12293 "ld_abs: invalid op 2",
12295 BPF_MOV32_IMM(BPF_REG_0, 256),
12296 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12297 BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
12300 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12302 .errstr = "unknown opcode",
12305 "ld_abs: nmap reduced",
12307 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12308 BPF_LD_ABS(BPF_H, 12),
12309 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
12310 BPF_LD_ABS(BPF_H, 12),
12311 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
12312 BPF_MOV32_IMM(BPF_REG_0, 18),
12313 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
12314 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
12315 BPF_LD_IND(BPF_W, BPF_REG_7, 14),
12316 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
12317 BPF_MOV32_IMM(BPF_REG_0, 280971478),
12318 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12319 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12320 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
12321 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12322 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
12323 BPF_LD_ABS(BPF_H, 12),
12324 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
12325 BPF_MOV32_IMM(BPF_REG_0, 22),
12326 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12327 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12328 BPF_LD_IND(BPF_H, BPF_REG_7, 14),
12329 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
12330 BPF_MOV32_IMM(BPF_REG_0, 17366),
12331 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
12332 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
12333 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
12334 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12335 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12336 BPF_MOV32_IMM(BPF_REG_0, 256),
12338 BPF_MOV32_IMM(BPF_REG_0, 0),
12342 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
12343 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12344 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
12346 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12351 "ld_abs: div + abs, test 1",
12353 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12354 BPF_LD_ABS(BPF_B, 3),
12355 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12356 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12357 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12358 BPF_LD_ABS(BPF_B, 4),
12359 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12360 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12364 10, 20, 30, 40, 50,
12366 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12371 "ld_abs: div + abs, test 2",
12373 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12374 BPF_LD_ABS(BPF_B, 3),
12375 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12376 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12377 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12378 BPF_LD_ABS(BPF_B, 128),
12379 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12380 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12384 10, 20, 30, 40, 50,
12386 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12391 "ld_abs: div + abs, test 3",
12393 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12394 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12395 BPF_LD_ABS(BPF_B, 3),
12396 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12400 10, 20, 30, 40, 50,
12402 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12407 "ld_abs: div + abs, test 4",
12409 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12410 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12411 BPF_LD_ABS(BPF_B, 256),
12412 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12416 10, 20, 30, 40, 50,
12418 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12423 "ld_abs: vlan + abs, test 1",
12428 .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
12429 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12434 "ld_abs: vlan + abs, test 2",
12436 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12437 BPF_LD_ABS(BPF_B, 0),
12438 BPF_LD_ABS(BPF_H, 0),
12439 BPF_LD_ABS(BPF_W, 0),
12440 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
12441 BPF_MOV64_IMM(BPF_REG_6, 0),
12442 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12443 BPF_MOV64_IMM(BPF_REG_2, 1),
12444 BPF_MOV64_IMM(BPF_REG_3, 2),
12445 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12446 BPF_FUNC_skb_vlan_push),
12447 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
12448 BPF_LD_ABS(BPF_B, 0),
12449 BPF_LD_ABS(BPF_H, 0),
12450 BPF_LD_ABS(BPF_W, 0),
12451 BPF_MOV64_IMM(BPF_REG_0, 42),
12457 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12462 "ld_abs: jump around ld_abs",
12467 .fill_helper = bpf_fill_jump_around_ld_abs,
12468 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12473 "ld_dw: xor semi-random 64 bit imms, test 1",
12476 .fill_helper = bpf_fill_rand_ld_dw,
12477 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12482 "ld_dw: xor semi-random 64 bit imms, test 2",
12485 .fill_helper = bpf_fill_rand_ld_dw,
12486 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12491 "ld_dw: xor semi-random 64 bit imms, test 3",
12494 .fill_helper = bpf_fill_rand_ld_dw,
12495 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12500 "ld_dw: xor semi-random 64 bit imms, test 4",
12503 .fill_helper = bpf_fill_rand_ld_dw,
12504 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12509 "pass unmodified ctx pointer to helper",
12511 BPF_MOV64_IMM(BPF_REG_2, 0),
12512 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12513 BPF_FUNC_csum_update),
12514 BPF_MOV64_IMM(BPF_REG_0, 0),
12517 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12521 "pass modified ctx pointer to helper, 1",
12523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
12524 BPF_MOV64_IMM(BPF_REG_2, 0),
12525 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12526 BPF_FUNC_csum_update),
12527 BPF_MOV64_IMM(BPF_REG_0, 0),
12530 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12532 .errstr = "dereference of modified ctx ptr",
12535 "pass modified ctx pointer to helper, 2",
12537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
12538 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12539 BPF_FUNC_get_socket_cookie),
12540 BPF_MOV64_IMM(BPF_REG_0, 0),
12543 .result_unpriv = REJECT,
12545 .errstr_unpriv = "dereference of modified ctx ptr",
12546 .errstr = "dereference of modified ctx ptr",
12549 "pass modified ctx pointer to helper, 3",
12551 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
12552 BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
12553 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
12554 BPF_MOV64_IMM(BPF_REG_2, 0),
12555 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12556 BPF_FUNC_csum_update),
12557 BPF_MOV64_IMM(BPF_REG_0, 0),
12560 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12562 .errstr = "variable ctx access var_off=(0x0; 0x4)",
12565 "mov64 src == dst",
12567 BPF_MOV64_IMM(BPF_REG_2, 0),
12568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
12569 // Check bounds are OK
12570 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
12571 BPF_MOV64_IMM(BPF_REG_0, 0),
12574 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12578 "mov64 src != dst",
12580 BPF_MOV64_IMM(BPF_REG_3, 0),
12581 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
12582 // Check bounds are OK
12583 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
12584 BPF_MOV64_IMM(BPF_REG_0, 0),
12587 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12591 "calls: ctx read at start of subprog",
12593 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
12595 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
12596 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12597 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12598 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12600 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
12601 BPF_MOV64_IMM(BPF_REG_0, 0),
12604 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
12605 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
12606 .result_unpriv = REJECT,
12611 static int probe_filter_length(const struct bpf_insn *fp)
12615 for (len = MAX_INSNS - 1; len > 0; --len)
12616 if (fp[len].code != 0 || fp[len].imm != 0)
/* Create a map of the given type/key/value sizes for a verifier test.
 * HASH maps are created with BPF_F_NO_PREALLOC; other types use no
 * flags.  Only the sizes matter here -- the tests never do runtime
 * lookups.  Prints a diagnostic (with errno text) on failure.
 */
12621 static int create_map(uint32_t type, uint32_t size_key,
12622 uint32_t size_value, uint32_t max_elem)
12626 fd = bpf_create_map(type, size_key, size_value, max_elem,
12627 type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
12629 printf("Failed to create hash map '%s'!\n", strerror(errno));
/* Load a trivial "dummy" program that just sets R0 = 42; used as a
 * tail-call target in the prog-array tests.  Returns the program fd
 * from bpf_load_program() (negative on failure).
 * NOTE(review): prog_type is declared as enum bpf_map_type but is
 * passed to bpf_load_program(), which takes an enum bpf_prog_type --
 * likely should be enum bpf_prog_type; verify against callers.
 */
12634 static int create_prog_dummy1(enum bpf_map_type prog_type)
12636 struct bpf_insn prog[] = {
12637 BPF_MOV64_IMM(BPF_REG_0, 42),
12641 return bpf_load_program(prog_type, prog,
12642 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/* Load a dummy program that tail-calls into prog-array map fd 'mfd' at
 * index 'idx' (R3 = idx, R2 = map fd, then bpf_tail_call).  If the tail
 * call fails it falls through and returns 41.  Returns the program fd.
 * NOTE(review): prog_type is declared as enum bpf_map_type but is
 * passed to bpf_load_program(), which takes an enum bpf_prog_type --
 * likely should be enum bpf_prog_type; verify against callers.
 */
12645 static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
12647 struct bpf_insn prog[] = {
12648 BPF_MOV64_IMM(BPF_REG_3, idx),
12649 BPF_LD_MAP_FD(BPF_REG_2, mfd),
12650 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12651 BPF_FUNC_tail_call),
12652 BPF_MOV64_IMM(BPF_REG_0, 41),
12656 return bpf_load_program(prog_type, prog,
12657 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/* Create a BPF_MAP_TYPE_PROG_ARRAY with max_elem int-keyed slots and
 * populate it with the two dummy programs: dummy1 at key p1key
 * (declared in code not visible in this excerpt) and dummy2 -- which
 * itself tail-calls back into the map at p2key -- at key p2key.
 * Returns the map fd; error paths (cleanup labels) are not visible
 * here.
 */
12660 static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem,
12664 int mfd, p1fd, p2fd;
12666 mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
12667 sizeof(int), max_elem, 0);
12669 printf("Failed to create prog array '%s'!\n", strerror(errno));
12673 p1fd = create_prog_dummy1(prog_type);
12674 p2fd = create_prog_dummy2(prog_type, mfd, p2key);
12675 if (p1fd < 0 || p2fd < 0)
12677 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
12679 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
/* Create a map-in-map: a one-element inner ARRAY map (int key/value),
 * then an ARRAY_OF_MAPS outer map using the inner fd as the value-type
 * template.  The inner fd is closed once the outer map is created (the
 * kernel keeps its own reference to the template).  Returns the outer
 * map fd, or a negative value on failure.
 */
12692 static int create_map_in_map(void)
12694 int inner_map_fd, outer_map_fd;
12696 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
12697 sizeof(int), 1, 0);
12698 if (inner_map_fd < 0) {
12699 printf("Failed to create array '%s'!\n", strerror(errno));
12700 return inner_map_fd;
12703 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
12704 sizeof(int), inner_map_fd, 1, 0);
12705 if (outer_map_fd < 0)
12706 printf("Failed to create array of maps '%s'!\n",
	/* inner map no longer needed once the outer map holds the template */
12709 close(inner_map_fd);
12711 return outer_map_fd;
/* Create a BPF_MAP_TYPE_CGROUP_STORAGE map keyed by
 * struct bpf_cgroup_storage_key with TEST_DATA_LEN-byte values.
 * max_entries is passed as 0 -- presumably because cgroup storage maps
 * size themselves from attachments; verify against bpf(2) semantics.
 * Returns the map fd; prints a diagnostic on failure.
 */
12714 static int create_cgroup_storage(void)
12718 fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
12719 sizeof(struct bpf_cgroup_storage_key),
12720 TEST_DATA_LEN, 0, 0);
12722 printf("Failed to create array '%s'!\n", strerror(errno));
/* Shared buffer receiving the kernel verifier's log output
 * (UINT_MAX >> 8 == ~16 MiB, large enough for the worst-case log).
 */
12727 static char bpf_vlog[UINT_MAX >> 8];
/* Patch a test's instruction stream before loading: each fixup_* array
 * in struct bpf_test lists instruction indices whose .imm field must be
 * rewritten with the fd of a freshly created map (or prog array); the
 * created fds are also recorded in map_fds[] so the caller can close
 * them after the test.  If the test has a fill_helper, it is run first
 * to generate the instructions programmatically.
 * NOTE(review): prog_type is declared as enum bpf_map_type but is only
 * forwarded to create_prog_array() as a program type -- likely should
 * be enum bpf_prog_type; verify.
 */
12729 static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
12730 struct bpf_insn *prog, int *map_fds)
12732 int *fixup_map1 = test->fixup_map1;
12733 int *fixup_map2 = test->fixup_map2;
12734 int *fixup_map3 = test->fixup_map3;
12735 int *fixup_map4 = test->fixup_map4;
12736 int *fixup_prog1 = test->fixup_prog1;
12737 int *fixup_prog2 = test->fixup_prog2;
12738 int *fixup_map_in_map = test->fixup_map_in_map;
12739 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	/* generate instructions programmatically if the test asks for it */
12741 if (test->fill_helper)
12742 test->fill_helper(test);
12744 /* Allocating HTs with 1 elem is fine here, since we only test
12745 * for verifier and not do a runtime lookup, so the only thing
12746 * that really matters is value size in this case.
	/* fixup_map1: hash map with long long key and long long value */
12749 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
12750 sizeof(long long), 1);
12752 prog[*fixup_map1].imm = map_fds[0];
12754 } while (*fixup_map1);
	/* fixup_map2: hash map with struct test_val values */
12758 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
12759 sizeof(struct test_val), 1);
12761 prog[*fixup_map2].imm = map_fds[1];
12763 } while (*fixup_map2);
	/* fixup_map3: hash map with struct other_val values */
12767 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
12768 sizeof(struct other_val), 1);
12770 prog[*fixup_map3].imm = map_fds[2];
12772 } while (*fixup_map3);
	/* fixup_map4: array map with struct test_val values */
12776 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
12777 sizeof(struct test_val), 1);
12779 prog[*fixup_map4].imm = map_fds[3];
12781 } while (*fixup_map4);
	/* fixup_prog1: 4-slot prog array, dummy2 tail-calls index 0 */
12784 if (*fixup_prog1) {
12785 map_fds[4] = create_prog_array(prog_type, 4, 0);
12787 prog[*fixup_prog1].imm = map_fds[4];
12789 } while (*fixup_prog1);
	/* fixup_prog2: 8-slot prog array, dummy2 tail-calls index 7 */
12792 if (*fixup_prog2) {
12793 map_fds[5] = create_prog_array(prog_type, 8, 7);
12795 prog[*fixup_prog2].imm = map_fds[5];
12797 } while (*fixup_prog2);
	/* fixup_map_in_map: ARRAY_OF_MAPS outer map */
12800 if (*fixup_map_in_map) {
12801 map_fds[6] = create_map_in_map();
12803 prog[*fixup_map_in_map].imm = map_fds[6];
12804 fixup_map_in_map++;
12805 } while (*fixup_map_in_map);
	/* fixup_cgroup_storage: per-cgroup storage map */
12808 if (*fixup_cgroup_storage) {
12809 map_fds[7] = create_cgroup_storage();
12811 prog[*fixup_cgroup_storage].imm = map_fds[7];
12812 fixup_cgroup_storage++;
12813 } while (*fixup_cgroup_storage);
/* Raise (admin == true) or drop (admin == false) CAP_SYS_ADMIN in this
 * process's effective capability set, via libcap, so the same binary
 * can exercise tests both as privileged and "unprivileged".  Prints a
 * perror() diagnostic on each failing libcap call.
 */
12817 static int set_admin(bool admin)
12820 const cap_value_t cap_val = CAP_SYS_ADMIN;
12823 caps = cap_get_proc();
12825 perror("cap_get_proc");
12828 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
12829 admin ? CAP_SET : CAP_CLEAR)) {
12830 perror("cap_set_flag");
	/* commit the modified capability state to the process */
12833 if (cap_set_proc(caps)) {
12834 perror("cap_set_proc");
12839 if (cap_free(caps))
12840 perror("cap_free");
/* Run one verifier test case: fix up map fds, load the program (with
 * BPF_F_STRICT_ALIGNMENT if the test requests it), compare the load
 * outcome against the expected ACCEPT/REJECT result and error string
 * (unpriv variants take precedence when running unprivileged), and on
 * successful load execute it via bpf_prog_test_run() and check the
 * return value.  Updates *passes / *errors accordingly.
 */
12844 static void do_test_single(struct bpf_test *test, bool unpriv,
12845 int *passes, int *errors)
12847 int fd_prog, expected_ret, reject_from_alignment;
12848 int prog_len, prog_type = test->prog_type;
12849 struct bpf_insn *prog = test->insns;
12850 int map_fds[MAX_NR_MAPS];
12851 const char *expected_err;
12852 uint32_t expected_val;
12856 for (i = 0; i < MAX_NR_MAPS; i++)
	/* prog_type 0 (UNSPEC) defaults to socket filter */
12860 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
12861 do_test_fixup(test, prog_type, prog, map_fds);
12862 prog_len = probe_filter_length(prog);
12864 fd_prog = bpf_verify_program(prog_type, prog, prog_len,
12865 test->flags & F_LOAD_WITH_STRICT_ALIGNMENT ?
12866 BPF_F_STRICT_ALIGNMENT : 0,
12867 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
	/* unpriv expectations override the default ones when set */
12869 expected_ret = unpriv && test->result_unpriv != UNDEF ?
12870 test->result_unpriv : test->result;
12871 expected_err = unpriv && test->errstr_unpriv ?
12872 test->errstr_unpriv : test->errstr;
12873 expected_val = unpriv && test->retval_unpriv ?
12874 test->retval_unpriv : test->retval;
	/* rejection caused purely by lack of efficient unaligned access */
12876 reject_from_alignment = fd_prog < 0 &&
12877 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
12878 strstr(bpf_vlog, "misaligned");
12879 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* on such arches an alignment rejection is itself a failure */
12880 if (reject_from_alignment) {
12881 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
12886 if (expected_ret == ACCEPT) {
12887 if (fd_prog < 0 && !reject_from_alignment) {
12888 printf("FAIL\nFailed to load prog '%s'!\n",
12893 if (fd_prog >= 0) {
12894 printf("FAIL\nUnexpected success to load!\n");
12897 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
12898 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
12899 expected_err, bpf_vlog);
	/* program loaded: actually run it and check the return value */
12904 if (fd_prog >= 0) {
12905 __u8 tmp[TEST_DATA_LEN << 2];
12906 __u32 size_tmp = sizeof(tmp);
12910 err = bpf_prog_test_run(fd_prog, 1, test->data,
12911 sizeof(test->data), tmp, &size_tmp,
	/* tolerate kernels without test_run (ENOTSUPP) or EPERM */
12915 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
12916 printf("Unexpected bpf_prog_test_run error\n");
12919 if (!err && retval != expected_val &&
12920 expected_val != POINTER_VALUE) {
12921 printf("FAIL retval %d != %d\n", retval, expected_val);
12926 printf("OK%s\n", reject_from_alignment ?
12927 " (NOTE: reject due to unknown alignment)" : "");
	/* release all map fds created by do_test_fixup() */
12930 for (i = 0; i < MAX_NR_MAPS; i++)
12936 printf("%s", bpf_vlog);
/* Return true iff CAP_SYS_ADMIN is currently in the process's effective
 * capability set (queried via libcap).  On any libcap error, 'sysadmin'
 * keeps its CAP_CLEAR default and the function reports false.
 * NOTE(review): CAP_IS_SUPPORTED is probed with CAP_SETFCAP even though
 * the capability actually queried below is CAP_SYS_ADMIN -- presumably
 * just a "does this kernel support capabilities" check; confirm intent.
 */
12940 static bool is_admin(void)
12943 cap_flag_value_t sysadmin = CAP_CLEAR;
12944 const cap_value_t cap_val = CAP_SYS_ADMIN;
12946 #ifdef CAP_IS_SUPPORTED
12947 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
12948 perror("cap_get_flag");
12952 caps = cap_get_proc();
12954 perror("cap_get_proc");
12957 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
12958 perror("cap_get_flag");
12959 if (cap_free(caps))
12960 perror("cap_free");
12961 return (sysadmin == CAP_SET);
/* Read the kernel/unprivileged_bpf_disabled sysctl and set the global
 * unpriv_disabled flag.  If the file cannot be opened, conservatively
 * assume unprivileged BPF is disabled.  fgets() with size 2 reads just
 * the first character; atoi() turns any non-zero digit into "disabled".
 */
12964 static void get_unpriv_disabled()
12969 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
12971 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
12972 unpriv_disabled = true;
12975 if (fgets(buf, 2, fd) == buf && atoi(buf))
12976 unpriv_disabled = true;
/* A test is also run as an unprivileged user when its program type is
 * unset (0 == BPF_PROG_TYPE_UNSPEC, which defaults to socket filter),
 * an explicit socket filter, or a cgroup-skb program -- the types
 * loadable without CAP_SYS_ADMIN.
 */
12980 static bool test_as_unpriv(struct bpf_test *test)
12982 return !test->prog_type ||
12983 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
12984 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
/* Run tests[from..to): each eligible test is executed unprivileged
 * ("#N/u", skipped when the unprivileged_bpf_disabled sysctl forbids
 * it) and/or privileged ("#N/p").  Prints a PASSED/SKIPPED/FAILED
 * summary and returns EXIT_FAILURE iff any test failed.
 */
12987 static int do_test(bool unpriv, unsigned int from, unsigned int to)
12989 int i, passes = 0, errors = 0, skips = 0;
12991 for (i = from; i < to; i++) {
12992 struct bpf_test *test = &tests[i];
12994 /* Program types that are not supported by non-root we
	/* unprivileged pass: skip if sysctl disables unpriv BPF */
12997 if (test_as_unpriv(test) && unpriv_disabled) {
12998 printf("#%d/u %s SKIP\n", i, test->descr);
13000 } else if (test_as_unpriv(test)) {
13003 printf("#%d/u %s ", i, test->descr);
13004 do_test_single(test, true, &passes, &errors);
	/* privileged pass */
13010 printf("#%d/p %s SKIP\n", i, test->descr);
13013 printf("#%d/p %s ", i, test->descr);
13014 do_test_single(test, false, &passes, &errors);
13018 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
13020 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
13023 int main(int argc, char **argv)
13025 unsigned int from = 0, to = ARRAY_SIZE(tests);
13026 bool unpriv = !is_admin();
13029 unsigned int l = atoi(argv[argc - 2]);
13030 unsigned int u = atoi(argv[argc - 1]);
13032 if (l < to && u < to) {
13036 } else if (argc == 2) {
13037 unsigned int t = atoi(argv[argc - 1]);
13045 get_unpriv_disabled();
13046 if (unpriv && unpriv_disabled) {
13047 printf("Cannot run as unprivileged user with sysctl %s.\n",
13049 return EXIT_FAILURE;
13052 bpf_semi_rand_init();
13053 return do_test(unpriv, from, to);