OSDN Git Service

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[uclinux-h8/linux.git] / tools / testing / selftests / bpf / test_verifier.c
index fd7de7e..94498ea 100644 (file)
 # endif
 #endif
 #include "bpf_rlimit.h"
+#include "bpf_rand.h"
 #include "../../../include/linux/filter.h"
 
 #ifndef ARRAY_SIZE
 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 #endif
 
-#define MAX_INSNS      512
+#define MAX_INSNS      BPF_MAXINSNS
 #define MAX_FIXUPS     8
 #define MAX_NR_MAPS    4
 #define POINTER_VALUE  0xcafe4all
@@ -64,6 +65,7 @@ struct bpf_test {
        struct bpf_insn insns[MAX_INSNS];
        int fixup_map1[MAX_FIXUPS];
        int fixup_map2[MAX_FIXUPS];
+       int fixup_map3[MAX_FIXUPS];
        int fixup_prog[MAX_FIXUPS];
        int fixup_map_in_map[MAX_FIXUPS];
        const char *errstr;
@@ -76,6 +78,8 @@ struct bpf_test {
        } result, result_unpriv;
        enum bpf_prog_type prog_type;
        uint8_t flags;
+       __u8 data[TEST_DATA_LEN];
+       void (*fill_helper)(struct bpf_test *self);
 };
 
 /* Note we want this to be 64 bit aligned so that the end of our array is
@@ -88,6 +92,91 @@ struct test_val {
        int foo[MAX_ENTRIES];
 };
 
+struct other_val {     /* 16-byte map value used by the fixup_map3 tests below */
+       long long foo;
+       long long bar;
+};
+
+static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+{
+       /* test: {skb->data[0], vlan_push} x PUSH_CNT + {skb->data[0], vlan_pop} x PUSH_CNT, 5 rounds */
+#define PUSH_CNT 51
+       unsigned int len = BPF_MAXINSNS;        /* fill the whole max-size program */
+       struct bpf_insn *insn = self->insns;
+       int i = 0, j, k = 0;
+
+       insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);        /* preserve skb ctx in R6 */
+loop:
+       for (j = 0; j < PUSH_CNT; j++) {
+               insn[i++] = BPF_LD_ABS(BPF_B, 0);       /* R0 = first payload byte */
+               insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);   /* byte != 0x34 -> jump to final exit */
+               i++;
+               insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);        /* re-load ctx into R1 for the helper call */
+               insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
+               insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
+               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                        BPF_FUNC_skb_vlan_push),       /* comma operator: next store is part of this stmt */
+               insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);      /* helper failed -> jump to final exit */
+               i++;
+       }
+
+       for (j = 0; j < PUSH_CNT; j++) {
+               insn[i++] = BPF_LD_ABS(BPF_B, 0);       /* R0 = first payload byte */
+               insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);   /* byte != 0x34 -> jump to final exit */
+               i++;
+               insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);        /* re-load ctx into R1 for the helper call */
+               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                        BPF_FUNC_skb_vlan_pop),        /* comma operator: next store is part of this stmt */
+               insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);      /* helper failed -> jump to final exit */
+               i++;
+       }
+       if (++k < 5)
+               goto loop;      /* repeat the push/pop blocks 5 times total */
+
+       for (; i < len - 1; i++)
+               insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);     /* pad; R0 = 0xbef is the expected retval */
+       insn[len - 1] = BPF_EXIT_INSN();
+}
+
+static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)  /* one long jump over a block of ld_abs insns */
+{
+       struct bpf_insn *insn = self->insns;
+       unsigned int len = BPF_MAXINSNS;        /* fill the whole max-size program */
+       int i = 0;
+
+       insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);        /* preserve skb ctx in R6 */
+       insn[i++] = BPF_LD_ABS(BPF_B, 0);       /* R0 = first payload byte */
+       insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);     /* byte == 10 -> jump straight to exit */
+       i++;
+       while (i < len - 1)
+               insn[i++] = BPF_LD_ABS(BPF_B, 1);       /* filler ld_abs insns the jump skips over */
+       insn[i] = BPF_EXIT_INSN();
+}
+
+static void bpf_fill_rand_ld_dw(struct bpf_test *self)  /* XOR-fold a chain of semi-random ld_imm64 values */
+{
+       struct bpf_insn *insn = self->insns;
+       uint64_t res = 0;
+       int i = 0;
+
+       insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
+       while (i < self->retval) {      /* on entry self->retval holds the requested insn count */
+               uint64_t val = bpf_semi_rand_get();
+               struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };      /* ld_imm64 expands to a 2-insn pair */
+
+               res ^= val;     /* mirror the program's accumulation host-side */
+               insn[i++] = tmp[0];
+               insn[i++] = tmp[1];
+               insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
+       }
+       insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
+       insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
+       insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);       /* fold upper 32 bits into lower 32 */
+       insn[i] = BPF_EXIT_INSN();
+       res ^= (res >> 32);     /* same fold as the program performs */
+       self->retval = (uint32_t)res;   /* on exit self->retval is the expected program result */
+}
+
 static struct bpf_test tests[] = {
        {
                "add+sub+mul",
@@ -5594,6 +5683,257 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
+               "map lookup helper access to map",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 8 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map update helper access to map",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 10 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map update helper access to map: wrong size",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .fixup_map3 = { 10 },
+               .result = REJECT,
+               .errstr = "invalid access to map value, value_size=8 off=0 size=16",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map helper access to adjusted map (via const imm)",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
+                                     offsetof(struct other_val, bar)),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 9 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map helper access to adjusted map (via const imm): out-of-bound 1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
+                                     sizeof(struct other_val) - 4),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 9 },
+               .result = REJECT,
+               .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map helper access to adjusted map (via const imm): out-of-bound 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 9 },
+               .result = REJECT,
+               .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map helper access to adjusted map (via const reg)",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3,
+                                     offsetof(struct other_val, bar)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 10 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map helper access to adjusted map (via const reg): out-of-bound 1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3,
+                                     sizeof(struct other_val) - 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 10 },
+               .result = REJECT,
+               .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map helper access to adjusted map (via const reg): out-of-bound 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3, -4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 10 },
+               .result = REJECT,
+               .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map helper access to adjusted map (via variable)",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+                                   offsetof(struct other_val, bar), 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 11 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map helper access to adjusted map (via variable): no max check",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 10 },
+               .result = REJECT,
+               .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "map helper access to adjusted map (via variable): wrong max check",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+                                   offsetof(struct other_val, bar) + 1, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map3 = { 3, 11 },
+               .result = REJECT,
+               .errstr = "invalid access to map value, value_size=16 off=9 size=8",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
                "map element value is preserved across register spilling",
                .insns = {
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -11423,6 +11763,278 @@ static struct bpf_test tests[] = {
                .errstr = "BPF_XADD stores into R2 packet",
                .prog_type = BPF_PROG_TYPE_XDP,
        },
+       {
+               "bpf_get_stack return R0 within range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+                       BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
+                       BPF_MOV64_IMM(BPF_REG_4, 256),
+                       BPF_EMIT_CALL(BPF_FUNC_get_stack),
+                       BPF_MOV64_IMM(BPF_REG_1, 0),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
+                       BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_get_stack),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 4 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "ld_abs: invalid op 1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_DW, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = REJECT,
+               .errstr = "unknown opcode",
+       },
+       {
+               "ld_abs: invalid op 2",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_0, 256),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = REJECT,
+               .errstr = "unknown opcode",
+       },
+       {
+               "ld_abs: nmap reduced",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_H, 12),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
+                       BPF_LD_ABS(BPF_H, 12),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
+                       BPF_MOV32_IMM(BPF_REG_0, 18),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
+                       BPF_LD_IND(BPF_W, BPF_REG_7, 14),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
+                       BPF_MOV32_IMM(BPF_REG_0, 280971478),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
+                       BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
+                       BPF_LD_ABS(BPF_H, 12),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
+                       BPF_MOV32_IMM(BPF_REG_0, 22),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+                       BPF_LD_IND(BPF_H, BPF_REG_7, 14),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
+                       BPF_MOV32_IMM(BPF_REG_0, 17366),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
+                       BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV32_IMM(BPF_REG_0, 256),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                       0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 256,
+       },
+       {
+               "ld_abs: div + abs, test 1",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+                       BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+                       BPF_LD_ABS(BPF_B, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+                       BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       10, 20, 30, 40, 50,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 10,
+       },
+       {
+               "ld_abs: div + abs, test 2",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+                       BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+                       BPF_LD_ABS(BPF_B, 128),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+                       BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       10, 20, 30, 40, 50,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "ld_abs: div + abs, test 3",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       10, 20, 30, 40, 50,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "ld_abs: div + abs, test 4",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+                       BPF_LD_ABS(BPF_B, 256),
+                       BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       10, 20, 30, 40, 50,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "ld_abs: vlan + abs, test 1",
+               .insns = { },
+               .data = {
+                       0x34,
+               },
+               .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 0xbef,
+       },
+       {
+               "ld_abs: vlan + abs, test 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_6, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_vlan_push),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       0x34,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 42,
+       },
+       {
+               "ld_abs: jump around ld_abs",
+               .insns = { },
+               .data = {
+                       10, 11,
+               },
+               .fill_helper = bpf_fill_jump_around_ld_abs,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 10,
+       },
+       {
+               "ld_dw: xor semi-random 64 bit imms, test 1",
+               .insns = { },
+               .data = { },
+               .fill_helper = bpf_fill_rand_ld_dw,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 4090,
+       },
+       {
+               "ld_dw: xor semi-random 64 bit imms, test 2",
+               .insns = { },
+               .data = { },
+               .fill_helper = bpf_fill_rand_ld_dw,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 2047,
+       },
+       {
+               "ld_dw: xor semi-random 64 bit imms, test 3",
+               .insns = { },
+               .data = { },
+               .fill_helper = bpf_fill_rand_ld_dw,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 511,
+       },
+       {
+               "ld_dw: xor semi-random 64 bit imms, test 4",
+               .insns = { },
+               .data = { },
+               .fill_helper = bpf_fill_rand_ld_dw,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 5,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -11526,16 +12138,20 @@ static int create_map_in_map(void)
        return outer_map_fd;
 }
 
-static char bpf_vlog[32768];
+static char bpf_vlog[UINT_MAX >> 8];
 
 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
                          int *map_fds)
 {
        int *fixup_map1 = test->fixup_map1;
        int *fixup_map2 = test->fixup_map2;
+       int *fixup_map3 = test->fixup_map3;
        int *fixup_prog = test->fixup_prog;
        int *fixup_map_in_map = test->fixup_map_in_map;
 
+       if (test->fill_helper)
+               test->fill_helper(test);
+
        /* Allocating HTs with 1 elem is fine here, since we only test
         * for verifier and not do a runtime lookup, so the only thing
         * that really matters is value size in this case.
@@ -11556,6 +12172,14 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
                } while (*fixup_map2);
        }
 
+       if (*fixup_map3) {
+               map_fds[1] = create_map(sizeof(struct other_val), 1);
+               do {
+                       prog[*fixup_map3].imm = map_fds[1];
+                       fixup_map3++;
+               } while (*fixup_map3);
+       }
+
        if (*fixup_prog) {
                map_fds[2] = create_prog_array();
                do {
@@ -11577,10 +12201,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
 {
        int fd_prog, expected_ret, reject_from_alignment;
+       int prog_len, prog_type = test->prog_type;
        struct bpf_insn *prog = test->insns;
-       int prog_len = probe_filter_length(prog);
-       char data_in[TEST_DATA_LEN] = {};
-       int prog_type = test->prog_type;
        int map_fds[MAX_NR_MAPS];
        const char *expected_err;
        uint32_t retval;
@@ -11590,6 +12212,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                map_fds[i] = -1;
 
        do_test_fixup(test, prog, map_fds);
+       prog_len = probe_filter_length(prog);
 
        fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
                                     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
@@ -11629,8 +12252,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
        }
 
        if (fd_prog >= 0) {
-               err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
-                                       NULL, NULL, &retval, NULL);
+               err = bpf_prog_test_run(fd_prog, 1, test->data,
+                                       sizeof(test->data), NULL, NULL,
+                                       &retval, NULL);
                if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
                        printf("Unexpected bpf_prog_test_run error\n");
                        goto fail_log;
@@ -11788,5 +12412,6 @@ int main(int argc, char **argv)
                return EXIT_FAILURE;
        }
 
+       bpf_semi_rand_init();
        return do_test(unpriv, from, to);
 }