OSDN Git Service

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
[uclinux-h8/linux.git] / tools / lib / bpf / libbpf.c
index f6faa33..902f1ad 100644 (file)
@@ -168,6 +168,25 @@ int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
        return 0;
 }
 
+__u32 libbpf_major_version(void)
+{
+       return LIBBPF_MAJOR_VERSION;
+}
+
+__u32 libbpf_minor_version(void)
+{
+       return LIBBPF_MINOR_VERSION;
+}
+
+const char *libbpf_version_string(void)
+{
+#define __S(X) #X
+#define _S(X) __S(X)
+       return  "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
+#undef _S
+#undef __S
+}
+
 enum kern_feature_id {
        /* v4.14: kernel support for program & map names. */
        FEAT_PROG_NAME,
@@ -211,13 +230,19 @@ enum reloc_type {
        RELO_EXTERN_VAR,
        RELO_EXTERN_FUNC,
        RELO_SUBPROG_ADDR,
+       RELO_CORE,
 };
 
 struct reloc_desc {
        enum reloc_type type;
        int insn_idx;
-       int map_idx;
-       int sym_off;
+       union {
+               const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
+               struct {
+                       int map_idx;
+                       int sym_off;
+               };
+       };
 };
 
 struct bpf_sec_def;
@@ -306,7 +331,11 @@ struct bpf_program {
 
        struct reloc_desc *reloc_desc;
        int nr_reloc;
-       int log_level;
+
+       /* BPF verifier log settings */
+       char *log_buf;
+       size_t log_size;
+       __u32 log_level;
 
        struct {
                int nr;
@@ -402,6 +431,7 @@ struct bpf_map {
        char *pin_path;
        bool pinned;
        bool reused;
+       bool skipped;
        __u64 map_extra;
 };
 
@@ -548,6 +578,11 @@ struct bpf_object {
        size_t btf_module_cnt;
        size_t btf_module_cap;
 
+       /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
+       char *log_buf;
+       size_t log_size;
+       __u32 log_level;
+
        void *priv;
        bpf_object_clear_priv_t clear_priv;
 
@@ -683,6 +718,9 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
        prog->instances.fds = NULL;
        prog->instances.nr = -1;
 
+       /* inherit object's log_level */
+       prog->log_level = obj->log_level;
+
        prog->sec_name = strdup(sec_name);
        if (!prog->sec_name)
                goto errout;
@@ -2258,6 +2296,9 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
                        map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
                }
                else if (strcmp(name, "values") == 0) {
+                       bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
+                       bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
+                       const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
                        char inner_map_name[128];
                        int err;
 
@@ -2271,8 +2312,8 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
                                        map_name, name);
                                return -EINVAL;
                        }
-                       if (!bpf_map_type__is_map_in_map(map_def->map_type)) {
-                               pr_warn("map '%s': should be map-in-map.\n",
+                       if (!is_map_in_map && !is_prog_array) {
+                               pr_warn("map '%s': should be map-in-map or prog-array.\n",
                                        map_name);
                                return -ENOTSUP;
                        }
@@ -2284,22 +2325,30 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
                        map_def->value_size = 4;
                        t = btf__type_by_id(btf, m->type);
                        if (!t) {
-                               pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
-                                       map_name, m->type);
+                               pr_warn("map '%s': %s type [%d] not found.\n",
+                                       map_name, desc, m->type);
                                return -EINVAL;
                        }
                        if (!btf_is_array(t) || btf_array(t)->nelems) {
-                               pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
-                                       map_name);
+                               pr_warn("map '%s': %s spec is not a zero-sized array.\n",
+                                       map_name, desc);
                                return -EINVAL;
                        }
                        t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
                        if (!btf_is_ptr(t)) {
-                               pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
-                                       map_name, btf_kind_str(t));
+                               pr_warn("map '%s': %s def is of unexpected kind %s.\n",
+                                       map_name, desc, btf_kind_str(t));
                                return -EINVAL;
                        }
                        t = skip_mods_and_typedefs(btf, t->type, NULL);
+                       if (is_prog_array) {
+                               if (!btf_is_func_proto(t)) {
+                                       pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
+                                               map_name, btf_kind_str(t));
+                                       return -EINVAL;
+                               }
+                               continue;
+                       }
                        if (!btf_is_struct(t)) {
                                pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
                                        map_name, btf_kind_str(t));
@@ -2981,7 +3030,9 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
                 */
                btf__set_fd(kern_btf, 0);
        } else {
-               err = btf__load_into_kernel(kern_btf);
+               /* currently BPF_BTF_LOAD only supports log_level 1 */
+               err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
+                                          obj->log_level ? 1 : 0);
        }
        if (sanitize) {
                if (!err) {
@@ -3350,7 +3401,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 
        /* sort BPF programs by section name and in-section instruction offset
         * for faster search */
-       qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
+       if (obj->nr_programs)
+               qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
 
        return bpf_object__init_btf(obj, btf_data, btf_ext_data);
 }
@@ -4342,7 +4394,6 @@ static int probe_kern_prog_name(void)
 
 static int probe_kern_global_data(void)
 {
-       struct bpf_create_map_attr map_attr;
        char *cp, errmsg[STRERR_BUFSIZE];
        struct bpf_insn insns[] = {
                BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
@@ -4352,13 +4403,7 @@ static int probe_kern_global_data(void)
        };
        int ret, map, insn_cnt = ARRAY_SIZE(insns);
 
-       memset(&map_attr, 0, sizeof(map_attr));
-       map_attr.map_type = BPF_MAP_TYPE_ARRAY;
-       map_attr.key_size = sizeof(int);
-       map_attr.value_size = 32;
-       map_attr.max_entries = 1;
-
-       map = bpf_create_map_xattr(&map_attr);
+       map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
        if (map < 0) {
                ret = -errno;
                cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4488,15 +4533,11 @@ static int probe_kern_btf_type_tag(void)
 
 static int probe_kern_array_mmap(void)
 {
-       struct bpf_create_map_attr attr = {
-               .map_type = BPF_MAP_TYPE_ARRAY,
-               .map_flags = BPF_F_MMAPABLE,
-               .key_size = sizeof(int),
-               .value_size = sizeof(int),
-               .max_entries = 1,
-       };
+       LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
+       int fd;
 
-       return probe_fd(bpf_create_map_xattr(&attr));
+       fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts);
+       return probe_fd(fd);
 }
 
 static int probe_kern_exp_attach_type(void)
@@ -4535,7 +4576,6 @@ static int probe_kern_probe_read_kernel(void)
 
 static int probe_prog_bind_map(void)
 {
-       struct bpf_create_map_attr map_attr;
        char *cp, errmsg[STRERR_BUFSIZE];
        struct bpf_insn insns[] = {
                BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -4543,13 +4583,7 @@ static int probe_prog_bind_map(void)
        };
        int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
 
-       memset(&map_attr, 0, sizeof(map_attr));
-       map_attr.map_type = BPF_MAP_TYPE_ARRAY;
-       map_attr.key_size = sizeof(int);
-       map_attr.value_size = 32;
-       map_attr.max_entries = 1;
-
-       map = bpf_create_map_xattr(&map_attr);
+       map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
        if (map < 0) {
                ret = -errno;
                cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4820,19 +4854,16 @@ static void bpf_map__destroy(struct bpf_map *map);
 
 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
 {
-       struct bpf_create_map_params create_attr;
+       LIBBPF_OPTS(bpf_map_create_opts, create_attr);
        struct bpf_map_def *def = &map->def;
+       const char *map_name = NULL;
+       __u32 max_entries;
        int err = 0;
 
-       memset(&create_attr, 0, sizeof(create_attr));
-
        if (kernel_supports(obj, FEAT_PROG_NAME))
-               create_attr.name = map->name;
+               map_name = map->name;
        create_attr.map_ifindex = map->map_ifindex;
-       create_attr.map_type = def->type;
        create_attr.map_flags = def->map_flags;
-       create_attr.key_size = def->key_size;
-       create_attr.value_size = def->value_size;
        create_attr.numa_node = map->numa_node;
        create_attr.map_extra = map->map_extra;
 
@@ -4846,18 +4877,14 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
                        return nr_cpus;
                }
                pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
-               create_attr.max_entries = nr_cpus;
+               max_entries = nr_cpus;
        } else {
-               create_attr.max_entries = def->max_entries;
+               max_entries = def->max_entries;
        }
 
        if (bpf_map__is_struct_ops(map))
-               create_attr.btf_vmlinux_value_type_id =
-                       map->btf_vmlinux_value_type_id;
+               create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
 
-       create_attr.btf_fd = 0;
-       create_attr.btf_key_type_id = 0;
-       create_attr.btf_value_type_id = 0;
        if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
                create_attr.btf_fd = btf__fd(obj->btf);
                create_attr.btf_key_type_id = map->btf_key_type_id;
@@ -4903,13 +4930,17 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
        }
 
        if (obj->gen_loader) {
-               bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
+               bpf_gen__map_create(obj->gen_loader, def->type, map_name,
+                                   def->key_size, def->value_size, max_entries,
+                                   &create_attr, is_inner ? -1 : map - obj->maps);
                /* Pretend to have valid FD to pass various fd >= 0 checks.
                 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
                 */
                map->fd = 0;
        } else {
-               map->fd = libbpf__bpf_create_map_xattr(&create_attr);
+               map->fd = bpf_map_create(def->type, map_name,
+                                        def->key_size, def->value_size,
+                                        max_entries, &create_attr);
        }
        if (map->fd < 0 && (create_attr.btf_key_type_id ||
                            create_attr.btf_value_type_id)) {
@@ -4924,7 +4955,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
                create_attr.btf_value_type_id = 0;
                map->btf_key_type_id = 0;
                map->btf_value_type_id = 0;
-               map->fd = libbpf__bpf_create_map_xattr(&create_attr);
+               map->fd = bpf_map_create(def->type, map_name,
+                                        def->key_size, def->value_size,
+                                        max_entries, &create_attr);
        }
 
        err = map->fd < 0 ? -errno : 0;
@@ -4939,7 +4972,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
        return err;
 }
 
-static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
+static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
 {
        const struct bpf_map *targ_map;
        unsigned int i;
@@ -4951,18 +4984,18 @@ static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
 
                targ_map = map->init_slots[i];
                fd = bpf_map__fd(targ_map);
+
                if (obj->gen_loader) {
-                       pr_warn("// TODO map_update_elem: idx %td key %d value==map_idx %td\n",
-                               map - obj->maps, i, targ_map - obj->maps);
-                       return -ENOTSUP;
+                       bpf_gen__populate_outer_map(obj->gen_loader,
+                                                   map - obj->maps, i,
+                                                   targ_map - obj->maps);
                } else {
                        err = bpf_map_update_elem(map->fd, &i, &fd, 0);
                }
                if (err) {
                        err = -errno;
                        pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
-                               map->name, i, targ_map->name,
-                               fd, err);
+                               map->name, i, targ_map->name, fd, err);
                        return err;
                }
                pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
@@ -4975,6 +5008,59 @@ static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
        return 0;
 }
 
+static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
+{
+       const struct bpf_program *targ_prog;
+       unsigned int i;
+       int fd, err;
+
+       if (obj->gen_loader)
+               return -ENOTSUP;
+
+       for (i = 0; i < map->init_slots_sz; i++) {
+               if (!map->init_slots[i])
+                       continue;
+
+               targ_prog = map->init_slots[i];
+               fd = bpf_program__fd(targ_prog);
+
+               err = bpf_map_update_elem(map->fd, &i, &fd, 0);
+               if (err) {
+                       err = -errno;
+                       pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
+                               map->name, i, targ_prog->name, fd, err);
+                       return err;
+               }
+               pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
+                        map->name, i, targ_prog->name, fd);
+       }
+
+       zfree(&map->init_slots);
+       map->init_slots_sz = 0;
+
+       return 0;
+}
+
+static int bpf_object_init_prog_arrays(struct bpf_object *obj)
+{
+       struct bpf_map *map;
+       int i, err;
+
+       for (i = 0; i < obj->nr_maps; i++) {
+               map = &obj->maps[i];
+
+               if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
+                       continue;
+
+               err = init_prog_array_slots(obj, map);
+               if (err < 0) {
+                       zclose(map->fd);
+                       return err;
+               }
+       }
+       return 0;
+}
+
 static int
 bpf_object__create_maps(struct bpf_object *obj)
 {
@@ -4987,6 +5073,26 @@ bpf_object__create_maps(struct bpf_object *obj)
        for (i = 0; i < obj->nr_maps; i++) {
                map = &obj->maps[i];
 
+               /* To support old kernels, we skip creating global data maps
+                * (.rodata, .data, .kconfig, etc); later on, during program
+                * loading, if we detect that at least one of the to-be-loaded
+                * programs is referencing any global data map, we'll error
+                * out with program name and relocation index logged.
+                * This approach allows to accommodate Clang emitting
+                * unnecessary .rodata.str1.1 sections for string literals,
+                * but also it allows to have CO-RE applications that use
+                * global variables in some of BPF programs, but not others.
+                * If those global variable-using programs are not loaded at
+                * runtime due to bpf_program__set_autoload(prog, false),
+                * bpf_object loading will succeed just fine even on old
+                * kernels.
+                */
+               if (bpf_map__is_internal(map) &&
+                   !kernel_supports(obj, FEAT_GLOBAL_DATA)) {
+                       map->skipped = true;
+                       continue;
+               }
+
                retried = false;
 retry:
                if (map->pin_path) {
@@ -5023,8 +5129,8 @@ retry:
                                }
                        }
 
-                       if (map->init_slots_sz) {
-                               err = init_map_slots(obj, map);
+                       if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
+                               err = init_map_in_map_slots(obj, map);
                                if (err < 0) {
                                        zclose(map->fd);
                                        goto err_out;
@@ -5096,15 +5202,18 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand,
                              struct bpf_core_cand_list *cands)
 {
        struct bpf_core_cand *new_cands, *cand;
-       const struct btf_type *t;
-       const char *targ_name;
+       const struct btf_type *t, *local_t;
+       const char *targ_name, *local_name;
        size_t targ_essent_len;
        int n, i;
 
+       local_t = btf__type_by_id(local_cand->btf, local_cand->id);
+       local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
+
        n = btf__type_cnt(targ_btf);
        for (i = targ_start_id; i < n; i++) {
                t = btf__type_by_id(targ_btf, i);
-               if (btf_kind(t) != btf_kind(local_cand->t))
+               if (btf_kind(t) != btf_kind(local_t))
                        continue;
 
                targ_name = btf__name_by_offset(targ_btf, t->name_off);
@@ -5115,12 +5224,12 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand,
                if (targ_essent_len != local_essent_len)
                        continue;
 
-               if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
+               if (strncmp(local_name, targ_name, local_essent_len) != 0)
                        continue;
 
                pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
-                        local_cand->id, btf_kind_str(local_cand->t),
-                        local_cand->name, i, btf_kind_str(t), targ_name,
+                        local_cand->id, btf_kind_str(local_t),
+                        local_name, i, btf_kind_str(t), targ_name,
                         targ_btf_name);
                new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
                                              sizeof(*cands->cands));
@@ -5129,8 +5238,6 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand,
 
                cand = &new_cands[cands->len];
                cand->btf = targ_btf;
-               cand->t = t;
-               cand->name = targ_name;
                cand->id = i;
 
                cands->cands = new_cands;
@@ -5237,18 +5344,21 @@ bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 l
        struct bpf_core_cand local_cand = {};
        struct bpf_core_cand_list *cands;
        const struct btf *main_btf;
+       const struct btf_type *local_t;
+       const char *local_name;
        size_t local_essent_len;
        int err, i;
 
        local_cand.btf = local_btf;
-       local_cand.t = btf__type_by_id(local_btf, local_type_id);
-       if (!local_cand.t)
+       local_cand.id = local_type_id;
+       local_t = btf__type_by_id(local_btf, local_type_id);
+       if (!local_t)
                return ERR_PTR(-EINVAL);
 
-       local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
-       if (str_is_empty(local_cand.name))
+       local_name = btf__name_by_offset(local_btf, local_t->name_off);
+       if (str_is_empty(local_name))
                return ERR_PTR(-EINVAL);
-       local_essent_len = bpf_core_essential_name_len(local_cand.name);
+       local_essent_len = bpf_core_essential_name_len(local_name);
 
        cands = calloc(1, sizeof(*cands));
        if (!cands)
@@ -5398,12 +5508,31 @@ static void *u32_as_hash_key(__u32 x)
        return (void *)(uintptr_t)x;
 }
 
+static int record_relo_core(struct bpf_program *prog,
+                           const struct bpf_core_relo *core_relo, int insn_idx)
+{
+       struct reloc_desc *relos, *relo;
+
+       relos = libbpf_reallocarray(prog->reloc_desc,
+                                   prog->nr_reloc + 1, sizeof(*relos));
+       if (!relos)
+               return -ENOMEM;
+       relo = &relos[prog->nr_reloc];
+       relo->type = RELO_CORE;
+       relo->insn_idx = insn_idx;
+       relo->core_relo = core_relo;
+       prog->reloc_desc = relos;
+       prog->nr_reloc++;
+       return 0;
+}
+
 static int bpf_core_apply_relo(struct bpf_program *prog,
                               const struct bpf_core_relo *relo,
                               int relo_idx,
                               const struct btf *local_btf,
                               struct hashmap *cand_cache)
 {
+       struct bpf_core_spec specs_scratch[3] = {};
        const void *type_key = u32_as_hash_key(relo->type_id);
        struct bpf_core_cand_list *cands = NULL;
        const char *prog_name = prog->name;
@@ -5434,13 +5563,15 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
                return -EINVAL;
 
        if (prog->obj->gen_loader) {
-               pr_warn("// TODO core_relo: prog %td insn[%d] %s kind %d\n",
+               const char *spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
+
+               pr_debug("record_relo_core: prog %td insn[%d] %s %s %s final insn_idx %d\n",
                        prog - prog->obj->programs, relo->insn_off / 8,
-                       local_name, relo->kind);
-               return -ENOTSUP;
+                       btf_kind_str(local_type), local_name, spec_str, insn_idx);
+               return record_relo_core(prog, relo, insn_idx);
        }
 
-       if (relo->kind != BPF_TYPE_ID_LOCAL &&
+       if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
            !hashmap__find(cand_cache, type_key, (void **)&cands)) {
                cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
                if (IS_ERR(cands)) {
@@ -5456,7 +5587,8 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
                }
        }
 
-       return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo, relo_idx, local_btf, cands);
+       return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo,
+                                       relo_idx, local_btf, cands, specs_scratch);
 }
 
 static int
@@ -5586,6 +5718,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
                                insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
                                insn[0].imm = relo->map_idx;
                        } else {
+                               const struct bpf_map *map = &obj->maps[relo->map_idx];
+
+                               if (map->skipped) {
+                                       pr_warn("prog '%s': relo #%d: kernel doesn't support global data\n",
+                                               prog->name, i);
+                                       return -ENOTSUP;
+                               }
                                insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
                                insn[0].imm = obj->maps[relo->map_idx].fd;
                        }
@@ -5634,6 +5773,9 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
                case RELO_CALL:
                        /* handled already */
                        break;
+               case RELO_CORE:
+                       /* will be handled by bpf_program_record_relos() */
+                       break;
                default:
                        pr_warn("prog '%s': relo #%d: bad relo type %d\n",
                                prog->name, i, relo->type);
@@ -5797,6 +5939,8 @@ static int cmp_relo_by_insn_idx(const void *key, const void *elem)
 
 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
 {
+       if (!prog->nr_reloc)
+               return NULL;
        return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
                       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
 }
@@ -5812,8 +5956,9 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra
        relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
        if (!relos)
                return -ENOMEM;
-       memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
-              sizeof(*relos) * subprog->nr_reloc);
+       if (subprog->nr_reloc)
+               memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
+                      sizeof(*relos) * subprog->nr_reloc);
 
        for (i = main_prog->nr_reloc; i < new_cnt; i++)
                relos[i].insn_idx += subprog->sub_insn_off;
@@ -6071,6 +6216,35 @@ bpf_object__free_relocs(struct bpf_object *obj)
        }
 }
 
+static int cmp_relocs(const void *_a, const void *_b)
+{
+       const struct reloc_desc *a = _a;
+       const struct reloc_desc *b = _b;
+
+       if (a->insn_idx != b->insn_idx)
+               return a->insn_idx < b->insn_idx ? -1 : 1;
+
+       /* no two relocations should have the same insn_idx, but ... */
+       if (a->type != b->type)
+               return a->type < b->type ? -1 : 1;
+
+       return 0;
+}
+
+static void bpf_object__sort_relos(struct bpf_object *obj)
+{
+       int i;
+
+       for (i = 0; i < obj->nr_programs; i++) {
+               struct bpf_program *p = &obj->programs[i];
+
+               if (!p->nr_reloc)
+                       continue;
+
+               qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
+       }
+}
+
 static int
 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
 {
@@ -6085,6 +6259,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
                                err);
                        return err;
                }
+               if (obj->gen_loader)
+                       bpf_object__sort_relos(obj);
        }
 
        /* Before relocating calls pre-process relocations and mark
@@ -6120,6 +6296,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
                 */
                if (prog_is_subprog(obj, prog))
                        continue;
+               if (!prog->load)
+                       continue;
 
                err = bpf_object__relocate_calls(obj, prog);
                if (err) {
@@ -6133,6 +6311,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
                prog = &obj->programs[i];
                if (prog_is_subprog(obj, prog))
                        continue;
+               if (!prog->load)
+                       continue;
                err = bpf_object__relocate_data(obj, prog);
                if (err) {
                        pr_warn("prog '%s': failed to relocate data references: %d\n",
@@ -6155,9 +6335,11 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
        int i, j, nrels, new_sz;
        const struct btf_var_secinfo *vi = NULL;
        const struct btf_type *sec, *var, *def;
-       struct bpf_map *map = NULL, *targ_map;
+       struct bpf_map *map = NULL, *targ_map = NULL;
+       struct bpf_program *targ_prog = NULL;
+       bool is_prog_array, is_map_in_map;
        const struct btf_member *member;
-       const char *name, *mname;
+       const char *name, *mname, *type;
        unsigned int moff;
        Elf64_Sym *sym;
        Elf64_Rel *rel;
@@ -6184,11 +6366,6 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
                        return -LIBBPF_ERRNO__FORMAT;
                }
                name = elf_sym_str(obj, sym->st_name) ?: "<?>";
-               if (sym->st_shndx != obj->efile.btf_maps_shndx) {
-                       pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
-                               i, name);
-                       return -LIBBPF_ERRNO__RELOC;
-               }
 
                pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
                         i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
@@ -6210,19 +6387,45 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
                        return -EINVAL;
                }
 
-               if (!bpf_map_type__is_map_in_map(map->def.type))
-                       return -EINVAL;
-               if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
-                   map->def.key_size != sizeof(int)) {
-                       pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
-                               i, map->name, sizeof(int));
+               is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
+               is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
+               type = is_map_in_map ? "map" : "prog";
+               if (is_map_in_map) {
+                       if (sym->st_shndx != obj->efile.btf_maps_shndx) {
+                               pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
+                                       i, name);
+                               return -LIBBPF_ERRNO__RELOC;
+                       }
+                       if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
+                           map->def.key_size != sizeof(int)) {
+                               pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
+                                       i, map->name, sizeof(int));
+                               return -EINVAL;
+                       }
+                       targ_map = bpf_object__find_map_by_name(obj, name);
+                       if (!targ_map) {
+                               pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
+                                       i, name);
+                               return -ESRCH;
+                       }
+               } else if (is_prog_array) {
+                       targ_prog = bpf_object__find_program_by_name(obj, name);
+                       if (!targ_prog) {
+                               pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
+                                       i, name);
+                               return -ESRCH;
+                       }
+                       if (targ_prog->sec_idx != sym->st_shndx ||
+                           targ_prog->sec_insn_off * 8 != sym->st_value ||
+                           prog_is_subprog(obj, targ_prog)) {
+                               pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
+                                       i, name);
+                               return -LIBBPF_ERRNO__RELOC;
+                       }
+               } else {
                        return -EINVAL;
                }
 
-               targ_map = bpf_object__find_map_by_name(obj, name);
-               if (!targ_map)
-                       return -ESRCH;
-
                var = btf__type_by_id(obj->btf, vi->type);
                def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
                if (btf_vlen(def) == 0)
@@ -6253,30 +6456,15 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
                               (new_sz - map->init_slots_sz) * host_ptr_sz);
                        map->init_slots_sz = new_sz;
                }
-               map->init_slots[moff] = targ_map;
+               map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
 
-               pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
-                        i, map->name, moff, name);
+               pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
+                        i, map->name, moff, type, name);
        }
 
        return 0;
 }
 
-static int cmp_relocs(const void *_a, const void *_b)
-{
-       const struct reloc_desc *a = _a;
-       const struct reloc_desc *b = _b;
-
-       if (a->insn_idx != b->insn_idx)
-               return a->insn_idx < b->insn_idx ? -1 : 1;
-
-       /* no two relocations should have the same insn_idx, but ... */
-       if (a->type != b->type)
-               return a->type < b->type ? -1 : 1;
-
-       return 0;
-}
-
 static int bpf_object__collect_relos(struct bpf_object *obj)
 {
        int i, err;
@@ -6309,14 +6497,7 @@ static int bpf_object__collect_relos(struct bpf_object *obj)
                        return err;
        }
 
-       for (i = 0; i < obj->nr_programs; i++) {
-               struct bpf_program *p = &obj->programs[i];
-
-               if (!p->nr_reloc)
-                       continue;
-
-               qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
-       }
+       bpf_object__sort_relos(obj);
        return 0;
 }
 
@@ -6419,8 +6600,10 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
        const char *prog_name = NULL;
        char *cp, errmsg[STRERR_BUFSIZE];
        size_t log_buf_size = 0;
-       char *log_buf = NULL;
+       char *log_buf = NULL, *tmp;
        int btf_fd, ret, err;
+       bool own_log_buf = true;
+       __u32 log_level = prog->log_level;
 
        if (prog->type == BPF_PROG_TYPE_UNSPEC) {
                /*
@@ -6438,7 +6621,6 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
        load_attr.expected_attach_type = prog->expected_attach_type;
        if (kernel_supports(obj, FEAT_PROG_NAME))
                prog_name = prog->name;
-       load_attr.attach_btf_id = prog->attach_btf_id;
        load_attr.attach_prog_fd = prog->attach_prog_fd;
        load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
        load_attr.attach_btf_id = prog->attach_btf_id;
@@ -6456,7 +6638,7 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
                load_attr.line_info_rec_size = prog->line_info_rec_size;
                load_attr.line_info_cnt = prog->line_info_cnt;
        }
-       load_attr.log_level = prog->log_level;
+       load_attr.log_level = log_level;
        load_attr.prog_flags = prog->prog_flags;
        load_attr.fd_array = obj->fd_array;
 
@@ -6477,22 +6659,45 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
                *prog_fd = -1;
                return 0;
        }
-retry_load:
-       if (log_buf_size) {
-               log_buf = malloc(log_buf_size);
-               if (!log_buf)
-                       return -ENOMEM;
 
-               *log_buf = 0;
+retry_load:
+       /* if log_level is zero, we don't request logs initially even if
+        * custom log_buf is specified; if the program load fails, then we'll
+        * bump log_level to 1 and use either custom log_buf or we'll allocate
+        * our own and retry the load to get details on what failed
+        */
+       if (log_level) {
+               if (prog->log_buf) {
+                       log_buf = prog->log_buf;
+                       log_buf_size = prog->log_size;
+                       own_log_buf = false;
+               } else if (obj->log_buf) {
+                       log_buf = obj->log_buf;
+                       log_buf_size = obj->log_size;
+                       own_log_buf = false;
+               } else {
+                       log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
+                       tmp = realloc(log_buf, log_buf_size);
+                       if (!tmp) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       log_buf = tmp;
+                       log_buf[0] = '\0';
+                       own_log_buf = true;
+               }
        }
 
        load_attr.log_buf = log_buf;
        load_attr.log_size = log_buf_size;
-       ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
+       load_attr.log_level = log_level;
 
+       ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
        if (ret >= 0) {
-               if (log_buf && load_attr.log_level)
-                       pr_debug("verifier log:\n%s", log_buf);
+               if (log_level && own_log_buf) {
+                       pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
+                                prog->name, log_buf);
+               }
 
                if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
                        struct bpf_map *map;
@@ -6505,8 +6710,8 @@ retry_load:
 
                                if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
                                        cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
-                                       pr_warn("prog '%s': failed to bind .rodata map: %s\n",
-                                               prog->name, cp);
+                                       pr_warn("prog '%s': failed to bind map '%s': %s\n",
+                                               prog->name, map->real_name, cp);
                                        /* Don't fail hard if can't bind rodata. */
                                }
                        }
@@ -6517,49 +6722,41 @@ retry_load:
                goto out;
        }
 
-       if (!log_buf || errno == ENOSPC) {
-               log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
-                                  log_buf_size << 1);
-
-               free(log_buf);
+       if (log_level == 0) {
+               log_level = 1;
                goto retry_load;
        }
-       ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
+       /* On ENOSPC, increase log buffer size and retry, unless custom
+        * log_buf is specified.
+        * Be careful to not overflow u32, though. Kernel's log buf size limit
+        * isn't part of UAPI so it can always be bumped to full 4GB. So don't
+        * multiply by 2 unless we are sure we'll fit within 32 bits.
+        * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
+        */
+       if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
+               goto retry_load;
+
+       ret = -errno;
        cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
-       pr_warn("load bpf program failed: %s\n", cp);
+       pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
        pr_perm_msg(ret);
 
-       if (log_buf && log_buf[0] != '\0') {
-               ret = -LIBBPF_ERRNO__VERIFY;
-               pr_warn("-- BEGIN DUMP LOG ---\n");
-               pr_warn("\n%s\n", log_buf);
-               pr_warn("-- END LOG --\n");
-       } else if (insns_cnt >= BPF_MAXINSNS) {
-               pr_warn("Program too large (%d insns), at most %d insns\n",
-                       insns_cnt, BPF_MAXINSNS);
-               ret = -LIBBPF_ERRNO__PROG2BIG;
-       } else if (prog->type != BPF_PROG_TYPE_KPROBE) {
-               /* Wrong program type? */
-               int fd;
-
-               load_attr.expected_attach_type = 0;
-               load_attr.log_buf = NULL;
-               load_attr.log_size = 0;
-               fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, prog_name, license,
-                                  insns, insns_cnt, &load_attr);
-               if (fd >= 0) {
-                       close(fd);
-                       ret = -LIBBPF_ERRNO__PROGTYPE;
-                       goto out;
-               }
+       if (own_log_buf && log_buf && log_buf[0] != '\0') {
+               pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
+                       prog->name, log_buf);
+       }
+       if (insns_cnt >= BPF_MAXINSNS) {
+               pr_warn("prog '%s': program too large (%d insns), at most %d insns\n",
+                       prog->name, insns_cnt, BPF_MAXINSNS);
        }
 
 out:
-       free(log_buf);
+       if (own_log_buf)
+               free(log_buf);
        return ret;
 }
 
-static int bpf_program__record_externs(struct bpf_program *prog)
+static int bpf_program_record_relos(struct bpf_program *prog)
 {
        struct bpf_object *obj = prog->obj;
        int i;
@@ -6581,6 +6778,17 @@ static int bpf_program__record_externs(struct bpf_program *prog)
                                               ext->is_weak, false, BTF_KIND_FUNC,
                                               relo->insn_idx);
                        break;
+               case RELO_CORE: {
+                       struct bpf_core_relo cr = {
+                               .insn_off = relo->insn_idx * 8,
+                               .type_id = relo->core_relo->type_id,
+                               .access_str_off = relo->core_relo->access_str_off,
+                               .kind = relo->core_relo->kind,
+                       };
+
+                       bpf_gen__record_relo_core(obj->gen_loader, &cr);
+                       break;
+               }
                default:
                        continue;
                }
@@ -6620,7 +6828,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
                                prog->name, prog->instances.nr);
                }
                if (obj->gen_loader)
-                       bpf_program__record_externs(prog);
+                       bpf_program_record_relos(prog);
                err = bpf_object_load_prog_instance(obj, prog,
                                                    prog->insns, prog->insns_cnt,
                                                    license, kern_ver, &fd);
@@ -6749,14 +6957,16 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
        return 0;
 }
 
-static struct bpf_object *
-__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
-                  const struct bpf_object_open_opts *opts)
+static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+                                         const struct bpf_object_open_opts *opts)
 {
        const char *obj_name, *kconfig, *btf_tmp_path;
        struct bpf_object *obj;
        char tmp_name[64];
        int err;
+       char *log_buf;
+       size_t log_size;
+       __u32 log_level;
 
        if (elf_version(EV_CURRENT) == EV_NONE) {
                pr_warn("failed to init libelf for %s\n",
@@ -6779,10 +6989,22 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
                pr_debug("loading object '%s' from buffer\n", obj_name);
        }
 
+       log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
+       log_size = OPTS_GET(opts, kernel_log_size, 0);
+       log_level = OPTS_GET(opts, kernel_log_level, 0);
+       if (log_size > UINT_MAX)
+               return ERR_PTR(-EINVAL);
+       if (log_size && !log_buf)
+               return ERR_PTR(-EINVAL);
+
        obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
        if (IS_ERR(obj))
                return obj;
 
+       obj->log_buf = log_buf;
+       obj->log_size = log_size;
+       obj->log_level = log_level;
+
        btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
        if (btf_tmp_path) {
                if (strlen(btf_tmp_path) >= PATH_MAX) {
@@ -6836,7 +7058,7 @@ __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
                return NULL;
 
        pr_debug("loading %s\n", attr->file);
-       return __bpf_object__open(attr->file, NULL, 0, &opts);
+       return bpf_object_open(attr->file, NULL, 0, &opts);
 }
 
 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
@@ -6862,7 +7084,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
 
        pr_debug("loading %s\n", path);
 
-       return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts));
+       return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
 }
 
 struct bpf_object *
@@ -6872,7 +7094,7 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
        if (!obj_buf || obj_buf_sz == 0)
                return libbpf_err_ptr(-EINVAL);
 
-       return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts));
+       return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
 }
 
 struct bpf_object *
@@ -6889,7 +7111,7 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
        if (!obj_buf || obj_buf_sz == 0)
                return errno = EINVAL, NULL;
 
-       return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts));
+       return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts));
 }
 
 static int bpf_object_unload(struct bpf_object *obj)
@@ -6920,10 +7142,6 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
        bpf_object__for_each_map(m, obj) {
                if (!bpf_map__is_internal(m))
                        continue;
-               if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) {
-                       pr_warn("kernel doesn't support global data\n");
-                       return -ENOTSUP;
-               }
                if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
                        m->def.map_flags ^= BPF_F_MMAPABLE;
        }
@@ -7246,14 +7464,10 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
        return 0;
 }
 
-int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
+static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
 {
-       struct bpf_object *obj;
        int err, i;
 
-       if (!attr)
-               return libbpf_err(-EINVAL);
-       obj = attr->obj;
        if (!obj)
                return libbpf_err(-EINVAL);
 
@@ -7263,7 +7477,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
        }
 
        if (obj->gen_loader)
-               bpf_gen__init(obj->gen_loader, attr->log_level, obj->nr_programs, obj->nr_maps);
+               bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
 
        err = bpf_object__probe_loading(obj);
        err = err ? : bpf_object__load_vmlinux_btf(obj, false);
@@ -7272,8 +7486,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
        err = err ? : bpf_object__sanitize_maps(obj);
        err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
        err = err ? : bpf_object__create_maps(obj);
-       err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : attr->target_btf_path);
-       err = err ? : bpf_object__load_progs(obj, attr->log_level);
+       err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
+       err = err ? : bpf_object__load_progs(obj, extra_log_level);
+       err = err ? : bpf_object_init_prog_arrays(obj);
 
        if (obj->gen_loader) {
                /* reset FDs */
@@ -7317,13 +7532,14 @@ out:
        return libbpf_err(err);
 }
 
-int bpf_object__load(struct bpf_object *obj)
+int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 {
-       struct bpf_object_load_attr attr = {
-               .obj = obj,
-       };
+       return bpf_object_load(attr->obj, attr->log_level, attr->target_btf_path);
+}
 
-       return bpf_object__load_xattr(&attr);
+int bpf_object__load(struct bpf_object *obj)
+{
+       return bpf_object_load(obj, 0, NULL);
 }
 
 static int make_parent_dir(const char *path)
@@ -7712,6 +7928,9 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
                char *pin_path = NULL;
                char buf[PATH_MAX];
 
+               if (map->skipped)
+                       continue;
+
                if (path) {
                        int len;
 
@@ -8296,12 +8515,46 @@ __u32 bpf_program__flags(const struct bpf_program *prog)
        return prog->prog_flags;
 }
 
-int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags)
+int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
+{
+       if (prog->obj->loaded)
+               return libbpf_err(-EBUSY);
+
+       prog->prog_flags = flags;
+       return 0;
+}
+
+__u32 bpf_program__log_level(const struct bpf_program *prog)
+{
+       return prog->log_level;
+}
+
+int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
 {
        if (prog->obj->loaded)
                return libbpf_err(-EBUSY);
 
-       prog->prog_flags |= extra_flags;
+       prog->log_level = log_level;
+       return 0;
+}
+
+const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
+{
+       *log_size = prog->log_size;
+       return prog->log_buf;
+}
+
+int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
+{
+       if (log_size && !log_buf)
+               return -EINVAL;
+       if (prog->log_size > UINT_MAX)
+               return -EINVAL;
+       if (prog->obj->loaded)
+               return -EBUSY;
+
+       prog->log_buf = log_buf;
+       prog->log_size = log_size;
        return 0;
 }