
bpf: Allow RCU-protected lookups to happen from bh context
author    Toke Høiland-Jørgensen <toke@redhat.com>
          Thu, 24 Jun 2021 16:05:54 +0000 (18:05 +0200)
committer Daniel Borkmann <daniel@iogearbox.net>
          Thu, 24 Jun 2021 17:41:15 +0000 (19:41 +0200)
XDP programs are called from a NAPI poll context, which means the RCU
reference liveness is ensured by local_bh_disable(). Add
rcu_read_lock_bh_held() as a condition to the RCU checks for map lookups so
lockdep understands that the dereferences are safe from inside *either* an
rcu_read_lock() section *or* a local_bh_disable() section. While both
local_bh_disable() and rcu_read_lock() provide RCU protection, they are
semantically distinct, so we need both conditions to prevent lockdep
complaints.

This change is done in preparation for removing the redundant
rcu_read_lock()s from drivers.
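
As a rough illustration (not part of this patch), the sketch below shows the
pattern the relaxed check is meant to cover: a reader that only has softirqs
disabled, as in the NAPI/XDP path, makes rcu_read_lock_bh_held() true while
rcu_read_lock_held() stays false, so a dereference checked against the bh
condition passes lockdep without an extra rcu_read_lock(). The struct foo type
and foo_ptr pointer are made-up placeholders standing in for a map's internal
state.

#include <linux/bottom_half.h>
#include <linux/rcupdate.h>

/* Made-up stand-in for a map's RCU-protected internal state. */
struct foo {
	int val;
};

static struct foo __rcu *foo_ptr;

static int read_from_bh_context(void)
{
	struct foo *f;
	int ret = -1;

	local_bh_disable();	/* the protection a NAPI poll context already has */

	/*
	 * rcu_read_lock_bh_held() is true here, so a dereference checked
	 * against it (the condition this patch adds) satisfies lockdep
	 * without a surrounding rcu_read_lock() section.
	 */
	f = rcu_dereference_check(foo_ptr, rcu_read_lock_bh_held());
	if (f)
		ret = f->val;

	local_bh_enable();
	return ret;
}

The same reasoning is why the WARN_ON_ONCE() checks in the hunks below gain an
additional !rcu_read_lock_bh_held() term instead of dropping the existing
conditions.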

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20210624160609.292325-5-toke@redhat.com
kernel/bpf/hashtab.c
kernel/bpf/helpers.c
kernel/bpf/lpm_trie.c

kernel/bpf/hashtab.c
index 6f6681b..72c58cc 100644
@@ -596,7 +596,8 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
        struct htab_elem *l;
        u32 hash, key_size;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+                    !rcu_read_lock_bh_held());
 
        key_size = map->key_size;
 
@@ -989,7 +990,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                /* unknown flags */
                return -EINVAL;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+                    !rcu_read_lock_bh_held());
 
        key_size = map->key_size;
 
@@ -1082,7 +1084,8 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
                /* unknown flags */
                return -EINVAL;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+                    !rcu_read_lock_bh_held());
 
        key_size = map->key_size;
 
@@ -1148,7 +1151,8 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                /* unknown flags */
                return -EINVAL;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+                    !rcu_read_lock_bh_held());
 
        key_size = map->key_size;
 
@@ -1202,7 +1206,8 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
                /* unknown flags */
                return -EINVAL;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+                    !rcu_read_lock_bh_held());
 
        key_size = map->key_size;
 
@@ -1276,7 +1281,8 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
        u32 hash, key_size;
        int ret;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+                    !rcu_read_lock_bh_held());
 
        key_size = map->key_size;
 
@@ -1311,7 +1317,8 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
        u32 hash, key_size;
        int ret;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+                    !rcu_read_lock_bh_held());
 
        key_size = map->key_size;
 
kernel/bpf/helpers.c
index a2f1f15..62cf003 100644
@@ -29,7 +29,7 @@
  */
 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 {
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
        return (unsigned long) map->ops->map_lookup_elem(map, key);
 }
 
@@ -45,7 +45,7 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
           void *, value, u64, flags)
 {
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
        return map->ops->map_update_elem(map, key, value, flags);
 }
 
@@ -62,7 +62,7 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
 
 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
 {
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
        return map->ops->map_delete_elem(map, key);
 }
 
kernel/bpf/lpm_trie.c
index 1b7b8a6..423549d 100644
@@ -232,7 +232,8 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
 
        /* Start walking the trie from the root node ... */
 
-       for (node = rcu_dereference(trie->root); node;) {
+       for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());
+            node;) {
                unsigned int next_bit;
                size_t matchlen;
 
@@ -264,7 +265,8 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
                 * traverse down.
                 */
                next_bit = extract_bit(key->data, node->prefixlen);
-               node = rcu_dereference(node->child[next_bit]);
+               node = rcu_dereference_check(node->child[next_bit],
+                                            rcu_read_lock_bh_held());
        }
 
        if (!found)