/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS           4
#define RHT_HASH_BITS           27
#define RHT_BASE_SHIFT          RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)

struct rhash_head {
        struct rhash_head __rcu         *next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
        unsigned int            size;
        unsigned int            rehash;
        u32                     hash_rnd;
        unsigned int            locks_mask;
        spinlock_t              *locks;
        struct list_head        walkers;
        struct rcu_head         rcu;

        struct bucket_table __rcu *future_tbl;

        struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
        struct rhashtable *ht;
        const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
                               const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
        size_t                  nelem_hint;
        size_t                  key_len;
        size_t                  key_offset;
        size_t                  head_offset;
        unsigned int            max_size;
        unsigned int            min_size;
        u32                     nulls_base;
        bool                    insecure_elasticity;
        size_t                  locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
        rht_obj_cmpfn_t         obj_cmpfn;
};

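/*
 * Example: a typical user embeds a struct rhash_head in its own object
 * and describes the layout with a constant parameter block. This is an
 * illustrative sketch in the spirit of lib/test_rhashtable.c; the names
 * struct test_obj and test_params are hypothetical and not part of this
 * header:
 *
 *      struct test_obj {
 *              int                     value;
 *              struct rhash_head       node;
 *      };
 *
 *      static const struct rhashtable_params test_params = {
 *              .nelem_hint     = 1024,
 *              .head_offset    = offsetof(struct test_obj, node),
 *              .key_offset     = offsetof(struct test_obj, value),
 *              .key_len        = sizeof(int),
 *      };
 */
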
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 * @being_destroyed: True if table is set up for destruction
 */
struct rhashtable {
        struct bucket_table __rcu       *tbl;
        atomic_t                        nelems;
        bool                            being_destroyed;
        unsigned int                    key_len;
        unsigned int                    elasticity;
        struct rhashtable_params        p;
        struct work_struct              run_work;
        struct mutex                    mutex;
        spinlock_t                      lock;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
        struct list_head list;
        struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
        struct rhashtable *ht;
        struct rhash_head *p;
        struct rhashtable_walker *walker;
        unsigned int slot;
        unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
        return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
        ((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
        return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
        return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
                            const struct rhash_head *he)
{
        return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
                                            unsigned int hash)
{
        return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_hashfn(
        struct rhashtable *ht, const struct bucket_table *tbl,
        const void *key, const struct rhashtable_params params)
{
        unsigned int hash;

        /* params must be equal to ht->p if it isn't constant. */
        if (!__builtin_constant_p(params.key_len))
                hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
        else if (params.key_len) {
                unsigned int key_len = params.key_len;

                if (params.hashfn)
                        hash = params.hashfn(key, key_len, tbl->hash_rnd);
                else if (key_len & (sizeof(u32) - 1))
                        hash = jhash(key, key_len, tbl->hash_rnd);
                else
                        hash = jhash2(key, key_len / sizeof(u32),
                                      tbl->hash_rnd);
        } else {
                unsigned int key_len = ht->p.key_len;

                if (params.hashfn)
                        hash = params.hashfn(key, key_len, tbl->hash_rnd);
                else
                        hash = jhash(key, key_len, tbl->hash_rnd);
        }

        return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
        struct rhashtable *ht, const struct bucket_table *tbl,
        const struct rhash_head *he, const struct rhashtable_params params)
{
        const char *ptr = rht_obj(ht, he);

        return likely(params.obj_hashfn) ?
               rht_bucket_index(tbl, params.obj_hashfn(ptr, tbl->hash_rnd)) :
               rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
                                     const struct bucket_table *tbl)
{
        /* Expand table when exceeding 75% load */
        return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
               (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
                                       const struct bucket_table *tbl)
{
        /* Shrink table beneath 30% load */
        return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
               tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
                                      const struct bucket_table *tbl)
{
        return atomic_read(&ht->nelems) > tbl->size;
}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
                                          unsigned int hash)
{
        return &tbl->locks[hash & tbl->locks_mask];
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
                                             u32 hash)
{
        return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                           struct rhash_head *obj,
                           struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht);

int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_destroy(struct rhashtable *ht);

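/*
 * Example: initialising a table and walking all entries with the
 * iterator API. An illustrative sketch only, reusing the hypothetical
 * struct test_obj and test_params from the example above.
 * rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when a resize
 * interferes; iteration then restarts from the beginning of the table
 * and the caller simply continues:
 *
 *      struct rhashtable ht;
 *      struct rhashtable_iter iter;
 *      struct test_obj *obj;
 *      int err;
 *
 *      err = rhashtable_init(&ht, &test_params);
 *      if (err)
 *              return err;
 *
 *      err = rhashtable_walk_init(&ht, &iter);
 *      if (err)
 *              return err;
 *
 *      err = rhashtable_walk_start(&iter);
 *      if (err && err != -EAGAIN)
 *              goto exit;
 *
 *      while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *              if (IS_ERR(obj)) {
 *                      if (PTR_ERR(obj) == -EAGAIN)
 *                              continue;
 *                      break;
 *              }
 *              pr_info("value %d\n", obj->value);
 *      }
 *
 *      rhashtable_walk_stop(&iter);
 * exit:
 *      rhashtable_walk_exit(&iter);
 */
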
#define rht_dereference(p, ht) \
        rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
        rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
        rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
        rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
        ({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
        for (pos = rht_dereference_bucket(head, tbl, hash); \
             !rht_is_a_nulls(pos); \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
        rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
        for (pos = rht_dereference_bucket(head, tbl, hash); \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
        rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
                                    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)         \
        for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
             next = !rht_is_a_nulls(pos) ?                                  \
                    rht_dereference_bucket(pos->next, tbl, hash) : NULL;    \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
             pos = next,                                                    \
             next = !rht_is_a_nulls(pos) ?                                  \
                    rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
        for (({barrier(); }), \
             pos = rht_dereference_bucket_rcu(head, tbl, hash); \
             !rht_is_a_nulls(pos); \
             pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
        rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
        for (({barrier(); }), \
             pos = rht_dereference_bucket_rcu(head, tbl, hash); \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
             pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
        rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash], \
                                        tbl, hash, member)

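/*
 * Example: scanning one bucket chain under RCU protection. Illustrative
 * sketch only; struct test_obj is the hypothetical type from the example
 * above and the caller supplies the bucket index in hash:
 *
 *      struct test_obj *obj;
 *      struct rhash_head *pos;
 *      const struct bucket_table *tbl;
 *
 *      rcu_read_lock();
 *      tbl = rht_dereference_rcu(ht->tbl, ht);
 *      rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *              pr_info("value %d\n", obj->value);
 *      rcu_read_unlock();
 */
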
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
                                     const void *obj)
{
        struct rhashtable *ht = arg->ht;
        const char *ptr = obj;

        return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first matching entry, or NULL if no entry was found.
 */
static inline void *rhashtable_lookup_fast(
        struct rhashtable *ht, const void *key,
        const struct rhashtable_params params)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
        const struct bucket_table *tbl;
        struct rhash_head *he;
        unsigned int hash;

        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
        hash = rht_key_hashfn(ht, tbl, key, params);
        rht_for_each_rcu(he, tbl, hash) {
                if (params.obj_cmpfn ?
                    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
                    rhashtable_compare(&arg, rht_obj(ht, he)))
                        continue;
                rcu_read_unlock();
                return rht_obj(ht, he);
        }

        /* Ensure we see any new tables. */
        smp_rmb();

        tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (unlikely(tbl))
                goto restart;
        rcu_read_unlock();

        return NULL;
}

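/*
 * Example: looking up an entry by key. Illustrative sketch, reusing the
 * hypothetical struct test_obj and test_params from above. The caller
 * holds rcu_read_lock() so the returned object cannot be freed while it
 * is being used:
 *
 *      struct test_obj *obj;
 *      int key = 42;
 *
 *      rcu_read_lock();
 *      obj = rhashtable_lookup_fast(ht, &key, test_params);
 *      if (obj)
 *              pr_info("found value %d\n", obj->value);
 *      rcu_read_unlock();
 */
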
/* Internal function, please use rhashtable_insert_fast() instead */
static inline int __rhashtable_insert_fast(
        struct rhashtable *ht, const void *key, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
        struct bucket_table *tbl, *new_tbl;
        struct rhash_head *head;
        spinlock_t *lock;
        unsigned int elasticity;
        unsigned int hash;
        int err;

restart:
        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);

        /* All insertions must grab the oldest table containing
         * the hashed bucket that is yet to be rehashed.
         */
        for (;;) {
                hash = rht_head_hashfn(ht, tbl, obj, params);
                lock = rht_bucket_lock(tbl, hash);
                spin_lock_bh(lock);

                if (tbl->rehash <= hash)
                        break;

                spin_unlock_bh(lock);
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        }

        new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (unlikely(new_tbl)) {
                err = rhashtable_insert_slow(ht, key, obj, new_tbl);
                if (err == -EAGAIN)
                        goto slow_path;
                goto out;
        }

        if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
                spin_unlock_bh(lock);
                err = rhashtable_insert_rehash(ht);
                rcu_read_unlock();
                if (err)
                        return err;

                goto restart;
        }

        err = -EEXIST;
        elasticity = ht->elasticity;
        rht_for_each(head, tbl, hash) {
                if (key &&
                    unlikely(!(params.obj_cmpfn ?
                               params.obj_cmpfn(&arg, rht_obj(ht, head)) :
                               rhashtable_compare(&arg, rht_obj(ht, head)))))
                        goto out;
                if (!--elasticity)
                        goto slow_path;
        }

        err = 0;

        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

        RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);
        if (rht_grow_above_75(ht, tbl))
                schedule_work(&ht->run_work);

out:
        spin_unlock_bh(lock);
        rcu_read_unlock();

        return err;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% watermark checked by rht_grow_above_75().
 */
static inline int rhashtable_insert_fast(
        struct rhashtable *ht, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        return __rhashtable_insert_fast(ht, NULL, obj, params);
}

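/*
 * Example: allocating and inserting an object. Illustrative sketch,
 * reusing the hypothetical struct test_obj and test_params from above:
 *
 *      struct test_obj *obj;
 *      int err;
 *
 *      obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *      if (!obj)
 *              return -ENOMEM;
 *      obj->value = 42;
 *
 *      err = rhashtable_insert_fast(ht, &obj->node, test_params);
 *      if (err)
 *              kfree(obj);
 */
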
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% watermark checked by rht_grow_above_75().
 */
static inline int rhashtable_lookup_insert_fast(
        struct rhashtable *ht, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        const char *key = rht_obj(ht, obj);

        BUG_ON(ht->p.obj_hashfn);

        return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
                                        params);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *                                with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% watermark checked by rht_grow_above_75().
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
        struct rhashtable *ht, const void *key, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        BUG_ON(!ht->p.obj_hashfn || !key);

        return __rhashtable_insert_fast(ht, key, obj, params);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
        struct rhashtable *ht, struct bucket_table *tbl,
        struct rhash_head *obj, const struct rhashtable_params params)
{
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
        spinlock_t *lock;
        unsigned int hash;
        int err = -ENOENT;

        hash = rht_head_hashfn(ht, tbl, obj, params);
        lock = rht_bucket_lock(tbl, hash);

        spin_lock_bh(lock);

        pprev = &tbl->buckets[hash];
        rht_for_each(he, tbl, hash) {
                if (he != obj) {
                        pprev = &he->next;
                        continue;
                }

                rcu_assign_pointer(*pprev, obj->next);
                err = 0;
                break;
        }

        spin_unlock_bh(lock);

        return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30% (see rht_shrink_below_30()).
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
        struct rhashtable *ht, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        struct bucket_table *tbl;
        int err;

        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);

        /* Because we have already taken (and released) the bucket
         * lock in old_tbl, if we find that future_tbl is not yet
         * visible then that guarantees the entry to still be in
         * the old tbl if it exists.
         */
        while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
               (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
                ;

        if (err)
                goto out;

        atomic_dec(&ht->nelems);
        if (rht_shrink_below_30(ht, tbl))
                schedule_work(&ht->run_work);

out:
        rcu_read_unlock();

        return err;
}

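/*
 * Example: removing an object. Illustrative sketch, reusing the
 * hypothetical struct test_obj and test_params from above. Concurrent
 * RCU readers may still hold references, so the memory must not be
 * reused before a grace period has elapsed; kfree_rcu() here assumes
 * the object also embeds a struct rcu_head named rcu:
 *
 *      err = rhashtable_remove_fast(ht, &obj->node, test_params);
 *      if (!err)
 *              kfree_rcu(obj, rcu);
 */
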
#endif /* _LINUX_RHASHTABLE_H */