
rhashtable: key_hashfn() must return full hash value
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / lib / rhashtable.c
1 /*
2  * Resizable, Scalable, Concurrent Hash Table
3  *
4  * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
5  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6  *
7  * Based on the following paper:
8  * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9  *
10  * Code partially derived from nft_hash
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  */
16
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/log2.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/jhash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
26
27 #define HASH_DEFAULT_SIZE       64UL
28 #define HASH_MIN_SIZE           4UL
29 #define BUCKET_LOCKS_PER_CPU   128UL
30
31 /* Base bits plus 1 bit for nulls marker */
32 #define HASH_RESERVED_SPACE     (RHT_BASE_BITS + 1)
33
34 enum {
35         RHT_LOCK_NORMAL,
36         RHT_LOCK_NESTED,
37         RHT_LOCK_NESTED2,
38 };
39
40 /* The bucket lock is selected based on the hash and protects mutations
41  * on a group of hash buckets.
42  *
43  * IMPORTANT: When holding the bucket lock of both the old and new table
44  * during expansions and shrinking, the old bucket lock must always be
45  * acquired first.
46  */
47 static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
48 {
49         return &tbl->locks[hash & tbl->locks_mask];
50 }
51
52 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
53 #define ASSERT_BUCKET_LOCK(TBL, HASH) \
54         BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))
55
56 #ifdef CONFIG_PROVE_LOCKING
57 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
58 {
59         return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
60 }
61 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
62
63 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
64 {
65         spinlock_t *lock = bucket_lock(tbl, hash);
66
67         return (debug_locks) ? lockdep_is_held(lock) : 1;
68 }
69 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
70 #endif
71
72 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
73 {
74         return (void *) he - ht->p.head_offset;
75 }
76
77 static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
78 {
79         return hash & (tbl->size - 1);
80 }
81
82 static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
83 {
84         u32 hash;
85
86         if (unlikely(!ht->p.key_len))
87                 hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
88         else
89                 hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
90                                     ht->p.hash_rnd);
91
92         return hash >> HASH_RESERVED_SPACE;
93 }
94
95 static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
96 {
97         return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
98 }
99
100 static u32 head_hashfn(const struct rhashtable *ht,
101                        const struct bucket_table *tbl,
102                        const struct rhash_head *he)
103 {
104         return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
105 }
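
/* Worked example (illustrative, not part of the upstream file): the helpers
 * above return the full hash (shifted down by HASH_RESERVED_SPACE); only
 * rht_bucket_index() reduces it to a bucket number for a particular table.
 * Keeping the full value matters during a resize because the same hash is
 * masked against two different table sizes. Assuming an already-shifted
 * hash value of 0x2f5:
 *
 *        old table, size 64:   0x2f5 & (64 - 1)  = 0x35  (bucket 53)
 *        new table, size 128:  0x2f5 & (128 - 1) = 0x75  (bucket 117)
 *
 * If key_hashfn() returned a bucket index instead of the full hash, the
 * second masking step would not be possible.
 */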
106
107 static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
108 {
109         struct rhash_head __rcu **pprev;
110
111         for (pprev = &tbl->buckets[n];
112              !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
113              pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
114                 ;
115
116         return pprev;
117 }
118
119 static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
120 {
121         unsigned int i, size;
122 #if defined(CONFIG_PROVE_LOCKING)
123         unsigned int nr_pcpus = 2;
124 #else
125         unsigned int nr_pcpus = num_possible_cpus();
126 #endif
127
128         nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
129         size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
130
131         /* Never allocate more than one lock per bucket */
132         size = min_t(unsigned int, size, tbl->size);
133
134         if (sizeof(spinlock_t) != 0) {
135 #ifdef CONFIG_NUMA
136                 if (size * sizeof(spinlock_t) > PAGE_SIZE)
137                         tbl->locks = vmalloc(size * sizeof(spinlock_t));
138                 else
139 #endif
140                 tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
141                                            GFP_KERNEL);
142                 if (!tbl->locks)
143                         return -ENOMEM;
144                 for (i = 0; i < size; i++)
145                         spin_lock_init(&tbl->locks[i]);
146         }
147         tbl->locks_mask = size - 1;
148
149         return 0;
150 }
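
/* Worked example (illustrative, not part of the upstream file): assuming a
 * machine with 8 possible CPUs and the default locks_mul of
 * BUCKET_LOCKS_PER_CPU (128), the sizing above gives
 *
 *        size = roundup_pow_of_two(8 * 128) = 1024
 *        size = min(size, tbl->size), e.g. min(1024, 64) = 64
 *
 * so a 64-bucket table gets one lock per bucket, while a much larger table
 * is capped at 1024 locks shared across its buckets via locks_mask.
 */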
151
152 static void bucket_table_free(const struct bucket_table *tbl)
153 {
154         if (tbl)
155                 kvfree(tbl->locks);
156
157         kvfree(tbl);
158 }
159
160 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
161                                                size_t nbuckets)
162 {
163         struct bucket_table *tbl;
164         size_t size;
165         int i;
166
167         size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
168         tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
169         if (tbl == NULL)
170                 tbl = vzalloc(size);
171
172         if (tbl == NULL)
173                 return NULL;
174
175         tbl->size = nbuckets;
176
177         if (alloc_bucket_locks(ht, tbl) < 0) {
178                 bucket_table_free(tbl);
179                 return NULL;
180         }
181
182         for (i = 0; i < nbuckets; i++)
183                 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
184
185         return tbl;
186 }
187
188 /**
189  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
190  * @ht:         hash table
191  * @new_size:   new table size
192  */
193 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
194 {
195         /* Expand table when exceeding 75% load */
196         return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
197                (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
198 }
199 EXPORT_SYMBOL_GPL(rht_grow_above_75);
200
201 /**
202  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
203  * @ht:         hash table
204  * @new_size:   new table size
205  */
206 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
207 {
208         /* Shrink table beneath 30% load */
209         return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
210                (atomic_read(&ht->shift) > ht->p.min_shift);
211 }
212 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
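
/* Worked example (illustrative, not part of the upstream file): for a table
 * size of 64 the watermarks above work out to
 *
 *        grow   when nelems > 64 / 4 * 3  = 48  (and shift < max_shift,
 *                                                which requires a non-zero
 *                                                max_shift)
 *        shrink when nelems < 64 * 3 / 10 = 19  (and shift > min_shift)
 *
 * leaving a hysteresis band between roughly 30% and 75% load in which the
 * table size is left alone.
 */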
213
214 static void hashtable_chain_unzip(const struct rhashtable *ht,
215                                   const struct bucket_table *new_tbl,
216                                   struct bucket_table *old_tbl,
217                                   size_t old_hash)
218 {
219         struct rhash_head *he, *p, *next;
220         spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
221         unsigned int new_hash, new_hash2;
222
223         ASSERT_BUCKET_LOCK(old_tbl, old_hash);
224
225         /* Old bucket empty, no work needed. */
226         p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
227                                    old_hash);
228         if (rht_is_a_nulls(p))
229                 return;
230
231         new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
232         new_bucket_lock = bucket_lock(new_tbl, new_hash);
233
234         /* Advance along the old bucket chain until reaching a node that
235          * hashes to a different bucket in the new table. Call the node
236          * just before it p.
237          */
238         rht_for_each_continue(he, p->next, old_tbl, old_hash) {
239                 new_hash2 = head_hashfn(ht, new_tbl, he);
240                 if (new_hash != new_hash2)
241                         break;
242                 p = he;
243         }
244         rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
245
246         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
247
248         /* If we have encountered an entry that maps to a different bucket in
249          * the new table, lock down that bucket as well, as we might cut off
250          * the end of its chain.
251          */
252         new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
253         if (new_bucket_lock != new_bucket_lock2)
254                 spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);
255
256         /* Find the subsequent node which does hash to the same
257          * bucket as node p, or the nulls marker if no such node exists.
258          */
259         INIT_RHT_NULLS_HEAD(next, ht, old_hash);
260         if (!rht_is_a_nulls(he)) {
261                 rht_for_each_continue(he, he->next, old_tbl, old_hash) {
262                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
263                                 next = he;
264                                 break;
265                         }
266                 }
267         }
268
269         /* Set p's next pointer to that subsequent node pointer,
270          * bypassing the nodes which do not hash to p's bucket
271          */
272         rcu_assign_pointer(p->next, next);
273
274         if (new_bucket_lock != new_bucket_lock2)
275                 spin_unlock_bh(new_bucket_lock2);
276         spin_unlock_bh(new_bucket_lock);
277 }
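
/* Worked example (illustrative, not part of the upstream file): assume old
 * bucket 5 holds the zipped chain
 *
 *        A -> B -> C -> D    A, C hash to new bucket 5; B, D to new bucket 69
 *
 * The loop above stops at B, the first entry with a different new-table
 * hash, so p == A. The old bucket head is advanced to B, and A->next is
 * then pointed at C, the next entry that does hash to A's new bucket:
 *
 *        old bucket 5:   B -> C -> D
 *        chain from A:   A -> C -> D
 *
 * The chain hanging off A is still imprecise (D belongs to bucket 69), but
 * each pass peels one run off the front of the old chain. rhashtable_expand()
 * keeps calling this function, with RCU grace periods in between, until
 * every old bucket is empty and the chains are fully separated.
 */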
278
279 static void link_old_to_new(struct bucket_table *new_tbl,
280                             unsigned int new_hash, struct rhash_head *entry)
281 {
282         spinlock_t *new_bucket_lock;
283
284         new_bucket_lock = bucket_lock(new_tbl, new_hash);
285
286         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
287         rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
288         spin_unlock_bh(new_bucket_lock);
289 }
290
291 /**
292  * rhashtable_expand - Expand hash table while allowing concurrent lookups
293  * @ht:         the hash table to expand
294  *
295  * A secondary bucket array is allocated and the hash entries are migrated
296  * while keeping them on both lists until the end of the RCU grace period.
297  *
298  * This function may only be called in a context where it is safe to call
299  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
300  *
301  * The caller must ensure that no concurrent resizing occurs by holding
302  * ht->mutex.
303  *
304  * It is valid to have concurrent insertions and deletions protected by per
305  * bucket locks or concurrent RCU protected lookups and traversals.
306  */
307 int rhashtable_expand(struct rhashtable *ht)
308 {
309         struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
310         struct rhash_head *he;
311         spinlock_t *old_bucket_lock;
312         unsigned int new_hash, old_hash;
313         bool complete = false;
314
315         ASSERT_RHT_MUTEX(ht);
316
317         new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
318         if (new_tbl == NULL)
319                 return -ENOMEM;
320
321         atomic_inc(&ht->shift);
322
323         /* Make insertions go into the new, empty table right away. Deletions
324          * and lookups will be attempted in both tables until we synchronize.
325          * The synchronize_rcu() guarantees that the new table is visible to
326          * all CPUs, so no new additions go into the old table while we relink.
327          */
328         rcu_assign_pointer(ht->future_tbl, new_tbl);
329         synchronize_rcu();
330
331         /* For each new bucket, search the corresponding old bucket for the
332          * first entry that hashes to the new bucket, and link the end of
333          * the newly formed bucket chain (containing entries added to the
334          * future table) to that entry. Since all entries which will end up in
335          * the new bucket appear in the same old bucket, this constructs an
336          * entirely valid new hash table, but with multiple buckets
337          * "zipped" together into a single imprecise chain.
338          */
339         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
340                 old_hash = rht_bucket_index(old_tbl, new_hash);
341                 old_bucket_lock = bucket_lock(old_tbl, old_hash);
342
343                 spin_lock_bh(old_bucket_lock);
344                 rht_for_each(he, old_tbl, old_hash) {
345                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
346                                 link_old_to_new(new_tbl, new_hash, he);
347                                 break;
348                         }
349                 }
350                 spin_unlock_bh(old_bucket_lock);
351         }
352
353         /* Publish the new table pointer. Lookups may now traverse
354          * the new table, but they will not benefit from any
355          * additional efficiency until later steps unzip the buckets.
356          */
357         rcu_assign_pointer(ht->tbl, new_tbl);
358
359         /* Unzip interleaved hash chains */
360         while (!complete && !ht->being_destroyed) {
361                 /* Wait for readers. All new readers will see the new
362                  * table, and thus no references to the old table will
363                  * remain.
364                  */
365                 synchronize_rcu();
366
367                 /* For each bucket in the old table (each of which
368                  * contains items from multiple buckets of the new
369                  * table): ...
370                  */
371                 complete = true;
372                 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
373                         struct rhash_head *head;
374
375                         old_bucket_lock = bucket_lock(old_tbl, old_hash);
376                         spin_lock_bh(old_bucket_lock);
377
378                         hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
379                         head = rht_dereference_bucket(old_tbl->buckets[old_hash],
380                                                       old_tbl, old_hash);
381                         if (!rht_is_a_nulls(head))
382                                 complete = false;
383
384                         spin_unlock_bh(old_bucket_lock);
385                 }
386         }
387
388         bucket_table_free(old_tbl);
389         return 0;
390 }
391 EXPORT_SYMBOL_GPL(rhashtable_expand);
392
393 /**
394  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
395  * @ht:         the hash table to shrink
396  *
397  * This function may only be called in a context where it is safe to call
398  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
399  *
400  * The caller must ensure that no concurrent resizing occurs by holding
401  * ht->mutex.
402  *
403  * It is valid to have concurrent insertions and deletions protected by per
404  * bucket locks or concurrent RCU protected lookups and traversals.
408  */
409 int rhashtable_shrink(struct rhashtable *ht)
410 {
411         struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
412         spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
413         unsigned int new_hash;
414
415         ASSERT_RHT_MUTEX(ht);
416
417         new_tbl = bucket_table_alloc(ht, tbl->size / 2);
418         if (new_tbl == NULL)
419                 return -ENOMEM;
420
421         rcu_assign_pointer(ht->future_tbl, new_tbl);
422         synchronize_rcu();
423
424         /* Link the first entry in the old bucket to the end of the
425          * bucket in the new table. As entries are concurrently being
426          * added to the new table, lock down the new bucket. As we
427          * always divide the size in half when shrinking, each bucket
428          * in the new table maps to exactly two buckets in the old
429          * table.
430          *
431          * As removals can occur concurrently on the old table, we need
432          * to lock down both matching buckets in the old table.
433          */
434         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
435                 old_bucket_lock1 = bucket_lock(tbl, new_hash);
436                 old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
437                 new_bucket_lock = bucket_lock(new_tbl, new_hash);
438
439                 spin_lock_bh(old_bucket_lock1);
440
441                 /* Depending on how buckets map to locks, the buckets in
442                  * the lower and upper regions may share the same lock.
443                  */
444                 if (old_bucket_lock1 != old_bucket_lock2) {
445                         spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
446                         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
447                 } else {
448                         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
449                 }
450
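                /* Note: the second rcu_assign_pointer() below is not a
                 * duplicate. bucket_tail() is re-evaluated after the first
                 * link, so the upper old bucket's chain is appended behind
                 * the lower one rather than overwriting it.
                 */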
451                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
452                                    tbl->buckets[new_hash]);
453                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
454                                    tbl->buckets[new_hash + new_tbl->size]);
455
456                 spin_unlock_bh(new_bucket_lock);
457                 if (old_bucket_lock1 != old_bucket_lock2)
458                         spin_unlock_bh(old_bucket_lock2);
459                 spin_unlock_bh(old_bucket_lock1);
460         }
461
462         /* Publish the new, valid hash table */
463         rcu_assign_pointer(ht->tbl, new_tbl);
464         atomic_dec(&ht->shift);
465
466         /* Wait for readers. No new readers will have references to the
467          * old hash table.
468          */
469         synchronize_rcu();
470
471         bucket_table_free(tbl);
472
473         return 0;
474 }
475 EXPORT_SYMBOL_GPL(rhashtable_shrink);
476
477 static void rht_deferred_worker(struct work_struct *work)
478 {
479         struct rhashtable *ht;
480         struct bucket_table *tbl;
481         struct rhashtable_walker *walker;
482
483         ht = container_of(work, struct rhashtable, run_work);
484         mutex_lock(&ht->mutex);
485         if (ht->being_destroyed)
486                 goto unlock;
487
488         tbl = rht_dereference(ht->tbl, ht);
489
490         list_for_each_entry(walker, &ht->walkers, list)
491                 walker->resize = true;
492
493         if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
494                 rhashtable_expand(ht);
495         else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
496                 rhashtable_shrink(ht);
497
498 unlock:
499         mutex_unlock(&ht->mutex);
500 }
501
502 static void rhashtable_wakeup_worker(struct rhashtable *ht)
503 {
504         struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
505         struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
506         size_t size = tbl->size;
507
508         /* Only adjust the table if no resizing is currently in progress. */
509         if (tbl == new_tbl &&
510             ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
511              (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
512                 schedule_work(&ht->run_work);
513 }
514
515 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
516                                 struct bucket_table *tbl, u32 hash)
517 {
518         struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
519                                                          tbl, hash);
520
521         if (rht_is_a_nulls(head))
522                 INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
523         else
524                 RCU_INIT_POINTER(obj->next, head);
525
526         rcu_assign_pointer(tbl->buckets[hash], obj);
527
528         atomic_inc(&ht->nelems);
529
530         rhashtable_wakeup_worker(ht);
531 }
532
533 /**
534  * rhashtable_insert - insert object into hash table
535  * @ht:         hash table
536  * @obj:        pointer to hash head inside object
537  *
538  * Will take a per bucket spinlock to protect against mutual mutations
539  * on the same bucket. Multiple insertions may occur in parallel unless
540  * they map to the same bucket lock.
541  *
542  * It is safe to call this function from atomic context.
543  *
544  * Will trigger an automatic deferred table resizing if the size grows
545  * beyond the watermark indicated by grow_decision() which can be passed
546  * to rhashtable_init().
547  */
548 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
549 {
550         struct bucket_table *tbl;
551         spinlock_t *lock;
552         unsigned hash;
553
554         rcu_read_lock();
555
556         tbl = rht_dereference_rcu(ht->future_tbl, ht);
557         hash = head_hashfn(ht, tbl, obj);
558         lock = bucket_lock(tbl, hash);
559
560         spin_lock_bh(lock);
561         __rhashtable_insert(ht, obj, tbl, hash);
562         spin_unlock_bh(lock);
563
564         rcu_read_unlock();
565 }
566 EXPORT_SYMBOL_GPL(rhashtable_insert);
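
/* Usage sketch (illustrative, not part of the upstream file). It reuses the
 * hypothetical struct test_obj from the fixed length key example in the
 * rhashtable_init() kernel-doc below and assumes the table was initialised
 * with key_offset/key_len describing the int key. GFP_ATOMIC is used because
 * rhashtable_insert() itself is safe from atomic context.
 *
 *        static int example_add(struct rhashtable *ht, int key)
 *        {
 *                struct test_obj *obj;
 *
 *                obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
 *                if (!obj)
 *                        return -ENOMEM;
 *
 *                obj->key = key;
 *                rhashtable_insert(ht, &obj->node);
 *                return 0;
 *        }
 */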
567
568 /**
569  * rhashtable_remove - remove object from hash table
570  * @ht:         hash table
571  * @obj:        pointer to hash head inside object
572  *
573  * Since the hash chain is singly linked, the removal operation needs to
574  * walk the bucket chain upon removal. The removal operation is thus
575  * considerably slower if the hash table is not correctly sized.
576  *
577  * Will automatically shrink the table via rhashtable_shrink() if the
578  * shrink_decision function specified at rhashtable_init() returns true.
579  *
580  * The caller must ensure that no concurrent table mutations occur. It is
581  * however valid to have concurrent lookups if they are RCU protected.
582  */
583 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
584 {
585         struct bucket_table *tbl;
586         struct rhash_head __rcu **pprev;
587         struct rhash_head *he;
588         spinlock_t *lock;
589         unsigned int hash;
590         bool ret = false;
591
592         rcu_read_lock();
593         tbl = rht_dereference_rcu(ht->tbl, ht);
594         hash = head_hashfn(ht, tbl, obj);
595
596         lock = bucket_lock(tbl, hash);
597         spin_lock_bh(lock);
598
599 restart:
600         pprev = &tbl->buckets[hash];
601         rht_for_each(he, tbl, hash) {
602                 if (he != obj) {
603                         pprev = &he->next;
604                         continue;
605                 }
606
607                 rcu_assign_pointer(*pprev, obj->next);
608
609                 ret = true;
610                 break;
611         }
612
613         /* The entry may be linked in either 'tbl', 'future_tbl', or both.
614          * 'future_tbl' only exists for a short period of time during
615          * resizing. Thus traversing both is fine, and the extra cost is
616          * only rarely incurred.
617          */
618         if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
619                 spin_unlock_bh(lock);
620
621                 tbl = rht_dereference_rcu(ht->future_tbl, ht);
622                 hash = head_hashfn(ht, tbl, obj);
623
624                 lock = bucket_lock(tbl, hash);
625                 spin_lock_bh(lock);
626                 goto restart;
627         }
628
629         spin_unlock_bh(lock);
630
631         if (ret) {
632                 atomic_dec(&ht->nelems);
633                 rhashtable_wakeup_worker(ht);
634         }
635
636         rcu_read_unlock();
637
638         return ret;
639 }
640 EXPORT_SYMBOL_GPL(rhashtable_remove);
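
/* Usage sketch (illustrative, not part of the upstream file), again based on
 * the hypothetical struct test_obj. Freeing must be deferred by an RCU grace
 * period because lookups may still be traversing the chain; kfree_rcu()
 * assumes the object carries an additional struct rcu_head member named rcu.
 *
 *        static void example_del(struct rhashtable *ht, struct test_obj *obj)
 *        {
 *                if (rhashtable_remove(ht, &obj->node))
 *                        kfree_rcu(obj, rcu);
 *        }
 */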
641
642 struct rhashtable_compare_arg {
643         struct rhashtable *ht;
644         const void *key;
645 };
646
647 static bool rhashtable_compare(void *ptr, void *arg)
648 {
649         struct rhashtable_compare_arg *x = arg;
650         struct rhashtable *ht = x->ht;
651
652         return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
653 }
654
655 /**
656  * rhashtable_lookup - lookup key in hash table
657  * @ht:         hash table
658  * @key:        pointer to key
659  *
660  * Computes the hash value for the key and traverses the bucket chain looking
661  * for an entry with an identical key. The first matching entry is returned.
662  *
663  * This lookup function may only be used for fixed-key hash tables (key_len
664  * parameter set). It will BUG() if used inappropriately.
665  *
666  * Lookups may occur in parallel with hashtable mutations and resizing.
667  */
668 void *rhashtable_lookup(struct rhashtable *ht, const void *key)
669 {
670         struct rhashtable_compare_arg arg = {
671                 .ht = ht,
672                 .key = key,
673         };
674
675         BUG_ON(!ht->p.key_len);
676
677         return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
678 }
679 EXPORT_SYMBOL_GPL(rhashtable_lookup);
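
/* Usage sketch (illustrative, not part of the upstream file), using the
 * hypothetical struct test_obj with an int key. The caller holds the RCU
 * read lock across the lookup and the use of the result, since the object
 * is only guaranteed to stay around for the duration of the read-side
 * critical section.
 *
 *        struct test_obj *obj;
 *        int key = 42;
 *
 *        rcu_read_lock();
 *        obj = rhashtable_lookup(ht, &key);
 *        if (obj)
 *                pr_info("found key %d\n", obj->key);
 *        rcu_read_unlock();
 */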
680
681 /**
682  * rhashtable_lookup_compare - search hash table with compare function
683  * @ht:         hash table
684  * @key:        the pointer to the key
685  * @compare:    compare function, must return true on match
686  * @arg:        argument passed on to compare function
687  *
688  * Traverses the bucket chain behind the provided hash value and calls the
689  * specified compare function for each entry.
690  *
691  * Lookups may occur in parallel with hashtable mutations and resizing.
692  *
693  * Returns the first entry on which the compare function returned true.
694  */
695 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
696                                 bool (*compare)(void *, void *), void *arg)
697 {
698         const struct bucket_table *tbl, *old_tbl;
699         struct rhash_head *he;
700         u32 hash;
701
702         rcu_read_lock();
703
704         old_tbl = rht_dereference_rcu(ht->tbl, ht);
705         tbl = rht_dereference_rcu(ht->future_tbl, ht);
706         hash = key_hashfn(ht, key, ht->p.key_len);
707 restart:
708         rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
709                 if (!compare(rht_obj(ht, he), arg))
710                         continue;
711                 rcu_read_unlock();
712                 return rht_obj(ht, he);
713         }
714
715         if (unlikely(tbl != old_tbl)) {
716                 tbl = old_tbl;
717                 goto restart;
718         }
719         rcu_read_unlock();
720
721         return NULL;
722 }
723 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
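
/* Usage sketch (illustrative, not part of the upstream file): a compare
 * callback can match on more than the raw key bytes via @arg. The structures
 * below are hypothetical; the table is assumed to be configured with
 * key_offset pointing at the key member and key_len = sizeof(u32), since the
 * @key argument is only used to compute the hash.
 *
 *        struct my_entry {
 *                u32                     key;
 *                u8                      type;
 *                struct rhash_head       node;
 *        };
 *
 *        struct my_arg {
 *                u32     key;
 *                u8      type;
 *        };
 *
 *        static bool my_cmp(void *ptr, void *arg)
 *        {
 *                struct my_entry *e = ptr;
 *                struct my_arg *a = arg;
 *
 *                return e->key == a->key && e->type == a->type;
 *        }
 *
 *        struct my_arg a = { .key = 1, .type = 2 };
 *        struct my_entry *e;
 *
 *        e = rhashtable_lookup_compare(ht, &a.key, my_cmp, &a);
 */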
724
725 /**
726  * rhashtable_lookup_insert - lookup and insert object into hash table
727  * @ht:         hash table
728  * @obj:        pointer to hash head inside object
729  *
730  * Locks down the bucket chain in both the old and new table if a resize
731  * is in progress to ensure that writers can't remove from the old table
732  * and can't insert to the new table during the atomic operation of search
733  * and insertion. Searches for duplicates in both the old and new table if
734  * a resize is in progress.
735  *
736  * This lookup function may only be used for fixed-key hash tables (key_len
737  * parameter set). It will BUG() if used inappropriately.
738  *
739  * It is safe to call this function from atomic context.
740  *
741  * Will trigger an automatic deferred table resizing if the size grows
742  * beyond the watermark indicated by grow_decision() which can be passed
743  * to rhashtable_init().
744  */
745 bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
746 {
747         struct rhashtable_compare_arg arg = {
748                 .ht = ht,
749                 .key = rht_obj(ht, obj) + ht->p.key_offset,
750         };
751
752         BUG_ON(!ht->p.key_len);
753
754         return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
755                                                 &arg);
756 }
757 EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
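
/* Usage sketch (illustrative, not part of the upstream file): with the
 * hypothetical struct test_obj, duplicate keys can be rejected atomically
 * with respect to concurrent inserts of the same key; the object is freed
 * here if a matching entry already exists.
 *
 *        static int example_add_unique(struct rhashtable *ht,
 *                                      struct test_obj *obj)
 *        {
 *                if (!rhashtable_lookup_insert(ht, &obj->node)) {
 *                        kfree(obj);
 *                        return -EEXIST;
 *                }
 *                return 0;
 *        }
 */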
758
759 /**
760  * rhashtable_lookup_compare_insert - search and insert object to hash table
761  *                                    with compare function
762  * @ht:         hash table
763  * @obj:        pointer to hash head inside object
764  * @compare:    compare function, must return true on match
765  * @arg:        argument passed on to compare function
766  *
767  * Locks down the bucket chain in both the old and new table if a resize
768  * is in progress to ensure that writers can't remove from the old table
769  * and can't insert to the new table during the atomic operation of search
770  * and insertion. Searches for duplicates in both the old and new table if
771  * a resize is in progress.
772  *
773  * Lookups may occur in parallel with hashtable mutations and resizing.
774  *
775  * Will trigger an automatic deferred table resizing if the size grows
776  * beyond the watermark indicated by grow_decision() which can be passed
777  * to rhashtable_init().
778  */
779 bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
780                                       struct rhash_head *obj,
781                                       bool (*compare)(void *, void *),
782                                       void *arg)
783 {
784         struct bucket_table *new_tbl, *old_tbl;
785         spinlock_t *new_bucket_lock, *old_bucket_lock;
786         u32 new_hash, old_hash;
787         bool success = true;
788
789         BUG_ON(!ht->p.key_len);
790
791         rcu_read_lock();
792
793         old_tbl = rht_dereference_rcu(ht->tbl, ht);
794         old_hash = head_hashfn(ht, old_tbl, obj);
795         old_bucket_lock = bucket_lock(old_tbl, old_hash);
796         spin_lock_bh(old_bucket_lock);
797
798         new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
799         new_hash = head_hashfn(ht, new_tbl, obj);
800         new_bucket_lock = bucket_lock(new_tbl, new_hash);
801         if (unlikely(old_tbl != new_tbl))
802                 spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
803
804         if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
805                                       compare, arg)) {
806                 success = false;
807                 goto exit;
808         }
809
810         __rhashtable_insert(ht, obj, new_tbl, new_hash);
811
812 exit:
813         if (unlikely(old_tbl != new_tbl))
814                 spin_unlock_bh(new_bucket_lock);
815         spin_unlock_bh(old_bucket_lock);
816
817         rcu_read_unlock();
818
819         return success;
820 }
821 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
822
823 /**
824  * rhashtable_walk_init - Initialise an iterator
825  * @ht:         Table to walk over
826  * @iter:       Hash table Iterator
827  *
828  * This function prepares a hash table walk.
829  *
830  * Note that if you restart a walk after rhashtable_walk_stop you
831  * may see the same object twice.  Also, you may miss objects if
832  * there are removals in between rhashtable_walk_stop and the next
833  * call to rhashtable_walk_start.
834  *
835  * For a completely stable walk you should construct your own data
836  * structure outside the hash table.
837  *
838  * This function may sleep so you must not call it from interrupt
839  * context or with spin locks held.
840  *
841  * You must call rhashtable_walk_exit if this function returns
842  * successfully.
843  */
844 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
845 {
846         iter->ht = ht;
847         iter->p = NULL;
848         iter->slot = 0;
849         iter->skip = 0;
850
851         iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
852         if (!iter->walker)
853                 return -ENOMEM;
854
855         mutex_lock(&ht->mutex);
856         list_add(&iter->walker->list, &ht->walkers);
857         mutex_unlock(&ht->mutex);
858
859         return 0;
860 }
861 EXPORT_SYMBOL_GPL(rhashtable_walk_init);
862
863 /**
864  * rhashtable_walk_exit - Free an iterator
865  * @iter:       Hash table Iterator
866  *
867  * This function frees resources allocated by rhashtable_walk_init.
868  */
869 void rhashtable_walk_exit(struct rhashtable_iter *iter)
870 {
871         mutex_lock(&iter->ht->mutex);
872         list_del(&iter->walker->list);
873         mutex_unlock(&iter->ht->mutex);
874         kfree(iter->walker);
875 }
876 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
877
878 /**
879  * rhashtable_walk_start - Start a hash table walk
880  * @iter:       Hash table iterator
881  *
882  * Start a hash table walk.  Note that we take the RCU lock in all
883  * cases including when we return an error.  So you must always call
884  * rhashtable_walk_stop to clean up.
885  *
886  * Returns zero if successful.
887  *
888  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
889  * will rewind back to the beginning and you may use it immediately
890  * by calling rhashtable_walk_next.
891  */
892 int rhashtable_walk_start(struct rhashtable_iter *iter)
893 {
894         rcu_read_lock();
895
896         if (iter->walker->resize) {
897                 iter->slot = 0;
898                 iter->skip = 0;
899                 iter->walker->resize = false;
900                 return -EAGAIN;
901         }
902
903         return 0;
904 }
905 EXPORT_SYMBOL_GPL(rhashtable_walk_start);
906
907 /**
908  * rhashtable_walk_next - Return the next object and advance the iterator
909  * @iter:       Hash table iterator
910  *
911  * Note that you must call rhashtable_walk_stop when you are finished
912  * with the walk.
913  *
914  * Returns the next object or NULL when the end of the table is reached.
915  *
916  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
917  * will rewind back to the beginning and you may continue to use it.
918  */
919 void *rhashtable_walk_next(struct rhashtable_iter *iter)
920 {
921         const struct bucket_table *tbl;
922         struct rhashtable *ht = iter->ht;
923         struct rhash_head *p = iter->p;
924         void *obj = NULL;
925
926         tbl = rht_dereference_rcu(ht->tbl, ht);
927
928         if (p) {
929                 p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
930                 goto next;
931         }
932
933         for (; iter->slot < tbl->size; iter->slot++) {
934                 int skip = iter->skip;
935
936                 rht_for_each_rcu(p, tbl, iter->slot) {
937                         if (!skip)
938                                 break;
939                         skip--;
940                 }
941
942 next:
943                 if (!rht_is_a_nulls(p)) {
944                         iter->skip++;
945                         iter->p = p;
946                         obj = rht_obj(ht, p);
947                         goto out;
948                 }
949
950                 iter->skip = 0;
951         }
952
953         iter->p = NULL;
954
955 out:
956         if (iter->walker->resize) {
957                 iter->p = NULL;
958                 iter->slot = 0;
959                 iter->skip = 0;
960                 iter->walker->resize = false;
961                 return ERR_PTR(-EAGAIN);
962         }
963
964         return obj;
965 }
966 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
967
968 /**
969  * rhashtable_walk_stop - Finish a hash table walk
970  * @iter:       Hash table iterator
971  *
972  * Finish a hash table walk.
973  */
974 void rhashtable_walk_stop(struct rhashtable_iter *iter)
975 {
976         rcu_read_unlock();
977         iter->p = NULL;
978 }
979 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
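
/* Usage sketch (illustrative, not part of the upstream file), iterating a
 * table of the hypothetical struct test_obj entries. A resize makes both
 * rhashtable_walk_start() and rhashtable_walk_next() report -EAGAIN, in
 * which case the walk restarts from the beginning and entries may be seen
 * twice; callers have to tolerate that.
 *
 *        struct rhashtable_iter iter;
 *        struct test_obj *obj;
 *        int err;
 *
 *        err = rhashtable_walk_init(ht, &iter);
 *        if (err)
 *                return err;
 *
 *        err = rhashtable_walk_start(&iter);
 *        if (err && err != -EAGAIN)
 *                goto out;
 *
 *        while ((obj = rhashtable_walk_next(&iter))) {
 *                if (IS_ERR(obj)) {
 *                        if (PTR_ERR(obj) == -EAGAIN)
 *                                continue;
 *                        break;
 *                }
 *                pr_info("key %d\n", obj->key);
 *        }
 *
 * out:
 *        rhashtable_walk_stop(&iter);
 *        rhashtable_walk_exit(&iter);
 */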
980
981 static size_t rounded_hashtable_size(struct rhashtable_params *params)
982 {
983         return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
984                    1UL << params->min_shift);
985 }
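
/* Worked example (illustrative, not part of the upstream file): with a
 * nelem_hint of 100 and the default min_shift this gives
 *
 *        roundup_pow_of_two(100 * 4 / 3) = roundup_pow_of_two(133) = 256
 *
 * i.e. the hint is scaled up so that the table starts out below the 75%
 * grow watermark.
 */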
986
987 /**
988  * rhashtable_init - initialize a new hash table
989  * @ht:         hash table to be initialized
990  * @params:     configuration parameters
991  *
992  * Initializes a new hash table based on the provided configuration
993  * parameters. A table can be configured either with a variable or
994  * fixed length key:
995  *
996  * Configuration Example 1: Fixed length keys
997  * struct test_obj {
998  *      int                     key;
999  *      void *                  my_member;
1000  *      struct rhash_head       node;
1001  * };
1002  *
1003  * struct rhashtable_params params = {
1004  *      .head_offset = offsetof(struct test_obj, node),
1005  *      .key_offset = offsetof(struct test_obj, key),
1006  *      .key_len = sizeof(int),
1007  *      .hashfn = jhash,
1008  *      .nulls_base = (1U << RHT_BASE_SHIFT),
1009  * };
1010  *
1011  * Configuration Example 2: Variable length keys
1012  * struct test_obj {
1013  *      [...]
1014  *      struct rhash_head       node;
1015  * };
1016  *
1017  * u32 my_hash_fn(const void *data, u32 seed)
1018  * {
1019  *      struct test_obj *obj = data;
1020  *
1021  *      return [... hash ...];
1022  * }
1023  *
1024  * struct rhashtable_params params = {
1025  *      .head_offset = offsetof(struct test_obj, node),
1026  *      .hashfn = jhash,
1027  *      .obj_hashfn = my_hash_fn,
1028  * };
1029  */
1030 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
1031 {
1032         struct bucket_table *tbl;
1033         size_t size;
1034
1035         size = HASH_DEFAULT_SIZE;
1036
1037         if ((params->key_len && !params->hashfn) ||
1038             (!params->key_len && !params->obj_hashfn))
1039                 return -EINVAL;
1040
1041         if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
1042                 return -EINVAL;
1043
1044         params->min_shift = max_t(size_t, params->min_shift,
1045                                   ilog2(HASH_MIN_SIZE));
1046
1047         if (params->nelem_hint)
1048                 size = rounded_hashtable_size(params);
1049
1050         memset(ht, 0, sizeof(*ht));
1051         mutex_init(&ht->mutex);
1052         memcpy(&ht->p, params, sizeof(*params));
1053         INIT_LIST_HEAD(&ht->walkers);
1054
1055         if (params->locks_mul)
1056                 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
1057         else
1058                 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
1059
1060         tbl = bucket_table_alloc(ht, size);
1061         if (tbl == NULL)
1062                 return -ENOMEM;
1063
1064         atomic_set(&ht->nelems, 0);
1065         atomic_set(&ht->shift, ilog2(tbl->size));
1066         RCU_INIT_POINTER(ht->tbl, tbl);
1067         RCU_INIT_POINTER(ht->future_tbl, tbl);
1068
1069         if (!ht->p.hash_rnd)
1070                 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
1071
1072         if (ht->p.grow_decision || ht->p.shrink_decision)
1073                 INIT_WORK(&ht->run_work, rht_deferred_worker);
1074
1075         return 0;
1076 }
1077 EXPORT_SYMBOL_GPL(rhashtable_init);
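
/* Setup sketch (illustrative, not part of the upstream file). It follows
 * Configuration Example 1 above and additionally wires up the resize
 * decisions exported by this file; my_table, my_params, my_init and my_exit
 * are hypothetical names. Note that max_shift must be non-zero for
 * rht_grow_above_75() to ever allow an expansion.
 *
 *        static struct rhashtable my_table;
 *
 *        static struct rhashtable_params my_params = {
 *                .nelem_hint      = 1024,
 *                .head_offset     = offsetof(struct test_obj, node),
 *                .key_offset      = offsetof(struct test_obj, key),
 *                .key_len         = sizeof(int),
 *                .hashfn          = jhash,
 *                .nulls_base      = (1U << RHT_BASE_SHIFT),
 *                .max_shift       = 16,
 *                .grow_decision   = rht_grow_above_75,
 *                .shrink_decision = rht_shrink_below_30,
 *        };
 *
 *        static int __init my_init(void)
 *        {
 *                return rhashtable_init(&my_table, &my_params);
 *        }
 *
 *        static void __exit my_exit(void)
 *        {
 *                rhashtable_destroy(&my_table);
 *        }
 */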
1078
1079 /**
1080  * rhashtable_destroy - destroy hash table
1081  * @ht:         the hash table to destroy
1082  *
1083  * Frees the bucket array. This function is not rcu safe, therefore the caller
1084  * has to make sure that no resizing may happen by unpublishing the hashtable
1085  * and waiting for the quiescent cycle before releasing the bucket array.
1086  */
1087 void rhashtable_destroy(struct rhashtable *ht)
1088 {
1089         ht->being_destroyed = true;
1090
1091         if (ht->p.grow_decision || ht->p.shrink_decision)
1092                 cancel_work_sync(&ht->run_work);
1093
1094         mutex_lock(&ht->mutex);
1095         bucket_table_free(rht_dereference(ht->tbl, ht));
1096         mutex_unlock(&ht->mutex);
1097 }
1098 EXPORT_SYMBOL_GPL(rhashtable_destroy);