/*
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 *
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense.  The number of
 * additional indexes (= other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size.  An entry may be `valid' or `invalid'
 * in the cache.  A valid entry is in the main hash tables of the cache,
 * and may also be on the lru list.  An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only on the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released.  Entries that cannot be freed immediately are put
 * back on the lru list.
 *
 * Lock descriptions and usage:
 *
 * Each hash chain of both the block and index hash tables now contains
 * a built-in lock used to serialize accesses to the hash chain.
 *
 * Accesses to the global data structures mb_cache_list and mb_cache_lru_list
 * are serialized via the global spinlock mb_cache_spinlock.
 *
 * Each mb_cache_entry contains a spinlock, e_entry_lock, to serialize
 * accesses to its local data, such as e_used and e_queued.
 *
 * Each block hash chain's lock has the highest lock order, followed by an
 * index hash chain's lock, mb_cache_bg_lock (used to implement an
 * mb_cache_entry's lock), and mb_cache_spinlock, with the lowest order.
 * While holding either a block or index hash chain lock, a thread can
 * acquire mb_cache_bg_lock, which in turn can also acquire
 * mb_cache_spinlock.
 *
 * Since both mb_cache_entry_get and mb_cache_entry_find scan the block and
 * index hash chains, they need to lock the corresponding hash chain.  For
 * each mb_cache_entry within a chain, they also need to lock the entry to
 * prevent any simultaneous release or free of the entry, and to serialize
 * accesses to the entry's e_used and e_queued members.
 *
 * To avoid a dangling reference to an already freed mb_cache_entry, an
 * mb_cache_entry is only freed when it is no longer on a block hash chain
 * and is no longer referenced, i.e. both e_used and e_queued are 0.  When
 * an mb_cache_entry is explicitly freed, it is first removed from its
 * block hash chain.
 */
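
/*
 * A minimal usage sketch (caller side) to illustrate the API exported
 * below.  The names "cache", "bdev", "block" and "key", as well as the
 * cache name and bucket count, are placeholders standing in for a
 * filesystem's own data, not identifiers defined in this file, and error
 * handling is reduced to the bare minimum:
 *
 *	struct mb_cache *cache = mb_cache_create("example_cache", 6);
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *	if (ce) {
 *		if (mb_cache_entry_insert(ce, bdev, block, key) == 0)
 *			mb_cache_entry_release(ce);	(entry stays cached)
 *		else
 *			mb_cache_entry_free(ce);	(duplicate entry)
 *	}
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce) {
 *		(exclusive access to the entry here)
 *		mb_cache_entry_release(ce);
 *	}
 */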
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_bl.h>
#include <linux/mbcache.h>
#include <linux/init.h>
#include <linux/blockgroup_lock.h>
#include <linux/log2.h>
#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
# define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while (0)
#else
# define mb_debug(f...) do { } while (0)
# define mb_assert(c) do { } while (0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while (0)
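
/*
 * e_used counts the handles on an entry.  An exclusive ("single writer")
 * handle, as taken by mb_cache_entry_get(), adds MB_CACHE_WRITER on top of
 * the plain reference count, so e_used >= MB_CACHE_WRITER means a writer
 * currently holds the entry and readers in mb_cache_entry_find_*() wait on
 * mb_cache_queue until it is released.
 */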
#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

#define MB_CACHE_ENTRY_LOCK_BITS	ilog2(NR_BG_LOCKS)
#define MB_CACHE_ENTRY_LOCK_INDEX(ce)			\
	(hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))
static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
static struct blockgroup_lock *mb_cache_bg_lock;
static struct kmem_cache *mb_cache_kmem_cache;

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif
/*
 * Global data: list of all mbcaches, lru list, and a spinlock for
 * accessing cache data structures on SMP machines.  The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
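
/*
 * An mb_cache_entry's lock is implemented on top of the shared
 * mb_cache_bg_lock array: the entry's address is hashed (via
 * MB_CACHE_ENTRY_LOCK_INDEX) onto one of its NR_BG_LOCKS spinlocks, and
 * the two helpers below take and drop that spinlock on the entry's behalf.
 */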
static inline void
__spin_lock_mb_cache_entry(struct mb_cache_entry *ce)
{
	spin_lock(bgl_lock_ptr(mb_cache_bg_lock,
			       MB_CACHE_ENTRY_LOCK_INDEX(ce)));
}

static inline void
__spin_unlock_mb_cache_entry(struct mb_cache_entry *ce)
{
	spin_unlock(bgl_lock_ptr(mb_cache_bg_lock,
				 MB_CACHE_ENTRY_LOCK_INDEX(ce)));
}
static inline int
__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
{
	return !hlist_bl_unhashed(&ce->e_block_list);
}

static inline void
__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_block_hashed(ce))
		hlist_bl_del_init(&ce->e_block_list);
}

static inline int
__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
{
	return !hlist_bl_unhashed(&ce->e_index.o_list);
}

static inline void
__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_index_hashed(ce))
		hlist_bl_del_init(&ce->e_index.o_list);
}
/*
 * __mb_cache_entry_unhash_unlock()
 *
 * This function is called to unhash an entry from both its block and its
 * index hash chain.
 * It assumes that both the block and the index hash chain are locked upon
 * entry, and it unlocks both hash chains on exit.
 */
static inline void
__mb_cache_entry_unhash_unlock(struct mb_cache_entry *ce)
{
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
}
static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt)));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}
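
/*
 * __mb_cache_entry_release()
 *
 * Drop one handle to a cache entry.  When the last handle is dropped, a
 * still block-hashed (valid) entry is put back on the global lru list so
 * it can be reused or reclaimed, while an unhashed (invalid) entry is
 * freed immediately.
 */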
static void
__mb_cache_entry_release(struct mb_cache_entry *ce)
{
	/* First lock the entry to serialize access to its local data. */
	__spin_lock_mb_cache_entry(ce);
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	/*
	 * Make sure that all cache entries on lru_list have
	 * both e_used and e_queued of 0s.
	 */
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))) {
		if (!__mb_cache_entry_is_block_hashed(ce)) {
			__spin_unlock_mb_cache_entry(ce);
			goto forget;
		}
		/*
		 * Need access to lru list, first drop entry lock,
		 * then reacquire the lock in the proper order.
		 */
		spin_lock(&mb_cache_spinlock);
		/*
		 * Evaluate the conditions under global lock mb_cache_spinlock,
		 * to check if mb_cache_entry_get() is running now
		 * and has already deleted the entry from mb_cache_lru_list
		 * and incremented ce->e_refcnt to prevent further additions
		 * to mb_cache_lru_list.
		 */
		if (!(ce->e_used || ce->e_queued ||
		      atomic_read(&ce->e_refcnt))) {
			if (list_empty(&ce->e_lru_list))
				list_add_tail(&ce->e_lru_list,
					      &mb_cache_lru_list);
		}
		spin_unlock(&mb_cache_spinlock);
	}
	__spin_unlock_mb_cache_entry(ce);
	return;
forget:
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}
/*
 * mb_cache_shrink_scan()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @sc: shrink_control passed from reclaim
 *
 * Returns the number of objects freed.
 */
static unsigned long
mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *entry, *tmp;
	int nr_to_scan = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;
	unsigned long freed = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
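	/*
	 * Walk the global lru list: entries still referenced (e_used,
	 * e_queued or e_refcnt non-zero) are skipped; reclaimable entries
	 * are unhashed and moved to the local free_list, and are actually
	 * freed only after the global spinlock has been dropped.
	 */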
	while ((nr_to_scan-- > 0) && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_del_init(&ce->e_lru_list);
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
			continue;
		/* Prevent any find or get operation on the entry */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		/* Ignore if it is touched by a find/get */
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt) ||
		    !list_empty(&ce->e_lru_list)) {
			hlist_bl_unlock(ce->e_index_hash_p);
			hlist_bl_unlock(ce->e_block_hash_p);
			continue;
		}
		__mb_cache_entry_unhash_unlock(ce);
		spin_unlock(&mb_cache_spinlock);
		list_add_tail(&ce->e_lru_list, &free_list);
		spin_lock(&mb_cache_spinlock);
	}
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
		freed++;
	}
	return freed;
}
static unsigned long
mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct mb_cache *cache;
	unsigned long count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);

	return vfs_pressure_ratio(count);
}
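
/*
 * The shrinker is registered with the VM in init_mbcache() below:
 * mb_cache_shrink_count() reports the total entry count across all
 * mbcaches and mb_cache_shrink_scan() reclaims entries from the global
 * lru list under memory pressure.
 */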
static struct shrinker mb_cache_shrinker = {
	.count_objects = mb_cache_shrink_count,
	.scan_objects = mb_cache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size.  Cache entries may be from
 * multiple devices.  If this is the first mbcache created, registers
 * the cache with kernel memory management.  Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if (!mb_cache_bg_lock) {
		mb_cache_bg_lock = kmalloc(sizeof(struct blockgroup_lock),
					   GFP_KERNEL);
		if (!mb_cache_bg_lock)
			return NULL;
		bgl_lock_init(mb_cache_bg_lock);
	}

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count *
		sizeof(struct hlist_bl_head), GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count *
		sizeof(struct hlist_bl_head), GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
	if (!mb_cache_kmem_cache) {
		mb_cache_kmem_cache = kmem_cache_create(name,
			sizeof(struct mb_cache_entry), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
		if (!mb_cache_kmem_cache)
			goto fail2;
	}
	cache->c_entry_cache = mb_cache_kmem_cache;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;
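	/*
	 * bucket_count << 4 allows an average chain length of up to 16
	 * entries; once this limit is reached, mb_cache_entry_alloc() first
	 * tries to reuse an entry from the lru list before allocating a
	 * new one.
	 */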

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}
/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache.  All cache entries
 * currently in use cannot be freed, and thus remain in the cache.  All
 * others are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l;
	struct mb_cache_entry *ce, *tmp;

	l = &mb_cache_lru_list;
	spin_lock(&mb_cache_spinlock);
	while (!list_is_last(l, &mb_cache_lru_list)) {
		l = l->next;
		ce = list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_del_init(&ce->e_lru_list);
			if (ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt))
				continue;
			spin_unlock(&mb_cache_spinlock);
			/*
			 * Prevent any find or get operation on the entry.
			 */
			hlist_bl_lock(ce->e_block_hash_p);
			hlist_bl_lock(ce->e_index_hash_p);
			/* Ignore if it is touched by a find/get */
			if (ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt) ||
			    !list_empty(&ce->e_lru_list)) {
				hlist_bl_unlock(ce->e_index_hash_p);
				hlist_bl_unlock(ce->e_block_hash_p);
				l = &mb_cache_lru_list;
				spin_lock(&mb_cache_spinlock);
				continue;
			}
			__mb_cache_entry_unhash_unlock(ce);
			mb_assert(!(ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt)));
			list_add_tail(&ce->e_lru_list, &free_list);
			l = &mb_cache_lru_list;
			spin_lock(&mb_cache_spinlock);
		}
	}
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(ce, GFP_KERNEL);
	}
}
/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it.  If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *ce, *tmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_entry_safe(ce, tmp, &mb_cache_lru_list, e_lru_list) {
		if (ce->e_cache == cache)
			list_move_tail(&ce->e_lru_list, &free_list);
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
		list_del_init(&ce->e_lru_list);
		/*
		 * Prevent any find or get operation on the entry.
		 */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		mb_assert(!(ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt)));
		__mb_cache_entry_unhash_unlock(ce);
		__mb_cache_entry_forget(ce, GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	if (list_empty(&mb_cache_list)) {
		kmem_cache_destroy(mb_cache_kmem_cache);
		mb_cache_kmem_cache = NULL;
	}
	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}
/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry.  The new entry will not be valid initially,
 * and thus cannot be looked up yet.  It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert().  Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		struct list_head *l;

		l = &mb_cache_lru_list;
		spin_lock(&mb_cache_spinlock);
		while (!list_is_last(l, &mb_cache_lru_list)) {
			l = l->next;
			ce = list_entry(l, struct mb_cache_entry, e_lru_list);
			if (ce->e_cache == cache) {
				list_del_init(&ce->e_lru_list);
				if (ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt))
					continue;
				/*
				 * Prevent any find or get operation on the
				 * entry.
				 */
				hlist_bl_lock(ce->e_block_hash_p);
				hlist_bl_lock(ce->e_index_hash_p);
				/* Ignore if it is touched by a find/get */
				if (ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt) ||
				    !list_empty(&ce->e_lru_list)) {
					hlist_bl_unlock(ce->e_index_hash_p);
					hlist_bl_unlock(ce->e_block_hash_p);
					l = &mb_cache_lru_list;
					continue;
				}
				mb_assert(list_empty(&ce->e_lru_list));
				mb_assert(!(ce->e_used || ce->e_queued ||
					    atomic_read(&ce->e_refcnt)));
				__mb_cache_entry_unhash_unlock(ce);
				spin_unlock(&mb_cache_spinlock);
				goto found;
			}
		}
		spin_unlock(&mb_cache_spinlock);
	}

	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
	if (!ce)
		return NULL;
	atomic_inc(&cache->c_entry_count);
	INIT_LIST_HEAD(&ce->e_lru_list);
	INIT_HLIST_BL_NODE(&ce->e_block_list);
	INIT_HLIST_BL_NODE(&ce->e_index.o_list);
	ce->e_cache = cache;
	ce->e_queued = 0;
	atomic_set(&ce->e_refcnt, 0);
found:
	ce->e_block_hash_p = &cache->c_block_hash[0];
	ce->e_index_hash_p = &cache->c_index_hash[0];
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}
/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache.  After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it.  Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block number exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct hlist_bl_node *l;
	struct hlist_bl_head *block_hash_p;
	struct hlist_bl_head *index_hash_p;
	struct mb_cache_entry *lce;

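	/*
	 * The block hash bucket is derived from the device pointer plus the
	 * low 32 bits of the block number; mb_cache_entry_get() below uses
	 * the same hash for its lookups.
	 */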
	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	block_hash_p = &cache->c_block_hash[bucket];
	hlist_bl_lock(block_hash_p);
	hlist_bl_for_each_entry(lce, l, block_hash_p, e_block_list) {
		if (lce->e_bdev == bdev && lce->e_block == block) {
			hlist_bl_unlock(block_hash_p);
			return -EBUSY;
		}
	}
	mb_assert(!__mb_cache_entry_is_block_hashed(ce));
	__mb_cache_entry_unhash_block(ce);
	__mb_cache_entry_unhash_index(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	ce->e_block_hash_p = block_hash_p;
	ce->e_index.o_key = key;
	hlist_bl_add_head(&ce->e_block_list, block_hash_p);
	hlist_bl_unlock(block_hash_p);
	bucket = hash_long(key, cache->c_bucket_bits);
	index_hash_p = &cache->c_index_hash[bucket];
	hlist_bl_lock(index_hash_p);
	ce->e_index_hash_p = index_hash_p;
	hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
	hlist_bl_unlock(index_hash_p);
	return 0;
}
/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry.  When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	__mb_cache_entry_release(ce);
}
/*
 * mb_cache_entry_free()
 *
 * Invalidate a cache entry and release the caller's handle to it.  The
 * entry is removed from both hash chains, so it can no longer be looked
 * up, and it will be freed once no handles refer to it any more.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	mb_assert(list_empty(&ce->e_lru_list));
	hlist_bl_lock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	hlist_bl_lock(ce->e_block_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
	__mb_cache_entry_release(ce);
}
/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number.  (There can only be one entry
 * in the cache per device and block.)  Returns NULL if no such cache entry
 * exists.  The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce;
	struct hlist_bl_head *block_hash_p;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	block_hash_p = &cache->c_block_hash[bucket];
	/* First serialize access to the hash chain corresponding to the block. */
	spin_lock(&mb_cache_spinlock);
	hlist_bl_lock(block_hash_p);
	hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
		mb_assert(ce->e_block_hash_p == block_hash_p);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			/*
			 * Prevent a free from removing the entry.
			 */
			atomic_inc(&ce->e_refcnt);
			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);
			hlist_bl_unlock(block_hash_p);
			spin_unlock(&mb_cache_spinlock);
			__spin_lock_mb_cache_entry(ce);
			if (ce->e_used > 0) {
				DEFINE_WAIT(wait);

				while (ce->e_used > 0) {
					ce->e_queued++;
					prepare_to_wait(&mb_cache_queue, &wait,
							TASK_UNINTERRUPTIBLE);
					__spin_unlock_mb_cache_entry(ce);
					schedule();
					__spin_lock_mb_cache_entry(ce);
					ce->e_queued--;
				}
				finish_wait(&mb_cache_queue, &wait);
			}
			ce->e_used += 1 + MB_CACHE_WRITER;
			atomic_dec(&ce->e_refcnt);
			__spin_unlock_mb_cache_entry(ce);

			if (!__mb_cache_entry_is_block_hashed(ce)) {
				__mb_cache_entry_release(ce);
				return NULL;
			}
			return ce;
		}
	}
	hlist_bl_unlock(block_hash_p);
	spin_unlock(&mb_cache_spinlock);
	return NULL;
}
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
		      struct block_device *bdev, unsigned int key)
{
	/* The index hash chain has already been acquired by the caller. */
	while (l != NULL) {
		struct mb_cache_entry *ce =
			hlist_bl_entry(l, struct mb_cache_entry,
				       e_index.o_list);
		mb_assert(ce->e_index_hash_p == head);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			/*
			 * Prevent a free from removing the entry.
			 */
			atomic_inc(&ce->e_refcnt);
			hlist_bl_unlock(head);
			__spin_lock_mb_cache_entry(ce);
			atomic_dec(&ce->e_refcnt);
			ce->e_used++;
			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			if (ce->e_used >= MB_CACHE_WRITER) {
				DEFINE_WAIT(wait);

				while (ce->e_used >= MB_CACHE_WRITER) {
					ce->e_queued++;
					prepare_to_wait(&mb_cache_queue, &wait,
							TASK_UNINTERRUPTIBLE);
					__spin_unlock_mb_cache_entry(ce);
					schedule();
					__spin_lock_mb_cache_entry(ce);
					ce->e_queued--;
				}
				finish_wait(&mb_cache_queue, &wait);
			}
			__spin_unlock_mb_cache_entry(ce);
			if (!list_empty(&ce->e_lru_list)) {
				spin_lock(&mb_cache_spinlock);
				list_del_init(&ce->e_lru_list);
				spin_unlock(&mb_cache_spinlock);
			}
			if (!__mb_cache_entry_is_block_hashed(ce)) {
				__mb_cache_entry_release(ce);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	hlist_bl_unlock(head);
	return NULL;
}
/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index.  Additional matches can be found with
 * mb_cache_entry_find_next().  Returns NULL if no match was found.  The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce = NULL;
	struct hlist_bl_head *index_hash_p;

	index_hash_p = &cache->c_index_hash[bucket];
	hlist_bl_lock(index_hash_p);
	if (!hlist_bl_empty(index_hash_p)) {
		l = hlist_bl_first(index_hash_p);
		ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	} else
		hlist_bl_unlock(index_hash_p);
	return ce;
}
/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index.  Returns NULL if no match could be found.  The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce;
	struct hlist_bl_head *index_hash_p;

	index_hash_p = &cache->c_index_hash[bucket];
	mb_assert(prev->e_index_hash_p == index_hash_p);
	hlist_bl_lock(index_hash_p);
	mb_assert(!hlist_bl_empty(index_hash_p));
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	__mb_cache_entry_release(prev);
	return ce;
}
#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)