1 // SPDX-License-Identifier: GPL-2.0
3 * linux/drivers/staging/erofs/unzip_vle.c
5 * Copyright (C) 2018 HUAWEI, Inc.
6 * http://www.huawei.com/
7 * Created by Gao Xiang <gaoxiang25@huawei.com>
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of the Linux
11 * distribution for more details.
13 #include "unzip_vle.h"
14 #include <linux/prefetch.h>
16 #include <trace/events/erofs.h>
/*
 * Module-wide decompression machinery:
 * - PAGE_UNALLOCATED is a magic non-page pointer stored into a
 *   compressed_pages[] slot as a placeholder, so the slot cannot be
 *   grabbed by a file page for in-place decompression before the cached
 *   page is actually allocated (at io-submission time, see DELAYEDALLOC).
 * - z_erofs_cache_alloctype selects the cached-page allocation policy.
 * - compressed_page_t is a 1-bit tagged pointer; the fold below sets the
 *   tag to mark a page that was "just found" in the managed cache.
 */
19 * a compressed_pages[] placeholder in order to avoid
20 * being filled with file pages for in-place decompression.
22 #define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
24 /* how to allocate cached pages for a workgroup */
25 enum z_erofs_cache_alloctype {
26 DONTALLOC, /* don't allocate any cached pages */
27 DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */
31 * tagged pointer with 1-bit tag for all compressed pages
32 * tag 0 - the page is just found with an extra page reference
34 typedef tagptr1_t compressed_page_t;
36 #define tag_compressed_page_justfound(page) \
37 tagptr_fold(compressed_page_t, page, 1)
/* decompression workqueue and workgroup slab, shared module-wide */
39 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
40 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
/* Module teardown: destroy the unzip workqueue and the workgroup slab. */
42 void z_erofs_exit_zip_subsystem(void)
44 destroy_workqueue(z_erofs_workqueue);
45 kmem_cache_destroy(z_erofs_workgroup_cachep);
/*
 * Create the decompression workqueue, capping concurrency at roughly
 * 1.25x the number of possible CPUs.  Returns 0 on success, -ENOMEM if
 * allocation failed.
 * NOTE(review): the alloc_workqueue() result is presumably assigned to
 * z_erofs_workqueue on an elided line just above L57 — the return
 * statement tests that global.
 */
48 static inline int init_unzip_workqueue(void)
50 const unsigned int onlinecpus = num_possible_cpus();
53 * we don't need too many threads, limiting threads
54 * could improve scheduling performance.
57 alloc_workqueue("erofs_unzipd",
58 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
59 onlinecpus + onlinecpus / 4);
61 return z_erofs_workqueue ? 0 : -ENOMEM;
/*
 * Slab constructor: runs once when a workgroup object's backing slab
 * memory is first created (not on every kmem_cache_alloc), so it only
 * sets up state that freed objects are expected to keep valid: the
 * primary work's mutex and a cleared compressed_pages[] array.
 */
64 static void init_once(void *ptr)
66 struct z_erofs_vle_workgroup *grp = ptr;
67 struct z_erofs_vle_work *const work =
68 z_erofs_vle_grab_primary_work(grp);
71 mutex_init(&work->lock);
74 for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
75 grp->compressed_pages[i] = NULL;
/*
 * Per-allocation initializer (runs on every alloc, unlike init_once):
 * take the initial object reference and assert the recycled object was
 * left clean (nr_pages/vcnt zero) by its previous user.
 */
78 static void init_always(struct z_erofs_vle_workgroup *grp)
80 struct z_erofs_vle_work *const work =
81 z_erofs_vle_grab_primary_work(grp);
83 atomic_set(&grp->obj.refcount, 1);
86 DBG_BUGON(work->nr_pages);
87 DBG_BUGON(work->vcnt);
/*
 * Module init: create the workgroup slab cache first, then the unzip
 * workqueue; on workqueue failure the cache is destroyed again.
 * NOTE(review): the success/failure return statements fall on elided
 * lines (99-105); visible code shows only the error-unwind ordering.
 */
90 int __init z_erofs_init_zip_subsystem(void)
92 z_erofs_workgroup_cachep =
93 kmem_cache_create("erofs_compress",
94 Z_EROFS_WORKGROUP_SIZE, 0,
95 SLAB_RECLAIM_ACCOUNT, init_once);
97 if (z_erofs_workgroup_cachep) {
98 if (!init_unzip_workqueue())
101 kmem_cache_destroy(z_erofs_workgroup_cachep);
/*
 * Role of the current work within the owned workgroup chain.  Order is
 * significant: callers compare with >= (SECONDARY < PRIMARY <
 * PRIMARY_HOOKED < PRIMARY_FOLLOWED), so stronger ownership must sort
 * later in the enum.
 */
106 enum z_erofs_vle_work_role {
107 Z_EROFS_VLE_WORK_SECONDARY,
108 Z_EROFS_VLE_WORK_PRIMARY,
110 * The current work was the tail of an exist chain, and the previous
111 * processed chained works are all decided to be hooked up to it.
112 * A new chain should be created for the remaining unprocessed works,
113 * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
114 * the next work cannot reuse the whole page in the following scenario:
115 * ________________________________________________________________
116 * | tail (partial) page | head (partial) page |
117 * | (belongs to the next work) | (belongs to the current work) |
118 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
120 Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
122 * The current work has been linked with the processed chained works,
123 * and could be also linked with the potential remaining works, which
124 * means if the processing page is the tail partial page of the work,
125 * the current work can safely use the whole page (since the next work
126 * is under control) for in-place decompression, as illustrated below:
127 * ________________________________________________________________
128 * | tail (partial) page | head (partial) page |
129 * | (of the current work) | (of the previous work) |
130 * | PRIMARY_FOLLOWED or | |
131 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
133 * [ (*) the above page can be used for the current work itself. ]
135 Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
/*
 * Per-read builder state for the work currently being filled: its role,
 * the workgroup/work it targets, the inline pagevec constructor, and the
 * window of still-empty compressed_pages[] slots usable for in-place
 * decompression (compressed_deficit counts the remaining slots).
 */
139 struct z_erofs_vle_work_builder {
140 enum z_erofs_vle_work_role role;
142 * 'hosted = false' means that the current workgroup doesn't belong to
143 * the owned chained workgroups. In the other words, it is none of our
144 * business to submit this workgroup.
148 struct z_erofs_vle_workgroup *grp;
149 struct z_erofs_vle_work *work;
150 struct z_erofs_pagevec_ctor vector;
152 /* pages used for reading the compressed data */
153 struct page **compressed_pages;
154 unsigned int compressed_deficit;
/* fresh builder: no work bound yet, strongest (FOLLOWED) role assumed */
157 #define VLE_WORK_BUILDER_INIT() \
158 { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
160 #ifdef EROFS_FS_HAS_MANAGED_CACHE
/*
 * (managed-cache build) Pre-fill the still-empty compressed_pages[]
 * slots for this cluster before submission:
 *  - reuse a page already in the managed cache (tagged "justfound"), or
 *  - record the PAGE_UNALLOCATED placeholder for DELAYEDALLOC, or
 *  - leave the slot empty for DONTALLOC.
 * Slots are claimed with cmpxchg so racing fillers lose gracefully.
 * If any page had to come from somewhere other than the cache
 * (standalone cleared — the branch is on elided lines), the role is
 * downgraded to PRIMARY so file pages won't be reused in-place.
 */
161 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
162 struct address_space *mc,
164 unsigned int clusterpages,
165 enum z_erofs_cache_alloctype type,
166 struct list_head *pagepool,
169 struct page **const pages = bl->compressed_pages;
170 const unsigned int remaining = bl->compressed_deficit;
171 bool standalone = true;
172 unsigned int i, j = 0;
174 if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
/* strip reclaim flags: this is an opportunistic, non-blocking preload */
177 gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;
/* skip the slots already consumed by earlier works of this cluster */
179 index += clusterpages - remaining;
181 for (i = 0; i < remaining; ++i) {
185 /* the compressed page was loaded before */
186 if (READ_ONCE(pages[i]))
189 page = find_get_page(mc, index + i);
192 t = tag_compressed_page_justfound(page);
193 } else if (type == DELAYEDALLOC) {
194 t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
195 } else { /* DONTALLOC */
202 if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
208 bl->compressed_pages += j;
209 bl->compressed_deficit = remaining - j;
212 bl->role = Z_EROFS_VLE_WORK_PRIMARY;
215 /* called by erofs_shrinker to get rid of all compressed_pages */
/*
 * Detach every managed-cache page of a frozen workgroup so the shrinker
 * can reclaim it.  Caller has frozen the workgroup refcount to 1, so no
 * decompressor can race with us; each page is trylocked, unhooked from
 * compressed_pages[], and stripped of its private workgroup link.
 * (Return statements live on elided lines.)
 */
216 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
217 struct erofs_workgroup *egrp)
219 struct z_erofs_vle_workgroup *const grp =
220 container_of(egrp, struct z_erofs_vle_workgroup, obj);
221 struct address_space *const mapping = MNGD_MAPPING(sbi);
222 const int clusterpages = erofs_clusterpages(sbi);
226 * refcount of workgroup is now freezed as 1,
227 * therefore no need to worry about available decompression users.
229 for (i = 0; i < clusterpages; ++i) {
230 struct page *page = grp->compressed_pages[i];
/* skip empty slots, placeholders, and pages not in the managed cache */
232 if (!page || page->mapping != mapping)
235 /* block other users from reclaiming or migrating the page */
236 if (!trylock_page(page))
239 /* barrier is implied in the following 'unlock_page' */
240 WRITE_ONCE(grp->compressed_pages[i], NULL);
242 set_page_private(page, 0);
243 ClearPagePrivate(page);
/*
 * releasepage-style hook: try to detach a single managed-cache page from
 * its owning workgroup.  Only succeeds if the workgroup refcount can be
 * frozen at 1 (no concurrent users); ret stays 0 ("busy") otherwise.
 */
251 int erofs_try_to_free_cached_page(struct address_space *mapping,
254 struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
255 const unsigned int clusterpages = erofs_clusterpages(sbi);
256 struct z_erofs_vle_workgroup *const grp = (void *)page_private(page);
257 int ret = 0; /* 0 - busy */
259 if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
262 for (i = 0; i < clusterpages; ++i) {
263 if (grp->compressed_pages[i] == page) {
264 WRITE_ONCE(grp->compressed_pages[i], NULL);
269 erofs_workgroup_unfreeze(&grp->obj, 1);
/* on success (elided branch), drop the page's private link as well */
272 ClearPagePrivate(page);
/* non-managed-cache build: preloading is a no-op (no cache to read from) */
279 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
280 struct address_space *mc,
282 unsigned int clusterpages,
283 enum z_erofs_cache_alloctype type,
284 struct list_head *pagepool,
287 /* nowhere to load compressed pages from */
291 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
/*
 * In-place decompression: try to park this exclusive file page into one
 * of the remaining empty compressed_pages[] slots (claimed by cmpxchg).
 * Consumes deficit slots as it scans; success/failure returns are on
 * elided lines.
 */
292 static inline bool try_to_reuse_as_compressed_page(
293 struct z_erofs_vle_work_builder *b,
296 while (b->compressed_deficit) {
297 --b->compressed_deficit;
298 if (!cmpxchg(b->compressed_pages++, NULL, page))
305 /* callers must be with work->lock held */
/*
 * Attach one file page to the current work: exclusive pages are first
 * offered as in-place compressed storage; otherwise the page goes into
 * the work's pagevec.  Returns 0 on success, -EAGAIN when the pagevec
 * is full and the caller must supply a staging page.
 */
306 static int z_erofs_vle_work_add_page(
307 struct z_erofs_vle_work_builder *builder,
309 enum z_erofs_page_type type)
314 /* give priority for the compressed data storage */
315 if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
316 type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
317 try_to_reuse_as_compressed_page(builder, page))
320 ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
321 page, type, &occupied);
/* ret is bool-like: count the page only if the enqueue succeeded */
322 builder->work->vcnt += (unsigned int)ret;
324 return ret ? 0 : -EAGAIN;
/*
 * Try to splice an existing workgroup into our owned submission chain.
 * Two claimable cases, both via cmpxchg on grp->next:
 *  type 1 (next == NIL):  the group is unchained — take it over fully,
 *          we become the followee (PRIMARY_FOLLOWED);
 *  type 2 (next == TAIL): hook onto the tail of an open chain whose
 *          submission stays governed by the original owner
 *          (PRIMARY_HOOKED).
 * Anything else: the group belongs to someone else — plain PRIMARY.
 */
328 try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
329 z_erofs_vle_owned_workgrp_t *owned_head,
332 DBG_BUGON(*hosted == true);
334 /* let's claim these following types of workgroup */
336 if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
337 /* type 1, nil workgroup */
338 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
339 *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
342 *owned_head = &grp->next;
344 /* lucky, I am the followee :) */
345 return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
347 } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
349 * type 2, link to the end of a existing open chain,
350 * be careful that its submission itself is governed
351 * by the original owned chain.
353 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
354 *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
356 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
357 return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
360 return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
/*
 * Bundle of in/out parameters shared by work lookup and registration:
 * inputs (sb, idx, pageofs) locate the workgroup; outputs (grp_ret,
 * role, owned_head, hosted) report how the caller now relates to it.
 */
363 struct z_erofs_vle_work_finder {
364 struct super_block *sb;
366 unsigned int pageofs;
368 struct z_erofs_vle_workgroup **grp_ret;
369 enum z_erofs_vle_work_role *role;
370 z_erofs_vle_owned_workgrp_t *owned_head;
/*
 * Look up an existing workgroup by (sb, idx); on hit, grab its work for
 * f->pageofs, lock it, and decide our role — SECONDARY if the work is
 * not primary, otherwise try_to_claim_workgroup().  The long comment
 * below explains why the work lock must be taken before claiming.
 * Returns the locked work (NULL / early-exit paths are on elided lines).
 */
374 static struct z_erofs_vle_work *
375 z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
378 struct erofs_workgroup *egrp;
379 struct z_erofs_vle_workgroup *grp;
380 struct z_erofs_vle_work *work;
382 egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
388 grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
391 work = z_erofs_vle_grab_work(grp, f->pageofs);
392 /* if multiref is disabled, `primary' is always true */
395 DBG_BUGON(work->pageofs != f->pageofs);
398 * lock must be taken first to avoid grp->next == NIL between
399 * claiming workgroup and adding pages:
403 * mutex_lock(&work->lock)
404 * add all pages to pagevec
406 * [correct locking case 1]:
407 * mutex_lock(grp->work[a])
409 * mutex_lock(grp->work[b]) mutex_lock(grp->work[c])
410 * ... *role = SECONDARY
411 * add all pages to pagevec
413 * mutex_unlock(grp->work[c])
414 * mutex_lock(grp->work[c])
419 * [correct locking case 2]:
420 * mutex_lock(grp->work[b])
422 * mutex_lock(grp->work[a])
424 * mutex_lock(grp->work[c])
428 * mutex_lock(grp->work[a])
429 * *role = PRIMARY_OWNER
430 * add all pages to pagevec
433 mutex_lock(&work->lock);
437 *f->role = Z_EROFS_VLE_WORK_SECONDARY;
438 else /* claim the workgroup if possible */
439 *f->role = try_to_claim_workgroup(grp, f->owned_head,
/*
 * Allocate and register a brand-new workgroup (lookup missed).  The new
 * group is claimed as "type 1" (next = current owned_head), its primary
 * work is locked before it becomes visible, and it is inserted into the
 * global radix tree; on an insertion race the object is freed and
 * -EAGAIN tells the caller to retry the lookup.
 */
444 static struct z_erofs_vle_work *
445 z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
446 struct erofs_map_blocks *map)
449 struct z_erofs_vle_workgroup *grp = *f->grp_ret;
450 struct z_erofs_vle_work *work;
452 /* if multiref is disabled, grp should never be nullptr */
455 return ERR_PTR(-EINVAL);
458 /* no available workgroup, let's allocate one */
459 grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
461 return ERR_PTR(-ENOMEM);
/* NOTE(review): init_always(grp) presumably runs on an elided line here */
464 grp->obj.index = f->idx;
465 grp->llen = map->m_llen;
467 z_erofs_vle_set_workgrp_fmt(grp,
468 (map->m_flags & EROFS_MAP_ZIPPED) ?
469 Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
470 Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
472 /* new workgrps have been claimed as type 1 */
473 WRITE_ONCE(grp->next, *f->owned_head);
474 /* primary and followed work for all new workgrps */
475 *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
476 /* it should be submitted by ourselves */
480 work = z_erofs_vle_grab_primary_work(grp);
481 work->pageofs = f->pageofs;
484 * lock all primary followed works before visible to others
485 * and mutex_trylock *never* fails for a new workgroup.
487 mutex_trylock(&work->lock);
490 int err = erofs_register_workgroup(f->sb, &grp->obj, 0);
/* lost the registration race: undo and let the caller re-lookup */
493 mutex_unlock(&work->lock);
494 kmem_cache_free(z_erofs_workgroup_cachep, grp);
495 return ERR_PTR(-EAGAIN);
/* chain head now points at this new group's next link */
499 *f->owned_head = &grp->next;
/* role predicates — rely on the enum ordering (HOOKED < FOLLOWED) */
504 #define builder_is_hooked(builder) \
505 ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
507 #define builder_is_followed(builder) \
508 ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
/*
 * Bind the builder to the work covering this map extent: look up the
 * workgroup (growing its llen if this extent is longer), or register a
 * new one (retrying on -EAGAIN); then initialize the inline pagevec and
 * expose the compressed_pages window for in-place decompression when we
 * own a primary role.  Returns 0 or a negative errno.
 */
510 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
511 struct super_block *sb,
512 struct erofs_map_blocks *map,
513 z_erofs_vle_owned_workgrp_t *owned_head)
515 const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
516 struct z_erofs_vle_workgroup *grp;
517 const struct z_erofs_vle_work_finder finder = {
519 .idx = erofs_blknr(map->m_pa),
520 .pageofs = map->m_la & ~PAGE_MASK,
522 .role = &builder->role,
523 .owned_head = owned_head,
524 .hosted = &builder->hosted
526 struct z_erofs_vle_work *work;
528 DBG_BUGON(builder->work);
530 /* must be Z_EROFS_WORK_TAIL or the next chained work */
531 DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
532 DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
534 DBG_BUGON(erofs_blkoff(map->m_pa));
537 work = z_erofs_vle_work_lookup(&finder);
539 unsigned int orig_llen;
541 /* increase workgroup `llen' if needed */
542 while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
543 orig_llen != cmpxchg_relaxed(&grp->llen,
544 orig_llen, map->m_llen))
549 work = z_erofs_vle_work_register(&finder, map);
550 if (unlikely(work == ERR_PTR(-EAGAIN)))
554 return PTR_ERR(work);
556 z_erofs_pagevec_ctor_init(&builder->vector,
557 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
559 if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
560 /* enable possibly in-place decompression */
561 builder->compressed_pages = grp->compressed_pages;
562 builder->compressed_deficit = clusterpages;
564 builder->compressed_pages = NULL;
565 builder->compressed_deficit = 0;
569 builder->work = work;
574 * keep in mind that no referenced workgroups will be freed
575 * only after a RCU grace period, so rcu_read_lock() could
576 * prevent a workgroup from being freed.
/* RCU grace-period callback: free the workgroup embedding this work */
578 static void z_erofs_rcu_callback(struct rcu_head *head)
580 struct z_erofs_vle_work *work = container_of(head,
581 struct z_erofs_vle_work, rcu);
582 struct z_erofs_vle_workgroup *grp =
583 z_erofs_vle_work_workgroup(work, true);
585 kmem_cache_free(z_erofs_workgroup_cachep, grp);
/* defer workgroup destruction until after an RCU grace period */
588 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
590 struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
591 struct z_erofs_vle_workgroup, obj);
592 struct z_erofs_vle_work *const work = &vgrp->work;
594 call_rcu(&work->rcu, z_erofs_rcu_callback);
/* drop one workgroup reference; `work' is only kept for the signature */
597 static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
598 struct z_erofs_vle_work *work __maybe_unused)
600 erofs_workgroup_put(&grp->obj);
/* convenience wrapper: resolve the owning workgroup, then release it */
603 static void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
605 struct z_erofs_vle_workgroup *grp =
606 z_erofs_vle_work_workgroup(work, true);
608 __z_erofs_vle_work_release(grp, work);
/*
 * Unbind the builder from its current work: finish the pagevec, unlock
 * the work, and drop our reference unless we host (will submit) the
 * workgroup ourselves.  (Return type/value lines are elided.)
 */
612 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
614 struct z_erofs_vle_work *work = builder->work;
619 z_erofs_pagevec_ctor_exit(&builder->vector, false);
620 mutex_unlock(&work->lock);
623 * if all pending pages are added, don't hold work reference
624 * any longer if the current work isn't hosted by ourselves.
626 if (!builder->hosted)
627 __z_erofs_vle_work_release(builder->grp, work);
629 builder->work = NULL;
/*
 * Allocate a short-lived staging page (from the pagepool if possible)
 * and brand it with the staging mapping so it can be recognized and
 * recycled after decompression.
 */
634 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
637 struct page *page = erofs_allocpage(pagepool, gfp);
642 page->mapping = Z_EROFS_MAPPING_STAGING;
/*
 * Per-readpage(s) frontend state: the inode being read, the current
 * work builder, the block map iterator, the head of the owned workgroup
 * chain, and the starting offset used by the cache-strategy heuristic.
 */
646 struct z_erofs_vle_frontend {
647 struct inode *const inode;
649 struct z_erofs_vle_work_builder builder;
650 struct erofs_map_blocks map;
652 z_erofs_vle_owned_workgrp_t owned_head;
654 /* used for applying cache strategy on the fly */
656 erofs_off_t headoffset;
/* fresh frontend for inode __i: empty builder, open (TAIL) owned chain */
659 #define VLE_FRONTEND_INIT(__i) { \
666 .builder = VLE_WORK_BUILDER_INIT(), \
667 .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
670 #ifdef EROFS_FS_HAS_MANAGED_CACHE
/*
 * Cache-strategy heuristic: with cache level >= 2, cache compressed
 * pages for backward extents (la before the request head — likely to be
 * re-read).  The non-managed-cache variant below is a stub.
 */
672 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
677 if (EROFS_FS_ZIP_CACHE_LVL >= 2)
678 return la < fe->headoffset;
684 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
/*
 * Core read path: attach one locked file page to decompression work(s).
 * A page may span several extents, so the body loops ("spiltted" counts
 * the parts): for each part it (re)maps the extent, begins a work via
 * the builder, preloads cached compressed pages, then adds the page (or
 * a staging page when the inline pagevec is full) and fixes up the
 * online-page bookkeeping.  Loop labels/gotos sit on elided lines.
 */
690 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
692 struct list_head *page_pool)
694 struct super_block *const sb = fe->inode->i_sb;
695 struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
696 struct erofs_map_blocks *const map = &fe->map;
697 struct z_erofs_vle_work_builder *const builder = &fe->builder;
698 const loff_t offset = page_offset(page);
700 bool tight = builder_is_hooked(builder);
701 struct z_erofs_vle_work *work = builder->work;
703 enum z_erofs_cache_alloctype cache_strategy;
704 enum z_erofs_page_type page_type;
705 unsigned int cur, end, spiltted, index;
708 /* register locked file pages as online pages in pack */
709 z_erofs_onlinepage_init(page);
716 /* lucky, within the range of the current map_blocks */
717 if (offset + cur >= map->m_la &&
718 offset + cur < map->m_la + map->m_llen) {
719 /* didn't get a valid unzip work previously (very rare) */
725 /* go ahead the next map_blocks */
726 debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
728 if (z_erofs_vle_work_iter_end(builder))
729 fe->backmost = false;
731 map->m_la = offset + cur;
733 err = z_erofs_map_blocks_iter(fe->inode, map, 0);
738 if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
741 DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
742 DBG_BUGON(erofs_blkoff(map->m_pa));
744 err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
748 /* preload all compressed pages (maybe downgrade role if necessary) */
749 if (should_alloc_managed_pages(fe, map->m_la))
750 cache_strategy = DELAYEDALLOC;
752 cache_strategy = DONTALLOC;
754 preload_compressed_pages(builder, MNGD_MAPPING(sbi),
755 map->m_pa / PAGE_SIZE,
756 map->m_plen / PAGE_SIZE,
757 cache_strategy, page_pool, GFP_KERNEL);
759 tight &= builder_is_hooked(builder);
760 work = builder->work;
/* clamp the working window of this page to the current extent */
762 cur = end - min_t(unsigned int, offset + end - map->m_la, end);
763 if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
764 zero_user_segment(page, cur, end);
768 /* let's derive page type */
769 page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
770 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
771 (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
772 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
775 tight &= builder_is_followed(builder);
778 err = z_erofs_vle_work_add_page(builder, page, page_type);
779 /* should allocate an additional staging page for pagevec */
780 if (err == -EAGAIN) {
781 struct page *const newpage =
782 __stagingpage_alloc(page_pool, GFP_NOFS);
784 err = z_erofs_vle_work_add_page(builder,
785 newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
793 index = page->index - map->m_la / PAGE_SIZE;
795 /* FIXME! avoid the last relundant fixup & endio */
796 z_erofs_onlinepage_fixup(page, index, true);
798 /* bump up the number of spiltted parts of a page */
800 /* also update nr_pages */
801 work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
803 /* can be used for verification */
804 map->m_llen = offset + cur - map->m_la;
811 /* FIXME! avoid the last relundant fixup & endio */
812 z_erofs_onlinepage_endio(page);
814 debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
815 __func__, page, spiltted, map->m_llen);
818 /* if some error occurred while processing this page */
/*
 * Bio completion fan-in: add `bios' to pending_bios and, when the count
 * reaches zero, either wake a foreground waiter (tag set => background
 * is false path uses the waitqueue under its lock) or queue background
 * decompression on the unzip workqueue.  `ptr' is the tagged io pointer
 * produced by jobqueueset_init().
 */
824 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
826 tagptr1_t t = tagptr_init(tagptr1_t, ptr);
827 struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
828 bool background = tagptr_unfold_tags(t);
833 spin_lock_irqsave(&io->u.wait.lock, flags);
834 if (!atomic_add_return(bios, &io->pending_bios))
835 wake_up_locked(&io->u.wait);
836 spin_unlock_irqrestore(&io->u.wait.lock, flags);
840 if (!atomic_add_return(bios, &io->pending_bios))
841 queue_work(z_erofs_workqueue, &io->u.work);
/*
 * Read-bio endio: for every segment, mark the page uptodate (or, on
 * error paths on elided lines, record failure); pages belonging to the
 * managed cache additionally get cache-managed post-processing.  The
 * lazily-resolved `mc' avoids recomputing the mapping per page.
 * Finally one pending-bios credit is returned via the kickoff helper.
 */
844 static inline void z_erofs_vle_read_endio(struct bio *bio)
846 const blk_status_t err = bio->bi_status;
848 struct bio_vec *bvec;
849 #ifdef EROFS_FS_HAS_MANAGED_CACHE
850 struct address_space *mc = NULL;
852 struct bvec_iter_all iter_all;
854 bio_for_each_segment_all(bvec, bio, i, iter_all) {
855 struct page *page = bvec->bv_page;
856 bool cachemngd = false;
858 DBG_BUGON(PageUptodate(page));
859 DBG_BUGON(!page->mapping);
861 #ifdef EROFS_FS_HAS_MANAGED_CACHE
862 if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
863 struct inode *const inode = page->mapping->host;
864 struct super_block *const sb = inode->i_sb;
866 mc = MNGD_MAPPING(EROFS_SB(sb));
870 * If mc has not gotten, it equals NULL,
871 * however, page->mapping never be NULL if working properly.
873 cachemngd = (page->mapping == mc);
879 SetPageUptodate(page);
885 z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
/* shared fallback pagemap for lowmem unzips, serialized by its mutex */
889 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
890 static DEFINE_MUTEX(z_pagemap_global_lock);
/*
 * Decompress one workgroup:
 *  1. pick a pages[] array (on-stack, global fallback, or kvmalloc);
 *  2. walk the work's pagevec, placing each online file page at its
 *     output index (staging pages are recycled into the pool);
 *  3. scan compressed_pages[]: managed-cache pages must be uptodate,
 *     in-placed file pages are also output pages, PG_error aborts;
 *  4. run the PLAIN copy or LZ4 fast path, falling back to the vmap
 *     decompressor (allocating staging pages for output holes);
 *  5. hand back compressed pages, end all online pages, reset grp->next
 *     to NIL and release the work.
 * Error/goto labels and some declarations fall on elided lines.
 */
892 static int z_erofs_vle_unzip(struct super_block *sb,
893 struct z_erofs_vle_workgroup *grp,
894 struct list_head *page_pool)
896 struct erofs_sb_info *const sbi = EROFS_SB(sb);
897 const unsigned int clusterpages = erofs_clusterpages(sbi);
899 struct z_erofs_pagevec_ctor ctor;
900 unsigned int nr_pages;
901 unsigned int sparsemem_pages = 0;
902 struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
903 struct page **pages, **compressed_pages, *page;
904 unsigned int i, llen;
906 enum z_erofs_page_type page_type;
908 struct z_erofs_vle_work *work;
913 work = z_erofs_vle_grab_primary_work(grp);
914 DBG_BUGON(!READ_ONCE(work->nr_pages));
916 mutex_lock(&work->lock);
917 nr_pages = work->nr_pages;
919 if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
920 pages = pages_onstack;
921 else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
922 mutex_trylock(&z_pagemap_global_lock))
923 pages = z_pagemap_global;
926 pages = kvmalloc_array(nr_pages,
927 sizeof(struct page *), GFP_KERNEL);
929 /* fallback to global pagemap for the lowmem scenario */
930 if (unlikely(!pages)) {
931 if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
934 mutex_lock(&z_pagemap_global_lock);
935 pages = z_pagemap_global;
/* clear the output slots before distributing pages into them */
940 for (i = 0; i < nr_pages; ++i)
943 z_erofs_pagevec_ctor_init(&ctor,
944 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
946 for (i = 0; i < work->vcnt; ++i) {
949 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
951 /* all pages in pagevec ought to be valid */
953 DBG_BUGON(!page->mapping);
955 if (z_erofs_gather_if_stagingpage(page_pool, page))
958 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
961 pagenr = z_erofs_onlinepage_index(page);
963 DBG_BUGON(pagenr >= nr_pages);
964 DBG_BUGON(pages[pagenr]);
966 pages[pagenr] = page;
970 z_erofs_pagevec_ctor_exit(&ctor, true);
973 compressed_pages = grp->compressed_pages;
976 for (i = 0; i < clusterpages; ++i) {
979 page = compressed_pages[i];
981 /* all compressed pages ought to be valid */
983 DBG_BUGON(!page->mapping);
985 if (!z_erofs_is_stagingpage(page)) {
986 #ifdef EROFS_FS_HAS_MANAGED_CACHE
987 if (page->mapping == MNGD_MAPPING(sbi)) {
988 if (unlikely(!PageUptodate(page)))
995 * only if non-head page can be selected
996 * for inplace decompression
998 pagenr = z_erofs_onlinepage_index(page);
1000 DBG_BUGON(pagenr >= nr_pages);
1001 DBG_BUGON(pages[pagenr]);
1003 pages[pagenr] = page;
1008 /* PG_error needs checking for inplaced and staging pages */
1009 if (unlikely(PageError(page))) {
1010 DBG_BUGON(PageUptodate(page));
/* total decompressed bytes covered by this workgroup's pages */
1018 llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
1020 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
1021 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
1022 pages, nr_pages, work->pageofs);
1026 if (llen > grp->llen)
1029 err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
1030 pages, llen, work->pageofs);
1031 if (err != -ENOTSUPP)
1034 if (sparsemem_pages >= nr_pages)
1035 goto skip_allocpage;
1037 for (i = 0; i < nr_pages; ++i) {
/* fill output holes with staging pages for the vmap fallback */
1041 pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
1045 vout = erofs_vmap(pages, nr_pages);
1051 err = z_erofs_vle_unzip_vmap(compressed_pages,
1052 clusterpages, vout, llen, work->pageofs, overlapped);
1054 erofs_vunmap(vout, nr_pages);
1057 /* must handle all compressed pages before endding pages */
1058 for (i = 0; i < clusterpages; ++i) {
1059 page = compressed_pages[i];
1061 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1062 if (page->mapping == MNGD_MAPPING(sbi))
1065 /* recycle all individual staging pages */
1066 (void)z_erofs_gather_if_stagingpage(page_pool, page);
1068 WRITE_ONCE(compressed_pages[i], NULL);
1071 for (i = 0; i < nr_pages; ++i) {
1076 DBG_BUGON(!page->mapping);
1078 /* recycle all individual staging pages */
1079 if (z_erofs_gather_if_stagingpage(page_pool, page))
1082 if (unlikely(err < 0))
1085 z_erofs_onlinepage_endio(page);
1088 if (pages == z_pagemap_global)
1089 mutex_unlock(&z_pagemap_global_lock);
1090 else if (unlikely(pages != pages_onstack))
1096 /* all work locks MUST be taken before the following line */
1098 WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1100 /* all work locks SHOULD be released right now */
1101 mutex_unlock(&work->lock);
1103 z_erofs_vle_work_release(work);
/*
 * Walk the closed owned chain starting at io->head and decompress each
 * workgroup in order; the chain terminates at TAIL_CLOSED and may never
 * contain the open TAIL marker or NIL.
 */
1107 static void z_erofs_vle_unzip_all(struct super_block *sb,
1108 struct z_erofs_vle_unzip_io *io,
1109 struct list_head *page_pool)
1111 z_erofs_vle_owned_workgrp_t owned = io->head;
1113 while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1114 struct z_erofs_vle_workgroup *grp;
1116 /* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
1117 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1119 /* no possible that 'owned' equals NULL */
1120 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1122 grp = container_of(owned, struct z_erofs_vle_workgroup, next);
/* read the next link before unzip may recycle this group */
1123 owned = READ_ONCE(grp->next);
1125 z_erofs_vle_unzip(sb, grp, page_pool);
/*
 * Background workqueue entry: recover the io descriptor from the work
 * item, decompress its whole chain, then return the pooled pages.
 * (The iosb itself is freed on an elided line after unzip_all.)
 */
1129 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1131 struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1132 struct z_erofs_vle_unzip_io_sb, io.u.work);
1133 LIST_HEAD(page_pool);
1135 DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1136 z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1138 put_pages_list(&page_pool);
/*
 * Resolve compressed_pages[nr] into a page ready for bio submission:
 *  - a real page already there may be a still-valid managed-cache page
 *    (relink private data, skip io when uptodate) or an in-placed /
 *    staging page (submit as-is);
 *  - a PAGE_UNALLOCATED placeholder, or a truncated cache page, makes
 *    us allocate a fresh page (optionally inserting it into the managed
 *    cache), racing via cmpxchg against concurrent pickers.
 * Retry/goto targets sit on elided lines; `nocache' is resolved at
 * compile time so the non-managed-cache build folds those paths away.
 */
1142 static struct page *
1143 pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
1145 struct list_head *pagepool,
1146 struct address_space *mc,
1149 /* determined at compile time to avoid too many #ifdefs */
1150 const bool nocache = __builtin_constant_p(mc) ? !mc : false;
1151 const pgoff_t index = grp->obj.index;
1152 bool tocache = false;
1154 struct address_space *mapping;
1155 struct page *oldpage, *page;
1157 compressed_page_t t;
1161 page = READ_ONCE(grp->compressed_pages[nr]);
1168 * the cached page has not been allocated and
1169 * an placeholder is out there, prepare it now.
1171 if (!nocache && page == PAGE_UNALLOCATED) {
1176 /* process the target tagged pointer */
1177 t = tagptr_init(compressed_page_t, page);
1178 justfound = tagptr_unfold_tags(t);
1179 page = tagptr_unfold_ptr(t);
1181 mapping = READ_ONCE(page->mapping);
1184 * if managed cache is disabled, it's no way to
1185 * get such a cached-like page.
1188 /* if managed cache is disabled, it is impossible `justfound' */
1189 DBG_BUGON(justfound);
1191 /* and it should be locked, not uptodate, and not truncated */
1192 DBG_BUGON(!PageLocked(page));
1193 DBG_BUGON(PageUptodate(page));
1194 DBG_BUGON(!mapping);
1199 * unmanaged (file) pages are all locked solidly,
1200 * therefore it is impossible for `mapping' to be NULL.
1202 if (mapping && mapping != mc)
1203 /* ought to be unmanaged pages */
1208 /* only true if page reclaim goes wrong, should never happen */
1209 DBG_BUGON(justfound && PagePrivate(page));
1211 /* the page is still in manage cache */
1212 if (page->mapping == mc) {
1213 WRITE_ONCE(grp->compressed_pages[nr], page);
1215 ClearPageError(page);
1216 if (!PagePrivate(page)) {
1218 * impossible to be !PagePrivate(page) for
1219 * the current restriction as well if
1220 * the page is already in compressed_pages[].
1222 DBG_BUGON(!justfound);
1225 set_page_private(page, (unsigned long)grp);
1226 SetPagePrivate(page);
1229 /* no need to submit io if it is already up-to-date */
1230 if (PageUptodate(page)) {
1238 * the managed page has been truncated, it's unsafe to
1239 * reuse this one, let's allocate a new cache-managed page.
1241 DBG_BUGON(page->mapping);
1242 DBG_BUGON(!justfound);
1248 page = __stagingpage_alloc(pagepool, gfp);
1249 if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
/* lost the race: give the fresh page back and retry from the top */
1250 list_add(&page->lru, pagepool);
1254 if (nocache || !tocache)
1256 if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1257 page->mapping = Z_EROFS_MAPPING_STAGING;
1261 set_page_private(page, (unsigned long)grp);
1262 SetPagePrivate(page);
1263 out: /* the only exit (for tracing and debugging) */
/*
 * Prepare an io descriptor: foreground readers use the caller-provided
 * `io'/fgq with a waitqueue; background readers get a kvzalloc'ed
 * io_sb with a work item bound to the unzip workqueue.  The head is
 * closed (TAIL_CLOSED) until the submitter installs a real chain.
 * (The foreground/background branch itself is on elided lines.)
 */
1268 jobqueue_init(struct super_block *sb,
1269 struct z_erofs_vle_unzip_io *io,
1272 struct z_erofs_vle_unzip_io_sb *iosb;
1275 /* waitqueue available for foreground io */
1278 init_waitqueue_head(&io->u.wait);
1279 atomic_set(&io->pending_bios, 0);
1283 iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
1284 GFP_KERNEL | __GFP_NOFAIL);
1287 /* initialize fields in the allocated descriptor */
1290 INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1292 io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1296 /* define workgroup jobqueue types */
1298 #ifdef EROFS_FS_HAS_MANAGED_CACHE
/*
 * Set up the jobqueue set: a bypass queue (managed cache only — groups
 * whose pages need no device io) and the submit queue.  Returns the
 * submit io pointer folded with a 1-bit "background" tag, later decoded
 * by z_erofs_vle_unzip_kickoff().
 */
1305 static void *jobqueueset_init(struct super_block *sb,
1306 z_erofs_vle_owned_workgrp_t qtail[],
1307 struct z_erofs_vle_unzip_io *q[],
1308 struct z_erofs_vle_unzip_io *fgq,
1311 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1313 * if managed cache is enabled, bypass jobqueue is needed,
1314 * no need to read from device for all workgroups in this queue.
1316 q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
1317 qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1320 q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
1321 qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1323 return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
1326 #ifdef EROFS_FS_HAS_MANAGED_CACHE
/*
 * Unhook `grp' from the submit chain and append it to the bypass chain:
 * the submit tail skips over to owned_head (closing an open TAIL) while
 * the bypass tail is pointed at this group's next link.
 */
1327 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1328 z_erofs_vle_owned_workgrp_t qtail[],
1329 z_erofs_vle_owned_workgrp_t owned_head)
1331 z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
1332 z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];
1334 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1335 if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
1336 owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1338 WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1340 WRITE_ONCE(*submit_qtail, owned_head);
1341 WRITE_ONCE(*bypass_qtail, &grp->next);
1343 qtail[JQ_BYPASS] = &grp->next;
/*
 * After submission: if nothing was actually queued for io (all groups
 * bypassed) and the caller did not force foreground mode, free the
 * background submit descriptor instead of kicking the workqueue.
 * (The return statements fall on elided lines.)
 */
1346 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1347 unsigned int nr_bios,
1351 * although background is preferred, no one is pending for submission.
1352 * don't issue workqueue for decompression but drop it directly instead.
1354 if (force_fg || nr_bios)
1357 kvfree(container_of(q[JQ_SUBMIT],
1358 struct z_erofs_vle_unzip_io_sb,
/* non-managed-cache build: every group needs io, so bypass cannot occur
 * and nr_bios must be non-zero after submission. */
1363 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1364 z_erofs_vle_owned_workgrp_t qtail[],
1365 z_erofs_vle_owned_workgrp_t owned_head)
1367 /* impossible to bypass submission for managed cache disabled */
1371 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1372 unsigned int nr_bios,
1375 /* bios should be >0 if managed cache is disabled */
1376 DBG_BUGON(!nr_bios);
/*
 * Submit read bios for every workgroup on the owned chain.  For each
 * group: close its link (TAIL -> TAIL_CLOSED), pick up each cluster
 * page, and append it to the current bio — starting a new bio whenever
 * the on-disk run is discontiguous (force_submit) or bio_add_page
 * cannot take the page.  Groups whose pages all came from the managed
 * cache are moved to the bypass queue.  Finally, either drop the unused
 * submit descriptor or hand the initial pending-bios credit to the
 * kickoff helper.  Loop labels/return values sit on elided lines.
 */
1381 static bool z_erofs_vle_submit_all(struct super_block *sb,
1382 z_erofs_vle_owned_workgrp_t owned_head,
1383 struct list_head *pagepool,
1384 struct z_erofs_vle_unzip_io *fgq,
1387 struct erofs_sb_info *const sbi = EROFS_SB(sb);
1388 const unsigned int clusterpages = erofs_clusterpages(sbi);
1389 const gfp_t gfp = GFP_NOFS;
1391 z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
1392 struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
1395 /* since bio will be NULL, no need to initialize last_index */
1396 pgoff_t uninitialized_var(last_index);
1397 bool force_submit = false;
1398 unsigned int nr_bios;
1400 if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1403 force_submit = false;
1406 bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
1408 /* by default, all need io submission */
1409 q[JQ_SUBMIT]->head = owned_head;
1412 struct z_erofs_vle_workgroup *grp;
1413 pgoff_t first_index;
1415 unsigned int i = 0, bypass = 0;
1418 /* no possible 'owned_head' equals the following */
1419 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1420 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1422 grp = container_of(owned_head,
1423 struct z_erofs_vle_workgroup, next);
1425 /* close the main owned chain at first */
1426 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1427 Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1429 first_index = grp->obj.index;
1430 force_submit |= (first_index != last_index + 1);
1433 page = pickup_page_for_submission(grp, i, pagepool,
1434 MNGD_MAPPING(sbi), gfp);
/* no page to read (cache hit / bypass): break the bio run */
1436 force_submit = true;
1441 if (bio && force_submit) {
1443 __submit_bio(bio, REQ_OP_READ, 0);
1448 bio = erofs_grab_bio(sb, first_index + i,
1450 z_erofs_vle_read_endio, true);
1451 bio->bi_private = bi_private;
1456 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1457 if (err < PAGE_SIZE)
1458 goto submit_bio_retry;
1460 force_submit = false;
1461 last_index = first_index + i;
1463 if (++i < clusterpages)
1466 if (bypass < clusterpages)
1467 qtail[JQ_SUBMIT] = &grp->next;
1469 move_to_bypass_jobqueue(grp, qtail, owned_head);
1470 } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1473 __submit_bio(bio, REQ_OP_READ, 0);
1475 if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
1478 z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
/*
 * NOTE(review): listing gaps (1484-1485, 1488, 1490-1491, 1494-1497,
 * 1501, 1504+) elide the 'force_fg' parameter line, braces and an early
 * return.
 *
 * Submit all owned workgroups of frontend 'f' and then decompress:
 * bypassed workgroups are unzipped immediately (managed cache build);
 * when synchronous (force_fg), wait for all pending bios and unzip the
 * submit queue in the caller's context.
 */
1482 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1483 struct list_head *pagepool,
1486 struct super_block *sb = f->inode->i_sb;
1487 struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];
1489 if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1492 #ifdef EROFS_FS_HAS_MANAGED_CACHE
/* decompress no-I/O (bypassed) workgroups right away */
1493 z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
1498 /* wait until all bios are completed */
1499 wait_event(io[JQ_SUBMIT].u.wait,
1500 !atomic_read(&io[JQ_SUBMIT].pending_bios));
1502 /* decompress synchronously in the caller's context */
1503 z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
/*
 * NOTE(review): listing gaps (1507-1508, 1511, 1513, ...) elide the
 * 'struct page *page' parameter line, 'int err' declaration, braces and
 * the return statement.
 *
 * .readpage handler: read and decompress a single page synchronously
 * (z_erofs_submit_and_unzip() is called with force_fg == true).
 */
1506 static int z_erofs_vle_normalaccess_readpage(struct file *file,
1509 struct inode *const inode = page->mapping->host;
1510 struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1512 LIST_HEAD(pagepool);
1514 trace_erofs_readpage(page, false);
/* byte offset of this page within the file */
1516 f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1518 err = z_erofs_do_read_page(&f, page, &pagepool);
1519 (void)z_erofs_vle_work_iter_end(&f.builder);
1522 errln("%s, failed to read, err [%d]", __func__, err);
1526 z_erofs_submit_and_unzip(&f, &pagepool, true);
/* drop the cached extent metapage, if any — presumably NULL-checked
 * in an elided line; TODO confirm */
1529 put_page(f.map.mpage);
1531 /* clean up the remaining free pages */
1532 put_pages_list(&pagepool);
/*
 * NOTE(review): this listing elides several lines (e.g. 1540, 1543,
 * 1549, 1551-1552, 1570-1572, 1574-1577, 1590-1594, 1596, 1598-1599),
 * including braces, 'int err', and parts of both loops.
 *
 * .readpages handler: take pages off the readahead list, insert them
 * into the page cache, chain them through page_private(), then feed the
 * chain (in reverse order) into the decompression frontend and kick off
 * submission. Decompression is synchronous only for small readahead
 * batches without a PG_readahead hit.
 */
1536 static int z_erofs_vle_normalaccess_readpages(struct file *filp,
1537 struct address_space *mapping,
1538 struct list_head *pages,
1539 unsigned int nr_pages)
1541 struct inode *const inode = mapping->host;
1542 struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1544 bool sync = __should_decompress_synchronously(sbi, nr_pages);
1545 struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1546 gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
1547 struct page *head = NULL;
1548 LIST_HEAD(pagepool);
1550 trace_erofs_readpages(mapping->host, lru_to_page(pages),
1553 f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
1555 for (; nr_pages; --nr_pages) {
1556 struct page *page = lru_to_page(pages);
1558 prefetchw(&page->flags);
1559 list_del(&page->lru);
1562 * A pure asynchronous readahead is indicated if
1563 * a PG_readahead marked page is hit first.
1564 * Let's also do asynchronous decompression for this case.
1566 sync &= !(PageReadahead(page) && !head);
/* failed to add to page cache: recycle the page via the pool */
1568 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
1569 list_add(&page->lru, &pagepool);
/* chain accepted pages through page_private() for the next pass */
1573 set_page_private(page, (unsigned long)head);
1578 struct page *page = head;
1581 /* traversal in reverse order */
1582 head = (void *)page_private(page);
1584 err = z_erofs_do_read_page(&f, page, &pagepool);
1586 struct erofs_vnode *vi = EROFS_V(inode);
1588 errln("%s, readahead error at page %lu of nid %llu",
1589 __func__, page->index, vi->nid);
1595 (void)z_erofs_vle_work_iter_end(&f.builder);
1597 z_erofs_submit_and_unzip(&f, &pagepool, sync);
1600 put_page(f.map.mpage);
1602 /* clean up the remaining free pages */
1603 put_pages_list(&pagepool);
/*
 * address_space operations for compressed (VLE) regular files.
 * NOTE(review): the closing "};" is elided from this listing.
 */
1607 const struct address_space_operations z_erofs_vle_normalaccess_aops = {
1608 .readpage = z_erofs_vle_normalaccess_readpage,
1609 .readpages = z_erofs_vle_normalaccess_readpages,
1613 * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
1615 * VLE compression mode attempts to compress a number of logical data into
1616 * a physical cluster with a fixed size.
1617 * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
/* extract a 'bits'-wide field starting at 'bit' from a le16 advise word */
1619 #define __vle_cluster_advise(x, bit, bits) \
1620 ((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
/* decode the cluster type field from an advise word */
1622 #define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
1623 Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
/* cluster type (PLAIN/HEAD/NONHEAD) of a decompressed index entry */
1625 #define vle_cluster_type(di) \
1626 __vle_cluster_type((di)->di_advise)
/*
 * NOTE(review): the return-type line (presumably "static int"), braces,
 * break statements and the default/error path are elided from this
 * listing (gaps at 1628, 1632, 1636, 1640+).
 *
 * Decode the in-cluster logical offset for an on-disk index entry:
 * a NONHEAD cluster reports the full clustersize (its data starts in an
 * earlier cluster), while PLAIN/HEAD clusters report di_clusterofs.
 */
1629 vle_decompressed_index_clusterofs(unsigned int *clusterofs,
1630 unsigned int clustersize,
1631 struct z_erofs_vle_decompressed_index *di)
1633 switch (vle_cluster_type(di)) {
1634 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1635 *clusterofs = clustersize;
1637 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1638 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1639 *clusterofs = le16_to_cpu(di->di_clusterofs);
/*
 * Block address (within the meta area) of the on-disk decompressed-index
 * entry for logical cluster 'index' of this inode: the entry lives past
 * the inode base, xattrs and the extent header at the inode's iloc.
 * NOTE(review): the "{" / "}" lines are elided from this listing.
 */
1648 static inline erofs_blk_t
1649 vle_extent_blkaddr(struct inode *inode, pgoff_t index)
1651 struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1652 struct erofs_vnode *vi = EROFS_V(inode);
1654 unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1655 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1656 index * sizeof(struct z_erofs_vle_decompressed_index);
1658 return erofs_blknr(iloc(sbi, vi->nid) + ofs);
/*
 * Byte offset within the meta block (see vle_extent_blkaddr()) of the
 * decompressed-index entry for logical cluster 'index'; computed from
 * the same per-inode layout: inode base + xattrs + extent header.
 * NOTE(review): the "{" / "}" lines are elided from this listing.
 */
1661 static inline unsigned int
1662 vle_extent_blkoff(struct inode *inode, pgoff_t index)
1664 struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1665 struct erofs_vnode *vi = EROFS_V(inode);
1667 unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1668 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1669 index * sizeof(struct z_erofs_vle_decompressed_index);
1671 return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
/*
 * Read-only context shared by the map_blocks iteration helpers.
 * NOTE(review): the closing "};" and at least one member (a kaddr_ret
 * field is dereferenced by vle_get_logical_extent_head() below) are
 * elided from this listing.
 */
1674 struct vle_map_blocks_iter_ctx {
1675 struct inode *inode;
1676 struct super_block *sb;
1677 unsigned int clusterbits;
/* in/out: currently-mapped extent metapage of the caller */
1679 struct page **mpage_ret;
/*
 * NOTE(review): the return-type line, the 'pblk' parameter line, braces,
 * break/return statements and the error-return paths are elided from
 * this listing (gaps at 1687, 1689, 1693, 1699-1701, 1706, 1709-1710,
 * 1720-1722, 1727, 1733-1734, 1737+).
 *
 * Resolve the HEAD (or PLAIN) cluster that owns logical cluster 'lcn':
 * NONHEAD entries carry a backward delta and recurse toward the head;
 * on success *ofs receives the extent's logical start offset and *pblk
 * its compressed block address. Switches the kmapped extent metapage
 * when 'lcn' falls into a different meta block.
 */
1684 vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
1685 unsigned int lcn, /* logical cluster number */
1686 unsigned long long *ofs,
1688 unsigned int *flags)
1690 const unsigned int clustersize = 1 << ctx->clusterbits;
1691 const erofs_blk_t mblk = vle_extent_blkaddr(ctx->inode, lcn);
1692 struct page *mpage = *ctx->mpage_ret; /* extent metapage */
1694 struct z_erofs_vle_decompressed_index *di;
1695 unsigned int cluster_type, delta0;
/* need a different meta block: swap the kmapped metapage */
1697 if (mpage->index != mblk) {
1698 kunmap_atomic(*ctx->kaddr_ret);
1702 mpage = erofs_get_meta_page(ctx->sb, mblk, false);
1703 if (IS_ERR(mpage)) {
1704 *ctx->mpage_ret = NULL;
1705 return PTR_ERR(mpage);
1707 *ctx->mpage_ret = mpage;
1708 *ctx->kaddr_ret = kmap_atomic(mpage);
1711 di = *ctx->kaddr_ret + vle_extent_blkoff(ctx->inode, lcn);
1713 cluster_type = vle_cluster_type(di);
1714 switch (cluster_type) {
1715 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1716 delta0 = le16_to_cpu(di->di_u.delta[0]);
/* on-disk corruption check: delta must step strictly backwards */
1717 if (unlikely(!delta0 || delta0 > lcn)) {
1718 errln("invalid NONHEAD dl0 %u at lcn %u of nid %llu",
1719 delta0, lcn, EROFS_V(ctx->inode)->nid);
1723 return vle_get_logical_extent_head(ctx,
1724 lcn - delta0, ofs, pblk, flags);
1725 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
/* PLAIN means stored uncompressed: toggle the ZIPPED flag off */
1726 *flags ^= EROFS_MAP_ZIPPED;
1728 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1729 /* clustersize should be a power of two */
1730 *ofs = ((u64)lcn << ctx->clusterbits) +
1731 (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
1732 *pblk = le32_to_cpu(di->di_u.blkaddr);
1735 errln("unknown cluster type %u at lcn %u of nid %llu",
1736 cluster_type, lcn, EROFS_V(ctx->inode)->nid);
1743 int z_erofs_map_blocks_iter(struct inode *inode,
1744 struct erofs_map_blocks *map,
1748 const struct vle_map_blocks_iter_ctx ctx = {
1751 .clusterbits = EROFS_I_SB(inode)->clusterbits,
1752 .mpage_ret = &map->mpage,
1755 const unsigned int clustersize = 1 << ctx.clusterbits;
1756 /* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
1757 const bool initial = !map->m_llen;
1759 /* logical extent (start, end) offset */
1760 unsigned long long ofs, end;
1764 /* initialize `pblk' to keep gcc from printing foolish warnings */
1765 erofs_blk_t mblk, pblk = 0;
1766 struct page *mpage = map->mpage;
1767 struct z_erofs_vle_decompressed_index *di;
1768 unsigned int cluster_type, logical_cluster_ofs;
1771 trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
1773 /* when trying to read beyond EOF, leave it unmapped */
1774 if (unlikely(map->m_la >= inode->i_size)) {
1775 DBG_BUGON(!initial);
1776 map->m_llen = map->m_la + 1 - inode->i_size;
1777 map->m_la = inode->i_size;
1782 debugln("%s, m_la %llu m_llen %llu --- start", __func__,
1783 map->m_la, map->m_llen);
1785 ofs = map->m_la + map->m_llen;
1787 /* clustersize should be power of two */
1788 lcn = ofs >> ctx.clusterbits;
1789 ofs_rem = ofs & (clustersize - 1);
1791 mblk = vle_extent_blkaddr(inode, lcn);
1793 if (!mpage || mpage->index != mblk) {
1797 mpage = erofs_get_meta_page(ctx.sb, mblk, false);
1798 if (IS_ERR(mpage)) {
1799 err = PTR_ERR(mpage);
1805 DBG_BUGON(!PageUptodate(mpage));
1808 kaddr = kmap_atomic(mpage);
1809 di = kaddr + vle_extent_blkoff(inode, lcn);
1811 debugln("%s, lcn %u mblk %u e_blkoff %u", __func__, lcn,
1812 mblk, vle_extent_blkoff(inode, lcn));
1814 err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
1820 /* [walking mode] 'map' has been already initialized */
1821 map->m_llen += logical_cluster_ofs;
1825 /* by default, compressed */
1826 map->m_flags |= EROFS_MAP_ZIPPED;
1828 end = ((u64)lcn + 1) * clustersize;
1830 cluster_type = vle_cluster_type(di);
1832 switch (cluster_type) {
1833 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1834 if (ofs_rem >= logical_cluster_ofs)
1835 map->m_flags ^= EROFS_MAP_ZIPPED;
1837 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1838 if (ofs_rem == logical_cluster_ofs) {
1839 pblk = le32_to_cpu(di->di_u.blkaddr);
1843 if (ofs_rem > logical_cluster_ofs) {
1844 ofs = (u64)lcn * clustersize | logical_cluster_ofs;
1845 pblk = le32_to_cpu(di->di_u.blkaddr);
1849 /* logical cluster number should be >= 1 */
1850 if (unlikely(!lcn)) {
1851 errln("invalid logical cluster 0 at nid %llu",
1852 EROFS_V(inode)->nid);
1856 end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
1858 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1859 /* get the corresponding first chunk */
1860 err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
1861 &pblk, &map->m_flags);
1864 if (unlikely(err)) {
1871 errln("unknown cluster type %u at offset %llu of nid %llu",
1872 cluster_type, ofs, EROFS_V(inode)->nid);
1879 map->m_llen = end - ofs;
1880 map->m_plen = clustersize;
1881 map->m_pa = blknr_to_addr(pblk);
1882 map->m_flags |= EROFS_MAP_MAPPED;
1884 kunmap_atomic(kaddr);
1887 debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
1888 __func__, map->m_la, map->m_pa,
1889 map->m_llen, map->m_plen, map->m_flags);
1891 trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
1893 /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
1894 DBG_BUGON(err < 0 && err != -ENOMEM);