From: Linus Torvalds
Date: Wed, 3 Jun 2020 03:16:55 +0000 (-0700)
Subject: Merge tag 'erofs-for-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang...
X-Git-Tag: v5.8-rc1~180
X-Git-Url: http://git.osdn.net/view?p=tomoyo%2Ftomoyo-test1.git;a=commitdiff_plain;h=d6f9469a03d832dcd17041ed67774ffb5f3e73b3

Merge tag 'erofs-for-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "The most interesting part is the new mount API conversion, which is
  actually an old patch that has been pending for several cycles. The
  others are recent trivial cleanups.

  Summary:

   - Convert to use the new mount APIs

   - Some random cleanup patches"

* tag 'erofs-for-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: suppress false positive last_block warning
  erofs: convert to use the new mount fs_context api
  erofs: code cleanup by removing ifdef macro surrounding
---
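Note on the summary above: the "new mount APIs" item refers to the fs_context
interface; its patch is not visible in the combined diff below, which only
covers the spots where the erofs branch and mainline's readpages-to-readahead
conversion touched the same code. As a rough sketch of what an fs_context-based
mount path looks like, here is a hypothetical "examplefs" (all names invented
for illustration, not taken from the actual erofs patch):

/*
 * Hypothetical examplefs, for illustration only -- not the erofs code.
 * Shows the general shape of an fs_context-based mount.
 */
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/module.h>

enum { Opt_user_xattr };

static const struct fs_parameter_spec examplefs_parameters[] = {
	fsparam_flag_no("user_xattr", Opt_user_xattr),
	{}
};

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* read the on-disk superblock, set sb->s_op, instantiate the root */
	return -EINVAL;		/* stub */
}

/* called once per mount option instead of parsing one big data string */
static int examplefs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt = fs_parse(fc, examplefs_parameters, param, &result);

	if (opt < 0)
		return opt;
	/* stash the parsed value in fc->fs_private for ->get_tree() */
	return 0;
}

static int examplefs_get_tree(struct fs_context *fc)
{
	/* block-device-backed filesystems hand off to get_tree_bdev() */
	return get_tree_bdev(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
	.parse_param	= examplefs_parse_param,
	.get_tree	= examplefs_get_tree,
};

/* replaces the old ->mount() entry point in struct file_system_type */
static int examplefs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &examplefs_context_ops;
	return 0;
}

static struct file_system_type examplefs_fs_type = {
	.owner		 = THIS_MODULE,
	.name		 = "examplefs",
	.init_fs_context = examplefs_init_fs_context,
	.parameters	 = examplefs_parameters,
	.kill_sb	 = kill_block_super,
	.fs_flags	 = FS_REQUIRES_DEV,
};

After register_filesystem(&examplefs_fs_type), the VFS builds a struct
fs_context for each mount and feeds options to ->parse_param() one at a time,
rather than passing a single data string to the old ->mount() callback.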
d6f9469a03d832dcd17041ed67774ffb5f3e73b3
diff --cc fs/erofs/data.c
index d0542151e8c4,2812645b361e..64b56c7df023
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@@ -280,32 -280,42 +280,32 @@@ static int erofs_raw_access_readpage(st
  	return 0;
  }
  
 -static int erofs_raw_access_readpages(struct file *filp,
 -				      struct address_space *mapping,
 -				      struct list_head *pages,
 -				      unsigned int nr_pages)
 +static void erofs_raw_access_readahead(struct readahead_control *rac)
  {
- 	erofs_off_t last_block;
+ 	erofs_off_t uninitialized_var(last_block);
  	struct bio *bio = NULL;
 -	gfp_t gfp = readahead_gfp_mask(mapping);
 -	struct page *page = list_last_entry(pages, struct page, lru);
 -
 -	trace_erofs_readpages(mapping->host, page, nr_pages, true);
 +	struct page *page;
  
 -	for (; nr_pages; --nr_pages) {
 -		page = list_entry(pages->prev, struct page, lru);
 +	trace_erofs_readpages(rac->mapping->host, readahead_index(rac),
 +			      readahead_count(rac), true);
  
 +	while ((page = readahead_page(rac))) {
  		prefetchw(&page->flags);
 -		list_del(&page->lru);
  
 -		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
 -			bio = erofs_read_raw_page(bio, mapping, page,
 -						  &last_block, nr_pages, true);
 +		bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
 +					  readahead_count(rac), true);
  
 -			/* all the page errors are ignored when readahead */
 -			if (IS_ERR(bio)) {
 -				pr_err("%s, readahead error at page %lu of nid %llu\n",
 -				       __func__, page->index,
 -				       EROFS_I(mapping->host)->nid);
 +		/* all the page errors are ignored when readahead */
 +		if (IS_ERR(bio)) {
 +			pr_err("%s, readahead error at page %lu of nid %llu\n",
 +			       __func__, page->index,
 +			       EROFS_I(rac->mapping->host)->nid);
  
 -				bio = NULL;
 -			}
 +			bio = NULL;
  		}
  
 -		/* pages could still be locked */
  		put_page(page);
  	}
 -	DBG_BUGON(!list_empty(pages));
  
  	/* the rare case (end in gaps) */
  	if (bio)
diff --cc fs/erofs/zdata.c
index 187f93b4900e,5086b1218aac..be50a4d9d273
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@@ -1302,26 -1302,31 +1302,26 @@@ static int z_erofs_readpage(struct fil
  static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
  					    unsigned int nr)
  {
- 	return nr <= sbi->max_sync_decompress_pages;
+ 	return nr <= sbi->ctx.max_sync_decompress_pages;
  }
  
 -static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
 -			     struct list_head *pages, unsigned int nr_pages)
 +static void z_erofs_readahead(struct readahead_control *rac)
  {
 -	struct inode *const inode = mapping->host;
 +	struct inode *const inode = rac->mapping->host;
  	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
  
 -	bool sync = should_decompress_synchronously(sbi, nr_pages);
 +	bool sync = should_decompress_synchronously(sbi, readahead_count(rac));
  	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
 -	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 -	struct page *head = NULL;
 +	struct page *page, *head = NULL;
  	LIST_HEAD(pagepool);
  
 -	trace_erofs_readpages(mapping->host, lru_to_page(pages),
 -			      nr_pages, false);
 +	trace_erofs_readpages(inode, readahead_index(rac),
 +			      readahead_count(rac), false);
  
 -	f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
 -
 -	for (; nr_pages; --nr_pages) {
 -		struct page *page = lru_to_page(pages);
 +	f.headoffset = readahead_pos(rac);
  
 +	while ((page = readahead_page(rac))) {
  		prefetchw(&page->flags);
 -		list_del(&page->lru);
  
  		/*
  		 * A pure asynchronous readahead is indicated if