// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"
/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as 64K page would ensure
 *   all nodesize would fit inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have done that for a while, thus only ancient
 *   filesystems could have such a problem.  For such a case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning that reading one tree block will only trigger the read for the
 *   needed range, other unrelated ranges in the same page will not be touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage,
	 * as we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!page->mapping || !page->mapping->host ||
	    is_data_inode(page->mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, which goes through the
	 * subpage routine only if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}
void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->error_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}
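
/*
 * Worked example (illustrative, derived from the code above): with 64K pages
 * and 4K sectors, nr_bits is 16, so the per-page bitmap layout becomes:
 *
 *   uptodate_offset  = 0
 *   error_offset     = 16
 *   dirty_offset     = 32
 *   writeback_offset = 48
 *   ordered_offset   = 64
 *   checked_offset   = 80
 *   total_nr_bits    = 96
 */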
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (page->mapping)
		ASSERT(PageLocked(page));

	/* Either not subpage, or the page already has private attached */
	if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	attach_page_private(page, subpage);
	return 0;
}
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or already detached */
	if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))
		return;

	subpage = detach_page_private(page);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}
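
/*
 * Illustrative attach/detach pairing (a sketch, not copied from any single
 * caller): a mapped page is expected to be locked before attaching.
 *
 *	lock_page(page);
 *	ret = btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
 *	if (ret < 0)
 *		goto out;	(only -ENOMEM, from btrfs_alloc_subpage())
 *	...
 *	btrfs_detach_subpage(fs_info, page);
 */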
struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}
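
/*
 * Size note (illustrative): continuing the 64K page / 4K sector example,
 * total_nr_bits is 96, so BITS_TO_LONGS(96) is 2 on a 64-bit machine and
 * struct_size() only needs room for two unsigned longs of bitmap besides
 * the fixed part of struct btrfs_subpage.
 */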
void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}
/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent racing with the freeing
 * of the last eb in the same page.
 * With eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're still
 * allocating the extent buffer.
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	atomic_inc(&subpage->eb_refs);
}
void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	/* Basic checks */
	ASSERT(PagePrivate(page) && page->private);
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages, as we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
}
void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);

	atomic_add(nbits, &subpage->readers);
}
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, page, start, len);
	is_data = is_data_inode(page->mapping->host);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * And please don't replace @last with an atomic_sub_and_test() call
	 * inside the if () condition, as we want atomic_sub_and_test() to
	 * always be executed.
	 */
	if (is_data && last)
		unlock_page(page);
}
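
/*
 * Worked example (illustrative): with 4K sectors, submitting a read for a
 * 16K range adds 4 to subpage->readers in btrfs_subpage_start_reader();
 * btrfs_subpage_end_reader() over the same range subtracts 4, and for a
 * data page the page lock is dropped once the count reaches zero.
 */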
static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, page_offset(page), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0, subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (page_offset(page) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, page_offset(page) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}
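
/*
 * Worked example (illustrative, 64K pages): for a page at file offset 64K
 * and a range [60K, 76K) (start = 60K, len = 16K), the clamped result is
 * start = 64K, len = 12K: only the part of the range inside this page.
 */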
void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = (len >> fs_info->sectorsize_bits);
	int ret;

	btrfs_subpage_assert(fs_info, page, start, len);

	ASSERT(atomic_read(&subpage->readers) == 0);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
}
bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
				       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = (len >> fs_info->sectorsize_bits);

	btrfs_subpage_assert(fs_info, page, start, len);

	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * Such @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0. Handle it in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0)
		return true;

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	return atomic_sub_and_test(nbits, &subpage->writers);
}
/*
 * Lock a page for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
 */
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
		lock_page(page);
		return 0;
	}
	lock_page(page);
	if (!PagePrivate(page) || !page->private) {
		unlock_page(page);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(page, &start, &len);
	btrfs_subpage_start_writer(fs_info, page, start, len);
	return 0;
}
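
/*
 * Illustrative pairing (a sketch, not copied from any single caller):
 *
 *	ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
 *	if (ret == -EAGAIN) {
 *		...	(page got invalidated, drop it and retry)
 *	}
 *	...	(do the writeback work)
 *	btrfs_page_end_writer_lock(fs_info, page, start, len);
 */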
void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
		return unlock_page(page);
	btrfs_subpage_clamp_range(page, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
		unlock_page(page);
}
static bool bitmap_test_range_all_set(unsigned long *addr, unsigned int start,
				      unsigned int nbits)
{
	unsigned int found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	if (found_zero == start + nbits)
		return true;
	return false;
}

static bool bitmap_test_range_all_zero(unsigned long *addr, unsigned int start,
				       unsigned int nbits)
{
	unsigned int found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	if (found_set == start + nbits)
		return true;
	return false;
}
#define subpage_calc_start_bit(fs_info, page, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, page, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})

#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)
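
/*
 * Worked example (illustrative, 64K pages / 4K sectors, so dirty_offset is
 * 32 as laid out above): for a range starting 8K into the page,
 * subpage_calc_start_bit(fs_info, page, dirty, start, len) yields
 * (8K >> 12) + 32 = 34, i.e. the third bit of the dirty bitmap.
 */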
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	ClearPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
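
/*
 * Note the asymmetry of the two helpers above (an observation on the code,
 * not from the original comments): the page-level uptodate flag is only set
 * once every sector in the page is uptodate, while clearing any sector
 * clears it immediately. The helpers below follow similar rules: a flag
 * meaning "the whole page is X" (uptodate, checked) is set only when all
 * bits are set, while one meaning "some part of the page is X" (error,
 * dirty, writeback, ordered) is cleared only when all bits are clear.
 */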
void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							error, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	SetPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							error, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, error))
		ClearPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	set_page_dirty(page);
}
/*
 * Extra clear_and_test function for subpage dirty bitmap.
 *
 * Return true if we're the last bits in the dirty_bitmap and clear the
 * dirty_bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty for the true case, as we
 * have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}
void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
	if (last)
		clear_page_dirty_for_io(page);
}
void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	set_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(PageWriteback(page));
		end_page_writeback(page);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}
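
/*
 * A note on locking (an observation, not from the original comments): the
 * bitmap is protected by spin_lock_irqsave() because the read and writeback
 * completion paths can update it from bio endio context, which may run in
 * interrupt context.
 */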
void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	SetPageOrdered(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		ClearPageOrdered(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		SetPageChecked(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	ClearPageChecked(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
/*
 * Unlike set/clear, which is dependent on each page's status, for test all
 * bits are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
/*
 * Note that, in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in. We only test sectorsize == PAGE_SIZE cases so far, thus we can
 * fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
				 test_page_func)			\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}									\
void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
			 PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
			 PageWriteback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
			 PageOrdered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);
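
/*
 * For example (illustrative expansion), IMPLEMENT_BTRFS_PAGE_OPS(dirty, ...)
 * above generates btrfs_page_set_dirty(), btrfs_page_clear_dirty(),
 * btrfs_page_test_dirty() plus the three clamp_ variants, each falling back
 * to the plain page-flag helper (set_page_dirty() etc.) when fs_info is
 * NULL or the page is not subpage.
 */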
/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				 struct page *page)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!PageDirty(page));
	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->private);
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}
/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by unlock_page().
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Page locked by lock_delalloc_pages()
 *   There is only one caller, all pages except @locked_page for
 *   extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle it.
 */
void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
			      u64 start, u32 len)
{
	struct btrfs_subpage *subpage;

	ASSERT(PageLocked(page));
	/* For the non-subpage case, we just unlock the page */
	if (!btrfs_is_subpage(fs_info, page))
		return unlock_page(page);

	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	/*
	 * For the subpage case, there are two types of locked page: with or
	 * without a writers count.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0)
		/* No writers, locked by plain lock_page() */
		return unlock_page(page);

	/* Have writers, use proper subpage helper to end it */
	btrfs_page_end_writer_lock(fs_info, page, start, len);
}