// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992, 2002  Linus Torvalds
 *
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads.  SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include "internal.h"
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);
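
/*
 * Illustrative sketch (not part of the original file): the typical
 * lock/modify/unlock cycle these primitives support, assuming the caller
 * already holds a reference on bh.  The wake_up_bit() in unlock_buffer()
 * is what lets a concurrent __wait_on_buffer() return.
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 */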
/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale.  If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				     bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;
	if (folio_test_writeback(folio))
		*writeback = true;
	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;
		if (buffer_dirty(bh))
			*dirty = true;
		bh = bh->b_this_page;
	} while (bh != head);
}
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}
/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
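
/*
 * Illustrative sketch (not part of the original file): the classic
 * synchronous-write pattern this handler supports, essentially what
 * sync_dirty_buffer() does internally.  The get_bh() balances the
 * put_bh() in end_buffer_write_sync().
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(REQ_OP_WRITE, bh);
 *		wait_on_buffer(bh);
 *	} else {
 *		unlock_buffer(bh);
 *	}
 */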
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	spin_lock(&bd_mapping->private_lock);
	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	folio_put(folio);
out:
	return ret;
}
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		folio_set_error(folio);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	/*
	 * If all of the buffers are uptodate then we can set the page
	 * uptodate.
	 */
	if (folio_uptodate)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}
static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}
/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}
/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		folio_set_error(folio);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);
/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * marked dirty.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}
int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}
/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that list.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);
/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
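
/*
 * Illustrative sketch (not part of the original file): how a simple
 * filesystem might hook this up; the "myfs" names are hypothetical.
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 *
 *	const struct file_operations myfs_file_operations = {
 *		.fsync		= myfs_fsync,
 *	};
 */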
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
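
/*
 * Illustrative sketch (not part of the original file): an ext2-style
 * filesystem dirtying a freshly initialised indirect block against the
 * file's inode, so that a later sync_mapping_buffers() from fsync()
 * writes it out; "pblock" is hypothetical.
 *
 *	struct buffer_head *bh = sb_getblk(inode->i_sb, pblock);
 *	if (bh) {
 *		lock_buffer(bh);
 *		memset(bh->b_data, 0, bh->b_size);
 *		set_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *		mark_buffer_dirty_inode(bh, inode);
 *		brelse(bh);
 *	}
 */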
/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
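
/*
 * Illustrative sketch (not part of the original file): buffer-backed
 * filesystems typically plug block_dirty_folio() straight into their
 * address_space_operations; the "myfs" entries are hypothetical.
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *	};
 */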
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);

	spin_lock(lock);
	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}
/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);
/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}
/*
 * Create the appropriate buffers when given a folio for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					bool retry)
{
	struct buffer_head *bh, *head;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	if (retry)
		gfp |= __GFP_NOFAIL;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;
		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}
	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry)
{
	return folio_alloc_buffers(page_folio(page), size, retry);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
static inline void link_dev_buffers(struct folio *folio,
		struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);

		retval = (sz >> sizebits);
	}
	return retval;
}
/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}
/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
	      pgoff_t index, int size, int sizebits, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;
	gfp_t gfp_mask;

	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

	/*
	 * XXX: __getblk_slow() can not really deal with failure and
	 * will endlessly loop on improvised global reclaim.  Prefer
	 * looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp_mask |= __GFP_NOFAIL;

	folio = __filemap_get_folio(inode->i_mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp_mask);

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev,
					(sector_t)index << sizebits, size);
			goto done;
		}
		if (!try_to_free_buffers(folio))
			goto failed;
	}

	bh = folio_alloc_buffers(folio, size, true);

	/*
	 * Link the folio to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev,
			(sector_t)index << sizebits, size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}
/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
	pgoff_t index;
	int sizebits;

	sizebits = PAGE_SHIFT - __ffs(size);
	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return -EIO;
	}

	/* Create a page with the proper size buffers. */
	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size, gfp);
		if (ret < 0)
			return NULL;
	}
}
/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */
/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		folio_memcg_lock(folio);
		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			__folio_mark_dirty(folio, mapping, 0);
		}
		folio_memcg_unlock(folio);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
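
/*
 * Illustrative sketch (not part of the original file): the common
 * read-modify-write cycle on a metadata block; "sb" and "blocknr" are
 * hypothetical.  Writeback then picks the dirty buffer up asynchronously.
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	((__le32 *)bh->b_data)[0] = cpu_to_le32(42);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */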
void mark_buffer_write_io_error(struct buffer_head *bh)
{
	struct super_block *sb;

	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map)
		mapping_set_error(bh->b_assoc_map, -EIO);
	rcu_read_lock();
	sb = READ_ONCE(bh->b_bdev->bd_super);
	if (sb)
		errseq_set(&sb->s_wb_err, -EIO);
	rcu_read_unlock();
}
EXPORT_SYMBOL(mark_buffer_write_io_error);
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head *buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);
/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);
static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}
/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif
static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}
/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * The refcount of a buffer_head in the bh_lru prevents its attached
	 * page from being dropped (by try_to_free_buffers), which could make
	 * page migration fail.
	 * Skip putting upcoming bh into bh_lru until migration is done.
	 */
	if (lru_cache_disabled()) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}
/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}
/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);
/*
 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk_gfp() will lock up the machine if grow_dev_page's
 * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk_gfp(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size, gfp);
	return bh;
}
EXPORT_SYMBOL(__getblk_gfp);
/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);
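
/*
 * Illustrative sketch (not part of the original file): priming the cache
 * ahead of a sequential metadata walk, using the superblock wrapper
 * sb_breadahead(); "sb" and "blocknr" are hypothetical.
 *
 *	sb_breadahead(sb, blocknr + 1);
 *	bh = sb_bread(sb, blocknr);
 */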
/**
 * __bread_gfp() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 * @gfp: page allocation flag
 *
 * Reads a specified block, and returns the buffer head that contains it.
 * If @gfp is zero, the page cache is allocated from the non-movable area
 * so that the cached page does not hinder page migration.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
		   unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);
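
/*
 * Illustrative sketch (not part of the original file): a blocking metadata
 * read through the buffer cache; "sb", "blocknr" and myfs_parse_super()
 * are hypothetical.  sb_bread() expands to __bread_gfp() with the
 * superblock's block size and __GFP_MOVABLE.
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	myfs_parse_super(bh->b_data);
 *	brelse(bh);
 */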
static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}
bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);
/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head *bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}
/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);
/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
 */
void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
				unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = folio_alloc_buffers(folio, blocksize, true);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->private_lock);
}
EXPORT_SYMBOL(folio_create_empty_buffers);

void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	folio_create_empty_buffers(page_folio(page), blocksize, b_state);
}
EXPORT_SYMBOL(create_empty_buffers);
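
/*
 * Illustrative sketch (not part of the original file): the canonical
 * attach-if-absent pattern filesystems use before walking a folio's
 * buffers (folio_create_buffers() below does exactly this).
 *
 *	if (!folio_buffers(folio))
 *		folio_create_empty_buffers(folio,
 *				1 << inode->i_blkbits, 0);
 *	head = folio_buffers(folio);
 */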
/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases from the moment this function returns until the moment
 * when something explicitly marks the buffer dirty (hopefully that
 * will not happen until we free that block ;-) We don't even need to mark
 * it not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway.  We used to use unmap_buffer() for such invalidation, but that was
 * wrong.  We definitely don't want to mark the alias unmapped, for example - it
 * would confuse anyone who might pick it up with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
	folio_batch_init(&fbatch);
	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
		count = folio_batch_count(&fbatch);
		for (i = 0; i < count; i++) {
			struct folio *folio = fbatch.folios[i];

			if (!folio_buffers(folio))
				continue;
			/*
			 * We use folio lock instead of bd_mapping->private_lock
			 * to pin buffers here since we can afford to sleep and
			 * it scales better than a global spinlock lock.
			 */
			folio_lock(folio);
			/* Recheck when the folio is locked which pins bhs */
			head = folio_buffers(folio);
			if (!head)
				goto unlock_page;
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);
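
/*
 * Illustrative sketch (not part of the original file): after allocating
 * block "pblock" (hypothetical) for file data, a filesystem kills any
 * stale buffer-cache alias before using the block; for a single buffer
 * the clean_bdev_bh_alias() shorthand does the same thing.
 *
 *	clean_bdev_aliases(sb->s_bdev, pblock, 1);
 */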
/*
 * Size is a power-of-two in the range 512..PAGE_SIZE,
 * and the case we care about most is PAGE_SIZE.
 *
 * So this *could* possibly be written with those
 * constraints in mind (relevant mostly if some
 * architecture has a slow bit-scan instruction)
 */
static inline int block_size_bits(unsigned int blocksize)
{
	return ilog2(blocksize);
}

static struct buffer_head *folio_create_buffers(struct folio *folio,
						struct inode *inode,
						unsigned int b_state)
{
	BUG_ON(!folio_test_locked(folio));

	if (!folio_buffers(folio))
		folio_create_empty_buffers(folio,
					   1 << READ_ONCE(inode->i_blkbits),
					   b_state);
	return folio_buffers(folio);
}
/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */
/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_page() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
int __block_write_full_folio(struct inode *inode, struct folio *folio,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	unsigned int blocksize, bbits;
	int nr_underway = 0;
	blk_opf_t write_flags = wbc_to_write_flags(wbc);

	head = folio_create_buffers(folio, inode,
				    (1 << BH_Dirty) | (1 << BH_Uptodate));

	/*
	 * Be very careful.  We have no exclusion from block_dirty_folio
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the folio stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by block_dirty_folio;
	 * handle that here by just cleaning them.
	 */

	bh = head;
	blocksize = bh->b_size;
	bbits = block_size_bits(blocksize);

	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
	last_block = (i_size_read(inode) - 1) >> bbits;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this folio can be outside i_size when there is a
			 * truncate in progress.
			 *
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			   buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				clean_bdev_bh_alias(bh);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the folio.  Note that this can
		 * potentially cause a busy-wait loop from writeback threads
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			folio_redirty_for_writepage(wbc, folio);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write_endio(bh, handler);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The folio and its buffers are protected by the writeback flag,
	 * so we can drop the bh refcounts early.
	 */
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	folio_unlock(folio);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The folio was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * write_dirty_buffer/submit_bh.  A rare case.
		 */
		folio_end_writeback(folio);

		/*
		 * The folio and buffer_heads can be released at any time from
		 * here on.
		 */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The folio is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write_endio(bh, handler);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty folio.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	folio_set_error(folio);
	BUG_ON(folio_test_writeback(folio));
	mapping_set_error(folio->mapping, err);
	folio_start_writeback(folio);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	folio_unlock(folio);
	goto done;
}
EXPORT_SYMBOL(__block_write_full_folio);
/*
 * If a folio has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking).  And clear the new bit.
 */
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
{
	size_t block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!folio_test_locked(folio));
	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!folio_test_uptodate(folio)) {
					size_t start, xend;

					start = max(from, block_start);
					xend = min(to, block_end);

					folio_zero_segment(folio, start, xend);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(folio_zero_new_buffers);
static void
iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
		const struct iomap *iomap)
{
	loff_t offset = block << inode->i_blkbits;

	bh->b_bdev = iomap->bdev;

	/*
	 * Block points to offset in file we need to map, iomap contains
	 * the offset at which the map starts. If the map ends before the
	 * current block, then do not map the buffer and let the caller
	 * handle it.
	 */
	BUG_ON(offset >= iomap->offset + iomap->length);

	switch (iomap->type) {
	case IOMAP_HOLE:
		/*
		 * If the buffer is not up to date or beyond the current EOF,
		 * we need to mark it as new to ensure sub-block zeroing is
		 * executed if necessary.
		 */
		if (!buffer_uptodate(bh) ||
		    (offset >= i_size_read(inode)))
			set_buffer_new(bh);
		break;
	case IOMAP_DELALLOC:
		if (!buffer_uptodate(bh) ||
		    (offset >= i_size_read(inode)))
			set_buffer_new(bh);
		set_buffer_uptodate(bh);
		set_buffer_mapped(bh);
		set_buffer_delay(bh);
		break;
	case IOMAP_UNWRITTEN:
		/*
		 * For unwritten regions, we always need to ensure that regions
		 * in the block we are not writing to are zeroed. Mark the
		 * buffer as new to ensure this.
		 */
		set_buffer_new(bh);
		set_buffer_unwritten(bh);
		fallthrough;
	case IOMAP_MAPPED:
		if ((iomap->flags & IOMAP_F_NEW) ||
		    offset >= i_size_read(inode))
			set_buffer_new(bh);
		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
				inode->i_blkbits;
		set_buffer_mapped(bh);
		break;
	}
}
int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
		get_block_t *get_block, const struct iomap *iomap)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = folio->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	head = folio_create_buffers(folio, inode, 0);
	blocksize = head->b_size;
	bbits = block_size_bits(blocksize);

	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (folio_test_uptodate(folio)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			if (get_block) {
				err = get_block(inode, block, bh, 1);
				if (err)
					break;
			} else {
				iomap_to_bh(inode, block, bh, iomap);
			}

			if (buffer_new(bh)) {
				clean_bdev_bh_alias(bh);
				if (folio_test_uptodate(folio)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					folio_zero_segments(folio,
						to, block_end,
						block_start, from);
				continue;
			}
		}
		if (folio_test_uptodate(folio)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			bh_read_nowait(bh, 0);
			*wait_bh++ = bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		folio_zero_new_buffers(folio, from, to);
	return err;
}

int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block)
{
	return __block_write_begin_int(page_folio(page), pos, len, get_block,
				       NULL);
}
EXPORT_SYMBOL(__block_write_begin);
static int __block_commit_write(struct inode *inode, struct folio *folio,
		size_t from, size_t to)
{
	size_t block_start, block_end;
	bool partial = false;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	bh = head = folio_buffers(folio);
	blocksize = bh->b_size;

	block_start = 0;
	do {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = true;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus read_folio() for
	 * the next read().  Here we 'discover' whether the folio went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		folio_mark_uptodate(folio);
	return 0;
}
/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * The filesystem needs to handle block truncation upon failure.
 */
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, get_block_t *get_block)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status;

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, get_block);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}
EXPORT_SYMBOL(block_write_begin);
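
/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->write_begin built on block_write_begin(); "myfs_get_block" is a
 * hypothetical get_block_t that maps (or allocates, since create == 1
 * on the write path) one block.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, pagep,
 *					 myfs_get_block);
 *	}
 */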
int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = mapping->host;
	size_t start = pos - folio_pos(folio);

	if (unlikely(copied < len)) {
		/*
		 * The buffers that were written will now be uptodate, so
		 * we don't have to worry about a read_folio reading them
		 * and overwriting a partial write.  However if we have
		 * encountered a short write and only partially written
		 * into a buffer, it will not be marked uptodate, so a
		 * read_folio might come in and destroy our partial write.
		 *
		 * Do the simplest thing, and just treat any short write to a
		 * non uptodate folio as a zero-length write, and force the
		 * caller to redo the whole thing.
		 */
		if (!folio_test_uptodate(folio))
			copied = 0;

		folio_zero_new_buffers(folio, start+copied, start+len);
	}
	flush_dcache_folio(folio);

	/* This could be a short (even 0-length) commit */
	__block_commit_write(inode, folio, start, start + copied);

	return copied;
}
EXPORT_SYMBOL(block_write_end);
2280 int generic_write_end(struct file *file, struct address_space *mapping,
2281 loff_t pos, unsigned len, unsigned copied,
2282 struct page *page, void *fsdata)
2284 struct inode *inode = mapping->host;
2285 loff_t old_size = inode->i_size;
2286 bool i_size_changed = false;
2288 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2291 * No need to use i_size_read() here, the i_size cannot change under us
2292 * because we hold i_rwsem.
2294 * But it's important to update i_size while still holding page lock:
2295 * page writeout could otherwise come in and zero beyond i_size.
2297 if (pos + copied > inode->i_size) {
2298 i_size_write(inode, pos + copied);
2299 i_size_changed = true;
2306 pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
2314 mark_inode_dirty(inode);
2317 EXPORT_SYMBOL(generic_write_end);
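/*
 * Example (illustrative): the begin/end pair is typically wired into a
 * filesystem's address_space_operations together. Everything except the
 * exported helpers is a hypothetical name used for the sketch.
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.bmap			= myfs_bmap,
 *	};
 */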
 * block_is_partially_uptodate checks whether buffers within a folio are
 * uptodate or not.
 *
2323 * Returns true if all buffers which correspond to the specified part
2324 * of the folio are uptodate.
2326 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2328 unsigned block_start, block_end, blocksize;
2330 struct buffer_head *bh, *head;
2333 head = folio_buffers(folio);
2336 blocksize = head->b_size;
2337 to = min_t(unsigned, folio_size(folio) - from, count);
2339 if (from < blocksize && to > folio_size(folio) - blocksize)
2345 block_end = block_start + blocksize;
2346 if (block_end > from && block_start < to) {
2347 if (!buffer_uptodate(bh)) {
2351 if (block_end >= to)
2354 block_start = block_end;
2355 bh = bh->b_this_page;
2356 } while (bh != head);
2360 EXPORT_SYMBOL(block_is_partially_uptodate);
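/*
 * Example (illustrative): buffer-backed filesystems opt in from their
 * address_space_operations, e.g.
 *
 *	.is_partially_uptodate	= block_is_partially_uptodate,
 */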
2363 * Generic "read_folio" function for block devices that have the normal
2364 * get_block functionality. This is most of the block device filesystems.
2365 * Reads the folio asynchronously --- the unlock_buffer() and
2366 * set/clear_buffer_uptodate() functions propagate buffer state into the
2367 * folio once IO has completed.
2369 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2371 struct inode *inode = folio->mapping->host;
2372 sector_t iblock, lblock;
2373 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2374 unsigned int blocksize, bbits;
2376 int fully_mapped = 1;
2377 bool page_error = false;
2378 loff_t limit = i_size_read(inode);
	/*
	 * This is needed for ext4: fs-verity stores its metadata past
	 * i_size, so reads beyond i_size must be allowed on verity files.
	 */
2381 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2382 limit = inode->i_sb->s_maxbytes;
2384 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2386 head = folio_create_buffers(folio, inode, 0);
2387 blocksize = head->b_size;
2388 bbits = block_size_bits(blocksize);
2390 iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2391 lblock = (limit+blocksize-1) >> bbits;
2397 if (buffer_uptodate(bh))
2400 if (!buffer_mapped(bh)) {
2404 if (iblock < lblock) {
2405 WARN_ON(bh->b_size != blocksize);
2406 err = get_block(inode, iblock, bh, 0);
2408 folio_set_error(folio);
2412 if (!buffer_mapped(bh)) {
2413 folio_zero_range(folio, i * blocksize,
2416 set_buffer_uptodate(bh);
2420 * get_block() might have updated the buffer
2423 if (buffer_uptodate(bh))
2427 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2430 folio_set_mappedtodisk(folio);
2434 * All buffers are uptodate - we can set the folio uptodate
2435 * as well. But not if get_block() returned an error.
2438 folio_mark_uptodate(folio);
2439 folio_unlock(folio);
2443 /* Stage two: lock the buffers */
2444 for (i = 0; i < nr; i++) {
2447 mark_buffer_async_read(bh);
2451 * Stage 3: start the IO. Check for uptodateness
2452 * inside the buffer lock in case another process reading
2453 * the underlying blockdev brought it uptodate (the sct fix).
2455 for (i = 0; i < nr; i++) {
2457 if (buffer_uptodate(bh))
2458 end_buffer_async_read(bh, 1);
2460 submit_bh(REQ_OP_READ, bh);
2464 EXPORT_SYMBOL(block_read_full_folio);
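/*
 * Example (illustrative sketch): the typical ->read_folio built on this
 * helper; "myfs_get_block" is the filesystem's hypothetical get_block_t
 * callback.
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */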
2466 /* utility function for filesystems that need to do work on expanding
2467 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2468 * deal with the hole.
2470 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2472 struct address_space *mapping = inode->i_mapping;
2473 const struct address_space_operations *aops = mapping->a_ops;
2475 void *fsdata = NULL;
2478 err = inode_newsize_ok(inode, size);
2482 err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2486 err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2492 EXPORT_SYMBOL(generic_cont_expand_simple);
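/*
 * Example (illustrative sketch): an expanding truncate in a filesystem's
 * ->setattr might use this before committing the new size. The surrounding
 * code is hypothetical and error handling is abbreviated.
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */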
2494 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2495 loff_t pos, loff_t *bytes)
2497 struct inode *inode = mapping->host;
2498 const struct address_space_operations *aops = mapping->a_ops;
2499 unsigned int blocksize = i_blocksize(inode);
2501 void *fsdata = NULL;
2502 pgoff_t index, curidx;
2504 unsigned zerofrom, offset, len;
2507 index = pos >> PAGE_SHIFT;
2508 offset = pos & ~PAGE_MASK;
2510 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2511 zerofrom = curpos & ~PAGE_MASK;
2512 if (zerofrom & (blocksize-1)) {
2513 *bytes |= (blocksize-1);
2516 len = PAGE_SIZE - zerofrom;
2518 err = aops->write_begin(file, mapping, curpos, len,
2522 zero_user(page, zerofrom, len);
2523 err = aops->write_end(file, mapping, curpos, len, len,
2530 balance_dirty_pages_ratelimited(mapping);
2532 if (fatal_signal_pending(current)) {
2538 /* page covers the boundary, find the boundary offset */
2539 if (index == curidx) {
2540 zerofrom = curpos & ~PAGE_MASK;
		/* if we are expanding the file, the last block will be filled */
2542 if (offset <= zerofrom) {
2545 if (zerofrom & (blocksize-1)) {
2546 *bytes |= (blocksize-1);
2549 len = offset - zerofrom;
2551 err = aops->write_begin(file, mapping, curpos, len,
2555 zero_user(page, zerofrom, len);
2556 err = aops->write_end(file, mapping, curpos, len, len,
/*
 * For moronic filesystems that do not allow holes in files.
 * We may have to extend the file.
 */
2571 int cont_write_begin(struct file *file, struct address_space *mapping,
2572 loff_t pos, unsigned len,
2573 struct page **pagep, void **fsdata,
2574 get_block_t *get_block, loff_t *bytes)
2576 struct inode *inode = mapping->host;
2577 unsigned int blocksize = i_blocksize(inode);
2578 unsigned int zerofrom;
2581 err = cont_expand_zero(file, mapping, pos, bytes);
2585 zerofrom = *bytes & ~PAGE_MASK;
2586 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2587 *bytes |= (blocksize-1);
2591 return block_write_begin(mapping, pos, len, pagep, get_block);
2593 EXPORT_SYMBOL(cont_write_begin);
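/*
 * Example (illustrative sketch): a hole-less filesystem keeps a per-inode
 * "initialized up to here" mark and hands its address to cont_write_begin().
 * MYFS_I() and the mmu_private field are hypothetical here, but fat does
 * exactly this with MSDOS_I(inode)->mmu_private.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, pagep,
 *					fsdata, myfs_get_block,
 *					&MYFS_I(mapping->host)->mmu_private);
 *	}
 */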
2595 int block_commit_write(struct page *page, unsigned from, unsigned to)
2597 struct folio *folio = page_folio(page);
2598 struct inode *inode = folio->mapping->host;
2599 __block_commit_write(inode, folio, from, to);
2602 EXPORT_SYMBOL(block_commit_write);
2605 * block_page_mkwrite() is not allowed to change the file size as it gets
2606 * called from a page fault handler when a page is first dirtied. Hence we must
2607 * be careful to check for EOF conditions here. We set the page up correctly
2608 * for a written page which means we get ENOSPC checking when writing into
2609 * holes and correct delalloc and unwritten extent mapping on filesystems that
2610 * support these features.
2612 * We are not allowed to take the i_mutex here so we have to play games to
2613 * protect against truncate races as the page could now be beyond EOF. Because
2614 * truncate writes the inode size before removing pages, once we have the
2615 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 *
2619 * Direct callers of this function should protect against filesystem freezing
2620 * using sb_start_pagefault() - sb_end_pagefault() functions.
2622 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2623 get_block_t get_block)
2625 struct folio *folio = page_folio(vmf->page);
2626 struct inode *inode = file_inode(vma->vm_file);
2632 size = i_size_read(inode);
2633 if ((folio->mapping != inode->i_mapping) ||
2634 (folio_pos(folio) >= size)) {
2635 /* We overload EFAULT to mean page got truncated */
2640 end = folio_size(folio);
2641 /* folio is wholly or partially inside EOF */
2642 if (folio_pos(folio) + end > size)
2643 end = size - folio_pos(folio);
2645 ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2647 ret = __block_commit_write(inode, folio, 0, end);
2649 if (unlikely(ret < 0))
2651 folio_mark_dirty(folio);
2652 folio_wait_stable(folio);
2655 folio_unlock(folio);
2658 EXPORT_SYMBOL(block_page_mkwrite);
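/*
 * Example (illustrative sketch): a direct caller honouring the freeze
 * protection requirement described above. block_page_mkwrite_return()
 * converts the error to a vm_fault_t; "myfs_get_block" is hypothetical.
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int err;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(inode->i_sb);
 *		return block_page_mkwrite_return(err);
 *	}
 */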
2660 int block_truncate_page(struct address_space *mapping,
2661 loff_t from, get_block_t *get_block)
2663 pgoff_t index = from >> PAGE_SHIFT;
2666 size_t offset, length, pos;
2667 struct inode *inode = mapping->host;
2668 struct folio *folio;
2669 struct buffer_head *bh;
2672 blocksize = i_blocksize(inode);
2673 length = from & (blocksize - 1);
2675 /* Block boundary? Nothing to do */
2679 length = blocksize - length;
2680 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2682 folio = filemap_grab_folio(mapping, index);
2684 return PTR_ERR(folio);
2686 bh = folio_buffers(folio);
2688 folio_create_empty_buffers(folio, blocksize, 0);
2689 bh = folio_buffers(folio);
2692 /* Find the buffer that contains "offset" */
2693 offset = offset_in_folio(folio, from);
2695 while (offset >= pos) {
2696 bh = bh->b_this_page;
2701 if (!buffer_mapped(bh)) {
2702 WARN_ON(bh->b_size != blocksize);
2703 err = get_block(inode, iblock, bh, 0);
2706 /* unmapped? It's a hole - nothing to do */
2707 if (!buffer_mapped(bh))
2711 /* Ok, it's mapped. Make sure it's up-to-date */
2712 if (folio_test_uptodate(folio))
2713 set_buffer_uptodate(bh);
2715 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2716 err = bh_read(bh, 0);
2717 /* Uhhuh. Read error. Complain and punt. */
2722 folio_zero_range(folio, offset, length);
2723 mark_buffer_dirty(bh);
2726 folio_unlock(folio);
2731 EXPORT_SYMBOL(block_truncate_page);
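/*
 * Example (illustrative sketch): a filesystem's truncate path zeroes the
 * partial block at the new EOF before releasing on-disk blocks. Everything
 * around the call is hypothetical.
 *
 *	static void myfs_truncate(struct inode *inode)
 *	{
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *				    myfs_get_block);
 *		...free the on-disk blocks beyond the new i_size...
 *	}
 */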
2734 * The generic ->writepage function for buffer-backed address_spaces
2736 int block_write_full_page(struct page *page, get_block_t *get_block,
2737 struct writeback_control *wbc)
2739 struct folio *folio = page_folio(page);
2740 struct inode * const inode = folio->mapping->host;
2741 loff_t i_size = i_size_read(inode);
2743 /* Is the folio fully inside i_size? */
2744 if (folio_pos(folio) + folio_size(folio) <= i_size)
2745 return __block_write_full_folio(inode, folio, get_block, wbc,
2746 end_buffer_async_write);
2748 /* Is the folio fully outside i_size? (truncate in progress) */
2749 if (folio_pos(folio) >= i_size) {
2750 folio_unlock(folio);
2751 return 0; /* don't care */
2755 * The folio straddles i_size. It must be zeroed out on each and every
2756 * writepage invocation because it may be mmapped. "A file is mapped
2757 * in multiples of the page size. For a file that is not a multiple of
2758 * the page size, the remaining memory is zeroed when mapped, and
2759 * writes to that region are not written out to the file."
2761 folio_zero_segment(folio, offset_in_folio(folio, i_size),
2763 return __block_write_full_folio(inode, folio, get_block, wbc,
2764 end_buffer_async_write);
2766 EXPORT_SYMBOL(block_write_full_page);
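/*
 * Example (illustrative sketch): the usual ->writepage for such filesystems
 * is a direct wrapper; "myfs_get_block" is hypothetical.
 *
 *	static int myfs_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */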
2768 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2769 get_block_t *get_block)
2771 struct inode *inode = mapping->host;
2772 struct buffer_head tmp = {
2773 .b_size = i_blocksize(inode),
2776 get_block(inode, block, &tmp, 0);
2777 return tmp.b_blocknr;
2779 EXPORT_SYMBOL(generic_block_bmap);
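/*
 * Example (illustrative sketch): wiring this up as the ->bmap method;
 * "myfs_get_block" is hypothetical.
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *			sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */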
2781 static void end_bio_bh_io_sync(struct bio *bio)
2783 struct buffer_head *bh = bio->bi_private;
2785 if (unlikely(bio_flagged(bio, BIO_QUIET)))
2786 set_bit(BH_Quiet, &bh->b_state);
2788 bh->b_end_io(bh, !bio->bi_status);
2792 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2793 struct writeback_control *wbc)
2795 const enum req_op op = opf & REQ_OP_MASK;
2798 BUG_ON(!buffer_locked(bh));
2799 BUG_ON(!buffer_mapped(bh));
2800 BUG_ON(!bh->b_end_io);
2801 BUG_ON(buffer_delay(bh));
2802 BUG_ON(buffer_unwritten(bh));
2805 * Only clear out a write error when rewriting
2807 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2808 clear_buffer_write_io_error(bh);
2810 if (buffer_meta(bh))
2812 if (buffer_prio(bh))
2815 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2817 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2819 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2821 __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2823 bio->bi_end_io = end_bio_bh_io_sync;
2824 bio->bi_private = bh;
2826 /* Take care of bh's that straddle the end of the device */
2830 wbc_init_bio(wbc, bio);
2831 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2837 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2839 submit_bh_wbc(opf, bh, NULL);
2841 EXPORT_SYMBOL(submit_bh);
2843 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2846 if (!test_clear_buffer_dirty(bh)) {
2850 bh->b_end_io = end_buffer_write_sync;
2852 submit_bh(REQ_OP_WRITE | op_flags, bh);
2854 EXPORT_SYMBOL(write_dirty_buffer);
/*
 * For a data-integrity writeout, we need to wait for any in-progress I/O
 * to complete, then start new I/O and wait for that too. The caller must
 * hold a reference on the buffer.
 */
2861 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2863 WARN_ON(atomic_read(&bh->b_count) < 1);
2865 if (test_clear_buffer_dirty(bh)) {
2867 * The bh should be mapped, but it might not be if the
2868 * device was hot-removed. Not much we can do but fail the I/O.
2870 if (!buffer_mapped(bh)) {
2876 bh->b_end_io = end_buffer_write_sync;
2877 submit_bh(REQ_OP_WRITE | op_flags, bh);
2879 if (!buffer_uptodate(bh))
2886 EXPORT_SYMBOL(__sync_dirty_buffer);
2888 int sync_dirty_buffer(struct buffer_head *bh)
2890 return __sync_dirty_buffer(bh, REQ_SYNC);
2892 EXPORT_SYMBOL(sync_dirty_buffer);
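/*
 * Example (illustrative sketch): the classic synchronous read-modify-write
 * of a metadata block. "sb" and "blocknr" are hypothetical; sb_bread()
 * takes the buffer reference that sync_dirty_buffer() requires.
 *
 *	bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	...modify bh->b_data...
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */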
2895 * try_to_free_buffers() checks if all the buffers on this particular folio
2896 * are unused, and releases them if so.
2898 * Exclusion against try_to_free_buffers may be obtained by either
2899 * locking the folio or by holding its mapping's private_lock.
2901 * If the folio is dirty but all the buffers are clean then we need to
2902 * be sure to mark the folio clean as well. This is because the folio
2903 * may be against a block device, and a later reattachment of buffers
2904 * to a dirty folio will set *all* buffers dirty. Which would corrupt
2905 * filesystem data on the same device.
2907 * The same applies to regular filesystem folios: if all the buffers are
2908 * clean then we set the folio clean and proceed. To do that, we require
 * total exclusion from block_dirty_folio(). That is obtained with
 * private_lock.
 *
2912 * try_to_free_buffers() is non-blocking.
2914 static inline int buffer_busy(struct buffer_head *bh)
2916 return atomic_read(&bh->b_count) |
2917 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2921 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2923 struct buffer_head *head = folio_buffers(folio);
2924 struct buffer_head *bh;
2928 if (buffer_busy(bh))
2930 bh = bh->b_this_page;
2931 } while (bh != head);
2934 struct buffer_head *next = bh->b_this_page;
2936 if (bh->b_assoc_map)
2937 __remove_assoc_queue(bh);
2939 } while (bh != head);
2940 *buffers_to_free = head;
2941 folio_detach_private(folio);
2947 bool try_to_free_buffers(struct folio *folio)
2949 struct address_space * const mapping = folio->mapping;
2950 struct buffer_head *buffers_to_free = NULL;
2953 BUG_ON(!folio_test_locked(folio));
2954 if (folio_test_writeback(folio))
2957 if (mapping == NULL) { /* can this still happen? */
2958 ret = drop_buffers(folio, &buffers_to_free);
2962 spin_lock(&mapping->private_lock);
2963 ret = drop_buffers(folio, &buffers_to_free);
2966 * If the filesystem writes its buffers by hand (eg ext3)
2967 * then we can have clean buffers against a dirty folio. We
2968 * clean the folio here; otherwise the VM will never notice
2969 * that the filesystem did any IO at all.
2971 * Also, during truncate, discard_buffer will have marked all
	 * the folio's buffers clean. We discover that here and clean
	 * the folio also.
	 *
2975 * private_lock must be held over this entire operation in order
2976 * to synchronise against block_dirty_folio and prevent the
2977 * dirty bit from being lost.
2980 folio_cancel_dirty(folio);
2981 spin_unlock(&mapping->private_lock);
2983 if (buffers_to_free) {
2984 struct buffer_head *bh = buffers_to_free;
2987 struct buffer_head *next = bh->b_this_page;
2988 free_buffer_head(bh);
2990 } while (bh != buffers_to_free);
2994 EXPORT_SYMBOL(try_to_free_buffers);
2997 * Buffer-head allocation
2999 static struct kmem_cache *bh_cachep __read_mostly;
3002 * Once the number of bh's in the machine exceeds this level, we start
3003 * stripping them in writeback.
3005 static unsigned long max_buffer_heads;
3007 int buffer_heads_over_limit;
3009 struct bh_accounting {
3010 int nr; /* Number of live bh's */
3011 int ratelimit; /* Limit cacheline bouncing */
3014 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3016 static void recalc_bh_state(void)
3021 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3023 __this_cpu_write(bh_accounting.ratelimit, 0);
3024 for_each_online_cpu(i)
3025 tot += per_cpu(bh_accounting, i).nr;
3026 buffer_heads_over_limit = (tot > max_buffer_heads);
3029 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3031 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3033 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3034 spin_lock_init(&ret->b_uptodate_lock);
3036 __this_cpu_inc(bh_accounting.nr);
3042 EXPORT_SYMBOL(alloc_buffer_head);
3044 void free_buffer_head(struct buffer_head *bh)
3046 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3047 kmem_cache_free(bh_cachep, bh);
3049 __this_cpu_dec(bh_accounting.nr);
3053 EXPORT_SYMBOL(free_buffer_head);
3055 static int buffer_exit_cpu_dead(unsigned int cpu)
3058 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3060 for (i = 0; i < BH_LRU_SIZE; i++) {
3064 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3065 per_cpu(bh_accounting, cpu).nr = 0;
3070 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3071 * @bh: struct buffer_head
 * Return true if the buffer is up-to-date; otherwise lock the buffer
 * and return false.
 */
3076 int bh_uptodate_or_lock(struct buffer_head *bh)
3078 if (!buffer_uptodate(bh)) {
3080 if (!buffer_uptodate(bh))
3086 EXPORT_SYMBOL(bh_uptodate_or_lock);
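/*
 * Example (illustrative): the common pattern pairs this with __bh_read(),
 * which consumes the lock taken here - essentially what the bh_read()
 * wrapper in buffer_head.h does.
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = __bh_read(bh, 0, true);
 *		if (err)
 *			return err;
 *	}
 */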
3089 * __bh_read - Submit read for a locked buffer
3090 * @bh: struct buffer_head
 * @op_flags: extra REQ_* flags to OR together with REQ_OP_READ
 * @wait: wait for the read to finish
 *
 * Returns zero on success, or if @wait is false; -EIO if the read fails.
 */
3096 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3100 BUG_ON(!buffer_locked(bh));
3103 bh->b_end_io = end_buffer_read_sync;
3104 submit_bh(REQ_OP_READ | op_flags, bh);
3107 if (!buffer_uptodate(bh))
3112 EXPORT_SYMBOL(__bh_read);
3115 * __bh_read_batch - Submit read for a batch of unlocked buffers
 * @nr: number of buffers in the batch
 * @bhs: the batch of struct buffer_head
 * @op_flags: extra REQ_* flags to OR together with REQ_OP_READ
 * @force_lock: if set, block until each buffer's lock is acquired;
 *	otherwise skip any buffer that cannot be locked without blocking.
 *
 * Completion is reported through each buffer's end_io handler; the
 * function itself returns no value.
 */
3124 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3125 blk_opf_t op_flags, bool force_lock)
3129 for (i = 0; i < nr; i++) {
3130 struct buffer_head *bh = bhs[i];
3132 if (buffer_uptodate(bh))
3138 if (!trylock_buffer(bh))
3141 if (buffer_uptodate(bh)) {
3146 bh->b_end_io = end_buffer_read_sync;
3148 submit_bh(REQ_OP_READ | op_flags, bh);
3151 EXPORT_SYMBOL(__bh_read_batch);
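/*
 * Example (illustrative): the wrappers in buffer_head.h cover the two
 * common cases. A blocking batch read is
 *
 *	bh_read_batch(nr, bhs);
 *
 * which forwards here with force_lock set, while opportunistic readahead is
 *
 *	bh_readahead_batch(nr, bhs, REQ_RAHEAD);
 *
 * which forwards with force_lock clear, so buffers that are already locked
 * are simply skipped.
 */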
3153 void __init buffer_init(void)
3155 unsigned long nrpages;
3158 bh_cachep = kmem_cache_create("buffer_head",
3159 sizeof(struct buffer_head), 0,
3160 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3165 * Limit the bh occupancy to 10% of ZONE_NORMAL
3167 nrpages = (nr_free_buffer_pages() * 10) / 100;
3168 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3169 ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3170 NULL, buffer_exit_cpu_dead);