1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/iomap.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/capability.h>
29 #include <linux/blkdev.h>
30 #include <linux/file.h>
31 #include <linux/quotaops.h>
32 #include <linux/highmem.h>
33 #include <linux/export.h>
34 #include <linux/backing-dev.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/task_io_accounting_ops.h>
40 #include <linux/bio.h>
41 #include <linux/notifier.h>
42 #include <linux/cpu.h>
43 #include <linux/bitops.h>
44 #include <linux/mpage.h>
45 #include <linux/bit_spinlock.h>
46 #include <trace/events/block.h>
47
48 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
49 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
50                          unsigned long bio_flags,
51                          struct writeback_control *wbc);
52
53 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
54
55 void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
56 {
57         bh->b_end_io = handler;
58         bh->b_private = private;
59 }
60 EXPORT_SYMBOL(init_buffer);
61
62 inline void touch_buffer(struct buffer_head *bh)
63 {
64         trace_block_touch_buffer(bh);
65         mark_page_accessed(bh->b_page);
66 }
67 EXPORT_SYMBOL(touch_buffer);
68
69 void __lock_buffer(struct buffer_head *bh)
70 {
71         wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
72 }
73 EXPORT_SYMBOL(__lock_buffer);
74
75 void unlock_buffer(struct buffer_head *bh)
76 {
77         clear_bit_unlock(BH_Lock, &bh->b_state);
78         smp_mb__after_atomic();
79         wake_up_bit(&bh->b_state, BH_Lock);
80 }
81 EXPORT_SYMBOL(unlock_buffer);
82
83 /*
84  * Returns whether the page has dirty or writeback buffers. If all the buffers
85  * are unlocked and clean then the PageDirty information is stale. If
86  * any of the buffers are locked, it is assumed they are locked for IO.
87  */
88 void buffer_check_dirty_writeback(struct page *page,
89                                      bool *dirty, bool *writeback)
90 {
91         struct buffer_head *head, *bh;
92         *dirty = false;
93         *writeback = false;
94
95         BUG_ON(!PageLocked(page));
96
97         if (!page_has_buffers(page))
98                 return;
99
100         if (PageWriteback(page))
101                 *writeback = true;
102
103         head = page_buffers(page);
104         bh = head;
105         do {
106                 if (buffer_locked(bh))
107                         *writeback = true;
108
109                 if (buffer_dirty(bh))
110                         *dirty = true;
111
112                 bh = bh->b_this_page;
113         } while (bh != head);
114 }
115 EXPORT_SYMBOL(buffer_check_dirty_writeback);
116
117 /*
118  * Block until a buffer comes unlocked.  This doesn't stop it
119  * from becoming locked again - you have to lock it yourself
120  * if you want to preserve its state.
121  */
122 void __wait_on_buffer(struct buffer_head * bh)
123 {
124         wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
125 }
126 EXPORT_SYMBOL(__wait_on_buffer);
127
128 static void
129 __clear_page_buffers(struct page *page)
130 {
131         ClearPagePrivate(page);
132         set_page_private(page, 0);
133         put_page(page);
134 }
135
136 static void buffer_io_error(struct buffer_head *bh, char *msg)
137 {
138         if (!test_bit(BH_Quiet, &bh->b_state))
139                 printk_ratelimited(KERN_ERR
140                         "Buffer I/O error on dev %pg, logical block %llu%s\n",
141                         bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
142 }
143
144 /*
145  * End-of-IO handler helper function which does not touch the bh after
146  * unlocking it.
147  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
148  * a race there is benign: unlock_buffer() only uses the bh's address for
149  * hashing after unlocking the buffer, so it doesn't actually touch the bh
150  * itself.
151  */
152 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
153 {
154         if (uptodate) {
155                 set_buffer_uptodate(bh);
156         } else {
157                 /* This happens, due to failed read-ahead attempts. */
158                 clear_buffer_uptodate(bh);
159         }
160         unlock_buffer(bh);
161 }
162
163 /*
164  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
165  * unlock the buffer. This is what ll_rw_block uses too.
166  */
167 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
168 {
169         __end_buffer_read_notouch(bh, uptodate);
170         put_bh(bh);
171 }
172 EXPORT_SYMBOL(end_buffer_read_sync);
173
174 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
175 {
176         if (uptodate) {
177                 set_buffer_uptodate(bh);
178         } else {
179                 buffer_io_error(bh, ", lost sync page write");
180                 set_buffer_write_io_error(bh);
181                 clear_buffer_uptodate(bh);
182         }
183         unlock_buffer(bh);
184         put_bh(bh);
185 }
186 EXPORT_SYMBOL(end_buffer_write_sync);
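/*
 * A minimal sketch of the calling convention the synchronous end_io
 * handlers above assume (compare __bread_slow() further down): lock the
 * buffer, take a reference for the I/O, point b_end_io at
 * end_buffer_read_sync and submit.  The handler unlocks the buffer and
 * drops the I/O reference on completion, so the caller only waits and
 * checks the uptodate bit.  The helper name is illustrative.
 */
static struct buffer_head *read_bh_sync_example(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;                      /* already valid, no I/O needed */
        }
        get_bh(bh);                             /* reference owned by the I/O */
        bh->b_end_io = end_buffer_read_sync;    /* unlocks + put_bh() when done */
        submit_bh(REQ_OP_READ, 0, bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        brelse(bh);                             /* the read failed */
        return NULL;
}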
187
188 /*
189  * Various filesystems appear to want __find_get_block to be non-blocking.
190  * But it's the page lock which protects the buffers.  To get around this,
191  * we get exclusion from try_to_free_buffers with the blockdev mapping's
192  * private_lock.
193  *
194  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
195  * may be quite high.  This code could TryLock the page, and if that
196  * succeeds, there is no need to take private_lock. (But if
197  * private_lock is contended then so is mapping->tree_lock).
198  */
199 static struct buffer_head *
200 __find_get_block_slow(struct block_device *bdev, sector_t block)
201 {
202         struct inode *bd_inode = bdev->bd_inode;
203         struct address_space *bd_mapping = bd_inode->i_mapping;
204         struct buffer_head *ret = NULL;
205         pgoff_t index;
206         struct buffer_head *bh;
207         struct buffer_head *head;
208         struct page *page;
209         int all_mapped = 1;
210         static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
211
212         index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
213         page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
214         if (!page)
215                 goto out;
216
217         spin_lock(&bd_mapping->private_lock);
218         if (!page_has_buffers(page))
219                 goto out_unlock;
220         head = page_buffers(page);
221         bh = head;
222         do {
223                 if (!buffer_mapped(bh))
224                         all_mapped = 0;
225                 else if (bh->b_blocknr == block) {
226                         ret = bh;
227                         get_bh(bh);
228                         goto out_unlock;
229                 }
230                 bh = bh->b_this_page;
231         } while (bh != head);
232
233         /* we might be here because some of the buffers on this page are
234          * not mapped.  This is due to various races between
235          * file io on the block device and getblk.  It gets dealt with
236  * elsewhere; don't buffer_error if we had some unmapped buffers
237          */
238         ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
239         if (all_mapped && __ratelimit(&last_warned)) {
240                 printk("__find_get_block_slow() failed. block=%llu, "
241                        "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
242                        "device %pg blocksize: %d\n",
243                        (unsigned long long)block,
244                        (unsigned long long)bh->b_blocknr,
245                        bh->b_state, bh->b_size, bdev,
246                        1 << bd_inode->i_blkbits);
247         }
248 out_unlock:
249         spin_unlock(&bd_mapping->private_lock);
250         put_page(page);
251 out:
252         return ret;
253 }
254
255 /*
256  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
257  */
258 static void free_more_memory(void)
259 {
260         struct zoneref *z;
261         int nid;
262
263         wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
264         yield();
265
266         for_each_online_node(nid) {
267
268                 z = first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
269                                                 gfp_zone(GFP_NOFS), NULL);
270                 if (z->zone)
271                         try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
272                                                 GFP_NOFS, NULL);
273         }
274 }
275
276 /*
277  * I/O completion handler for block_read_full_page() - pages
278  * which come unlocked at the end of I/O.
279  */
280 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
281 {
282         unsigned long flags;
283         struct buffer_head *first;
284         struct buffer_head *tmp;
285         struct page *page;
286         int page_uptodate = 1;
287
288         BUG_ON(!buffer_async_read(bh));
289
290         page = bh->b_page;
291         if (uptodate) {
292                 set_buffer_uptodate(bh);
293         } else {
294                 clear_buffer_uptodate(bh);
295                 buffer_io_error(bh, ", async page read");
296                 SetPageError(page);
297         }
298
299         /*
300          * Be _very_ careful from here on. Bad things can happen if
301          * two buffer heads end IO at almost the same time and both
302          * decide that the page is now completely done.
303          */
304         first = page_buffers(page);
305         local_irq_save(flags);
306         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
307         clear_buffer_async_read(bh);
308         unlock_buffer(bh);
309         tmp = bh;
310         do {
311                 if (!buffer_uptodate(tmp))
312                         page_uptodate = 0;
313                 if (buffer_async_read(tmp)) {
314                         BUG_ON(!buffer_locked(tmp));
315                         goto still_busy;
316                 }
317                 tmp = tmp->b_this_page;
318         } while (tmp != bh);
319         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
320         local_irq_restore(flags);
321
322         /*
323          * If none of the buffers had errors and they are all
324          * uptodate then we can set the page uptodate.
325          */
326         if (page_uptodate && !PageError(page))
327                 SetPageUptodate(page);
328         unlock_page(page);
329         return;
330
331 still_busy:
332         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
333         local_irq_restore(flags);
334         return;
335 }
336
337 /*
338  * Completion handler for block_write_full_page() - pages which are unlocked
339  * during I/O, and which have PageWriteback cleared upon I/O completion.
340  */
341 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
342 {
343         unsigned long flags;
344         struct buffer_head *first;
345         struct buffer_head *tmp;
346         struct page *page;
347
348         BUG_ON(!buffer_async_write(bh));
349
350         page = bh->b_page;
351         if (uptodate) {
352                 set_buffer_uptodate(bh);
353         } else {
354                 buffer_io_error(bh, ", lost async page write");
355                 mapping_set_error(page->mapping, -EIO);
356                 set_buffer_write_io_error(bh);
357                 clear_buffer_uptodate(bh);
358                 SetPageError(page);
359         }
360
361         first = page_buffers(page);
362         local_irq_save(flags);
363         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
364
365         clear_buffer_async_write(bh);
366         unlock_buffer(bh);
367         tmp = bh->b_this_page;
368         while (tmp != bh) {
369                 if (buffer_async_write(tmp)) {
370                         BUG_ON(!buffer_locked(tmp));
371                         goto still_busy;
372                 }
373                 tmp = tmp->b_this_page;
374         }
375         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
376         local_irq_restore(flags);
377         end_page_writeback(page);
378         return;
379
380 still_busy:
381         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
382         local_irq_restore(flags);
383         return;
384 }
385 EXPORT_SYMBOL(end_buffer_async_write);
386
387 /*
388  * If a page's buffers are under async read-in (end_buffer_async_read
389  * completion) then there is a possibility that another thread of
390  * control could lock one of the buffers after it has completed
391  * but while some of the other buffers have not completed.  This
392  * locked buffer would confuse end_buffer_async_read() into not unlocking
393  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
394  * that this buffer is not under async I/O.
395  *
396  * The page comes unlocked when it has no locked buffer_async buffers
397  * left.
398  *
399  * PageLocked prevents anyone from starting new async I/O against any of
400  * the buffers.
401  *
402  * PageWriteback is used to prevent simultaneous writeout of the same
403  * page.
404  *
405  * PageLocked prevents anyone from starting writeback of a page which is
406  * under read I/O (PageWriteback is only ever set against a locked page).
407  */
408 static void mark_buffer_async_read(struct buffer_head *bh)
409 {
410         bh->b_end_io = end_buffer_async_read;
411         set_buffer_async_read(bh);
412 }
413
414 static void mark_buffer_async_write_endio(struct buffer_head *bh,
415                                           bh_end_io_t *handler)
416 {
417         bh->b_end_io = handler;
418         set_buffer_async_write(bh);
419 }
420
421 void mark_buffer_async_write(struct buffer_head *bh)
422 {
423         mark_buffer_async_write_endio(bh, end_buffer_async_write);
424 }
425 EXPORT_SYMBOL(mark_buffer_async_write);
426
427
428 /*
429  * fs/buffer.c contains helper functions for buffer-backed address space's
430  * fsync functions.  A common requirement for buffer-based filesystems is
431  * that certain data from the backing blockdev needs to be written out for
432  * a successful fsync().  For example, ext2 indirect blocks need to be
433  * written back and waited upon before fsync() returns.
434  *
435  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
436  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
437  * management of a list of dependent buffers at ->i_mapping->private_list.
438  *
439  * Locking is a little subtle: try_to_free_buffers() will remove buffers
440  * from their controlling inode's queue when they are being freed.  But
441  * try_to_free_buffers() will be operating against the *blockdev* mapping
442  * at the time, not against the S_ISREG file which depends on those buffers.
443  * So the locking for private_list is via the private_lock in the address_space
444  * which backs the buffers.  Which is different from the address_space 
445  * against which the buffers are listed.  So for a particular address_space,
446  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
447  * mapping->private_list will always be protected by the backing blockdev's
448  * ->private_lock.
449  *
450  * Which introduces a requirement: all buffers on an address_space's
451  * ->private_list must be from the same address_space: the blockdev's.
452  *
453  * address_spaces which do not place buffers at ->private_list via these
454  * utility functions are free to use private_lock and private_list for
455  * whatever they want.  The only requirement is that list_empty(private_list)
456  * be true at clear_inode() time.
457  *
458  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
459  * filesystems should do that.  invalidate_inode_buffers() should just go
460  * BUG_ON(!list_empty).
461  *
462  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
463  * take an address_space, not an inode.  And it should be called
464  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
465  * queued up.
466  *
467  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
468  * list if it is already on a list.  Because if the buffer is on a list,
469  * it *must* already be on the right one.  If not, the filesystem is being
470  * silly.  This will save a ton of locking.  But first we have to ensure
471  * that buffers are taken *off* the old inode's list when they are freed
472  * (presumably in truncate).  That requires careful auditing of all
473  * filesystems (do it inside bforget()).  It could also be done by bringing
474  * b_inode back.
475  */
476
477 /*
478  * The buffer's backing address_space's private_lock must be held
479  */
480 static void __remove_assoc_queue(struct buffer_head *bh)
481 {
482         list_del_init(&bh->b_assoc_buffers);
483         WARN_ON(!bh->b_assoc_map);
484         if (buffer_write_io_error(bh))
485                 set_bit(AS_EIO, &bh->b_assoc_map->flags);
486         bh->b_assoc_map = NULL;
487 }
488
489 int inode_has_buffers(struct inode *inode)
490 {
491         return !list_empty(&inode->i_data.private_list);
492 }
493
494 /*
495  * osync is designed to support O_SYNC io.  It waits synchronously for
496  * all already-submitted IO to complete, but does not queue any new
497  * writes to the disk.
498  *
499  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
500  * you dirty the buffers, and then use osync_inode_buffers to wait for
501  * completion.  Any other dirty buffers which are not yet queued for
502  * write will not be flushed to disk by the osync.
503  */
504 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
505 {
506         struct buffer_head *bh;
507         struct list_head *p;
508         int err = 0;
509
510         spin_lock(lock);
511 repeat:
512         list_for_each_prev(p, list) {
513                 bh = BH_ENTRY(p);
514                 if (buffer_locked(bh)) {
515                         get_bh(bh);
516                         spin_unlock(lock);
517                         wait_on_buffer(bh);
518                         if (!buffer_uptodate(bh))
519                                 err = -EIO;
520                         brelse(bh);
521                         spin_lock(lock);
522                         goto repeat;
523                 }
524         }
525         spin_unlock(lock);
526         return err;
527 }
528
529 static void do_thaw_one(struct super_block *sb, void *unused)
530 {
531         while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
532                 printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
533 }
534
535 static void do_thaw_all(struct work_struct *work)
536 {
537         iterate_supers(do_thaw_one, NULL);
538         kfree(work);
539         printk(KERN_WARNING "Emergency Thaw complete\n");
540 }
541
542 /**
543  * emergency_thaw_all -- forcibly thaw every frozen filesystem
544  *
545  * Used for emergency unfreeze of all filesystems via SysRq
546  */
547 void emergency_thaw_all(void)
548 {
549         struct work_struct *work;
550
551         work = kmalloc(sizeof(*work), GFP_ATOMIC);
552         if (work) {
553                 INIT_WORK(work, do_thaw_all);
554                 schedule_work(work);
555         }
556 }
557
558 /**
559  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
560  * @mapping: the mapping which wants those buffers written
561  *
562  * Starts I/O against the buffers at mapping->private_list, and waits upon
563  * that I/O.
564  *
565  * Basically, this is a convenience function for fsync().
566  * @mapping is a file or directory which needs those buffers to be written for
567  * a successful fsync().
568  */
569 int sync_mapping_buffers(struct address_space *mapping)
570 {
571         struct address_space *buffer_mapping = mapping->private_data;
572
573         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
574                 return 0;
575
576         return fsync_buffers_list(&buffer_mapping->private_lock,
577                                         &mapping->private_list);
578 }
579 EXPORT_SYMBOL(sync_mapping_buffers);
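/*
 * A minimal sketch of how a simple buffer-backed filesystem might use
 * sync_mapping_buffers() from its ->fsync() method, assuming the usual
 * pattern (as ext2 does): write and wait on the file's own pages first,
 * then push out the "associated" metadata buffers that
 * mark_buffer_dirty_inode() queued on ->private_list.  The function name
 * is illustrative; a real filesystem would also write back its inode here.
 */
static int fsync_example(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        struct address_space *mapping = file->f_mapping;
        int ret, err;

        ret = filemap_write_and_wait_range(mapping, start, end);

        /* Flush the metadata buffers associated with this mapping. */
        err = sync_mapping_buffers(mapping);
        if (!ret)
                ret = err;
        return ret;
}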
580
581 /*
582  * Called when we've recently written block `bblock', and it is known that
583  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
584  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
585  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
586  */
587 void write_boundary_block(struct block_device *bdev,
588                         sector_t bblock, unsigned blocksize)
589 {
590         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
591         if (bh) {
592                 if (buffer_dirty(bh))
593                         ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
594                 put_bh(bh);
595         }
596 }
597
598 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
599 {
600         struct address_space *mapping = inode->i_mapping;
601         struct address_space *buffer_mapping = bh->b_page->mapping;
602
603         mark_buffer_dirty(bh);
604         if (!mapping->private_data) {
605                 mapping->private_data = buffer_mapping;
606         } else {
607                 BUG_ON(mapping->private_data != buffer_mapping);
608         }
609         if (!bh->b_assoc_map) {
610                 spin_lock(&buffer_mapping->private_lock);
611                 list_move_tail(&bh->b_assoc_buffers,
612                                 &mapping->private_list);
613                 bh->b_assoc_map = mapping;
614                 spin_unlock(&buffer_mapping->private_lock);
615         }
616 }
617 EXPORT_SYMBOL(mark_buffer_dirty_inode);
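/*
 * A short sketch of the pairing this function is designed for: when a
 * filesystem modifies a metadata block (an indirect block, say) on behalf
 * of a regular file, it dirties the buffer *and* hangs it on that file's
 * ->private_list so that a later fsync() of the file (via
 * sync_mapping_buffers() above) writes the metadata out too.  The helper
 * and parameter names are illustrative.
 */
static void update_indirect_example(struct inode *inode,
                                    struct buffer_head *bh,
                                    __le32 *slot, u32 new_block)
{
        /* Modify the block under the filesystem's own locking. */
        *slot = cpu_to_le32(new_block);
        /* Dirty the buffer and queue it on inode->i_mapping->private_list. */
        mark_buffer_dirty_inode(bh, inode);
}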
618
619 /*
620  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
621  * dirty.
622  *
623  * If warn is true, then emit a warning if the page is not uptodate and has
624  * not been truncated.
625  *
626  * The caller must hold lock_page_memcg().
627  */
628 static void __set_page_dirty(struct page *page, struct address_space *mapping,
629                              int warn)
630 {
631         unsigned long flags;
632
633         spin_lock_irqsave(&mapping->tree_lock, flags);
634         if (page->mapping) {    /* Race with truncate? */
635                 WARN_ON_ONCE(warn && !PageUptodate(page));
636                 account_page_dirtied(page, mapping);
637                 radix_tree_tag_set(&mapping->page_tree,
638                                 page_index(page), PAGECACHE_TAG_DIRTY);
639         }
640         spin_unlock_irqrestore(&mapping->tree_lock, flags);
641 }
642
643 /*
644  * Add a page to the dirty page list.
645  *
646  * It is a sad fact of life that this function is called from several places
647  * deeply under spinlocking.  It may not sleep.
648  *
649  * If the page has buffers, the uptodate buffers are set dirty, to preserve
650  * dirty-state coherency between the page and the buffers.  If the page does
651  * not have buffers then when they are later attached they will all be set
652  * dirty.
653  *
654  * The buffers are dirtied before the page is dirtied.  There's a small race
655  * window in which a writepage caller may see the page cleanness but not the
656  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
657  * before the buffers, a concurrent writepage caller could clear the page dirty
658  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
659  * page on the dirty page list.
660  *
661  * We use private_lock to lock against try_to_free_buffers while using the
662  * page's buffer list.  Also use this to protect against clean buffers being
663  * added to the page after it was set dirty.
664  *
665  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
666  * address_space though.
667  */
668 int __set_page_dirty_buffers(struct page *page)
669 {
670         int newly_dirty;
671         struct address_space *mapping = page_mapping(page);
672
673         if (unlikely(!mapping))
674                 return !TestSetPageDirty(page);
675
676         spin_lock(&mapping->private_lock);
677         if (page_has_buffers(page)) {
678                 struct buffer_head *head = page_buffers(page);
679                 struct buffer_head *bh = head;
680
681                 do {
682                         set_buffer_dirty(bh);
683                         bh = bh->b_this_page;
684                 } while (bh != head);
685         }
686         /*
687          * Lock out page->mem_cgroup migration to keep PageDirty
688          * synchronized with per-memcg dirty page counters.
689          */
690         lock_page_memcg(page);
691         newly_dirty = !TestSetPageDirty(page);
692         spin_unlock(&mapping->private_lock);
693
694         if (newly_dirty)
695                 __set_page_dirty(page, mapping, 1);
696
697         unlock_page_memcg(page);
698
699         if (newly_dirty)
700                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
701
702         return newly_dirty;
703 }
704 EXPORT_SYMBOL(__set_page_dirty_buffers);
705
706 /*
707  * Write out and wait upon a list of buffers.
708  *
709  * We have conflicting pressures: we want to make sure that all
710  * initially dirty buffers get waited on, but that any subsequently
711  * dirtied buffers don't.  After all, we don't want fsync to last
712  * forever if somebody is actively writing to the file.
713  *
714  * Do this in two main stages: first we copy dirty buffers to a
715  * temporary inode list, queueing the writes as we go.  Then we clean
716  * up, waiting for those writes to complete.
717  * 
718  * During this second stage, any subsequent updates to the file may end
719  * up refiling the buffer on the original inode's dirty list again, so
720  * there is a chance we will end up with a buffer queued for write but
721  * not yet completed on that list.  So, as a final cleanup we go through
722  * the osync code to catch these locked, dirty buffers without requeuing
723  * any newly dirty buffers for write.
724  */
725 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
726 {
727         struct buffer_head *bh;
728         struct list_head tmp;
729         struct address_space *mapping;
730         int err = 0, err2;
731         struct blk_plug plug;
732
733         INIT_LIST_HEAD(&tmp);
734         blk_start_plug(&plug);
735
736         spin_lock(lock);
737         while (!list_empty(list)) {
738                 bh = BH_ENTRY(list->next);
739                 mapping = bh->b_assoc_map;
740                 __remove_assoc_queue(bh);
741                 /* Avoid race with mark_buffer_dirty_inode() which does
742                  * a lockless check and we rely on seeing the dirty bit */
743                 smp_mb();
744                 if (buffer_dirty(bh) || buffer_locked(bh)) {
745                         list_add(&bh->b_assoc_buffers, &tmp);
746                         bh->b_assoc_map = mapping;
747                         if (buffer_dirty(bh)) {
748                                 get_bh(bh);
749                                 spin_unlock(lock);
750                                 /*
751                                  * Ensure any pending I/O completes so that
752                                  * write_dirty_buffer() actually writes the
753                                  * current contents - it is a noop if I/O is
754                                  * still in flight on potentially older
755                                  * contents.
756                                  */
757                                 write_dirty_buffer(bh, WRITE_SYNC);
758
759                                 /*
760                                  * Kick off IO for the previous mapping. Note
761                                  * that we will not run the very last mapping,
762                                  * wait_on_buffer() will do that for us
763                                  * through sync_buffer().
764                                  */
765                                 brelse(bh);
766                                 spin_lock(lock);
767                         }
768                 }
769         }
770
771         spin_unlock(lock);
772         blk_finish_plug(&plug);
773         spin_lock(lock);
774
775         while (!list_empty(&tmp)) {
776                 bh = BH_ENTRY(tmp.prev);
777                 get_bh(bh);
778                 mapping = bh->b_assoc_map;
779                 __remove_assoc_queue(bh);
780                 /* Avoid race with mark_buffer_dirty_inode() which does
781                  * a lockless check and we rely on seeing the dirty bit */
782                 smp_mb();
783                 if (buffer_dirty(bh)) {
784                         list_add(&bh->b_assoc_buffers,
785                                  &mapping->private_list);
786                         bh->b_assoc_map = mapping;
787                 }
788                 spin_unlock(lock);
789                 wait_on_buffer(bh);
790                 if (!buffer_uptodate(bh))
791                         err = -EIO;
792                 brelse(bh);
793                 spin_lock(lock);
794         }
795         
796         spin_unlock(lock);
797         err2 = osync_buffers_list(lock, list);
798         if (err)
799                 return err;
800         else
801                 return err2;
802 }
803
804 /*
805  * Invalidate any and all dirty buffers on a given inode.  We are
806  * probably unmounting the fs, but that doesn't mean we have already
807  * done a sync().  Just drop the buffers from the inode list.
808  *
809  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
810  * assumes that all the buffers are against the blockdev.  Not true
811  * for reiserfs.
812  */
813 void invalidate_inode_buffers(struct inode *inode)
814 {
815         if (inode_has_buffers(inode)) {
816                 struct address_space *mapping = &inode->i_data;
817                 struct list_head *list = &mapping->private_list;
818                 struct address_space *buffer_mapping = mapping->private_data;
819
820                 spin_lock(&buffer_mapping->private_lock);
821                 while (!list_empty(list))
822                         __remove_assoc_queue(BH_ENTRY(list->next));
823                 spin_unlock(&buffer_mapping->private_lock);
824         }
825 }
826 EXPORT_SYMBOL(invalidate_inode_buffers);
827
828 /*
829  * Remove any clean buffers from the inode's buffer list.  This is called
830  * when we're trying to free the inode itself.  Those buffers can pin it.
831  *
832  * Returns true if all buffers were removed.
833  */
834 int remove_inode_buffers(struct inode *inode)
835 {
836         int ret = 1;
837
838         if (inode_has_buffers(inode)) {
839                 struct address_space *mapping = &inode->i_data;
840                 struct list_head *list = &mapping->private_list;
841                 struct address_space *buffer_mapping = mapping->private_data;
842
843                 spin_lock(&buffer_mapping->private_lock);
844                 while (!list_empty(list)) {
845                         struct buffer_head *bh = BH_ENTRY(list->next);
846                         if (buffer_dirty(bh)) {
847                                 ret = 0;
848                                 break;
849                         }
850                         __remove_assoc_queue(bh);
851                 }
852                 spin_unlock(&buffer_mapping->private_lock);
853         }
854         return ret;
855 }
856
857 /*
858  * Create the appropriate buffers when given a page for the data area and
859  * the size of each buffer.. Use the bh->b_this_page linked list to
860  * follow the buffers created.  Return NULL if unable to create more
861  * buffers.
862  *
863  * The retry flag is used to differentiate async IO (paging, swapping)
864  * which may not fail from ordinary buffer allocations.
865  */
866 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
867                 int retry)
868 {
869         struct buffer_head *bh, *head;
870         long offset;
871
872 try_again:
873         head = NULL;
874         offset = PAGE_SIZE;
875         while ((offset -= size) >= 0) {
876                 bh = alloc_buffer_head(GFP_NOFS);
877                 if (!bh)
878                         goto no_grow;
879
880                 bh->b_this_page = head;
881                 bh->b_blocknr = -1;
882                 head = bh;
883
884                 bh->b_size = size;
885
886                 /* Link the buffer to its page */
887                 set_bh_page(bh, page, offset);
888         }
889         return head;
890 /*
891  * In case anything failed, we just free everything we got.
892  */
893 no_grow:
894         if (head) {
895                 do {
896                         bh = head;
897                         head = head->b_this_page;
898                         free_buffer_head(bh);
899                 } while (head);
900         }
901
902         /*
903          * Return failure for non-async IO requests.  Async IO requests
904          * are not allowed to fail, so we have to wait until buffer heads
905          * become available.  But we don't want tasks sleeping with 
906          * partially complete buffers, so all were released above.
907          */
908         if (!retry)
909                 return NULL;
910
911         /* We're _really_ low on memory. Now we just
912          * wait for old buffer heads to become free due to
913          * finishing IO.  Since this is an async request and
914          * the reserve list is empty, we're sure there are 
915          * async buffer heads in use.
916          */
917         free_more_memory();
918         goto try_again;
919 }
920 EXPORT_SYMBOL_GPL(alloc_page_buffers);
921
922 static inline void
923 link_dev_buffers(struct page *page, struct buffer_head *head)
924 {
925         struct buffer_head *bh, *tail;
926
927         bh = head;
928         do {
929                 tail = bh;
930                 bh = bh->b_this_page;
931         } while (bh);
932         tail->b_this_page = head;
933         attach_page_buffers(page, head);
934 }
935
936 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
937 {
938         sector_t retval = ~((sector_t)0);
939         loff_t sz = i_size_read(bdev->bd_inode);
940
941         if (sz) {
942                 unsigned int sizebits = blksize_bits(size);
943                 retval = (sz >> sizebits);
944         }
945         return retval;
946 }
947
948 /*
949  * Initialise the state of a blockdev page's buffers.
950  */ 
951 static sector_t
952 init_page_buffers(struct page *page, struct block_device *bdev,
953                         sector_t block, int size)
954 {
955         struct buffer_head *head = page_buffers(page);
956         struct buffer_head *bh = head;
957         int uptodate = PageUptodate(page);
958         sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);
959
960         do {
961                 if (!buffer_mapped(bh)) {
962                         init_buffer(bh, NULL, NULL);
963                         bh->b_bdev = bdev;
964                         bh->b_blocknr = block;
965                         if (uptodate)
966                                 set_buffer_uptodate(bh);
967                         if (block < end_block)
968                                 set_buffer_mapped(bh);
969                 }
970                 block++;
971                 bh = bh->b_this_page;
972         } while (bh != head);
973
974         /*
975          * Caller needs to validate requested block against end of device.
976          */
977         return end_block;
978 }
979
980 /*
981  * Create the page-cache page that contains the requested block.
982  *
983  * This is used purely for blockdev mappings.
984  */
985 static int
986 grow_dev_page(struct block_device *bdev, sector_t block,
987               pgoff_t index, int size, int sizebits, gfp_t gfp)
988 {
989         struct inode *inode = bdev->bd_inode;
990         struct page *page;
991         struct buffer_head *bh;
992         sector_t end_block;
993         int ret = 0;            /* Will call free_more_memory() */
994         gfp_t gfp_mask;
995
996         gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
997
998         /*
999          * XXX: __getblk_slow() can not really deal with failure and
1000          * will endlessly loop on improvised global reclaim.  Prefer
1001          * looping in the allocator rather than here, at least that
1002          * code knows what it's doing.
1003          */
1004         gfp_mask |= __GFP_NOFAIL;
1005
1006         page = find_or_create_page(inode->i_mapping, index, gfp_mask);
1007         if (!page)
1008                 return ret;
1009
1010         BUG_ON(!PageLocked(page));
1011
1012         if (page_has_buffers(page)) {
1013                 bh = page_buffers(page);
1014                 if (bh->b_size == size) {
1015                         end_block = init_page_buffers(page, bdev,
1016                                                 (sector_t)index << sizebits,
1017                                                 size);
1018                         goto done;
1019                 }
1020                 if (!try_to_free_buffers(page))
1021                         goto failed;
1022         }
1023
1024         /*
1025          * Allocate some buffers for this page
1026          */
1027         bh = alloc_page_buffers(page, size, 0);
1028         if (!bh)
1029                 goto failed;
1030
1031         /*
1032          * Link the page to the buffers and initialise them.  Take the
1033          * lock to be atomic wrt __find_get_block(), which does not
1034          * run under the page lock.
1035          */
1036         spin_lock(&inode->i_mapping->private_lock);
1037         link_dev_buffers(page, bh);
1038         end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
1039                         size);
1040         spin_unlock(&inode->i_mapping->private_lock);
1041 done:
1042         ret = (block < end_block) ? 1 : -ENXIO;
1043 failed:
1044         unlock_page(page);
1045         put_page(page);
1046         return ret;
1047 }
1048
1049 /*
1050  * Create buffers for the specified block device block's page.  If
1051  * that page was dirty, the buffers are set dirty also.
1052  */
1053 static int
1054 grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
1055 {
1056         pgoff_t index;
1057         int sizebits;
1058
1059         sizebits = -1;
1060         do {
1061                 sizebits++;
1062         } while ((size << sizebits) < PAGE_SIZE);
1063
1064         index = block >> sizebits;
1065
1066         /*
1067          * Check for a block which wants to lie outside our maximum possible
1068          * pagecache index.  (this comparison is done using sector_t types).
1069          */
1070         if (unlikely(index != block >> sizebits)) {
1071                 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1072                         "device %pg\n",
1073                         __func__, (unsigned long long)block,
1074                         bdev);
1075                 return -EIO;
1076         }
1077
1078         /* Create a page with the proper size buffers.. */
1079         return grow_dev_page(bdev, block, index, size, sizebits, gfp);
1080 }
1081
1082 static struct buffer_head *
1083 __getblk_slow(struct block_device *bdev, sector_t block,
1084              unsigned size, gfp_t gfp)
1085 {
1086         /* Size must be multiple of hard sectorsize */
1087         if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1088                         (size < 512 || size > PAGE_SIZE))) {
1089                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1090                                         size);
1091                 printk(KERN_ERR "logical block size: %d\n",
1092                                         bdev_logical_block_size(bdev));
1093
1094                 dump_stack();
1095                 return NULL;
1096         }
1097
1098         for (;;) {
1099                 struct buffer_head *bh;
1100                 int ret;
1101
1102                 bh = __find_get_block(bdev, block, size);
1103                 if (bh)
1104                         return bh;
1105
1106                 ret = grow_buffers(bdev, block, size, gfp);
1107                 if (ret < 0)
1108                         return NULL;
1109                 if (ret == 0)
1110                         free_more_memory();
1111         }
1112 }
1113
1114 /*
1115  * The relationship between dirty buffers and dirty pages:
1116  *
1117  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1118  * the page is tagged dirty in its radix tree.
1119  *
1120  * At all times, the dirtiness of the buffers represents the dirtiness of
1121  * subsections of the page.  If the page has buffers, the page dirty bit is
1122  * merely a hint about the true dirty state.
1123  *
1124  * When a page is set dirty in its entirety, all its buffers are marked dirty
1125  * (if the page has buffers).
1126  *
1127  * When a buffer is marked dirty, its page is dirtied, but the page's other
1128  * buffers are not.
1129  *
1130  * Also.  When blockdev buffers are explicitly read with bread(), they
1131  * individually become uptodate.  But their backing page remains not
1132  * uptodate - even if all of its buffers are uptodate.  A subsequent
1133  * block_read_full_page() against that page will discover all the uptodate
1134  * buffers, will set the page uptodate and will perform no I/O.
1135  */
1136
1137 /**
1138  * mark_buffer_dirty - mark a buffer_head as needing writeout
1139  * @bh: the buffer_head to mark dirty
1140  *
1141  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1142  * backing page dirty, then tag the page as dirty in its address_space's radix
1143  * tree and then attach the address_space's inode to its superblock's dirty
1144  * inode list.
1145  *
1146  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1147  * mapping->tree_lock and mapping->host->i_lock.
1148  */
1149 void mark_buffer_dirty(struct buffer_head *bh)
1150 {
1151         WARN_ON_ONCE(!buffer_uptodate(bh));
1152
1153         trace_block_dirty_buffer(bh);
1154
1155         /*
1156          * Very *carefully* optimize the it-is-already-dirty case.
1157          *
1158          * Don't let the final "is it dirty" escape to before we
1159          * perhaps modified the buffer.
1160          */
1161         if (buffer_dirty(bh)) {
1162                 smp_mb();
1163                 if (buffer_dirty(bh))
1164                         return;
1165         }
1166
1167         if (!test_set_buffer_dirty(bh)) {
1168                 struct page *page = bh->b_page;
1169                 struct address_space *mapping = NULL;
1170
1171                 lock_page_memcg(page);
1172                 if (!TestSetPageDirty(page)) {
1173                         mapping = page_mapping(page);
1174                         if (mapping)
1175                                 __set_page_dirty(page, mapping, 0);
1176                 }
1177                 unlock_page_memcg(page);
1178                 if (mapping)
1179                         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1180         }
1181 }
1182 EXPORT_SYMBOL(mark_buffer_dirty);
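/*
 * The read-modify-write pattern mark_buffer_dirty() supports, sketched
 * with illustrative names: read the block through the buffer cache,
 * change part of its contents, mark the buffer dirty and let writeback
 * (or a later sync) push it to disk.
 */
static int patch_block_example(struct super_block *sb, sector_t block,
                               unsigned int off, const void *src, size_t len)
{
        struct buffer_head *bh = sb_bread(sb, block);

        if (!bh)
                return -EIO;
        memcpy(bh->b_data + off, src, len);     /* caller ensures off + len fits */
        mark_buffer_dirty(bh);                  /* dirties bh, page and inode */
        brelse(bh);
        return 0;
}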
1183
1184 /*
1185  * Decrement a buffer_head's reference count.  If all buffers against a page
1186  * have zero reference count, are clean and unlocked, and if the page is clean
1187  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1188  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1189  * a page but it ends up not being freed, and buffers may later be reattached).
1190  */
1191 void __brelse(struct buffer_head * buf)
1192 {
1193         if (atomic_read(&buf->b_count)) {
1194                 put_bh(buf);
1195                 return;
1196         }
1197         WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1198 }
1199 EXPORT_SYMBOL(__brelse);
1200
1201 /*
1202  * bforget() is like brelse(), except it discards any
1203  * potentially dirty data.
1204  */
1205 void __bforget(struct buffer_head *bh)
1206 {
1207         clear_buffer_dirty(bh);
1208         if (bh->b_assoc_map) {
1209                 struct address_space *buffer_mapping = bh->b_page->mapping;
1210
1211                 spin_lock(&buffer_mapping->private_lock);
1212                 list_del_init(&bh->b_assoc_buffers);
1213                 bh->b_assoc_map = NULL;
1214                 spin_unlock(&buffer_mapping->private_lock);
1215         }
1216         __brelse(bh);
1217 }
1218 EXPORT_SYMBOL(__bforget);
1219
1220 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1221 {
1222         lock_buffer(bh);
1223         if (buffer_uptodate(bh)) {
1224                 unlock_buffer(bh);
1225                 return bh;
1226         } else {
1227                 get_bh(bh);
1228                 bh->b_end_io = end_buffer_read_sync;
1229                 submit_bh(REQ_OP_READ, 0, bh);
1230                 wait_on_buffer(bh);
1231                 if (buffer_uptodate(bh))
1232                         return bh;
1233         }
1234         brelse(bh);
1235         return NULL;
1236 }
1237
1238 /*
1239  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
1240  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1241  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1242  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1243  * CPU's LRUs at the same time.
1244  *
1245  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1246  * sb_find_get_block().
1247  *
1248  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1249  * a local interrupt disable for that.
1250  */
1251
1252 #define BH_LRU_SIZE     16
1253
1254 struct bh_lru {
1255         struct buffer_head *bhs[BH_LRU_SIZE];
1256 };
1257
1258 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1259
1260 #ifdef CONFIG_SMP
1261 #define bh_lru_lock()   local_irq_disable()
1262 #define bh_lru_unlock() local_irq_enable()
1263 #else
1264 #define bh_lru_lock()   preempt_disable()
1265 #define bh_lru_unlock() preempt_enable()
1266 #endif
1267
1268 static inline void check_irqs_on(void)
1269 {
1270 #ifdef irqs_disabled
1271         BUG_ON(irqs_disabled());
1272 #endif
1273 }
1274
1275 /*
1276  * The LRU management algorithm is dopey-but-simple.  Sorry.
1277  */
1278 static void bh_lru_install(struct buffer_head *bh)
1279 {
1280         struct buffer_head *evictee = NULL;
1281
1282         check_irqs_on();
1283         bh_lru_lock();
1284         if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1285                 struct buffer_head *bhs[BH_LRU_SIZE];
1286                 int in;
1287                 int out = 0;
1288
1289                 get_bh(bh);
1290                 bhs[out++] = bh;
1291                 for (in = 0; in < BH_LRU_SIZE; in++) {
1292                         struct buffer_head *bh2 =
1293                                 __this_cpu_read(bh_lrus.bhs[in]);
1294
1295                         if (bh2 == bh) {
1296                                 __brelse(bh2);
1297                         } else {
1298                                 if (out >= BH_LRU_SIZE) {
1299                                         BUG_ON(evictee != NULL);
1300                                         evictee = bh2;
1301                                 } else {
1302                                         bhs[out++] = bh2;
1303                                 }
1304                         }
1305                 }
1306                 while (out < BH_LRU_SIZE)
1307                         bhs[out++] = NULL;
1308                 memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1309         }
1310         bh_lru_unlock();
1311
1312         if (evictee)
1313                 __brelse(evictee);
1314 }
1315
1316 /*
1317  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1318  */
1319 static struct buffer_head *
1320 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1321 {
1322         struct buffer_head *ret = NULL;
1323         unsigned int i;
1324
1325         check_irqs_on();
1326         bh_lru_lock();
1327         for (i = 0; i < BH_LRU_SIZE; i++) {
1328                 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1329
1330                 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1331                     bh->b_size == size) {
1332                         if (i) {
1333                                 while (i) {
1334                                         __this_cpu_write(bh_lrus.bhs[i],
1335                                                 __this_cpu_read(bh_lrus.bhs[i - 1]));
1336                                         i--;
1337                                 }
1338                                 __this_cpu_write(bh_lrus.bhs[0], bh);
1339                         }
1340                         get_bh(bh);
1341                         ret = bh;
1342                         break;
1343                 }
1344         }
1345         bh_lru_unlock();
1346         return ret;
1347 }
1348
1349 /*
1350  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1351  * it in the LRU and mark it as accessed.  If it is not present then return
1352  * NULL
1353  */
1354 struct buffer_head *
1355 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1356 {
1357         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1358
1359         if (bh == NULL) {
1360                 /* __find_get_block_slow will mark the page accessed */
1361                 bh = __find_get_block_slow(bdev, block);
1362                 if (bh)
1363                         bh_lru_install(bh);
1364         } else
1365                 touch_buffer(bh);
1366
1367         return bh;
1368 }
1369 EXPORT_SYMBOL(__find_get_block);
1370
1371 /*
1372  * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
1373  * which corresponds to the passed block_device, block and size. The
1374  * returned buffer has its reference count incremented.
1375  *
1376  * __getblk_gfp() will lock up the machine if grow_dev_page's
1377  * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
1378  */
1379 struct buffer_head *
1380 __getblk_gfp(struct block_device *bdev, sector_t block,
1381              unsigned size, gfp_t gfp)
1382 {
1383         struct buffer_head *bh = __find_get_block(bdev, block, size);
1384
1385         might_sleep();
1386         if (bh == NULL)
1387                 bh = __getblk_slow(bdev, block, size, gfp);
1388         return bh;
1389 }
1390 EXPORT_SYMBOL(__getblk_gfp);
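/*
 * A sketch of the "brand new block" pattern that __getblk_gfp() (via
 * sb_getblk()) exists for: no read is needed because the caller is about
 * to provide the whole block's contents, so the buffer is zeroed, marked
 * uptodate and dirtied directly.  Names are illustrative.
 */
static struct buffer_head *new_block_example(struct super_block *sb,
                                             sector_t block)
{
        struct buffer_head *bh = sb_getblk(sb, block);

        if (!bh)
                return NULL;
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);      /* fresh contents, no read */
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty(bh);
        return bh;                              /* caller brelse()s when done */
}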
1391
1392 /*
1393  * Do async read-ahead on a buffer..
1394  */
1395 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1396 {
1397         struct buffer_head *bh = __getblk(bdev, block, size);
1398         if (likely(bh)) {
1399                 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
1400                 brelse(bh);
1401         }
1402 }
1403 EXPORT_SYMBOL(__breadahead);
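/*
 * A sketch of the usual readahead pattern built on __breadahead() (via
 * sb_breadahead()): start asynchronous reads for blocks expected to be
 * needed soon, then read the immediately needed block synchronously.
 * The block numbers and helper name are illustrative.
 */
static struct buffer_head *read_with_readahead_example(struct super_block *sb,
                                                       sector_t block,
                                                       unsigned int ra_blocks)
{
        unsigned int i;

        for (i = 1; i <= ra_blocks; i++)
                sb_breadahead(sb, block + i);   /* async; result discarded */

        return sb_bread(sb, block);             /* synchronous read of the target */
}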
1404
1405 /**
1406  *  __bread_gfp() - reads a specified block and returns the bh
1407  *  @bdev: the block_device to read from
1408  *  @block: number of block
1409  *  @size: size (in bytes) to read
1410  *  @gfp: page allocation flag
1411  *
1412  *  Reads a specified block and returns the buffer head that contains it.
1413  *  If @gfp is zero, the page cache page is allocated from a non-movable
1414  *  area, so that a pinned buffer page does not prevent page migration.
1415  *  It returns NULL if the block was unreadable.
1416  */
1417 struct buffer_head *
1418 __bread_gfp(struct block_device *bdev, sector_t block,
1419                    unsigned size, gfp_t gfp)
1420 {
1421         struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1422
1423         if (likely(bh) && !buffer_uptodate(bh))
1424                 bh = __bread_slow(bh);
1425         return bh;
1426 }
1427 EXPORT_SYMBOL(__bread_gfp);
1428
1429 /*
1430  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1431  * This doesn't race because it runs in each cpu either in irq
1432  * or with preempt disabled.
1433  */
1434 static void invalidate_bh_lru(void *arg)
1435 {
1436         struct bh_lru *b = &get_cpu_var(bh_lrus);
1437         int i;
1438
1439         for (i = 0; i < BH_LRU_SIZE; i++) {
1440                 brelse(b->bhs[i]);
1441                 b->bhs[i] = NULL;
1442         }
1443         put_cpu_var(bh_lrus);
1444 }
1445
1446 static bool has_bh_in_lru(int cpu, void *dummy)
1447 {
1448         struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1449         int i;
1450         
1451         for (i = 0; i < BH_LRU_SIZE; i++) {
1452                 if (b->bhs[i])
1453                         return 1;
1454         }
1455
1456         return 0;
1457 }
1458
1459 void invalidate_bh_lrus(void)
1460 {
1461         on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
1462 }
1463 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1464
1465 void set_bh_page(struct buffer_head *bh,
1466                 struct page *page, unsigned long offset)
1467 {
1468         bh->b_page = page;
1469         BUG_ON(offset >= PAGE_SIZE);
1470         if (PageHighMem(page))
1471                 /*
1472                  * This catches illegal uses and preserves the offset:
1473                  */
1474                 bh->b_data = (char *)(0 + offset);
1475         else
1476                 bh->b_data = page_address(page) + offset;
1477 }
1478 EXPORT_SYMBOL(set_bh_page);
1479
1480 /*
1481  * Called when truncating a buffer on a page completely.
1482  */
1483
1484 /* Bits that are cleared during an invalidate */
1485 #define BUFFER_FLAGS_DISCARD \
1486         (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1487          1 << BH_Delay | 1 << BH_Unwritten)
1488
1489 static void discard_buffer(struct buffer_head * bh)
1490 {
1491         unsigned long b_state, b_state_old;
1492
1493         lock_buffer(bh);
1494         clear_buffer_dirty(bh);
1495         bh->b_bdev = NULL;
1496         b_state = bh->b_state;
1497         for (;;) {
1498                 b_state_old = cmpxchg(&bh->b_state, b_state,
1499                                       (b_state & ~BUFFER_FLAGS_DISCARD));
1500                 if (b_state_old == b_state)
1501                         break;
1502                 b_state = b_state_old;
1503         }
1504         unlock_buffer(bh);
1505 }
1506
1507 /**
1508  * block_invalidatepage - invalidate part or all of a buffer-backed page
1509  *
1510  * @page: the page which is affected
1511  * @offset: start of the range to invalidate
1512  * @length: length of the range to invalidate
1513  *
1514  * block_invalidatepage() is called when all or part of the page has become
1515  * invalidated by a truncate operation.
1516  *
1517  * block_invalidatepage() does not have to release all buffers, but it must
1518  * ensure that no dirty buffer is left outside @offset and that no I/O
1519  * is underway against any of the blocks which are outside the truncation
1520  * point, because the caller is about to free (and possibly reuse) those
1521  * blocks on-disk.
1522  */
1523 void block_invalidatepage(struct page *page, unsigned int offset,
1524                           unsigned int length)
1525 {
1526         struct buffer_head *head, *bh, *next;
1527         unsigned int curr_off = 0;
1528         unsigned int stop = length + offset;
1529
1530         BUG_ON(!PageLocked(page));
1531         if (!page_has_buffers(page))
1532                 goto out;
1533
1534         /*
1535          * Check for overflow
1536          */
1537         BUG_ON(stop > PAGE_SIZE || stop < length);
1538
1539         head = page_buffers(page);
1540         bh = head;
1541         do {
1542                 unsigned int next_off = curr_off + bh->b_size;
1543                 next = bh->b_this_page;
1544
1545                 /*
1546                  * Are we still fully in range?
1547                  */
1548                 if (next_off > stop)
1549                         goto out;
1550
1551                 /*
1552                  * is this block fully invalidated?
1553                  */
1554                 if (offset <= curr_off)
1555                         discard_buffer(bh);
1556                 curr_off = next_off;
1557                 bh = next;
1558         } while (bh != head);
1559
1560         /*
1561          * We release buffers only if the entire page is being invalidated.
1562          * The get_block cached value has been unconditionally invalidated,
1563          * so real IO is not possible anymore.
1564          */
1565         if (offset == 0)
1566                 try_to_release_page(page, 0);
1567 out:
1568         return;
1569 }
1570 EXPORT_SYMBOL(block_invalidatepage);
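/*
 * Editor's note (not part of the original file): buffer-backed filesystems
 * usually point ->invalidatepage straight at block_invalidatepage(), or wrap
 * it when per-page private state must be torn down first.  A hypothetical
 * wrapper ("exfs" is not a real filesystem) could look like this:
 */
#if 0   /* illustration only */
static void exfs_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        /* drop any filesystem-private tracking for the range first ... */
        block_invalidatepage(page, offset, length);
}
#endif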
1571
1572
1573 /*
1574  * We attach and possibly dirty the buffers atomically wrt
1575  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1576  * is already excluded via the page lock.
1577  */
1578 void create_empty_buffers(struct page *page,
1579                         unsigned long blocksize, unsigned long b_state)
1580 {
1581         struct buffer_head *bh, *head, *tail;
1582
1583         head = alloc_page_buffers(page, blocksize, 1);
1584         bh = head;
1585         do {
1586                 bh->b_state |= b_state;
1587                 tail = bh;
1588                 bh = bh->b_this_page;
1589         } while (bh);
1590         tail->b_this_page = head;
1591
1592         spin_lock(&page->mapping->private_lock);
1593         if (PageUptodate(page) || PageDirty(page)) {
1594                 bh = head;
1595                 do {
1596                         if (PageDirty(page))
1597                                 set_buffer_dirty(bh);
1598                         if (PageUptodate(page))
1599                                 set_buffer_uptodate(bh);
1600                         bh = bh->b_this_page;
1601                 } while (bh != head);
1602         }
1603         attach_page_buffers(page, head);
1604         spin_unlock(&page->mapping->private_lock);
1605 }
1606 EXPORT_SYMBOL(create_empty_buffers);
1607
1608 /*
1609  * We are taking a block for data and we don't want any output from any
1610  * buffer-cache aliases starting from the return of this function and
1611  * until the moment when something explicitly marks the buffer
1612  * dirty (hopefully that will not happen until we free that block ;-)
1613  * We don't even need to mark it not-uptodate - nobody can expect
1614  * anything from a newly allocated buffer anyway. We used to use
1615  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1616  * don't want to mark the alias unmapped, for example - it would confuse
1617  * anyone who might pick it up with bread() afterwards...
1618  *
1619  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1620  * be writeout I/O going on against recently-freed buffers.  We don't
1621  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1622  * only if we really need to.  That happens here.
1623  */
1624 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1625 {
1626         struct buffer_head *old_bh;
1627
1628         might_sleep();
1629
1630         old_bh = __find_get_block_slow(bdev, block);
1631         if (old_bh) {
1632                 clear_buffer_dirty(old_bh);
1633                 wait_on_buffer(old_bh);
1634                 clear_buffer_req(old_bh);
1635                 __brelse(old_bh);
1636         }
1637 }
1638 EXPORT_SYMBOL(unmap_underlying_metadata);
1639
1640 /*
1641  * Size is a power-of-two in the range 512..PAGE_SIZE,
1642  * and the case we care about most is PAGE_SIZE.
1643  *
1644  * So this *could* possibly be written with those
1645  * constraints in mind (relevant mostly if some
1646  * architecture has a slow bit-scan instruction)
1647  */
1648 static inline int block_size_bits(unsigned int blocksize)
1649 {
1650         return ilog2(blocksize);
1651 }
1652
1653 static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
1654 {
1655         BUG_ON(!PageLocked(page));
1656
1657         if (!page_has_buffers(page))
1658                 create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
1659         return page_buffers(page);
1660 }
1661
1662 /*
1663  * NOTE! All mapped/uptodate combinations are valid:
1664  *
1665  *      Mapped  Uptodate        Meaning
1666  *
1667  *      No      No              "unknown" - must do get_block()
1668  *      No      Yes             "hole" - zero-filled
1669  *      Yes     No              "allocated" - allocated on disk, not read in
1670  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1671  *
1672  * "Dirty" is valid only with the last case (mapped+uptodate).
1673  */
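/*
 * Editor's sketch of the table above in code form (illustration only, not
 * part of the original file):
 */
#if 0
static const char *bh_map_state(struct buffer_head *bh)
{
        if (!buffer_mapped(bh))
                return buffer_uptodate(bh) ? "hole" : "unknown";
        return buffer_uptodate(bh) ? "valid" : "allocated";
}
#endif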
1674
1675 /*
1676  * While block_write_full_page is writing back the dirty buffers under
1677  * the page lock, whoever dirtied the buffers may decide to clean them
1678  * again at any time.  We handle that by only looking at the buffer
1679  * state inside lock_buffer().
1680  *
1681  * If block_write_full_page() is called for regular writeback
1682  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1683  * locked buffer.  This can only happen if someone has written the buffer
1684  * directly, with submit_bh().  At the address_space level PageWriteback
1685  * prevents this contention from occurring.
1686  *
1687  * If block_write_full_page() is called with wbc->sync_mode ==
1688  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1689  * causes the writes to be flagged as synchronous writes.
1690  */
1691 int __block_write_full_page(struct inode *inode, struct page *page,
1692                         get_block_t *get_block, struct writeback_control *wbc,
1693                         bh_end_io_t *handler)
1694 {
1695         int err;
1696         sector_t block;
1697         sector_t last_block;
1698         struct buffer_head *bh, *head;
1699         unsigned int blocksize, bbits;
1700         int nr_underway = 0;
1701         int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
1702
1703         head = create_page_buffers(page, inode,
1704                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1705
1706         /*
1707          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1708          * here, and the (potentially unmapped) buffers may become dirty at
1709          * any time.  If a buffer becomes dirty here after we've inspected it
1710          * then we just miss that fact, and the page stays dirty.
1711          *
1712          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1713          * handle that here by just cleaning them.
1714          */
1715
1716         bh = head;
1717         blocksize = bh->b_size;
1718         bbits = block_size_bits(blocksize);
1719
1720         block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1721         last_block = (i_size_read(inode) - 1) >> bbits;
1722
1723         /*
1724          * Get all the dirty buffers mapped to disk addresses and
1725          * handle any aliases from the underlying blockdev's mapping.
1726          */
1727         do {
1728                 if (block > last_block) {
1729                         /*
1730                          * mapped buffers outside i_size will occur, because
1731                          * this page can be outside i_size when there is a
1732                          * truncate in progress.
1733                          */
1734                         /*
1735                          * The buffer was zeroed by block_write_full_page()
1736                          */
1737                         clear_buffer_dirty(bh);
1738                         set_buffer_uptodate(bh);
1739                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1740                            buffer_dirty(bh)) {
1741                         WARN_ON(bh->b_size != blocksize);
1742                         err = get_block(inode, block, bh, 1);
1743                         if (err)
1744                                 goto recover;
1745                         clear_buffer_delay(bh);
1746                         if (buffer_new(bh)) {
1747                                 /* blockdev mappings never come here */
1748                                 clear_buffer_new(bh);
1749                                 unmap_underlying_metadata(bh->b_bdev,
1750                                                         bh->b_blocknr);
1751                         }
1752                 }
1753                 bh = bh->b_this_page;
1754                 block++;
1755         } while (bh != head);
1756
1757         do {
1758                 if (!buffer_mapped(bh))
1759                         continue;
1760                 /*
1761                  * If it's a fully non-blocking write attempt and we cannot
1762                  * lock the buffer then redirty the page.  Note that this can
1763                  * potentially cause a busy-wait loop from writeback threads
1764                  * and kswapd activity, but those code paths have their own
1765                  * higher-level throttling.
1766                  */
1767                 if (wbc->sync_mode != WB_SYNC_NONE) {
1768                         lock_buffer(bh);
1769                 } else if (!trylock_buffer(bh)) {
1770                         redirty_page_for_writepage(wbc, page);
1771                         continue;
1772                 }
1773                 if (test_clear_buffer_dirty(bh)) {
1774                         mark_buffer_async_write_endio(bh, handler);
1775                 } else {
1776                         unlock_buffer(bh);
1777                 }
1778         } while ((bh = bh->b_this_page) != head);
1779
1780         /*
1781          * The page and its buffers are protected by PageWriteback(), so we can
1782          * drop the bh refcounts early.
1783          */
1784         BUG_ON(PageWriteback(page));
1785         set_page_writeback(page);
1786
1787         do {
1788                 struct buffer_head *next = bh->b_this_page;
1789                 if (buffer_async_write(bh)) {
1790                         submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
1791                         nr_underway++;
1792                 }
1793                 bh = next;
1794         } while (bh != head);
1795         unlock_page(page);
1796
1797         err = 0;
1798 done:
1799         if (nr_underway == 0) {
1800                 /*
1801                  * The page was marked dirty, but the buffers were
1802                  * clean.  Someone wrote them back by hand with
1803                  * ll_rw_block/submit_bh.  A rare case.
1804                  */
1805                 end_page_writeback(page);
1806
1807                 /*
1808                  * The page and buffer_heads can be released at any time from
1809                  * here on.
1810                  */
1811         }
1812         return err;
1813
1814 recover:
1815         /*
1816          * ENOSPC, or some other error.  We may already have added some
1817          * blocks to the file, so we need to write these out to avoid
1818          * exposing stale data.
1819          * The page is currently locked and not marked for writeback
1820          */
1821         bh = head;
1822         /* Recovery: lock and submit the mapped buffers */
1823         do {
1824                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1825                     !buffer_delay(bh)) {
1826                         lock_buffer(bh);
1827                         mark_buffer_async_write_endio(bh, handler);
1828                 } else {
1829                         /*
1830                          * The buffer may have been set dirty during
1831                          * attachment to a dirty page.
1832                          */
1833                         clear_buffer_dirty(bh);
1834                 }
1835         } while ((bh = bh->b_this_page) != head);
1836         SetPageError(page);
1837         BUG_ON(PageWriteback(page));
1838         mapping_set_error(page->mapping, err);
1839         set_page_writeback(page);
1840         do {
1841                 struct buffer_head *next = bh->b_this_page;
1842                 if (buffer_async_write(bh)) {
1843                         clear_buffer_dirty(bh);
1844                         submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
1845                         nr_underway++;
1846                 }
1847                 bh = next;
1848         } while (bh != head);
1849         unlock_page(page);
1850         goto done;
1851 }
1852 EXPORT_SYMBOL(__block_write_full_page);
1853
1854 /*
1855  * If a page has any new buffers, zero them out here, and mark them uptodate
1856  * and dirty so they'll be written out (in order to prevent uninitialised
1857  * block data from leaking). And clear the new bit.
1858  */
1859 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1860 {
1861         unsigned int block_start, block_end;
1862         struct buffer_head *head, *bh;
1863
1864         BUG_ON(!PageLocked(page));
1865         if (!page_has_buffers(page))
1866                 return;
1867
1868         bh = head = page_buffers(page);
1869         block_start = 0;
1870         do {
1871                 block_end = block_start + bh->b_size;
1872
1873                 if (buffer_new(bh)) {
1874                         if (block_end > from && block_start < to) {
1875                                 if (!PageUptodate(page)) {
1876                                         unsigned start, size;
1877
1878                                         start = max(from, block_start);
1879                                         size = min(to, block_end) - start;
1880
1881                                         zero_user(page, start, size);
1882                                         set_buffer_uptodate(bh);
1883                                 }
1884
1885                                 clear_buffer_new(bh);
1886                                 mark_buffer_dirty(bh);
1887                         }
1888                 }
1889
1890                 block_start = block_end;
1891                 bh = bh->b_this_page;
1892         } while (bh != head);
1893 }
1894 EXPORT_SYMBOL(page_zero_new_buffers);
1895
1896 static void
1897 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
1898                 struct iomap *iomap)
1899 {
1900         loff_t offset = block << inode->i_blkbits;
1901
1902         bh->b_bdev = iomap->bdev;
1903
1904         /*
1905          * Block points to offset in file we need to map, iomap contains
1906          * the offset at which the map starts. If the map ends before the
1907          * current block, then do not map the buffer and let the caller
1908          * handle it.
1909          */
1910         BUG_ON(offset >= iomap->offset + iomap->length);
1911
1912         switch (iomap->type) {
1913         case IOMAP_HOLE:
1914                 /*
1915                  * If the buffer is not up to date or beyond the current EOF,
1916                  * we need to mark it as new to ensure sub-block zeroing is
1917                  * executed if necessary.
1918                  */
1919                 if (!buffer_uptodate(bh) ||
1920                     (offset >= i_size_read(inode)))
1921                         set_buffer_new(bh);
1922                 break;
1923         case IOMAP_DELALLOC:
1924                 if (!buffer_uptodate(bh) ||
1925                     (offset >= i_size_read(inode)))
1926                         set_buffer_new(bh);
1927                 set_buffer_uptodate(bh);
1928                 set_buffer_mapped(bh);
1929                 set_buffer_delay(bh);
1930                 break;
1931         case IOMAP_UNWRITTEN:
1932                 /*
1933                  * For unwritten regions, we always need to ensure that
1934                  * sub-block writes cause the regions in the block we are not
1935                  * writing to are zeroed. Set the buffer as new to ensure this.
1936                  */
1937                 set_buffer_new(bh);
1938                 set_buffer_unwritten(bh);
1939                 /* FALLTHRU */
1940         case IOMAP_MAPPED:
1941                 if (offset >= i_size_read(inode))
1942                         set_buffer_new(bh);
1943                 bh->b_blocknr = (iomap->blkno >> (inode->i_blkbits - 9)) +
1944                                 ((offset - iomap->offset) >> inode->i_blkbits);
1945                 set_buffer_mapped(bh);
1946                 break;
1947         }
1948 }
1949
1950 int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
1951                 get_block_t *get_block, struct iomap *iomap)
1952 {
1953         unsigned from = pos & (PAGE_SIZE - 1);
1954         unsigned to = from + len;
1955         struct inode *inode = page->mapping->host;
1956         unsigned block_start, block_end;
1957         sector_t block;
1958         int err = 0;
1959         unsigned blocksize, bbits;
1960         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1961
1962         BUG_ON(!PageLocked(page));
1963         BUG_ON(from > PAGE_SIZE);
1964         BUG_ON(to > PAGE_SIZE);
1965         BUG_ON(from > to);
1966
1967         head = create_page_buffers(page, inode, 0);
1968         blocksize = head->b_size;
1969         bbits = block_size_bits(blocksize);
1970
1971         block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1972
1973         for(bh = head, block_start = 0; bh != head || !block_start;
1974             block++, block_start=block_end, bh = bh->b_this_page) {
1975                 block_end = block_start + blocksize;
1976                 if (block_end <= from || block_start >= to) {
1977                         if (PageUptodate(page)) {
1978                                 if (!buffer_uptodate(bh))
1979                                         set_buffer_uptodate(bh);
1980                         }
1981                         continue;
1982                 }
1983                 if (buffer_new(bh))
1984                         clear_buffer_new(bh);
1985                 if (!buffer_mapped(bh)) {
1986                         WARN_ON(bh->b_size != blocksize);
1987                         if (get_block) {
1988                                 err = get_block(inode, block, bh, 1);
1989                                 if (err)
1990                                         break;
1991                         } else {
1992                                 iomap_to_bh(inode, block, bh, iomap);
1993                         }
1994
1995                         if (buffer_new(bh)) {
1996                                 unmap_underlying_metadata(bh->b_bdev,
1997                                                         bh->b_blocknr);
1998                                 if (PageUptodate(page)) {
1999                                         clear_buffer_new(bh);
2000                                         set_buffer_uptodate(bh);
2001                                         mark_buffer_dirty(bh);
2002                                         continue;
2003                                 }
2004                                 if (block_end > to || block_start < from)
2005                                         zero_user_segments(page,
2006                                                 to, block_end,
2007                                                 block_start, from);
2008                                 continue;
2009                         }
2010                 }
2011                 if (PageUptodate(page)) {
2012                         if (!buffer_uptodate(bh))
2013                                 set_buffer_uptodate(bh);
2014                         continue; 
2015                 }
2016                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2017                     !buffer_unwritten(bh) &&
2018                      (block_start < from || block_end > to)) {
2019                         ll_rw_block(REQ_OP_READ, 0, 1, &bh);
2020                         *wait_bh++=bh;
2021                 }
2022         }
2023         /*
2024          * If we issued read requests - let them complete.
2025          */
2026         while(wait_bh > wait) {
2027                 wait_on_buffer(*--wait_bh);
2028                 if (!buffer_uptodate(*wait_bh))
2029                         err = -EIO;
2030         }
2031         if (unlikely(err))
2032                 page_zero_new_buffers(page, from, to);
2033         return err;
2034 }
2035
2036 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2037                 get_block_t *get_block)
2038 {
2039         return __block_write_begin_int(page, pos, len, get_block, NULL);
2040 }
2041 EXPORT_SYMBOL(__block_write_begin);
2042
2043 static int __block_commit_write(struct inode *inode, struct page *page,
2044                 unsigned from, unsigned to)
2045 {
2046         unsigned block_start, block_end;
2047         int partial = 0;
2048         unsigned blocksize;
2049         struct buffer_head *bh, *head;
2050
2051         bh = head = page_buffers(page);
2052         blocksize = bh->b_size;
2053
2054         block_start = 0;
2055         do {
2056                 block_end = block_start + blocksize;
2057                 if (block_end <= from || block_start >= to) {
2058                         if (!buffer_uptodate(bh))
2059                                 partial = 1;
2060                 } else {
2061                         set_buffer_uptodate(bh);
2062                         mark_buffer_dirty(bh);
2063                 }
2064                 clear_buffer_new(bh);
2065
2066                 block_start = block_end;
2067                 bh = bh->b_this_page;
2068         } while (bh != head);
2069
2070         /*
2071          * If this is a partial write which happened to make all buffers
2072          * uptodate then we can optimize away a bogus readpage() for
2073          * the next read(). Here we 'discover' whether the page went
2074          * uptodate as a result of this (potentially partial) write.
2075          */
2076         if (!partial)
2077                 SetPageUptodate(page);
2078         return 0;
2079 }
2080
2081 /*
2082  * block_write_begin takes care of the basic task of block allocation and
2083  * bringing partial write blocks uptodate first.
2084  *
2085  * The filesystem needs to handle block truncation upon failure.
2086  */
2087 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2088                 unsigned flags, struct page **pagep, get_block_t *get_block)
2089 {
2090         pgoff_t index = pos >> PAGE_SHIFT;
2091         struct page *page;
2092         int status;
2093
2094         page = grab_cache_page_write_begin(mapping, index, flags);
2095         if (!page)
2096                 return -ENOMEM;
2097
2098         status = __block_write_begin(page, pos, len, get_block);
2099         if (unlikely(status)) {
2100                 unlock_page(page);
2101                 put_page(page);
2102                 page = NULL;
2103         }
2104
2105         *pagep = page;
2106         return status;
2107 }
2108 EXPORT_SYMBOL(block_write_begin);
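/*
 * Editor's sketch (not part of the original file): a typical ->write_begin
 * built directly on block_write_begin().  "exfs_get_block" and
 * "exfs_write_failed" are hypothetical; the latter stands in for the block
 * truncation the comment above says the filesystem must do on failure.
 */
#if 0   /* illustration only */
static int exfs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, flags, pagep,
                                exfs_get_block);
        if (unlikely(ret))
                exfs_write_failed(mapping, pos + len);
        return ret;
}
#endif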
2109
2110 int block_write_end(struct file *file, struct address_space *mapping,
2111                         loff_t pos, unsigned len, unsigned copied,
2112                         struct page *page, void *fsdata)
2113 {
2114         struct inode *inode = mapping->host;
2115         unsigned start;
2116
2117         start = pos & (PAGE_SIZE - 1);
2118
2119         if (unlikely(copied < len)) {
2120                 /*
2121                  * The buffers that were written will now be uptodate, so we
2122                  * don't have to worry about a readpage reading them and
2123                  * overwriting a partial write. However if we have encountered
2124                  * a short write and only partially written into a buffer, it
2125                  * will not be marked uptodate, so a readpage might come in and
2126                  * destroy our partial write.
2127                  *
2128                  * Do the simplest thing, and just treat any short write to a
2129                  * non uptodate page as a zero-length write, and force the
2130                  * caller to redo the whole thing.
2131                  */
2132                 if (!PageUptodate(page))
2133                         copied = 0;
2134
2135                 page_zero_new_buffers(page, start+copied, start+len);
2136         }
2137         flush_dcache_page(page);
2138
2139         /* This could be a short (even 0-length) commit */
2140         __block_commit_write(inode, page, start, start+copied);
2141
2142         return copied;
2143 }
2144 EXPORT_SYMBOL(block_write_end);
2145
2146 int generic_write_end(struct file *file, struct address_space *mapping,
2147                         loff_t pos, unsigned len, unsigned copied,
2148                         struct page *page, void *fsdata)
2149 {
2150         struct inode *inode = mapping->host;
2151         loff_t old_size = inode->i_size;
2152         int i_size_changed = 0;
2153
2154         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2155
2156         /*
2157          * No need to use i_size_read() here, the i_size
2158          * cannot change under us because we hold i_mutex.
2159          *
2160          * But it's important to update i_size while still holding page lock:
2161          * page writeout could otherwise come in and zero beyond i_size.
2162          */
2163         if (pos+copied > inode->i_size) {
2164                 i_size_write(inode, pos+copied);
2165                 i_size_changed = 1;
2166         }
2167
2168         unlock_page(page);
2169         put_page(page);
2170
2171         if (old_size < pos)
2172                 pagecache_isize_extended(inode, old_size, pos);
2173         /*
2174          * Don't mark the inode dirty under page lock. First, it unnecessarily
2175          * makes the holding time of page lock longer. Second, it forces lock
2176          * ordering of page lock and transaction start for journaling
2177          * filesystems.
2178          */
2179         if (i_size_changed)
2180                 mark_inode_dirty(inode);
2181
2182         return copied;
2183 }
2184 EXPORT_SYMBOL(generic_write_end);
2185
2186 /*
2187  * block_is_partially_uptodate checks whether buffers within a page are
2188  * uptodate or not.
2189  *
2190  * Returns true if all buffers which correspond to a file portion
2191  * we want to read are uptodate.
2192  */
2193 int block_is_partially_uptodate(struct page *page, unsigned long from,
2194                                         unsigned long count)
2195 {
2196         unsigned block_start, block_end, blocksize;
2197         unsigned to;
2198         struct buffer_head *bh, *head;
2199         int ret = 1;
2200
2201         if (!page_has_buffers(page))
2202                 return 0;
2203
2204         head = page_buffers(page);
2205         blocksize = head->b_size;
2206         to = min_t(unsigned, PAGE_SIZE - from, count);
2207         to = from + to;
2208         if (from < blocksize && to > PAGE_SIZE - blocksize)
2209                 return 0;
2210
2211         bh = head;
2212         block_start = 0;
2213         do {
2214                 block_end = block_start + blocksize;
2215                 if (block_end > from && block_start < to) {
2216                         if (!buffer_uptodate(bh)) {
2217                                 ret = 0;
2218                                 break;
2219                         }
2220                         if (block_end >= to)
2221                                 break;
2222                 }
2223                 block_start = block_end;
2224                 bh = bh->b_this_page;
2225         } while (bh != head);
2226
2227         return ret;
2228 }
2229 EXPORT_SYMBOL(block_is_partially_uptodate);
2230
2231 /*
2232  * Generic "read page" function for block devices that have the normal
2233  * get_block functionality. This covers most block-device-based filesystems.
2234  * Reads the page asynchronously --- the unlock_buffer() and
2235  * set/clear_buffer_uptodate() functions propagate buffer state into the
2236  * page struct once IO has completed.
2237  */
2238 int block_read_full_page(struct page *page, get_block_t *get_block)
2239 {
2240         struct inode *inode = page->mapping->host;
2241         sector_t iblock, lblock;
2242         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2243         unsigned int blocksize, bbits;
2244         int nr, i;
2245         int fully_mapped = 1;
2246
2247         head = create_page_buffers(page, inode, 0);
2248         blocksize = head->b_size;
2249         bbits = block_size_bits(blocksize);
2250
2251         iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
2252         lblock = (i_size_read(inode)+blocksize-1) >> bbits;
2253         bh = head;
2254         nr = 0;
2255         i = 0;
2256
2257         do {
2258                 if (buffer_uptodate(bh))
2259                         continue;
2260
2261                 if (!buffer_mapped(bh)) {
2262                         int err = 0;
2263
2264                         fully_mapped = 0;
2265                         if (iblock < lblock) {
2266                                 WARN_ON(bh->b_size != blocksize);
2267                                 err = get_block(inode, iblock, bh, 0);
2268                                 if (err)
2269                                         SetPageError(page);
2270                         }
2271                         if (!buffer_mapped(bh)) {
2272                                 zero_user(page, i * blocksize, blocksize);
2273                                 if (!err)
2274                                         set_buffer_uptodate(bh);
2275                                 continue;
2276                         }
2277                         /*
2278                          * get_block() might have updated the buffer
2279                          * synchronously
2280                          */
2281                         if (buffer_uptodate(bh))
2282                                 continue;
2283                 }
2284                 arr[nr++] = bh;
2285         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2286
2287         if (fully_mapped)
2288                 SetPageMappedToDisk(page);
2289
2290         if (!nr) {
2291                 /*
2292                  * All buffers are uptodate - we can set the page uptodate
2293                  * as well. But not if get_block() returned an error.
2294                  */
2295                 if (!PageError(page))
2296                         SetPageUptodate(page);
2297                 unlock_page(page);
2298                 return 0;
2299         }
2300
2301         /* Stage two: lock the buffers */
2302         for (i = 0; i < nr; i++) {
2303                 bh = arr[i];
2304                 lock_buffer(bh);
2305                 mark_buffer_async_read(bh);
2306         }
2307
2308         /*
2309          * Stage 3: start the IO.  Check for uptodateness
2310          * inside the buffer lock in case another process reading
2311          * the underlying blockdev brought it uptodate (the sct fix).
2312          */
2313         for (i = 0; i < nr; i++) {
2314                 bh = arr[i];
2315                 if (buffer_uptodate(bh))
2316                         end_buffer_async_read(bh, 1);
2317                 else
2318                         submit_bh(REQ_OP_READ, 0, bh);
2319         }
2320         return 0;
2321 }
2322 EXPORT_SYMBOL(block_read_full_page);
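/*
 * Editor's sketch (not part of the original file): for a get_block-based
 * filesystem, block_read_full_page() is usually the entire ->readpage, and
 * together with the write_begin/write_end helpers above it makes up the
 * standard buffer-head address_space_operations.  All "exfs_" names are
 * hypothetical; the other handlers are sketched near the corresponding
 * helpers elsewhere in this file.
 */
#if 0   /* illustration only */
static int exfs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, exfs_get_block);
}

static const struct address_space_operations exfs_aops = {
        .readpage               = exfs_readpage,
        .writepage              = exfs_writepage,
        .write_begin            = exfs_write_begin,
        .write_end              = generic_write_end,
        .bmap                   = exfs_bmap,
        .is_partially_uptodate  = block_is_partially_uptodate,
};
#endif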
2323
2324 /* utility function for filesystems that need to do work on expanding
2325  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2326  * deal with the hole.  
2327  */
2328 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2329 {
2330         struct address_space *mapping = inode->i_mapping;
2331         struct page *page;
2332         void *fsdata;
2333         int err;
2334
2335         err = inode_newsize_ok(inode, size);
2336         if (err)
2337                 goto out;
2338
2339         err = pagecache_write_begin(NULL, mapping, size, 0,
2340                                 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2341                                 &page, &fsdata);
2342         if (err)
2343                 goto out;
2344
2345         err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2346         BUG_ON(err > 0);
2347
2348 out:
2349         return err;
2350 }
2351 EXPORT_SYMBOL(generic_cont_expand_simple);
2352
2353 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2354                             loff_t pos, loff_t *bytes)
2355 {
2356         struct inode *inode = mapping->host;
2357         unsigned int blocksize = i_blocksize(inode);
2358         struct page *page;
2359         void *fsdata;
2360         pgoff_t index, curidx;
2361         loff_t curpos;
2362         unsigned zerofrom, offset, len;
2363         int err = 0;
2364
2365         index = pos >> PAGE_SHIFT;
2366         offset = pos & ~PAGE_MASK;
2367
2368         while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2369                 zerofrom = curpos & ~PAGE_MASK;
2370                 if (zerofrom & (blocksize-1)) {
2371                         *bytes |= (blocksize-1);
2372                         (*bytes)++;
2373                 }
2374                 len = PAGE_SIZE - zerofrom;
2375
2376                 err = pagecache_write_begin(file, mapping, curpos, len,
2377                                                 AOP_FLAG_UNINTERRUPTIBLE,
2378                                                 &page, &fsdata);
2379                 if (err)
2380                         goto out;
2381                 zero_user(page, zerofrom, len);
2382                 err = pagecache_write_end(file, mapping, curpos, len, len,
2383                                                 page, fsdata);
2384                 if (err < 0)
2385                         goto out;
2386                 BUG_ON(err != len);
2387                 err = 0;
2388
2389                 balance_dirty_pages_ratelimited(mapping);
2390
2391                 if (unlikely(fatal_signal_pending(current))) {
2392                         err = -EINTR;
2393                         goto out;
2394                 }
2395         }
2396
2397         /* page covers the boundary, find the boundary offset */
2398         if (index == curidx) {
2399                 zerofrom = curpos & ~PAGE_MASK;
2400                 /* if we will expand the thing last block will be filled */
2401                 if (offset <= zerofrom) {
2402                         goto out;
2403                 }
2404                 if (zerofrom & (blocksize-1)) {
2405                         *bytes |= (blocksize-1);
2406                         (*bytes)++;
2407                 }
2408                 len = offset - zerofrom;
2409
2410                 err = pagecache_write_begin(file, mapping, curpos, len,
2411                                                 AOP_FLAG_UNINTERRUPTIBLE,
2412                                                 &page, &fsdata);
2413                 if (err)
2414                         goto out;
2415                 zero_user(page, zerofrom, len);
2416                 err = pagecache_write_end(file, mapping, curpos, len, len,
2417                                                 page, fsdata);
2418                 if (err < 0)
2419                         goto out;
2420                 BUG_ON(err != len);
2421                 err = 0;
2422         }
2423 out:
2424         return err;
2425 }
2426
2427 /*
2428  * For moronic filesystems that do not allow holes in files.
2429  * We may have to extend the file.
2430  */
2431 int cont_write_begin(struct file *file, struct address_space *mapping,
2432                         loff_t pos, unsigned len, unsigned flags,
2433                         struct page **pagep, void **fsdata,
2434                         get_block_t *get_block, loff_t *bytes)
2435 {
2436         struct inode *inode = mapping->host;
2437         unsigned int blocksize = i_blocksize(inode);
2438         unsigned int zerofrom;
2439         int err;
2440
2441         err = cont_expand_zero(file, mapping, pos, bytes);
2442         if (err)
2443                 return err;
2444
2445         zerofrom = *bytes & ~PAGE_MASK;
2446         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2447                 *bytes |= (blocksize-1);
2448                 (*bytes)++;
2449         }
2450
2451         return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2452 }
2453 EXPORT_SYMBOL(cont_write_begin);
2454
2455 int block_commit_write(struct page *page, unsigned from, unsigned to)
2456 {
2457         struct inode *inode = page->mapping->host;
2458         __block_commit_write(inode, page, from, to);
2459         return 0;
2460 }
2461 EXPORT_SYMBOL(block_commit_write);
2462
2463 /*
2464  * block_page_mkwrite() is not allowed to change the file size as it gets
2465  * called from a page fault handler when a page is first dirtied. Hence we must
2466  * be careful to check for EOF conditions here. We set the page up correctly
2467  * for a written page which means we get ENOSPC checking when writing into
2468  * holes and correct delalloc and unwritten extent mapping on filesystems that
2469  * support these features.
2470  *
2471  * We are not allowed to take the i_mutex here so we have to play games to
2472  * protect against truncate races as the page could now be beyond EOF.  Because
2473  * truncate writes the inode size before removing pages, once we have the
2474  * page lock we can determine safely if the page is beyond EOF. If it is not
2475  * beyond EOF, then the page is guaranteed safe against truncation until we
2476  * unlock the page.
2477  *
2478  * Direct callers of this function should protect against filesystem freezing
2479  * using sb_start_pagefault() - sb_end_pagefault() functions.
2480  */
2481 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2482                          get_block_t get_block)
2483 {
2484         struct page *page = vmf->page;
2485         struct inode *inode = file_inode(vma->vm_file);
2486         unsigned long end;
2487         loff_t size;
2488         int ret;
2489
2490         lock_page(page);
2491         size = i_size_read(inode);
2492         if ((page->mapping != inode->i_mapping) ||
2493             (page_offset(page) > size)) {
2494                 /* We overload EFAULT to mean page got truncated */
2495                 ret = -EFAULT;
2496                 goto out_unlock;
2497         }
2498
2499         /* page is wholly or partially inside EOF */
2500         if (((page->index + 1) << PAGE_SHIFT) > size)
2501                 end = size & ~PAGE_MASK;
2502         else
2503                 end = PAGE_SIZE;
2504
2505         ret = __block_write_begin(page, 0, end, get_block);
2506         if (!ret)
2507                 ret = block_commit_write(page, 0, end);
2508
2509         if (unlikely(ret < 0))
2510                 goto out_unlock;
2511         set_page_dirty(page);
2512         wait_for_stable_page(page);
2513         return 0;
2514 out_unlock:
2515         unlock_page(page);
2516         return ret;
2517 }
2518 EXPORT_SYMBOL(block_page_mkwrite);
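/*
 * Editor's sketch (not part of the original file): a ->page_mkwrite handler
 * following the freeze-protection rule stated above, using the
 * block_page_mkwrite_return() helper from buffer_head.h to map the error
 * code to a VM_FAULT_* value.  "exfs_get_block" is hypothetical.
 */
#if 0   /* illustration only */
static int exfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;
        int ret;

        sb_start_pagefault(sb);
        ret = block_page_mkwrite(vma, vmf, exfs_get_block);
        sb_end_pagefault(sb);
        return block_page_mkwrite_return(ret);
}
#endif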
2519
2520 /*
2521  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2522  * immediately, while under the page lock.  So it needs a special end_io
2523  * handler which does not touch the bh after unlocking it.
2524  */
2525 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2526 {
2527         __end_buffer_read_notouch(bh, uptodate);
2528 }
2529
2530 /*
2531  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2532  * the page (converting it to circular linked list and taking care of page
2533  * dirty races).
2534  */
2535 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2536 {
2537         struct buffer_head *bh;
2538
2539         BUG_ON(!PageLocked(page));
2540
2541         spin_lock(&page->mapping->private_lock);
2542         bh = head;
2543         do {
2544                 if (PageDirty(page))
2545                         set_buffer_dirty(bh);
2546                 if (!bh->b_this_page)
2547                         bh->b_this_page = head;
2548                 bh = bh->b_this_page;
2549         } while (bh != head);
2550         attach_page_buffers(page, head);
2551         spin_unlock(&page->mapping->private_lock);
2552 }
2553
2554 /*
2555  * On entry, the page is fully not uptodate.
2556  * On exit the page is fully uptodate in the areas outside (from,to).
2557  * The filesystem needs to handle block truncation upon failure.
2558  */
2559 int nobh_write_begin(struct address_space *mapping,
2560                         loff_t pos, unsigned len, unsigned flags,
2561                         struct page **pagep, void **fsdata,
2562                         get_block_t *get_block)
2563 {
2564         struct inode *inode = mapping->host;
2565         const unsigned blkbits = inode->i_blkbits;
2566         const unsigned blocksize = 1 << blkbits;
2567         struct buffer_head *head, *bh;
2568         struct page *page;
2569         pgoff_t index;
2570         unsigned from, to;
2571         unsigned block_in_page;
2572         unsigned block_start, block_end;
2573         sector_t block_in_file;
2574         int nr_reads = 0;
2575         int ret = 0;
2576         int is_mapped_to_disk = 1;
2577
2578         index = pos >> PAGE_SHIFT;
2579         from = pos & (PAGE_SIZE - 1);
2580         to = from + len;
2581
2582         page = grab_cache_page_write_begin(mapping, index, flags);
2583         if (!page)
2584                 return -ENOMEM;
2585         *pagep = page;
2586         *fsdata = NULL;
2587
2588         if (page_has_buffers(page)) {
2589                 ret = __block_write_begin(page, pos, len, get_block);
2590                 if (unlikely(ret))
2591                         goto out_release;
2592                 return ret;
2593         }
2594
2595         if (PageMappedToDisk(page))
2596                 return 0;
2597
2598         /*
2599          * Allocate buffers so that we can keep track of state, and potentially
2600          * attach them to the page if an error occurs. In the common case of
2601          * no error, they will just be freed again without ever being attached
2602          * to the page (which is all OK, because we're under the page lock).
2603          *
2604          * Be careful: the buffer linked list is a NULL terminated one, rather
2605          * than the circular one we're used to.
2606          */
2607         head = alloc_page_buffers(page, blocksize, 0);
2608         if (!head) {
2609                 ret = -ENOMEM;
2610                 goto out_release;
2611         }
2612
2613         block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
2614
2615         /*
2616          * We loop across all blocks in the page, whether or not they are
2617          * part of the affected region.  This is so we can discover if the
2618          * page is fully mapped-to-disk.
2619          */
2620         for (block_start = 0, block_in_page = 0, bh = head;
2621                   block_start < PAGE_SIZE;
2622                   block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2623                 int create;
2624
2625                 block_end = block_start + blocksize;
2626                 bh->b_state = 0;
2627                 create = 1;
2628                 if (block_start >= to)
2629                         create = 0;
2630                 ret = get_block(inode, block_in_file + block_in_page,
2631                                         bh, create);
2632                 if (ret)
2633                         goto failed;
2634                 if (!buffer_mapped(bh))
2635                         is_mapped_to_disk = 0;
2636                 if (buffer_new(bh))
2637                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2638                 if (PageUptodate(page)) {
2639                         set_buffer_uptodate(bh);
2640                         continue;
2641                 }
2642                 if (buffer_new(bh) || !buffer_mapped(bh)) {
2643                         zero_user_segments(page, block_start, from,
2644                                                         to, block_end);
2645                         continue;
2646                 }
2647                 if (buffer_uptodate(bh))
2648                         continue;       /* reiserfs does this */
2649                 if (block_start < from || block_end > to) {
2650                         lock_buffer(bh);
2651                         bh->b_end_io = end_buffer_read_nobh;
2652                         submit_bh(REQ_OP_READ, 0, bh);
2653                         nr_reads++;
2654                 }
2655         }
2656
2657         if (nr_reads) {
2658                 /*
2659                  * The page is locked, so these buffers are protected from
2660                  * any VM or truncate activity.  Hence we don't need to care
2661                  * for the buffer_head refcounts.
2662                  */
2663                 for (bh = head; bh; bh = bh->b_this_page) {
2664                         wait_on_buffer(bh);
2665                         if (!buffer_uptodate(bh))
2666                                 ret = -EIO;
2667                 }
2668                 if (ret)
2669                         goto failed;
2670         }
2671
2672         if (is_mapped_to_disk)
2673                 SetPageMappedToDisk(page);
2674
2675         *fsdata = head; /* to be released by nobh_write_end */
2676
2677         return 0;
2678
2679 failed:
2680         BUG_ON(!ret);
2681         /*
2682          * Error recovery is a bit difficult. We need to zero out blocks that
2683          * were newly allocated, and dirty them to ensure they get written out.
2684          * Buffers need to be attached to the page at this point, otherwise
2685          * the handling of potential IO errors during writeout would be hard
2686          * (could try doing synchronous writeout, but what if that fails too?)
2687          */
2688         attach_nobh_buffers(page, head);
2689         page_zero_new_buffers(page, from, to);
2690
2691 out_release:
2692         unlock_page(page);
2693         put_page(page);
2694         *pagep = NULL;
2695
2696         return ret;
2697 }
2698 EXPORT_SYMBOL(nobh_write_begin);
2699
2700 int nobh_write_end(struct file *file, struct address_space *mapping,
2701                         loff_t pos, unsigned len, unsigned copied,
2702                         struct page *page, void *fsdata)
2703 {
2704         struct inode *inode = page->mapping->host;
2705         struct buffer_head *head = fsdata;
2706         struct buffer_head *bh;
2707         BUG_ON(fsdata != NULL && page_has_buffers(page));
2708
2709         if (unlikely(copied < len) && head)
2710                 attach_nobh_buffers(page, head);
2711         if (page_has_buffers(page))
2712                 return generic_write_end(file, mapping, pos, len,
2713                                         copied, page, fsdata);
2714
2715         SetPageUptodate(page);
2716         set_page_dirty(page);
2717         if (pos+copied > inode->i_size) {
2718                 i_size_write(inode, pos+copied);
2719                 mark_inode_dirty(inode);
2720         }
2721
2722         unlock_page(page);
2723         put_page(page);
2724
2725         while (head) {
2726                 bh = head;
2727                 head = head->b_this_page;
2728                 free_buffer_head(bh);
2729         }
2730
2731         return copied;
2732 }
2733 EXPORT_SYMBOL(nobh_write_end);
2734
2735 /*
2736  * nobh_writepage() - based on block_write_full_page() except
2737  * that it tries to operate without attaching bufferheads to
2738  * the page.
2739  */
2740 int nobh_writepage(struct page *page, get_block_t *get_block,
2741                         struct writeback_control *wbc)
2742 {
2743         struct inode * const inode = page->mapping->host;
2744         loff_t i_size = i_size_read(inode);
2745         const pgoff_t end_index = i_size >> PAGE_SHIFT;
2746         unsigned offset;
2747         int ret;
2748
2749         /* Is the page fully inside i_size? */
2750         if (page->index < end_index)
2751                 goto out;
2752
2753         /* Is the page fully outside i_size? (truncate in progress) */
2754         offset = i_size & (PAGE_SIZE-1);
2755         if (page->index >= end_index+1 || !offset) {
2756                 /*
2757                  * The page may have dirty, unmapped buffers.  For example,
2758                  * they may have been added in ext3_writepage().  Make them
2759                  * freeable here, so the page does not leak.
2760                  */
2761 #if 0
2762                 /* Not really sure about this - do we need this? */
2763                 if (page->mapping->a_ops->invalidatepage)
2764                         page->mapping->a_ops->invalidatepage(page, offset);
2765 #endif
2766                 unlock_page(page);
2767                 return 0; /* don't care */
2768         }
2769
2770         /*
2771          * The page straddles i_size.  It must be zeroed out on each and every
2772          * writepage invocation because it may be mmapped.  "A file is mapped
2773          * in multiples of the page size.  For a file that is not a multiple of
2774          * the  page size, the remaining memory is zeroed when mapped, and
2775          * writes to that region are not written out to the file."
2776          */
2777         zero_user_segment(page, offset, PAGE_SIZE);
2778 out:
2779         ret = mpage_writepage(page, get_block, wbc);
2780         if (ret == -EAGAIN)
2781                 ret = __block_write_full_page(inode, page, get_block, wbc,
2782                                               end_buffer_async_write);
2783         return ret;
2784 }
2785 EXPORT_SYMBOL(nobh_writepage);
2786
2787 int nobh_truncate_page(struct address_space *mapping,
2788                         loff_t from, get_block_t *get_block)
2789 {
2790         pgoff_t index = from >> PAGE_SHIFT;
2791         unsigned offset = from & (PAGE_SIZE-1);
2792         unsigned blocksize;
2793         sector_t iblock;
2794         unsigned length, pos;
2795         struct inode *inode = mapping->host;
2796         struct page *page;
2797         struct buffer_head map_bh;
2798         int err;
2799
2800         blocksize = i_blocksize(inode);
2801         length = offset & (blocksize - 1);
2802
2803         /* Block boundary? Nothing to do */
2804         if (!length)
2805                 return 0;
2806
2807         length = blocksize - length;
2808         iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2809
2810         page = grab_cache_page(mapping, index);
2811         err = -ENOMEM;
2812         if (!page)
2813                 goto out;
2814
2815         if (page_has_buffers(page)) {
2816 has_buffers:
2817                 unlock_page(page);
2818                 put_page(page);
2819                 return block_truncate_page(mapping, from, get_block);
2820         }
2821
2822         /* Find the buffer that contains "offset" */
2823         pos = blocksize;
2824         while (offset >= pos) {
2825                 iblock++;
2826                 pos += blocksize;
2827         }
2828
2829         map_bh.b_size = blocksize;
2830         map_bh.b_state = 0;
2831         err = get_block(inode, iblock, &map_bh, 0);
2832         if (err)
2833                 goto unlock;
2834         /* unmapped? It's a hole - nothing to do */
2835         if (!buffer_mapped(&map_bh))
2836                 goto unlock;
2837
2838         /* Ok, it's mapped. Make sure it's up-to-date */
2839         if (!PageUptodate(page)) {
2840                 err = mapping->a_ops->readpage(NULL, page);
2841                 if (err) {
2842                         put_page(page);
2843                         goto out;
2844                 }
2845                 lock_page(page);
2846                 if (!PageUptodate(page)) {
2847                         err = -EIO;
2848                         goto unlock;
2849                 }
2850                 if (page_has_buffers(page))
2851                         goto has_buffers;
2852         }
2853         zero_user(page, offset, length);
2854         set_page_dirty(page);
2855         err = 0;
2856
2857 unlock:
2858         unlock_page(page);
2859         put_page(page);
2860 out:
2861         return err;
2862 }
2863 EXPORT_SYMBOL(nobh_truncate_page);
2864
2865 int block_truncate_page(struct address_space *mapping,
2866                         loff_t from, get_block_t *get_block)
2867 {
2868         pgoff_t index = from >> PAGE_SHIFT;
2869         unsigned offset = from & (PAGE_SIZE-1);
2870         unsigned blocksize;
2871         sector_t iblock;
2872         unsigned length, pos;
2873         struct inode *inode = mapping->host;
2874         struct page *page;
2875         struct buffer_head *bh;
2876         int err;
2877
2878         blocksize = i_blocksize(inode);
2879         length = offset & (blocksize - 1);
2880
2881         /* Block boundary? Nothing to do */
2882         if (!length)
2883                 return 0;
2884
2885         length = blocksize - length;
2886         iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2887         
2888         page = grab_cache_page(mapping, index);
2889         err = -ENOMEM;
2890         if (!page)
2891                 goto out;
2892
2893         if (!page_has_buffers(page))
2894                 create_empty_buffers(page, blocksize, 0);
2895
2896         /* Find the buffer that contains "offset" */
2897         bh = page_buffers(page);
2898         pos = blocksize;
2899         while (offset >= pos) {
2900                 bh = bh->b_this_page;
2901                 iblock++;
2902                 pos += blocksize;
2903         }
2904
2905         err = 0;
2906         if (!buffer_mapped(bh)) {
2907                 WARN_ON(bh->b_size != blocksize);
2908                 err = get_block(inode, iblock, bh, 0);
2909                 if (err)
2910                         goto unlock;
2911                 /* unmapped? It's a hole - nothing to do */
2912                 if (!buffer_mapped(bh))
2913                         goto unlock;
2914         }
2915
2916         /* Ok, it's mapped. Make sure it's up-to-date */
2917         if (PageUptodate(page))
2918                 set_buffer_uptodate(bh);
2919
2920         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2921                 err = -EIO;
2922                 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
2923                 wait_on_buffer(bh);
2924                 /* Uhhuh. Read error. Complain and punt. */
2925                 if (!buffer_uptodate(bh))
2926                         goto unlock;
2927         }
2928
2929         zero_user(page, offset, length);
2930         mark_buffer_dirty(bh);
2931         err = 0;
2932
2933 unlock:
2934         unlock_page(page);
2935         put_page(page);
2936 out:
2937         return err;
2938 }
2939 EXPORT_SYMBOL(block_truncate_page);
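/*
 * Editor's sketch (not part of the original file): block_truncate_page() is
 * normally called from a filesystem's truncate path to zero the tail of the
 * last remaining block ("exfs_" names are hypothetical):
 */
#if 0   /* illustration only */
static void exfs_truncate(struct inode *inode)
{
        block_truncate_page(inode->i_mapping, inode->i_size, exfs_get_block);
        /* ... then free the blocks beyond the new i_size ... */
}
#endif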
2940
2941 /*
2942  * The generic ->writepage function for buffer-backed address_spaces
2943  */
2944 int block_write_full_page(struct page *page, get_block_t *get_block,
2945                         struct writeback_control *wbc)
2946 {
2947         struct inode * const inode = page->mapping->host;
2948         loff_t i_size = i_size_read(inode);
2949         const pgoff_t end_index = i_size >> PAGE_SHIFT;
2950         unsigned offset;
2951
2952         /* Is the page fully inside i_size? */
2953         if (page->index < end_index)
2954                 return __block_write_full_page(inode, page, get_block, wbc,
2955                                                end_buffer_async_write);
2956
2957         /* Is the page fully outside i_size? (truncate in progress) */
2958         offset = i_size & (PAGE_SIZE-1);
2959         if (page->index >= end_index+1 || !offset) {
2960                 /*
2961                  * The page may have dirty, unmapped buffers.  For example,
2962                  * they may have been added in ext3_writepage().  Make them
2963                  * freeable here, so the page does not leak.
2964                  */
2965                 do_invalidatepage(page, 0, PAGE_SIZE);
2966                 unlock_page(page);
2967                 return 0; /* don't care */
2968         }
2969
2970         /*
2971          * The page straddles i_size.  It must be zeroed out on each and every
2972          * writepage invocation because it may be mmapped.  "A file is mapped
2973          * in multiples of the page size.  For a file that is not a multiple of
2974          * the  page size, the remaining memory is zeroed when mapped, and
2975          * writes to that region are not written out to the file."
2976          */
2977         zero_user_segment(page, offset, PAGE_SIZE);
2978         return __block_write_full_page(inode, page, get_block, wbc,
2979                                                         end_buffer_async_write);
2980 }
2981 EXPORT_SYMBOL(block_write_full_page);
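/*
 * Illustrative sketch, not part of the original source: a filesystem
 * that relies on the generic buffer-head path usually just wraps this
 * helper in its address_space_operations ->writepage.  "myfs_get_block"
 * is a hypothetical get_block_t.
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */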
2982
2983 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2984                             get_block_t *get_block)
2985 {
2986         struct buffer_head tmp;
2987         struct inode *inode = mapping->host;
2988         tmp.b_state = 0;
2989         tmp.b_blocknr = 0;
2990         tmp.b_size = i_blocksize(inode);
2991         get_block(inode, block, &tmp, 0);
2992         return tmp.b_blocknr;
2993 }
2994 EXPORT_SYMBOL(generic_block_bmap);
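/*
 * Illustrative sketch, not part of the original source: this helper is
 * normally exposed through the address_space_operations ->bmap hook so
 * that FIBMAP can map a file-relative block to a disk block.
 * "myfs_get_block" is a hypothetical get_block_t.
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */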
2995
2996 static void end_bio_bh_io_sync(struct bio *bio)
2997 {
2998         struct buffer_head *bh = bio->bi_private;
2999
3000         if (unlikely(bio_flagged(bio, BIO_QUIET)))
3001                 set_bit(BH_Quiet, &bh->b_state);
3002
3003         bh->b_end_io(bh, !bio->bi_error);
3004         bio_put(bio);
3005 }
3006
3007 /*
3008  * This allows us to do IO even on the odd last sectors
3009  * of a device, even if the block size is some multiple
3010  * of the physical sector size.
3011  *
3012  * We'll just truncate the bio to the size of the device,
3013  * and clear the end of the buffer head manually.
3014  *
3015  * Truly out-of-range accesses will turn into actual IO
3016  * errors; this only handles the "we need to be able to
3017  * do IO at the final sector" case.
3018  */
3019 void guard_bio_eod(int op, struct bio *bio)
3020 {
3021         sector_t maxsector;
3022         struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
3023         unsigned truncated_bytes;
3024
3025         maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
3026         if (!maxsector)
3027                 return;
3028
3029         /*
3030          * If the *whole* IO is past the end of the device,
3031          * let it through, and the IO layer will turn it into
3032          * an EIO.
3033          */
3034         if (unlikely(bio->bi_iter.bi_sector >= maxsector))
3035                 return;
3036
3037         maxsector -= bio->bi_iter.bi_sector;
3038         if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
3039                 return;
3040
3041         /* Uhhuh. We've got a bio that straddles the device size! */
3042         truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
3043
3044         /*
3045          * More than one segment of the bio spans EOD; just return and let
3046          * the IO layer turn it into an EIO
3047          */
3048         if (truncated_bytes > bvec->bv_len)
3049                 return;
3050
3051         /* Truncate the bio.. */
3052         bio->bi_iter.bi_size -= truncated_bytes;
3053         bvec->bv_len -= truncated_bytes;
3054
3055         /* ..and clear the end of the buffer for reads */
3056         if (op == REQ_OP_READ) {
3057                 zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
3058                                 truncated_bytes);
3059         }
3060 }
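/*
 * Worked example (illustrative, not from the original source): a
 * 4096-byte buffer_head whose block hangs off the end of a device of
 * 10239 512-byte sectors generates a bio starting at sector 10232 that
 * asks for 8 sectors.  Only maxsector - bi_sector = 7 sectors exist, so
 * truncated_bytes = 4096 - (7 << 9) = 512: the bio and its final bvec
 * are each shrunk by 512 bytes, and for a read the last 512 bytes of
 * the page are zeroed instead of being read from the device.
 */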
3061
3062 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
3063                          unsigned long bio_flags, struct writeback_control *wbc)
3064 {
3065         struct bio *bio;
3066
3067         BUG_ON(!buffer_locked(bh));
3068         BUG_ON(!buffer_mapped(bh));
3069         BUG_ON(!bh->b_end_io);
3070         BUG_ON(buffer_delay(bh));
3071         BUG_ON(buffer_unwritten(bh));
3072
3073         /*
3074          * Only clear out a write error when rewriting
3075          */
3076         if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
3077                 clear_buffer_write_io_error(bh);
3078
3079         /*
3080          * from here on down, it's all bio -- do the initial mapping,
3081          * submit_bio -> generic_make_request may further map this bio around
3082          */
3083         bio = bio_alloc(GFP_NOIO, 1);
3084
3085         if (wbc) {
3086                 wbc_init_bio(wbc, bio);
3087                 wbc_account_io(wbc, bh->b_page, bh->b_size);
3088         }
3089
3090         bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3091         bio->bi_bdev = bh->b_bdev;
3092
3093         bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
3094         BUG_ON(bio->bi_iter.bi_size != bh->b_size);
3095
3096         bio->bi_end_io = end_bio_bh_io_sync;
3097         bio->bi_private = bh;
3098         bio->bi_flags |= bio_flags;
3099
3100         /* Take care of bh's that straddle the end of the device */
3101         guard_bio_eod(op, bio);
3102
3103         if (buffer_meta(bh))
3104                 op_flags |= REQ_META;
3105         if (buffer_prio(bh))
3106                 op_flags |= REQ_PRIO;
3107         bio_set_op_attrs(bio, op, op_flags);
3108
3109         submit_bio(bio);
3110         return 0;
3111 }
3112
3113 int _submit_bh(int op, int op_flags, struct buffer_head *bh,
3114                unsigned long bio_flags)
3115 {
3116         return submit_bh_wbc(op, op_flags, bh, bio_flags, NULL);
3117 }
3118 EXPORT_SYMBOL_GPL(_submit_bh);
3119
3120 int submit_bh(int op, int op_flags, struct buffer_head *bh)
3121 {
3122         return submit_bh_wbc(op, op_flags, bh, 0, NULL);
3123 }
3124 EXPORT_SYMBOL(submit_bh);
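/*
 * Illustrative sketch, not part of the original source: the usual
 * pattern for driving a single buffer through submit_bh() (essentially
 * what ll_rw_block() and bh_submit_read() below do) looks like:
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh)) {
 *		unlock_buffer(bh);
 *	} else {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(REQ_OP_READ, 0, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			err = -EIO;
 *	}
 *
 * The completion handler unlocks the buffer and drops the reference
 * taken with get_bh(), so nothing further is needed on the submit side.
 */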
3125
3126 /**
3127  * ll_rw_block: low-level access to block devices (DEPRECATED)
3128  * @op: whether to %REQ_OP_READ or %REQ_OP_WRITE
3129  * @op_flags: rq_flag_bits
3130  * @nr: number of &struct buffer_heads in the array
3131  * @bhs: array of pointers to &struct buffer_head
3132  *
3133  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3134  * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
3135  * @op_flags contains flags modifying the detailed I/O behavior, most notably
3136  * %REQ_RAHEAD.
3137  *
3138  * This function drops any buffer that it cannot get a lock on (with the
3139  * BH_Lock state bit), any buffer that appears to be clean when doing a write
3140  * request, and any buffer that appears to be up-to-date when doing a read
3141  * request.  Further it marks as clean buffers that are processed for
3142  * writing (the buffer cache won't assume that they are actually clean
3143  * until the buffer gets unlocked).
3144  *
3145  * ll_rw_block sets b_end_io to a simple completion handler that marks
3146  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3147  * any waiters.
3148  *
3149  * All of the buffers must be for the same device, and their size must be a
3150  * multiple of the currently approved block size for the device.
3151  */
3152 void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
3153 {
3154         int i;
3155
3156         for (i = 0; i < nr; i++) {
3157                 struct buffer_head *bh = bhs[i];
3158
3159                 if (!trylock_buffer(bh))
3160                         continue;
3161                 if (op == WRITE) {
3162                         if (test_clear_buffer_dirty(bh)) {
3163                                 bh->b_end_io = end_buffer_write_sync;
3164                                 get_bh(bh);
3165                                 submit_bh(op, op_flags, bh);
3166                                 continue;
3167                         }
3168                 } else {
3169                         if (!buffer_uptodate(bh)) {
3170                                 bh->b_end_io = end_buffer_read_sync;
3171                                 get_bh(bh);
3172                                 submit_bh(op, op_flags, bh);
3173                                 continue;
3174                         }
3175                 }
3176                 unlock_buffer(bh);
3177         }
3178 }
3179 EXPORT_SYMBOL(ll_rw_block);
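/*
 * Illustrative sketch, not part of the original source: a caller
 * typically grabs the buffer_heads it is interested in and lets
 * ll_rw_block() start readahead on whichever ones are not already up to
 * date ("bdev", "first_block" and "blocksize" are hypothetical values):
 *
 *	struct buffer_head *bhs[4];
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		bhs[i] = __getblk(bdev, first_block + i, blocksize);
 *	ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 4, bhs);
 *	for (i = 0; i < 4; i++)
 *		brelse(bhs[i]);
 *
 * Because buffers that cannot be locked are silently skipped, a caller
 * that must actually see the data still has to lock each buffer and
 * check buffer_uptodate() (or use bh_submit_read()) afterwards.
 */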
3180
3181 void write_dirty_buffer(struct buffer_head *bh, int op_flags)
3182 {
3183         lock_buffer(bh);
3184         if (!test_clear_buffer_dirty(bh)) {
3185                 unlock_buffer(bh);
3186                 return;
3187         }
3188         bh->b_end_io = end_buffer_write_sync;
3189         get_bh(bh);
3190         submit_bh(REQ_OP_WRITE, op_flags, bh);
3191 }
3192 EXPORT_SYMBOL(write_dirty_buffer);
3193
3194 /*
3195  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3196  * and then start new I/O and then wait upon it.  The caller must have a ref on
3197  * the buffer_head.
3198  */
3199 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
3200 {
3201         int ret = 0;
3202
3203         WARN_ON(atomic_read(&bh->b_count) < 1);
3204         lock_buffer(bh);
3205         if (test_clear_buffer_dirty(bh)) {
3206                 get_bh(bh);
3207                 bh->b_end_io = end_buffer_write_sync;
3208                 ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
3209                 wait_on_buffer(bh);
3210                 if (!ret && !buffer_uptodate(bh))
3211                         ret = -EIO;
3212         } else {
3213                 unlock_buffer(bh);
3214         }
3215         return ret;
3216 }
3217 EXPORT_SYMBOL(__sync_dirty_buffer);
3218
3219 int sync_dirty_buffer(struct buffer_head *bh)
3220 {
3221         return __sync_dirty_buffer(bh, WRITE_SYNC);
3222 }
3223 EXPORT_SYMBOL(sync_dirty_buffer);
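/*
 * Illustrative sketch, not part of the original source: synchronously
 * updating a single metadata block, e.g. a superblock copy ("sb" and
 * "blocknr" are hypothetical):
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	... modify bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 *
 * sync_dirty_buffer() waits for the write and returns 0 on success, or
 * -EIO if the buffer did not come back up to date.
 */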
3224
3225 /*
3226  * try_to_free_buffers() checks if all the buffers on this particular page
3227  * are unused, and releases them if so.
3228  *
3229  * Exclusion against try_to_free_buffers may be obtained by either
3230  * locking the page or by holding its mapping's private_lock.
3231  *
3232  * If the page is dirty but all the buffers are clean then we need to
3233  * be sure to mark the page clean as well.  This is because the page
3234  * may be against a block device, and a later reattachment of buffers
3235  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3236  * filesystem data on the same device.
3237  *
3238  * The same applies to regular filesystem pages: if all the buffers are
3239  * clean then we set the page clean and proceed.  To do that, we require
3240  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3241  * private_lock.
3242  *
3243  * try_to_free_buffers() is non-blocking.
3244  */
3245 static inline int buffer_busy(struct buffer_head *bh)
3246 {
3247         return atomic_read(&bh->b_count) |
3248                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3249 }
3250
3251 static int
3252 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3253 {
3254         struct buffer_head *head = page_buffers(page);
3255         struct buffer_head *bh;
3256
3257         bh = head;
3258         do {
3259                 if (buffer_write_io_error(bh) && page->mapping)
3260                         mapping_set_error(page->mapping, -EIO);
3261                 if (buffer_busy(bh))
3262                         goto failed;
3263                 bh = bh->b_this_page;
3264         } while (bh != head);
3265
3266         do {
3267                 struct buffer_head *next = bh->b_this_page;
3268
3269                 if (bh->b_assoc_map)
3270                         __remove_assoc_queue(bh);
3271                 bh = next;
3272         } while (bh != head);
3273         *buffers_to_free = head;
3274         __clear_page_buffers(page);
3275         return 1;
3276 failed:
3277         return 0;
3278 }
3279
3280 int try_to_free_buffers(struct page *page)
3281 {
3282         struct address_space * const mapping = page->mapping;
3283         struct buffer_head *buffers_to_free = NULL;
3284         int ret = 0;
3285
3286         BUG_ON(!PageLocked(page));
3287         if (PageWriteback(page))
3288                 return 0;
3289
3290         if (mapping == NULL) {          /* can this still happen? */
3291                 ret = drop_buffers(page, &buffers_to_free);
3292                 goto out;
3293         }
3294
3295         spin_lock(&mapping->private_lock);
3296         ret = drop_buffers(page, &buffers_to_free);
3297
3298         /*
3299          * If the filesystem writes its buffers by hand (eg ext3)
3300          * then we can have clean buffers against a dirty page.  We
3301          * clean the page here; otherwise the VM will never notice
3302          * that the filesystem did any IO at all.
3303          *
3304          * Also, during truncate, discard_buffer will have marked all
3305          * the page's buffers clean.  We discover that here and clean
3306          * the page also.
3307          *
3308          * private_lock must be held over this entire operation in order
3309          * to synchronise against __set_page_dirty_buffers and prevent the
3310          * dirty bit from being lost.
3311          */
3312         if (ret)
3313                 cancel_dirty_page(page);
3314         spin_unlock(&mapping->private_lock);
3315 out:
3316         if (buffers_to_free) {
3317                 struct buffer_head *bh = buffers_to_free;
3318
3319                 do {
3320                         struct buffer_head *next = bh->b_this_page;
3321                         free_buffer_head(bh);
3322                         bh = next;
3323                 } while (bh != buffers_to_free);
3324         }
3325         return ret;
3326 }
3327 EXPORT_SYMBOL(try_to_free_buffers);
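/*
 * Illustrative sketch, not part of the original source:
 * try_to_release_page() falls back to this function when a filesystem
 * supplies no ->releasepage, and a filesystem that keeps no extra
 * per-page state can also call it directly from its own hook:
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 *
 * The VM calls ->releasepage with the page locked, which provides the
 * exclusion described in the comment above.
 */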
3328
3329 /*
3330  * There are no bdflush tunables left.  But distributions are
3331  * still running obsolete flush daemons, so we terminate them here.
3332  *
3333  * Use of bdflush() is deprecated and will be removed in a future kernel.
3334  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3335  */
3336 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3337 {
3338         static int msg_count;
3339
3340         if (!capable(CAP_SYS_ADMIN))
3341                 return -EPERM;
3342
3343         if (msg_count < 5) {
3344                 msg_count++;
3345                 printk(KERN_INFO
3346                         "warning: process `%s' used the obsolete bdflush"
3347                         " system call\n", current->comm);
3348                 printk(KERN_INFO "Fix your initscripts?\n");
3349         }
3350
3351         if (func == 1)
3352                 do_exit(0);
3353         return 0;
3354 }
3355
3356 /*
3357  * Buffer-head allocation
3358  */
3359 static struct kmem_cache *bh_cachep __read_mostly;
3360
3361 /*
3362  * Once the number of bh's in the machine exceeds this level, we start
3363  * stripping them in writeback.
3364  */
3365 static unsigned long max_buffer_heads;
3366
3367 int buffer_heads_over_limit;
3368
3369 struct bh_accounting {
3370         int nr;                 /* Number of live bh's */
3371         int ratelimit;          /* Limit cacheline bouncing */
3372 };
3373
3374 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3375
3376 static void recalc_bh_state(void)
3377 {
3378         int i;
3379         int tot = 0;
3380
3381         if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3382                 return;
3383         __this_cpu_write(bh_accounting.ratelimit, 0);
3384         for_each_online_cpu(i)
3385                 tot += per_cpu(bh_accounting, i).nr;
3386         buffer_heads_over_limit = (tot > max_buffer_heads);
3387 }
3388
3389 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3390 {
3391         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3392         if (ret) {
3393                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3394                 preempt_disable();
3395                 __this_cpu_inc(bh_accounting.nr);
3396                 recalc_bh_state();
3397                 preempt_enable();
3398         }
3399         return ret;
3400 }
3401 EXPORT_SYMBOL(alloc_buffer_head);
3402
3403 void free_buffer_head(struct buffer_head *bh)
3404 {
3405         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3406         kmem_cache_free(bh_cachep, bh);
3407         preempt_disable();
3408         __this_cpu_dec(bh_accounting.nr);
3409         recalc_bh_state();
3410         preempt_enable();
3411 }
3412 EXPORT_SYMBOL(free_buffer_head);
3413
3414 static void buffer_exit_cpu(int cpu)
3415 {
3416         int i;
3417         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3418
3419         for (i = 0; i < BH_LRU_SIZE; i++) {
3420                 brelse(b->bhs[i]);
3421                 b->bhs[i] = NULL;
3422         }
3423         this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3424         per_cpu(bh_accounting, cpu).nr = 0;
3425 }
3426
3427 static int buffer_cpu_notify(struct notifier_block *self,
3428                               unsigned long action, void *hcpu)
3429 {
3430         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3431                 buffer_exit_cpu((unsigned long)hcpu);
3432         return NOTIFY_OK;
3433 }
3434
3435 /**
3436  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3437  * @bh: struct buffer_head
3438  *
3439  * Return true if the buffer is up-to-date; otherwise return false
3440  * with the buffer locked.
3441  */
3442 int bh_uptodate_or_lock(struct buffer_head *bh)
3443 {
3444         if (!buffer_uptodate(bh)) {
3445                 lock_buffer(bh);
3446                 if (!buffer_uptodate(bh))
3447                         return 0;
3448                 unlock_buffer(bh);
3449         }
3450         return 1;
3451 }
3452 EXPORT_SYMBOL(bh_uptodate_or_lock);
3453
3454 /**
3455  * bh_submit_read - Submit a locked buffer for reading
3456  * @bh: struct buffer_head
3457  *
3458  * Returns zero on success and -EIO on error.
3459  */
3460 int bh_submit_read(struct buffer_head *bh)
3461 {
3462         BUG_ON(!buffer_locked(bh));
3463
3464         if (buffer_uptodate(bh)) {
3465                 unlock_buffer(bh);
3466                 return 0;
3467         }
3468
3469         get_bh(bh);
3470         bh->b_end_io = end_buffer_read_sync;
3471         submit_bh(REQ_OP_READ, 0, bh);
3472         wait_on_buffer(bh);
3473         if (buffer_uptodate(bh))
3474                 return 0;
3475         return -EIO;
3476 }
3477 EXPORT_SYMBOL(bh_submit_read);
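/*
 * Illustrative sketch, not part of the original source: callers
 * commonly pair the two helpers above so that the already-up-to-date
 * case stays cheap and only a stale buffer pays for a read:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh))
 *			return -EIO;
 *	}
 *
 * On the slow path bh_uptodate_or_lock() returns with the buffer
 * locked, bh_submit_read() consumes that lock, waits for the read and
 * returns 0 on success; bh->b_data is then valid on every path that
 * does not bail out with -EIO.
 */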
3478
3479 void __init buffer_init(void)
3480 {
3481         unsigned long nrpages;
3482
3483         bh_cachep = kmem_cache_create("buffer_head",
3484                         sizeof(struct buffer_head), 0,
3485                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3486                                 SLAB_MEM_SPREAD),
3487                                 NULL);
3488
3489         /*
3490          * Limit the bh occupancy to 10% of ZONE_NORMAL
3491          */
3492         nrpages = (nr_free_buffer_pages() * 10) / 100;
3493         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3494         hotcpu_notifier(buffer_cpu_notify, 0);
3495 }
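/*
 * Worked example (illustrative, not from the original source): with
 * 4096-byte pages and a struct buffer_head of roughly 100 bytes on a
 * 64-bit build, PAGE_SIZE / sizeof(struct buffer_head) is about 40.  A
 * machine with 1,000,000 free buffer pages therefore gets nrpages =
 * 100,000 and max_buffer_heads of roughly 4,000,000 before
 * buffer_heads_over_limit trips and writeback starts stripping buffer
 * heads.
 */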