/*
 * fs/btrfs/inode.c, from sagit-ice-cold/kernel_xiaomi_msm8998.git
 * (commit subject: "Btrfs: check btree node's nritems")
 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"
#include "qgroup.h"

struct btrfs_iget_args {
        struct btrfs_key *location;
        struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
};
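/*
 * The table above maps the S_IFMT bits of an inode mode to the on-disk
 * directory entry type.  S_IFMT occupies the top four bits of the
 * 16-bit mode, so shifting right by S_SHIFT (12) turns, for example,
 * S_IFDIR (0040000) into index 4, which the designated initializer
 * fills with BTRFS_FT_DIR.
 */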

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
                                           u64 len, u64 orig_start,
                                           u64 block_start, u64 block_len,
                                           u64 orig_block_len, u64 ram_bytes,
                                           int type);

static int btrfs_dirty_inode(struct inode *inode);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode,  struct inode *dir,
                                     const struct qstr *qstr)
{
        int err;

        err = btrfs_init_acl(trans, inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(trans, inode, dir, qstr);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_path *path, int extent_inserted,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                int compress_type,
                                struct page **compressed_pages)
{
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        unsigned long offset;

        if (compressed_size && compressed_pages)
                cur_size = compressed_size;

        inode_add_bytes(inode, size);

        if (!extent_inserted) {
                struct btrfs_key key;
                size_t datasize;

                key.objectid = btrfs_ino(inode);
                key.offset = start;
                key.type = BTRFS_EXTENT_DATA_KEY;

                datasize = btrfs_file_extent_calc_inline_size(cur_size);
                path->leave_spinning = 1;
                ret = btrfs_insert_empty_item(trans, root, path, &key,
                                              datasize);
                if (ret) {
                        err = ret;
                        goto fail;
                }
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (compress_type != BTRFS_COMPRESS_NONE) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_CACHE_SIZE);

                        kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  compress_type);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        /*
         * we're an inline extent, so nobody can
         * extend the file past i_size without locking
         * a page we already have locked.
         *
         * We must do any isize and inode updates
         * before we unlock the pages.  Otherwise we
         * could end up racing with unlink.
         */
        BTRFS_I(inode)->disk_i_size = inode->i_size;
        ret = btrfs_update_inode(trans, root, inode);

        return ret;
fail:
        return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
                                          struct inode *inode, u64 start,
                                          u64 end, size_t compressed_size,
                                          int compress_type,
                                          struct page **compressed_pages)
{
        struct btrfs_trans_handle *trans;
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = ALIGN(end, root->sectorsize);
        u64 data_len = inline_len;
        int ret;
        struct btrfs_path *path;
        int extent_inserted = 0;
        u32 extent_item_size;

        if (compressed_size)
                data_len = compressed_size;

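        /*
         * Inlining is only attempted when the data starts at offset 0,
         * ends within the first page, reaches EOF, and fits within both
         * the per-leaf inline limit and the mount-time max_inline cap;
         * an uncompressed range that ends exactly on a sector boundary
         * is also rejected, since it is better written as a regular
         * extent.  Anything else returns 1 to fall back to ordinary COW.
         */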
        if (start > 0 ||
            actual_end > PAGE_CACHE_SIZE ||
            data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > root->fs_info->max_inline) {
                return 1;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        if (compressed_size && compressed_pages)
                extent_item_size = btrfs_file_extent_calc_inline_size(
                   compressed_size);
        else
                extent_item_size = btrfs_file_extent_calc_inline_size(
                    inline_len);

        ret = __btrfs_drop_extents(trans, root, inode, path,
                                   start, aligned_end, NULL,
                                   1, 1, extent_item_size, &extent_inserted);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        }

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, path, extent_inserted,
                                   root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
        if (ret && ret != -ENOSPC) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        } else if (ret == -ENOSPC) {
                ret = 1;
                goto out;
        }

        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
        btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
        /*
         * Don't forget to free the reserved space; an inlined extent
         * doesn't count as a data extent, so free it directly here.
         * At reserve time the space is always aligned to page size,
         * so just free one page here.
         */
        btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE);
        btrfs_free_path(path);
        btrfs_end_transaction(trans, root);
        return ret;
}

struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        int compress_type;
        struct list_head list;
};

struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};
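
/*
 * One async_cow (above) tracks a contiguous delalloc range handed to
 * the compression workers; each async_extent on its list is one chunk
 * of that range, either compressed (pages != NULL) or left uncompressed
 * (pages == NULL) for the fallback path in submit_compressed_extents().
 */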

static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages,
                                     int compress_type)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        BUG_ON(!async_extent); /* -ENOMEM */
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        async_extent->compress_type = compress_type;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

static inline int inode_need_compress(struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;

        /* force compress */
        if (btrfs_test_opt(root, FORCE_COMPRESS))
                return 1;
        /* bad compression ratios */
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                return 0;
        if (btrfs_test_opt(root, COMPRESS) ||
            BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
            BTRFS_I(inode)->force_compress)
                return 1;
        return 0;
}
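/*
 * Note the ordering above: a mount with compress-force (FORCE_COMPRESS)
 * wins even when the inode has been flagged NOCOMPRESS after earlier
 * poor compression ratios, so forced compression is retried on every
 * write.
 */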

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 num_bytes;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        unsigned long max_compressed = 128 * 1024;
        unsigned long max_uncompressed = 128 * 1024;
        int i;
        int will_compress;
        int compress_type = root->fs_info->compress_type;
        int redirty = 0;

        /* if this is a small write inside eof, kick off a defrag */
        if ((end - start + 1) < 16 * 1024 &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
        nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /*
         * skip compression for a small file range (<= blocksize) that
         * isn't an inline extent, since it doesn't save disk space at all.
         */
        if (total_compressed <= blocksize &&
           (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                goto cleanup_and_bail_uncompressed;

        /* we want to make sure that the amount of ram required to uncompress
         * an extent is reasonable, so we limit the total size in ram
         * of a compressed extent to 128k.  This is a crucial number
         * because it also controls how easily we can spread reads across
         * cpus for decompression.
         *
         * We also want to make sure the amount of IO required to do
         * a random read is reasonably small, so we limit the size of
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (inode_need_compress(inode)) {
                WARN_ON(pages);
                pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
                if (!pages) {
                        /* just bail out to the uncompressed code */
                        nr_pages = 0;
                        goto cont;
                }

                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;

                /*
                 * we need to call clear_page_dirty_for_io on each
                 * page in the range.  Otherwise applications with the file
                 * mmap'd can wander in and change the page contents while
                 * we are compressing them.
                 *
                 * If the compression fails for any reason, we set the pages
                 * dirty again later on.
                 */
                extent_range_clear_dirty_for_io(inode, start, end);
                redirty = 1;
                ret = btrfs_compress_pages(compress_type,
                                           inode->i_mapping, start,
                                           total_compressed, pages,
                                           nr_pages, &nr_pages_ret,
                                           &total_in,
                                           &total_compressed,
                                           max_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_CACHE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
                                kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
                }
        }
cont:
        if (start == 0) {
                /* let's try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    0, 0, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    total_compressed,
                                                    compress_type, pages);
                }
                if (ret <= 0) {
                        unsigned long clear_flags = EXTENT_DELALLOC |
                                EXTENT_DEFRAG;
                        unsigned long page_error_op;

                        clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
                        page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

                        /*
                         * inline extent creation worked or returned error,
                         * we don't need to create any more async work items.
                         * Unlock and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                                     clear_flags, PAGE_UNLOCK |
                                                     PAGE_CLEAR_DIRTY |
                                                     PAGE_SET_WRITEBACK |
                                                     page_error_op |
                                                     PAGE_END_WRITEBACK);
                        goto free_pages_out;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent, so round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = ALIGN(total_compressed, blocksize);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
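                /*
                 * Illustrative numbers (assuming 4K sectors and pages):
                 * if 128K of input only shrinks to 126K, ALIGN() rounds
                 * total_compressed back up to 128K, the compare below
                 * sees no win and will_compress is dropped; if it
                 * shrinks to 96K, the compressed copy is kept and
                 * num_bytes shrinks to the page-aligned amount of input
                 * actually compressed.
                 */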
                total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        num_bytes = total_in;
                }
        }
        if (!will_compress && pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
                        page_cache_release(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages_ret = 0;

                /* flag the file so we don't compress in the future */
                if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
                    !(BTRFS_I(inode)->force_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
                }
        }
        if (will_compress) {
                *num_added += 1;

                /* the async work queues will take care of doing actual
                 * allocation on disk for these compressed pages,
                 * and will submit them to the elevator.
                 */
                add_async_extent(async_cow, start, num_bytes,
                                 total_compressed, pages, nr_pages_ret,
                                 compress_type);

                if (start + num_bytes < end) {
                        start += num_bytes;
                        pages = NULL;
                        cond_resched();
                        goto again;
                }
        } else {
cleanup_and_bail_uncompressed:
                /*
                 * No compression, but we still need to write the pages in
                 * the file we've been given so far.  Redirty the locked
                 * page if it corresponds to our extent and set things up
                 * for the async work queue to run cow_file_range to do
                 * the normal delalloc dance.
                 */
                if (page_offset(locked_page) >= start &&
                    page_offset(locked_page) <= end) {
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
                if (redirty)
                        extent_range_redirty_for_io(inode, start, end);
                add_async_extent(async_cow, start, end - start + 1,
                                 0, NULL, 0, BTRFS_COMPRESS_NONE);
                *num_added += 1;
        }

        return;

free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
                page_cache_release(pages[i]);
        }
        kfree(pages);
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
        int i;

        if (!async_extent->pages)
                return;

        for (i = 0; i < async_extent->nr_pages; i++) {
                WARN_ON(async_extent->pages[i]->mapping);
                page_cache_release(async_extent->pages[i]);
        }
        kfree(async_extent->pages);
        async_extent->nr_pages = 0;
        async_extent->pages = NULL;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
        int ret = 0;

again:
        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

retry:
                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                         async_extent->start +
                                         async_extent->ram_size - 1);

                        /* allocate blocks */
                        ret = cow_file_range(inode, async_cow->locked_page,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             &page_started, &nr_written, 0);

                        /* JDM XXX */

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started && !ret)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
                        else if (ret)
                                unlock_page(async_cow->locked_page);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1);

                ret = btrfs_reserve_extent(root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint, &ins, 1, 1);
                if (ret) {
                        free_async_extent_pages(async_extent);

                        if (ret == -ENOSPC) {
                                unlock_extent(io_tree, async_extent->start,
                                              async_extent->start +
                                              async_extent->ram_size - 1);

                                /*
                                 * we need to redirty the pages if we decide to
                                 * fall back to uncompressed IO, otherwise we
                                 * will not submit these pages down to lower
                                 * layers.
                                 */
                                extent_range_redirty_for_io(inode,
                                                async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1);

                                goto retry;
                        }
                        goto out_free;
                }
                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                btrfs_drop_extent_cache(inode, async_extent->start,
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_free_reserve;
                }
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = async_extent->ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                em->compress_type = async_extent->compress_type;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                em->generation = -1;

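                /*
                 * add_extent_mapping() returns -EEXIST while a cached
                 * mapping still overlaps this range: drop the stale
                 * cache entry and retry until the new mapping goes in,
                 * or until a different result ends the loop.
                 */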
                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                }

                if (ret)
                        goto out_free_reserve;

                ret = btrfs_add_ordered_extent_compress(inode,
                                                async_extent->start,
                                                ins.objectid,
                                                async_extent->ram_size,
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
                if (ret) {
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
                }

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode, async_extent->start,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
                ret = btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);
                if (ret) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
                        const u64 end = start + async_extent->ram_size - 1;

                        p->mapping = inode->i_mapping;
                        tree->ops->writepage_end_io_hook(p, start, end,
                                                         NULL, 0);
                        p->mapping = NULL;
                        extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
                                                     PAGE_END_WRITEBACK |
                                                     PAGE_SET_ERROR);
                        free_async_extent_pages(async_extent);
                }
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }
        return;
out_free_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
                                     async_extent->ram_size - 1,
                                     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
                                     PAGE_SET_ERROR);
        free_async_extent_pages(async_extent);
        kfree(async_extent);
        goto again;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
                                      u64 num_bytes)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        u64 alloc_hint = 0;

        read_lock(&em_tree->lock);
        em = search_extent_mapping(em_tree, start, num_bytes);
        if (em) {
                /*
                 * if block start isn't an actual block number then find the
                 * first block in this inode and use that as a hint.  If that
                 * block is also bogus then just don't worry about it.
                 */
                if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                        free_extent_map(em);
                        em = search_extent_mapping(em_tree, 0, 0);
                        if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
                                alloc_hint = em->block_start;
                        if (em)
                                free_extent_map(em);
                } else {
                        alloc_hint = em->block_start;
                        free_extent_map(em);
                }
        }
        read_unlock(&em_tree->lock);

        return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written,
                                   int unlock)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = root->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;

        if (btrfs_is_free_space_inode(inode)) {
                WARN_ON_ONCE(1);
                ret = -EINVAL;
                goto out_unlock;
        }

        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        disk_num_bytes = num_bytes;

        /* if this is a small write inside eof, kick off defrag */
        if (num_bytes < 64 * 1024 &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        if (start == 0) {
                /* let's try to make an inline extent */
                ret = cow_file_range_inline(root, inode, start, end, 0, 0,
                                            NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                     EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG, PAGE_UNLOCK |
                                     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
                                     PAGE_END_WRITEBACK);

                        *nr_written = *nr_written +
                             (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                        *page_started = 1;
                        goto out;
                } else if (ret < 0) {
                        goto out_unlock;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(root->fs_info->super_copy));

        alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

        while (disk_num_bytes > 0) {
                unsigned long op;

                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_reserve;
                }
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
                em->len = ins.offset;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                em->generation = -1;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, start,
                                                start + ram_size - 1, 0);
                }
                if (ret)
                        goto out_reserve;

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                if (ret)
                        goto out_drop_extent_cache;

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        if (ret)
                                goto out_drop_extent_cache;
                }

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 *
                 * Do set the Private2 bit so we know this page was properly
                 * setup for writepage
                 */
                op = unlock ? PAGE_UNLOCK : 0;
                op |= PAGE_SET_PRIVATE2;

                extent_clear_unlock_delalloc(inode, start,
                                             start + ram_size - 1, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC,
                                             op);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        return ret;

out_drop_extent_cache:
        btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
        extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                     EXTENT_DELALLOC | EXTENT_DEFRAG,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
        goto out;
}

/*
 * work queue callback to start compression on a file's locked pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0) {
                btrfs_add_delayed_iput(async_cow->inode);
                async_cow->inode = NULL;
        }
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;

        /*
         * atomic_sub_return implies a barrier for waitqueue_active
         */
        if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        if (async_cow->inode)
                btrfs_add_delayed_iput(async_cow->inode);
        kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
        int limit = 10 * 1024 * 1024;

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
                         1, 0, NULL, GFP_NOFS);
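        /*
         * Carve the range into work items (512K chunks when compression
         * may run, the whole range otherwise) and apply backpressure:
         * once async_delalloc_pages passes the limit, wait for the
         * workers to drain below it so writeback cannot outrun the
         * queues.
         */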
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                BUG_ON(!async_cow); /* -ENOMEM */
                async_cow->inode = igrab(inode);
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
                    !btrfs_test_opt(root, FORCE_COMPRESS))
                        cur_end = end;
                else
                        cur_end = min(end, start + 512 * 1024 - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                btrfs_init_work(&async_cow->work,
                                btrfs_delalloc_helper,
                                async_cow_start, async_cow_submit,
                                async_cow_free);

                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

                btrfs_queue_work(root->fs_info->delalloc_workers,
                                 &async_cow->work);

                if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
                        wait_event(root->fs_info->async_submit_wait,
                           (atomic_read(&root->fs_info->async_delalloc_pages) <
                            limit));
                }

                while (atomic_read(&root->fs_info->async_submit_draining) &&
                      atomic_read(&root->fs_info->async_delalloc_pages)) {
                        wait_event(root->fs_info->async_submit_wait,
                          (atomic_read(&root->fs_info->async_delalloc_pages) ==
                           0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}

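/*
 * Helper for the nocow path below: forcing COW whenever checksums
 * already exist for a byte range keeps the csum tree consistent, so a
 * given extent either has valid checksums or none at all.
 */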
static noinline int csum_exist_in_range(struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list, 0);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        if (ret < 0)
                return ret;
        return 1;
}

/*
 * callback for the nocow writeback path.  This checks for snapshots or
 * COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
                                       struct page *locked_page,
                              u64 start, u64 end, int *page_started, int force,
                              unsigned long *nr_written)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key found_key;
        u64 cow_start;
        u64 cur_offset;
        u64 extent_end;
        u64 extent_offset;
        u64 disk_bytenr;
        u64 num_bytes;
        u64 disk_num_bytes;
        u64 ram_bytes;
        int extent_type;
        int ret, err;
        int type;
        int nocow;
        int check_prev = 1;
        bool nolock;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path) {
                extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
                                             EXTENT_DO_ACCOUNTING |
                                             EXTENT_DEFRAG, PAGE_UNLOCK |
                                             PAGE_CLEAR_DIRTY |
                                             PAGE_SET_WRITEBACK |
                                             PAGE_END_WRITEBACK);
                return -ENOMEM;
        }

        nolock = btrfs_is_free_space_inode(inode);

        if (nolock)
                trans = btrfs_join_transaction_nolock(root);
        else
                trans = btrfs_join_transaction(root);

        if (IS_ERR(trans)) {
                extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
                                             EXTENT_DO_ACCOUNTING |
                                             EXTENT_DEFRAG, PAGE_UNLOCK |
                                             PAGE_CLEAR_DIRTY |
                                             PAGE_SET_WRITEBACK |
                                             PAGE_END_WRITEBACK);
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }

        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               cur_offset, 0);
                if (ret < 0)
                        goto error;
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
                        if (found_key.objectid == ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                check_prev = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0) {
                                if (cow_start != (u64)-1)
                                        cur_offset = cow_start;
                                goto error;
                        }
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                nocow = 0;
                disk_bytenr = 0;
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(found_key.objectid < ino) ||
                    found_key.type < BTRFS_EXTENT_DATA_KEY) {
                        path->slots[0]++;
                        goto next_slot;
                }
                if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;

                if (found_key.offset > cur_offset) {
                        extent_end = found_key.offset;
                        extent_type = 0;
                        goto out_check;
                }

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = found_key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                        disk_num_bytes =
                                btrfs_file_extent_disk_num_bytes(leaf, fi);
                        if (extent_end <= start) {
                                path->slots[0]++;
                                goto next_slot;
                        }
                        if (disk_bytenr == 0)
                                goto out_check;
                        if (btrfs_file_extent_compression(leaf, fi) ||
                            btrfs_file_extent_encryption(leaf, fi) ||
                            btrfs_file_extent_other_encoding(leaf, fi))
                                goto out_check;
                        if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1354                                 goto out_check;
1355                         if (btrfs_extent_readonly(root, disk_bytenr))
1356                                 goto out_check;
1357                         ret = btrfs_cross_ref_exist(trans, root, ino,
1358                                                   found_key.offset -
1359                                                   extent_offset, disk_bytenr);
1360                         if (ret) {
1361                                 /*
1362                                  * ret could be -EIO if the above fails to read
1363                                  * metadata.
1364                                  */
1365                                 if (ret < 0) {
1366                                         if (cow_start != (u64)-1)
1367                                                 cur_offset = cow_start;
1368                                         goto error;
1369                                 }
1370
1371                                 WARN_ON_ONCE(nolock);
1372                                 goto out_check;
1373                         }
1374                         disk_bytenr += extent_offset;
1375                         disk_bytenr += cur_offset - found_key.offset;
1376                         num_bytes = min(end + 1, extent_end) - cur_offset;
1377                         /*
1378                          * if there are pending snapshots for this root,
1379                          * we fall back to the common COW path.
1380                          */
1381                         if (!nolock) {
1382                                 err = btrfs_start_write_no_snapshoting(root);
1383                                 if (!err)
1384                                         goto out_check;
1385                         }
1386                         /*
1387                          * force COW if csums exist in the range.
1388                          * This ensures that the csums for a given extent
1389                          * are either all valid or do not exist.
1390                          */
1391                         ret = csum_exist_in_range(root, disk_bytenr, num_bytes);
1392                         if (ret) {
1393                                 /*
1394                                  * ret could be -EIO if the above fails to read
1395                                  * metadata.
1396                                  */
1397                                 if (ret < 0) {
1398                                         if (cow_start != (u64)-1)
1399                                                 cur_offset = cow_start;
1400                                         goto error;
1401                                 }
1402                                 WARN_ON_ONCE(nolock);
1403                                 goto out_check;
1404                         }
1405                         nocow = 1;
1406                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1407                         extent_end = found_key.offset +
1408                                 btrfs_file_extent_inline_len(leaf,
1409                                                      path->slots[0], fi);
1410                         extent_end = ALIGN(extent_end, root->sectorsize);
1411                 } else {
1412                         BUG_ON(1);
1413                 }
1414 out_check:
1415                 if (extent_end <= start) {
1416                         path->slots[0]++;
1417                         if (!nolock && nocow)
1418                                 btrfs_end_write_no_snapshoting(root);
1419                         goto next_slot;
1420                 }
1421                 if (!nocow) {
1422                         if (cow_start == (u64)-1)
1423                                 cow_start = cur_offset;
1424                         cur_offset = extent_end;
1425                         if (cur_offset > end)
1426                                 break;
1427                         path->slots[0]++;
1428                         goto next_slot;
1429                 }
1430
1431                 btrfs_release_path(path);
1432                 if (cow_start != (u64)-1) {
1433                         ret = cow_file_range(inode, locked_page,
1434                                              cow_start, found_key.offset - 1,
1435                                              page_started, nr_written, 1);
1436                         if (ret) {
1437                                 if (!nolock && nocow)
1438                                         btrfs_end_write_no_snapshoting(root);
1439                                 goto error;
1440                         }
1441                         cow_start = (u64)-1;
1442                 }
1443
1444                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1445                         struct extent_map *em;
1446                         struct extent_map_tree *em_tree;
1447                         em_tree = &BTRFS_I(inode)->extent_tree;
1448                         em = alloc_extent_map();
1449                         BUG_ON(!em); /* -ENOMEM */
1450                         em->start = cur_offset;
1451                         em->orig_start = found_key.offset - extent_offset;
1452                         em->len = num_bytes;
1453                         em->block_len = num_bytes;
1454                         em->block_start = disk_bytenr;
1455                         em->orig_block_len = disk_num_bytes;
1456                         em->ram_bytes = ram_bytes;
1457                         em->bdev = root->fs_info->fs_devices->latest_bdev;
1458                         em->mod_start = em->start;
1459                         em->mod_len = em->len;
1460                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
1461                         set_bit(EXTENT_FLAG_FILLING, &em->flags);
1462                         em->generation = -1;
1463                         while (1) {
1464                                 write_lock(&em_tree->lock);
1465                                 ret = add_extent_mapping(em_tree, em, 1);
1466                                 write_unlock(&em_tree->lock);
1467                                 if (ret != -EEXIST) {
1468                                         free_extent_map(em);
1469                                         break;
1470                                 }
1471                                 btrfs_drop_extent_cache(inode, em->start,
1472                                                 em->start + em->len - 1, 0);
1473                         }
1474                         type = BTRFS_ORDERED_PREALLOC;
1475                 } else {
1476                         type = BTRFS_ORDERED_NOCOW;
1477                 }
1478
1479                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1480                                                num_bytes, num_bytes, type);
1481                 BUG_ON(ret); /* -ENOMEM */
1482
1483                 if (root->root_key.objectid ==
1484                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1485                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1486                                                       num_bytes);
1487                         if (ret) {
1488                                 if (!nolock && nocow)
1489                                         btrfs_end_write_no_snapshoting(root);
1490                                 goto error;
1491                         }
1492                 }
1493
1494                 extent_clear_unlock_delalloc(inode, cur_offset,
1495                                              cur_offset + num_bytes - 1,
1496                                              locked_page, EXTENT_LOCKED |
1497                                              EXTENT_DELALLOC, PAGE_UNLOCK |
1498                                              PAGE_SET_PRIVATE2);
1499                 if (!nolock && nocow)
1500                         btrfs_end_write_no_snapshoting(root);
1501                 cur_offset = extent_end;
1502                 if (cur_offset > end)
1503                         break;
1504         }
1505         btrfs_release_path(path);
1506
1507         if (cur_offset <= end && cow_start == (u64)-1) {
1508                 cow_start = cur_offset;
1509                 cur_offset = end;
1510         }
1511
1512         if (cow_start != (u64)-1) {
1513                 ret = cow_file_range(inode, locked_page, cow_start, end,
1514                                      page_started, nr_written, 1);
1515                 if (ret)
1516                         goto error;
1517         }
1518
1519 error:
1520         err = btrfs_end_transaction(trans, root);
1521         if (!ret)
1522                 ret = err;
1523
1524         if (ret && cur_offset < end)
1525                 extent_clear_unlock_delalloc(inode, cur_offset, end,
1526                                              locked_page, EXTENT_LOCKED |
1527                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1528                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1529                                              PAGE_CLEAR_DIRTY |
1530                                              PAGE_SET_WRITEBACK |
1531                                              PAGE_END_WRITEBACK);
1532         btrfs_free_path(path);
1533         return ret;
1534 }
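
/*
 * A minimal userspace sketch of the per-extent nocow decision made in the
 * loop above, assuming plain stand-ins for the on-disk fields.  The names
 * (struct extent_desc, can_nocow_extent) are illustrative only, not btrfs
 * API; they condense the goto-out_check ladder into one predicate.
 */
#include <stdbool.h>
#include <stdint.h>

enum { EXTENT_REG, EXTENT_PREALLOC, EXTENT_INLINE };

struct extent_desc {
	int	 type;		/* EXTENT_REG, EXTENT_PREALLOC or EXTENT_INLINE */
	uint64_t disk_bytenr;	/* 0 means a hole */
	bool	 compressed;
	bool	 encrypted;
	bool	 other_encoding;
	bool	 readonly;	/* backing block group is read-only */
	bool	 cross_ref;	/* another root still references the extent */
	bool	 csum_in_range;	/* checksums exist for part of the range */
};

static bool can_nocow_extent(const struct extent_desc *e, bool force)
{
	if (e->type == EXTENT_INLINE)
		return false;	/* inline extents are always COWed */
	if (e->disk_bytenr == 0)
		return false;	/* a hole: nothing to overwrite in place */
	if (e->compressed || e->encrypted || e->other_encoding)
		return false;	/* encoded extents must be rewritten */
	if (e->type == EXTENT_REG && !force)
		return false;	/* only NODATACOW may overwrite REG extents */
	if (e->readonly || e->cross_ref)
		return false;	/* shared or read-only space can't be reused */
	if (e->csum_in_range)
		return false;	/* keep csums for an extent all-or-nothing */
	return true;
}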
1535
1536 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1537 {
1538
1539         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1540             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1541                 return 0;
1542
1543         /*
1544          * @defrag_bytes is a hint value; no spinlock is held here.
1545          * If it is non-zero, the file is being defragged.
1546          * Force COW if the given extent needs to be defragged.
1547          */
1548         if (BTRFS_I(inode)->defrag_bytes &&
1549             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1550                            EXTENT_DEFRAG, 0, NULL))
1551                 return 1;
1552
1553         return 0;
1554 }
1555
1556 /*
1557  * extent_io.c callback to do delayed allocation processing
1558  */
1559 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1560                               u64 start, u64 end, int *page_started,
1561                               unsigned long *nr_written)
1562 {
1563         int ret;
1564         int force_cow = need_force_cow(inode, start, end);
1565
1566         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1567                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1568                                          page_started, 1, nr_written);
1569         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1570                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1571                                          page_started, 0, nr_written);
1572         } else if (!inode_need_compress(inode)) {
1573                 ret = cow_file_range(inode, locked_page, start, end,
1574                                       page_started, nr_written, 1);
1575         } else {
1576                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1577                         &BTRFS_I(inode)->runtime_flags);
1578                 ret = cow_file_range_async(inode, locked_page, start, end,
1579                                            page_started, nr_written);
1580         }
1581         return ret;
1582 }
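
/*
 * The dispatch above, restated as a tiny pure function.  The flag and enum
 * names below are illustrative stand-ins, not the kernel's definitions.
 */
#define F_NODATACOW 0x1u
#define F_PREALLOC  0x2u

enum delalloc_path { PATH_NOCOW, PATH_PREALLOC_NOCOW, PATH_COW, PATH_ASYNC_COW };

static enum delalloc_path pick_delalloc_path(unsigned int flags, int force_cow,
					     int need_compress)
{
	if ((flags & F_NODATACOW) && !force_cow)
		return PATH_NOCOW;		/* overwrite in place */
	if ((flags & F_PREALLOC) && !force_cow)
		return PATH_PREALLOC_NOCOW;	/* fill preallocated space */
	if (!need_compress)
		return PATH_COW;		/* plain synchronous COW */
	return PATH_ASYNC_COW;			/* compress via async workers */
}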
1583
1584 static void btrfs_split_extent_hook(struct inode *inode,
1585                                     struct extent_state *orig, u64 split)
1586 {
1587         u64 size;
1588
1589         /* not delalloc, ignore it */
1590         if (!(orig->state & EXTENT_DELALLOC))
1591                 return;
1592
1593         size = orig->end - orig->start + 1;
1594         if (size > BTRFS_MAX_EXTENT_SIZE) {
1595                 u64 num_extents;
1596                 u64 new_size;
1597
1598                 /*
1599                  * See the explanation in btrfs_merge_extent_hook, the same
1600                  * applies here, just in reverse.
1601                  */
1602                 new_size = orig->end - split + 1;
1603                 num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1604                                         BTRFS_MAX_EXTENT_SIZE);
1605                 new_size = split - orig->start;
1606                 num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1607                                         BTRFS_MAX_EXTENT_SIZE);
1608                 if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1609                               BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1610                         return;
1611         }
1612
1613         spin_lock(&BTRFS_I(inode)->lock);
1614         BTRFS_I(inode)->outstanding_extents++;
1615         spin_unlock(&BTRFS_I(inode)->lock);
1616 }
1617
1618 /*
1619  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1620  * extents, i.e. new extents that are merged onto old extents (such as
1621  * when we are doing sequential writes), so we can properly account for
1622  * the metadata space we'll need.
1623  */
1624 static void btrfs_merge_extent_hook(struct inode *inode,
1625                                     struct extent_state *new,
1626                                     struct extent_state *other)
1627 {
1628         u64 new_size, old_size;
1629         u64 num_extents;
1630
1631         /* not delalloc, ignore it */
1632         if (!(other->state & EXTENT_DELALLOC))
1633                 return;
1634
1635         if (new->start > other->start)
1636                 new_size = new->end - other->start + 1;
1637         else
1638                 new_size = other->end - new->start + 1;
1639
1640         /* we're not bigger than the max, unreserve the space and go */
1641         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1642                 spin_lock(&BTRFS_I(inode)->lock);
1643                 BTRFS_I(inode)->outstanding_extents--;
1644                 spin_unlock(&BTRFS_I(inode)->lock);
1645                 return;
1646         }
1647
1648         /*
1649          * We have to add up either side to figure out how many extents were
1650          * accounted for before we merged into one big extent.  If the number of
1651          * extents we accounted for is <= the amount we need for the new range
1652          * then we can return, otherwise drop.  Think of it like this
1653          *
1654          * [ 4k][MAX_SIZE]
1655          *
1656          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1657          * need 2 outstanding extents, on one side we have 1 and the other side
1658          * we have 1 so they are == and we can return.  But in this case
1659          *
1660          * [MAX_SIZE+4k][MAX_SIZE+4k]
1661          *
1662          * Each range on their own accounts for 2 extents, but merged together
1663          * they are only 3 extents worth of accounting, so we need to drop in
1664          * this case.
1665          */
1666         old_size = other->end - other->start + 1;
1667         num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1668                                 BTRFS_MAX_EXTENT_SIZE);
1669         old_size = new->end - new->start + 1;
1670         num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1671                                  BTRFS_MAX_EXTENT_SIZE);
1672
1673         if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1674                       BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1675                 return;
1676
1677         spin_lock(&BTRFS_I(inode)->lock);
1678         BTRFS_I(inode)->outstanding_extents--;
1679         spin_unlock(&BTRFS_I(inode)->lock);
1680 }
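
/*
 * The two examples from the comment above, worked numerically in userspace.
 * extents_for() mirrors the div64_u64(x + BTRFS_MAX_EXTENT_SIZE - 1, ...)
 * idiom; 128M for the max extent size matches BTRFS_MAX_EXTENT_SIZE but is
 * hardcoded here for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_EXTENT (128ULL * 1024 * 1024)

static uint64_t extents_for(uint64_t bytes)
{
	return (bytes + MAX_EXTENT - 1) / MAX_EXTENT;
}

int main(void)
{
	/* [4k][MAX_SIZE]: 1 + 1 accounted, the merged range needs 2, keep */
	printf("case 1: %llu accounted vs %llu needed\n",
	       (unsigned long long)(extents_for(4096) + extents_for(MAX_EXTENT)),
	       (unsigned long long)extents_for(4096 + MAX_EXTENT));

	/* [MAX+4k][MAX+4k]: 2 + 2 accounted, merged needs only 3, drop one */
	printf("case 2: %llu accounted vs %llu needed\n",
	       (unsigned long long)(2 * extents_for(MAX_EXTENT + 4096)),
	       (unsigned long long)extents_for(2 * (MAX_EXTENT + 4096)));
	return 0;
}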
1681
1682 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1683                                       struct inode *inode)
1684 {
1685         spin_lock(&root->delalloc_lock);
1686         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1687                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1688                               &root->delalloc_inodes);
1689                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1690                         &BTRFS_I(inode)->runtime_flags);
1691                 root->nr_delalloc_inodes++;
1692                 if (root->nr_delalloc_inodes == 1) {
1693                         spin_lock(&root->fs_info->delalloc_root_lock);
1694                         BUG_ON(!list_empty(&root->delalloc_root));
1695                         list_add_tail(&root->delalloc_root,
1696                                       &root->fs_info->delalloc_roots);
1697                         spin_unlock(&root->fs_info->delalloc_root_lock);
1698                 }
1699         }
1700         spin_unlock(&root->delalloc_lock);
1701 }
1702
1703 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1704                                      struct inode *inode)
1705 {
1706         spin_lock(&root->delalloc_lock);
1707         if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1708                 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1709                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1710                           &BTRFS_I(inode)->runtime_flags);
1711                 root->nr_delalloc_inodes--;
1712                 if (!root->nr_delalloc_inodes) {
1713                         spin_lock(&root->fs_info->delalloc_root_lock);
1714                         BUG_ON(list_empty(&root->delalloc_root));
1715                         list_del_init(&root->delalloc_root);
1716                         spin_unlock(&root->fs_info->delalloc_root_lock);
1717                 }
1718         }
1719         spin_unlock(&root->delalloc_lock);
1720 }
1721
1722 /*
1723  * extent_io.c set_bit_hook, used to track delayed allocation
1724  * bytes in this file, and to maintain the list of inodes that
1725  * have pending delalloc work to be done.
1726  */
1727 static void btrfs_set_bit_hook(struct inode *inode,
1728                                struct extent_state *state, unsigned *bits)
1729 {
1730
1731         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1732                 WARN_ON(1);
1733         /*
1734          * set_bit and clear_bit hooks normally require _irqsave/restore
1735          * but in this case, we are only testing for the DELALLOC
1736          * bit, which is only set or cleared with irqs on
1737          */
1738         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1739                 struct btrfs_root *root = BTRFS_I(inode)->root;
1740                 u64 len = state->end + 1 - state->start;
1741                 bool do_list = !btrfs_is_free_space_inode(inode);
1742
1743                 if (*bits & EXTENT_FIRST_DELALLOC) {
1744                         *bits &= ~EXTENT_FIRST_DELALLOC;
1745                 } else {
1746                         spin_lock(&BTRFS_I(inode)->lock);
1747                         BTRFS_I(inode)->outstanding_extents++;
1748                         spin_unlock(&BTRFS_I(inode)->lock);
1749                 }
1750
1751                 /* For sanity tests */
1752                 if (btrfs_test_is_dummy_root(root))
1753                         return;
1754
1755                 __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1756                                      root->fs_info->delalloc_batch);
1757                 spin_lock(&BTRFS_I(inode)->lock);
1758                 BTRFS_I(inode)->delalloc_bytes += len;
1759                 if (*bits & EXTENT_DEFRAG)
1760                         BTRFS_I(inode)->defrag_bytes += len;
1761                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1762                                          &BTRFS_I(inode)->runtime_flags))
1763                         btrfs_add_delalloc_inodes(root, inode);
1764                 spin_unlock(&BTRFS_I(inode)->lock);
1765         }
1766 }
1767
1768 /*
1769  * extent_io.c clear_bit_hook, see set_bit_hook for why
1770  */
1771 static void btrfs_clear_bit_hook(struct inode *inode,
1772                                  struct extent_state *state,
1773                                  unsigned *bits)
1774 {
1775         u64 len = state->end + 1 - state->start;
1776         u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
1777                                     BTRFS_MAX_EXTENT_SIZE);
1778
1779         spin_lock(&BTRFS_I(inode)->lock);
1780         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
1781                 BTRFS_I(inode)->defrag_bytes -= len;
1782         spin_unlock(&BTRFS_I(inode)->lock);
1783
1784         /*
1785          * set_bit and clear_bit hooks normally require _irqsave/restore
1786          * but in this case, we are only testing for the DELALLOC
1787          * bit, which is only set or cleared with irqs on
1788          */
1789         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1790                 struct btrfs_root *root = BTRFS_I(inode)->root;
1791                 bool do_list = !btrfs_is_free_space_inode(inode);
1792
1793                 if (*bits & EXTENT_FIRST_DELALLOC) {
1794                         *bits &= ~EXTENT_FIRST_DELALLOC;
1795                 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1796                         spin_lock(&BTRFS_I(inode)->lock);
1797                         BTRFS_I(inode)->outstanding_extents -= num_extents;
1798                         spin_unlock(&BTRFS_I(inode)->lock);
1799                 }
1800
1801                 /*
1802                  * We don't reserve metadata space for space cache inodes so we
1803          * don't need to call btrfs_delalloc_release_metadata if there is an
1804                  * error.
1805                  */
1806                 if (*bits & EXTENT_DO_ACCOUNTING &&
1807                     root != root->fs_info->tree_root)
1808                         btrfs_delalloc_release_metadata(inode, len);
1809
1810                 /* For sanity tests. */
1811                 if (btrfs_test_is_dummy_root(root))
1812                         return;
1813
1814                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1815                     && do_list && !(state->state & EXTENT_NORESERVE))
1816                         btrfs_free_reserved_data_space_noquota(inode,
1817                                         state->start, len);
1818
1819                 __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1820                                      root->fs_info->delalloc_batch);
1821                 spin_lock(&BTRFS_I(inode)->lock);
1822                 BTRFS_I(inode)->delalloc_bytes -= len;
1823                 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1824                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1825                              &BTRFS_I(inode)->runtime_flags))
1826                         btrfs_del_delalloc_inode(root, inode);
1827                 spin_unlock(&BTRFS_I(inode)->lock);
1828         }
1829 }
1830
1831 /*
1832  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1833  * we don't create bios that span stripes or chunks
1834  */
1835 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1836                          size_t size, struct bio *bio,
1837                          unsigned long bio_flags)
1838 {
1839         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1840         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1841         u64 length = 0;
1842         u64 map_length;
1843         int ret;
1844
1845         if (bio_flags & EXTENT_BIO_COMPRESSED)
1846                 return 0;
1847
1848         length = bio->bi_iter.bi_size;
1849         map_length = length;
1850         ret = btrfs_map_block(root->fs_info, rw, logical,
1851                               &map_length, NULL, 0);
1852         /* Will always return 0 when bbio_ret == NULL */
1853         BUG_ON(ret < 0);
1854         if (map_length < length + size)
1855                 return 1;
1856         return 0;
1857 }
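
/*
 * The check above in isolation: a bio may only grow while it still fits
 * inside one contiguous device mapping.  map_length_at() is a stand-in for
 * what btrfs_map_block() reports, assuming fixed 64K stripes for the sake
 * of the sketch.
 */
#include <stdbool.h>
#include <stdint.h>

#define STRIPE_LEN (64ULL * 1024)

static uint64_t map_length_at(uint64_t logical)
{
	return STRIPE_LEN - (logical % STRIPE_LEN);	/* bytes left in stripe */
}

static bool must_split_bio(uint64_t logical, uint64_t bio_size, uint64_t add)
{
	return map_length_at(logical) < bio_size + add;
}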
1858
1859 /*
1860  * in order to insert checksums into the metadata in large chunks,
1861  * we wait until bio submission time.   All the pages in the bio are
1862  * checksummed and sums are attached onto the ordered extent record.
1863  *
1864  * At IO completion time the csums attached to the ordered extent record
1865  * are inserted into the btree
1866  */
1867 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1868                                     struct bio *bio, int mirror_num,
1869                                     unsigned long bio_flags,
1870                                     u64 bio_offset)
1871 {
1872         struct btrfs_root *root = BTRFS_I(inode)->root;
1873         int ret = 0;
1874
1875         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1876         BUG_ON(ret); /* -ENOMEM */
1877         return 0;
1878 }
1879
1880 /*
1881  * in order to insert checksums into the metadata in large chunks,
1882  * we wait until bio submission time.   All the pages in the bio are
1883  * checksummed and sums are attached onto the ordered extent record.
1884  *
1885  * At IO completion time the csums attached to the ordered extent record
1886  * are inserted into the btree
1887  */
1888 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1889                           int mirror_num, unsigned long bio_flags,
1890                           u64 bio_offset)
1891 {
1892         struct btrfs_root *root = BTRFS_I(inode)->root;
1893         int ret;
1894
1895         ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1896         if (ret) {
1897                 bio->bi_error = ret;
1898                 bio_endio(bio);
1899         }
1900         return ret;
1901 }
1902
1903 /*
1904  * extent_io.c submission hook. This does the right thing for csum calculation
1905  * on write, or reading the csums from the tree before a read
1906  */
1907 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1908                           int mirror_num, unsigned long bio_flags,
1909                           u64 bio_offset)
1910 {
1911         struct btrfs_root *root = BTRFS_I(inode)->root;
1912         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
1913         int ret = 0;
1914         int skip_sum;
1915         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1916
1917         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1918
1919         if (btrfs_is_free_space_inode(inode))
1920                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
1921
1922         if (!(rw & REQ_WRITE)) {
1923                 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1924                 if (ret)
1925                         goto out;
1926
1927                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1928                         ret = btrfs_submit_compressed_read(inode, bio,
1929                                                            mirror_num,
1930                                                            bio_flags);
1931                         goto out;
1932                 } else if (!skip_sum) {
1933                         ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1934                         if (ret)
1935                                 goto out;
1936                 }
1937                 goto mapit;
1938         } else if (async && !skip_sum) {
1939                 /* csum items have already been cloned */
1940                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1941                         goto mapit;
1942                 /* we're doing a write, do the async checksumming */
1943                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1944                                    inode, rw, bio, mirror_num,
1945                                    bio_flags, bio_offset,
1946                                    __btrfs_submit_bio_start,
1947                                    __btrfs_submit_bio_done);
1948                 goto out;
1949         } else if (!skip_sum) {
1950                 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1951                 if (ret)
1952                         goto out;
1953         }
1954
1955 mapit:
1956         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1957
1958 out:
1959         if (ret < 0) {
1960                 bio->bi_error = ret;
1961                 bio_endio(bio);
1962         }
1963         return ret;
1964 }
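
/*
 * The submission policy above, condensed into a decision function.  The
 * enum names are purely illustrative; the real hook additionally handles
 * compressed reads and the reloc-tree shortcut.
 */
enum csum_action { CSUM_LOOKUP, CSUM_ASYNC, CSUM_INLINE, CSUM_NONE };

static enum csum_action csum_policy(int is_write, int skip_sum, int async)
{
	if (!is_write)
		return skip_sum ? CSUM_NONE : CSUM_LOOKUP; /* verify on read */
	if (skip_sum)
		return CSUM_NONE;	/* NODATASUM: map the bio directly */
	return async ? CSUM_ASYNC	/* checksum in a worker, then map */
		     : CSUM_INLINE;	/* checksum now, in this context */
}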
1965
1966 /*
1967  * given a list of ordered sums, record them in the inode.  This happens
1968  * at IO completion time based on sums calculated at bio submission time.
1969  */
1970 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1971                              struct inode *inode, u64 file_offset,
1972                              struct list_head *list)
1973 {
1974         struct btrfs_ordered_sum *sum;
1975
1976         list_for_each_entry(sum, list, list) {
1977                 trans->adding_csums = 1;
1978                 btrfs_csum_file_blocks(trans,
1979                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1980                 trans->adding_csums = 0;
1981         }
1982         return 0;
1983 }
1984
1985 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1986                               struct extent_state **cached_state)
1987 {
1988         WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1989         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1990                                    cached_state, GFP_NOFS);
1991 }
1992
1993 /* see btrfs_writepage_start_hook for details on why this is required */
1994 struct btrfs_writepage_fixup {
1995         struct page *page;
1996         struct btrfs_work work;
1997 };
1998
1999 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2000 {
2001         struct btrfs_writepage_fixup *fixup;
2002         struct btrfs_ordered_extent *ordered;
2003         struct extent_state *cached_state = NULL;
2004         struct page *page;
2005         struct inode *inode;
2006         u64 page_start;
2007         u64 page_end;
2008         int ret;
2009
2010         fixup = container_of(work, struct btrfs_writepage_fixup, work);
2011         page = fixup->page;
2012 again:
2013         lock_page(page);
2014         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2015                 ClearPageChecked(page);
2016                 goto out_page;
2017         }
2018
2019         inode = page->mapping->host;
2020         page_start = page_offset(page);
2021         page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
2022
2023         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
2024                          &cached_state);
2025
2026         /* already ordered? We're done */
2027         if (PagePrivate2(page))
2028                 goto out;
2029
2030         ordered = btrfs_lookup_ordered_extent(inode, page_start);
2031         if (ordered) {
2032                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2033                                      page_end, &cached_state, GFP_NOFS);
2034                 unlock_page(page);
2035                 btrfs_start_ordered_extent(inode, ordered, 1);
2036                 btrfs_put_ordered_extent(ordered);
2037                 goto again;
2038         }
2039
2040         ret = btrfs_delalloc_reserve_space(inode, page_start,
2041                                            PAGE_CACHE_SIZE);
2042         if (ret) {
2043                 mapping_set_error(page->mapping, ret);
2044                 end_extent_writepage(page, ret, page_start, page_end);
2045                 ClearPageChecked(page);
2046                 goto out;
2047         }
2048
2049         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
2050                                         &cached_state);
2051         if (ret) {
2052                 mapping_set_error(page->mapping, ret);
2053                 end_extent_writepage(page, ret, page_start, page_end);
2054                 ClearPageChecked(page);
2055                 goto out;
2056         }
2057
2058         ClearPageChecked(page);
2059         set_page_dirty(page);
2060 out:
2061         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2062                              &cached_state, GFP_NOFS);
2063 out_page:
2064         unlock_page(page);
2065         page_cache_release(page);
2066         kfree(fixup);
2067 }
2068
2069 /*
2070  * There are a few paths in the higher layers of the kernel that directly
2071  * set the page dirty bit without asking the filesystem if it is a
2072  * good idea.  This causes problems because we want to make sure COW
2073  * properly happens and the data=ordered rules are followed.
2074  *
2075  * In our case any range that doesn't have the ORDERED bit set
2076  * hasn't been properly set up for IO.  We kick off an async process
2077  * to fix it up.  The async helper will wait for ordered extents, set
2078  * the delalloc bit and make it safe to write the page.
2079  */
2080 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2081 {
2082         struct inode *inode = page->mapping->host;
2083         struct btrfs_writepage_fixup *fixup;
2084         struct btrfs_root *root = BTRFS_I(inode)->root;
2085
2086         /* this page is properly in the ordered list */
2087         if (TestClearPagePrivate2(page))
2088                 return 0;
2089
2090         if (PageChecked(page))
2091                 return -EAGAIN;
2092
2093         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2094         if (!fixup)
2095                 return -EAGAIN;
2096
2097         SetPageChecked(page);
2098         page_cache_get(page);
2099         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2100                         btrfs_writepage_fixup_worker, NULL, NULL);
2101         fixup->page = page;
2102         btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
2103         return -EBUSY;
2104 }
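
/*
 * The three outcomes of the start hook above, summarized.  The flag names
 * are stand-ins: private2 marks ranges already covered by an ordered
 * extent, checked marks pages whose fixup is already queued, alloc_ok is
 * whether the fixup struct could be allocated.
 */
#include <errno.h>

static int writepage_start_outcome(int private2, int checked, int alloc_ok)
{
	if (private2)
		return 0;		/* range already ordered: write now */
	if (checked || !alloc_ok)
		return -EAGAIN;		/* fixup pending or cannot be queued */
	return -EBUSY;			/* fixup queued; redirty and back off */
}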
2105
2106 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2107                                        struct inode *inode, u64 file_pos,
2108                                        u64 disk_bytenr, u64 disk_num_bytes,
2109                                        u64 num_bytes, u64 ram_bytes,
2110                                        u8 compression, u8 encryption,
2111                                        u16 other_encoding, int extent_type)
2112 {
2113         struct btrfs_root *root = BTRFS_I(inode)->root;
2114         struct btrfs_file_extent_item *fi;
2115         struct btrfs_path *path;
2116         struct extent_buffer *leaf;
2117         struct btrfs_key ins;
2118         int extent_inserted = 0;
2119         int ret;
2120
2121         path = btrfs_alloc_path();
2122         if (!path)
2123                 return -ENOMEM;
2124
2125         /*
2126          * we may be replacing one extent in the tree with another.
2127          * The new extent is pinned in the extent map, and we don't want
2128          * to drop it from the cache until it is completely in the btree.
2129          *
2130          * So, tell btrfs_drop_extents to leave this extent in the cache.
2131  * The caller is expected to unpin it and allow it to be merged
2132          * with the others.
2133          */
2134         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2135                                    file_pos + num_bytes, NULL, 0,
2136                                    1, sizeof(*fi), &extent_inserted);
2137         if (ret)
2138                 goto out;
2139
2140         if (!extent_inserted) {
2141                 ins.objectid = btrfs_ino(inode);
2142                 ins.offset = file_pos;
2143                 ins.type = BTRFS_EXTENT_DATA_KEY;
2144
2145                 path->leave_spinning = 1;
2146                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2147                                               sizeof(*fi));
2148                 if (ret)
2149                         goto out;
2150         }
2151         leaf = path->nodes[0];
2152         fi = btrfs_item_ptr(leaf, path->slots[0],
2153                             struct btrfs_file_extent_item);
2154         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2155         btrfs_set_file_extent_type(leaf, fi, extent_type);
2156         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2157         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2158         btrfs_set_file_extent_offset(leaf, fi, 0);
2159         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2160         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2161         btrfs_set_file_extent_compression(leaf, fi, compression);
2162         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2163         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2164
2165         btrfs_mark_buffer_dirty(leaf);
2166         btrfs_release_path(path);
2167
2168         inode_add_bytes(inode, num_bytes);
2169
2170         ins.objectid = disk_bytenr;
2171         ins.offset = disk_num_bytes;
2172         ins.type = BTRFS_EXTENT_ITEM_KEY;
2173         ret = btrfs_alloc_reserved_file_extent(trans, root,
2174                                         root->root_key.objectid,
2175                                         btrfs_ino(inode), file_pos,
2176                                         ram_bytes, &ins);
2177         /*
2178          * Release the reserved range from inode dirty range map, as it is
2179          * already moved into delayed_ref_head
2180          */
2181         btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2182 out:
2183         btrfs_free_path(path);
2184
2185         return ret;
2186 }
2187
2188 /* snapshot-aware defrag */
2189 struct sa_defrag_extent_backref {
2190         struct rb_node node;
2191         struct old_sa_defrag_extent *old;
2192         u64 root_id;
2193         u64 inum;
2194         u64 file_pos;
2195         u64 extent_offset;
2196         u64 num_bytes;
2197         u64 generation;
2198 };
2199
2200 struct old_sa_defrag_extent {
2201         struct list_head list;
2202         struct new_sa_defrag_extent *new;
2203
2204         u64 extent_offset;
2205         u64 bytenr;
2206         u64 offset;
2207         u64 len;
2208         int count;
2209 };
2210
2211 struct new_sa_defrag_extent {
2212         struct rb_root root;
2213         struct list_head head;
2214         struct btrfs_path *path;
2215         struct inode *inode;
2216         u64 file_pos;
2217         u64 len;
2218         u64 bytenr;
2219         u64 disk_len;
2220         u8 compress_type;
2221 };
2222
2223 static int backref_comp(struct sa_defrag_extent_backref *b1,
2224                         struct sa_defrag_extent_backref *b2)
2225 {
2226         if (b1->root_id < b2->root_id)
2227                 return -1;
2228         else if (b1->root_id > b2->root_id)
2229                 return 1;
2230
2231         if (b1->inum < b2->inum)
2232                 return -1;
2233         else if (b1->inum > b2->inum)
2234                 return 1;
2235
2236         if (b1->file_pos < b2->file_pos)
2237                 return -1;
2238         else if (b1->file_pos > b2->file_pos)
2239                 return 1;
2240
2241         /*
2242          * [------------------------------] ===> (a range of space)
2243          *     |<--->|   |<---->| =============> (fs/file tree A)
2244          * |<---------------------------->| ===> (fs/file tree B)
2245          *
2246          * A range of space can refer to two file extents in one tree while
2247          * refer to only one file extent in another tree.
2248          *
2249          * So we may process the same disk offset more than once (two
2250          * extents in A) that lands on one extent in B, and then insert
2251          * two identical backrefs (both referring to the extent in B).
2252          */
2253         return 0;
2254 }
2255
2256 static void backref_insert(struct rb_root *root,
2257                            struct sa_defrag_extent_backref *backref)
2258 {
2259         struct rb_node **p = &root->rb_node;
2260         struct rb_node *parent = NULL;
2261         struct sa_defrag_extent_backref *entry;
2262         int ret;
2263
2264         while (*p) {
2265                 parent = *p;
2266                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2267
2268                 ret = backref_comp(backref, entry);
2269                 if (ret < 0)
2270                         p = &(*p)->rb_left;
2271                 else
2272                         p = &(*p)->rb_right;
2273         }
2274
2275         rb_link_node(&backref->node, parent, p);
2276         rb_insert_color(&backref->node, root);
2277 }
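
/*
 * The (root_id, inum, file_pos) ordering above, exercised with qsort in a
 * userspace sketch.  Only the three key fields are kept; equal keys (the
 * return-0 case) are the duplicate-backref situation described in
 * backref_comp().
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct key { uint64_t root_id, inum, file_pos; };

static int key_cmp(const void *a, const void *b)
{
	const struct key *k1 = a, *k2 = b;

	if (k1->root_id != k2->root_id)
		return k1->root_id < k2->root_id ? -1 : 1;
	if (k1->inum != k2->inum)
		return k1->inum < k2->inum ? -1 : 1;
	if (k1->file_pos != k2->file_pos)
		return k1->file_pos < k2->file_pos ? -1 : 1;
	return 0;	/* same backref seen via two extents */
}

int main(void)
{
	struct key keys[] = {
		{ 5, 257, 4096 }, { 5, 257, 0 }, { 2, 300, 0 }, { 5, 256, 8192 },
	};
	qsort(keys, 4, sizeof(keys[0]), key_cmp);
	for (int i = 0; i < 4; i++)
		printf("(%llu,%llu,%llu)\n",
		       (unsigned long long)keys[i].root_id,
		       (unsigned long long)keys[i].inum,
		       (unsigned long long)keys[i].file_pos);
	return 0;
}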
2278
2279 /*
2280  * Note the backref might have changed, and in this case we just return 0.
2281  */
2282 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2283                                        void *ctx)
2284 {
2285         struct btrfs_file_extent_item *extent;
2286         struct btrfs_fs_info *fs_info;
2287         struct old_sa_defrag_extent *old = ctx;
2288         struct new_sa_defrag_extent *new = old->new;
2289         struct btrfs_path *path = new->path;
2290         struct btrfs_key key;
2291         struct btrfs_root *root;
2292         struct sa_defrag_extent_backref *backref;
2293         struct extent_buffer *leaf;
2294         struct inode *inode = new->inode;
2295         int slot;
2296         int ret;
2297         u64 extent_offset;
2298         u64 num_bytes;
2299
2300         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2301             inum == btrfs_ino(inode))
2302                 return 0;
2303
2304         key.objectid = root_id;
2305         key.type = BTRFS_ROOT_ITEM_KEY;
2306         key.offset = (u64)-1;
2307
2308         fs_info = BTRFS_I(inode)->root->fs_info;
2309         root = btrfs_read_fs_root_no_name(fs_info, &key);
2310         if (IS_ERR(root)) {
2311                 if (PTR_ERR(root) == -ENOENT)
2312                         return 0;
2313                 WARN_ON(1);
2314                 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2315                          inum, offset, root_id);
2316                 return PTR_ERR(root);
2317         }
2318
2319         key.objectid = inum;
2320         key.type = BTRFS_EXTENT_DATA_KEY;
2321         if (offset > (u64)-1 << 32)
2322                 key.offset = 0;
2323         else
2324                 key.offset = offset;
2325
2326         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2327         if (WARN_ON(ret < 0))
2328                 return ret;
2329         ret = 0;
2330
2331         while (1) {
2332                 cond_resched();
2333
2334                 leaf = path->nodes[0];
2335                 slot = path->slots[0];
2336
2337                 if (slot >= btrfs_header_nritems(leaf)) {
2338                         ret = btrfs_next_leaf(root, path);
2339                         if (ret < 0) {
2340                                 goto out;
2341                         } else if (ret > 0) {
2342                                 ret = 0;
2343                                 goto out;
2344                         }
2345                         continue;
2346                 }
2347
2348                 path->slots[0]++;
2349
2350                 btrfs_item_key_to_cpu(leaf, &key, slot);
2351
2352                 if (key.objectid > inum)
2353                         goto out;
2354
2355                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2356                         continue;
2357
2358                 extent = btrfs_item_ptr(leaf, slot,
2359                                         struct btrfs_file_extent_item);
2360
2361                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2362                         continue;
2363
2364                 /*
2365                  * 'offset' refers to the exact key.offset,
2366                  * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2367                  * (key.offset - extent_offset).
2368                  */
2369                 if (key.offset != offset)
2370                         continue;
2371
2372                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2373                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2374
2375                 if (extent_offset >= old->extent_offset + old->offset +
2376                     old->len || extent_offset + num_bytes <=
2377                     old->extent_offset + old->offset)
2378                         continue;
2379                 break;
2380         }
2381
2382         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2383         if (!backref) {
2384                 ret = -ENOMEM;
2385                 goto out;
2386         }
2387
2388         backref->root_id = root_id;
2389         backref->inum = inum;
2390         backref->file_pos = offset;
2391         backref->num_bytes = num_bytes;
2392         backref->extent_offset = extent_offset;
2393         backref->generation = btrfs_file_extent_generation(leaf, extent);
2394         backref->old = old;
2395         backref_insert(&new->root, backref);
2396         old->count++;
2397 out:
2398         btrfs_release_path(path);
2399         WARN_ON(ret);
2400         return ret;
2401 }
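
/*
 * The skip test near the end of the loop above is the usual half-open
 * interval disjointness check: [a, a + alen) and [b, b + blen) overlap iff
 * neither range starts at or past the other's end.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ranges_overlap(uint64_t a, uint64_t alen, uint64_t b, uint64_t blen)
{
	return a < b + blen && b < a + alen;
}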
2402
2403 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2404                                    struct new_sa_defrag_extent *new)
2405 {
2406         struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2407         struct old_sa_defrag_extent *old, *tmp;
2408         int ret;
2409
2410         new->path = path;
2411
2412         list_for_each_entry_safe(old, tmp, &new->head, list) {
2413                 ret = iterate_inodes_from_logical(old->bytenr +
2414                                                   old->extent_offset, fs_info,
2415                                                   path, record_one_backref,
2416                                                   old);
2417                 if (ret < 0 && ret != -ENOENT)
2418                         return false;
2419
2420                 /* no backref to be processed for this extent */
2421                 if (!old->count) {
2422                         list_del(&old->list);
2423                         kfree(old);
2424                 }
2425         }
2426
2427         if (list_empty(&new->head))
2428                 return false;
2429
2430         return true;
2431 }
2432
2433 static int relink_is_mergable(struct extent_buffer *leaf,
2434                               struct btrfs_file_extent_item *fi,
2435                               struct new_sa_defrag_extent *new)
2436 {
2437         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2438                 return 0;
2439
2440         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2441                 return 0;
2442
2443         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2444                 return 0;
2445
2446         if (btrfs_file_extent_encryption(leaf, fi) ||
2447             btrfs_file_extent_other_encoding(leaf, fi))
2448                 return 0;
2449
2450         return 1;
2451 }
2452
2453 /*
2454  * Note the backref might have changed, and in this case we just return 0.
2455  */
2456 static noinline int relink_extent_backref(struct btrfs_path *path,
2457                                  struct sa_defrag_extent_backref *prev,
2458                                  struct sa_defrag_extent_backref *backref)
2459 {
2460         struct btrfs_file_extent_item *extent;
2461         struct btrfs_file_extent_item *item;
2462         struct btrfs_ordered_extent *ordered;
2463         struct btrfs_trans_handle *trans;
2464         struct btrfs_fs_info *fs_info;
2465         struct btrfs_root *root;
2466         struct btrfs_key key;
2467         struct extent_buffer *leaf;
2468         struct old_sa_defrag_extent *old = backref->old;
2469         struct new_sa_defrag_extent *new = old->new;
2470         struct inode *src_inode = new->inode;
2471         struct inode *inode;
2472         struct extent_state *cached = NULL;
2473         int ret = 0;
2474         u64 start;
2475         u64 len;
2476         u64 lock_start;
2477         u64 lock_end;
2478         bool merge = false;
2479         int index;
2480
2481         if (prev && prev->root_id == backref->root_id &&
2482             prev->inum == backref->inum &&
2483             prev->file_pos + prev->num_bytes == backref->file_pos)
2484                 merge = true;
2485
2486         /* step 1: get root */
2487         key.objectid = backref->root_id;
2488         key.type = BTRFS_ROOT_ITEM_KEY;
2489         key.offset = (u64)-1;
2490
2491         fs_info = BTRFS_I(src_inode)->root->fs_info;
2492         index = srcu_read_lock(&fs_info->subvol_srcu);
2493
2494         root = btrfs_read_fs_root_no_name(fs_info, &key);
2495         if (IS_ERR(root)) {
2496                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2497                 if (PTR_ERR(root) == -ENOENT)
2498                         return 0;
2499                 return PTR_ERR(root);
2500         }
2501
2502         if (btrfs_root_readonly(root)) {
2503                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2504                 return 0;
2505         }
2506
2507         /* step 2: get inode */
2508         key.objectid = backref->inum;
2509         key.type = BTRFS_INODE_ITEM_KEY;
2510         key.offset = 0;
2511
2512         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2513         if (IS_ERR(inode)) {
2514                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2515                 return 0;
2516         }
2517
2518         srcu_read_unlock(&fs_info->subvol_srcu, index);
2519
2520         /* step 3: relink backref */
2521         lock_start = backref->file_pos;
2522         lock_end = backref->file_pos + backref->num_bytes - 1;
2523         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2524                          0, &cached);
2525
2526         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2527         if (ordered) {
2528                 btrfs_put_ordered_extent(ordered);
2529                 goto out_unlock;
2530         }
2531
2532         trans = btrfs_join_transaction(root);
2533         if (IS_ERR(trans)) {
2534                 ret = PTR_ERR(trans);
2535                 goto out_unlock;
2536         }
2537
2538         key.objectid = backref->inum;
2539         key.type = BTRFS_EXTENT_DATA_KEY;
2540         key.offset = backref->file_pos;
2541
2542         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2543         if (ret < 0) {
2544                 goto out_free_path;
2545         } else if (ret > 0) {
2546                 ret = 0;
2547                 goto out_free_path;
2548         }
2549
2550         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2551                                 struct btrfs_file_extent_item);
2552
2553         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2554             backref->generation)
2555                 goto out_free_path;
2556
2557         btrfs_release_path(path);
2558
2559         start = backref->file_pos;
2560         if (backref->extent_offset < old->extent_offset + old->offset)
2561                 start += old->extent_offset + old->offset -
2562                          backref->extent_offset;
2563
2564         len = min(backref->extent_offset + backref->num_bytes,
2565                   old->extent_offset + old->offset + old->len);
2566         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2567
2568         ret = btrfs_drop_extents(trans, root, inode, start,
2569                                  start + len, 1);
2570         if (ret)
2571                 goto out_free_path;
2572 again:
2573         key.objectid = btrfs_ino(inode);
2574         key.type = BTRFS_EXTENT_DATA_KEY;
2575         key.offset = start;
2576
2577         path->leave_spinning = 1;
2578         if (merge) {
2579                 struct btrfs_file_extent_item *fi;
2580                 u64 extent_len;
2581                 struct btrfs_key found_key;
2582
2583                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2584                 if (ret < 0)
2585                         goto out_free_path;
2586
2587                 path->slots[0]--;
2588                 leaf = path->nodes[0];
2589                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2590
2591                 fi = btrfs_item_ptr(leaf, path->slots[0],
2592                                     struct btrfs_file_extent_item);
2593                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2594
2595                 if (extent_len + found_key.offset == start &&
2596                     relink_is_mergable(leaf, fi, new)) {
2597                         btrfs_set_file_extent_num_bytes(leaf, fi,
2598                                                         extent_len + len);
2599                         btrfs_mark_buffer_dirty(leaf);
2600                         inode_add_bytes(inode, len);
2601
2602                         ret = 1;
2603                         goto out_free_path;
2604                 } else {
2605                         merge = false;
2606                         btrfs_release_path(path);
2607                         goto again;
2608                 }
2609         }
2610
2611         ret = btrfs_insert_empty_item(trans, root, path, &key,
2612                                         sizeof(*extent));
2613         if (ret) {
2614                 btrfs_abort_transaction(trans, root, ret);
2615                 goto out_free_path;
2616         }
2617
2618         leaf = path->nodes[0];
2619         item = btrfs_item_ptr(leaf, path->slots[0],
2620                                 struct btrfs_file_extent_item);
2621         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2622         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2623         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2624         btrfs_set_file_extent_num_bytes(leaf, item, len);
2625         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2626         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2627         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2628         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2629         btrfs_set_file_extent_encryption(leaf, item, 0);
2630         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2631
2632         btrfs_mark_buffer_dirty(leaf);
2633         inode_add_bytes(inode, len);
2634         btrfs_release_path(path);
2635
2636         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2637                         new->disk_len, 0,
2638                         backref->root_id, backref->inum,
2639                         new->file_pos); /* start - extent_offset */
2640         if (ret) {
2641                 btrfs_abort_transaction(trans, root, ret);
2642                 goto out_free_path;
2643         }
2644
2645         ret = 1;
2646 out_free_path:
2647         btrfs_release_path(path);
2648         path->leave_spinning = 0;
2649         btrfs_end_transaction(trans, root);
2650 out_unlock:
2651         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2652                              &cached, GFP_NOFS);
2653         iput(inode);
2654         return ret;
2655 }
2656
2657 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2658 {
2659         struct old_sa_defrag_extent *old, *tmp;
2660
2661         if (!new)
2662                 return;
2663
2664         list_for_each_entry_safe(old, tmp, &new->head, list) {
2665                 kfree(old);
2666         }
2667         kfree(new);
2668 }
2669
2670 static void relink_file_extents(struct new_sa_defrag_extent *new)
2671 {
2672         struct btrfs_path *path;
2673         struct sa_defrag_extent_backref *backref;
2674         struct sa_defrag_extent_backref *prev = NULL;
2675         struct inode *inode;
2676         struct btrfs_root *root;
2677         struct rb_node *node;
2678         int ret;
2679
2680         inode = new->inode;
2681         root = BTRFS_I(inode)->root;
2682
2683         path = btrfs_alloc_path();
2684         if (!path)
2685                 return;
2686
2687         if (!record_extent_backrefs(path, new)) {
2688                 btrfs_free_path(path);
2689                 goto out;
2690         }
2691         btrfs_release_path(path);
2692
2693         while (1) {
2694                 node = rb_first(&new->root);
2695                 if (!node)
2696                         break;
2697                 rb_erase(node, &new->root);
2698
2699                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2700
2701                 ret = relink_extent_backref(path, prev, backref);
2702                 WARN_ON(ret < 0);
2703
2704                 kfree(prev);
2705
2706                 if (ret == 1)
2707                         prev = backref;
2708                 else
2709                         prev = NULL;
2710                 cond_resched();
2711         }
2712         kfree(prev);
2713
2714         btrfs_free_path(path);
2715 out:
2716         free_sa_defrag_extent(new);
2717
2718         atomic_dec(&root->fs_info->defrag_running);
2719         wake_up(&root->fs_info->transaction_wait);
2720 }
2721
2722 static struct new_sa_defrag_extent *
2723 record_old_file_extents(struct inode *inode,
2724                         struct btrfs_ordered_extent *ordered)
2725 {
2726         struct btrfs_root *root = BTRFS_I(inode)->root;
2727         struct btrfs_path *path;
2728         struct btrfs_key key;
2729         struct old_sa_defrag_extent *old;
2730         struct new_sa_defrag_extent *new;
2731         int ret;
2732
2733         new = kmalloc(sizeof(*new), GFP_NOFS);
2734         if (!new)
2735                 return NULL;
2736
2737         new->inode = inode;
2738         new->file_pos = ordered->file_offset;
2739         new->len = ordered->len;
2740         new->bytenr = ordered->start;
2741         new->disk_len = ordered->disk_len;
2742         new->compress_type = ordered->compress_type;
2743         new->root = RB_ROOT;
2744         INIT_LIST_HEAD(&new->head);
2745
2746         path = btrfs_alloc_path();
2747         if (!path)
2748                 goto out_kfree;
2749
2750         key.objectid = btrfs_ino(inode);
2751         key.type = BTRFS_EXTENT_DATA_KEY;
2752         key.offset = new->file_pos;
2753
2754         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2755         if (ret < 0)
2756                 goto out_free_path;
2757         if (ret > 0 && path->slots[0] > 0)
2758                 path->slots[0]--;
2759
2760         /* find out all the old extents for the file range */
2761         while (1) {
2762                 struct btrfs_file_extent_item *extent;
2763                 struct extent_buffer *l;
2764                 int slot;
2765                 u64 num_bytes;
2766                 u64 offset;
2767                 u64 end;
2768                 u64 disk_bytenr;
2769                 u64 extent_offset;
2770
2771                 l = path->nodes[0];
2772                 slot = path->slots[0];
2773
2774                 if (slot >= btrfs_header_nritems(l)) {
2775                         ret = btrfs_next_leaf(root, path);
2776                         if (ret < 0)
2777                                 goto out_free_path;
2778                         else if (ret > 0)
2779                                 break;
2780                         continue;
2781                 }
2782
2783                 btrfs_item_key_to_cpu(l, &key, slot);
2784
2785                 if (key.objectid != btrfs_ino(inode))
2786                         break;
2787                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2788                         break;
2789                 if (key.offset >= new->file_pos + new->len)
2790                         break;
2791
2792                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2793
2794                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2795                 if (key.offset + num_bytes < new->file_pos)
2796                         goto next;
2797
2798                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2799                 if (!disk_bytenr)
2800                         goto next;
2801
2802                 extent_offset = btrfs_file_extent_offset(l, extent);
2803
2804                 old = kmalloc(sizeof(*old), GFP_NOFS);
2805                 if (!old)
2806                         goto out_free_path;
2807
2808                 offset = max(new->file_pos, key.offset);
2809                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2810
2811                 old->bytenr = disk_bytenr;
2812                 old->extent_offset = extent_offset;
2813                 old->offset = offset - key.offset;
2814                 old->len = end - offset;
2815                 old->new = new;
2816                 old->count = 0;
2817                 list_add_tail(&old->list, &new->head);
2818 next:
2819                 path->slots[0]++;
2820                 cond_resched();
2821         }
2822
2823         btrfs_free_path(path);
2824         atomic_inc(&root->fs_info->defrag_running);
2825
2826         return new;
2827
2828 out_free_path:
2829         btrfs_free_path(path);
2830 out_kfree:
2831         free_sa_defrag_extent(new);
2832         return NULL;
2833 }
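
/*
 * Editor's sketch (hypothetical, not part of the original source): the
 * loop above uses the standard btree walk idiom seen all over this file;
 * search once, then advance slot by slot and hop to the next leaf when
 * the current one runs out.  A minimal walker over one file's
 * EXTENT_DATA items could look like this:
 */
static inline int sketch_walk_file_extents(struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* < 0 is an error, > 0 means done */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;
		/* ... inspect the btrfs_file_extent_item in this slot ... */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}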
2834
2835 static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
2836                                          u64 start, u64 len)
2837 {
2838         struct btrfs_block_group_cache *cache;
2839
2840         cache = btrfs_lookup_block_group(root->fs_info, start);
2841         ASSERT(cache);
2842
2843         spin_lock(&cache->lock);
2844         cache->delalloc_bytes -= len;
2845         spin_unlock(&cache->lock);
2846
2847         btrfs_put_block_group(cache);
2848 }
2849
2850 /* as ordered data IO finishes, this gets called so we can finish
2851  * an ordered extent if the range of bytes in the file it covers is
2852  * fully written.
2853  */
2854 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2855 {
2856         struct inode *inode = ordered_extent->inode;
2857         struct btrfs_root *root = BTRFS_I(inode)->root;
2858         struct btrfs_trans_handle *trans = NULL;
2859         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2860         struct extent_state *cached_state = NULL;
2861         struct new_sa_defrag_extent *new = NULL;
2862         int compress_type = 0;
2863         int ret = 0;
2864         u64 logical_len = ordered_extent->len;
2865         bool nolock;
2866         bool truncated = false;
2867
2868         nolock = btrfs_is_free_space_inode(inode);
2869
2870         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2871                 ret = -EIO;
2872                 goto out;
2873         }
2874
2875         btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
2876                                      ordered_extent->file_offset +
2877                                      ordered_extent->len - 1);
2878
2879         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2880                 truncated = true;
2881                 logical_len = ordered_extent->truncated_len;
2882                 /* If we truncated the entire extent, don't bother adding it */
2883                 if (!logical_len)
2884                         goto out;
2885         }
2886
2887         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2888                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2889
2890                 /*
2891                  * For the mwrite (mmap + memset to write) case, we still
2892                  * reserve space for the NOCOW range.
2893                  * As NOCOW won't cause a new delayed ref, just free the space.
2894                  */
2895                 btrfs_qgroup_free_data(inode, ordered_extent->file_offset,
2896                                        ordered_extent->len);
2897                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2898                 if (nolock)
2899                         trans = btrfs_join_transaction_nolock(root);
2900                 else
2901                         trans = btrfs_join_transaction(root);
2902                 if (IS_ERR(trans)) {
2903                         ret = PTR_ERR(trans);
2904                         trans = NULL;
2905                         goto out;
2906                 }
2907                 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2908                 ret = btrfs_update_inode_fallback(trans, root, inode);
2909                 if (ret) /* -ENOMEM or corruption */
2910                         btrfs_abort_transaction(trans, root, ret);
2911                 goto out;
2912         }
2913
2914         lock_extent_bits(io_tree, ordered_extent->file_offset,
2915                          ordered_extent->file_offset + ordered_extent->len - 1,
2916                          0, &cached_state);
2917
2918         ret = test_range_bit(io_tree, ordered_extent->file_offset,
2919                         ordered_extent->file_offset + ordered_extent->len - 1,
2920                         EXTENT_DEFRAG, 1, cached_state);
2921         if (ret) {
2922                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
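                /*
                 * Note (editorial): the "0 &&" in the test below
                 * deliberately short-circuits it, so snapshot-aware defrag
                 * is disabled and record_old_file_extents() is never
                 * called from this path.
                 */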
2923                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
2924                         /* the inode is shared */
2925                         new = record_old_file_extents(inode, ordered_extent);
2926
2927                 clear_extent_bit(io_tree, ordered_extent->file_offset,
2928                         ordered_extent->file_offset + ordered_extent->len - 1,
2929                         EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2930         }
2931
2932         if (nolock)
2933                 trans = btrfs_join_transaction_nolock(root);
2934         else
2935                 trans = btrfs_join_transaction(root);
2936         if (IS_ERR(trans)) {
2937                 ret = PTR_ERR(trans);
2938                 trans = NULL;
2939                 goto out_unlock;
2940         }
2941
2942         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2943
2944         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2945                 compress_type = ordered_extent->compress_type;
2946         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2947                 BUG_ON(compress_type);
2948                 ret = btrfs_mark_extent_written(trans, inode,
2949                                                 ordered_extent->file_offset,
2950                                                 ordered_extent->file_offset +
2951                                                 logical_len);
2952         } else {
2953                 BUG_ON(root == root->fs_info->tree_root);
2954                 ret = insert_reserved_file_extent(trans, inode,
2955                                                 ordered_extent->file_offset,
2956                                                 ordered_extent->start,
2957                                                 ordered_extent->disk_len,
2958                                                 logical_len, logical_len,
2959                                                 compress_type, 0, 0,
2960                                                 BTRFS_FILE_EXTENT_REG);
2961                 if (!ret)
2962                         btrfs_release_delalloc_bytes(root,
2963                                                      ordered_extent->start,
2964                                                      ordered_extent->disk_len);
2965         }
2966         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2967                            ordered_extent->file_offset, ordered_extent->len,
2968                            trans->transid);
2969         if (ret < 0) {
2970                 btrfs_abort_transaction(trans, root, ret);
2971                 goto out_unlock;
2972         }
2973
2974         add_pending_csums(trans, inode, ordered_extent->file_offset,
2975                           &ordered_extent->list);
2976
2977         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2978         ret = btrfs_update_inode_fallback(trans, root, inode);
2979         if (ret) { /* -ENOMEM or corruption */
2980                 btrfs_abort_transaction(trans, root, ret);
2981                 goto out_unlock;
2982         }
2983         ret = 0;
2984 out_unlock:
2985         unlock_extent_cached(io_tree, ordered_extent->file_offset,
2986                              ordered_extent->file_offset +
2987                              ordered_extent->len - 1, &cached_state, GFP_NOFS);
2988 out:
2989         if (root != root->fs_info->tree_root)
2990                 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2991         if (trans)
2992                 btrfs_end_transaction(trans, root);
2993
2994         if (ret || truncated) {
2995                 u64 start, end;
2996
2997                 if (truncated)
2998                         start = ordered_extent->file_offset + logical_len;
2999                 else
3000                         start = ordered_extent->file_offset;
3001                 end = ordered_extent->file_offset + ordered_extent->len - 1;
3002                 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
3003
3004                 /* Drop the cache for the part of the extent we didn't write. */
3005                 btrfs_drop_extent_cache(inode, start, end, 0);
3006
3007                 /*
3008                  * If the ordered extent had an IOERR or something else went
3009                  * wrong we need to return the space for this ordered extent
3010                  * back to the allocator.  We only free the extent in the
3011                  * truncated case if we didn't write out the extent at all.
3012                  */
3013                 if ((ret || !logical_len) &&
3014                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3015                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
3016                         btrfs_free_reserved_extent(root, ordered_extent->start,
3017                                                    ordered_extent->disk_len, 1);
3018         }
3019
3020
3021         /*
3022          * This needs to be done to make sure anybody waiting knows we are done
3023          * updating everything for this ordered extent.
3024          */
3025         btrfs_remove_ordered_extent(inode, ordered_extent);
3026
3027         /* for snapshot-aware defrag */
3028         if (new) {
3029                 if (ret) {
3030                         free_sa_defrag_extent(new);
3031                         atomic_dec(&root->fs_info->defrag_running);
3032                 } else {
3033                         relink_file_extents(new);
3034                 }
3035         }
3036
3037         /* once for us */
3038         btrfs_put_ordered_extent(ordered_extent);
3039         /* once for the tree */
3040         btrfs_put_ordered_extent(ordered_extent);
3041
3042         return ret;
3043 }
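
/*
 * Editorial summary of the completion path implemented above and below:
 * bios finish -> btrfs_writepage_end_io_hook() decrements the pending
 * byte count on the ordered extent -> once the whole range is written,
 * the ordered extent is queued on an endio workqueue -> finish_ordered_fn()
 * calls btrfs_finish_ordered_io(), which joins a transaction, inserts or
 * updates the file extent item, adds the pending csums and updates i_size.
 */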
3044
3045 static void finish_ordered_fn(struct btrfs_work *work)
3046 {
3047         struct btrfs_ordered_extent *ordered_extent;
3048         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3049         btrfs_finish_ordered_io(ordered_extent);
3050 }
3051
3052 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3053                                 struct extent_state *state, int uptodate)
3054 {
3055         struct inode *inode = page->mapping->host;
3056         struct btrfs_root *root = BTRFS_I(inode)->root;
3057         struct btrfs_ordered_extent *ordered_extent = NULL;
3058         struct btrfs_workqueue *wq;
3059         btrfs_work_func_t func;
3060
3061         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3062
3063         ClearPagePrivate2(page);
3064         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3065                                             end - start + 1, uptodate))
3066                 return 0;
3067
3068         if (btrfs_is_free_space_inode(inode)) {
3069                 wq = root->fs_info->endio_freespace_worker;
3070                 func = btrfs_freespace_write_helper;
3071         } else {
3072                 wq = root->fs_info->endio_write_workers;
3073                 func = btrfs_endio_write_helper;
3074         }
3075
3076         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3077                         NULL);
3078         btrfs_queue_work(wq, &ordered_extent->work);
3079
3080         return 0;
3081 }
3082
3083 static int __readpage_endio_check(struct inode *inode,
3084                                   struct btrfs_io_bio *io_bio,
3085                                   int icsum, struct page *page,
3086                                   int pgoff, u64 start, size_t len)
3087 {
3088         char *kaddr;
3089         u32 csum_expected;
3090         u32 csum = ~(u32)0;
3091
3092         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3093
3094         kaddr = kmap_atomic(page);
3095         csum = btrfs_csum_data(kaddr + pgoff, csum, len);
3096         btrfs_csum_final(csum, (char *)&csum);
3097         if (csum != csum_expected)
3098                 goto zeroit;
3099
3100         kunmap_atomic(kaddr);
3101         return 0;
3102 zeroit:
3103         btrfs_warn_rl(BTRFS_I(inode)->root->fs_info,
3104                 "csum failed ino %llu off %llu csum %u expected csum %u",
3105                            btrfs_ino(inode), start, csum, csum_expected);
3106         memset(kaddr + pgoff, 1, len);
3107         flush_dcache_page(page);
3108         kunmap_atomic(kaddr);
3109         if (csum_expected == 0)
3110                 return 0;
3111         return -EIO;
3112 }
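
/*
 * Editorial note: btrfs_csum_data() is crc32c in this tree, so the check
 * above is conceptually:
 *
 *	u32 csum = ~(u32)0;
 *	csum = btrfs_csum_data(kaddr + pgoff, csum, len);
 *	btrfs_csum_final(csum, (char *)&csum);
 *	ok = (csum == csum_expected);
 *
 * On a mismatch the page range is poisoned with a non-zero pattern so
 * stale data can't leak to userspace, and extent_io.c retries the read
 * from another mirror.
 */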
3113
3114 /*
3115  * when reads are done, we need to check csums to verify the data is correct.
3116  * If there's a match, we allow the bio to finish.  If not, the code in
3117  * extent_io.c will try to find good copies for us.
3118  */
3119 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3120                                       u64 phy_offset, struct page *page,
3121                                       u64 start, u64 end, int mirror)
3122 {
3123         size_t offset = start - page_offset(page);
3124         struct inode *inode = page->mapping->host;
3125         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3126         struct btrfs_root *root = BTRFS_I(inode)->root;
3127
3128         if (PageChecked(page)) {
3129                 ClearPageChecked(page);
3130                 return 0;
3131         }
3132
3133         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3134                 return 0;
3135
3136         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3137             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3138                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
3139                                   GFP_NOFS);
3140                 return 0;
3141         }
3142
3143         phy_offset >>= inode->i_sb->s_blocksize_bits;
3144         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3145                                       start, (size_t)(end - start + 1));
3146 }
3147
3148 struct delayed_iput {
3149         struct list_head list;
3150         struct inode *inode;
3151 };
3152
3153 /* JDM: If this is fs-wide, why can't we add a pointer to
3154  * btrfs_inode instead and avoid the allocation? */
3155 void btrfs_add_delayed_iput(struct inode *inode)
3156 {
3157         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3158         struct delayed_iput *delayed;
3159
3160         if (atomic_add_unless(&inode->i_count, -1, 1))
3161                 return;
3162
3163         delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
3164         delayed->inode = inode;
3165
3166         spin_lock(&fs_info->delayed_iput_lock);
3167         list_add_tail(&delayed->list, &fs_info->delayed_iputs);
3168         spin_unlock(&fs_info->delayed_iput_lock);
3169 }
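
/*
 * Editorial note on the JDM question above: later kernels do exactly
 * that; struct btrfs_inode gained its own delayed-iput list_head, which
 * removes this GFP_NOFS | __GFP_NOFAIL allocation (mentioned here only
 * as context, not as part of this tree).
 */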
3170
3171 void btrfs_run_delayed_iputs(struct btrfs_root *root)
3172 {
3173         LIST_HEAD(list);
3174         struct btrfs_fs_info *fs_info = root->fs_info;
3175         struct delayed_iput *delayed;
3176         int empty;
3177
3178         spin_lock(&fs_info->delayed_iput_lock);
3179         empty = list_empty(&fs_info->delayed_iputs);
3180         spin_unlock(&fs_info->delayed_iput_lock);
3181         if (empty)
3182                 return;
3183
3184         spin_lock(&fs_info->delayed_iput_lock);
3185         list_splice_init(&fs_info->delayed_iputs, &list);
3186         spin_unlock(&fs_info->delayed_iput_lock);
3187
3188         while (!list_empty(&list)) {
3189                 delayed = list_entry(list.next, struct delayed_iput, list);
3190                 list_del(&delayed->list);
3191                 iput(delayed->inode);
3192                 kfree(delayed);
3193         }
3194 }
3195
3196 /*
3197  * This is called at transaction commit time. If there are no orphan
3198  * files in the subvolume, it removes the orphan item and frees the
3199  * block_rsv structure.
3200  */
3201 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3202                               struct btrfs_root *root)
3203 {
3204         struct btrfs_block_rsv *block_rsv;
3205         int ret;
3206
3207         if (atomic_read(&root->orphan_inodes) ||
3208             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3209                 return;
3210
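        /*
         * Editorial note: re-check both conditions under orphan_lock;
         * either can change between the unlocked fast-path test above
         * and acquiring the lock.
         */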
3211         spin_lock(&root->orphan_lock);
3212         if (atomic_read(&root->orphan_inodes)) {
3213                 spin_unlock(&root->orphan_lock);
3214                 return;
3215         }
3216
3217         if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3218                 spin_unlock(&root->orphan_lock);
3219                 return;
3220         }
3221
3222         block_rsv = root->orphan_block_rsv;
3223         root->orphan_block_rsv = NULL;
3224         spin_unlock(&root->orphan_lock);
3225
3226         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3227             btrfs_root_refs(&root->root_item) > 0) {
3228                 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
3229                                             root->root_key.objectid);
3230                 if (ret)
3231                         btrfs_abort_transaction(trans, root, ret);
3232                 else
3233                         clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3234                                   &root->state);
3235         }
3236
3237         if (block_rsv) {
3238                 WARN_ON(block_rsv->size > 0);
3239                 btrfs_free_block_rsv(root, block_rsv);
3240         }
3241 }
3242
3243 /*
3244  * This creates an orphan entry for the given inode in case something goes
3245  * wrong in the middle of an unlink/truncate.
3246  *
3247  * NOTE: the caller of this function should reserve 5 units of metadata for
3248  *       this function.
3249  */
3250 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
3251 {
3252         struct btrfs_root *root = BTRFS_I(inode)->root;
3253         struct btrfs_block_rsv *block_rsv = NULL;
3254         int reserve = 0;
3255         int insert = 0;
3256         int ret;
3257
3258         if (!root->orphan_block_rsv) {
3259                 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
3260                 if (!block_rsv)
3261                         return -ENOMEM;
3262         }
3263
3264         spin_lock(&root->orphan_lock);
3265         if (!root->orphan_block_rsv) {
3266                 root->orphan_block_rsv = block_rsv;
3267         } else if (block_rsv) {
3268                 btrfs_free_block_rsv(root, block_rsv);
3269                 block_rsv = NULL;
3270         }
3271
3272         if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3273                               &BTRFS_I(inode)->runtime_flags)) {
3274 #if 0
3275                 /*
3276                  * For proper ENOSPC handling, we should do orphan
3277                  * cleanup when mounting. But this introduces backward
3278                  * compatibility issue.
3279                  */
3280                 if (!xchg(&root->orphan_item_inserted, 1))
3281                         insert = 2;
3282                 else
3283                         insert = 1;
3284 #endif
3285                 insert = 1;
3286                 atomic_inc(&root->orphan_inodes);
3287         }
3288
3289         if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3290                               &BTRFS_I(inode)->runtime_flags))
3291                 reserve = 1;
3292         spin_unlock(&root->orphan_lock);
3293
3294         /* grab metadata reservation from transaction handle */
3295         if (reserve) {
3296                 ret = btrfs_orphan_reserve_metadata(trans, inode);
3297                 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
3298         }
3299
3300         /* insert an orphan item to track this unlinked/truncated file */
3301         if (insert >= 1) {
3302                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3303                 if (ret) {
3304                         atomic_dec(&root->orphan_inodes);
3305                         if (reserve) {
3306                                 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3307                                           &BTRFS_I(inode)->runtime_flags);
3308                                 btrfs_orphan_release_metadata(inode);
3309                         }
3310                         if (ret != -EEXIST) {
3311                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3312                                           &BTRFS_I(inode)->runtime_flags);
3313                                 btrfs_abort_transaction(trans, root, ret);
3314                                 return ret;
3315                         }
3316                 }
3317                 ret = 0;
3318         }
3319
3320         /* insert an orphan item to record that the subvolume contains orphan files */
3321         if (insert >= 2) {
3322                 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3323                                                root->root_key.objectid);
3324                 if (ret && ret != -EEXIST) {
3325                         btrfs_abort_transaction(trans, root, ret);
3326                         return ret;
3327                 }
3328         }
3329         return 0;
3330 }
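
/*
 * Editor's sketch (hypothetical, not in the original source): a typical
 * caller honors the "reserve 5 units" note above by sizing the
 * transaction before adding the orphan item, roughly:
 */
static inline int sketch_orphan_prep(struct btrfs_root *root,
				     struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/* reserve metadata units for the orphan item insertion */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_orphan_add(trans, inode);
	/* ... the actual unlink/truncate work would go here ... */

	btrfs_end_transaction(trans, root);
	return ret;
}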
3331
3332 /*
3333  * We have done the truncate/delete so we can go ahead and remove the orphan
3334  * item for this particular inode.
3335  */
3336 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3337                             struct inode *inode)
3338 {
3339         struct btrfs_root *root = BTRFS_I(inode)->root;
3340         int delete_item = 0;
3341         int release_rsv = 0;
3342         int ret = 0;
3343
3344         spin_lock(&root->orphan_lock);
3345         if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3346                                &BTRFS_I(inode)->runtime_flags))
3347                 delete_item = 1;
3348
3349         if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3350                                &BTRFS_I(inode)->runtime_flags))
3351                 release_rsv = 1;
3352         spin_unlock(&root->orphan_lock);
3353
3354         if (delete_item) {
3355                 atomic_dec(&root->orphan_inodes);
3356                 if (trans)
3357                         ret = btrfs_del_orphan_item(trans, root,
3358                                                     btrfs_ino(inode));
3359         }
3360
3361         if (release_rsv)
3362                 btrfs_orphan_release_metadata(inode);
3363
3364         return ret;
3365 }
3366
3367 /*
3368  * this cleans up any orphans that may be left on the list from the last use
3369  * of this root.
3370  */
3371 int btrfs_orphan_cleanup(struct btrfs_root *root)
3372 {
3373         struct btrfs_path *path;
3374         struct extent_buffer *leaf;
3375         struct btrfs_key key, found_key;
3376         struct btrfs_trans_handle *trans;
3377         struct inode *inode;
3378         u64 last_objectid = 0;
3379         int ret = 0, nr_unlink = 0, nr_truncate = 0;
3380
3381         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3382                 return 0;
3383
3384         path = btrfs_alloc_path();
3385         if (!path) {
3386                 ret = -ENOMEM;
3387                 goto out;
3388         }
3389         path->reada = -1;
3390
3391         key.objectid = BTRFS_ORPHAN_OBJECTID;
3392         key.type = BTRFS_ORPHAN_ITEM_KEY;
3393         key.offset = (u64)-1;
3394
3395         while (1) {
3396                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3397                 if (ret < 0)
3398                         goto out;
3399
3400                 /*
3401                  * ret == 0 means we found what we were searching for, which
3402                  * is weird, but possible; so only screw with the path if we
3403                  * didn't find the key, and see if we have stuff that matches
3404                  */
3405                 if (ret > 0) {
3406                         ret = 0;
3407                         if (path->slots[0] == 0)
3408                                 break;
3409                         path->slots[0]--;
3410                 }
3411
3412                 /* pull out the item */
3413                 leaf = path->nodes[0];
3414                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3415
3416                 /* make sure the item matches what we want */
3417                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3418                         break;
3419                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3420                         break;
3421
3422                 /* release the path since we're done with it */
3423                 btrfs_release_path(path);
3424
3425                 /*
3426                  * this is where we are basically btrfs_lookup, without the
3427                  * crossing root thing.  we store the inode number in the
3428                  * offset of the orphan item.
3429                  */
3430
3431                 if (found_key.offset == last_objectid) {
3432                         btrfs_err(root->fs_info,
3433                                 "Error removing orphan entry, stopping orphan cleanup");
3434                         ret = -EINVAL;
3435                         goto out;
3436                 }
3437
3438                 last_objectid = found_key.offset;
3439
3440                 found_key.objectid = found_key.offset;
3441                 found_key.type = BTRFS_INODE_ITEM_KEY;
3442                 found_key.offset = 0;
3443                 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3444                 ret = PTR_ERR_OR_ZERO(inode);
3445                 if (ret && ret != -ESTALE)
3446                         goto out;
3447
3448                 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3449                         struct btrfs_root *dead_root;
3450                         struct btrfs_fs_info *fs_info = root->fs_info;
3451                         int is_dead_root = 0;
3452
3453                         /*
3454                          * this is an orphan in the tree root. Currently these
3455                          * could come from 2 sources:
3456                          *  a) a snapshot deletion in progress
3457                          *  b) a free space cache inode
3458                          * We need to distinguish those two, as the snapshot
3459                          * orphan must not get deleted.
3460                          * find_dead_roots already ran before us, so if this
3461                          * is a snapshot deletion, we should find the root
3462                          * in the dead_roots list
3463                          */
3464                         spin_lock(&fs_info->trans_lock);
3465                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3466                                             root_list) {
3467                                 if (dead_root->root_key.objectid ==
3468                                     found_key.objectid) {
3469                                         is_dead_root = 1;
3470                                         break;
3471                                 }
3472                         }
3473                         spin_unlock(&fs_info->trans_lock);
3474                         if (is_dead_root) {
3475                                 /* prevent this orphan from being found again */
3476                                 key.offset = found_key.objectid - 1;
3477                                 continue;
3478                         }
3479                 }
3480                 /*
3481                  * Inode is already gone but the orphan item is still there,
3482                  * kill the orphan item.
3483                  */
3484                 if (ret == -ESTALE) {
3485                         trans = btrfs_start_transaction(root, 1);
3486                         if (IS_ERR(trans)) {
3487                                 ret = PTR_ERR(trans);
3488                                 goto out;
3489                         }
3490                         btrfs_debug(root->fs_info, "auto deleting %Lu",
3491                                 found_key.objectid);
3492                         ret = btrfs_del_orphan_item(trans, root,
3493                                                     found_key.objectid);
3494                         btrfs_end_transaction(trans, root);
3495                         if (ret)
3496                                 goto out;
3497                         continue;
3498                 }
3499
3500                 /*
3501                  * add this inode to the orphan list so btrfs_orphan_del does
3502                  * the proper thing when we hit it
3503                  */
3504                 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3505                         &BTRFS_I(inode)->runtime_flags);
3506                 atomic_inc(&root->orphan_inodes);
3507
3508                 /* if we have links, this was a truncate, let's do that */
3509                 if (inode->i_nlink) {
3510                         if (WARN_ON(!S_ISREG(inode->i_mode))) {
3511                                 iput(inode);
3512                                 continue;
3513                         }
3514                         nr_truncate++;
3515
3516                         /* 1 for the orphan item deletion. */
3517                         trans = btrfs_start_transaction(root, 1);
3518                         if (IS_ERR(trans)) {
3519                                 iput(inode);
3520                                 ret = PTR_ERR(trans);
3521                                 goto out;
3522                         }
3523                         ret = btrfs_orphan_add(trans, inode);
3524                         btrfs_end_transaction(trans, root);
3525                         if (ret) {
3526                                 iput(inode);
3527                                 goto out;
3528                         }
3529
3530                         ret = btrfs_truncate(inode);
3531                         if (ret)
3532                                 btrfs_orphan_del(NULL, inode);
3533                 } else {
3534                         nr_unlink++;
3535                 }
3536
3537                 /* this will do delete_inode and everything for us */
3538                 iput(inode);
3539                 if (ret)
3540                         goto out;
3541         }
3542         /* release the path since we're done with it */
3543         btrfs_release_path(path);
3544
3545         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3546
3547         if (root->orphan_block_rsv)
3548                 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3549                                         (u64)-1);
3550
3551         if (root->orphan_block_rsv ||
3552             test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3553                 trans = btrfs_join_transaction(root);
3554                 if (!IS_ERR(trans))
3555                         btrfs_end_transaction(trans, root);
3556         }
3557
3558         if (nr_unlink)
3559                 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3560         if (nr_truncate)
3561                 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3562
3563 out:
3564         if (ret)
3565                 btrfs_err(root->fs_info,
3566                         "could not do orphan cleanup %d", ret);
3567         btrfs_free_path(path);
3568         return ret;
3569 }
3570
3571 /*
3572  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3573  * don't find any xattrs, we know there can't be any acls.
3574  *
3575  * slot is the slot the inode is in, objectid is the objectid of the inode
3576  */
3577 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3578                                           int slot, u64 objectid,
3579                                           int *first_xattr_slot)
3580 {
3581         u32 nritems = btrfs_header_nritems(leaf);
3582         struct btrfs_key found_key;
3583         static u64 xattr_access = 0;
3584         static u64 xattr_default = 0;
3585         int scanned = 0;
3586
3587         if (!xattr_access) {
3588                 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
3589                                         strlen(POSIX_ACL_XATTR_ACCESS));
3590                 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
3591                                         strlen(POSIX_ACL_XATTR_DEFAULT));
3592         }
3593
3594         slot++;
3595         *first_xattr_slot = -1;
3596         while (slot < nritems) {
3597                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3598
3599                 /* we found a different objectid, there must not be acls */
3600                 if (found_key.objectid != objectid)
3601                         return 0;
3602
3603                 /* we found an xattr, assume we've got an acl */
3604                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3605                         if (*first_xattr_slot == -1)
3606                                 *first_xattr_slot = slot;
3607                         if (found_key.offset == xattr_access ||
3608                             found_key.offset == xattr_default)
3609                                 return 1;
3610                 }
3611
3612                 /*
3613                  * we found a key greater than an xattr key, there can't
3614                  * be any acls later on
3615                  */
3616                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3617                         return 0;
3618
3619                 slot++;
3620                 scanned++;
3621
3622                 /*
3623                  * it goes inode, inode backrefs, xattrs, extents,
3624                  * so if there are a ton of hard links to an inode there can
3625                  * be a lot of backrefs.  Don't waste time searching too hard,
3626                  * this is just an optimization
3627                  */
3628                 if (scanned >= 8)
3629                         break;
3630         }
3631         /* we hit the end of the leaf before we found an xattr or
3632          * something larger than an xattr.  We have to assume the inode
3633          * has acls
3634          */
3635         if (*first_xattr_slot == -1)
3636                 *first_xattr_slot = slot;
3637         return 1;
3638 }
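
/*
 * Editorial illustration of the key ordering the scan above relies on.
 * Within one objectid, keys sort by type, so an inode's items appear as:
 *
 *   (ino INODE_ITEM 0)
 *   (ino INODE_REF  parent_ino)   <- possibly many, for hard links
 *   (ino XATTR_ITEM name_hash)    <- zero or more
 *   (ino EXTENT_DATA file_offset) <- zero or more
 *
 * so hitting any key with type > BTRFS_XATTR_ITEM_KEY proves there are
 * no ACL xattrs left to find for this inode.
 */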
3639
3640 /*
3641  * read an inode from the btree into the in-memory inode
3642  */
3643 static void btrfs_read_locked_inode(struct inode *inode)
3644 {
3645         struct btrfs_path *path;
3646         struct extent_buffer *leaf;
3647         struct btrfs_inode_item *inode_item;
3648         struct btrfs_root *root = BTRFS_I(inode)->root;
3649         struct btrfs_key location;
3650         unsigned long ptr;
3651         int maybe_acls;
3652         u32 rdev;
3653         int ret;
3654         bool filled = false;
3655         int first_xattr_slot;
3656
3657         ret = btrfs_fill_inode(inode, &rdev);
3658         if (!ret)
3659                 filled = true;
3660
3661         path = btrfs_alloc_path();
3662         if (!path)
3663                 goto make_bad;
3664
3665         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3666
3667         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3668         if (ret)
3669                 goto make_bad;
3670
3671         leaf = path->nodes[0];
3672
3673         if (filled)
3674                 goto cache_index;
3675
3676         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3677                                     struct btrfs_inode_item);
3678         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3679         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3680         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3681         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3682         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3683
3684         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3685         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3686
3687         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3688         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3689
3690         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3691         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3692
3693         BTRFS_I(inode)->i_otime.tv_sec =
3694                 btrfs_timespec_sec(leaf, &inode_item->otime);
3695         BTRFS_I(inode)->i_otime.tv_nsec =
3696                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3697
3698         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3699         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3700         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3701
3702         inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3703         inode->i_generation = BTRFS_I(inode)->generation;
3704         inode->i_rdev = 0;
3705         rdev = btrfs_inode_rdev(leaf, inode_item);
3706
3707         BTRFS_I(inode)->index_cnt = (u64)-1;
3708         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3709
3710 cache_index:
3711         /*
3712          * If we were modified in the current generation and evicted from memory
3713          * and then re-read we need to do a full sync since we don't have any
3714          * idea about which extents were modified before we were evicted from
3715          * cache.
3716          *
3717          * This is required for both inode re-read from disk and delayed inode
3718          * in delayed_nodes_tree.
3719          */
3720         if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3721                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3722                         &BTRFS_I(inode)->runtime_flags);
3723
3724         /*
3725          * We don't persist the id of the transaction where an unlink operation
3726          * against the inode was last made. So here we assume the inode might
3727          * have been evicted, and therefore the exact value of last_unlink_trans
3728          * lost, and set it to last_trans to avoid metadata inconsistencies
3729          * between the inode and its parent if the inode is fsync'ed and the log
3730          * replayed. For example, in the scenario:
3731          *
3732          * touch mydir/foo
3733          * ln mydir/foo mydir/bar
3734          * sync
3735          * unlink mydir/bar
3736          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3737          * xfs_io -c fsync mydir/foo
3738          * <power failure>
3739          * mount fs, triggers fsync log replay
3740          *
3741          * We must make sure that when we fsync our inode foo we also log its
3742          * parent inode, otherwise after log replay the parent still has the
3743          * dentry with the "bar" name but our inode foo has a link count of 1
3744          * and doesn't have an inode ref with the name "bar" anymore.
3745          *
3746          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3747          * but it guarantees correctness at the expense of occasional full
3748          * transaction commits on fsync if our inode is a directory, or if our
3749          * inode is not a directory, logging its parent unnecessarily.
3750          */
3751         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3752
3753         path->slots[0]++;
3754         if (inode->i_nlink != 1 ||
3755             path->slots[0] >= btrfs_header_nritems(leaf))
3756                 goto cache_acl;
3757
3758         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3759         if (location.objectid != btrfs_ino(inode))
3760                 goto cache_acl;
3761
3762         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3763         if (location.type == BTRFS_INODE_REF_KEY) {
3764                 struct btrfs_inode_ref *ref;
3765
3766                 ref = (struct btrfs_inode_ref *)ptr;
3767                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3768         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3769                 struct btrfs_inode_extref *extref;
3770
3771                 extref = (struct btrfs_inode_extref *)ptr;
3772                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3773                                                                      extref);
3774         }
3775 cache_acl:
3776         /*
3777          * try to precache a NULL acl entry for files that don't have
3778          * any xattrs or acls
3779          */
3780         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3781                                            btrfs_ino(inode), &first_xattr_slot);
3782         if (first_xattr_slot != -1) {
3783                 path->slots[0] = first_xattr_slot;
3784                 ret = btrfs_load_inode_props(inode, path);
3785                 if (ret)
3786                         btrfs_err(root->fs_info,
3787                                   "error loading props for ino %llu (root %llu): %d",
3788                                   btrfs_ino(inode),
3789                                   root->root_key.objectid, ret);
3790         }
3791         btrfs_free_path(path);
3792
3793         if (!maybe_acls)
3794                 cache_no_acl(inode);
3795
3796         switch (inode->i_mode & S_IFMT) {
3797         case S_IFREG:
3798                 inode->i_mapping->a_ops = &btrfs_aops;
3799                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3800                 inode->i_fop = &btrfs_file_operations;
3801                 inode->i_op = &btrfs_file_inode_operations;
3802                 break;
3803         case S_IFDIR:
3804                 inode->i_fop = &btrfs_dir_file_operations;
3805                 if (root == root->fs_info->tree_root)
3806                         inode->i_op = &btrfs_dir_ro_inode_operations;
3807                 else
3808                         inode->i_op = &btrfs_dir_inode_operations;
3809                 break;
3810         case S_IFLNK:
3811                 inode->i_op = &btrfs_symlink_inode_operations;
3812                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3813                 break;
3814         default:
3815                 inode->i_op = &btrfs_special_inode_operations;
3816                 init_special_inode(inode, inode->i_mode, rdev);
3817                 break;
3818         }
3819
3820         btrfs_update_iflags(inode);
3821         return;
3822
3823 make_bad:
3824         btrfs_free_path(path);
3825         make_bad_inode(inode);
3826 }
3827
3828 /*
3829  * given a leaf and an inode, copy the inode fields into the leaf
3830  */
3831 static void fill_inode_item(struct btrfs_trans_handle *trans,
3832                             struct extent_buffer *leaf,
3833                             struct btrfs_inode_item *item,
3834                             struct inode *inode)
3835 {
3836         struct btrfs_map_token token;
3837
3838         btrfs_init_map_token(&token);
3839
3840         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3841         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3842         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3843                                    &token);
3844         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3845         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3846
3847         btrfs_set_token_timespec_sec(leaf, &item->atime,
3848                                      inode->i_atime.tv_sec, &token);
3849         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3850                                       inode->i_atime.tv_nsec, &token);
3851
3852         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3853                                      inode->i_mtime.tv_sec, &token);
3854         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3855                                       inode->i_mtime.tv_nsec, &token);
3856
3857         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3858                                      inode->i_ctime.tv_sec, &token);
3859         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3860                                       inode->i_ctime.tv_nsec, &token);
3861
3862         btrfs_set_token_timespec_sec(leaf, &item->otime,
3863                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3864         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3865                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3866
3867         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3868                                      &token);
3869         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3870                                          &token);
3871         btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3872         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3873         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3874         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3875         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3876 }
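
/*
 * Editorial note: the btrfs_map_token / *_token setters used above cache
 * the currently mapped extent_buffer page between calls, so this long run
 * of consecutive writes into a single inode item avoids re-mapping the
 * metadata page for every field.
 */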
3877
3878 /*
3879  * copy everything in the in-memory inode into the btree.
3880  */
3881 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3882                                 struct btrfs_root *root, struct inode *inode)
3883 {
3884         struct btrfs_inode_item *inode_item;
3885         struct btrfs_path *path;
3886         struct extent_buffer *leaf;
3887         int ret;
3888
3889         path = btrfs_alloc_path();
3890         if (!path)
3891                 return -ENOMEM;
3892
3893         path->leave_spinning = 1;
3894         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3895                                  1);
3896         if (ret) {
3897                 if (ret > 0)
3898                         ret = -ENOENT;
3899                 goto failed;
3900         }
3901
3902         leaf = path->nodes[0];
3903         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3904                                     struct btrfs_inode_item);
3905
3906         fill_inode_item(trans, leaf, inode_item, inode);
3907         btrfs_mark_buffer_dirty(leaf);
3908         btrfs_set_inode_last_trans(trans, inode);
3909         ret = 0;
3910 failed:
3911         btrfs_free_path(path);
3912         return ret;
3913 }
3914
3915 /*
3916  * copy everything in the in-memory inode into the btree.
3917  */
3918 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3919                                 struct btrfs_root *root, struct inode *inode)
3920 {
3921         int ret;
3922
3923         /*
3924          * If the inode is a free space inode, we can deadlock during commit
3925          * if we put it into the delayed code.
3926          *
3927          * The data relocation inode should also be directly updated
3928          * without delay.
3929          */
3930         if (!btrfs_is_free_space_inode(inode)
3931             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3932             && !root->fs_info->log_root_recovering) {
3933                 btrfs_update_root_times(trans, root);
3934
3935                 ret = btrfs_delayed_update_inode(trans, root, inode);
3936                 if (!ret)
3937                         btrfs_set_inode_last_trans(trans, inode);
3938                 return ret;
3939         }
3940
3941         return btrfs_update_inode_item(trans, root, inode);
3942 }
3943
3944 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3945                                          struct btrfs_root *root,
3946                                          struct inode *inode)
3947 {
3948         int ret;
3949
3950         ret = btrfs_update_inode(trans, root, inode);
3951         if (ret == -ENOSPC)
3952                 return btrfs_update_inode_item(trans, root, inode);
3953         return ret;
3954 }
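/*
 * Usage sketch (illustrative, not a definitive caller): an inode is
 * normally updated from inside a transaction the caller already holds,
 * reserving one item for the inode update, e.g.:
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	inode->i_ctime = CURRENT_TIME;
 *	ret = btrfs_update_inode(trans, root, inode);
 *	btrfs_end_transaction(trans, root);
 *
 * The fallback variant above exists for callers that must make progress
 * even when the delayed-inode code returns -ENOSPC.
 */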
3955
3956 /*
3957  * unlink helper that gets used here in inode.c and in the tree logging
3958  * recovery code.  It removes a link in a directory with a given name, and
3959  * also drops the back refs from the inode to the directory.
3960  */
3961 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3962                                 struct btrfs_root *root,
3963                                 struct inode *dir, struct inode *inode,
3964                                 const char *name, int name_len)
3965 {
3966         struct btrfs_path *path;
3967         int ret = 0;
3968         struct extent_buffer *leaf;
3969         struct btrfs_dir_item *di;
3970         struct btrfs_key key;
3971         u64 index;
3972         u64 ino = btrfs_ino(inode);
3973         u64 dir_ino = btrfs_ino(dir);
3974
3975         path = btrfs_alloc_path();
3976         if (!path) {
3977                 ret = -ENOMEM;
3978                 goto out;
3979         }
3980
3981         path->leave_spinning = 1;
3982         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3983                                     name, name_len, -1);
3984         if (IS_ERR(di)) {
3985                 ret = PTR_ERR(di);
3986                 goto err;
3987         }
3988         if (!di) {
3989                 ret = -ENOENT;
3990                 goto err;
3991         }
3992         leaf = path->nodes[0];
3993         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3994         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3995         if (ret)
3996                 goto err;
3997         btrfs_release_path(path);
3998
3999         /*
4000          * If we don't have a dir index for this inode, we have to get it by
4001          * looking up the inode ref; and since that lookup hands us the inode
4002          * ref anyway, remove it directly, there is no point in a delayed
4003          * deletion.
4004          *
4005          * But if we do have the dir index, there is no need to search for the
4006          * inode ref.  Since the inode ref is close to the inode item, it is
4007          * better to delay its deletion and do it when we update the inode item.
4008          */
4009         if (BTRFS_I(inode)->dir_index) {
4010                 ret = btrfs_delayed_delete_inode_ref(inode);
4011                 if (!ret) {
4012                         index = BTRFS_I(inode)->dir_index;
4013                         goto skip_backref;
4014                 }
4015         }
4016
4017         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
4018                                   dir_ino, &index);
4019         if (ret) {
4020                 btrfs_info(root->fs_info,
4021                         "failed to delete reference to %.*s, inode %llu parent %llu",
4022                         name_len, name, ino, dir_ino);
4023                 btrfs_abort_transaction(trans, root, ret);
4024                 goto err;
4025         }
4026 skip_backref:
4027         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4028         if (ret) {
4029                 btrfs_abort_transaction(trans, root, ret);
4030                 goto err;
4031         }
4032
4033         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
4034                                          inode, dir_ino);
4035         if (ret != 0 && ret != -ENOENT) {
4036                 btrfs_abort_transaction(trans, root, ret);
4037                 goto err;
4038         }
4039
4040         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
4041                                            dir, index);
4042         if (ret == -ENOENT)
4043                 ret = 0;
4044         else if (ret)
4045                 btrfs_abort_transaction(trans, root, ret);
4046 err:
4047         btrfs_free_path(path);
4048         if (ret)
4049                 goto out;
4050
4051         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4052         inode_inc_iversion(inode);
4053         inode_inc_iversion(dir);
4054         inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4055         ret = btrfs_update_inode(trans, root, dir);
4056 out:
4057         return ret;
4058 }
4059
4060 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4061                        struct btrfs_root *root,
4062                        struct inode *dir, struct inode *inode,
4063                        const char *name, int name_len)
4064 {
4065         int ret;
4066         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4067         if (!ret) {
4068                 drop_nlink(inode);
4069                 ret = btrfs_update_inode(trans, root, inode);
4070         }
4071         return ret;
4072 }
4073
4074 /*
4075  * helper to start transaction for unlink and rmdir.
4076  *
4077  * unlink and rmdir are special in btrfs, they do not always free space, so
4078  * if we cannot make our reservations the normal way try and see if there is
4079  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4080  * allow the unlink to occur.
4081  */
4082 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4083 {
4084         struct btrfs_root *root = BTRFS_I(dir)->root;
4085
4086         /*
4087          * 1 for the possible orphan item
4088          * 1 for the dir item
4089          * 1 for the dir index
4090          * 1 for the inode ref
4091          * 1 for the inode
4092          */
4093         return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4094 }
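/*
 * Here the first 5 is the number of items to reserve for; the second is
 * the min_factor handed to the global-reserve fallback, which (roughly)
 * limits how far the global reserve may be drained on our behalf.  See
 * btrfs_start_transaction_fallback_global_rsv() for the exact policy.
 */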
4095
4096 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4097 {
4098         struct btrfs_root *root = BTRFS_I(dir)->root;
4099         struct btrfs_trans_handle *trans;
4100         struct inode *inode = d_inode(dentry);
4101         int ret;
4102
4103         trans = __unlink_start_trans(dir);
4104         if (IS_ERR(trans))
4105                 return PTR_ERR(trans);
4106
4107         btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0);
4108
4109         ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4110                                  dentry->d_name.name, dentry->d_name.len);
4111         if (ret)
4112                 goto out;
4113
4114         if (inode->i_nlink == 0) {
4115                 ret = btrfs_orphan_add(trans, inode);
4116                 if (ret)
4117                         goto out;
4118         }
4119
4120 out:
4121         btrfs_end_transaction(trans, root);
4122         btrfs_btree_balance_dirty(root);
4123         return ret;
4124 }
4125
4126 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4127                         struct btrfs_root *root,
4128                         struct inode *dir, u64 objectid,
4129                         const char *name, int name_len)
4130 {
4131         struct btrfs_path *path;
4132         struct extent_buffer *leaf;
4133         struct btrfs_dir_item *di;
4134         struct btrfs_key key;
4135         u64 index;
4136         int ret;
4137         u64 dir_ino = btrfs_ino(dir);
4138
4139         path = btrfs_alloc_path();
4140         if (!path)
4141                 return -ENOMEM;
4142
4143         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4144                                    name, name_len, -1);
4145         if (IS_ERR_OR_NULL(di)) {
4146                 if (!di)
4147                         ret = -ENOENT;
4148                 else
4149                         ret = PTR_ERR(di);
4150                 goto out;
4151         }
4152
4153         leaf = path->nodes[0];
4154         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4155         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4156         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4157         if (ret) {
4158                 btrfs_abort_transaction(trans, root, ret);
4159                 goto out;
4160         }
4161         btrfs_release_path(path);
4162
4163         ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4164                                  objectid, root->root_key.objectid,
4165                                  dir_ino, &index, name, name_len);
4166         if (ret < 0) {
4167                 if (ret != -ENOENT) {
4168                         btrfs_abort_transaction(trans, root, ret);
4169                         goto out;
4170                 }
4171                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4172                                                  name, name_len);
4173                 if (IS_ERR_OR_NULL(di)) {
4174                         if (!di)
4175                                 ret = -ENOENT;
4176                         else
4177                                 ret = PTR_ERR(di);
4178                         btrfs_abort_transaction(trans, root, ret);
4179                         goto out;
4180                 }
4181
4182                 leaf = path->nodes[0];
4183                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4184                 btrfs_release_path(path);
4185                 index = key.offset;
4186         }
4187         btrfs_release_path(path);
4188
4189         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4190         if (ret) {
4191                 btrfs_abort_transaction(trans, root, ret);
4192                 goto out;
4193         }
4194
4195         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4196         inode_inc_iversion(dir);
4197         dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4198         ret = btrfs_update_inode_fallback(trans, root, dir);
4199         if (ret)
4200                 btrfs_abort_transaction(trans, root, ret);
4201 out:
4202         btrfs_free_path(path);
4203         return ret;
4204 }
4205
4206 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4207 {
4208         struct inode *inode = d_inode(dentry);
4209         int err = 0;
4210         struct btrfs_root *root = BTRFS_I(dir)->root;
4211         struct btrfs_trans_handle *trans;
4212
4213         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4214                 return -ENOTEMPTY;
4215         if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
4216                 return -EPERM;
4217
4218         trans = __unlink_start_trans(dir);
4219         if (IS_ERR(trans))
4220                 return PTR_ERR(trans);
4221
4222         if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4223                 err = btrfs_unlink_subvol(trans, root, dir,
4224                                           BTRFS_I(inode)->location.objectid,
4225                                           dentry->d_name.name,
4226                                           dentry->d_name.len);
4227                 goto out;
4228         }
4229
4230         err = btrfs_orphan_add(trans, inode);
4231         if (err)
4232                 goto out;
4233
4234         /* now the directory is empty */
4235         err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4236                                  dentry->d_name.name, dentry->d_name.len);
4237         if (!err)
4238                 btrfs_i_size_write(inode, 0);
4239 out:
4240         btrfs_end_transaction(trans, root);
4241         btrfs_btree_balance_dirty(root);
4242
4243         return err;
4244 }
4245
4246 static int truncate_space_check(struct btrfs_trans_handle *trans,
4247                                 struct btrfs_root *root,
4248                                 u64 bytes_deleted)
4249 {
4250         int ret;
4251
4252         bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
4253         ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
4254                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4255         if (!ret)
4256                 trans->bytes_reserved += bytes_deleted;
4257         return ret;
4258
4259 }
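/*
 * The reservation made above is never actually consumed: charging the
 * csum-leaf equivalent of bytes_deleted to trans_block_rsv just applies
 * back-pressure to the enospc machinery, and trans->bytes_reserved is
 * given back when the transaction ends.
 */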
4260
4261 static int truncate_inline_extent(struct inode *inode,
4262                                   struct btrfs_path *path,
4263                                   struct btrfs_key *found_key,
4264                                   const u64 item_end,
4265                                   const u64 new_size)
4266 {
4267         struct extent_buffer *leaf = path->nodes[0];
4268         int slot = path->slots[0];
4269         struct btrfs_file_extent_item *fi;
4270         u32 size = (u32)(new_size - found_key->offset);
4271         struct btrfs_root *root = BTRFS_I(inode)->root;
4272
4273         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4274
4275         if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4276                 loff_t offset = new_size;
4277                 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
4278
4279                 /*
4280                  * Zero out the remainder of the last page of our inline extent,
4281                  * instead of directly truncating our inline extent here - that
4282                  * would be much more complex (decompressing all the data, then
4283                  * compressing the truncated data, which might be bigger than
4284                  * the size of the inline extent, resize the extent, etc).
4285                  * We release the path because to get the page we might need to
4286                  * read the extent item from disk (data not in the page cache).
4287                  */
4288                 btrfs_release_path(path);
4289                 return btrfs_truncate_page(inode, offset, page_end - offset, 0);
4290         }
4291
4292         btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4293         size = btrfs_file_extent_calc_inline_size(size);
4294         btrfs_truncate_item(root, path, size, 1);
4295
4296         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4297                 inode_sub_bytes(inode, item_end + 1 - new_size);
4298
4299         return 0;
4300 }
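/*
 * Worked example (illustrative): for an uncompressed inline extent at
 * file offset 0 holding item_end + 1 = 4000 bytes, truncating to
 * new_size = 100 gives size = 100 - 0 = 100; ram_bytes is set to 100,
 * the item is shrunk to btrfs_file_extent_calc_inline_size(100) bytes
 * (inline header plus data) and, on a REF_COWS root, the 3900 dropped
 * bytes are subtracted from the inode's byte count.
 */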
4301
4302 /*
4303  * this can truncate away extent items, csum items and directory items.
4304  * It starts at a high offset and removes keys until it can't find
4305  * any higher than new_size
4306  *
4307  * csum items that cross the new i_size are truncated to the new size
4308  * as well.
4309  *
4310  * min_type is the minimum key type to truncate down to.  If set to 0, this
4311  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4312  */
4313 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4314                                struct btrfs_root *root,
4315                                struct inode *inode,
4316                                u64 new_size, u32 min_type)
4317 {
4318         struct btrfs_path *path;
4319         struct extent_buffer *leaf;
4320         struct btrfs_file_extent_item *fi;
4321         struct btrfs_key key;
4322         struct btrfs_key found_key;
4323         u64 extent_start = 0;
4324         u64 extent_num_bytes = 0;
4325         u64 extent_offset = 0;
4326         u64 item_end = 0;
4327         u64 last_size = new_size;
4328         u32 found_type = (u8)-1;
4329         int found_extent;
4330         int del_item;
4331         int pending_del_nr = 0;
4332         int pending_del_slot = 0;
4333         int extent_type = -1;
4334         int ret;
4335         int err = 0;
4336         u64 ino = btrfs_ino(inode);
4337         u64 bytes_deleted = 0;
4338         bool be_nice = 0;
4339         bool should_throttle = 0;
4340         bool should_end = 0;
4341
4342         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4343
4344         /*
4345          * for non-free space inodes and ref cows, we want to back off from
4346          * time to time
4347          */
4348         if (!btrfs_is_free_space_inode(inode) &&
4349             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4350                 be_nice = 1;
4351
4352         path = btrfs_alloc_path();
4353         if (!path)
4354                 return -ENOMEM;
4355         path->reada = -1;
4356
4357         /*
4358          * We want to drop from the next block forward in case this new size is
4359          * not block aligned since we will be keeping the last block of the
4360          * extent just the way it is.
4361          */
4362         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4363             root == root->fs_info->tree_root)
4364                 btrfs_drop_extent_cache(inode, ALIGN(new_size,
4365                                         root->sectorsize), (u64)-1, 0);
4366
4367         /*
4368          * This function is also used to drop the items in the log tree before
4369          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4370          * it is used to drop the logged items. So we shouldn't kill the delayed
4371          * items.
4372          */
4373         if (min_type == 0 && root == BTRFS_I(inode)->root)
4374                 btrfs_kill_delayed_inode_items(inode);
4375
4376         key.objectid = ino;
4377         key.offset = (u64)-1;
4378         key.type = (u8)-1;
4379
4380 search_again:
4381         /*
4382          * with a 16K leaf size and 128MB extents, you can actually queue
4383          * up a huge file in a single leaf.  Most of the time when
4384          * bytes_deleted is > 0, it will be huge by the time we get here.
4385          */
4386         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4387                 if (btrfs_should_end_transaction(trans, root)) {
4388                         err = -EAGAIN;
4389                         goto error;
4390                 }
4391         }
4392
4393
4394         path->leave_spinning = 1;
4395         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4396         if (ret < 0) {
4397                 err = ret;
4398                 goto out;
4399         }
4400
4401         if (ret > 0) {
4402                 /* there are no items in the tree for us to truncate, we're
4403                  * done
4404                  */
4405                 if (path->slots[0] == 0)
4406                         goto out;
4407                 path->slots[0]--;
4408         }
4409
4410         while (1) {
4411                 fi = NULL;
4412                 leaf = path->nodes[0];
4413                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4414                 found_type = found_key.type;
4415
4416                 if (found_key.objectid != ino)
4417                         break;
4418
4419                 if (found_type < min_type)
4420                         break;
4421
4422                 item_end = found_key.offset;
4423                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4424                         fi = btrfs_item_ptr(leaf, path->slots[0],
4425                                             struct btrfs_file_extent_item);
4426                         extent_type = btrfs_file_extent_type(leaf, fi);
4427                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4428                                 item_end +=
4429                                     btrfs_file_extent_num_bytes(leaf, fi);
4430                         } else {
4431                                 item_end += btrfs_file_extent_inline_len(leaf,
4432                                                          path->slots[0], fi);
4433                         }
4434                         item_end--;
4435                 }
4436                 if (found_type > min_type) {
4437                         del_item = 1;
4438                 } else {
4439                         if (item_end < new_size) {
4440                                 /*
4441                                  * With NO_HOLES mode, for the following mapping
4442                                  *
4443                                  * [0-4k][hole][8k-12k]
4444                                  *
4445                                  * if truncating isize down to 6k, isize would
4446                                  * otherwise end up being 8k.
4447                                  */
4448                                 if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
4449                                         last_size = new_size;
4450                                 break;
4451                         }
4452                         if (found_key.offset >= new_size)
4453                                 del_item = 1;
4454                         else
4455                                 del_item = 0;
4456                 }
4457                 found_extent = 0;
4458                 /* FIXME, shrink the extent if the ref count is only 1 */
4459                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4460                         goto delete;
4461
4462                 if (del_item)
4463                         last_size = found_key.offset;
4464                 else
4465                         last_size = new_size;
4466
4467                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4468                         u64 num_dec;
4469                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4470                         if (!del_item) {
4471                                 u64 orig_num_bytes =
4472                                         btrfs_file_extent_num_bytes(leaf, fi);
4473                                 extent_num_bytes = ALIGN(new_size -
4474                                                 found_key.offset,
4475                                                 root->sectorsize);
4476                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4477                                                          extent_num_bytes);
4478                                 num_dec = (orig_num_bytes -
4479                                            extent_num_bytes);
4480                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4481                                              &root->state) &&
4482                                     extent_start != 0)
4483                                         inode_sub_bytes(inode, num_dec);
4484                                 btrfs_mark_buffer_dirty(leaf);
4485                         } else {
4486                                 extent_num_bytes =
4487                                         btrfs_file_extent_disk_num_bytes(leaf,
4488                                                                          fi);
4489                                 extent_offset = found_key.offset -
4490                                         btrfs_file_extent_offset(leaf, fi);
4491
4492                                 /* FIXME blocksize != 4096 */
4493                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4494                                 if (extent_start != 0) {
4495                                         found_extent = 1;
4496                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4497                                                      &root->state))
4498                                                 inode_sub_bytes(inode, num_dec);
4499                                 }
4500                         }
4501                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4502                         /*
4503                          * we can't truncate inline items that have had
4504                          * special encodings
4505                          */
4506                         if (!del_item &&
4507                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4508                             btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4509
4510                                 /*
4511                                  * Need to release path in order to truncate a
4512                                  * compressed extent. So delete any accumulated
4513                                  * extent items so far.
4514                                  */
4515                                 if (btrfs_file_extent_compression(leaf, fi) !=
4516                                     BTRFS_COMPRESS_NONE && pending_del_nr) {
4517                                         err = btrfs_del_items(trans, root, path,
4518                                                               pending_del_slot,
4519                                                               pending_del_nr);
4520                                         if (err) {
4521                                                 btrfs_abort_transaction(trans,
4522                                                                         root,
4523                                                                         err);
4524                                                 goto error;
4525                                         }
4526                                         pending_del_nr = 0;
4527                                 }
4528
4529                                 err = truncate_inline_extent(inode, path,
4530                                                              &found_key,
4531                                                              item_end,
4532                                                              new_size);
4533                                 if (err) {
4534                                         btrfs_abort_transaction(trans,
4535                                                                 root, err);
4536                                         goto error;
4537                                 }
4538                         } else if (test_bit(BTRFS_ROOT_REF_COWS,
4539                                             &root->state)) {
4540                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4541                         }
4542                 }
4543 delete:
4544                 if (del_item) {
4545                         if (!pending_del_nr) {
4546                                 /* no pending yet, add ourselves */
4547                                 pending_del_slot = path->slots[0];
4548                                 pending_del_nr = 1;
4549                         } else if (pending_del_nr &&
4550                                    path->slots[0] + 1 == pending_del_slot) {
4551                                 /* hop on the pending chunk */
4552                                 pending_del_nr++;
4553                                 pending_del_slot = path->slots[0];
4554                         } else {
4555                                 BUG();
4556                         }
4557                 } else {
4558                         break;
4559                 }
4560                 should_throttle = 0;
4561
4562                 if (found_extent &&
4563                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4564                      root == root->fs_info->tree_root)) {
4565                         btrfs_set_path_blocking(path);
4566                         bytes_deleted += extent_num_bytes;
4567                         ret = btrfs_free_extent(trans, root, extent_start,
4568                                                 extent_num_bytes, 0,
4569                                                 btrfs_header_owner(leaf),
4570                                                 ino, extent_offset);
4571                         BUG_ON(ret);
4572                         if (btrfs_should_throttle_delayed_refs(trans, root))
4573                                 btrfs_async_run_delayed_refs(root,
4574                                         trans->delayed_ref_updates * 2, 0);
4575                         if (be_nice) {
4576                                 if (truncate_space_check(trans, root,
4577                                                          extent_num_bytes)) {
4578                                         should_end = 1;
4579                                 }
4580                                 if (btrfs_should_throttle_delayed_refs(trans,
4581                                                                        root)) {
4582                                         should_throttle = 1;
4583                                 }
4584                         }
4585                 }
4586
4587                 if (found_type == BTRFS_INODE_ITEM_KEY)
4588                         break;
4589
4590                 if (path->slots[0] == 0 ||
4591                     path->slots[0] != pending_del_slot ||
4592                     should_throttle || should_end) {
4593                         if (pending_del_nr) {
4594                                 ret = btrfs_del_items(trans, root, path,
4595                                                 pending_del_slot,
4596                                                 pending_del_nr);
4597                                 if (ret) {
4598                                         btrfs_abort_transaction(trans,
4599                                                                 root, ret);
4600                                         goto error;
4601                                 }
4602                                 pending_del_nr = 0;
4603                         }
4604                         btrfs_release_path(path);
4605                         if (should_throttle) {
4606                                 unsigned long updates = trans->delayed_ref_updates;
4607                                 if (updates) {
4608                                         trans->delayed_ref_updates = 0;
4609                                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4610                                         if (ret && !err)
4611                                                 err = ret;
4612                                 }
4613                         }
4614                         /*
4615                          * if we failed to refill our space rsv, bail out
4616                          * and let the transaction restart
4617                          */
4618                         if (should_end) {
4619                                 err = -EAGAIN;
4620                                 goto error;
4621                         }
4622                         goto search_again;
4623                 } else {
4624                         path->slots[0]--;
4625                 }
4626         }
4627 out:
4628         if (pending_del_nr) {
4629                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4630                                       pending_del_nr);
4631                 if (ret)
4632                         btrfs_abort_transaction(trans, root, ret);
4633         }
4634 error:
4635         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4636                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4637
4638         btrfs_free_path(path);
4639
4640         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4641                 unsigned long updates = trans->delayed_ref_updates;
4642                 if (updates) {
4643                         trans->delayed_ref_updates = 0;
4644                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4645                         if (ret && !err)
4646                                 err = ret;
4647                 }
4648         }
4649         return err;
4650 }
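/*
 * Caller sketch (illustrative, simplified): in be_nice mode this returns
 * -EAGAIN when it wants the transaction restarted, so truncate callers
 * loop along the lines of:
 *
 *	while (1) {
 *		ret = btrfs_truncate_inode_items(trans, root, inode,
 *						 inode->i_size,
 *						 BTRFS_EXTENT_DATA_KEY);
 *		if (ret != -EAGAIN && ret != -ENOSPC)
 *			break;
 *		btrfs_end_transaction(trans, root);
 *		trans = btrfs_start_transaction(root, 2);
 *		if (IS_ERR(trans))
 *			return PTR_ERR(trans);
 *	}
 *
 * btrfs_truncate() additionally refills its block reservation between
 * iterations; see that function for the real pattern.
 */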
4651
4652 /*
4653  * btrfs_truncate_page - read, zero a chunk and write a page
4654  * @inode - inode that we're zeroing
4655  * @from - the offset to start zeroing
4656  * @len - the length to zero, 0 to zero the entire range relative to the
4657  *      offset
4658  * @front - zero up to the offset instead of from the offset on
4659  *
4660  * This will find the page for the "from" offset and cow the page and zero the
4661  * part we want to zero.  This is used with truncate and hole punching.
4662  */
4663 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4664                         int front)
4665 {
4666         struct address_space *mapping = inode->i_mapping;
4667         struct btrfs_root *root = BTRFS_I(inode)->root;
4668         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4669         struct btrfs_ordered_extent *ordered;
4670         struct extent_state *cached_state = NULL;
4671         char *kaddr;
4672         u32 blocksize = root->sectorsize;
4673         pgoff_t index = from >> PAGE_CACHE_SHIFT;
4674         unsigned offset = from & (PAGE_CACHE_SIZE-1);
4675         struct page *page;
4676         gfp_t mask = btrfs_alloc_write_mask(mapping);
4677         int ret = 0;
4678         u64 page_start;
4679         u64 page_end;
4680
4681         if ((offset & (blocksize - 1)) == 0 &&
4682             (!len || ((len & (blocksize - 1)) == 0)))
4683                 goto out;
4684         ret = btrfs_delalloc_reserve_space(inode,
4685                         round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
4686         if (ret)
4687                 goto out;
4688
4689 again:
4690         page = find_or_create_page(mapping, index, mask);
4691         if (!page) {
4692                 btrfs_delalloc_release_space(inode,
4693                                 round_down(from, PAGE_CACHE_SIZE),
4694                                 PAGE_CACHE_SIZE);
4695                 ret = -ENOMEM;
4696                 goto out;
4697         }
4698
4699         page_start = page_offset(page);
4700         page_end = page_start + PAGE_CACHE_SIZE - 1;
4701
4702         if (!PageUptodate(page)) {
4703                 ret = btrfs_readpage(NULL, page);
4704                 lock_page(page);
4705                 if (page->mapping != mapping) {
4706                         unlock_page(page);
4707                         page_cache_release(page);
4708                         goto again;
4709                 }
4710                 if (!PageUptodate(page)) {
4711                         ret = -EIO;
4712                         goto out_unlock;
4713                 }
4714         }
4715         wait_on_page_writeback(page);
4716
4717         lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4718         set_page_extent_mapped(page);
4719
4720         ordered = btrfs_lookup_ordered_extent(inode, page_start);
4721         if (ordered) {
4722                 unlock_extent_cached(io_tree, page_start, page_end,
4723                                      &cached_state, GFP_NOFS);
4724                 unlock_page(page);
4725                 page_cache_release(page);
4726                 btrfs_start_ordered_extent(inode, ordered, 1);
4727                 btrfs_put_ordered_extent(ordered);
4728                 goto again;
4729         }
4730
4731         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4732                           EXTENT_DIRTY | EXTENT_DELALLOC |
4733                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4734                           0, 0, &cached_state, GFP_NOFS);
4735
4736         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4737                                         &cached_state);
4738         if (ret) {
4739                 unlock_extent_cached(io_tree, page_start, page_end,
4740                                      &cached_state, GFP_NOFS);
4741                 goto out_unlock;
4742         }
4743
4744         if (offset != PAGE_CACHE_SIZE) {
4745                 if (!len)
4746                         len = PAGE_CACHE_SIZE - offset;
4747                 kaddr = kmap(page);
4748                 if (front)
4749                         memset(kaddr, 0, offset);
4750                 else
4751                         memset(kaddr + offset, 0, len);
4752                 flush_dcache_page(page);
4753                 kunmap(page);
4754         }
4755         ClearPageChecked(page);
4756         set_page_dirty(page);
4757         unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4758                              GFP_NOFS);
4759
4760 out_unlock:
4761         if (ret)
4762                 btrfs_delalloc_release_space(inode, page_start,
4763                                              PAGE_CACHE_SIZE);
4764         unlock_page(page);
4765         page_cache_release(page);
4766 out:
4767         return ret;
4768 }
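/*
 * Usage sketch (illustrative): an expanding truncate zeroes from the old
 * EOF to the end of its page (see btrfs_cont_expand() below):
 *
 *	btrfs_truncate_page(inode, oldsize, 0, 0);
 *
 * while hole punching zeroes the unaligned edges of the hole, passing
 * front == 1 for the page containing the end of the hole:
 *
 *	btrfs_truncate_page(inode, offset, 0, 0);
 *	btrfs_truncate_page(inode, offset + len, 0, 1);
 */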
4769
4770 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4771                              u64 offset, u64 len)
4772 {
4773         struct btrfs_trans_handle *trans;
4774         int ret;
4775
4776         /*
4777          * Still need to make sure the inode looks like it's been updated so
4778          * that any holes get logged if we fsync.
4779          */
4780         if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
4781                 BTRFS_I(inode)->last_trans = root->fs_info->generation;
4782                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4783                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4784                 return 0;
4785         }
4786
4787         /*
4788          * 1 - for the one we're dropping
4789          * 1 - for the one we're adding
4790          * 1 - for updating the inode.
4791          */
4792         trans = btrfs_start_transaction(root, 3);
4793         if (IS_ERR(trans))
4794                 return PTR_ERR(trans);
4795
4796         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4797         if (ret) {
4798                 btrfs_abort_transaction(trans, root, ret);
4799                 btrfs_end_transaction(trans, root);
4800                 return ret;
4801         }
4802
4803         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
4804                                        0, 0, len, 0, len, 0, 0, 0);
4805         if (ret)
4806                 btrfs_abort_transaction(trans, root, ret);
4807         else
4808                 btrfs_update_inode(trans, root, inode);
4809         btrfs_end_transaction(trans, root);
4810         return ret;
4811 }
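/*
 * With NO_HOLES there is no extent item to insert, but bumping
 * last_trans/last_sub_trans above makes a later fsync treat the inode as
 * modified in this transaction, so the new hole still finds its way into
 * the log tree.
 */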
4812
4813 /*
4814  * This function puts in dummy file extents for the area we're creating a hole
4815  * for.  So if we are truncating this file to a larger size we need to insert
4816  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
4817  * the range between oldsize and size
4818  */
4819 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4820 {
4821         struct btrfs_root *root = BTRFS_I(inode)->root;
4822         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4823         struct extent_map *em = NULL;
4824         struct extent_state *cached_state = NULL;
4825         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4826         u64 hole_start = ALIGN(oldsize, root->sectorsize);
4827         u64 block_end = ALIGN(size, root->sectorsize);
4828         u64 last_byte;
4829         u64 cur_offset;
4830         u64 hole_size;
4831         int err = 0;
4832
4833         /*
4834          * If our size started in the middle of a page we need to zero out the
4835          * rest of the page before we expand the i_size, otherwise we could
4836          * expose stale data.
4837          */
4838         err = btrfs_truncate_page(inode, oldsize, 0, 0);
4839         if (err)
4840                 return err;
4841
4842         if (size <= hole_start)
4843                 return 0;
4844
4845         while (1) {
4846                 struct btrfs_ordered_extent *ordered;
4847
4848                 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4849                                  &cached_state);
4850                 ordered = btrfs_lookup_ordered_range(inode, hole_start,
4851                                                      block_end - hole_start);
4852                 if (!ordered)
4853                         break;
4854                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4855                                      &cached_state, GFP_NOFS);
4856                 btrfs_start_ordered_extent(inode, ordered, 1);
4857                 btrfs_put_ordered_extent(ordered);
4858         }
4859
4860         cur_offset = hole_start;
4861         while (1) {
4862                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4863                                 block_end - cur_offset, 0);
4864                 if (IS_ERR(em)) {
4865                         err = PTR_ERR(em);
4866                         em = NULL;
4867                         break;
4868                 }
4869                 last_byte = min(extent_map_end(em), block_end);
4870                 last_byte = ALIGN(last_byte, root->sectorsize);
4871                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4872                         struct extent_map *hole_em;
4873                         hole_size = last_byte - cur_offset;
4874
4875                         err = maybe_insert_hole(root, inode, cur_offset,
4876                                                 hole_size);
4877                         if (err)
4878                                 break;
4879                         btrfs_drop_extent_cache(inode, cur_offset,
4880                                                 cur_offset + hole_size - 1, 0);
4881                         hole_em = alloc_extent_map();
4882                         if (!hole_em) {
4883                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4884                                         &BTRFS_I(inode)->runtime_flags);
4885                                 goto next;
4886                         }
4887                         hole_em->start = cur_offset;
4888                         hole_em->len = hole_size;
4889                         hole_em->orig_start = cur_offset;
4890
4891                         hole_em->block_start = EXTENT_MAP_HOLE;
4892                         hole_em->block_len = 0;
4893                         hole_em->orig_block_len = 0;
4894                         hole_em->ram_bytes = hole_size;
4895                         hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4896                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
4897                         hole_em->generation = root->fs_info->generation;
4898
4899                         while (1) {
4900                                 write_lock(&em_tree->lock);
4901                                 err = add_extent_mapping(em_tree, hole_em, 1);
4902                                 write_unlock(&em_tree->lock);
4903                                 if (err != -EEXIST)
4904                                         break;
4905                                 btrfs_drop_extent_cache(inode, cur_offset,
4906                                                         cur_offset +
4907                                                         hole_size - 1, 0);
4908                         }
4909                         free_extent_map(hole_em);
4910                 }
4911 next:
4912                 free_extent_map(em);
4913                 em = NULL;
4914                 cur_offset = last_byte;
4915                 if (cur_offset >= block_end)
4916                         break;
4917         }
4918         free_extent_map(em);
4919         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4920                              GFP_NOFS);
4921         return err;
4922 }
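/*
 * Worked example (illustrative): with a 4K sectorsize, expanding from
 * oldsize = 6000 to size = 16384 gives hole_start = ALIGN(6000, 4096) =
 * 8192 and block_end = 16384.  btrfs_truncate_page() first zeroes bytes
 * 6000..8191 of the partial page, then hole file extents (or, with
 * NO_HOLES, just a dirtied inode) cover the range [8192, 16384).
 */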
4923
4924 static int wait_snapshoting_atomic_t(atomic_t *a)
4925 {
4926         schedule();
4927         return 0;
4928 }
4929
4930 static void wait_for_snapshot_creation(struct btrfs_root *root)
4931 {
4932         while (true) {
4933                 int ret;
4934
4935                 ret = btrfs_start_write_no_snapshoting(root);
4936                 if (ret)
4937                         break;
4938                 wait_on_atomic_t(&root->will_be_snapshoted,
4939                                  wait_snapshoting_atomic_t,
4940                                  TASK_UNINTERRUPTIBLE);
4941         }
4942 }
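/*
 * This behaves like a blocking read-lock against snapshot creation: a
 * successful return must be paired with btrfs_end_write_no_snapshoting(),
 * as btrfs_setsize() does around its expanding truncate below.
 */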
4943
4944 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4945 {
4946         struct btrfs_root *root = BTRFS_I(inode)->root;
4947         struct btrfs_trans_handle *trans;
4948         loff_t oldsize = i_size_read(inode);
4949         loff_t newsize = attr->ia_size;
4950         int mask = attr->ia_valid;
4951         int ret;
4952
4953         /*
4954          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4955          * special case where we need to update the times despite not having
4956          * these flags set.  For all other operations the VFS set these flags
4957          * explicitly if it wants a timestamp update.
4958          */
4959         if (newsize != oldsize) {
4960                 inode_inc_iversion(inode);
4961                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4962                         inode->i_ctime = inode->i_mtime =
4963                                 current_fs_time(inode->i_sb);
4964         }
4965
4966         if (newsize > oldsize) {
4967                 truncate_pagecache(inode, newsize);
4968                 /*
4969                  * Don't do an expanding truncate while snapshotting is ongoing.
4970                  * This is to ensure the snapshot captures a fully consistent
4971                  * state of this file - if the snapshot captures this expanding
4972                  * truncation, it must capture all writes that happened before
4973                  * this truncation.
4974                  */
4975                 wait_for_snapshot_creation(root);
4976                 ret = btrfs_cont_expand(inode, oldsize, newsize);
4977                 if (ret) {
4978                         btrfs_end_write_no_snapshoting(root);
4979                         return ret;
4980                 }
4981
4982                 trans = btrfs_start_transaction(root, 1);
4983                 if (IS_ERR(trans)) {
4984                         btrfs_end_write_no_snapshoting(root);
4985                         return PTR_ERR(trans);
4986                 }
4987
4988                 i_size_write(inode, newsize);
4989                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4990                 ret = btrfs_update_inode(trans, root, inode);
4991                 btrfs_end_write_no_snapshoting(root);
4992                 btrfs_end_transaction(trans, root);
4993         } else {
4994
4995                 /*
4996                  * We're truncating a file that used to have good data down to
4997                  * zero. Make sure it gets into the ordered flush list so that
4998                  * any new writes get down to disk quickly.
4999                  */
5000                 if (newsize == 0)
5001                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
5002                                 &BTRFS_I(inode)->runtime_flags);
5003
5004                 /*
5005                  * 1 for the orphan item we're going to add
5006                  * 1 for the orphan item deletion.
5007                  */
5008                 trans = btrfs_start_transaction(root, 2);
5009                 if (IS_ERR(trans))
5010                         return PTR_ERR(trans);
5011
5012                 /*
5013                  * We need to do this in case we fail at _any_ point during the
5014                  * actual truncate.  Once we do the truncate_setsize we could
5015                  * invalidate pages which forces any outstanding ordered io to
5016                  * be instantly completed which will give us extents that need
5017                  * to be truncated.  If we fail to get an orphan inode down we
5018                  * could have left over extents that were never meant to live,
5019                  * so we need to guarantee from this point on that everything
5020                  * will be consistent.
5021                  */
5022                 ret = btrfs_orphan_add(trans, inode);
5023                 btrfs_end_transaction(trans, root);
5024                 if (ret)
5025                         return ret;
5026
5027                 /* we don't support swapfiles, so vmtruncate shouldn't fail */
5028                 truncate_setsize(inode, newsize);
5029
5030                 /* Disable nonlocked read DIO to avoid an endless truncate */
5031                 btrfs_inode_block_unlocked_dio(inode);
5032                 inode_dio_wait(inode);
5033                 btrfs_inode_resume_unlocked_dio(inode);
5034
5035                 ret = btrfs_truncate(inode);
5036                 if (ret && inode->i_nlink) {
5037                         int err;
5038
5039                         /*
5040                          * failed to truncate, disk_i_size is only adjusted down
5041                          * as we remove extents, so it should represent the true
5042                          * size of the inode, so reset the in-memory size and
5043                          * delete our orphan entry.
5044                          */
5045                         trans = btrfs_join_transaction(root);
5046                         if (IS_ERR(trans)) {
5047                                 btrfs_orphan_del(NULL, inode);
5048                                 return ret;
5049                         }
5050                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5051                         err = btrfs_orphan_del(trans, inode);
5052                         if (err)
5053                                 btrfs_abort_transaction(trans, root, err);
5054                         btrfs_end_transaction(trans, root);
5055                 }
5056         }
5057
5058         return ret;
5059 }
5060
5061 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5062 {
5063         struct inode *inode = d_inode(dentry);
5064         struct btrfs_root *root = BTRFS_I(inode)->root;
5065         int err;
5066
5067         if (btrfs_root_readonly(root))
5068                 return -EROFS;
5069
5070         err = inode_change_ok(inode, attr);
5071         if (err)
5072                 return err;
5073
5074         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5075                 err = btrfs_setsize(inode, attr);
5076                 if (err)
5077                         return err;
5078         }
5079
5080         if (attr->ia_valid) {
5081                 setattr_copy(inode, attr);
5082                 inode_inc_iversion(inode);
5083                 err = btrfs_dirty_inode(inode);
5084
5085                 if (!err && attr->ia_valid & ATTR_MODE)
5086                         err = posix_acl_chmod(inode, inode->i_mode);
5087         }
5088
5089         return err;
5090 }
5091
5092 /*
5093  * While truncating the inode pages during eviction, we get the VFS calling
5094  * btrfs_invalidatepage() against each page of the inode. This is slow because
5095  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5096  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5097  * extent_state structures over and over, wasting lots of time.
5098  *
5099  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5100  * those expensive operations on a per page basis and do only the ordered io
5101  * finishing, while we release here the extent_map and extent_state structures,
5102  * without the excessive merging and splitting.
5103  */
5104 static void evict_inode_truncate_pages(struct inode *inode)
5105 {
5106         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5107         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5108         struct rb_node *node;
5109
5110         ASSERT(inode->i_state & I_FREEING);
5111         truncate_inode_pages_final(&inode->i_data);
5112
5113         write_lock(&map_tree->lock);
5114         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5115                 struct extent_map *em;
5116
5117                 node = rb_first(&map_tree->map);
5118                 em = rb_entry(node, struct extent_map, rb_node);
5119                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5120                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5121                 remove_extent_mapping(map_tree, em);
5122                 free_extent_map(em);
5123                 if (need_resched()) {
5124                         write_unlock(&map_tree->lock);
5125                         cond_resched();
5126                         write_lock(&map_tree->lock);
5127                 }
5128         }
5129         write_unlock(&map_tree->lock);
5130
5131         /*
5132          * Keep looping until we have no more ranges in the io tree.
5133          * We can have ongoing bios started by readpages (called from readahead)
5134          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5135          * still in progress (unlocked the pages in the bio but did not yet
5136          * unlocked the ranges in the io tree). Therefore this means some
5137          * ranges can still be locked and eviction started because before
5138          * submitting those bios, which are executed by a separate task (work
5139          * queue kthread), inode references (inode->i_count) were not taken
5140          * (which would be dropped in the end io callback of each bio).
5141          * Therefore here we effectively end up waiting for those bios and
5142          * anyone else holding locked ranges without having bumped the inode's
5143          * reference count - if we don't do it, when they access the inode's
5144          * io_tree to unlock a range it may be too late, leading to a
5145          * use-after-free issue.
5146          */
5147         spin_lock(&io_tree->lock);
5148         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5149                 struct extent_state *state;
5150                 struct extent_state *cached_state = NULL;
5151                 u64 start;
5152                 u64 end;
5153
5154                 node = rb_first(&io_tree->state);
5155                 state = rb_entry(node, struct extent_state, rb_node);
5156                 start = state->start;
5157                 end = state->end;
5158                 spin_unlock(&io_tree->lock);
5159
5160                 lock_extent_bits(io_tree, start, end, 0, &cached_state);
5161
5162                 /*
5163                  * If the range still has the DELALLOC flag, the extent didn't
5164                  * reach disk and its reserved space won't be freed by delayed_ref.
5165                  * So we need to free its reserved space here.
5166                  * (Refer to comment in btrfs_invalidatepage, case 2)
5167                  *
5168                  * Note, end is the bytenr of last byte, so we need + 1 here.
5169                  */
5170                 if (state->state & EXTENT_DELALLOC)
5171                         btrfs_qgroup_free_data(inode, start, end - start + 1);
5172
5173                 clear_extent_bit(io_tree, start, end,
5174                                  EXTENT_LOCKED | EXTENT_DIRTY |
5175                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5176                                  EXTENT_DEFRAG, 1, 1,
5177                                  &cached_state, GFP_NOFS);
5178
5179                 cond_resched();
5180                 spin_lock(&io_tree->lock);
5181         }
5182         spin_unlock(&io_tree->lock);
5183 }
5184
5185 void btrfs_evict_inode(struct inode *inode)
5186 {
5187         struct btrfs_trans_handle *trans;
5188         struct btrfs_root *root = BTRFS_I(inode)->root;
5189         struct btrfs_block_rsv *rsv, *global_rsv;
5190         int steal_from_global = 0;
5191         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
5192         int ret;
5193
5194         trace_btrfs_inode_evict(inode);
5195
5196         evict_inode_truncate_pages(inode);
5197
5198         if (inode->i_nlink &&
5199             ((btrfs_root_refs(&root->root_item) != 0 &&
5200               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5201              btrfs_is_free_space_inode(inode)))
5202                 goto no_delete;
5203
5204         if (is_bad_inode(inode)) {
5205                 btrfs_orphan_del(NULL, inode);
5206                 goto no_delete;
5207         }
5208         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5209         if (!special_file(inode->i_mode))
5210                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5211
5212         btrfs_free_io_failure_record(inode, 0, (u64)-1);
5213
5214         if (root->fs_info->log_root_recovering) {
5215                 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5216                                  &BTRFS_I(inode)->runtime_flags));
5217                 goto no_delete;
5218         }
5219
5220         if (inode->i_nlink > 0) {
5221                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5222                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5223                 goto no_delete;
5224         }
5225
5226         ret = btrfs_commit_inode_delayed_inode(inode);
5227         if (ret) {
5228                 btrfs_orphan_del(NULL, inode);
5229                 goto no_delete;
5230         }
5231
5232         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
5233         if (!rsv) {
5234                 btrfs_orphan_del(NULL, inode);
5235                 goto no_delete;
5236         }
5237         rsv->size = min_size;
5238         rsv->failfast = 1;
5239         global_rsv = &root->fs_info->global_block_rsv;
5240
5241         btrfs_i_size_write(inode, 0);
5242
5243         /*
5244          * This is a bit simpler than btrfs_truncate since we've already
5245          * reserved our space for our orphan item in the unlink, so we just
5246          * need to reserve some slack space in case we add bytes and update
5247          * the inode item when doing the truncate.
5248          */
5249         while (1) {
5250                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5251                                              BTRFS_RESERVE_FLUSH_LIMIT);
5252
5253                 /*
5254                  * Try to steal from the global reserve: since we will
5255                  * likely not use this space anyway, we want to try as
5256                  * hard as possible to get this to work.
5257                  */
5258                 if (ret)
5259                         steal_from_global++;
5260                 else
5261                         steal_from_global = 0;
5262                 ret = 0;
5263
5264                 /*
5265                  * steal_from_global == 0: we reserved stuff, hooray!
5266                  * steal_from_global == 1: we didn't reserve stuff, boo!
5267                  * steal_from_global == 2: we've committed, still not a lot of
5268                  * room but maybe we'll have room in the global reserve this
5269                  * time.
5270                  * steal_from_global == 3: abandon all hope!
5271                  */
5272                 if (steal_from_global > 2) {
5273                         btrfs_warn(root->fs_info,
5274                                 "Could not get space for a delete, will truncate on mount %d",
5275                                 ret);
5276                         btrfs_orphan_del(NULL, inode);
5277                         btrfs_free_block_rsv(root, rsv);
5278                         goto no_delete;
5279                 }
5280
5281                 trans = btrfs_join_transaction(root);
5282                 if (IS_ERR(trans)) {
5283                         btrfs_orphan_del(NULL, inode);
5284                         btrfs_free_block_rsv(root, rsv);
5285                         goto no_delete;
5286                 }
5287
5288                 /*
5289                  * We can't just steal from the global reserve; we need to
5290                  * make sure there is room to do it. If not, we need to
5291                  * commit and try again.
5292                  */
5293                 if (steal_from_global) {
5294                         if (!btrfs_check_space_for_delayed_refs(trans, root))
5295                                 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5296                                                               min_size);
5297                         else
5298                                 ret = -ENOSPC;
5299                 }
5300
5301                 /*
5302                  * Couldn't steal from the global reserve; we have too much
5303                  * pending stuff built up. Commit the transaction and try
5304                  * again.
5305                  */
5306                 if (ret) {
5307                         ret = btrfs_commit_transaction(trans, root);
5308                         if (ret) {
5309                                 btrfs_orphan_del(NULL, inode);
5310                                 btrfs_free_block_rsv(root, rsv);
5311                                 goto no_delete;
5312                         }
5313                         continue;
5314                 } else {
5315                         steal_from_global = 0;
5316                 }
5317
5318                 trans->block_rsv = rsv;
5319
5320                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5321                 if (ret != -ENOSPC && ret != -EAGAIN)
5322                         break;
5323
5324                 trans->block_rsv = &root->fs_info->trans_block_rsv;
5325                 btrfs_end_transaction(trans, root);
5326                 trans = NULL;
5327                 btrfs_btree_balance_dirty(root);
5328         }
5329
5330         btrfs_free_block_rsv(root, rsv);
5331
5332         /*
5333          * Errors here aren't a big deal; they just mean we leave orphan items
5334          * in the tree.  They will be cleaned up on the next mount.
5335          */
5336         if (ret == 0) {
5337                 trans->block_rsv = root->orphan_block_rsv;
5338                 btrfs_orphan_del(trans, inode);
5339         } else {
5340                 btrfs_orphan_del(NULL, inode);
5341         }
5342
5343         trans->block_rsv = &root->fs_info->trans_block_rsv;
5344         if (!(root == root->fs_info->tree_root ||
5345               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5346                 btrfs_return_ino(root, btrfs_ino(inode));
5347
5348         btrfs_end_transaction(trans, root);
5349         btrfs_btree_balance_dirty(root);
5350 no_delete:
5351         btrfs_remove_delayed_node(inode);
5352         clear_inode(inode);
5353         return;
5354 }
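
/*
 * A compressed sketch of the reservation retry policy implemented by the
 * while (1) loop in btrfs_evict_inode() above.  The helper names are
 * hypothetical stubs standing in for btrfs_block_rsv_refill(), the
 * global-reserve migration and btrfs_commit_transaction(): each failed
 * refill escalates the counter, a successful refill resets it, and after
 * the second post-commit failure we give up.
 */
#include <errno.h>

static int try_refill(void) { return -1; }              /* hypothetical stub */
static int try_steal_from_global(void) { return -1; }   /* hypothetical stub */
static int commit_transaction(void) { return 0; }       /* hypothetical stub */

static int reserve_with_escalation(void)
{
        int failures = 0;
        int ret;

        for (;;) {
                if (try_refill() == 0)
                        failures = 0;           /* reserved normally */
                else
                        failures++;

                if (failures > 2)
                        return -ENOSPC;         /* abandon all hope! */
                if (failures == 0)
                        return 0;
                if (try_steal_from_global() == 0)
                        return 0;               /* global reserve had room */

                /* no room anywhere: commit to free pinned space, retry */
                ret = commit_transaction();
                if (ret)
                        return ret;
        }
}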
5355
5356 /*
5357  * This returns, in the location pointer, the key found in the dir entry.
5358  * If no dir entry was found, location->objectid is 0.
5359  */
5360 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5361                                struct btrfs_key *location)
5362 {
5363         const char *name = dentry->d_name.name;
5364         int namelen = dentry->d_name.len;
5365         struct btrfs_dir_item *di;
5366         struct btrfs_path *path;
5367         struct btrfs_root *root = BTRFS_I(dir)->root;
5368         int ret = 0;
5369
5370         path = btrfs_alloc_path();
5371         if (!path)
5372                 return -ENOMEM;
5373
5374         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
5375                                     namelen, 0);
5376         if (IS_ERR(di))
5377                 ret = PTR_ERR(di);
5378
5379         if (IS_ERR_OR_NULL(di))
5380                 goto out_err;
5381
5382         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5383 out:
5384         btrfs_free_path(path);
5385         return ret;
5386 out_err:
5387         location->objectid = 0;
5388         goto out;
5389 }
5390
5391 /*
5392  * when we hit a tree root in a directory, the btrfs part of the inode
5393  * needs to be changed to reflect the root directory of the tree root.  This
5394  * is kind of like crossing a mount point.
5395  */
5396 static int fixup_tree_root_location(struct btrfs_root *root,
5397                                     struct inode *dir,
5398                                     struct dentry *dentry,
5399                                     struct btrfs_key *location,
5400                                     struct btrfs_root **sub_root)
5401 {
5402         struct btrfs_path *path;
5403         struct btrfs_root *new_root;
5404         struct btrfs_root_ref *ref;
5405         struct extent_buffer *leaf;
5406         struct btrfs_key key;
5407         int ret;
5408         int err = 0;
5409
5410         path = btrfs_alloc_path();
5411         if (!path) {
5412                 err = -ENOMEM;
5413                 goto out;
5414         }
5415
5416         err = -ENOENT;
5417         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5418         key.type = BTRFS_ROOT_REF_KEY;
5419         key.offset = location->objectid;
5420
5421         ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
5422                                 0, 0);
5423         if (ret) {
5424                 if (ret < 0)
5425                         err = ret;
5426                 goto out;
5427         }
5428
5429         leaf = path->nodes[0];
5430         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5431         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5432             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5433                 goto out;
5434
5435         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5436                                    (unsigned long)(ref + 1),
5437                                    dentry->d_name.len);
5438         if (ret)
5439                 goto out;
5440
5441         btrfs_release_path(path);
5442
5443         new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
5444         if (IS_ERR(new_root)) {
5445                 err = PTR_ERR(new_root);
5446                 goto out;
5447         }
5448
5449         *sub_root = new_root;
5450         location->objectid = btrfs_root_dirid(&new_root->root_item);
5451         location->type = BTRFS_INODE_ITEM_KEY;
5452         location->offset = 0;
5453         err = 0;
5454 out:
5455         btrfs_free_path(path);
5456         return err;
5457 }
5458
5459 static void inode_tree_add(struct inode *inode)
5460 {
5461         struct btrfs_root *root = BTRFS_I(inode)->root;
5462         struct btrfs_inode *entry;
5463         struct rb_node **p;
5464         struct rb_node *parent;
5465         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5466         u64 ino = btrfs_ino(inode);
5467
5468         if (inode_unhashed(inode))
5469                 return;
5470         parent = NULL;
5471         spin_lock(&root->inode_lock);
5472         p = &root->inode_tree.rb_node;
5473         while (*p) {
5474                 parent = *p;
5475                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5476
5477                 if (ino < btrfs_ino(&entry->vfs_inode))
5478                         p = &parent->rb_left;
5479                 else if (ino > btrfs_ino(&entry->vfs_inode))
5480                         p = &parent->rb_right;
5481                 else {
5482                         WARN_ON(!(entry->vfs_inode.i_state &
5483                                   (I_WILL_FREE | I_FREEING)));
5484                         rb_replace_node(parent, new, &root->inode_tree);
5485                         RB_CLEAR_NODE(parent);
5486                         spin_unlock(&root->inode_lock);
5487                         return;
5488                 }
5489         }
5490         rb_link_node(new, parent, p);
5491         rb_insert_color(new, &root->inode_tree);
5492         spin_unlock(&root->inode_lock);
5493 }
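
/*
 * inode_tree_add() above uses the classic rbtree descend-with-a-link-
 * pointer idiom: 'p' always points at the child slot where the new node
 * belongs, so rb_link_node() can attach it without a second walk.  The
 * same idea on a plain, unbalanced binary search tree (illustration only,
 * hypothetical names, no rebalancing and no duplicate handling):
 */
struct bst_node {
        struct bst_node *left;
        struct bst_node *right;
        unsigned long long key;
};

static void bst_insert(struct bst_node **root, struct bst_node *new)
{
        struct bst_node **p = root;

        while (*p) {
                if (new->key < (*p)->key)
                        p = &(*p)->left;
                else
                        p = &(*p)->right;
        }
        new->left = NULL;
        new->right = NULL;
        *p = new;       /* link into the slot found during the descent */
}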
5494
5495 static void inode_tree_del(struct inode *inode)
5496 {
5497         struct btrfs_root *root = BTRFS_I(inode)->root;
5498         int empty = 0;
5499
5500         spin_lock(&root->inode_lock);
5501         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5502                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5503                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5504                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5505         }
5506         spin_unlock(&root->inode_lock);
5507
5508         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5509                 synchronize_srcu(&root->fs_info->subvol_srcu);
5510                 spin_lock(&root->inode_lock);
5511                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5512                 spin_unlock(&root->inode_lock);
5513                 if (empty)
5514                         btrfs_add_dead_root(root);
5515         }
5516 }
5517
5518 void btrfs_invalidate_inodes(struct btrfs_root *root)
5519 {
5520         struct rb_node *node;
5521         struct rb_node *prev;
5522         struct btrfs_inode *entry;
5523         struct inode *inode;
5524         u64 objectid = 0;
5525
5526         if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
5527                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5528
5529         spin_lock(&root->inode_lock);
5530 again:
5531         node = root->inode_tree.rb_node;
5532         prev = NULL;
5533         while (node) {
5534                 prev = node;
5535                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5536
5537                 if (objectid < btrfs_ino(&entry->vfs_inode))
5538                         node = node->rb_left;
5539                 else if (objectid > btrfs_ino(&entry->vfs_inode))
5540                         node = node->rb_right;
5541                 else
5542                         break;
5543         }
5544         if (!node) {
5545                 while (prev) {
5546                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
5547                         if (objectid <= btrfs_ino(&entry->vfs_inode)) {
5548                                 node = prev;
5549                                 break;
5550                         }
5551                         prev = rb_next(prev);
5552                 }
5553         }
5554         while (node) {
5555                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5556                 objectid = btrfs_ino(&entry->vfs_inode) + 1;
5557                 inode = igrab(&entry->vfs_inode);
5558                 if (inode) {
5559                         spin_unlock(&root->inode_lock);
5560                         if (atomic_read(&inode->i_count) > 1)
5561                                 d_prune_aliases(inode);
5562                         /*
5563                          * btrfs_drop_inode will have it removed from
5564                          * the inode cache when its usage count
5565                          * hits zero.
5566                          */
5567                         iput(inode);
5568                         cond_resched();
5569                         spin_lock(&root->inode_lock);
5570                         goto again;
5571                 }
5572
5573                 if (cond_resched_lock(&root->inode_lock))
5574                         goto again;
5575
5576                 node = rb_next(node);
5577         }
5578         spin_unlock(&root->inode_lock);
5579 }
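
/*
 * A sketch of the resume-by-key iteration used by btrfs_invalidate_inodes()
 * above: because the lock is dropped while an entry is processed, node
 * pointers may go stale, so the loop remembers the last key (the objectid
 * cursor) and re-searches for the first key >= cursor after every restart.
 * A sorted singly linked list stands in for the rbtree, the names are
 * hypothetical, and the real code additionally takes a reference (igrab)
 * before dropping the lock so the entry cannot be freed under it.
 */
#include <pthread.h>

struct kn {
        struct kn *next;                /* keys sorted ascending */
        unsigned long long key;
};

static void visit_all(struct kn *head, pthread_mutex_t *lock,
                      void (*process)(struct kn *))
{
        unsigned long long cursor = 0;
        struct kn *n;

        pthread_mutex_lock(lock);
again:
        for (n = head; n; n = n->next) {
                if (n->key < cursor)
                        continue;       /* visited before an earlier restart */
                cursor = n->key + 1;    /* resume point survives relocking */
                pthread_mutex_unlock(lock);
                process(n);             /* may block; the list may change */
                pthread_mutex_lock(lock);
                goto again;             /* pointers are stale, re-search */
        }
        pthread_mutex_unlock(lock);
}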
5580
5581 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5582 {
5583         struct btrfs_iget_args *args = p;
5584         inode->i_ino = args->location->objectid;
5585         memcpy(&BTRFS_I(inode)->location, args->location,
5586                sizeof(*args->location));
5587         BTRFS_I(inode)->root = args->root;
5588         return 0;
5589 }
5590
5591 static int btrfs_find_actor(struct inode *inode, void *opaque)
5592 {
5593         struct btrfs_iget_args *args = opaque;
5594         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5595                 args->root == BTRFS_I(inode)->root;
5596 }
5597
5598 static struct inode *btrfs_iget_locked(struct super_block *s,
5599                                        struct btrfs_key *location,
5600                                        struct btrfs_root *root)
5601 {
5602         struct inode *inode;
5603         struct btrfs_iget_args args;
5604         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5605
5606         args.location = location;
5607         args.root = root;
5608
5609         inode = iget5_locked(s, hashval, btrfs_find_actor,
5610                              btrfs_init_locked_inode,
5611                              (void *)&args);
5612         return inode;
5613 }
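
/*
 * Note on the iget5_locked() pairing above: btrfs_find_actor() decides
 * whether an already-cached inode matches (same location objectid and the
 * same root), while btrfs_init_locked_inode() stamps a newly allocated
 * one.  The returned inode has I_NEW set only when it was freshly
 * allocated, which btrfs_iget() below uses to decide whether the inode
 * still has to be read from disk.
 */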
5614
5615 /* Get an inode object given its location and corresponding root.
5616  * Returns in *new whether the inode had to be read from disk.
5617  */
5618 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5619                          struct btrfs_root *root, int *new)
5620 {
5621         struct inode *inode;
5622
5623         inode = btrfs_iget_locked(s, location, root);
5624         if (!inode)
5625                 return ERR_PTR(-ENOMEM);
5626
5627         if (inode->i_state & I_NEW) {
5628                 btrfs_read_locked_inode(inode);
5629                 if (!is_bad_inode(inode)) {
5630                         inode_tree_add(inode);
5631                         unlock_new_inode(inode);
5632                         if (new)
5633                                 *new = 1;
5634                 } else {
5635                         unlock_new_inode(inode);
5636                         iput(inode);
5637                         inode = ERR_PTR(-ESTALE);
5638                 }
5639         }
5640
5641         return inode;
5642 }
5643
5644 static struct inode *new_simple_dir(struct super_block *s,
5645                                     struct btrfs_key *key,
5646                                     struct btrfs_root *root)
5647 {
5648         struct inode *inode = new_inode(s);
5649
5650         if (!inode)
5651                 return ERR_PTR(-ENOMEM);
5652
5653         BTRFS_I(inode)->root = root;
5654         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5655         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5656
5657         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5658         inode->i_op = &btrfs_dir_ro_inode_operations;
5659         inode->i_fop = &simple_dir_operations;
5660         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5661         inode->i_mtime = CURRENT_TIME;
5662         inode->i_atime = inode->i_mtime;
5663         inode->i_ctime = inode->i_mtime;
5664         BTRFS_I(inode)->i_otime = inode->i_mtime;
5665
5666         return inode;
5667 }
5668
5669 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5670 {
5671         struct inode *inode;
5672         struct btrfs_root *root = BTRFS_I(dir)->root;
5673         struct btrfs_root *sub_root = root;
5674         struct btrfs_key location;
5675         int index;
5676         int ret = 0;
5677
5678         if (dentry->d_name.len > BTRFS_NAME_LEN)
5679                 return ERR_PTR(-ENAMETOOLONG);
5680
5681         ret = btrfs_inode_by_name(dir, dentry, &location);
5682         if (ret < 0)
5683                 return ERR_PTR(ret);
5684
5685         if (location.objectid == 0)
5686                 return ERR_PTR(-ENOENT);
5687
5688         if (location.type == BTRFS_INODE_ITEM_KEY) {
5689                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5690                 return inode;
5691         }
5692
5693         BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5694
5695         index = srcu_read_lock(&root->fs_info->subvol_srcu);
5696         ret = fixup_tree_root_location(root, dir, dentry,
5697                                        &location, &sub_root);
5698         if (ret < 0) {
5699                 if (ret != -ENOENT)
5700                         inode = ERR_PTR(ret);
5701                 else
5702                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5703         } else {
5704                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5705         }
5706         srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5707
5708         if (!IS_ERR(inode) && root != sub_root) {
5709                 down_read(&root->fs_info->cleanup_work_sem);
5710                 if (!(inode->i_sb->s_flags & MS_RDONLY))
5711                         ret = btrfs_orphan_cleanup(sub_root);
5712                 up_read(&root->fs_info->cleanup_work_sem);
5713                 if (ret) {
5714                         iput(inode);
5715                         inode = ERR_PTR(ret);
5716                 }
5717         }
5718
5719         return inode;
5720 }
5721
5722 static int btrfs_dentry_delete(const struct dentry *dentry)
5723 {
5724         struct btrfs_root *root;
5725         struct inode *inode = d_inode(dentry);
5726
5727         if (!inode && !IS_ROOT(dentry))
5728                 inode = d_inode(dentry->d_parent);
5729
5730         if (inode) {
5731                 root = BTRFS_I(inode)->root;
5732                 if (btrfs_root_refs(&root->root_item) == 0)
5733                         return 1;
5734
5735                 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5736                         return 1;
5737         }
5738         return 0;
5739 }
5740
5741 static void btrfs_dentry_release(struct dentry *dentry)
5742 {
5743         kfree(dentry->d_fsdata);
5744 }
5745
5746 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5747                                    unsigned int flags)
5748 {
5749         struct inode *inode;
5750
5751         inode = btrfs_lookup_dentry(dir, dentry);
5752         if (IS_ERR(inode)) {
5753                 if (PTR_ERR(inode) == -ENOENT)
5754                         inode = NULL;
5755                 else
5756                         return ERR_CAST(inode);
5757         }
5758
5759         return d_splice_alias(inode, dentry);
5760 }
5761
5762 unsigned char btrfs_filetype_table[] = {
5763         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5764 };
5765
5766 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5767 {
5768         struct inode *inode = file_inode(file);
5769         struct btrfs_root *root = BTRFS_I(inode)->root;
5770         struct btrfs_item *item;
5771         struct btrfs_dir_item *di;
5772         struct btrfs_key key;
5773         struct btrfs_key found_key;
5774         struct btrfs_path *path;
5775         struct list_head ins_list;
5776         struct list_head del_list;
5777         int ret;
5778         struct extent_buffer *leaf;
5779         int slot;
5780         unsigned char d_type;
5781         int over = 0;
5782         u32 di_cur;
5783         u32 di_total;
5784         u32 di_len;
5785         int key_type = BTRFS_DIR_INDEX_KEY;
5786         char tmp_name[32];
5787         char *name_ptr;
5788         int name_len;
5789         int is_curr = 0;        /* 1 if ctx->pos points to the current index */
5790         bool emitted;
5791
5792         /* FIXME, use a real flag for deciding about the key type */
5793         if (root->fs_info->tree_root == root)
5794                 key_type = BTRFS_DIR_ITEM_KEY;
5795
5796         if (!dir_emit_dots(file, ctx))
5797                 return 0;
5798
5799         path = btrfs_alloc_path();
5800         if (!path)
5801                 return -ENOMEM;
5802
5803         path->reada = 1;
5804
5805         if (key_type == BTRFS_DIR_INDEX_KEY) {
5806                 INIT_LIST_HEAD(&ins_list);
5807                 INIT_LIST_HEAD(&del_list);
5808                 btrfs_get_delayed_items(inode, &ins_list, &del_list);
5809         }
5810
5811         key.type = key_type;
5812         key.offset = ctx->pos;
5813         key.objectid = btrfs_ino(inode);
5814
5815         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5816         if (ret < 0)
5817                 goto err;
5818
5819         emitted = false;
5820         while (1) {
5821                 leaf = path->nodes[0];
5822                 slot = path->slots[0];
5823                 if (slot >= btrfs_header_nritems(leaf)) {
5824                         ret = btrfs_next_leaf(root, path);
5825                         if (ret < 0)
5826                                 goto err;
5827                         else if (ret > 0)
5828                                 break;
5829                         continue;
5830                 }
5831
5832                 item = btrfs_item_nr(slot);
5833                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5834
5835                 if (found_key.objectid != key.objectid)
5836                         break;
5837                 if (found_key.type != key_type)
5838                         break;
5839                 if (found_key.offset < ctx->pos)
5840                         goto next;
5841                 if (key_type == BTRFS_DIR_INDEX_KEY &&
5842                     btrfs_should_delete_dir_index(&del_list,
5843                                                   found_key.offset))
5844                         goto next;
5845
5846                 ctx->pos = found_key.offset;
5847                 is_curr = 1;
5848
5849                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5850                 di_cur = 0;
5851                 di_total = btrfs_item_size(leaf, item);
5852
5853                 while (di_cur < di_total) {
5854                         struct btrfs_key location;
5855
5856                         if (verify_dir_item(root, leaf, di))
5857                                 break;
5858
5859                         name_len = btrfs_dir_name_len(leaf, di);
5860                         if (name_len <= sizeof(tmp_name)) {
5861                                 name_ptr = tmp_name;
5862                         } else {
5863                                 name_ptr = kmalloc(name_len, GFP_NOFS);
5864                                 if (!name_ptr) {
5865                                         ret = -ENOMEM;
5866                                         goto err;
5867                                 }
5868                         }
5869                         read_extent_buffer(leaf, name_ptr,
5870                                            (unsigned long)(di + 1), name_len);
5871
5872                         d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5873                         btrfs_dir_item_key_to_cpu(leaf, di, &location);
5874
5875
5876                         /* Is this a reference to our own snapshot? If so,
5877                          * skip it.
5878                          *
5879                          * In contrast to old kernels, we insert the snapshot's
5880                          * dir item and dir index after it has been created, so
5881                          * we won't find a reference to our own snapshot. We
5882                          * still keep the following code for backward
5883                          * compatibility.
5884                          */
5885                         if (location.type == BTRFS_ROOT_ITEM_KEY &&
5886                             location.objectid == root->root_key.objectid) {
5887                                 over = 0;
5888                                 goto skip;
5889                         }
5890                         over = !dir_emit(ctx, name_ptr, name_len,
5891                                        location.objectid, d_type);
5892
5893 skip:
5894                         if (name_ptr != tmp_name)
5895                                 kfree(name_ptr);
5896
5897                         if (over)
5898                                 goto nopos;
5899                         emitted = true;
5900                         di_len = btrfs_dir_name_len(leaf, di) +
5901                                  btrfs_dir_data_len(leaf, di) + sizeof(*di);
5902                         di_cur += di_len;
5903                         di = (struct btrfs_dir_item *)((char *)di + di_len);
5904                 }
5905 next:
5906                 path->slots[0]++;
5907         }
5908
5909         if (key_type == BTRFS_DIR_INDEX_KEY) {
5910                 if (is_curr)
5911                         ctx->pos++;
5912                 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
5913                 if (ret)
5914                         goto nopos;
5915         }
5916
5917         /*
5918          * If we haven't emitted any dir entry, we must not touch ctx->pos as
5919          * it was set to the termination value in a previous call. We assume
5920          * that "." and ".." were emitted if we reach this point and set the
5921          * termination value as well for an empty directory.
5922          */
5923         if (ctx->pos > 2 && !emitted)
5924                 goto nopos;
5925
5926         /* Reached end of directory/root. Bump pos past the last item. */
5927         ctx->pos++;
5928
5929         /*
5930          * Stop new entries from being returned after we return the last
5931          * entry.
5932          *
5933          * New directory entries are assigned a strictly increasing
5934          * offset.  This means that new entries created during readdir
5935          * are *guaranteed* to be seen in the future by that readdir.
5936          * This has broken buggy programs which operate on names as
5937          * they're returned by readdir.  Until we re-use freed offsets
5938          * we have this hack to stop new entries from being returned
5939          * under the assumption that they'll never reach this huge
5940          * offset.
5941          *
5942          * This is being careful not to overflow 32bit loff_t unless the
5943          * last entry requires it because doing so has broken 32bit apps
5944          * in the past.
5945          */
5946         if (key_type == BTRFS_DIR_INDEX_KEY) {
5947                 if (ctx->pos >= INT_MAX)
5948                         ctx->pos = LLONG_MAX;
5949                 else
5950                         ctx->pos = INT_MAX;
5951         }
5952 nopos:
5953         ret = 0;
5954 err:
5955         if (key_type == BTRFS_DIR_INDEX_KEY)
5956                 btrfs_put_delayed_items(&ins_list, &del_list);
5957         btrfs_free_path(path);
5958         return ret;
5959 }
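
/*
 * A sketch of the end-of-directory offset policy above: once the last real
 * entry has been emitted, ctx->pos is parked at a huge value so subsequent
 * getdents() calls return nothing, while a 32-bit loff_t overflow is
 * avoided unless the position was already past INT_MAX.  The helper name
 * is hypothetical:
 */
#include <limits.h>

static long long readdir_park_pos(long long pos)
{
        return pos >= INT_MAX ? LLONG_MAX : (long long)INT_MAX;
}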
5960
5961 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5962 {
5963         struct btrfs_root *root = BTRFS_I(inode)->root;
5964         struct btrfs_trans_handle *trans;
5965         int ret = 0;
5966         bool nolock = false;
5967
5968         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5969                 return 0;
5970
5971         if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5972                 nolock = true;
5973
5974         if (wbc->sync_mode == WB_SYNC_ALL) {
5975                 if (nolock)
5976                         trans = btrfs_join_transaction_nolock(root);
5977                 else
5978                         trans = btrfs_join_transaction(root);
5979                 if (IS_ERR(trans))
5980                         return PTR_ERR(trans);
5981                 ret = btrfs_commit_transaction(trans, root);
5982         }
5983         return ret;
5984 }
5985
5986 /*
5987  * This is somewhat expensive, since it updates the tree every time the
5988  * inode changes.  But the inode is most likely to be found in cache.
5989  * FIXME: needs more benchmarking; there are no reasons other than performance
5990  * to keep or drop this code.
5991  */
5992 static int btrfs_dirty_inode(struct inode *inode)
5993 {
5994         struct btrfs_root *root = BTRFS_I(inode)->root;
5995         struct btrfs_trans_handle *trans;
5996         int ret;
5997
5998         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5999                 return 0;
6000
6001         trans = btrfs_join_transaction(root);
6002         if (IS_ERR(trans))
6003                 return PTR_ERR(trans);
6004
6005         ret = btrfs_update_inode(trans, root, inode);
6006         if (ret && ret == -ENOSPC) {
6007                 /* whoops, let's try again with the full transaction */
6008                 btrfs_end_transaction(trans, root);
6009                 trans = btrfs_start_transaction(root, 1);
6010                 if (IS_ERR(trans))
6011                         return PTR_ERR(trans);
6012
6013                 ret = btrfs_update_inode(trans, root, inode);
6014         }
6015         btrfs_end_transaction(trans, root);
6016         if (BTRFS_I(inode)->delayed_node)
6017                 btrfs_balance_delayed_items(root);
6018
6019         return ret;
6020 }
6021
6022 /*
6023  * This is a copy of file_update_time.  We need it so we can return an error
6024  * on ENOSPC when updating the inode in the case of file writes and mmap writes.
6025  */
6026 static int btrfs_update_time(struct inode *inode, struct timespec *now,
6027                              int flags)
6028 {
6029         struct btrfs_root *root = BTRFS_I(inode)->root;
6030
6031         if (btrfs_root_readonly(root))
6032                 return -EROFS;
6033
6034         if (flags & S_VERSION)
6035                 inode_inc_iversion(inode);
6036         if (flags & S_CTIME)
6037                 inode->i_ctime = *now;
6038         if (flags & S_MTIME)
6039                 inode->i_mtime = *now;
6040         if (flags & S_ATIME)
6041                 inode->i_atime = *now;
6042         return btrfs_dirty_inode(inode);
6043 }
6044
6045 /*
6046  * find the highest existing sequence number in a directory
6047  * and then set the in-memory index_cnt variable to point at
6048  * the first free sequence number
6049  */
6050 static int btrfs_set_inode_index_count(struct inode *inode)
6051 {
6052         struct btrfs_root *root = BTRFS_I(inode)->root;
6053         struct btrfs_key key, found_key;
6054         struct btrfs_path *path;
6055         struct extent_buffer *leaf;
6056         int ret;
6057
6058         key.objectid = btrfs_ino(inode);
6059         key.type = BTRFS_DIR_INDEX_KEY;
6060         key.offset = (u64)-1;
6061
6062         path = btrfs_alloc_path();
6063         if (!path)
6064                 return -ENOMEM;
6065
6066         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6067         if (ret < 0)
6068                 goto out;
6069         /* FIXME: we should be able to handle this */
6070         if (ret == 0)
6071                 goto out;
6072         ret = 0;
6073
6074         /*
6075          * MAGIC NUMBER EXPLANATION:
6076          * we search a directory based on f_pos, and '.' and '..' have
6077          * f_pos of 0 and 1 respectively, so everybody else has to start
6078          * at 2
6079          */
6080         if (path->slots[0] == 0) {
6081                 BTRFS_I(inode)->index_cnt = 2;
6082                 goto out;
6083         }
6084
6085         path->slots[0]--;
6086
6087         leaf = path->nodes[0];
6088         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6089
6090         if (found_key.objectid != btrfs_ino(inode) ||
6091             found_key.type != BTRFS_DIR_INDEX_KEY) {
6092                 BTRFS_I(inode)->index_cnt = 2;
6093                 goto out;
6094         }
6095
6096         BTRFS_I(inode)->index_cnt = found_key.offset + 1;
6097 out:
6098         btrfs_free_path(path);
6099         return ret;
6100 }
6101
6102 /*
6103  * helper to find a free sequence number in a given directory.  This current
6104  * code is very simple; later versions will do smarter things in the btree
6105  */
6106 int btrfs_set_inode_index(struct inode *dir, u64 *index)
6107 {
6108         int ret = 0;
6109
6110         if (BTRFS_I(dir)->index_cnt == (u64)-1) {
6111                 ret = btrfs_inode_delayed_dir_index_count(dir);
6112                 if (ret) {
6113                         ret = btrfs_set_inode_index_count(dir);
6114                         if (ret)
6115                                 return ret;
6116                 }
6117         }
6118
6119         *index = BTRFS_I(dir)->index_cnt;
6120         BTRFS_I(dir)->index_cnt++;
6121
6122         return ret;
6123 }
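
/*
 * A standalone sketch of the lazy index counter implemented by the two
 * functions above, with hypothetical names: the counter starts out
 * unseeded ((u64)-1), is seeded on first use from the highest dir index
 * found on disk (or 2 for an empty directory, since offsets 0 and 1 belong
 * to "." and ".."), and afterwards simply increments.
 */
static unsigned long long demo_index_cnt = (unsigned long long)-1;

static unsigned long long demo_next_index(unsigned long long highest_on_disk,
                                          int dir_is_empty)
{
        if (demo_index_cnt == (unsigned long long)-1)   /* not seeded yet */
                demo_index_cnt = dir_is_empty ? 2 : highest_on_disk + 1;
        return demo_index_cnt++;
}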
6124
6125 static int btrfs_insert_inode_locked(struct inode *inode)
6126 {
6127         struct btrfs_iget_args args;
6128         args.location = &BTRFS_I(inode)->location;
6129         args.root = BTRFS_I(inode)->root;
6130
6131         return insert_inode_locked4(inode,
6132                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6133                    btrfs_find_actor, &args);
6134 }
6135
6136 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6137                                      struct btrfs_root *root,
6138                                      struct inode *dir,
6139                                      const char *name, int name_len,
6140                                      u64 ref_objectid, u64 objectid,
6141                                      umode_t mode, u64 *index)
6142 {
6143         struct inode *inode;
6144         struct btrfs_inode_item *inode_item;
6145         struct btrfs_key *location;
6146         struct btrfs_path *path;
6147         struct btrfs_inode_ref *ref;
6148         struct btrfs_key key[2];
6149         u32 sizes[2];
6150         int nitems = name ? 2 : 1;
6151         unsigned long ptr;
6152         int ret;
6153
6154         path = btrfs_alloc_path();
6155         if (!path)
6156                 return ERR_PTR(-ENOMEM);
6157
6158         inode = new_inode(root->fs_info->sb);
6159         if (!inode) {
6160                 btrfs_free_path(path);
6161                 return ERR_PTR(-ENOMEM);
6162         }
6163
6164         /*
6165          * For O_TMPFILE (no name), set the link count to 0, so that from
6166          * this point on we fill in an inode item with the correct link count.
6167          */
6168         if (!name)
6169                 set_nlink(inode, 0);
6170
6171         /*
6172          * we have to initialize this early, so we can reclaim the inode
6173          * number if we fail afterwards in this function.
6174          */
6175         inode->i_ino = objectid;
6176
6177         if (dir && name) {
6178                 trace_btrfs_inode_request(dir);
6179
6180                 ret = btrfs_set_inode_index(dir, index);
6181                 if (ret) {
6182                         btrfs_free_path(path);
6183                         iput(inode);
6184                         return ERR_PTR(ret);
6185                 }
6186         } else if (dir) {
6187                 *index = 0;
6188         }
6189         /*
6190          * index_cnt is ignored for everything but a dir;
6191          * btrfs_set_inode_index_count has an explanation for the magic
6192          * number
6193          */
6194         BTRFS_I(inode)->index_cnt = 2;
6195         BTRFS_I(inode)->dir_index = *index;
6196         BTRFS_I(inode)->root = root;
6197         BTRFS_I(inode)->generation = trans->transid;
6198         inode->i_generation = BTRFS_I(inode)->generation;
6199
6200         /*
6201          * We could have gotten an inode number from somebody who was fsynced
6202          * and then removed in this same transaction, so let's just set full
6203          * sync since it will be a full sync anyway and this will blow away the
6204          * old info in the log.
6205          */
6206         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6207
6208         key[0].objectid = objectid;
6209         key[0].type = BTRFS_INODE_ITEM_KEY;
6210         key[0].offset = 0;
6211
6212         sizes[0] = sizeof(struct btrfs_inode_item);
6213
6214         if (name) {
6215                 /*
6216                  * Start new inodes with an inode_ref. This is slightly more
6217                  * efficient for small numbers of hard links since they will
6218                  * be packed into one item. Extended refs will kick in if we
6219                  * add more hard links than can fit in the ref item.
6220                  */
6221                 key[1].objectid = objectid;
6222                 key[1].type = BTRFS_INODE_REF_KEY;
6223                 key[1].offset = ref_objectid;
6224
6225                 sizes[1] = name_len + sizeof(*ref);
6226         }
6227
6228         location = &BTRFS_I(inode)->location;
6229         location->objectid = objectid;
6230         location->offset = 0;
6231         location->type = BTRFS_INODE_ITEM_KEY;
6232
6233         ret = btrfs_insert_inode_locked(inode);
6234         if (ret < 0)
6235                 goto fail;
6236
6237         path->leave_spinning = 1;
6238         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6239         if (ret != 0)
6240                 goto fail_unlock;
6241
6242         inode_init_owner(inode, dir, mode);
6243         inode_set_bytes(inode, 0);
6244
6245         inode->i_mtime = CURRENT_TIME;
6246         inode->i_atime = inode->i_mtime;
6247         inode->i_ctime = inode->i_mtime;
6248         BTRFS_I(inode)->i_otime = inode->i_mtime;
6249
6250         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6251                                   struct btrfs_inode_item);
6252         memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
6253                              sizeof(*inode_item));
6254         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6255
6256         if (name) {
6257                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6258                                      struct btrfs_inode_ref);
6259                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6260                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6261                 ptr = (unsigned long)(ref + 1);
6262                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6263         }
6264
6265         btrfs_mark_buffer_dirty(path->nodes[0]);
6266         btrfs_free_path(path);
6267
6268         btrfs_inherit_iflags(inode, dir);
6269
6270         if (S_ISREG(mode)) {
6271                 if (btrfs_test_opt(root, NODATASUM))
6272                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6273                 if (btrfs_test_opt(root, NODATACOW))
6274                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6275                                 BTRFS_INODE_NODATASUM;
6276         }
6277
6278         inode_tree_add(inode);
6279
6280         trace_btrfs_inode_new(inode);
6281         btrfs_set_inode_last_trans(trans, inode);
6282
6283         btrfs_update_root_times(trans, root);
6284
6285         ret = btrfs_inode_inherit_props(trans, inode, dir);
6286         if (ret)
6287                 btrfs_err(root->fs_info,
6288                           "error inheriting props for ino %llu (root %llu): %d",
6289                           btrfs_ino(inode), root->root_key.objectid, ret);
6290
6291         return inode;
6292
6293 fail_unlock:
6294         unlock_new_inode(inode);
6295 fail:
6296         if (dir && name)
6297                 BTRFS_I(dir)->index_cnt--;
6298         btrfs_free_path(path);
6299         iput(inode);
6300         return ERR_PTR(ret);
6301 }
6302
6303 static inline u8 btrfs_inode_type(struct inode *inode)
6304 {
6305         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6306 }
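
/*
 * Worked example for btrfs_inode_type(): for a directory, i_mode & S_IFMT
 * is S_IFDIR == 0040000 (octal), and 0040000 >> S_SHIFT (12) == 4, so the
 * lookup lands on btrfs_type_by_mode[4], which holds BTRFS_FT_DIR.
 */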
6307
6308 /*
6309  * utility function to add 'inode' into 'parent_inode' with
6310  * a given name and a given sequence number.
6311  * If 'add_backref' is true, also insert a backref from the
6312  * inode to the parent directory.
6313  */
6314 int btrfs_add_link(struct btrfs_trans_handle *trans,
6315                    struct inode *parent_inode, struct inode *inode,
6316                    const char *name, int name_len, int add_backref, u64 index)
6317 {
6318         int ret = 0;
6319         struct btrfs_key key;
6320         struct btrfs_root *root = BTRFS_I(parent_inode)->root;
6321         u64 ino = btrfs_ino(inode);
6322         u64 parent_ino = btrfs_ino(parent_inode);
6323
6324         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6325                 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
6326         } else {
6327                 key.objectid = ino;
6328                 key.type = BTRFS_INODE_ITEM_KEY;
6329                 key.offset = 0;
6330         }
6331
6332         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6333                 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
6334                                          key.objectid, root->root_key.objectid,
6335                                          parent_ino, index, name, name_len);
6336         } else if (add_backref) {
6337                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6338                                              parent_ino, index);
6339         }
6340
6341         /* Nothing to clean up yet */
6342         if (ret)
6343                 return ret;
6344
6345         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6346                                     parent_inode, &key,
6347                                     btrfs_inode_type(inode), index);
6348         if (ret == -EEXIST || ret == -EOVERFLOW)
6349                 goto fail_dir_item;
6350         else if (ret) {
6351                 btrfs_abort_transaction(trans, root, ret);
6352                 return ret;
6353         }
6354
6355         btrfs_i_size_write(parent_inode, parent_inode->i_size +
6356                            name_len * 2);
6357         inode_inc_iversion(parent_inode);
6358         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
6359         ret = btrfs_update_inode(trans, root, parent_inode);
6360         if (ret)
6361                 btrfs_abort_transaction(trans, root, ret);
6362         return ret;
6363
6364 fail_dir_item:
6365         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6366                 u64 local_index;
6367                 int err;
6368                 err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
6369                                  key.objectid, root->root_key.objectid,
6370                                  parent_ino, &local_index, name, name_len);
6371
6372         } else if (add_backref) {
6373                 u64 local_index;
6374                 int err;
6375
6376                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6377                                           ino, parent_ino, &local_index);
6378         }
6379         return ret;
6380 }
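
/*
 * Note on the i_size update in btrfs_add_link() above: btrfs maintains a
 * directory's i_size as the sum of the name lengths of its entries,
 * counted twice because every name is stored in both a dir item and a dir
 * index item - hence the name_len * 2.
 */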
6381
6382 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6383                             struct inode *dir, struct dentry *dentry,
6384                             struct inode *inode, int backref, u64 index)
6385 {
6386         int err = btrfs_add_link(trans, dir, inode,
6387                                  dentry->d_name.name, dentry->d_name.len,
6388                                  backref, index);
6389         if (err > 0)
6390                 err = -EEXIST;
6391         return err;
6392 }
6393
6394 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6395                         umode_t mode, dev_t rdev)
6396 {
6397         struct btrfs_trans_handle *trans;
6398         struct btrfs_root *root = BTRFS_I(dir)->root;
6399         struct inode *inode = NULL;
6400         int err;
6401         int drop_inode = 0;
6402         u64 objectid;
6403         u64 index = 0;
6404
6405         /*
6406          * 2 for inode item and ref
6407          * 2 for dir items
6408          * 1 for xattr if selinux is on
6409          */
6410         trans = btrfs_start_transaction(root, 5);
6411         if (IS_ERR(trans))
6412                 return PTR_ERR(trans);
6413
6414         err = btrfs_find_free_ino(root, &objectid);
6415         if (err)
6416                 goto out_unlock;
6417
6418         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6419                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6420                                 mode, &index);
6421         if (IS_ERR(inode)) {
6422                 err = PTR_ERR(inode);
6423                 goto out_unlock;
6424         }
6425
6426         /*
6427          * If the active LSM wants to access the inode during
6428          * d_instantiate it needs these. Smack checks to see
6429          * if the filesystem supports xattrs by looking at the
6430          * ops vector.
6431          */
6432         inode->i_op = &btrfs_special_inode_operations;
6433         init_special_inode(inode, inode->i_mode, rdev);
6434
6435         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6436         if (err)
6437                 goto out_unlock_inode;
6438
6439         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6440         if (err) {
6441                 goto out_unlock_inode;
6442         } else {
6443                 btrfs_update_inode(trans, root, inode);
6444                 d_instantiate_new(dentry, inode);
6445         }
6446
6447 out_unlock:
6448         btrfs_end_transaction(trans, root);
6449         btrfs_balance_delayed_items(root);
6450         btrfs_btree_balance_dirty(root);
6451         if (drop_inode) {
6452                 inode_dec_link_count(inode);
6453                 iput(inode);
6454         }
6455         return err;
6456
6457 out_unlock_inode:
6458         drop_inode = 1;
6459         unlock_new_inode(inode);
6460         goto out_unlock;
6461
6462 }
6463
6464 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6465                         umode_t mode, bool excl)
6466 {
6467         struct btrfs_trans_handle *trans;
6468         struct btrfs_root *root = BTRFS_I(dir)->root;
6469         struct inode *inode = NULL;
6470         int drop_inode_on_err = 0;
6471         int err;
6472         u64 objectid;
6473         u64 index = 0;
6474
6475         /*
6476          * 2 for inode item and ref
6477          * 2 for dir items
6478          * 1 for xattr if selinux is on
6479          */
6480         trans = btrfs_start_transaction(root, 5);
6481         if (IS_ERR(trans))
6482                 return PTR_ERR(trans);
6483
6484         err = btrfs_find_free_ino(root, &objectid);
6485         if (err)
6486                 goto out_unlock;
6487
6488         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6489                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6490                                 mode, &index);
6491         if (IS_ERR(inode)) {
6492                 err = PTR_ERR(inode);
6493                 goto out_unlock;
6494         }
6495         drop_inode_on_err = 1;
6496         /*
6497          * If the active LSM wants to access the inode during
6498          * d_instantiate it needs these. Smack checks to see
6499          * if the filesystem supports xattrs by looking at the
6500          * ops vector.
6501          */
6502         inode->i_fop = &btrfs_file_operations;
6503         inode->i_op = &btrfs_file_inode_operations;
6504         inode->i_mapping->a_ops = &btrfs_aops;
6505
6506         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6507         if (err)
6508                 goto out_unlock_inode;
6509
6510         err = btrfs_update_inode(trans, root, inode);
6511         if (err)
6512                 goto out_unlock_inode;
6513
6514         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6515         if (err)
6516                 goto out_unlock_inode;
6517
6518         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6519         d_instantiate_new(dentry, inode);
6520
6521 out_unlock:
6522         btrfs_end_transaction(trans, root);
6523         if (err && drop_inode_on_err) {
6524                 inode_dec_link_count(inode);
6525                 iput(inode);
6526         }
6527         btrfs_balance_delayed_items(root);
6528         btrfs_btree_balance_dirty(root);
6529         return err;
6530
6531 out_unlock_inode:
6532         unlock_new_inode(inode);
6533         goto out_unlock;
6534
6535 }
6536
6537 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6538                       struct dentry *dentry)
6539 {
6540         struct btrfs_trans_handle *trans = NULL;
6541         struct btrfs_root *root = BTRFS_I(dir)->root;
6542         struct inode *inode = d_inode(old_dentry);
6543         u64 index;
6544         int err;
6545         int drop_inode = 0;
6546
6547         /* do not allow sys_link's with other subvols of the same device */
6548         if (root->objectid != BTRFS_I(inode)->root->objectid)
6549                 return -EXDEV;
6550
6551         if (inode->i_nlink >= BTRFS_LINK_MAX)
6552                 return -EMLINK;
6553
6554         err = btrfs_set_inode_index(dir, &index);
6555         if (err)
6556                 goto fail;
6557
6558         /*
6559          * 2 items for inode and inode ref
6560          * 2 items for dir items
6561          * 1 item for parent inode
6562          */
6563         trans = btrfs_start_transaction(root, 5);
6564         if (IS_ERR(trans)) {
6565                 err = PTR_ERR(trans);
6566                 trans = NULL;
6567                 goto fail;
6568         }
6569
6570         /* There are several dir indexes for this inode, clear the cache. */
6571         BTRFS_I(inode)->dir_index = 0ULL;
6572         inc_nlink(inode);
6573         inode_inc_iversion(inode);
6574         inode->i_ctime = CURRENT_TIME;
6575         ihold(inode);
6576         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6577
6578         err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
6579
6580         if (err) {
6581                 drop_inode = 1;
6582         } else {
6583                 struct dentry *parent = dentry->d_parent;
6584                 err = btrfs_update_inode(trans, root, inode);
6585                 if (err)
6586                         goto fail;
6587                 if (inode->i_nlink == 1) {
6588                         /*
6589                          * If the new hard link count is 1, it's a file created
6590                          * with the open(2) O_TMPFILE flag.
6591                          */
6592                         err = btrfs_orphan_del(trans, inode);
6593                         if (err)
6594                                 goto fail;
6595                 }
6596                 d_instantiate(dentry, inode);
6597                 btrfs_log_new_name(trans, inode, NULL, parent);
6598         }
6599
6600         btrfs_balance_delayed_items(root);
6601 fail:
6602         if (trans)
6603                 btrfs_end_transaction(trans, root);
6604         if (drop_inode) {
6605                 inode_dec_link_count(inode);
6606                 iput(inode);
6607         }
6608         btrfs_btree_balance_dirty(root);
6609         return err;
6610 }
6611
6612 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6613 {
6614         struct inode *inode = NULL;
6615         struct btrfs_trans_handle *trans;
6616         struct btrfs_root *root = BTRFS_I(dir)->root;
6617         int err = 0;
6618         int drop_on_err = 0;
6619         u64 objectid = 0;
6620         u64 index = 0;
6621
6622         /*
6623          * 2 items for inode and ref
6624          * 2 items for dir items
6625          * 1 for xattr if selinux is on
6626          */
6627         trans = btrfs_start_transaction(root, 5);
6628         if (IS_ERR(trans))
6629                 return PTR_ERR(trans);
6630
6631         err = btrfs_find_free_ino(root, &objectid);
6632         if (err)
6633                 goto out_fail;
6634
6635         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6636                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6637                                 S_IFDIR | mode, &index);
6638         if (IS_ERR(inode)) {
6639                 err = PTR_ERR(inode);
6640                 goto out_fail;
6641         }
6642
6643         drop_on_err = 1;
6644         /* these must be set before we unlock the inode */
6645         inode->i_op = &btrfs_dir_inode_operations;
6646         inode->i_fop = &btrfs_dir_file_operations;
6647
6648         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6649         if (err)
6650                 goto out_fail_inode;
6651
6652         btrfs_i_size_write(inode, 0);
6653         err = btrfs_update_inode(trans, root, inode);
6654         if (err)
6655                 goto out_fail_inode;
6656
6657         err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
6658                              dentry->d_name.len, 0, index);
6659         if (err)
6660                 goto out_fail_inode;
6661
6662         d_instantiate_new(dentry, inode);
6663         drop_on_err = 0;
6664
6665 out_fail:
6666         btrfs_end_transaction(trans, root);
6667         if (drop_on_err) {
6668                 inode_dec_link_count(inode);
6669                 iput(inode);
6670         }
6671         btrfs_balance_delayed_items(root);
6672         btrfs_btree_balance_dirty(root);
6673         return err;
6674
6675 out_fail_inode:
6676         unlock_new_inode(inode);
6677         goto out_fail;
6678 }
6679
6680 /* Find the next extent map after the given one; the caller must hold the tree lock */
6681 static struct extent_map *next_extent_map(struct extent_map *em)
6682 {
6683         struct rb_node *next;
6684
6685         next = rb_next(&em->rb_node);
6686         if (!next)
6687                 return NULL;
6688         return container_of(next, struct extent_map, rb_node);
6689 }
6690
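/* Walk back to the previous extent map; the same locking rule applies. */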
6691 static struct extent_map *prev_extent_map(struct extent_map *em)
6692 {
6693         struct rb_node *prev;
6694
6695         prev = rb_prev(&em->rb_node);
6696         if (!prev)
6697                 return NULL;
6698         return container_of(prev, struct extent_map, rb_node);
6699 }
6700
6701 /* Helper for btrfs_get_extent.  Given an existing extent in the tree
6702  * (the nearest one to map_start) and an extent that we want to insert,
6703  * resolve any overlap and insert the best-fitting trimmed extent into
6704  * the tree.
6705  */
6706 static int merge_extent_mapping(struct extent_map_tree *em_tree,
6707                                 struct extent_map *existing,
6708                                 struct extent_map *em,
6709                                 u64 map_start)
6710 {
6711         struct extent_map *prev;
6712         struct extent_map *next;
6713         u64 start;
6714         u64 end;
6715         u64 start_diff;
6716
6717         BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
6718
6719         if (existing->start > map_start) {
6720                 next = existing;
6721                 prev = prev_extent_map(next);
6722         } else {
6723                 prev = existing;
6724                 next = next_extent_map(prev);
6725         }
6726
6727         start = prev ? extent_map_end(prev) : em->start;
6728         start = max_t(u64, start, em->start);
6729         end = next ? next->start : extent_map_end(em);
6730         end = min_t(u64, end, extent_map_end(em));
6731         start_diff = start - em->start;
6732         em->start = start;
6733         em->len = end - start;
6734         if (em->block_start < EXTENT_MAP_LAST_BYTE &&
6735             !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
6736                 em->block_start += start_diff;
6737                 em->block_len -= start_diff;
6738         }
6739         return add_extent_mapping(em_tree, em, 0);
6740 }
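
/*
 * A worked example of merge_extent_mapping()'s clamping (illustrative
 * numbers only): with an existing map [0, 8) in the tree, em = [4, 16),
 * map_start = 8 and no following map, prev is the existing map, so
 * start = max(8, 4) = 8 and end = 16.  em is trimmed to [8, 16), and
 * for an uncompressed on-disk extent block_start advances by
 * start_diff = 4 before the insert is retried.
 */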
6741
6742 static noinline int uncompress_inline(struct btrfs_path *path,
6743                                       struct inode *inode, struct page *page,
6744                                       size_t pg_offset, u64 extent_offset,
6745                                       struct btrfs_file_extent_item *item)
6746 {
6747         int ret;
6748         struct extent_buffer *leaf = path->nodes[0];
6749         char *tmp;
6750         size_t max_size;
6751         unsigned long inline_size;
6752         unsigned long ptr;
6753         int compress_type;
6754
6755         WARN_ON(pg_offset != 0);
6756         compress_type = btrfs_file_extent_compression(leaf, item);
6757         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6758         inline_size = btrfs_file_extent_inline_item_len(leaf,
6759                                         btrfs_item_nr(path->slots[0]));
6760         tmp = kmalloc(inline_size, GFP_NOFS);
6761         if (!tmp)
6762                 return -ENOMEM;
6763         ptr = btrfs_file_extent_inline_start(item);
6764
6765         read_extent_buffer(leaf, tmp, ptr, inline_size);
6766
6767         max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
6768         ret = btrfs_decompress(compress_type, tmp, page,
6769                                extent_offset, inline_size, max_size);
6770
6771         /*
6772          * decompression code contains a memset to fill in any space between the end
6773          * of the uncompressed data and the end of max_size in case the decompressed
6774          * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6775          * the end of an inline extent and the beginning of the next block, so we
6776          * cover that region here.
6777          */
6778
6779         if (max_size + pg_offset < PAGE_CACHE_SIZE) {
6780                 char *map = kmap(page);
6781                 memset(map + pg_offset + max_size, 0, PAGE_CACHE_SIZE - max_size - pg_offset);
6782                 kunmap(page);
6783         }
6784         kfree(tmp);
6785         return ret;
6786 }
6787
6788 /*
6789  * a bit scary, this does extent mapping from logical file offset to the disk.
6790  * the ugly parts come from merging extents from the disk with the in-ram
6791  * representation.  This gets more complex because of the data=ordered code,
6792  * where the in-ram extents might be locked pending data=ordered completion.
6793  *
6794  * This also copies inline extents directly into the page.
6795  */
6796
6797 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
6798                                     size_t pg_offset, u64 start, u64 len,
6799                                     int create)
6800 {
6801         int ret;
6802         int err = 0;
6803         u64 extent_start = 0;
6804         u64 extent_end = 0;
6805         u64 objectid = btrfs_ino(inode);
6806         u32 found_type;
6807         struct btrfs_path *path = NULL;
6808         struct btrfs_root *root = BTRFS_I(inode)->root;
6809         struct btrfs_file_extent_item *item;
6810         struct extent_buffer *leaf;
6811         struct btrfs_key found_key;
6812         struct extent_map *em = NULL;
6813         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
6814         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6815         struct btrfs_trans_handle *trans = NULL;
6816         const bool new_inline = !page || create;
6817
6818 again:
6819         read_lock(&em_tree->lock);
6820         em = lookup_extent_mapping(em_tree, start, len);
6821         if (em)
6822                 em->bdev = root->fs_info->fs_devices->latest_bdev;
6823         read_unlock(&em_tree->lock);
6824
6825         if (em) {
6826                 if (em->start > start || em->start + em->len <= start)
6827                         free_extent_map(em);
6828                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6829                         free_extent_map(em);
6830                 else
6831                         goto out;
6832         }
6833         em = alloc_extent_map();
6834         if (!em) {
6835                 err = -ENOMEM;
6836                 goto out;
6837         }
6838         em->bdev = root->fs_info->fs_devices->latest_bdev;
6839         em->start = EXTENT_MAP_HOLE;
6840         em->orig_start = EXTENT_MAP_HOLE;
6841         em->len = (u64)-1;
6842         em->block_len = (u64)-1;
6843
6844         if (!path) {
6845                 path = btrfs_alloc_path();
6846                 if (!path) {
6847                         err = -ENOMEM;
6848                         goto out;
6849                 }
6850                 /*
6851                  * Chances are we'll be called again, so go ahead and do
6852                  * readahead
6853                  */
6854                 path->reada = 1;
6855         }
6856
6857         ret = btrfs_lookup_file_extent(trans, root, path,
6858                                        objectid, start, trans != NULL);
6859         if (ret < 0) {
6860                 err = ret;
6861                 goto out;
6862         }
6863
6864         if (ret != 0) {
6865                 if (path->slots[0] == 0)
6866                         goto not_found;
6867                 path->slots[0]--;
6868         }
6869
6870         leaf = path->nodes[0];
6871         item = btrfs_item_ptr(leaf, path->slots[0],
6872                               struct btrfs_file_extent_item);
6873         /* are we inside the extent that was found? */
6874         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6875         found_type = found_key.type;
6876         if (found_key.objectid != objectid ||
6877             found_type != BTRFS_EXTENT_DATA_KEY) {
6878                 /*
6879                  * If we back up past the first extent we want to move forward
6880                  * and see if there is an extent in front of us, otherwise we'll
6881                  * say there is a hole for our whole search range which can
6882                  * cause problems.
6883                  */
6884                 extent_end = start;
6885                 goto next;
6886         }
6887
6888         found_type = btrfs_file_extent_type(leaf, item);
6889         extent_start = found_key.offset;
6890         if (found_type == BTRFS_FILE_EXTENT_REG ||
6891             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6892                 extent_end = extent_start +
6893                        btrfs_file_extent_num_bytes(leaf, item);
6894         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6895                 size_t size;
6896                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6897                 extent_end = ALIGN(extent_start + size, root->sectorsize);
6898         }
6899 next:
6900         if (start >= extent_end) {
6901                 path->slots[0]++;
6902                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6903                         ret = btrfs_next_leaf(root, path);
6904                         if (ret < 0) {
6905                                 err = ret;
6906                                 goto out;
6907                         }
6908                         if (ret > 0)
6909                                 goto not_found;
6910                         leaf = path->nodes[0];
6911                 }
6912                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6913                 if (found_key.objectid != objectid ||
6914                     found_key.type != BTRFS_EXTENT_DATA_KEY)
6915                         goto not_found;
6916                 if (start + len <= found_key.offset)
6917                         goto not_found;
6918                 if (start > found_key.offset)
6919                         goto next;
6920                 em->start = start;
6921                 em->orig_start = start;
6922                 em->len = found_key.offset - start;
6923                 goto not_found_em;
6924         }
6925
6926         btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em);
6927
6928         if (found_type == BTRFS_FILE_EXTENT_REG ||
6929             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6930                 goto insert;
6931         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6932                 unsigned long ptr;
6933                 char *map;
6934                 size_t size;
6935                 size_t extent_offset;
6936                 size_t copy_size;
6937
6938                 if (new_inline)
6939                         goto out;
6940
6941                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6942                 extent_offset = page_offset(page) + pg_offset - extent_start;
6943                 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6944                                 size - extent_offset);
6945                 em->start = extent_start + extent_offset;
6946                 em->len = ALIGN(copy_size, root->sectorsize);
6947                 em->orig_block_len = em->len;
6948                 em->orig_start = em->start;
6949                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6950                 if (create == 0 && !PageUptodate(page)) {
6951                         if (btrfs_file_extent_compression(leaf, item) !=
6952                             BTRFS_COMPRESS_NONE) {
6953                                 ret = uncompress_inline(path, inode, page,
6954                                                         pg_offset,
6955                                                         extent_offset, item);
6956                                 if (ret) {
6957                                         err = ret;
6958                                         goto out;
6959                                 }
6960                         } else {
6961                                 map = kmap(page);
6962                                 read_extent_buffer(leaf, map + pg_offset, ptr,
6963                                                    copy_size);
6964                                 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6965                                         memset(map + pg_offset + copy_size, 0,
6966                                                PAGE_CACHE_SIZE - pg_offset -
6967                                                copy_size);
6968                                 }
6969                                 kunmap(page);
6970                         }
6971                         flush_dcache_page(page);
6972                 } else if (create && PageUptodate(page)) {
6973                         BUG();  /* not expected to be hit; the code below is effectively dead */
6974                         if (!trans) {
6975                                 kunmap(page);
6976                                 free_extent_map(em);
6977                                 em = NULL;
6978
6979                                 btrfs_release_path(path);
6980                                 trans = btrfs_join_transaction(root);
6981
6982                                 if (IS_ERR(trans))
6983                                         return ERR_CAST(trans);
6984                                 goto again;
6985                         }
6986                         map = kmap(page);
6987                         write_extent_buffer(leaf, map + pg_offset, ptr,
6988                                             copy_size);
6989                         kunmap(page);
6990                         btrfs_mark_buffer_dirty(leaf);
6991                 }
6992                 set_extent_uptodate(io_tree, em->start,
6993                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
6994                 goto insert;
6995         }
6996 not_found:
6997         em->start = start;
6998         em->orig_start = start;
6999         em->len = len;
7000 not_found_em:
7001         em->block_start = EXTENT_MAP_HOLE;
7002         set_bit(EXTENT_FLAG_VACANCY, &em->flags);
7003 insert:
7004         btrfs_release_path(path);
7005         if (em->start > start || extent_map_end(em) <= start) {
7006                 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
7007                         em->start, em->len, start, len);
7008                 err = -EIO;
7009                 goto out;
7010         }
7011
7012         err = 0;
7013         write_lock(&em_tree->lock);
7014         ret = add_extent_mapping(em_tree, em, 0);
7015         /* it is possible that someone inserted the extent into the tree
7016          * while we had the lock dropped.  It is also possible that
7017          * an overlapping map exists in the tree
7018          */
7019         if (ret == -EEXIST) {
7020                 struct extent_map *existing;
7021
7022                 ret = 0;
7023
7024                 existing = search_extent_mapping(em_tree, start, len);
7025                 /*
7026                  * existing will always be non-NULL, since there must be
7027                  * an extent causing the -EEXIST.
7028                  */
7029                 if (start >= extent_map_end(existing) ||
7030                     start <= existing->start) {
7031                         /*
7032                          * The existing extent map is the one nearest to
7033                          * the [start, start + len) range which overlaps it
7034                          */
7035                         err = merge_extent_mapping(em_tree, existing,
7036                                                    em, start);
7037                         free_extent_map(existing);
7038                         if (err) {
7039                                 free_extent_map(em);
7040                                 em = NULL;
7041                         }
7042                 } else {
7043                         free_extent_map(em);
7044                         em = existing;
7045                         err = 0;
7046                 }
7047         }
7048         write_unlock(&em_tree->lock);
7049 out:
7050
7051         trace_btrfs_get_extent(root, em);
7052
7053         btrfs_free_path(path);
7054         if (trans) {
7055                 ret = btrfs_end_transaction(trans, root);
7056                 if (!err)
7057                         err = ret;
7058         }
7059         if (err) {
7060                 free_extent_map(em);
7061                 return ERR_PTR(err);
7062         }
7063         BUG_ON(!em); /* Error is always set */
7064         return em;
7065 }
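
/*
 * Minimal usage sketch for btrfs_get_extent() (hypothetical caller;
 * "pos" stands for any file offset).  The caller owns the returned
 * reference:
 *
 *	struct extent_map *em;
 *
 *	em = btrfs_get_extent(inode, NULL, 0, pos, root->sectorsize, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	... inspect em->block_start / em->start / em->len ...
 *	free_extent_map(em);
 */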
7066
7067 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
7068                                            size_t pg_offset, u64 start, u64 len,
7069                                            int create)
7070 {
7071         struct extent_map *em;
7072         struct extent_map *hole_em = NULL;
7073         u64 range_start = start;
7074         u64 end;
7075         u64 found;
7076         u64 found_end;
7077         int err = 0;
7078
7079         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7080         if (IS_ERR(em))
7081                 return em;
7082         if (em) {
7083                 /*
7084                  * if our em maps to
7085                  * -  a hole or
7086                  * -  a pre-alloc extent,
7087                  * there might actually be delalloc bytes behind it.
7088                  */
7089                 if (em->block_start != EXTENT_MAP_HOLE &&
7090                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7091                         return em;
7092                 else
7093                         hole_em = em;
7094         }
7095
7096         /* check to see if we've wrapped (len == -1 or similar) */
7097         end = start + len;
7098         if (end < start)
7099                 end = (u64)-1;
7100         else
7101                 end -= 1;
7102
7103         em = NULL;
7104
7105         /* ok, we didn't find anything, let's look for delalloc */
7106         found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
7107                                  end, len, EXTENT_DELALLOC, 1);
7108         found_end = range_start + found;
7109         if (found_end < range_start)
7110                 found_end = (u64)-1;
7111
7112         /*
7113          * we didn't find anything useful, return
7114          * the original results from get_extent()
7115          */
7116         if (range_start > end || found_end <= start) {
7117                 em = hole_em;
7118                 hole_em = NULL;
7119                 goto out;
7120         }
7121
7122         /* adjust range_start to make sure it doesn't go backwards
7123          * from the start the caller passed in
7124          */
7125         range_start = max(start, range_start);
7126         found = found_end - range_start;
7127
7128         if (found > 0) {
7129                 u64 hole_start = start;
7130                 u64 hole_len = len;
7131
7132                 em = alloc_extent_map();
7133                 if (!em) {
7134                         err = -ENOMEM;
7135                         goto out;
7136                 }
7137                 /*
7138                  * when btrfs_get_extent can't find anything it
7139                  * returns one huge hole
7140                  *
7141                  * make sure what it found really fits our range, and
7142                  * adjust to make sure it is based on the start from
7143                  * the caller
7144                  */
7145                 if (hole_em) {
7146                         u64 calc_end = extent_map_end(hole_em);
7147
7148                         if (calc_end <= start || (hole_em->start > end)) {
7149                                 free_extent_map(hole_em);
7150                                 hole_em = NULL;
7151                         } else {
7152                                 hole_start = max(hole_em->start, start);
7153                                 hole_len = calc_end - hole_start;
7154                         }
7155                 }
7156                 em->bdev = NULL;
7157                 if (hole_em && range_start > hole_start) {
7158                         /* our hole starts before our delalloc, so we
7159                          * have to return just the parts of the hole
7160                          * that extend until the delalloc starts
7161                          */
7162                         em->len = min(hole_len,
7163                                       range_start - hole_start);
7164                         em->start = hole_start;
7165                         em->orig_start = hole_start;
7166                         /*
7167                          * don't adjust block start at all,
7168                          * it is fixed at EXTENT_MAP_HOLE
7169                          */
7170                         em->block_start = hole_em->block_start;
7171                         em->block_len = hole_len;
7172                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7173                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7174                 } else {
7175                         em->start = range_start;
7176                         em->len = found;
7177                         em->orig_start = range_start;
7178                         em->block_start = EXTENT_MAP_DELALLOC;
7179                         em->block_len = found;
7180                 }
7181         } else if (hole_em) {
7182                 return hole_em;
7183         }
7184 out:
7185
7186         free_extent_map(hole_em);
7187         if (err) {
7188                 free_extent_map(em);
7189                 return ERR_PTR(err);
7190         }
7191         return em;
7192 }
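
/*
 * Note: the delalloc maps fabricated by btrfs_get_extent_fiemap() use
 * EXTENT_MAP_DELALLOC as block_start, meaning "data exists in memory
 * but has no disk location yet"; the fiemap code reports such ranges
 * as delalloc extents.
 */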
7193
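/*
 * Allocate a fresh on-disk extent for a direct IO write: reserve the
 * space, pin an extent map for the range and queue an ordered extent.
 * On failure the reserved extent is released again.
 */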
7194 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7195                                                   u64 start, u64 len)
7196 {
7197         struct btrfs_root *root = BTRFS_I(inode)->root;
7198         struct extent_map *em;
7199         struct btrfs_key ins;
7200         u64 alloc_hint;
7201         int ret;
7202
7203         alloc_hint = get_extent_allocation_hint(inode, start, len);
7204         ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
7205                                    alloc_hint, &ins, 1, 1);
7206         if (ret)
7207                 return ERR_PTR(ret);
7208
7209         em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
7210                               ins.offset, ins.offset, ins.offset, 0);
7211         if (IS_ERR(em)) {
7212                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7213                 return em;
7214         }
7215
7216         ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
7217                                            ins.offset, ins.offset, 0);
7218         if (ret) {
7219                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7220                 free_extent_map(em);
7221                 return ERR_PTR(ret);
7222         }
7223
7224         return em;
7225 }
7226
7227 /*
7228  * returns 1 if it is safe to write to the extent without cow, < 0 on
7229  * error, and 0 if the block must be cow'd
7230  */
7231 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7232                               u64 *orig_start, u64 *orig_block_len,
7233                               u64 *ram_bytes)
7234 {
7235         struct btrfs_trans_handle *trans;
7236         struct btrfs_path *path;
7237         int ret;
7238         struct extent_buffer *leaf;
7239         struct btrfs_root *root = BTRFS_I(inode)->root;
7240         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7241         struct btrfs_file_extent_item *fi;
7242         struct btrfs_key key;
7243         u64 disk_bytenr;
7244         u64 backref_offset;
7245         u64 extent_end;
7246         u64 num_bytes;
7247         int slot;
7248         int found_type;
7249         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7250
7251         path = btrfs_alloc_path();
7252         if (!path)
7253                 return -ENOMEM;
7254
7255         ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7256                                        offset, 0);
7257         if (ret < 0)
7258                 goto out;
7259
7260         slot = path->slots[0];
7261         if (ret == 1) {
7262                 if (slot == 0) {
7263                         /* can't find the item, must cow */
7264                         ret = 0;
7265                         goto out;
7266                 }
7267                 slot--;
7268         }
7269         ret = 0;
7270         leaf = path->nodes[0];
7271         btrfs_item_key_to_cpu(leaf, &key, slot);
7272         if (key.objectid != btrfs_ino(inode) ||
7273             key.type != BTRFS_EXTENT_DATA_KEY) {
7274                 /* not our file or wrong item type, must cow */
7275                 goto out;
7276         }
7277
7278         if (key.offset > offset) {
7279                 /* Wrong offset, must cow */
7280                 goto out;
7281         }
7282
7283         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7284         found_type = btrfs_file_extent_type(leaf, fi);
7285         if (found_type != BTRFS_FILE_EXTENT_REG &&
7286             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7287                 /* not a regular extent, must cow */
7288                 goto out;
7289         }
7290
7291         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7292                 goto out;
7293
7294         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7295         if (extent_end <= offset)
7296                 goto out;
7297
7298         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7299         if (disk_bytenr == 0)
7300                 goto out;
7301
7302         if (btrfs_file_extent_compression(leaf, fi) ||
7303             btrfs_file_extent_encryption(leaf, fi) ||
7304             btrfs_file_extent_other_encoding(leaf, fi))
7305                 goto out;
7306
7307         backref_offset = btrfs_file_extent_offset(leaf, fi);
7308
7309         if (orig_start) {
7310                 *orig_start = key.offset - backref_offset;
7311                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7312                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7313         }
7314
7315         if (btrfs_extent_readonly(root, disk_bytenr))
7316                 goto out;
7317
7318         num_bytes = min(offset + *len, extent_end) - offset;
7319         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7320                 u64 range_end;
7321
7322                 range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
7323                 ret = test_range_bit(io_tree, offset, range_end,
7324                                      EXTENT_DELALLOC, 0, NULL);
7325                 if (ret) {
7326                         ret = -EAGAIN;
7327                         goto out;
7328                 }
7329         }
7330
7331         btrfs_release_path(path);
7332
7333         /*
7334          * look for other files referencing this extent, if we
7335          * find any we must cow
7336          */
7337         trans = btrfs_join_transaction(root);
7338         if (IS_ERR(trans)) {
7339                 ret = 0;
7340                 goto out;
7341         }
7342
7343         ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
7344                                     key.offset - backref_offset, disk_bytenr);
7345         btrfs_end_transaction(trans, root);
7346         if (ret) {
7347                 ret = 0;
7348                 goto out;
7349         }
7350
7351         /*
7352          * adjust disk_bytenr and num_bytes to cover just the bytes
7353          * in this extent we are about to write.  If there
7354          * are any csums in that range we have to cow in order
7355          * to keep the csums correct
7356          */
7357         disk_bytenr += backref_offset;
7358         disk_bytenr += offset - key.offset;
7359         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
7360                 goto out;
7361         /*
7362          * all of the above have passed, it is safe to overwrite this extent
7363          * without cow
7364          */
7365         *len = num_bytes;
7366         ret = 1;
7367 out:
7368         btrfs_free_path(path);
7369         return ret;
7370 }
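
/*
 * Usage sketch for can_nocow_extent() (illustrative, mirroring the
 * direct IO caller further below); len is both input and output and is
 * trimmed to the verified range:
 *
 *	u64 len = ..., orig_start, orig_block_len, ram_bytes;
 *
 *	if (can_nocow_extent(inode, start, &len, &orig_start,
 *			     &orig_block_len, &ram_bytes) == 1) {
 *		... safe to write these len bytes in place ...
 *	}
 */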
7371
7372 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7373 {
7374         struct radix_tree_root *root = &inode->i_mapping->page_tree;
7375         int found = false;
7376         void **pagep = NULL;
7377         struct page *page = NULL;
7378         unsigned long start_idx;
7379         unsigned long end_idx;
7380
7381         start_idx = start >> PAGE_CACHE_SHIFT;
7382
7383         /*
7384          * end is the last byte in the last page.  end == start is legal
7385          */
7386         end_idx = end >> PAGE_CACHE_SHIFT;
7387
7388         rcu_read_lock();
7389
7390         /* Most of the code in this while loop is lifted from
7391          * find_get_page.  It's been modified to begin searching from a
7392          * page and return just the first page found in that range.  If the
7393          * found idx is less than or equal to the end idx then we know that
7394          * a page exists.  If no pages are found or if those pages are
7395          * outside of the range then we're fine (yay!) */
7396         while (page == NULL &&
7397                radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
7398                 page = radix_tree_deref_slot(pagep);
7399                 if (unlikely(!page))
7400                         break;
7401
7402                 if (radix_tree_exception(page)) {
7403                         if (radix_tree_deref_retry(page)) {
7404                                 page = NULL;
7405                                 continue;
7406                         }
7407                         /*
7408                          * Otherwise, shmem/tmpfs must be storing a swap entry
7409                          * here as an exceptional entry: so return it without
7410                          * attempting to raise page count.
7411                          */
7412                         page = NULL;
7413                         break; /* TODO: Is this relevant for this use case? */
7414                 }
7415
7416                 if (!page_cache_get_speculative(page)) {
7417                         page = NULL;
7418                         continue;
7419                 }
7420
7421                 /*
7422                  * Has the page moved?
7423                  * This is part of the lockless pagecache protocol. See
7424                  * include/linux/pagemap.h for details.
7425                  */
7426                 if (unlikely(page != *pagep)) {
7427                         page_cache_release(page);
7428                         page = NULL;
7429                 }
7430         }
7431
7432         if (page) {
7433                 if (page->index <= end_idx)
7434                         found = true;
7435                 page_cache_release(page);
7436         }
7437
7438         rcu_read_unlock();
7439         return found;
7440 }
7441
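/*
 * Lock [lockstart, lockend] in the io tree for direct IO.  Wait out any
 * ordered extents in the range and, for writes, flush and invalidate
 * any buffered pages that still overlap it, retrying until the range is
 * clean or an error occurs.  Returns with the range locked on success.
 */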
7442 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7443                               struct extent_state **cached_state, int writing)
7444 {
7445         struct btrfs_ordered_extent *ordered;
7446         int ret = 0;
7447
7448         while (1) {
7449                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7450                                  0, cached_state);
7451                 /*
7452                  * We're concerned with the entire range that we're going to be
7453                  * doing DIO to, so we need to make sure there are no ordered
7454                  * extents in this range.
7455                  */
7456                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
7457                                                      lockend - lockstart + 1);
7458
7459                 /*
7460                  * We need to make sure there are no buffered pages in this
7461                  * range either, we could have raced between the invalidate in
7462                  * generic_file_direct_write and locking the extent.  The
7463                  * invalidate needs to happen so that reads after a write do not
7464                  * get stale data.
7465                  */
7466                 if (!ordered &&
7467                     (!writing ||
7468                      !btrfs_page_exists_in_range(inode, lockstart, lockend)))
7469                         break;
7470
7471                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7472                                      cached_state, GFP_NOFS);
7473
7474                 if (ordered) {
7475                         btrfs_start_ordered_extent(inode, ordered, 1);
7476                         btrfs_put_ordered_extent(ordered);
7477                 } else {
7478                         /* Screw you mmap */
7479                         ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
7480                         if (ret)
7481                                 break;
7482                         ret = filemap_fdatawait_range(inode->i_mapping,
7483                                                       lockstart,
7484                                                       lockend);
7485                         if (ret)
7486                                 break;
7487
7488                         /*
7489                          * If we found a page that couldn't be invalidated just
7490                          * fall back to buffered.
7491                          */
7492                         ret = invalidate_inode_pages2_range(inode->i_mapping,
7493                                         lockstart >> PAGE_CACHE_SHIFT,
7494                                         lockend >> PAGE_CACHE_SHIFT);
7495                         if (ret)
7496                                 break;
7497                 }
7498
7499                 cond_resched();
7500         }
7501
7502         return ret;
7503 }
7504
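/*
 * Build a pinned extent map for [start, start + len), drop any cached
 * mappings that overlap the range, and insert the new map into the
 * inode's extent map tree.  PREALLOC ordered extents are additionally
 * flagged as FILLING.
 */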
7505 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
7506                                            u64 len, u64 orig_start,
7507                                            u64 block_start, u64 block_len,
7508                                            u64 orig_block_len, u64 ram_bytes,
7509                                            int type)
7510 {
7511         struct extent_map_tree *em_tree;
7512         struct extent_map *em;
7513         struct btrfs_root *root = BTRFS_I(inode)->root;
7514         int ret;
7515
7516         em_tree = &BTRFS_I(inode)->extent_tree;
7517         em = alloc_extent_map();
7518         if (!em)
7519                 return ERR_PTR(-ENOMEM);
7520
7521         em->start = start;
7522         em->orig_start = orig_start;
7523         em->mod_start = start;
7524         em->mod_len = len;
7525         em->len = len;
7526         em->block_len = block_len;
7527         em->block_start = block_start;
7528         em->bdev = root->fs_info->fs_devices->latest_bdev;
7529         em->orig_block_len = orig_block_len;
7530         em->ram_bytes = ram_bytes;
7531         em->generation = -1;
7532         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7533         if (type == BTRFS_ORDERED_PREALLOC)
7534                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7535
7536         do {
7537                 btrfs_drop_extent_cache(inode, em->start,
7538                                 em->start + em->len - 1, 0);
7539                 write_lock(&em_tree->lock);
7540                 ret = add_extent_mapping(em_tree, em, 1);
7541                 write_unlock(&em_tree->lock);
7542         } while (ret == -EEXIST);
7543
7544         if (ret) {
7545                 free_extent_map(em);
7546                 return ERR_PTR(ret);
7547         }
7548
7549         return em;
7550 }
7551
7552 struct btrfs_dio_data {
7553         u64 outstanding_extents;        /* extents still reserved for this dio */
7554         u64 reserve;                    /* reserved data space (bytes) left */
7555 };
7556
7557 static void adjust_dio_outstanding_extents(struct inode *inode,
7558                                            struct btrfs_dio_data *dio_data,
7559                                            const u64 len)
7560 {
7561         unsigned num_extents;
7562
7563         num_extents = (unsigned) div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
7564                                            BTRFS_MAX_EXTENT_SIZE);
7565         /*
7566          * If we have an outstanding_extents count still set then we're
7567          * within our reservation, otherwise we need to adjust our inode
7568          * counter appropriately.
7569          */
7570         if (dio_data->outstanding_extents >= num_extents) {
7571                 dio_data->outstanding_extents -= num_extents;
7572         } else {
7573                 /*
7574                  * If dio write length has been split due to no large enough
7575                  * contiguous space, we need to compensate our inode counter
7576                  * appropriately.
7577                  */
7578                 u64 num_needed = num_extents - dio_data->outstanding_extents;
7579
7580                 spin_lock(&BTRFS_I(inode)->lock);
7581                 BTRFS_I(inode)->outstanding_extents += num_needed;
7582                 spin_unlock(&BTRFS_I(inode)->lock);
7583         }
7584 }
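
/*
 * Worked example for adjust_dio_outstanding_extents() (assuming
 * BTRFS_MAX_EXTENT_SIZE is 128M): a 300M dio write rounds up to
 * num_extents = 3.  If the reservation still tracks at least 3
 * outstanding extents we simply consume them; otherwise the shortfall
 * is added back to the inode's counter.
 */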
7585
7586 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7587                                    struct buffer_head *bh_result, int create)
7588 {
7589         struct extent_map *em;
7590         struct btrfs_root *root = BTRFS_I(inode)->root;
7591         struct extent_state *cached_state = NULL;
7592         struct btrfs_dio_data *dio_data = NULL;
7593         u64 start = iblock << inode->i_blkbits;
7594         u64 lockstart, lockend;
7595         u64 len = bh_result->b_size;
7596         int unlock_bits = EXTENT_LOCKED;
7597         int ret = 0;
7598
7599         if (create)
7600                 unlock_bits |= EXTENT_DIRTY;
7601         else
7602                 len = min_t(u64, len, root->sectorsize);
7603
7604         lockstart = start;
7605         lockend = start + len - 1;
7606
7607         if (current->journal_info) {
7608                 /*
7609                  * Need to pull our outstanding extents and set journal_info to NULL so
7610                  * that anything that needs to check if there's a transaction doesn't get
7611                  * confused.
7612                  */
7613                 dio_data = current->journal_info;
7614                 current->journal_info = NULL;
7615         }
7616
7617         /*
7618          * If this errors out it's because we couldn't invalidate pagecache for
7619          * this range and we need to fallback to buffered.
7620          */
7621         if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7622                                create)) {
7623                 ret = -ENOTBLK;
7624                 goto err;
7625         }
7626
7627         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
7628         if (IS_ERR(em)) {
7629                 ret = PTR_ERR(em);
7630                 goto unlock_err;
7631         }
7632
7633         /*
7634          * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
7635          * io.  INLINE is special, and we could probably kludge it in here, but
7636  * it's still buffered so for safety let's just fall back to the generic
7637          * buffered path.
7638          *
7639          * For COMPRESSED we _have_ to read the entire extent in so we can
7640          * decompress it, so there will be buffering required no matter what we
7641          * do, so go ahead and fallback to buffered.
7642          *
7643  * We return -ENOTBLK because that's what makes DIO go ahead and go back
7644          * to buffered IO.  Don't blame me, this is the price we pay for using
7645          * the generic code.
7646          */
7647         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7648             em->block_start == EXTENT_MAP_INLINE) {
7649                 free_extent_map(em);
7650                 ret = -ENOTBLK;
7651                 goto unlock_err;
7652         }
7653
7654         /* Just a good old-fashioned hole, return */
7655         if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7656                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7657                 free_extent_map(em);
7658                 goto unlock_err;
7659         }
7660
7661         /*
7662          * We don't allocate a new extent in the following cases
7663          *
7664          * 1) The inode is marked as NODATACOW.  In this case we'll just use the
7665          * existing extent.
7666          * 2) The extent is marked as PREALLOC.  We're good to go here and can
7667          * just use the extent.
7668          *
7669          */
7670         if (!create) {
7671                 len = min(len, em->len - (start - em->start));
7672                 lockstart = start + len;
7673                 goto unlock;
7674         }
7675
7676         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7677             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7678              em->block_start != EXTENT_MAP_HOLE)) {
7679                 int type;
7680                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7681
7682                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7683                         type = BTRFS_ORDERED_PREALLOC;
7684                 else
7685                         type = BTRFS_ORDERED_NOCOW;
7686                 len = min(len, em->len - (start - em->start));
7687                 block_start = em->block_start + (start - em->start);
7688
7689                 if (can_nocow_extent(inode, start, &len, &orig_start,
7690                                      &orig_block_len, &ram_bytes) == 1) {
7691                         if (type == BTRFS_ORDERED_PREALLOC) {
7692                                 free_extent_map(em);
7693                                 em = create_pinned_em(inode, start, len,
7694                                                        orig_start,
7695                                                        block_start, len,
7696                                                        orig_block_len,
7697                                                        ram_bytes, type);
7698                                 if (IS_ERR(em)) {
7699                                         ret = PTR_ERR(em);
7700                                         goto unlock_err;
7701                                 }
7702                         }
7703
7704                         ret = btrfs_add_ordered_extent_dio(inode, start,
7705                                            block_start, len, len, type);
7706                         if (ret) {
7707                                 free_extent_map(em);
7708                                 goto unlock_err;
7709                         }
7710                         goto unlock;
7711                 }
7712         }
7713
7714         /*
7715          * this will cow the extent, reset the len in case we changed
7716          * it above
7717          */
7718         len = bh_result->b_size;
7719         free_extent_map(em);
7720         em = btrfs_new_extent_direct(inode, start, len);
7721         if (IS_ERR(em)) {
7722                 ret = PTR_ERR(em);
7723                 goto unlock_err;
7724         }
7725         len = min(len, em->len - (start - em->start));
7726 unlock:
7727         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7728                 inode->i_blkbits;
7729         bh_result->b_size = len;
7730         bh_result->b_bdev = em->bdev;
7731         set_buffer_mapped(bh_result);
7732         if (create) {
7733                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7734                         set_buffer_new(bh_result);
7735
7736                 /*
7737                  * Need to update the i_size under the extent lock so buffered
7738                  * readers will get the updated i_size when we unlock.
7739                  */
7740                 if (start + len > i_size_read(inode))
7741                         i_size_write(inode, start + len);
7742
7743                 adjust_dio_outstanding_extents(inode, dio_data, len);
7744                 btrfs_free_reserved_data_space(inode, start, len);
7745                 WARN_ON(dio_data->reserve < len);
7746                 dio_data->reserve -= len;
7747                 current->journal_info = dio_data;
7748         }
7749
7750         /*
7751          * In the case of write we need to clear and unlock the entire range,
7752          * in the case of read we need to unlock only the end area that we
7753          * aren't using if there is any left over space.
7754          */
7755         if (lockstart < lockend) {
7756                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7757                                  lockend, unlock_bits, 1, 0,
7758                                  &cached_state, GFP_NOFS);
7759         } else {
7760                 free_extent_state(cached_state);
7761         }
7762
7763         free_extent_map(em);
7764
7765         return 0;
7766
7767 unlock_err:
7768         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7769                          unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7770 err:
7771         if (dio_data)
7772                 current->journal_info = dio_data;
7773         /*
7774          * Compensate the delalloc release we do in btrfs_direct_IO() when we
7775          * write less data than expected, so that we don't underflow our inode's
7776          * outstanding extents counter.
7777          */
7778         if (create && dio_data)
7779                 adjust_dio_outstanding_extents(inode, dio_data, len);
7780
7781         return ret;
7782 }
7783
7784 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7785                                         int rw, int mirror_num)
7786 {
7787         struct btrfs_root *root = BTRFS_I(inode)->root;
7788         int ret;
7789
7790         BUG_ON(rw & REQ_WRITE);
7791
7792         bio_get(bio);
7793
7794         ret = btrfs_bio_wq_end_io(root->fs_info, bio,
7795                                   BTRFS_WQ_ENDIO_DIO_REPAIR);
7796         if (ret)
7797                 goto err;
7798
7799         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
7800 err:
7801         bio_put(bio);
7802         return ret;
7803 }
7804
7805 static int btrfs_check_dio_repairable(struct inode *inode,
7806                                       struct bio *failed_bio,
7807                                       struct io_failure_record *failrec,
7808                                       int failed_mirror)
7809 {
7810         int num_copies;
7811
7812         num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
7813                                       failrec->logical, failrec->len);
7814         if (num_copies == 1) {
7815                 /*
7816                  * we only have a single copy of the data, so don't bother with
7817                  * all the retry and error correction code that follows. no
7818                  * matter what the error is, it is very likely to persist.
7819                  */
7820                 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
7821                          num_copies, failrec->this_mirror, failed_mirror);
7822                 return 0;
7823         }
7824
7825         failrec->failed_mirror = failed_mirror;
7826         failrec->this_mirror++;
7827         if (failrec->this_mirror == failed_mirror)
7828                 failrec->this_mirror++;
7829
7830         if (failrec->this_mirror > num_copies) {
7831                 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
7832                          num_copies, failrec->this_mirror, failed_mirror);
7833                 return 0;
7834         }
7835
7836         return 1;
7837 }
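
/*
 * Example: if the incremented this_mirror lands on failed_mirror it is
 * skipped, so with num_copies = 2 and failed_mirror = 1 the retry is
 * sent to mirror 2.  Once this_mirror exceeds num_copies every copy
 * has been tried and 0 (not repairable) is returned.
 */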
7838
7839 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
7840                           struct page *page, u64 start, u64 end,
7841                           int failed_mirror, bio_end_io_t *repair_endio,
7842                           void *repair_arg)
7843 {
7844         struct io_failure_record *failrec;
7845         struct bio *bio;
7846         int isector;
7847         int read_mode;
7848         int ret;
7849
7850         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
7851
7852         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7853         if (ret)
7854                 return ret;
7855
7856         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7857                                          failed_mirror);
7858         if (!ret) {
7859                 free_io_failure(inode, failrec);
7860                 return -EIO;
7861         }
7862
7863         if (failed_bio->bi_vcnt > 1)
7864                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
7865         else
7866                 read_mode = READ_SYNC;
7867
7868         isector = start - btrfs_io_bio(failed_bio)->logical;
7869         isector >>= inode->i_sb->s_blocksize_bits;
7870         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7871                                       0, isector, repair_endio, repair_arg);
7872         if (!bio) {
7873                 free_io_failure(inode, failrec);
7874                 return -EIO;
7875         }
7876
7877         btrfs_debug(BTRFS_I(inode)->root->fs_info,
7878                     "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
7879                     read_mode, failrec->this_mirror, failrec->in_validation);
7880
7881         ret = submit_dio_repair_bio(inode, bio, read_mode,
7882                                     failrec->this_mirror);
7883         if (ret) {
7884                 free_io_failure(inode, failrec);
7885                 bio_put(bio);
7886         }
7887
7888         return ret;
7889 }
7890
7891 struct btrfs_retry_complete {
7892         struct completion done;
7893         struct inode *inode;
7894         u64 start;
7895         int uptodate;
7896 };
7897
7898 static void btrfs_retry_endio_nocsum(struct bio *bio)
7899 {
7900         struct btrfs_retry_complete *done = bio->bi_private;
7901         struct bio_vec *bvec;
7902         int i;
7903
7904         if (bio->bi_error)
7905                 goto end;
7906
7907         done->uptodate = 1;
7908         bio_for_each_segment_all(bvec, bio, i)
7909                 clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
7910 end:
7911         complete(&done->done);
7912         bio_put(bio);
7913 }
7914
7915 static int __btrfs_correct_data_nocsum(struct inode *inode,
7916                                        struct btrfs_io_bio *io_bio)
7917 {
7918         struct bio_vec *bvec;
7919         struct btrfs_retry_complete done;
7920         u64 start;
7921         int i;
7922         int ret;
7923
7924         start = io_bio->logical;
7925         done.inode = inode;
7926
7927         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7928 try_again:
7929                 done.uptodate = 0;
7930                 done.start = start;
7931                 init_completion(&done.done);
7932
7933                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7934                                      start + bvec->bv_len - 1,
7935                                      io_bio->mirror_num,
7936                                      btrfs_retry_endio_nocsum, &done);
7937                 if (ret)
7938                         return ret;
7939
7940                 wait_for_completion(&done.done);
7941
7942                 if (!done.uptodate) {
7943                         /* We might have another mirror, so try again */
7944                         goto try_again;
7945                 }
7946
7947                 start += bvec->bv_len;
7948         }
7949
7950         return 0;
7951 }
7952
7953 static void btrfs_retry_endio(struct bio *bio)
7954 {
7955         struct btrfs_retry_complete *done = bio->bi_private;
7956         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7957         struct bio_vec *bvec;
7958         int uptodate;
7959         int ret;
7960         int i;
7961
7962         if (bio->bi_error)
7963                 goto end;
7964
7965         uptodate = 1;
7966         bio_for_each_segment_all(bvec, bio, i) {
7967                 ret = __readpage_endio_check(done->inode, io_bio, i,
7968                                              bvec->bv_page, 0,
7969                                              done->start, bvec->bv_len);
7970                 if (!ret)
7971                         clean_io_failure(done->inode, done->start,
7972                                          bvec->bv_page, 0);
7973                 else
7974                         uptodate = 0;
7975         }
7976
7977         done->uptodate = uptodate;
7978 end:
7979         complete(&done->done);
7980         bio_put(bio);
7981 }
7982
7983 static int __btrfs_subio_endio_read(struct inode *inode,
7984                                     struct btrfs_io_bio *io_bio, int err)
7985 {
7986         struct bio_vec *bvec;
7987         struct btrfs_retry_complete done;
7988         u64 start;
7989         u64 offset = 0;
7990         int i;
7991         int ret;
7992
7993         err = 0;
7994         start = io_bio->logical;
7995         done.inode = inode;
7996
7997         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7998                 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
7999                                              0, start, bvec->bv_len);
8000                 if (likely(!ret))
8001                         goto next;
8002 try_again:
8003                 done.uptodate = 0;
8004                 done.start = start;
8005                 init_completion(&done.done);
8006
8007                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
8008                                      start + bvec->bv_len - 1,
8009                                      io_bio->mirror_num,
8010                                      btrfs_retry_endio, &done);
8011                 if (ret) {
8012                         err = ret;
8013                         goto next;
8014                 }
8015
8016                 wait_for_completion(&done.done);
8017
8018                 if (!done.uptodate) {
8019                         /* We might have another mirror, so try again */
8020                         goto try_again;
8021                 }
8022 next:
8023                 offset += bvec->bv_len;
8024                 start += bvec->bv_len;
8025         }
8026
8027         return err;
8028 }
8029
8030 static int btrfs_subio_endio_read(struct inode *inode,
8031                                   struct btrfs_io_bio *io_bio, int err)
8032 {
8033         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8034
8035         if (skip_csum) {
8036                 if (unlikely(err))
8037                         return __btrfs_correct_data_nocsum(inode, io_bio);
8038                 else
8039                         return 0;
8040         } else {
8041                 return __btrfs_subio_endio_read(inode, io_bio, err);
8042         }
8043 }
8044
8045 static void btrfs_endio_direct_read(struct bio *bio)
8046 {
8047         struct btrfs_dio_private *dip = bio->bi_private;
8048         struct inode *inode = dip->inode;
8049         struct bio *dio_bio;
8050         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8051         int err = bio->bi_error;
8052
8053         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
8054                 err = btrfs_subio_endio_read(inode, io_bio, err);
8055
8056         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
8057                       dip->logical_offset + dip->bytes - 1);
8058         dio_bio = dip->dio_bio;
8059
8060         kfree(dip);
8061
8062         dio_bio->bi_error = bio->bi_error;
8063         dio_end_io(dio_bio, bio->bi_error);
8064
8065         if (io_bio->end_io)
8066                 io_bio->end_io(io_bio, err);
8067         bio_put(bio);
8068 }
8069
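/*
 * Final end_io for a direct write.  A single dio can cover several
 * ordered extents, so btrfs_dec_test_first_ordered_pending() is called
 * in a loop, advancing ordered_offset until the whole [logical_offset,
 * logical_offset + bytes) range has been accounted for.
 */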
8070 static void btrfs_endio_direct_write(struct bio *bio)
8071 {
8072         struct btrfs_dio_private *dip = bio->bi_private;
8073         struct inode *inode = dip->inode;
8074         struct btrfs_root *root = BTRFS_I(inode)->root;
8075         struct btrfs_ordered_extent *ordered = NULL;
8076         u64 ordered_offset = dip->logical_offset;
8077         u64 ordered_bytes = dip->bytes;
8078         struct bio *dio_bio;
8079         int ret;
8080
8081 again:
8082         ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
8083                                                    &ordered_offset,
8084                                                    ordered_bytes,
8085                                                    !bio->bi_error);
8086         if (!ret)
8087                 goto out_test;
8088
8089         btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
8090                         finish_ordered_fn, NULL, NULL);
8091         btrfs_queue_work(root->fs_info->endio_write_workers,
8092                          &ordered->work);
8093 out_test:
8094         /*
8095          * our bio might span multiple ordered extents.  If we haven't
8096          * completed the accounting for the whole dio, go back and try again
8097          */
8098         if (ordered_offset < dip->logical_offset + dip->bytes) {
8099                 ordered_bytes = dip->logical_offset + dip->bytes -
8100                         ordered_offset;
8101                 ordered = NULL;
8102                 goto again;
8103         }
8104         dio_bio = dip->dio_bio;
8105
8106         kfree(dip);
8107
8108         dio_bio->bi_error = bio->bi_error;
8109         dio_end_io(dio_bio, bio->bi_error);
8110         bio_put(bio);
8111 }
8112
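/*
 * "Start" half of the async checksumming for direct writes: compute the
 * data csums for the bio before the "done" half
 * (__btrfs_submit_bio_done) maps it to the physical device.
 */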
8113 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
8114                                     struct bio *bio, int mirror_num,
8115                                     unsigned long bio_flags, u64 offset)
8116 {
8117         int ret;
8118         struct btrfs_root *root = BTRFS_I(inode)->root;
8119         ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
8120         BUG_ON(ret); /* -ENOMEM */
8121         return 0;
8122 }
8123
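/*
 * end_io for each split bio of a dio.  Errors are sticky in dip->errors;
 * only the bio that drops pending_bios to zero completes (or fails) the
 * original bio on behalf of the whole dio.
 */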
8124 static void btrfs_end_dio_bio(struct bio *bio)
8125 {
8126         struct btrfs_dio_private *dip = bio->bi_private;
8127         int err = bio->bi_error;
8128
8129         if (err)
8130                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8131                            "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
8132                            btrfs_ino(dip->inode), bio->bi_rw,
8133                            (unsigned long long)bio->bi_iter.bi_sector,
8134                            bio->bi_iter.bi_size, err);
8135
8136         if (dip->subio_endio)
8137                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8138
8139         if (err) {
8140                 dip->errors = 1;
8141
8142                 /*
8143                  * Before the atomic variable goes to zero, we must make sure
8144                  * dip->errors is perceived to be set.
8145                  */
8146                 smp_mb__before_atomic();
8147         }
8148
8149         /* if there are more bios still pending for this dio, just exit */
8150         if (!atomic_dec_and_test(&dip->pending_bios))
8151                 goto out;
8152
8153         if (dip->errors) {
8154                 bio_io_error(dip->orig_bio);
8155         } else {
8156                 dip->dio_bio->bi_error = 0;
8157                 bio_endio(dip->orig_bio);
8158         }
8159 out:
8160         bio_put(bio);
8161 }
8162
8163 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
8164                                        u64 first_sector, gfp_t gfp_flags)
8165 {
8166         struct bio *bio;
8167         bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
8168         if (bio)
8169                 bio_associate_current(bio);
8170         return bio;
8171 }
8172
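/*
 * The csums for the whole dio are looked up once, against the original
 * bio, and split bios simply index into that array.  The offset math is
 * in filesystem blocks over u32 (crc32c) entries: e.g., assuming a 4K
 * block size, a split bio starting 32K past logical_offset would use
 * orig_io_bio->csum + (32K >> 12) * sizeof(u32), i.e. entry 8.
 */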
8173 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
8174                                                  struct inode *inode,
8175                                                  struct btrfs_dio_private *dip,
8176                                                  struct bio *bio,
8177                                                  u64 file_offset)
8178 {
8179         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8180         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8181         int ret;
8182
8183         /*
8184          * We load all the csum data we need when we submit
8185          * the first bio to reduce the csum tree search and
8186          * contention.
8187          */
8188         if (dip->logical_offset == file_offset) {
8189                 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
8190                                                 file_offset);
8191                 if (ret)
8192                         return ret;
8193         }
8194
8195         if (bio == dip->orig_bio)
8196                 return 0;
8197
8198         file_offset -= dip->logical_offset;
8199         file_offset >>= inode->i_sb->s_blocksize_bits;
8200         io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8201
8202         return 0;
8203 }
8204
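/*
 * Submit one (possibly split) dio bio.  Reads get an end_io workqueue
 * and have their csums bound from the original bio; writes either go
 * through the async csum workers or are csummed inline, mirroring the
 * buffered write path.  skip_sum short-circuits straight to mapping.
 */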
8205 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
8206                                          int rw, u64 file_offset, int skip_sum,
8207                                          int async_submit)
8208 {
8209         struct btrfs_dio_private *dip = bio->bi_private;
8210         int write = rw & REQ_WRITE;
8211         struct btrfs_root *root = BTRFS_I(inode)->root;
8212         int ret;
8213
8214         if (async_submit)
8215                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8216
8217         bio_get(bio);
8218
8219         if (!write) {
8220                 ret = btrfs_bio_wq_end_io(root->fs_info, bio,
8221                                 BTRFS_WQ_ENDIO_DATA);
8222                 if (ret)
8223                         goto err;
8224         }
8225
8226         if (skip_sum)
8227                 goto map;
8228
8229         if (write && async_submit) {
8230                 ret = btrfs_wq_submit_bio(root->fs_info,
8231                                    inode, rw, bio, 0, 0,
8232                                    file_offset,
8233                                    __btrfs_submit_bio_start_direct_io,
8234                                    __btrfs_submit_bio_done);
8235                 goto err;
8236         } else if (write) {
8237                 /*
8238                  * If we aren't doing async submit, calculate the csum of the
8239                  * bio now.
8240                  */
8241                 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
8242                 if (ret)
8243                         goto err;
8244         } else {
8245                 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
8246                                                      file_offset);
8247                 if (ret)
8248                         goto err;
8249         }
8250 map:
8251         ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
8252 err:
8253         bio_put(bio);
8254         return ret;
8255 }
8256
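/*
 * Split the original dio bio so that no piece crosses a chunk/stripe
 * boundary reported by btrfs_map_block().  If the whole bio fits in one
 * mapping it is submitted as-is; otherwise pages are packed into new
 * bios of at most map_length bytes each.  pending_bios is raised before
 * each submit so the dip cannot be freed under us.
 */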
8257 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
8258                                     int skip_sum)
8259 {
8260         struct inode *inode = dip->inode;
8261         struct btrfs_root *root = BTRFS_I(inode)->root;
8262         struct bio *bio;
8263         struct bio *orig_bio = dip->orig_bio;
8264         struct bio_vec *bvec = orig_bio->bi_io_vec;
8265         u64 start_sector = orig_bio->bi_iter.bi_sector;
8266         u64 file_offset = dip->logical_offset;
8267         u64 submit_len = 0;
8268         u64 map_length;
8269         int nr_pages = 0;
8270         int ret;
8271         int async_submit = 0;
8272
8273         map_length = orig_bio->bi_iter.bi_size;
8274         ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
8275                               &map_length, NULL, 0);
8276         if (ret)
8277                 return -EIO;
8278
8279         if (map_length >= orig_bio->bi_iter.bi_size) {
8280                 bio = orig_bio;
8281                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8282                 goto submit;
8283         }
8284
8285         /* async crcs make it difficult to collect full stripe writes. */
8286         if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8287                 async_submit = 0;
8288         else
8289                 async_submit = 1;
8290
8291         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
8292         if (!bio)
8293                 return -ENOMEM;
8294
8295         bio->bi_private = dip;
8296         bio->bi_end_io = btrfs_end_dio_bio;
8297         btrfs_io_bio(bio)->logical = file_offset;
8298         atomic_inc(&dip->pending_bios);
8299
8300         while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
8301                 if (map_length < submit_len + bvec->bv_len ||
8302                     bio_add_page(bio, bvec->bv_page, bvec->bv_len,
8303                                  bvec->bv_offset) < bvec->bv_len) {
8304                         /*
8305                          * Increment the count before we submit the bio so
8306                          * we know the end IO handler won't run before we
8307                          * have incremented it. Otherwise, the dip might get
8308                          * freed before we're done setting it up.
8309                          */
8310                         atomic_inc(&dip->pending_bios);
8311                         ret = __btrfs_submit_dio_bio(bio, inode, rw,
8312                                                      file_offset, skip_sum,
8313                                                      async_submit);
8314                         if (ret) {
8315                                 bio_put(bio);
8316                                 atomic_dec(&dip->pending_bios);
8317                                 goto out_err;
8318                         }
8319
8320                         start_sector += submit_len >> 9;
8321                         file_offset += submit_len;
8322
8323                         submit_len = 0;
8324                         nr_pages = 0;
8325
8326                         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
8327                                                   start_sector, GFP_NOFS);
8328                         if (!bio)
8329                                 goto out_err;
8330                         bio->bi_private = dip;
8331                         bio->bi_end_io = btrfs_end_dio_bio;
8332                         btrfs_io_bio(bio)->logical = file_offset;
8333
8334                         map_length = orig_bio->bi_iter.bi_size;
8335                         ret = btrfs_map_block(root->fs_info, rw,
8336                                               start_sector << 9,
8337                                               &map_length, NULL, 0);
8338                         if (ret) {
8339                                 bio_put(bio);
8340                                 goto out_err;
8341                         }
8342                 } else {
8343                         submit_len += bvec->bv_len;
8344                         nr_pages++;
8345                         bvec++;
8346                 }
8347         }
8348
8349 submit:
8350         ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
8351                                      async_submit);
8352         if (!ret)
8353                 return 0;
8354
8355         bio_put(bio);
8356 out_err:
8357         dip->errors = 1;
8358         /*
8359          * Before the atomic variable goes to zero, we must
8360          * make sure dip->errors is perceived to be set.
8361          */
8362         smp_mb__before_atomic();
8363         if (atomic_dec_and_test(&dip->pending_bios))
8364                 bio_io_error(dip->orig_bio);
8365
8366         /* bio_endio() will handle the error, so we needn't return it */
8367         return 0;
8368 }
8369
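/*
 * Entry point from the generic direct-IO code: clone the VFS bio, hang
 * our private state (the dip) off the clone and submit it.  On success
 * the end_io callbacks own all cleanup; the free_ordered label below
 * documents the partial-failure cases.
 */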
8370 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8371                                 struct inode *inode, loff_t file_offset)
8372 {
8373         struct btrfs_dio_private *dip = NULL;
8374         struct bio *io_bio = NULL;
8375         struct btrfs_io_bio *btrfs_bio;
8376         int skip_sum;
8377         int write = rw & REQ_WRITE;
8378         int ret = 0;
8379
8380         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8381
8382         io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
8383         if (!io_bio) {
8384                 ret = -ENOMEM;
8385                 goto free_ordered;
8386         }
8387
8388         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8389         if (!dip) {
8390                 ret = -ENOMEM;
8391                 goto free_ordered;
8392         }
8393
8394         dip->private = dio_bio->bi_private;
8395         dip->inode = inode;
8396         dip->logical_offset = file_offset;
8397         dip->bytes = dio_bio->bi_iter.bi_size;
8398         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8399         io_bio->bi_private = dip;
8400         dip->orig_bio = io_bio;
8401         dip->dio_bio = dio_bio;
8402         atomic_set(&dip->pending_bios, 0);
8403         btrfs_bio = btrfs_io_bio(io_bio);
8404         btrfs_bio->logical = file_offset;
8405
8406         if (write) {
8407                 io_bio->bi_end_io = btrfs_endio_direct_write;
8408         } else {
8409                 io_bio->bi_end_io = btrfs_endio_direct_read;
8410                 dip->subio_endio = btrfs_subio_endio_read;
8411         }
8412
8413         ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
8414         if (!ret)
8415                 return;
8416
8417         if (btrfs_bio->end_io)
8418                 btrfs_bio->end_io(btrfs_bio, ret);
8419
8420 free_ordered:
8421         /*
8422          * If we arrived here it means either we failed to submit the dip
8423          * or we either failed to clone the dio_bio or failed to allocate the
8424          * dip. If we cloned the dio_bio and allocated the dip, we can just
8425          * call bio_endio against our io_bio so that we get proper resource
8426          * cleanup if we fail to submit the dip, otherwise, we must do the
8427          * same as btrfs_endio_direct_[write|read] because we can't call these
8428          * callbacks - they require an allocated dip and a clone of dio_bio.
8429          */
8430         if (io_bio && dip) {
8431                 io_bio->bi_error = -EIO;
8432                 bio_endio(io_bio);
8433                 /*
8434                  * The end io callbacks free our dip, do the final put on io_bio
8435                  * and all the cleanup and final put for dio_bio (through
8436                  * dio_end_io()).
8437                  */
8438                 dip = NULL;
8439                 io_bio = NULL;
8440         } else {
8441                 if (write) {
8442                         struct btrfs_ordered_extent *ordered;
8443
8444                         ordered = btrfs_lookup_ordered_extent(inode,
8445                                                               file_offset);
8446                         set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
8447                         /*
8448                          * Decrements our ref on the ordered extent and removes
8449                          * the ordered extent from the inode's ordered tree,
8450                          * doing all the proper resource cleanup such as for the
8451                          * reserved space and waking up any waiters for this
8452                          * ordered extent (through btrfs_remove_ordered_extent).
8453                          */
8454                         btrfs_finish_ordered_io(ordered);
8455                 } else {
8456                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8457                               file_offset + dio_bio->bi_iter.bi_size - 1);
8458                 }
8459                 dio_bio->bi_error = -EIO;
8460                 /*
8461                  * Releases and cleans up our dio_bio, no need to bio_put()
8462                  * nor bio_endio()/bio_io_error() against dio_bio.
8463                  */
8464                 dio_end_io(dio_bio, ret);
8465         }
8466         if (io_bio)
8467                 bio_put(io_bio);
8468         kfree(dip);
8469 }
8470
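/*
 * Direct IO in btrfs must be sector aligned: both the file offset and
 * the memory alignment of every iovec are checked against sectorsize.
 * On a mismatch btrfs_direct_IO() returns 0, which makes the generic
 * code fall back to buffered IO.  E.g., assuming a 4K sectorsize, a
 * read at offset 4096 from a buffer that is only 512-byte aligned is
 * rejected here.
 */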
8471 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
8472                         const struct iov_iter *iter, loff_t offset)
8473 {
8474         int seg;
8475         int i;
8476         unsigned blocksize_mask = root->sectorsize - 1;
8477         ssize_t retval = -EINVAL;
8478
8479         if (offset & blocksize_mask)
8480                 goto out;
8481
8482         if (iov_iter_alignment(iter) & blocksize_mask)
8483                 goto out;
8484
8485         /* If this is a write we don't need to do any further checks */
8486         if (iov_iter_rw(iter) == WRITE)
8487                 return 0;
8488         /*
8489          * Check to make sure we don't have duplicate iov_base's in this
8490          * iovec; if so return -EINVAL, otherwise we'll get csum errors
8491          * when reading back.
8492          */
8493         for (seg = 0; seg < iter->nr_segs; seg++) {
8494                 for (i = seg + 1; i < iter->nr_segs; i++) {
8495                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8496                                 goto out;
8497                 }
8498         }
8499         retval = 0;
8500 out:
8501         return retval;
8502 }
8503
8504 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8505                                loff_t offset)
8506 {
8507         struct file *file = iocb->ki_filp;
8508         struct inode *inode = file->f_mapping->host;
8509         struct btrfs_root *root = BTRFS_I(inode)->root;
8510         struct btrfs_dio_data dio_data = { 0 };
8511         size_t count = 0;
8512         int flags = 0;
8513         bool wakeup = true;
8514         bool relock = false;
8515         ssize_t ret;
8516
8517         if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
8518                 return 0;
8519
8520         inode_dio_begin(inode);
8521         smp_mb__after_atomic();
8522
8523         /*
8524          * The generic stuff only does filemap_write_and_wait_range, which
8525          * isn't enough if we've written compressed pages to this area, so
8526          * we need to flush the dirty pages again to make absolutely sure
8527          * that any outstanding dirty pages are on disk.
8528          */
8529         count = iov_iter_count(iter);
8530         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8531                      &BTRFS_I(inode)->runtime_flags))
8532                 filemap_fdatawrite_range(inode->i_mapping, offset,
8533                                          offset + count - 1);
8534
8535         if (iov_iter_rw(iter) == WRITE) {
8536                 /*
8537                  * If the write DIO is beyond the EOF, we need to update
8538                  * the isize, but it is protected by i_mutex, so we
8539                  * cannot unlock the i_mutex in this case.
8540                  */
8541                 if (offset + count <= inode->i_size) {
8542                         mutex_unlock(&inode->i_mutex);
8543                         relock = true;
8544                 }
8545                 ret = btrfs_delalloc_reserve_space(inode, offset, count);
8546                 if (ret)
8547                         goto out;
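                /*
                 * Ceiling division: e.g., assuming the usual 128M
                 * BTRFS_MAX_EXTENT_SIZE, a 300M DIO write reserves
                 * DIV_ROUND_UP(300M, 128M) = 3 outstanding extents.
                 */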
8548                 dio_data.outstanding_extents = div64_u64(count +
8549                                                 BTRFS_MAX_EXTENT_SIZE - 1,
8550                                                 BTRFS_MAX_EXTENT_SIZE);
8551
8552                 /*
8553                  * We need to know how many extents we reserved so that we can
8554                  * do the accounting properly if we go over the number we
8555                  * originally calculated.  Abuse current->journal_info for this.
8556                  */
8557                 dio_data.reserve = round_up(count, root->sectorsize);
8558                 current->journal_info = &dio_data;
8559         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8560                                      &BTRFS_I(inode)->runtime_flags)) {
8561                 inode_dio_end(inode);
8562                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8563                 wakeup = false;
8564         }
8565
8566         ret = __blockdev_direct_IO(iocb, inode,
8567                                    BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
8568                                    iter, offset, btrfs_get_blocks_direct, NULL,
8569                                    btrfs_submit_direct, flags);
8570         if (iov_iter_rw(iter) == WRITE) {
8571                 current->journal_info = NULL;
8572                 if (ret < 0 && ret != -EIOCBQUEUED) {
8573                         if (dio_data.reserve)
8574                                 btrfs_delalloc_release_space(inode, offset,
8575                                                              dio_data.reserve);
8576                 } else if (ret >= 0 && (size_t)ret < count)
8577                         btrfs_delalloc_release_space(inode, offset,
8578                                                      count - (size_t)ret);
8579         }
8580 out:
8581         if (wakeup)
8582                 inode_dio_end(inode);
8583         if (relock)
8584                 mutex_lock(&inode->i_mutex);
8585
8586         return ret;
8587 }
8588
8589 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8590
8591 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8592                 __u64 start, __u64 len)
8593 {
8594         int     ret;
8595
8596         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8597         if (ret)
8598                 return ret;
8599
8600         return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
8601 }
8602
8603 int btrfs_readpage(struct file *file, struct page *page)
8604 {
8605         struct extent_io_tree *tree;
8606         tree = &BTRFS_I(page->mapping->host)->io_tree;
8607         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8608 }
8609
8610 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8611 {
8612         struct extent_io_tree *tree;
8613         struct inode *inode = page->mapping->host;
8614         int ret;
8615
8616         if (current->flags & PF_MEMALLOC) {
8617                 redirty_page_for_writepage(wbc, page);
8618                 unlock_page(page);
8619                 return 0;
8620         }
8621
8622         /*
8623          * If we are under memory pressure we will call this directly from the
8624          * VM, we need to make sure we have the inode referenced for the ordered
8625          * extent.  If not just return like we didn't do anything.
8626          */
8627         if (!igrab(inode)) {
8628                 redirty_page_for_writepage(wbc, page);
8629                 return AOP_WRITEPAGE_ACTIVATE;
8630         }
8631         tree = &BTRFS_I(page->mapping->host)->io_tree;
8632         ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
8633         btrfs_add_delayed_iput(inode);
8634         return ret;
8635 }
8636
8637 static int btrfs_writepages(struct address_space *mapping,
8638                             struct writeback_control *wbc)
8639 {
8640         struct extent_io_tree *tree;
8641
8642         tree = &BTRFS_I(mapping->host)->io_tree;
8643         return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
8644 }
8645
8646 static int
8647 btrfs_readpages(struct file *file, struct address_space *mapping,
8648                 struct list_head *pages, unsigned nr_pages)
8649 {
8650         struct extent_io_tree *tree;
8651         tree = &BTRFS_I(mapping->host)->io_tree;
8652         return extent_readpages(tree, mapping, pages, nr_pages,
8653                                 btrfs_get_extent);
8654 }
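/*
 * Try to drop the extent mappings and io_tree state attached to the
 * page; only if that succeeds do we strip PagePrivate and release the
 * page cache reference we hold for it.
 */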
8655 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8656 {
8657         struct extent_io_tree *tree;
8658         struct extent_map_tree *map;
8659         int ret;
8660
8661         tree = &BTRFS_I(page->mapping->host)->io_tree;
8662         map = &BTRFS_I(page->mapping->host)->extent_tree;
8663         ret = try_release_extent_mapping(map, tree, page, gfp_flags);
8664         if (ret == 1) {
8665                 ClearPagePrivate(page);
8666                 set_page_private(page, 0);
8667                 page_cache_release(page);
8668         }
8669         return ret;
8670 }
8671
8672 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8673 {
8674         if (PageWriteback(page) || PageDirty(page))
8675                 return 0;
8676         return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
8677 }
8678
8679 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8680                                  unsigned int length)
8681 {
8682         struct inode *inode = page->mapping->host;
8683         struct extent_io_tree *tree;
8684         struct btrfs_ordered_extent *ordered;
8685         struct extent_state *cached_state = NULL;
8686         u64 page_start = page_offset(page);
8687         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
8688         int inode_evicting = inode->i_state & I_FREEING;
8689
8690         /*
8691          * we have the page locked, so new writeback can't start,
8692          * and the dirty bit won't be cleared while we are here.
8693          *
8694          * Wait for IO on this page so that we can safely clear
8695          * the PagePrivate2 bit and do ordered accounting
8696          */
8697         wait_on_page_writeback(page);
8698
8699         tree = &BTRFS_I(inode)->io_tree;
8700         if (offset) {
8701                 btrfs_releasepage(page, GFP_NOFS);
8702                 return;
8703         }
8704
8705         if (!inode_evicting)
8706                 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
8707         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8708         if (ordered) {
8709                 /*
8710                  * IO on this page will never be started, so we need
8711                  * to account for any ordered extents now
8712                  */
8713                 if (!inode_evicting)
8714                         clear_extent_bit(tree, page_start, page_end,
8715                                          EXTENT_DIRTY | EXTENT_DELALLOC |
8716                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8717                                          EXTENT_DEFRAG, 1, 0, &cached_state,
8718                                          GFP_NOFS);
8719                 /*
8720                  * whoever cleared the private bit is responsible
8721                  * for the finish_ordered_io
8722                  */
8723                 if (TestClearPagePrivate2(page)) {
8724                         struct btrfs_ordered_inode_tree *tree;
8725                         u64 new_len;
8726
8727                         tree = &BTRFS_I(inode)->ordered_tree;
8728
8729                         spin_lock_irq(&tree->lock);
8730                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8731                         new_len = page_start - ordered->file_offset;
8732                         if (new_len < ordered->truncated_len)
8733                                 ordered->truncated_len = new_len;
8734                         spin_unlock_irq(&tree->lock);
8735
8736                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8737                                                            page_start,
8738                                                            PAGE_CACHE_SIZE, 1))
8739                                 btrfs_finish_ordered_io(ordered);
8740                 }
8741                 btrfs_put_ordered_extent(ordered);
8742                 if (!inode_evicting) {
8743                         cached_state = NULL;
8744                         lock_extent_bits(tree, page_start, page_end, 0,
8745                                          &cached_state);
8746                 }
8747         }
8748
8749         /*
8750          * Qgroup reserved space handler
8751          * The page here will be in one of two states:
8752          * 1) Already written to disk
8753          *    In this case, its reserved space is released from the data rsv
8754          *    map and will finally be freed by the delayed_ref handler. So
8755          *    even if we call qgroup_free_data(), it won't decrease the
8756          *    reserved space.
8757          * 2) Not written to disk
8758          *    This means the reserved space should be freed here. However,
8759          *    if a truncate invalidates the page (by clearing PageDirty)
8760          *    and the page was accounted for while allocating the extent
8761          *    in btrfs_check_data_free_space(), we let the delayed_ref
8762          *    handler free the entire extent.
8763          */
8764         if (PageDirty(page))
8765                 btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
8766         if (!inode_evicting) {
8767                 clear_extent_bit(tree, page_start, page_end,
8768                                  EXTENT_LOCKED | EXTENT_DIRTY |
8769                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8770                                  EXTENT_DEFRAG, 1, 1,
8771                                  &cached_state, GFP_NOFS);
8772
8773                 __btrfs_releasepage(page, GFP_NOFS);
8774         }
8775
8776         ClearPageChecked(page);
8777         if (PagePrivate(page)) {
8778                 ClearPagePrivate(page);
8779                 set_page_private(page, 0);
8780                 page_cache_release(page);
8781         }
8782 }
8783
8784 /*
8785  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8786  * called from a page fault handler when a page is first dirtied. Hence we must
8787  * be careful to check for EOF conditions here. We set the page up correctly
8788  * for a written page which means we get ENOSPC checking when writing into
8789  * holes and correct delalloc and unwritten extent mapping on filesystems that
8790  * support these features.
8791  *
8792  * We are not allowed to take the i_mutex here so we have to play games to
8793  * protect against truncate races as the page could now be beyond EOF.  Because
8794  * vmtruncate() writes the inode size before removing pages, once we have the
8795  * page lock we can determine safely if the page is beyond EOF. If it is not
8796  * beyond EOF, then the page is guaranteed safe against truncation until we
8797  * unlock the page.
8798  */
8799 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8800 {
8801         struct page *page = vmf->page;
8802         struct inode *inode = file_inode(vma->vm_file);
8803         struct btrfs_root *root = BTRFS_I(inode)->root;
8804         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8805         struct btrfs_ordered_extent *ordered;
8806         struct extent_state *cached_state = NULL;
8807         char *kaddr;
8808         unsigned long zero_start;
8809         loff_t size;
8810         int ret;
8811         int reserved = 0;
8812         u64 page_start;
8813         u64 page_end;
8814
8815         sb_start_pagefault(inode->i_sb);
8816         page_start = page_offset(page);
8817         page_end = page_start + PAGE_CACHE_SIZE - 1;
8818
8819         ret = btrfs_delalloc_reserve_space(inode, page_start,
8820                                            PAGE_CACHE_SIZE);
8821         if (!ret) {
8822                 ret = file_update_time(vma->vm_file);
8823                 reserved = 1;
8824         }
8825         if (ret) {
8826                 if (ret == -ENOMEM)
8827                         ret = VM_FAULT_OOM;
8828                 else /* -ENOSPC, -EIO, etc */
8829                         ret = VM_FAULT_SIGBUS;
8830                 if (reserved)
8831                         goto out;
8832                 goto out_noreserve;
8833         }
8834
8835         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8836 again:
8837         lock_page(page);
8838         size = i_size_read(inode);
8839
8840         if ((page->mapping != inode->i_mapping) ||
8841             (page_start >= size)) {
8842                 /* page got truncated out from underneath us */
8843                 goto out_unlock;
8844         }
8845         wait_on_page_writeback(page);
8846
8847         lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
8848         set_page_extent_mapped(page);
8849
8850         /*
8851          * we can't set the delalloc bits if there are pending ordered
8852          * extents.  Drop our locks and wait for them to finish
8853          */
8854         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8855         if (ordered) {
8856                 unlock_extent_cached(io_tree, page_start, page_end,
8857                                      &cached_state, GFP_NOFS);
8858                 unlock_page(page);
8859                 btrfs_start_ordered_extent(inode, ordered, 1);
8860                 btrfs_put_ordered_extent(ordered);
8861                 goto again;
8862         }
8863
8864         /*
8865          * XXX - page_mkwrite gets called every time the page is dirtied, even
8866          * if it was already dirty, so for space accounting reasons we need to
8867          * clear any delalloc bits for the range we are fixing to save.  There
8868          * is probably a better way to do this, but for now keep consistent with
8869          * prepare_pages in the normal write path.
8870          */
8871         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
8872                           EXTENT_DIRTY | EXTENT_DELALLOC |
8873                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
8874                           0, 0, &cached_state, GFP_NOFS);
8875
8876         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
8877                                         &cached_state);
8878         if (ret) {
8879                 unlock_extent_cached(io_tree, page_start, page_end,
8880                                      &cached_state, GFP_NOFS);
8881                 ret = VM_FAULT_SIGBUS;
8882                 goto out_unlock;
8883         }
8884         ret = 0;
8885
8886         /* page is wholly or partially inside EOF */
8887         if (page_start + PAGE_CACHE_SIZE > size)
8888                 zero_start = size & ~PAGE_CACHE_MASK;
8889         else
8890                 zero_start = PAGE_CACHE_SIZE;
8891
8892         if (zero_start != PAGE_CACHE_SIZE) {
8893                 kaddr = kmap(page);
8894                 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
8895                 flush_dcache_page(page);
8896                 kunmap(page);
8897         }
8898         ClearPageChecked(page);
8899         set_page_dirty(page);
8900         SetPageUptodate(page);
8901
8902         BTRFS_I(inode)->last_trans = root->fs_info->generation;
8903         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
8904         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
8905
8906         unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
8907
8908 out_unlock:
8909         if (!ret) {
8910                 sb_end_pagefault(inode->i_sb);
8911                 return VM_FAULT_LOCKED;
8912         }
8913         unlock_page(page);
8914 out:
8915         btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
8916 out_noreserve:
8917         sb_end_pagefault(inode->i_sb);
8918         return ret;
8919 }
8920
8921 static int btrfs_truncate(struct inode *inode)
8922 {
8923         struct btrfs_root *root = BTRFS_I(inode)->root;
8924         struct btrfs_block_rsv *rsv;
8925         int ret = 0;
8926         int err = 0;
8927         struct btrfs_trans_handle *trans;
8928         u64 mask = root->sectorsize - 1;
8929         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
8930
8931         ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8932                                        (u64)-1);
8933         if (ret)
8934                 return ret;
8935
8936         /*
8937          * Yes ladies and gentelment, this is indeed ugly.  The fact is we have
8938          * 3 things going on here
8939          *
8940          * 1) We need to reserve space for our orphan item and the space to
8941          * delete our orphan item.  Lord knows we don't want to have a dangling
8942          * orphan item because we didn't reserve space to remove it.
8943          *
8944          * 2) We need to reserve space to update our inode.
8945          *
8946          * 3) We need to have something to cache all the space that is going to
8947          * be free'd up by the truncate operation, but also have some slack
8948          * space reserved in case it uses space during the truncate (thank you
8949          * very much snapshotting).
8950          *
8951          * And we need these to all be seperate.  The fact is we can use alot of
8952          * space doing the truncate, and we have no earthly idea how much space
8953          * we will use, so we need the truncate reservation to be seperate so it
8954          * doesn't end up using space reserved for updating the inode or
8955          * removing the orphan item.  We also need to be able to stop the
8956          * transaction and start a new one, which means we need to be able to
8957          * update the inode several times, and we have no idea of knowing how
8958          * many times that will be, so we can't just reserve 1 item for the
8959          * entirety of the opration, so that has to be done seperately as well.
8960          * Then there is the orphan item, which does indeed need to be held on
8961          * to for the whole operation, and we need nobody to touch this reserved
8962          * space except the orphan code.
8963          *
8964          * So that leaves us with
8965          *
8966          * 1) root->orphan_block_rsv - for the orphan deletion.
8967          * 2) rsv - for the truncate reservation, which we will steal from the
8968          * transaction reservation.
8969          * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
8970          * updating the inode.
8971          */
8972         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
8973         if (!rsv)
8974                 return -ENOMEM;
8975         rsv->size = min_size;
8976         rsv->failfast = 1;
8977
8978         /*
8979          * 1 for the truncate slack space
8980          * 1 for updating the inode.
8981          */
8982         trans = btrfs_start_transaction(root, 2);
8983         if (IS_ERR(trans)) {
8984                 err = PTR_ERR(trans);
8985                 goto out;
8986         }
8987
8988         /* Migrate the slack space for the truncate to our reserve */
8989         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
8990                                       min_size);
8991         BUG_ON(ret);
8992
8993         /*
8994          * So if we truncate and then write and fsync we normally would just
8995          * write the extents that changed, which is a problem if we need to
8996          * first truncate that entire inode.  So set this flag so we write out
8997          * all of the extents in the inode to the sync log so we're completely
8998          * safe.
8999          */
9000         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
9001         trans->block_rsv = rsv;
9002
9003         while (1) {
9004                 ret = btrfs_truncate_inode_items(trans, root, inode,
9005                                                  inode->i_size,
9006                                                  BTRFS_EXTENT_DATA_KEY);
9007                 if (ret != -ENOSPC && ret != -EAGAIN) {
9008                         err = ret;
9009                         break;
9010                 }
9011
9012                 trans->block_rsv = &root->fs_info->trans_block_rsv;
9013                 ret = btrfs_update_inode(trans, root, inode);
9014                 if (ret) {
9015                         err = ret;
9016                         break;
9017                 }
9018
9019                 btrfs_end_transaction(trans, root);
9020                 btrfs_btree_balance_dirty(root);
9021
9022                 trans = btrfs_start_transaction(root, 2);
9023                 if (IS_ERR(trans)) {
9024                         ret = err = PTR_ERR(trans);
9025                         trans = NULL;
9026                         break;
9027                 }
9028
9029                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
9030                                               rsv, min_size);
9031                 BUG_ON(ret);    /* shouldn't happen */
9032                 trans->block_rsv = rsv;
9033         }
9034
9035         if (ret == 0 && inode->i_nlink > 0) {
9036                 trans->block_rsv = root->orphan_block_rsv;
9037                 ret = btrfs_orphan_del(trans, inode);
9038                 if (ret)
9039                         err = ret;
9040         }
9041
9042         if (trans) {
9043                 trans->block_rsv = &root->fs_info->trans_block_rsv;
9044                 ret = btrfs_update_inode(trans, root, inode);
9045                 if (ret && !err)
9046                         err = ret;
9047
9048                 ret = btrfs_end_transaction(trans, root);
9049                 btrfs_btree_balance_dirty(root);
9050         }
9051
9052 out:
9053         btrfs_free_block_rsv(root, rsv);
9054
9055         if (ret && !err)
9056                 err = ret;
9057
9058         return err;
9059 }
9060
9061 /*
9062  * create a new subvolume directory/inode (helper for the ioctl).
9063  */
9064 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
9065                              struct btrfs_root *new_root,
9066                              struct btrfs_root *parent_root,
9067                              u64 new_dirid)
9068 {
9069         struct inode *inode;
9070         int err;
9071         u64 index = 0;
9072
9073         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
9074                                 new_dirid, new_dirid,
9075                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
9076                                 &index);
9077         if (IS_ERR(inode))
9078                 return PTR_ERR(inode);
9079         inode->i_op = &btrfs_dir_inode_operations;
9080         inode->i_fop = &btrfs_dir_file_operations;
9081
9082         set_nlink(inode, 1);
9083         btrfs_i_size_write(inode, 0);
9084         unlock_new_inode(inode);
9085
9086         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
9087         if (err)
9088                 btrfs_err(new_root->fs_info,
9089                           "error inheriting subvolume %llu properties: %d",
9090                           new_root->root_key.objectid, err);
9091
9092         err = btrfs_update_inode(trans, new_root, inode);
9093
9094         iput(inode);
9095         return err;
9096 }
9097
9098 struct inode *btrfs_alloc_inode(struct super_block *sb)
9099 {
9100         struct btrfs_inode *ei;
9101         struct inode *inode;
9102
9103         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
9104         if (!ei)
9105                 return NULL;
9106
9107         ei->root = NULL;
9108         ei->generation = 0;
9109         ei->last_trans = 0;
9110         ei->last_sub_trans = 0;
9111         ei->logged_trans = 0;
9112         ei->delalloc_bytes = 0;
9113         ei->defrag_bytes = 0;
9114         ei->disk_i_size = 0;
9115         ei->flags = 0;
9116         ei->csum_bytes = 0;
9117         ei->index_cnt = (u64)-1;
9118         ei->dir_index = 0;
9119         ei->last_unlink_trans = 0;
9120         ei->last_log_commit = 0;
9121
9122         spin_lock_init(&ei->lock);
9123         ei->outstanding_extents = 0;
9124         ei->reserved_extents = 0;
9125
9126         ei->runtime_flags = 0;
9127         ei->force_compress = BTRFS_COMPRESS_NONE;
9128
9129         ei->delayed_node = NULL;
9130
9131         ei->i_otime.tv_sec = 0;
9132         ei->i_otime.tv_nsec = 0;
9133
9134         inode = &ei->vfs_inode;
9135         extent_map_tree_init(&ei->extent_tree);
9136         extent_io_tree_init(&ei->io_tree, &inode->i_data);
9137         extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
9138         ei->io_tree.track_uptodate = 1;
9139         ei->io_failure_tree.track_uptodate = 1;
9140         atomic_set(&ei->sync_writers, 0);
9141         mutex_init(&ei->log_mutex);
9142         mutex_init(&ei->delalloc_mutex);
9143         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9144         INIT_LIST_HEAD(&ei->delalloc_inodes);
9145         RB_CLEAR_NODE(&ei->rb_node);
9146
9147         return inode;
9148 }
9149
9150 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9151 void btrfs_test_destroy_inode(struct inode *inode)
9152 {
9153         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9154         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9155 }
9156 #endif
9157
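/*
 * Inodes are returned to the slab via RCU so that lock-free pathname
 * walkers may still be dereferencing them; btrfs_destroy_inode() below
 * does the btrfs-specific teardown first and then queues this callback.
 */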
9158 static void btrfs_i_callback(struct rcu_head *head)
9159 {
9160         struct inode *inode = container_of(head, struct inode, i_rcu);
9161         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9162 }
9163
9164 void btrfs_destroy_inode(struct inode *inode)
9165 {
9166         struct btrfs_ordered_extent *ordered;
9167         struct btrfs_root *root = BTRFS_I(inode)->root;
9168
9169         WARN_ON(!hlist_empty(&inode->i_dentry));
9170         WARN_ON(inode->i_data.nrpages);
9171         WARN_ON(BTRFS_I(inode)->outstanding_extents);
9172         WARN_ON(BTRFS_I(inode)->reserved_extents);
9173         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9174         WARN_ON(BTRFS_I(inode)->csum_bytes);
9175         WARN_ON(BTRFS_I(inode)->defrag_bytes);
9176
9177         /*
9178          * This can happen where we create an inode, but somebody else also
9179          * created the same inode and we need to destroy the one we already
9180          * created.
9181          */
9182         if (!root)
9183                 goto free;
9184
9185         if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
9186                      &BTRFS_I(inode)->runtime_flags)) {
9187                 btrfs_info(root->fs_info, "inode %llu still on the orphan list",
9188                         btrfs_ino(inode));
9189                 atomic_dec(&root->orphan_inodes);
9190         }
9191
9192         while (1) {
9193                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9194                 if (!ordered)
9195                         break;
9196                 else {
9197                         btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
9198                                 ordered->file_offset, ordered->len);
9199                         btrfs_remove_ordered_extent(inode, ordered);
9200                         btrfs_put_ordered_extent(ordered);
9201                         btrfs_put_ordered_extent(ordered);
9202                 }
9203         }
9204         btrfs_qgroup_check_reserved_leak(inode);
9205         inode_tree_del(inode);
9206         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9207 free:
9208         call_rcu(&inode->i_rcu, btrfs_i_callback);
9209 }
9210
9211 int btrfs_drop_inode(struct inode *inode)
9212 {
9213         struct btrfs_root *root = BTRFS_I(inode)->root;
9214
9215         if (root == NULL)
9216                 return 1;
9217
9218         /* the snap/subvol tree is on deleting */
9219         if (btrfs_root_refs(&root->root_item) == 0)
9220                 return 1;
9221         else
9222                 return generic_drop_inode(inode);
9223 }
9224
9225 static void init_once(void *foo)
9226 {
9227         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9228
9229         inode_init_once(&ei->vfs_inode);
9230 }
9231
9232 void btrfs_destroy_cachep(void)
9233 {
9234         /*
9235          * Make sure all delayed rcu free inodes are flushed before we
9236          * destroy the cache.
9237          */
9238         rcu_barrier();
9239         if (btrfs_inode_cachep)
9240                 kmem_cache_destroy(btrfs_inode_cachep);
9241         if (btrfs_trans_handle_cachep)
9242                 kmem_cache_destroy(btrfs_trans_handle_cachep);
9243         if (btrfs_transaction_cachep)
9244                 kmem_cache_destroy(btrfs_transaction_cachep);
9245         if (btrfs_path_cachep)
9246                 kmem_cache_destroy(btrfs_path_cachep);
9247         if (btrfs_free_space_cachep)
9248                 kmem_cache_destroy(btrfs_free_space_cachep);
9249         if (btrfs_delalloc_work_cachep)
9250                 kmem_cache_destroy(btrfs_delalloc_work_cachep);
9251 }
9252
9253 int btrfs_init_cachep(void)
9254 {
9255         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9256                         sizeof(struct btrfs_inode), 0,
9257                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
9258         if (!btrfs_inode_cachep)
9259                 goto fail;
9260
9261         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9262                         sizeof(struct btrfs_trans_handle), 0,
9263                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9264         if (!btrfs_trans_handle_cachep)
9265                 goto fail;
9266
9267         btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
9268                         sizeof(struct btrfs_transaction), 0,
9269                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9270         if (!btrfs_transaction_cachep)
9271                 goto fail;
9272
9273         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9274                         sizeof(struct btrfs_path), 0,
9275                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9276         if (!btrfs_path_cachep)
9277                 goto fail;
9278
9279         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9280                         sizeof(struct btrfs_free_space), 0,
9281                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9282         if (!btrfs_free_space_cachep)
9283                 goto fail;
9284
9285         btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
9286                         sizeof(struct btrfs_delalloc_work), 0,
9287                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
9288                         NULL);
9289         if (!btrfs_delalloc_work_cachep)
9290                 goto fail;
9291
9292         return 0;
9293 fail:
9294         btrfs_destroy_cachep();
9295         return -ENOMEM;
9296 }
9297
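/*
 * stat->blocks is reported as on-disk bytes plus in-flight delalloc
 * bytes, each rounded up to the fs blocksize and expressed in 512-byte
 * units: e.g., assuming a 4K blocksize, a file with 8K on disk and 4K
 * of outstanding delalloc reports (8K + 4K) >> 9 = 24 blocks.
 */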
9298 static int btrfs_getattr(struct vfsmount *mnt,
9299                          struct dentry *dentry, struct kstat *stat)
9300 {
9301         u64 delalloc_bytes;
9302         struct inode *inode = d_inode(dentry);
9303         u32 blocksize = inode->i_sb->s_blocksize;
9304
9305         generic_fillattr(inode, stat);
9306         stat->dev = BTRFS_I(inode)->root->anon_dev;
9307         stat->blksize = PAGE_CACHE_SIZE;
9308
9309         spin_lock(&BTRFS_I(inode)->lock);
9310         delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
9311         spin_unlock(&BTRFS_I(inode)->lock);
9312         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9313                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9314         return 0;
9315 }
9316
9317 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9318                            struct inode *new_dir, struct dentry *new_dentry)
9319 {
9320         struct btrfs_trans_handle *trans;
9321         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9322         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9323         struct inode *new_inode = d_inode(new_dentry);
9324         struct inode *old_inode = d_inode(old_dentry);
9325         struct timespec ctime = CURRENT_TIME;
9326         u64 index = 0;
9327         u64 root_objectid;
9328         int ret;
9329         u64 old_ino = btrfs_ino(old_inode);
9330
9331         if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9332                 return -EPERM;
9333
9334         /* we only allow rename subvolume link between subvolumes */
9335         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9336                 return -EXDEV;
9337
9338         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9339             (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
9340                 return -ENOTEMPTY;
9341
9342         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9343             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9344                 return -ENOTEMPTY;
9345
9346
9347         /* check for collisions, even if the name isn't there */
9348         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9349                              new_dentry->d_name.name,
9350                              new_dentry->d_name.len);
9351
9352         if (ret) {
9353                 if (ret == -EEXIST) {
9354                         /* we shouldn't get
9355                          * -EEXIST without a new_inode */
9356                         if (WARN_ON(!new_inode)) {
9357                                 return ret;
9358                         }
9359                 } else {
9360                         /* maybe -EOVERFLOW */
9361                         return ret;
9362                 }
9363         }
9364         ret = 0;
9365
9366         /*
9367          * we're using rename to replace one file with another.  Start IO on it
9368          * now so we don't add too much work to the end of the transaction
9369          */
9370         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9371                 filemap_flush(old_inode->i_mapping);
9372
9373         /* close the racy window with snapshot create/destroy ioctl */
9374         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9375                 down_read(&root->fs_info->subvol_sem);
9376         /*
9377          * We want to reserve the absolute worst case amount of items.  So if
9378          * both inodes are subvols and we need to unlink them then that would
9379          * require 4 item modifications, but if they are both normal inodes it
9380          * would require 5 item modifications, so we'll assume they're normal
9381          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9382          * should cover the worst case number of items we'll modify.
9383          */
9384         trans = btrfs_start_transaction(root, 11);
9385         if (IS_ERR(trans)) {
9386                 ret = PTR_ERR(trans);
9387                 goto out_notrans;
9388         }
9389
9390         if (dest != root)
9391                 btrfs_record_root_in_trans(trans, dest);
9392
9393         ret = btrfs_set_inode_index(new_dir, &index);
9394         if (ret)
9395                 goto out_fail;
9396
9397         BTRFS_I(old_inode)->dir_index = 0ULL;
9398         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9399                 /* force full log commit if subvolume involved. */
9400                 btrfs_set_log_full_commit(root->fs_info, trans);
9401         } else {
9402                 ret = btrfs_insert_inode_ref(trans, dest,
9403                                              new_dentry->d_name.name,
9404                                              new_dentry->d_name.len,
9405                                              old_ino,
9406                                              btrfs_ino(new_dir), index);
9407                 if (ret)
9408                         goto out_fail;
9409                 /*
9410                  * This is an ugly little race, but the rename is required
9411                  * to make sure that if we crash, the inode is either at the
9412                  * old name or the new one.  Pinning the log transaction lets
9413                  * us make sure we don't allow a log commit to come in after
9414                  * we unlink the name but before we add the new name back in.
9415                  */
9416                 btrfs_pin_log_trans(root);
9417         }
9418
9419         inode_inc_iversion(old_dir);
9420         inode_inc_iversion(new_dir);
9421         inode_inc_iversion(old_inode);
9422         old_dir->i_ctime = old_dir->i_mtime = ctime;
9423         new_dir->i_ctime = new_dir->i_mtime = ctime;
9424         old_inode->i_ctime = ctime;
9425
9426         if (old_dentry->d_parent != new_dentry->d_parent)
9427                 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
9428
9429         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9430                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9431                 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
9432                                         old_dentry->d_name.name,
9433                                         old_dentry->d_name.len);
9434         } else {
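                     /*
                      * __btrfs_unlink_inode() removes the name but doesn't
                      * update the inode item itself, so do that explicitly.
                      */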
9435                 ret = __btrfs_unlink_inode(trans, root, old_dir,
9436                                         d_inode(old_dentry),
9437                                         old_dentry->d_name.name,
9438                                         old_dentry->d_name.len);
9439                 if (!ret)
9440                         ret = btrfs_update_inode(trans, root, old_inode);
9441         }
9442         if (ret) {
9443                 btrfs_abort_transaction(trans, root, ret);
9444                 goto out_fail;
9445         }
9446
9447         if (new_inode) {
9448                 inode_inc_iversion(new_inode);
9449                 new_inode->i_ctime = CURRENT_TIME;
9450                 if (unlikely(btrfs_ino(new_inode) ==
9451                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9452                         root_objectid = BTRFS_I(new_inode)->location.objectid;
9453                         ret = btrfs_unlink_subvol(trans, dest, new_dir,
9454                                                 root_objectid,
9455                                                 new_dentry->d_name.name,
9456                                                 new_dentry->d_name.len);
9457                         BUG_ON(new_inode->i_nlink == 0);
9458                 } else {
9459                         ret = btrfs_unlink_inode(trans, dest, new_dir,
9460                                                  d_inode(new_dentry),
9461                                                  new_dentry->d_name.name,
9462                                                  new_dentry->d_name.len);
9463                 }
9464                 if (!ret && new_inode->i_nlink == 0)
9465                         ret = btrfs_orphan_add(trans, d_inode(new_dentry));
9466                 if (ret) {
9467                         btrfs_abort_transaction(trans, root, ret);
9468                         goto out_fail;
9469                 }
9470         }
9471
9472         ret = btrfs_add_link(trans, new_dir, old_inode,
9473                              new_dentry->d_name.name,
9474                              new_dentry->d_name.len, 0, index);
9475         if (ret) {
9476                 btrfs_abort_transaction(trans, root, ret);
9477                 goto out_fail;
9478         }
9479
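             /*
              * With a single link the new dir index fully identifies the
              * inode, so cache it; later operations such as logging can use
              * it to avoid an expensive dir index lookup.
              */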
9480         if (old_inode->i_nlink == 1)
9481                 BTRFS_I(old_inode)->dir_index = index;
9482
9483         if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
9484                 struct dentry *parent = new_dentry->d_parent;
9485                 btrfs_log_new_name(trans, old_inode, old_dir, parent);
9486                 btrfs_end_log_trans(root);
9487         }
9488 out_fail:
9489         btrfs_end_transaction(trans, root);
9490 out_notrans:
9491         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9492                 up_read(&root->fs_info->subvol_sem);
9493
9494         return ret;
9495 }
9496
9497 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
9498                          struct inode *new_dir, struct dentry *new_dentry,
9499                          unsigned int flags)
9500 {
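             /*
              * Only RENAME_NOREPLACE is supported, and it needs no extra
              * handling here: the VFS has already checked that the target
              * name does not exist before calling into the filesystem.
              */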
9501         if (flags & ~RENAME_NOREPLACE)
9502                 return -EINVAL;
9503
9504         return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry);
9505 }
9506
9507 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9508 {
9509         struct btrfs_delalloc_work *delalloc_work;
9510         struct inode *inode;
9511
9512         delalloc_work = container_of(work, struct btrfs_delalloc_work,
9513                                      work);
9514         inode = delalloc_work->inode;
9515         if (delalloc_work->wait) {
9516                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
9517         } else {
9518                 filemap_flush(inode->i_mapping);
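                     /*
                      * With async compressed writeback the first flush can
                      * return before pages are marked writeback, so flush
                      * again to make sure the ordered extents get created.
                      */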
9519                 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9520                              &BTRFS_I(inode)->runtime_flags))
9521                         filemap_flush(inode->i_mapping);
9522         }
9523
9524         if (delalloc_work->delay_iput)
9525                 btrfs_add_delayed_iput(inode);
9526         else
9527                 iput(inode);
9528         complete(&delalloc_work->completion);
9529 }
9530
9531 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
9532                                                     int wait, int delay_iput)
9533 {
9534         struct btrfs_delalloc_work *work;
9535
9536         work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
9537         if (!work)
9538                 return NULL;
9539
9540         init_completion(&work->completion);
9541         INIT_LIST_HEAD(&work->list);
9542         work->inode = inode;
9543         work->wait = wait;
9544         work->delay_iput = delay_iput;
9545         WARN_ON_ONCE(!inode);
9546         btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
9547                         btrfs_run_delalloc_work, NULL, NULL);
9548
9549         return work;
9550 }
9551
9552 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
9553 {
9554         wait_for_completion(&work->completion);
9555         kmem_cache_free(btrfs_delalloc_work_cachep, work);
9556 }
9557
9558 /*
9559  * Some fairly slow code that needs optimization. This walks the list
9560  * of all the inodes with pending delalloc and forces them to disk.
9561  */
9562 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
9563                                    int nr)
9564 {
9565         struct btrfs_inode *binode;
9566         struct inode *inode;
9567         struct btrfs_delalloc_work *work, *next;
9568         struct list_head works;
9569         struct list_head splice;
9570         int ret = 0;
9571
9572         INIT_LIST_HEAD(&works);
9573         INIT_LIST_HEAD(&splice);
9574
9575         mutex_lock(&root->delalloc_mutex);
9576         spin_lock(&root->delalloc_lock);
9577         list_splice_init(&root->delalloc_inodes, &splice);
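             /*
              * Splice onto a private list so the lock can be dropped while
              * walking it; each inode is moved back to the tail of
              * root->delalloc_inodes and pinned with igrab() before the
              * spinlock is released.
              */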
9578         while (!list_empty(&splice)) {
9579                 binode = list_entry(splice.next, struct btrfs_inode,
9580                                     delalloc_inodes);
9581
9582                 list_move_tail(&binode->delalloc_inodes,
9583                                &root->delalloc_inodes);
9584                 inode = igrab(&binode->vfs_inode);
9585                 if (!inode) {
9586                         cond_resched_lock(&root->delalloc_lock);
9587                         continue;
9588                 }
9589                 spin_unlock(&root->delalloc_lock);
9590
9591                 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
9592                 if (!work) {
9593                         if (delay_iput)
9594                                 btrfs_add_delayed_iput(inode);
9595                         else
9596                                 iput(inode);
9597                         ret = -ENOMEM;
9598                         goto out;
9599                 }
9600                 list_add_tail(&work->list, &works);
9601                 btrfs_queue_work(root->fs_info->flush_workers,
9602                                  &work->work);
9603                 ret++;
9604                 if (nr != -1 && ret >= nr)
9605                         goto out;
9606                 cond_resched();
9607                 spin_lock(&root->delalloc_lock);
9608         }
9609         spin_unlock(&root->delalloc_lock);
9610
9611 out:
9612         list_for_each_entry_safe(work, next, &works, list) {
9613                 list_del_init(&work->list);
9614                 btrfs_wait_and_free_delalloc_work(work);
9615         }
9616
9617         if (!list_empty_careful(&splice)) {
9618                 spin_lock(&root->delalloc_lock);
9619                 list_splice_tail(&splice, &root->delalloc_inodes);
9620                 spin_unlock(&root->delalloc_lock);
9621         }
9622         mutex_unlock(&root->delalloc_mutex);
9623         return ret;
9624 }
9625
9626 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
9627 {
9628         int ret;
9629
9630         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
9631                 return -EROFS;
9632
9633         ret = __start_delalloc_inodes(root, delay_iput, -1);
9634         if (ret > 0)
9635                 ret = 0;
9636         /*
9637          * The filemap_flush will queue IO into the worker threads, but
9638          * we have to make sure the IO is actually started and that
9639          * ordered extents get created before we return.
9640          */
9641         atomic_inc(&root->fs_info->async_submit_draining);
9642         while (atomic_read(&root->fs_info->nr_async_submits) ||
9643               atomic_read(&root->fs_info->async_delalloc_pages)) {
9644                 wait_event(root->fs_info->async_submit_wait,
9645                    (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
9646                     atomic_read(&root->fs_info->async_delalloc_pages) == 0));
9647         }
9648         atomic_dec(&root->fs_info->async_submit_draining);
9649         return ret;
9650 }
9651
9652 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
9653                                int nr)
9654 {
9655         struct btrfs_root *root;
9656         struct list_head splice;
9657         int ret;
9658
9659         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
9660                 return -EROFS;
9661
9662         INIT_LIST_HEAD(&splice);
9663
9664         mutex_lock(&fs_info->delalloc_root_mutex);
9665         spin_lock(&fs_info->delalloc_root_lock);
9666         list_splice_init(&fs_info->delalloc_roots, &splice);
9667         while (!list_empty(&splice) && nr) {
9668                 root = list_first_entry(&splice, struct btrfs_root,
9669                                         delalloc_root);
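                     /*
                      * A root still on the delalloc list can't have been
                      * freed, so grabbing a reference must succeed; the
                      * reference keeps the root alive once the lock is
                      * dropped.
                      */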
9670                 root = btrfs_grab_fs_root(root);
9671                 BUG_ON(!root);
9672                 list_move_tail(&root->delalloc_root,
9673                                &fs_info->delalloc_roots);
9674                 spin_unlock(&fs_info->delalloc_root_lock);
9675
9676                 ret = __start_delalloc_inodes(root, delay_iput, nr);
9677                 btrfs_put_fs_root(root);
9678                 if (ret < 0)
9679                         goto out;
9680
9681                 if (nr != -1) {
9682                         nr -= ret;
9683                         WARN_ON(nr < 0);
9684                 }
9685                 spin_lock(&fs_info->delalloc_root_lock);
9686         }
9687         spin_unlock(&fs_info->delalloc_root_lock);
9688
9689         ret = 0;
9690         atomic_inc(&fs_info->async_submit_draining);
9691         while (atomic_read(&fs_info->nr_async_submits) ||
9692               atomic_read(&fs_info->async_delalloc_pages)) {
9693                 wait_event(fs_info->async_submit_wait,
9694                    (atomic_read(&fs_info->nr_async_submits) == 0 &&
9695                     atomic_read(&fs_info->async_delalloc_pages) == 0));
9696         }
9697         atomic_dec(&fs_info->async_submit_draining);
9698 out:
9699         if (!list_empty_careful(&splice)) {
9700                 spin_lock(&fs_info->delalloc_root_lock);
9701                 list_splice_tail(&splice, &fs_info->delalloc_roots);
9702                 spin_unlock(&fs_info->delalloc_root_lock);
9703         }
9704         mutex_unlock(&fs_info->delalloc_root_mutex);
9705         return ret;
9706 }
9707
9708 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9709                          const char *symname)
9710 {
9711         struct btrfs_trans_handle *trans;
9712         struct btrfs_root *root = BTRFS_I(dir)->root;
9713         struct btrfs_path *path;
9714         struct btrfs_key key;
9715         struct inode *inode = NULL;
9716         int err;
9717         int drop_inode = 0;
9718         u64 objectid;
9719         u64 index = 0;
9720         int name_len;
9721         int datasize;
9722         unsigned long ptr;
9723         struct btrfs_file_extent_item *ei;
9724         struct extent_buffer *leaf;
9725
9726         name_len = strlen(symname);
9727         if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
9728                 return -ENAMETOOLONG;
9729
9730         /*
9731          * 2 items for inode item and ref
9732          * 2 items for dir items
9733          * 1 item for updating parent inode item
9734          * 1 item for the inline extent item
9735          * 1 item for xattr if selinux is on
9736          */
9737         trans = btrfs_start_transaction(root, 7);
9738         if (IS_ERR(trans))
9739                 return PTR_ERR(trans);
9740
9741         err = btrfs_find_free_ino(root, &objectid);
9742         if (err)
9743                 goto out_unlock;
9744
9745         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
9746                                 dentry->d_name.len, btrfs_ino(dir), objectid,
9747                                 S_IFLNK|S_IRWXUGO, &index);
9748         if (IS_ERR(inode)) {
9749                 err = PTR_ERR(inode);
9750                 goto out_unlock;
9751         }
9752
9753         /*
9754          * If the active LSM wants to access the inode during
9755          * d_instantiate it needs these. Smack checks to see
9756          * if the filesystem supports xattrs by looking at the
9757          * ops vector.
9758          */
9759         inode->i_fop = &btrfs_file_operations;
9760         inode->i_op = &btrfs_file_inode_operations;
9761         inode->i_mapping->a_ops = &btrfs_aops;
9762         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
9763
9764         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
9765         if (err)
9766                 goto out_unlock_inode;
9767
9768         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
9769         if (err)
9770                 goto out_unlock_inode;
9771
9772         path = btrfs_alloc_path();
9773         if (!path) {
9774                 err = -ENOMEM;
9775                 goto out_unlock_inode;
9776         }
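             /*
              * The symlink target is stored as the data of an inline file
              * extent item at offset 0, so the body lives in the btree
              * itself rather than in a separate data extent.
              */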
9777         key.objectid = btrfs_ino(inode);
9778         key.offset = 0;
9779         key.type = BTRFS_EXTENT_DATA_KEY;
9780         datasize = btrfs_file_extent_calc_inline_size(name_len);
9781         err = btrfs_insert_empty_item(trans, root, path, &key,
9782                                       datasize);
9783         if (err) {
9784                 btrfs_free_path(path);
9785                 goto out_unlock_inode;
9786         }
9787         leaf = path->nodes[0];
9788         ei = btrfs_item_ptr(leaf, path->slots[0],
9789                             struct btrfs_file_extent_item);
9790         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9791         btrfs_set_file_extent_type(leaf, ei,
9792                                    BTRFS_FILE_EXTENT_INLINE);
9793         btrfs_set_file_extent_encryption(leaf, ei, 0);
9794         btrfs_set_file_extent_compression(leaf, ei, 0);
9795         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9796         btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9797
9798         ptr = btrfs_file_extent_inline_start(ei);
9799         write_extent_buffer(leaf, symname, ptr, name_len);
9800         btrfs_mark_buffer_dirty(leaf);
9801         btrfs_free_path(path);
9802
9803         inode->i_op = &btrfs_symlink_inode_operations;
9804         inode->i_mapping->a_ops = &btrfs_symlink_aops;
9805         inode_set_bytes(inode, name_len);
9806         btrfs_i_size_write(inode, name_len);
9807         err = btrfs_update_inode(trans, root, inode);
9808         if (err) {
9809                 drop_inode = 1;
9810                 goto out_unlock_inode;
9811         }
9812
9813         d_instantiate_new(dentry, inode);
9814
9815 out_unlock:
9816         btrfs_end_transaction(trans, root);
9817         if (drop_inode) {
9818                 inode_dec_link_count(inode);
9819                 iput(inode);
9820         }
9821         btrfs_btree_balance_dirty(root);
9822         return err;
9823
9824 out_unlock_inode:
9825         drop_inode = 1;
9826         unlock_new_inode(inode);
9827         goto out_unlock;
9828 }
9829
9830 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9831                                        u64 start, u64 num_bytes, u64 min_size,
9832                                        loff_t actual_len, u64 *alloc_hint,
9833                                        struct btrfs_trans_handle *trans)
9834 {
9835         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
9836         struct extent_map *em;
9837         struct btrfs_root *root = BTRFS_I(inode)->root;
9838         struct btrfs_key ins;
9839         u64 cur_offset = start;
9840         u64 i_size;
9841         u64 cur_bytes;
9842         u64 last_alloc = (u64)-1;
9843         int ret = 0;
9844         bool own_trans = true;
9845
9846         if (trans)
9847                 own_trans = false;
9848         while (num_bytes > 0) {
9849                 if (own_trans) {
9850                         trans = btrfs_start_transaction(root, 3);
9851                         if (IS_ERR(trans)) {
9852                                 ret = PTR_ERR(trans);
9853                                 break;
9854                         }
9855                 }
9856
9857                 cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
9858                 cur_bytes = max(cur_bytes, min_size);
9859                 /*
9860                  * If we are severely fragmented we could end up with really
9861                  * small allocations, so if the allocator is returning small
9862                  * chunks let's make its job easier by only searching for those
9863                  * sized chunks.
9864                  */
9865                 cur_bytes = min(cur_bytes, last_alloc);
9866                 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
9867                                            *alloc_hint, &ins, 1, 0);
9868                 if (ret) {
9869                         if (own_trans)
9870                                 btrfs_end_transaction(trans, root);
9871                         break;
9872                 }
9873
9874                 last_alloc = ins.offset;
9875                 ret = insert_reserved_file_extent(trans, inode,
9876                                                   cur_offset, ins.objectid,
9877                                                   ins.offset, ins.offset,
9878                                                   ins.offset, 0, 0, 0,
9879                                                   BTRFS_FILE_EXTENT_PREALLOC);
9880                 if (ret) {
9881                         btrfs_free_reserved_extent(root, ins.objectid,
9882                                                    ins.offset, 0);
9883                         btrfs_abort_transaction(trans, root, ret);
9884                         if (own_trans)
9885                                 btrfs_end_transaction(trans, root);
9886                         break;
9887                 }
9888
9889                 btrfs_drop_extent_cache(inode, cur_offset,
9890                                         cur_offset + ins.offset - 1, 0);
9891
9892                 em = alloc_extent_map();
9893                 if (!em) {
9894                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
9895                                 &BTRFS_I(inode)->runtime_flags);
9896                         goto next;
9897                 }
9898
9899                 em->start = cur_offset;
9900                 em->orig_start = cur_offset;
9901                 em->len = ins.offset;
9902                 em->block_start = ins.objectid;
9903                 em->block_len = ins.offset;
9904                 em->orig_block_len = ins.offset;
9905                 em->ram_bytes = ins.offset;
9906                 em->bdev = root->fs_info->fs_devices->latest_bdev;
9907                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
9908                 em->generation = trans->transid;
9909
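                     /*
                      * Insert the new extent map; -EEXIST means an
                      * overlapping mapping is cached, so drop the cached
                      * range and retry until the insert succeeds.
                      */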
9910                 while (1) {
9911                         write_lock(&em_tree->lock);
9912                         ret = add_extent_mapping(em_tree, em, 1);
9913                         write_unlock(&em_tree->lock);
9914                         if (ret != -EEXIST)
9915                                 break;
9916                         btrfs_drop_extent_cache(inode, cur_offset,
9917                                                 cur_offset + ins.offset - 1,
9918                                                 0);
9919                 }
9920                 free_extent_map(em);
9921 next:
9922                 num_bytes -= ins.offset;
9923                 cur_offset += ins.offset;
9924                 *alloc_hint = ins.objectid + ins.offset;
9925
9926                 inode_inc_iversion(inode);
9927                 inode->i_ctime = CURRENT_TIME;
9928                 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
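                     /*
                      * Only move i_size forward when the caller didn't ask
                      * for FALLOC_FL_KEEP_SIZE, and clamp it to actual_len
                      * so the block-aligned cur_offset doesn't push i_size
                      * past the requested length.
                      */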
9929                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9930                     (actual_len > inode->i_size) &&
9931                     (cur_offset > inode->i_size)) {
9932                         if (cur_offset > actual_len)
9933                                 i_size = actual_len;
9934                         else
9935                                 i_size = cur_offset;
9936                         i_size_write(inode, i_size);
9937                         btrfs_ordered_update_i_size(inode, i_size, NULL);
9938                 }
9939
9940                 ret = btrfs_update_inode(trans, root, inode);
9941
9942                 if (ret) {
9943                         btrfs_abort_transaction(trans, root, ret);
9944                         if (own_trans)
9945                                 btrfs_end_transaction(trans, root);
9946                         break;
9947                 }
9948
9949                 if (own_trans)
9950                         btrfs_end_transaction(trans, root);
9951         }
9952         return ret;
9953 }
9954
9955 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9956                               u64 start, u64 num_bytes, u64 min_size,
9957                               loff_t actual_len, u64 *alloc_hint)
9958 {
9959         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9960                                            min_size, actual_len, alloc_hint,
9961                                            NULL);
9962 }
9963
9964 int btrfs_prealloc_file_range_trans(struct inode *inode,
9965                                     struct btrfs_trans_handle *trans, int mode,
9966                                     u64 start, u64 num_bytes, u64 min_size,
9967                                     loff_t actual_len, u64 *alloc_hint)
9968 {
9969         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9970                                            min_size, actual_len, alloc_hint, trans);
9971 }
9972
9973 static int btrfs_set_page_dirty(struct page *page)
9974 {
9975         return __set_page_dirty_nobuffers(page);
9976 }
9977
9978 static int btrfs_permission(struct inode *inode, int mask)
9979 {
9980         struct btrfs_root *root = BTRFS_I(inode)->root;
9981         umode_t mode = inode->i_mode;
9982
9983         if (mask & MAY_WRITE &&
9984             (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9985                 if (btrfs_root_readonly(root))
9986                         return -EROFS;
9987                 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9988                         return -EACCES;
9989         }
9990         return generic_permission(inode, mask);
9991 }
9992
9993 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
9994 {
9995         struct btrfs_trans_handle *trans;
9996         struct btrfs_root *root = BTRFS_I(dir)->root;
9997         struct inode *inode = NULL;
9998         u64 objectid;
9999         u64 index;
10000         int ret = 0;
10001
10002         /*
10003          * 5 units required for adding an orphan entry
10004          */
10005         trans = btrfs_start_transaction(root, 5);
10006         if (IS_ERR(trans))
10007                 return PTR_ERR(trans);
10008
10009         ret = btrfs_find_free_ino(root, &objectid);
10010         if (ret)
10011                 goto out;
10012
10013         inode = btrfs_new_inode(trans, root, dir, NULL, 0,
10014                                 btrfs_ino(dir), objectid, mode, &index);
10015         if (IS_ERR(inode)) {
10016                 ret = PTR_ERR(inode);
10017                 inode = NULL;
10018                 goto out;
10019         }
10020
10021         inode->i_fop = &btrfs_file_operations;
10022         inode->i_op = &btrfs_file_inode_operations;
10023
10024         inode->i_mapping->a_ops = &btrfs_aops;
10025         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10026
10027         ret = btrfs_init_inode_security(trans, inode, dir, NULL);
10028         if (ret)
10029                 goto out_inode;
10030
10031         ret = btrfs_update_inode(trans, root, inode);
10032         if (ret)
10033                 goto out_inode;
10034         ret = btrfs_orphan_add(trans, inode);
10035         if (ret)
10036                 goto out_inode;
10037
10038         /*
10039          * We set the number of links to 0 in btrfs_new_inode(), and here
10040          * we set it to 1 because d_tmpfile() would issue a warning if the
10041          * count were 0, via:
10042          *
10043          *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10044          */
10045         set_nlink(inode, 1);
10046         unlock_new_inode(inode);
10047         d_tmpfile(dentry, inode);
10048         mark_inode_dirty(inode);
10049
10050 out:
10051         btrfs_end_transaction(trans, root);
10052         if (ret)
10053                 iput(inode);
10054         btrfs_balance_delayed_items(root);
10055         btrfs_btree_balance_dirty(root);
10056         return ret;
10057
10058 out_inode:
10059         unlock_new_inode(inode);
10060         goto out;
10061
10062 }
10063
10064 /* Inspired by filemap_check_errors() */
10065 int btrfs_inode_check_errors(struct inode *inode)
10066 {
10067         int ret = 0;
10068
10069         if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
10070             test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
10071                 ret = -ENOSPC;
10072         if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
10073             test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
10074                 ret = -EIO;
10075
10076         return ret;
10077 }
10078
10079 static const struct inode_operations btrfs_dir_inode_operations = {
10080         .getattr        = btrfs_getattr,
10081         .lookup         = btrfs_lookup,
10082         .create         = btrfs_create,
10083         .unlink         = btrfs_unlink,
10084         .link           = btrfs_link,
10085         .mkdir          = btrfs_mkdir,
10086         .rmdir          = btrfs_rmdir,
10087         .rename2        = btrfs_rename2,
10088         .symlink        = btrfs_symlink,
10089         .setattr        = btrfs_setattr,
10090         .mknod          = btrfs_mknod,
10091         .setxattr       = btrfs_setxattr,
10092         .getxattr       = btrfs_getxattr,
10093         .listxattr      = btrfs_listxattr,
10094         .removexattr    = btrfs_removexattr,
10095         .permission     = btrfs_permission,
10096         .get_acl        = btrfs_get_acl,
10097         .set_acl        = btrfs_set_acl,
10098         .update_time    = btrfs_update_time,
10099         .tmpfile        = btrfs_tmpfile,
10100 };
10101 static const struct inode_operations btrfs_dir_ro_inode_operations = {
10102         .lookup         = btrfs_lookup,
10103         .permission     = btrfs_permission,
10104         .get_acl        = btrfs_get_acl,
10105         .set_acl        = btrfs_set_acl,
10106         .update_time    = btrfs_update_time,
10107 };
10108
10109 static const struct file_operations btrfs_dir_file_operations = {
10110         .llseek         = generic_file_llseek,
10111         .read           = generic_read_dir,
10112         .iterate        = btrfs_real_readdir,
10113         .unlocked_ioctl = btrfs_ioctl,
10114 #ifdef CONFIG_COMPAT
10115         .compat_ioctl   = btrfs_ioctl,
10116 #endif
10117         .release        = btrfs_release_file,
10118         .fsync          = btrfs_sync_file,
10119 };
10120
10121 static struct extent_io_ops btrfs_extent_io_ops = {
10122         .fill_delalloc = run_delalloc_range,
10123         .submit_bio_hook = btrfs_submit_bio_hook,
10124         .merge_bio_hook = btrfs_merge_bio_hook,
10125         .readpage_end_io_hook = btrfs_readpage_end_io_hook,
10126         .writepage_end_io_hook = btrfs_writepage_end_io_hook,
10127         .writepage_start_hook = btrfs_writepage_start_hook,
10128         .set_bit_hook = btrfs_set_bit_hook,
10129         .clear_bit_hook = btrfs_clear_bit_hook,
10130         .merge_extent_hook = btrfs_merge_extent_hook,
10131         .split_extent_hook = btrfs_split_extent_hook,
10132 };
10133
10134 /*
10135  * btrfs doesn't support the bmap operation because swapfiles
10136  * use bmap to make a mapping of extents in the file.  They assume
10137  * these extents won't change over the life of the file and they
10138  * use the bmap result to do IO directly to the drive.
10139  *
10140  * The btrfs bmap call would return logical addresses that aren't
10141  * suitable for IO and they also will change frequently as COW
10142  * operations happen.  So, swapfile + btrfs == corruption.
10143  *
10144  * For now we're avoiding this by dropping bmap.
10145  */
10146 static const struct address_space_operations btrfs_aops = {
10147         .readpage       = btrfs_readpage,
10148         .writepage      = btrfs_writepage,
10149         .writepages     = btrfs_writepages,
10150         .readpages      = btrfs_readpages,
10151         .direct_IO      = btrfs_direct_IO,
10152         .invalidatepage = btrfs_invalidatepage,
10153         .releasepage    = btrfs_releasepage,
10154         .set_page_dirty = btrfs_set_page_dirty,
10155         .error_remove_page = generic_error_remove_page,
10156 };
10157
10158 static const struct address_space_operations btrfs_symlink_aops = {
10159         .readpage       = btrfs_readpage,
10160         .writepage      = btrfs_writepage,
10161         .invalidatepage = btrfs_invalidatepage,
10162         .releasepage    = btrfs_releasepage,
10163 };
10164
10165 static const struct inode_operations btrfs_file_inode_operations = {
10166         .getattr        = btrfs_getattr,
10167         .setattr        = btrfs_setattr,
10168         .setxattr       = btrfs_setxattr,
10169         .getxattr       = btrfs_getxattr,
10170         .listxattr      = btrfs_listxattr,
10171         .removexattr    = btrfs_removexattr,
10172         .permission     = btrfs_permission,
10173         .fiemap         = btrfs_fiemap,
10174         .get_acl        = btrfs_get_acl,
10175         .set_acl        = btrfs_set_acl,
10176         .update_time    = btrfs_update_time,
10177 };
10178 static const struct inode_operations btrfs_special_inode_operations = {
10179         .getattr        = btrfs_getattr,
10180         .setattr        = btrfs_setattr,
10181         .permission     = btrfs_permission,
10182         .setxattr       = btrfs_setxattr,
10183         .getxattr       = btrfs_getxattr,
10184         .listxattr      = btrfs_listxattr,
10185         .removexattr    = btrfs_removexattr,
10186         .get_acl        = btrfs_get_acl,
10187         .set_acl        = btrfs_set_acl,
10188         .update_time    = btrfs_update_time,
10189 };
10190 static const struct inode_operations btrfs_symlink_inode_operations = {
10191         .readlink       = generic_readlink,
10192         .follow_link    = page_follow_link_light,
10193         .put_link       = page_put_link,
10194         .getattr        = btrfs_getattr,
10195         .setattr        = btrfs_setattr,
10196         .permission     = btrfs_permission,
10197         .setxattr       = btrfs_setxattr,
10198         .getxattr       = btrfs_getxattr,
10199         .listxattr      = btrfs_listxattr,
10200         .removexattr    = btrfs_removexattr,
10201         .update_time    = btrfs_update_time,
10202 };
10203
10204 const struct dentry_operations btrfs_dentry_operations = {
10205         .d_delete       = btrfs_dentry_delete,
10206         .d_release      = btrfs_dentry_release,
10207 };