1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_da_format.h"
17 #include "xfs_da_btree.h"
19 #include "xfs_inode.h"
20 #include "xfs_btree.h"
21 #include "xfs_trans.h"
22 #include "xfs_inode_item.h"
23 #include "xfs_extfree_item.h"
24 #include "xfs_alloc.h"
26 #include "xfs_bmap_util.h"
27 #include "xfs_bmap_btree.h"
28 #include "xfs_rtalloc.h"
29 #include "xfs_errortag.h"
30 #include "xfs_error.h"
31 #include "xfs_quota.h"
32 #include "xfs_trans_space.h"
33 #include "xfs_buf_item.h"
34 #include "xfs_trace.h"
35 #include "xfs_symlink.h"
36 #include "xfs_attr_leaf.h"
37 #include "xfs_filestream.h"
39 #include "xfs_ag_resv.h"
40 #include "xfs_refcount.h"
41 #include "xfs_icache.h"
44 kmem_zone_t *xfs_bmap_free_item_zone;
47 * Miscellaneous helper functions
51 * Compute and fill in the value of the maximum depth of a bmap btree
52 * in this filesystem. Done once, during mount.
55 xfs_bmap_compute_maxlevels(
56 xfs_mount_t *mp, /* file system mount structure */
57 int whichfork) /* data or attr fork */
59 int level; /* btree level */
60 uint maxblocks; /* max blocks at this level */
61 uint maxleafents; /* max leaf entries possible */
62 int maxrootrecs; /* max records in root block */
63 int minleafrecs; /* min records in leaf block */
64 int minnoderecs; /* min records in node block */
65 int sz; /* root block size */
68 * The maximum number of extents in a file, hence the maximum
69 * number of leaf entries, is controlled by the type of di_nextents
70 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
71 * (a signed 16-bit number, xfs_aextnum_t).
73 * Note that we can no longer assume that if we are in ATTR1 that
74 * the fork offset of all the inodes will be
75 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
76 * with ATTR2 and then mounted back with ATTR1, keeping the
77 * di_forkoff's fixed but probably at various positions. Therefore,
78 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
79 * of a minimum size available.
81 if (whichfork == XFS_DATA_FORK) {
82 maxleafents = MAXEXTNUM;
83 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
85 maxleafents = MAXAEXTNUM;
86 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
88 maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
89 minleafrecs = mp->m_bmap_dmnr[0];
90 minnoderecs = mp->m_bmap_dmnr[1];
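/*
 * Start with the number of leaf blocks needed to hold the maximum possible
 * extent count, then keep dividing by the minimum node fanout until the
 * remaining blocks fit under a single root, counting one btree level per pass.
 */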
91 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
92 for (level = 1; maxblocks > 1; level++) {
93 if (maxblocks <= maxrootrecs)
96 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
98 mp->m_bm_maxlevels[whichfork] = level;
101 STATIC int /* error */
103 struct xfs_btree_cur *cur,
104 struct xfs_bmbt_irec *irec,
105 int *stat) /* success/failure */
107 cur->bc_rec.b = *irec;
108 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
111 STATIC int /* error */
112 xfs_bmbt_lookup_first(
113 struct xfs_btree_cur *cur,
114 int *stat) /* success/failure */
116 cur->bc_rec.b.br_startoff = 0;
117 cur->bc_rec.b.br_startblock = 0;
118 cur->bc_rec.b.br_blockcount = 0;
119 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
123 * Check if the inode needs to be converted to btree format.
125 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
127 return whichfork != XFS_COW_FORK &&
128 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
129 XFS_IFORK_NEXTENTS(ip, whichfork) >
130 XFS_IFORK_MAXEXT(ip, whichfork);
134 * Check if the inode should be converted to extent format.
136 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
138 return whichfork != XFS_COW_FORK &&
139 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
140 XFS_IFORK_NEXTENTS(ip, whichfork) <=
141 XFS_IFORK_MAXEXT(ip, whichfork);
145 * Update the record referred to by cur to the value given by irec
146 * This either works (return 0) or gets an EFSCORRUPTED error.
150 struct xfs_btree_cur *cur,
151 struct xfs_bmbt_irec *irec)
153 union xfs_btree_rec rec;
155 xfs_bmbt_disk_set_all(&rec.bmbt, irec);
156 return xfs_btree_update(cur, &rec);
160 * Compute the worst-case number of indirect blocks that will be used
161 * for ip's delayed extent of length "len".
164 xfs_bmap_worst_indlen(
165 xfs_inode_t *ip, /* incore inode pointer */
166 xfs_filblks_t len) /* delayed extent length */
168 int level; /* btree level number */
169 int maxrecs; /* maximum record count at this level */
170 xfs_mount_t *mp; /* mount structure */
171 xfs_filblks_t rval; /* return value */
174 maxrecs = mp->m_bmap_dmxr[0];
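/*
 * At each btree level, round "len" up to the number of blocks needed at that
 * level's maximum fanout and add them to the running total; once a level
 * needs only one block, every remaining level contributes exactly one more.
 */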
175 for (level = 0, rval = 0;
176 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
179 do_div(len, maxrecs);
182 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
185 maxrecs = mp->m_bmap_dmxr[1];
191 * Calculate the default attribute fork offset for newly created inodes.
194 xfs_default_attroffset(
195 struct xfs_inode *ip)
197 struct xfs_mount *mp = ip->i_mount;
200 if (mp->m_sb.sb_inodesize == 256) {
201 offset = XFS_LITINO(mp, ip->i_d.di_version) -
202 XFS_BMDR_SPACE_CALC(MINABTPTRS);
204 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
207 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
212 * Helper routine to reset inode di_forkoff field when switching
213 * attribute fork from local to extent format - we reset it where
214 * possible to make space available for inline data fork extents.
217 xfs_bmap_forkoff_reset(
221 if (whichfork == XFS_ATTR_FORK &&
222 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
223 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
224 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
226 if (dfl_forkoff > ip->i_d.di_forkoff)
227 ip->i_d.di_forkoff = dfl_forkoff;
232 STATIC struct xfs_buf *
234 struct xfs_btree_cur *cur,
237 struct xfs_log_item *lip;
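/* Look first at the buffers already attached to the cursor's path. */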
243 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
244 if (!cur->bc_bufs[i])
246 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
247 return cur->bc_bufs[i];
250 /* Chase down all the log items to see if the bp is there */
251 list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
252 struct xfs_buf_log_item *bip = (struct xfs_buf_log_item *)lip;
254 if (bip->bli_item.li_type == XFS_LI_BUF &&
255 XFS_BUF_ADDR(bip->bli_buf) == bno)
264 struct xfs_btree_block *block,
270 __be64 *pp, *thispa; /* pointer to block address */
271 xfs_bmbt_key_t *prevp, *keyp;
273 ASSERT(be16_to_cpu(block->bb_level) > 0);
276 for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
277 dmxr = mp->m_bmap_dmxr[0];
278 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
281 ASSERT(be64_to_cpu(prevp->br_startoff) <
282 be64_to_cpu(keyp->br_startoff));
287 * Compare the block numbers to see if there are dups.
290 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
292 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
294 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
296 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
298 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
299 if (*thispa == *pp) {
300 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
302 (unsigned long long)be64_to_cpu(*thispa));
303 xfs_err(mp, "%s: ptrs are equal in node\n",
305 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
312 * Check that the extents for the inode ip are in the right order in all
313 * btree leaves. This becomes prohibitively expensive for large extent count
314 * files, so don't bother with inodes that have more than 10,000 extents in
315 * them. The btree record ordering checks will still be done, so for such large
316 * bmapbt constructs that is going to catch most corruptions.
319 xfs_bmap_check_leaf_extents(
320 xfs_btree_cur_t *cur, /* btree cursor or null */
321 xfs_inode_t *ip, /* incore inode pointer */
322 int whichfork) /* data or attr fork */
324 struct xfs_btree_block *block; /* current btree block */
325 xfs_fsblock_t bno; /* block # of "block" */
326 xfs_buf_t *bp; /* buffer for "block" */
327 int error; /* error return value */
328 xfs_extnum_t i=0, j; /* index into the extents list */
329 struct xfs_ifork *ifp; /* fork structure */
330 int level; /* btree level, for checking */
331 xfs_mount_t *mp; /* file system mount structure */
332 __be64 *pp; /* pointer to block address */
333 xfs_bmbt_rec_t *ep; /* pointer to current extent */
334 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
335 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
338 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
342 /* skip large extent count inodes */
343 if (ip->i_d.di_nextents > 10000)
348 ifp = XFS_IFORK_PTR(ip, whichfork);
349 block = ifp->if_broot;
351 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
353 level = be16_to_cpu(block->bb_level);
355 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
356 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
357 bno = be64_to_cpu(*pp);
359 ASSERT(bno != NULLFSBLOCK);
360 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
361 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
364 * Go down the tree until leaf level is reached, following the first
365 * pointer (leftmost) at each level.
367 while (level-- > 0) {
368 /* See if buf is in cur first */
370 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
373 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
379 block = XFS_BUF_TO_BLOCK(bp);
384 * Check this block for basic sanity (increasing keys and
385 * no duplicate blocks).
388 xfs_check_block(block, mp, 0, 0);
389 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
390 bno = be64_to_cpu(*pp);
391 XFS_WANT_CORRUPTED_GOTO(mp,
392 xfs_verify_fsbno(mp, bno), error0);
395 xfs_trans_brelse(NULL, bp);
400 * Here with bp and block set to the leftmost leaf node in the tree.
405 * Loop over all leaf nodes checking that all extents are in the right order.
408 xfs_fsblock_t nextbno;
409 xfs_extnum_t num_recs;
412 num_recs = xfs_btree_get_numrecs(block);
415 * Read-ahead the next leaf block, if any.
418 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
421 * Check all the extents to make sure they are OK.
422 * If we had a previous block, the last entry should
423 * conform with the first entry in this one.
426 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
428 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
429 xfs_bmbt_disk_get_blockcount(&last) <=
430 xfs_bmbt_disk_get_startoff(ep));
432 for (j = 1; j < num_recs; j++) {
433 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
434 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
435 xfs_bmbt_disk_get_blockcount(ep) <=
436 xfs_bmbt_disk_get_startoff(nextp));
444 xfs_trans_brelse(NULL, bp);
448 * If we've reached the end, stop.
450 if (bno == NULLFSBLOCK)
454 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
457 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
463 block = XFS_BUF_TO_BLOCK(bp);
469 xfs_warn(mp, "%s: at error0", __func__);
471 xfs_trans_brelse(NULL, bp);
473 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
475 xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
476 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
481 * Validate that the bmbt_irecs being returned from bmapi are valid
482 * given the caller's original parameters. Specifically check the
483 * ranges of the returned irecs to ensure that they only extend beyond
484 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
487 xfs_bmap_validate_ret(
491 xfs_bmbt_irec_t *mval,
495 int i; /* index to map values */
497 ASSERT(ret_nmap <= nmap);
499 for (i = 0; i < ret_nmap; i++) {
500 ASSERT(mval[i].br_blockcount > 0);
501 if (!(flags & XFS_BMAPI_ENTIRE)) {
502 ASSERT(mval[i].br_startoff >= bno);
503 ASSERT(mval[i].br_blockcount <= len);
504 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
507 ASSERT(mval[i].br_startoff < bno + len);
508 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
512 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
513 mval[i].br_startoff);
514 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
515 mval[i].br_startblock != HOLESTARTBLOCK);
516 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
517 mval[i].br_state == XFS_EXT_UNWRITTEN);
522 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
523 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
527 * bmap free list manipulation functions
531 * Add the extent to the list of extents to be freed at transaction end.
532 * The list is maintained sorted (by block number).
536 struct xfs_trans *tp,
539 struct xfs_owner_info *oinfo,
542 struct xfs_extent_free_item *new; /* new element */
544 struct xfs_mount *mp = tp->t_mountp;
548 ASSERT(bno != NULLFSBLOCK);
550 ASSERT(len <= MAXEXTLEN);
551 ASSERT(!isnullstartblock(bno));
552 agno = XFS_FSB_TO_AGNO(mp, bno);
553 agbno = XFS_FSB_TO_AGBNO(mp, bno);
554 ASSERT(agno < mp->m_sb.sb_agcount);
555 ASSERT(agbno < mp->m_sb.sb_agblocks);
556 ASSERT(len < mp->m_sb.sb_agblocks);
557 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
559 ASSERT(xfs_bmap_free_item_zone != NULL);
561 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
562 new->xefi_startblock = bno;
563 new->xefi_blockcount = (xfs_extlen_t)len;
565 new->xefi_oinfo = *oinfo;
567 xfs_rmap_skip_owner_update(&new->xefi_oinfo);
568 new->xefi_skip_discard = skip_discard;
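/*
 * Queue the extent on the deferred-ops list; the actual free happens when
 * the deferred work items for this transaction are processed.
 */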
569 trace_xfs_bmap_free_defer(tp->t_mountp,
570 XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
571 XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
572 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
576 * Inode fork format manipulation functions
580 * Transform a btree format file with only one leaf node, where the
581 * extents list will fit in the inode, into an extents format file.
582 * Since the file extents are already in-core, all we have to do is
583 * give up the space for the btree root and pitch the leaf block.
585 STATIC int /* error */
586 xfs_bmap_btree_to_extents(
587 xfs_trans_t *tp, /* transaction pointer */
588 xfs_inode_t *ip, /* incore inode pointer */
589 xfs_btree_cur_t *cur, /* btree cursor */
590 int *logflagsp, /* inode logging flags */
591 int whichfork) /* data or attr fork */
594 struct xfs_btree_block *cblock;/* child btree block */
595 xfs_fsblock_t cbno; /* child block number */
596 xfs_buf_t *cbp; /* child block's buffer */
597 int error; /* error return value */
598 struct xfs_ifork *ifp; /* inode fork data */
599 xfs_mount_t *mp; /* mount point structure */
600 __be64 *pp; /* ptr to block address */
601 struct xfs_btree_block *rblock;/* root btree block */
602 struct xfs_owner_info oinfo;
605 ifp = XFS_IFORK_PTR(ip, whichfork);
606 ASSERT(whichfork != XFS_COW_FORK);
607 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
608 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
609 rblock = ifp->if_broot;
610 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
611 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
612 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
613 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
614 cbno = be64_to_cpu(*pp);
617 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
618 xfs_btree_check_lptr(cur, cbno, 1));
620 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
624 cblock = XFS_BUF_TO_BLOCK(cbp);
625 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
627 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
628 xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
629 ip->i_d.di_nblocks--;
630 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
631 xfs_trans_binval(tp, cbp);
632 if (cur->bc_bufs[0] == cbp)
633 cur->bc_bufs[0] = NULL;
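/*
 * Drop the incore btree root and switch the fork back to extents format.
 */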
634 xfs_iroot_realloc(ip, -1, whichfork);
635 ASSERT(ifp->if_broot == NULL);
636 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
637 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
638 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
643 * Convert an extents-format file into a btree-format file.
644 * The new file will have a root block (in the inode) and a single child block.
646 STATIC int /* error */
647 xfs_bmap_extents_to_btree(
648 struct xfs_trans *tp, /* transaction pointer */
649 struct xfs_inode *ip, /* incore inode pointer */
650 struct xfs_btree_cur **curp, /* cursor returned to caller */
651 int wasdel, /* converting a delayed alloc */
652 int *logflagsp, /* inode logging flags */
653 int whichfork) /* data or attr fork */
655 struct xfs_btree_block *ablock; /* allocated (child) bt block */
656 struct xfs_buf *abp; /* buffer for ablock */
657 struct xfs_alloc_arg args; /* allocation arguments */
658 struct xfs_bmbt_rec *arp; /* child record pointer */
659 struct xfs_btree_block *block; /* btree root block */
660 struct xfs_btree_cur *cur; /* bmap btree cursor */
661 int error; /* error return value */
662 struct xfs_ifork *ifp; /* inode fork pointer */
663 struct xfs_bmbt_key *kp; /* root block key pointer */
664 struct xfs_mount *mp; /* mount structure */
665 xfs_bmbt_ptr_t *pp; /* root block address pointer */
666 struct xfs_iext_cursor icur;
667 struct xfs_bmbt_irec rec;
668 xfs_extnum_t cnt = 0;
671 ASSERT(whichfork != XFS_COW_FORK);
672 ifp = XFS_IFORK_PTR(ip, whichfork);
673 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
676 * Make space in the inode incore.
678 xfs_iroot_realloc(ip, 1, whichfork);
679 ifp->if_flags |= XFS_IFBROOT;
684 block = ifp->if_broot;
685 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
686 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
687 XFS_BTREE_LONG_PTRS);
689 * Need a cursor. Can't allocate until bb_level is filled in.
691 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
692 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
694 * Convert to a btree with two levels, one record in root.
696 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
697 memset(&args, 0, sizeof(args));
700 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
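/*
 * Pick an allocation target for the new btree block: near the inode if
 * nothing has been allocated in this transaction yet, otherwise at (or, in
 * low-space mode, starting from) the transaction's first block.
 */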
701 if (tp->t_firstblock == NULLFSBLOCK) {
702 args.type = XFS_ALLOCTYPE_START_BNO;
703 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
704 } else if (tp->t_flags & XFS_TRANS_LOWMODE) {
705 args.type = XFS_ALLOCTYPE_START_BNO;
706 args.fsbno = tp->t_firstblock;
708 args.type = XFS_ALLOCTYPE_NEAR_BNO;
709 args.fsbno = tp->t_firstblock;
711 args.minlen = args.maxlen = args.prod = 1;
712 args.wasdel = wasdel;
714 if ((error = xfs_alloc_vextent(&args))) {
715 xfs_iroot_realloc(ip, -1, whichfork);
716 ASSERT(ifp->if_broot == NULL);
717 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
718 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
722 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
723 xfs_iroot_realloc(ip, -1, whichfork);
724 ASSERT(ifp->if_broot == NULL);
725 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
726 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
730 * Allocation can't fail, the space was reserved.
732 ASSERT(tp->t_firstblock == NULLFSBLOCK ||
733 args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
734 tp->t_firstblock = args.fsbno;
735 cur->bc_private.b.allocated++;
736 ip->i_d.di_nblocks++;
737 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
738 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
740 * Fill in the child block.
742 abp->b_ops = &xfs_bmbt_buf_ops;
743 ablock = XFS_BUF_TO_BLOCK(abp);
744 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
745 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
746 XFS_BTREE_LONG_PTRS);
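/*
 * Copy the real (non-delalloc) extent records from the incore extent list
 * into the new leaf block.
 */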
748 for_each_xfs_iext(ifp, &icur, &rec) {
749 if (isnullstartblock(rec.br_startblock))
751 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
752 xfs_bmbt_disk_set_all(arp, &rec);
755 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
756 xfs_btree_set_numrecs(ablock, cnt);
759 * Fill in the root key and pointer.
761 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
762 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
763 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
764 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
765 be16_to_cpu(block->bb_level)));
766 *pp = cpu_to_be64(args.fsbno);
769 * Do all this logging at the end so that
770 * the root is at the right level.
772 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
773 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
774 ASSERT(*curp == NULL);
776 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
781 * Convert a local file to an extents file.
782 * This code is out of bounds for data forks of regular files,
783 * since the file data needs to get logged so things will stay consistent.
784 * (The bmap-level manipulations are ok, though).
787 xfs_bmap_local_to_extents_empty(
788 struct xfs_inode *ip,
791 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
793 ASSERT(whichfork != XFS_COW_FORK);
794 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
795 ASSERT(ifp->if_bytes == 0);
796 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
798 xfs_bmap_forkoff_reset(ip, whichfork);
799 ifp->if_flags &= ~XFS_IFINLINE;
800 ifp->if_flags |= XFS_IFEXTENTS;
801 ifp->if_u1.if_root = NULL;
803 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
807 STATIC int /* error */
808 xfs_bmap_local_to_extents(
809 xfs_trans_t *tp, /* transaction pointer */
810 xfs_inode_t *ip, /* incore inode pointer */
811 xfs_extlen_t total, /* total blocks needed by transaction */
812 int *logflagsp, /* inode logging flags */
814 void (*init_fn)(struct xfs_trans *tp,
816 struct xfs_inode *ip,
817 struct xfs_ifork *ifp))
820 int flags; /* logging flags returned */
821 struct xfs_ifork *ifp; /* inode fork pointer */
822 xfs_alloc_arg_t args; /* allocation arguments */
823 xfs_buf_t *bp; /* buffer for extent block */
824 struct xfs_bmbt_irec rec;
825 struct xfs_iext_cursor icur;
828 * We don't want to deal with the case of keeping inode data inline yet.
829 * So passing in the data fork of a regular inode is invalid.
831 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
832 ifp = XFS_IFORK_PTR(ip, whichfork);
833 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
835 if (!ifp->if_bytes) {
836 xfs_bmap_local_to_extents_empty(ip, whichfork);
837 flags = XFS_ILOG_CORE;
843 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
844 memset(&args, 0, sizeof(args));
846 args.mp = ip->i_mount;
847 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
849 * Allocate a block. We know we need only one, since the
850 * file currently fits in an inode.
852 if (tp->t_firstblock == NULLFSBLOCK) {
853 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
854 args.type = XFS_ALLOCTYPE_START_BNO;
856 args.fsbno = tp->t_firstblock;
857 args.type = XFS_ALLOCTYPE_NEAR_BNO;
860 args.minlen = args.maxlen = args.prod = 1;
861 error = xfs_alloc_vextent(&args);
865 /* Can't fail, the space was reserved. */
866 ASSERT(args.fsbno != NULLFSBLOCK);
867 ASSERT(args.len == 1);
868 tp->t_firstblock = args.fsbno;
869 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
872 * Initialize the block, copy the data and log the remote buffer.
874 * The callout is responsible for logging because the remote format
875 * might differ from the local format and thus we don't know how much to
876 * log here. Note that init_fn must also set the buffer log item type
879 init_fn(tp, bp, ip, ifp);
881 /* account for the change in fork size */
882 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
883 xfs_bmap_local_to_extents_empty(ip, whichfork);
884 flags |= XFS_ILOG_CORE;
886 ifp->if_u1.if_root = NULL;
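/* Build the new single-block extent and insert it into the incore extent tree. */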
890 rec.br_startblock = args.fsbno;
891 rec.br_blockcount = 1;
892 rec.br_state = XFS_EXT_NORM;
893 xfs_iext_first(ifp, &icur);
894 xfs_iext_insert(ip, &icur, &rec, 0);
896 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
897 ip->i_d.di_nblocks = 1;
898 xfs_trans_mod_dquot_byino(tp, ip,
899 XFS_TRANS_DQ_BCOUNT, 1L);
900 flags |= xfs_ilog_fext(whichfork);
908 * Called from xfs_bmap_add_attrfork to handle btree format files.
910 STATIC int /* error */
911 xfs_bmap_add_attrfork_btree(
912 xfs_trans_t *tp, /* transaction pointer */
913 xfs_inode_t *ip, /* incore inode pointer */
914 int *flags) /* inode logging flags */
916 xfs_btree_cur_t *cur; /* btree cursor */
917 int error; /* error return value */
918 xfs_mount_t *mp; /* file system mount struct */
919 int stat; /* newroot status */
922 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
923 *flags |= XFS_ILOG_DBROOT;
925 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
926 error = xfs_bmbt_lookup_first(cur, &stat);
929 /* must be at least one entry */
930 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
931 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
934 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
937 cur->bc_private.b.allocated = 0;
938 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
942 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
947 * Called from xfs_bmap_add_attrfork to handle extents format files.
949 STATIC int /* error */
950 xfs_bmap_add_attrfork_extents(
951 struct xfs_trans *tp, /* transaction pointer */
952 struct xfs_inode *ip, /* incore inode pointer */
953 int *flags) /* inode logging flags */
955 xfs_btree_cur_t *cur; /* bmap btree cursor */
956 int error; /* error return value */
958 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
961 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
964 cur->bc_private.b.allocated = 0;
965 xfs_btree_del_cursor(cur, error);
971 * Called from xfs_bmap_add_attrfork to handle local format files. Each
972 * different data fork content type needs a different callout to do the
973 * conversion. Some are basic and only require special block initialisation
974 * callouts for the data formatting; others (directories) are so specialised they
975 * handle everything themselves.
977 * XXX (dgc): investigate whether directory conversion can use the generic
978 * formatting callout. It should be possible - it's just a very complex
981 STATIC int /* error */
982 xfs_bmap_add_attrfork_local(
983 struct xfs_trans *tp, /* transaction pointer */
984 struct xfs_inode *ip, /* incore inode pointer */
985 int *flags) /* inode logging flags */
987 struct xfs_da_args dargs; /* args for dir/attr code */
989 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
992 if (S_ISDIR(VFS_I(ip)->i_mode)) {
993 memset(&dargs, 0, sizeof(dargs));
994 dargs.geo = ip->i_mount->m_dir_geo;
996 dargs.total = dargs.geo->fsbcount;
997 dargs.whichfork = XFS_DATA_FORK;
999 return xfs_dir2_sf_to_block(&dargs);
1002 if (S_ISLNK(VFS_I(ip)->i_mode))
1003 return xfs_bmap_local_to_extents(tp, ip, 1, flags,
1005 xfs_symlink_local_to_remote);
1007 /* should only be called for types that support local format data */
1009 return -EFSCORRUPTED;
1013 * Convert inode from non-attributed to attributed.
1014 * Must not be in a transaction, ip must not be locked.
1016 int /* error code */
1017 xfs_bmap_add_attrfork(
1018 xfs_inode_t *ip, /* incore inode pointer */
1019 int size, /* space new attribute needs */
1020 int rsvd) /* xact may use reserved blks */
1022 xfs_mount_t *mp; /* mount structure */
1023 xfs_trans_t *tp; /* transaction pointer */
1024 int blks; /* space reservation */
1025 int version = 1; /* superblock attr version */
1026 int logflags; /* logging flags */
1027 int error; /* error return value */
1029 ASSERT(XFS_IFORK_Q(ip) == 0);
1032 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1034 blks = XFS_ADDAFORK_SPACE_RES(mp);
1036 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
1037 rsvd ? XFS_TRANS_RESERVE : 0, &tp);
1041 xfs_ilock(ip, XFS_ILOCK_EXCL);
1042 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1043 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1044 XFS_QMOPT_RES_REGBLKS);
1047 if (XFS_IFORK_Q(ip))
1049 if (ip->i_d.di_anextents != 0) {
1050 error = -EFSCORRUPTED;
1053 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1055 * For inodes coming from pre-6.2 filesystems.
1057 ASSERT(ip->i_d.di_aformat == 0);
1058 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1061 xfs_trans_ijoin(tp, ip, 0);
1062 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
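/*
 * Work out where the attribute fork will start (di_forkoff), based on the
 * data fork format and the size of the new attribute.
 */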
1064 switch (ip->i_d.di_format) {
1065 case XFS_DINODE_FMT_DEV:
1066 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
1068 case XFS_DINODE_FMT_LOCAL:
1069 case XFS_DINODE_FMT_EXTENTS:
1070 case XFS_DINODE_FMT_BTREE:
1071 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1072 if (!ip->i_d.di_forkoff)
1073 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
1074 else if (mp->m_flags & XFS_MOUNT_ATTR2)
1083 ASSERT(ip->i_afp == NULL);
1084 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
1085 ip->i_afp->if_flags = XFS_IFEXTENTS;
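/*
 * Convert the data fork if it no longer fits below the new fork offset:
 * a local fork may become extents, an extent fork may become a btree, and
 * a btree fork may need a new root block.
 */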
1087 switch (ip->i_d.di_format) {
1088 case XFS_DINODE_FMT_LOCAL:
1089 error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
1091 case XFS_DINODE_FMT_EXTENTS:
1092 error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
1094 case XFS_DINODE_FMT_BTREE:
1095 error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
1102 xfs_trans_log_inode(tp, ip, logflags);
1105 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1106 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1107 bool log_sb = false;
1109 spin_lock(&mp->m_sb_lock);
1110 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1111 xfs_sb_version_addattr(&mp->m_sb);
1114 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1115 xfs_sb_version_addattr2(&mp->m_sb);
1118 spin_unlock(&mp->m_sb_lock);
1123 error = xfs_trans_commit(tp);
1124 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1128 xfs_trans_cancel(tp);
1129 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1134 * Internal and external extent tree search functions.
1138 * Read in extents from a btree-format inode.
1142 struct xfs_trans *tp,
1143 struct xfs_inode *ip,
1146 struct xfs_mount *mp = ip->i_mount;
1147 int state = xfs_bmap_fork_to_state(whichfork);
1148 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1149 xfs_extnum_t nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
1150 struct xfs_btree_block *block = ifp->if_broot;
1151 struct xfs_iext_cursor icur;
1152 struct xfs_bmbt_irec new;
1160 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1162 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
1163 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
1164 return -EFSCORRUPTED;
1168 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1170 level = be16_to_cpu(block->bb_level);
1172 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1173 bno = be64_to_cpu(*pp);
1176 * Go down the tree until leaf level is reached, following the first
1177 * pointer (leftmost) at each level.
1179 while (level-- > 0) {
1180 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1181 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1184 block = XFS_BUF_TO_BLOCK(bp);
1187 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1188 bno = be64_to_cpu(*pp);
1189 XFS_WANT_CORRUPTED_GOTO(mp,
1190 xfs_verify_fsbno(mp, bno), out_brelse);
1191 xfs_trans_brelse(tp, bp);
1195 * Here with bp and block set to the leftmost leaf node in the tree.
1198 xfs_iext_first(ifp, &icur);
1201 * Loop over all leaf nodes. Copy information to the extent records.
1204 xfs_bmbt_rec_t *frp;
1205 xfs_fsblock_t nextbno;
1206 xfs_extnum_t num_recs;
1208 num_recs = xfs_btree_get_numrecs(block);
1209 if (unlikely(i + num_recs > nextents)) {
1210 xfs_warn(ip->i_mount,
1211 "corrupt dinode %Lu, (btree extents).",
1212 (unsigned long long) ip->i_ino);
1213 xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1214 __func__, block, sizeof(*block),
1216 error = -EFSCORRUPTED;
1220 * Read-ahead the next leaf block, if any.
1222 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
1223 if (nextbno != NULLFSBLOCK)
1224 xfs_btree_reada_bufl(mp, nextbno, 1,
1227 * Copy records into the extent records.
1229 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1230 for (j = 0; j < num_recs; j++, frp++, i++) {
1233 xfs_bmbt_disk_get_all(frp, &new);
1234 fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1236 error = -EFSCORRUPTED;
1237 xfs_inode_verifier_error(ip, error,
1238 "xfs_iread_extents(2)",
1239 frp, sizeof(*frp), fa);
1242 xfs_iext_insert(ip, &icur, &new, state);
1243 trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
1244 xfs_iext_next(ifp, &icur);
1246 xfs_trans_brelse(tp, bp);
1249 * If we've reached the end, stop.
1251 if (bno == NULLFSBLOCK)
1253 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1254 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1257 block = XFS_BUF_TO_BLOCK(bp);
1260 if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
1261 error = -EFSCORRUPTED;
1264 ASSERT(i == xfs_iext_count(ifp));
1266 ifp->if_flags |= XFS_IFEXTENTS;
1270 xfs_trans_brelse(tp, bp);
1272 xfs_iext_destroy(ifp);
1277 * Returns the file-relative block number of the first unused block(s) in the given
1278 * fork with at least "len" logically contiguous blocks free. This is the
1279 * lowest-address hole if the fork has holes, else the first block past the end
1280 * of the fork. Returns 0 if the fork is currently local (in-inode).
1283 xfs_bmap_first_unused(
1284 struct xfs_trans *tp, /* transaction pointer */
1285 struct xfs_inode *ip, /* incore inode */
1286 xfs_extlen_t len, /* size of hole to find */
1287 xfs_fileoff_t *first_unused, /* unused block */
1288 int whichfork) /* data or attr fork */
1290 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1291 struct xfs_bmbt_irec got;
1292 struct xfs_iext_cursor icur;
1293 xfs_fileoff_t lastaddr = 0;
1294 xfs_fileoff_t lowest, max;
1297 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1298 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1299 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
1301 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1306 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1307 error = xfs_iread_extents(tp, ip, whichfork);
1312 lowest = max = *first_unused;
1313 for_each_xfs_iext(ifp, &icur, &got) {
1315 * See if the hole before this extent will work.
1317 if (got.br_startoff >= lowest + len &&
1318 got.br_startoff - max >= len)
1320 lastaddr = got.br_startoff + got.br_blockcount;
1321 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1324 *first_unused = max;
1329 * Returns the file-relative block number of the last block - 1 before
1330 * last_block (input value) in the file.
1331 * This is not based on i_size; it is based on the extent records.
1332 * Returns 0 for local files, as they do not have extent records.
1335 xfs_bmap_last_before(
1336 struct xfs_trans *tp, /* transaction pointer */
1337 struct xfs_inode *ip, /* incore inode */
1338 xfs_fileoff_t *last_block, /* last block */
1339 int whichfork) /* data or attr fork */
1341 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1342 struct xfs_bmbt_irec got;
1343 struct xfs_iext_cursor icur;
1346 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
1347 case XFS_DINODE_FMT_LOCAL:
1350 case XFS_DINODE_FMT_BTREE:
1351 case XFS_DINODE_FMT_EXTENTS:
1357 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1358 error = xfs_iread_extents(tp, ip, whichfork);
1363 if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
1369 xfs_bmap_last_extent(
1370 struct xfs_trans *tp,
1371 struct xfs_inode *ip,
1373 struct xfs_bmbt_irec *rec,
1376 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1377 struct xfs_iext_cursor icur;
1380 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1381 error = xfs_iread_extents(tp, ip, whichfork);
1386 xfs_iext_last(ifp, &icur);
1387 if (!xfs_iext_get_extent(ifp, &icur, rec))
1395 * Check the last inode extent to determine whether this allocation will result
1396 * in blocks being allocated at the end of the file. When we allocate new data
1397 * blocks at the end of the file which do not start at the previous data block,
1398 * we will try to align the new blocks at stripe unit boundaries.
1400 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1401 * at, or past the EOF.
1405 struct xfs_bmalloca *bma,
1408 struct xfs_bmbt_irec rec;
1413 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1424 * Check if we are allocating at or past the last extent, or at least into
1425 * the last delayed allocated extent.
1427 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1428 (bma->offset >= rec.br_startoff &&
1429 isnullstartblock(rec.br_startblock));
1434 * Returns the file-relative block number of the first block past eof in
1435 * the file. This is not based on i_size; it is based on the extent records.
1436 * Returns 0 for local files, as they do not have extent records.
1439 xfs_bmap_last_offset(
1440 struct xfs_inode *ip,
1441 xfs_fileoff_t *last_block,
1444 struct xfs_bmbt_irec rec;
1450 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1453 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1454 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1457 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1458 if (error || is_empty)
1461 *last_block = rec.br_startoff + rec.br_blockcount;
1466 * Returns whether the selected fork of the inode has exactly one
1467 * block or not. For the data fork we check this matches di_size,
1468 * implying the file's range is 0..bsize-1.
1470 int /* 1=>1 block, 0=>otherwise */
1472 xfs_inode_t *ip, /* incore inode */
1473 int whichfork) /* data or attr fork */
1475 struct xfs_ifork *ifp; /* inode fork pointer */
1476 int rval; /* return value */
1477 xfs_bmbt_irec_t s; /* internal version of extent */
1478 struct xfs_iext_cursor icur;
1481 if (whichfork == XFS_DATA_FORK)
1482 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1484 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1486 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1488 ifp = XFS_IFORK_PTR(ip, whichfork);
1489 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1490 xfs_iext_first(ifp, &icur);
1491 xfs_iext_get_extent(ifp, &icur, &s);
1492 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1493 if (rval && whichfork == XFS_DATA_FORK)
1494 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
1499 * Extent tree manipulation functions used during allocation.
1503 * Convert a delayed allocation to a real allocation.
1505 STATIC int /* error */
1506 xfs_bmap_add_extent_delay_real(
1507 struct xfs_bmalloca *bma,
1510 struct xfs_bmbt_irec *new = &bma->got;
1511 int error; /* error return value */
1512 int i; /* temp state */
1513 struct xfs_ifork *ifp; /* inode fork pointer */
1514 xfs_fileoff_t new_endoff; /* end offset of new entry */
1515 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1516 /* left is 0, right is 1, prev is 2 */
1517 int rval=0; /* return value (logging flags) */
1518 int state = xfs_bmap_fork_to_state(whichfork);
1519 xfs_filblks_t da_new; /* new count del alloc blocks used */
1520 xfs_filblks_t da_old; /* old count del alloc blocks used */
1521 xfs_filblks_t temp=0; /* value for da_new calculations */
1522 int tmp_rval; /* partial logging flags */
1523 struct xfs_mount *mp;
1524 xfs_extnum_t *nextents;
1525 struct xfs_bmbt_irec old;
1527 mp = bma->ip->i_mount;
1528 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1529 ASSERT(whichfork != XFS_ATTR_FORK);
1530 nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
1531 &bma->ip->i_d.di_nextents);
1533 ASSERT(!isnullstartblock(new->br_startblock));
1535 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1537 XFS_STATS_INC(mp, xs_add_exlist);
1544 * Set up a bunch of variables to make the tests simpler.
1546 xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1547 new_endoff = new->br_startoff + new->br_blockcount;
1548 ASSERT(isnullstartblock(PREV.br_startblock));
1549 ASSERT(PREV.br_startoff <= new->br_startoff);
1550 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
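/*
 * A delalloc extent's startblock field encodes its indirect block
 * reservation; remember the old value so the difference can be returned
 * to the free block count once the real allocation is recorded.
 */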
1552 da_old = startblockval(PREV.br_startblock);
1556 * Set flags determining what part of the previous delayed allocation
1557 * extent is being replaced by a real allocation.
1559 if (PREV.br_startoff == new->br_startoff)
1560 state |= BMAP_LEFT_FILLING;
1561 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1562 state |= BMAP_RIGHT_FILLING;
1565 * Check and set flags if this segment has a left neighbor.
1566 * Don't set contiguous if the combined extent would be too large.
1568 if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1569 state |= BMAP_LEFT_VALID;
1570 if (isnullstartblock(LEFT.br_startblock))
1571 state |= BMAP_LEFT_DELAY;
1574 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1575 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1576 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1577 LEFT.br_state == new->br_state &&
1578 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1579 state |= BMAP_LEFT_CONTIG;
1582 * Check and set flags if this segment has a right neighbor.
1583 * Don't set contiguous if the combined extent would be too large.
1584 * Also check for all-three-contiguous being too large.
1586 if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1587 state |= BMAP_RIGHT_VALID;
1588 if (isnullstartblock(RIGHT.br_startblock))
1589 state |= BMAP_RIGHT_DELAY;
1592 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1593 new_endoff == RIGHT.br_startoff &&
1594 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1595 new->br_state == RIGHT.br_state &&
1596 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1597 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1598 BMAP_RIGHT_FILLING)) !=
1599 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1600 BMAP_RIGHT_FILLING) ||
1601 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1603 state |= BMAP_RIGHT_CONTIG;
1607 * Switch out based on the FILLING and CONTIG state bits.
1609 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1610 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1611 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1612 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1614 * Filling in all of a previously delayed allocation extent.
1615 * The left and right neighbors are both contiguous with new.
1617 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1619 xfs_iext_remove(bma->ip, &bma->icur, state);
1620 xfs_iext_remove(bma->ip, &bma->icur, state);
1621 xfs_iext_prev(ifp, &bma->icur);
1622 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1625 if (bma->cur == NULL)
1626 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1628 rval = XFS_ILOG_CORE;
1629 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1632 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1633 error = xfs_btree_delete(bma->cur, &i);
1636 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1637 error = xfs_btree_decrement(bma->cur, 0, &i);
1640 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1641 error = xfs_bmbt_update(bma->cur, &LEFT);
1647 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1649 * Filling in all of a previously delayed allocation extent.
1650 * The left neighbor is contiguous, the right is not.
1653 LEFT.br_blockcount += PREV.br_blockcount;
1655 xfs_iext_remove(bma->ip, &bma->icur, state);
1656 xfs_iext_prev(ifp, &bma->icur);
1657 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1659 if (bma->cur == NULL)
1660 rval = XFS_ILOG_DEXT;
1663 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1666 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1667 error = xfs_bmbt_update(bma->cur, &LEFT);
1673 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1675 * Filling in all of a previously delayed allocation extent.
1676 * The right neighbor is contiguous, the left is not.
1678 PREV.br_startblock = new->br_startblock;
1679 PREV.br_blockcount += RIGHT.br_blockcount;
1681 xfs_iext_next(ifp, &bma->icur);
1682 xfs_iext_remove(bma->ip, &bma->icur, state);
1683 xfs_iext_prev(ifp, &bma->icur);
1684 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1686 if (bma->cur == NULL)
1687 rval = XFS_ILOG_DEXT;
1690 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1693 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1694 error = xfs_bmbt_update(bma->cur, &PREV);
1700 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1702 * Filling in all of a previously delayed allocation extent.
1703 * Neither the left nor right neighbors are contiguous with
1706 PREV.br_startblock = new->br_startblock;
1707 PREV.br_state = new->br_state;
1708 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1711 if (bma->cur == NULL)
1712 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1714 rval = XFS_ILOG_CORE;
1715 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1718 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1719 error = xfs_btree_insert(bma->cur, &i);
1722 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1726 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1728 * Filling in the first part of a previous delayed allocation.
1729 * The left neighbor is contiguous.
1732 temp = PREV.br_blockcount - new->br_blockcount;
1733 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1734 startblockval(PREV.br_startblock));
1736 LEFT.br_blockcount += new->br_blockcount;
1738 PREV.br_blockcount = temp;
1739 PREV.br_startoff += new->br_blockcount;
1740 PREV.br_startblock = nullstartblock(da_new);
1742 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1743 xfs_iext_prev(ifp, &bma->icur);
1744 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1746 if (bma->cur == NULL)
1747 rval = XFS_ILOG_DEXT;
1750 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1753 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1754 error = xfs_bmbt_update(bma->cur, &LEFT);
1760 case BMAP_LEFT_FILLING:
1762 * Filling in the first part of a previous delayed allocation.
1763 * The left neighbor is not contiguous.
1765 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1767 if (bma->cur == NULL)
1768 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1770 rval = XFS_ILOG_CORE;
1771 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1774 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1775 error = xfs_btree_insert(bma->cur, &i);
1778 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1781 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1782 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1783 &bma->cur, 1, &tmp_rval, whichfork);
1789 temp = PREV.br_blockcount - new->br_blockcount;
1790 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1791 startblockval(PREV.br_startblock) -
1792 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1794 PREV.br_startoff = new_endoff;
1795 PREV.br_blockcount = temp;
1796 PREV.br_startblock = nullstartblock(da_new);
1797 xfs_iext_next(ifp, &bma->icur);
1798 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1799 xfs_iext_prev(ifp, &bma->icur);
1802 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1804 * Filling in the last part of a previous delayed allocation.
1805 * The right neighbor is contiguous with the new allocation.
1808 RIGHT.br_startoff = new->br_startoff;
1809 RIGHT.br_startblock = new->br_startblock;
1810 RIGHT.br_blockcount += new->br_blockcount;
1812 if (bma->cur == NULL)
1813 rval = XFS_ILOG_DEXT;
1816 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1819 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1820 error = xfs_bmbt_update(bma->cur, &RIGHT);
1825 temp = PREV.br_blockcount - new->br_blockcount;
1826 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1827 startblockval(PREV.br_startblock));
1829 PREV.br_blockcount = temp;
1830 PREV.br_startblock = nullstartblock(da_new);
1832 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1833 xfs_iext_next(ifp, &bma->icur);
1834 xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1837 case BMAP_RIGHT_FILLING:
1839 * Filling in the last part of a previous delayed allocation.
1840 * The right neighbor is not contiguous.
1842 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1844 if (bma->cur == NULL)
1845 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1847 rval = XFS_ILOG_CORE;
1848 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1851 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1852 error = xfs_btree_insert(bma->cur, &i);
1855 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1858 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1859 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1860 &bma->cur, 1, &tmp_rval, whichfork);
1866 temp = PREV.br_blockcount - new->br_blockcount;
1867 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1868 startblockval(PREV.br_startblock) -
1869 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1871 PREV.br_startblock = nullstartblock(da_new);
1872 PREV.br_blockcount = temp;
1873 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1874 xfs_iext_next(ifp, &bma->icur);
1879 * Filling in the middle part of a previous delayed allocation.
1880 * Contiguity is impossible here.
1881 * This case is avoided almost all the time.
1883 * We start with a delayed allocation:
1885 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1888 * and we are allocating:
1889 * +rrrrrrrrrrrrrrrrr+
1892 * and we set it up for insertion as:
1893 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1895 * PREV @ idx LEFT RIGHT
1896 * inserted at idx + 1
1900 /* LEFT is the new middle */
1903 /* RIGHT is the new right */
1904 RIGHT.br_state = PREV.br_state;
1905 RIGHT.br_startoff = new_endoff;
1906 RIGHT.br_blockcount =
1907 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1908 RIGHT.br_startblock =
1909 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1910 RIGHT.br_blockcount));
1913 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1914 PREV.br_startblock =
1915 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1916 PREV.br_blockcount));
1917 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1919 xfs_iext_next(ifp, &bma->icur);
1920 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1921 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1924 if (bma->cur == NULL)
1925 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1927 rval = XFS_ILOG_CORE;
1928 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1931 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1932 error = xfs_btree_insert(bma->cur, &i);
1935 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1938 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1939 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1940 &bma->cur, 1, &tmp_rval, whichfork);
1946 da_new = startblockval(PREV.br_startblock) +
1947 startblockval(RIGHT.br_startblock);
1950 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1951 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1952 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1953 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1954 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1955 case BMAP_LEFT_CONTIG:
1956 case BMAP_RIGHT_CONTIG:
1958 * These cases are all impossible.
1963 /* add reverse mapping unless caller opted out */
1964 if (!(bma->flags & XFS_BMAPI_NORMAP)) {
1965 error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1970 /* convert to a btree if necessary */
1971 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1972 int tmp_logflags; /* partial log flag return val */
1974 ASSERT(bma->cur == NULL);
1975 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1976 &bma->cur, da_old > 0, &tmp_logflags,
1978 bma->logflags |= tmp_logflags;
1984 da_new += bma->cur->bc_private.b.allocated;
1985 bma->cur->bc_private.b.allocated = 0;
1988 /* adjust for changes in reserved delayed indirect blocks */
1989 if (da_new != da_old) {
1990 ASSERT(state == 0 || da_new < da_old);
1991 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
1995 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
1997 if (whichfork != XFS_COW_FORK)
1998 bma->logflags |= rval;
2006 * Convert an unwritten allocation to a real allocation or vice versa.
2008 STATIC int /* error */
2009 xfs_bmap_add_extent_unwritten_real(
2010 struct xfs_trans *tp,
2011 xfs_inode_t *ip, /* incore inode pointer */
2013 struct xfs_iext_cursor *icur,
2014 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2015 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2016 int *logflagsp) /* inode logging flags */
2018 xfs_btree_cur_t *cur; /* btree cursor */
2019 int error; /* error return value */
2020 int i; /* temp state */
2021 struct xfs_ifork *ifp; /* inode fork pointer */
2022 xfs_fileoff_t new_endoff; /* end offset of new entry */
2023 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2024 /* left is 0, right is 1, prev is 2 */
2025 int rval=0; /* return value (logging flags) */
2026 int state = xfs_bmap_fork_to_state(whichfork);
2027 struct xfs_mount *mp = ip->i_mount;
2028 struct xfs_bmbt_irec old;
2033 ifp = XFS_IFORK_PTR(ip, whichfork);
2035 ASSERT(!isnullstartblock(new->br_startblock));
2037 XFS_STATS_INC(mp, xs_add_exlist);
2044 * Set up a bunch of variables to make the tests simpler.
2047 xfs_iext_get_extent(ifp, icur, &PREV);
2048 ASSERT(new->br_state != PREV.br_state);
2049 new_endoff = new->br_startoff + new->br_blockcount;
2050 ASSERT(PREV.br_startoff <= new->br_startoff);
2051 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2054 * Set flags determining what part of the previous oldext allocation
2055 * extent is being replaced by a newext allocation.
2057 if (PREV.br_startoff == new->br_startoff)
2058 state |= BMAP_LEFT_FILLING;
2059 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2060 state |= BMAP_RIGHT_FILLING;
2063 * Check and set flags if this segment has a left neighbor.
2064 * Don't set contiguous if the combined extent would be too large.
2066 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2067 state |= BMAP_LEFT_VALID;
2068 if (isnullstartblock(LEFT.br_startblock))
2069 state |= BMAP_LEFT_DELAY;
2072 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2073 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2074 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2075 LEFT.br_state == new->br_state &&
2076 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2077 state |= BMAP_LEFT_CONTIG;
2080 * Check and set flags if this segment has a right neighbor.
2081 * Don't set contiguous if the combined extent would be too large.
2082 * Also check for all-three-contiguous being too large.
2084 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2085 state |= BMAP_RIGHT_VALID;
2086 if (isnullstartblock(RIGHT.br_startblock))
2087 state |= BMAP_RIGHT_DELAY;
2090 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2091 new_endoff == RIGHT.br_startoff &&
2092 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2093 new->br_state == RIGHT.br_state &&
2094 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2095 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2096 BMAP_RIGHT_FILLING)) !=
2097 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2098 BMAP_RIGHT_FILLING) ||
2099 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2101 state |= BMAP_RIGHT_CONTIG;
2104 * Switch out based on the FILLING and CONTIG state bits.
2106 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2107 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2108 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2109 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2111 * Setting all of a previous oldext extent to newext.
2112 * The left and right neighbors are both contiguous with new.
2114 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2116 xfs_iext_remove(ip, icur, state);
2117 xfs_iext_remove(ip, icur, state);
2118 xfs_iext_prev(ifp, icur);
2119 xfs_iext_update_extent(ip, state, icur, &LEFT);
2120 XFS_IFORK_NEXT_SET(ip, whichfork,
2121 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2123 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2125 rval = XFS_ILOG_CORE;
2126 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2129 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2130 if ((error = xfs_btree_delete(cur, &i)))
2132 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2133 if ((error = xfs_btree_decrement(cur, 0, &i)))
2135 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2136 if ((error = xfs_btree_delete(cur, &i)))
2138 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2139 if ((error = xfs_btree_decrement(cur, 0, &i)))
2141 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2142 error = xfs_bmbt_update(cur, &LEFT);
2148 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2150 * Setting all of a previous oldext extent to newext.
2151 * The left neighbor is contiguous, the right is not.
2153 LEFT.br_blockcount += PREV.br_blockcount;
2155 xfs_iext_remove(ip, icur, state);
2156 xfs_iext_prev(ifp, icur);
2157 xfs_iext_update_extent(ip, state, icur, &LEFT);
2158 XFS_IFORK_NEXT_SET(ip, whichfork,
2159 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2161 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2163 rval = XFS_ILOG_CORE;
2164 error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2167 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2168 if ((error = xfs_btree_delete(cur, &i)))
2170 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2171 if ((error = xfs_btree_decrement(cur, 0, &i)))
2173 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2174 error = xfs_bmbt_update(cur, &LEFT);
2180 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2182 * Setting all of a previous oldext extent to newext.
2183 * The right neighbor is contiguous, the left is not.
2185 PREV.br_blockcount += RIGHT.br_blockcount;
2186 PREV.br_state = new->br_state;
2188 xfs_iext_next(ifp, icur);
2189 xfs_iext_remove(ip, icur, state);
2190 xfs_iext_prev(ifp, icur);
2191 xfs_iext_update_extent(ip, state, icur, &PREV);
2193 XFS_IFORK_NEXT_SET(ip, whichfork,
2194 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2196 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2198 rval = XFS_ILOG_CORE;
2199 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2202 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2203 if ((error = xfs_btree_delete(cur, &i)))
2205 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2206 if ((error = xfs_btree_decrement(cur, 0, &i)))
2208 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2209 error = xfs_bmbt_update(cur, &PREV);
2215 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2217 * Setting all of a previous oldext extent to newext.
2218 * Neither the left nor right neighbors are contiguous with the new one.
2221 PREV.br_state = new->br_state;
2222 xfs_iext_update_extent(ip, state, icur, &PREV);
2225 rval = XFS_ILOG_DEXT;
2228 error = xfs_bmbt_lookup_eq(cur, new, &i);
2231 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2232 error = xfs_bmbt_update(cur, &PREV);
2238 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2240 * Setting the first part of a previous oldext extent to newext.
2241 * The left neighbor is contiguous.
2243 LEFT.br_blockcount += new->br_blockcount;
2246 PREV.br_startoff += new->br_blockcount;
2247 PREV.br_startblock += new->br_blockcount;
2248 PREV.br_blockcount -= new->br_blockcount;
2250 xfs_iext_update_extent(ip, state, icur, &PREV);
2251 xfs_iext_prev(ifp, icur);
2252 xfs_iext_update_extent(ip, state, icur, &LEFT);
2255 rval = XFS_ILOG_DEXT;
2258 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2261 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2262 error = xfs_bmbt_update(cur, &PREV);
2265 error = xfs_btree_decrement(cur, 0, &i);
2268 error = xfs_bmbt_update(cur, &LEFT);
2274 case BMAP_LEFT_FILLING:
2276 * Setting the first part of a previous oldext extent to newext.
2277 * The left neighbor is not contiguous.
2280 PREV.br_startoff += new->br_blockcount;
2281 PREV.br_startblock += new->br_blockcount;
2282 PREV.br_blockcount -= new->br_blockcount;
2284 xfs_iext_update_extent(ip, state, icur, &PREV);
2285 xfs_iext_insert(ip, icur, new, state);
2286 XFS_IFORK_NEXT_SET(ip, whichfork,
2287 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2289 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2291 rval = XFS_ILOG_CORE;
2292 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2295 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2296 error = xfs_bmbt_update(cur, &PREV);
2299 cur->bc_rec.b = *new;
2300 if ((error = xfs_btree_insert(cur, &i)))
2302 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2306 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2308 * Setting the last part of a previous oldext extent to newext.
2309 * The right neighbor is contiguous with the new allocation.
2312 PREV.br_blockcount -= new->br_blockcount;
2314 RIGHT.br_startoff = new->br_startoff;
2315 RIGHT.br_startblock = new->br_startblock;
2316 RIGHT.br_blockcount += new->br_blockcount;
2318 xfs_iext_update_extent(ip, state, icur, &PREV);
2319 xfs_iext_next(ifp, icur);
2320 xfs_iext_update_extent(ip, state, icur, &RIGHT);
2323 rval = XFS_ILOG_DEXT;
2326 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2329 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2330 error = xfs_bmbt_update(cur, &PREV);
2333 error = xfs_btree_increment(cur, 0, &i);
2336 error = xfs_bmbt_update(cur, &RIGHT);
2342 case BMAP_RIGHT_FILLING:
2344 * Setting the last part of a previous oldext extent to newext.
2345 * The right neighbor is not contiguous.
2348 PREV.br_blockcount -= new->br_blockcount;
2350 xfs_iext_update_extent(ip, state, icur, &PREV);
2351 xfs_iext_next(ifp, icur);
2352 xfs_iext_insert(ip, icur, new, state);
2354 XFS_IFORK_NEXT_SET(ip, whichfork,
2355 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2357 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2359 rval = XFS_ILOG_CORE;
2360 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2363 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2364 error = xfs_bmbt_update(cur, &PREV);
2367 error = xfs_bmbt_lookup_eq(cur, new, &i);
2370 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2371 if ((error = xfs_btree_insert(cur, &i)))
2373 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2379 * Setting the middle part of a previous oldext extent to
2380 * newext. Contiguity is impossible here.
2381 * One extent becomes three extents.
2384 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2387 r[1].br_startoff = new_endoff;
2388 r[1].br_blockcount =
2389 old.br_startoff + old.br_blockcount - new_endoff;
2390 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2391 r[1].br_state = PREV.br_state;
2393 xfs_iext_update_extent(ip, state, icur, &PREV);
2394 xfs_iext_next(ifp, icur);
2395 xfs_iext_insert(ip, icur, &r[1], state);
2396 xfs_iext_insert(ip, icur, &r[0], state);
2398 XFS_IFORK_NEXT_SET(ip, whichfork,
2399 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2401 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2403 rval = XFS_ILOG_CORE;
2404 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2407 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2408 /* new right extent - oldext */
2409 error = xfs_bmbt_update(cur, &r[1]);
2412 /* new left extent - oldext */
2413 cur->bc_rec.b = PREV;
2414 if ((error = xfs_btree_insert(cur, &i)))
2416 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2418 * Reset the cursor to the position of the new extent
2419 * we are about to insert as we can't trust it after
2420 * the previous insert.
2422 error = xfs_bmbt_lookup_eq(cur, new, &i);
2425 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2426 /* new middle extent - newext */
2427 if ((error = xfs_btree_insert(cur, &i)))
2429 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2433 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2434 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2435 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2436 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2437 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2438 case BMAP_LEFT_CONTIG:
2439 case BMAP_RIGHT_CONTIG:
2441 * These cases are all impossible.
2446 /* update reverse mappings */
2447 error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2451 /* convert to a btree if necessary */
2452 if (xfs_bmap_needs_btree(ip, whichfork)) {
2453 int tmp_logflags; /* partial log flag return val */
2455 ASSERT(cur == NULL);
2456 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2457 &tmp_logflags, whichfork);
2458 *logflagsp |= tmp_logflags;
2463 /* clear out the allocated field, done with it now in any case. */
2465 cur->bc_private.b.allocated = 0;
2469 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2479 * Convert a hole to a delayed allocation.
2482 xfs_bmap_add_extent_hole_delay(
2483 xfs_inode_t *ip, /* incore inode pointer */
2485 struct xfs_iext_cursor *icur,
2486 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2488 struct xfs_ifork *ifp; /* inode fork pointer */
2489 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2490 xfs_filblks_t newlen=0; /* new indirect size */
2491 xfs_filblks_t oldlen=0; /* old indirect size */
2492 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2493 int state = xfs_bmap_fork_to_state(whichfork);
2494 xfs_filblks_t temp; /* temp for indirect calculations */
2496 ifp = XFS_IFORK_PTR(ip, whichfork);
2497 ASSERT(isnullstartblock(new->br_startblock));
2500 * Check and set flags if this segment has a left neighbor
2502 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2503 state |= BMAP_LEFT_VALID;
2504 if (isnullstartblock(left.br_startblock))
2505 state |= BMAP_LEFT_DELAY;
2509 * Check and set flags if the current (right) segment exists.
2510 * If it doesn't exist, we're converting the hole at end-of-file.
2512 if (xfs_iext_get_extent(ifp, icur, &right)) {
2513 state |= BMAP_RIGHT_VALID;
2514 if (isnullstartblock(right.br_startblock))
2515 state |= BMAP_RIGHT_DELAY;
2519 * Set contiguity flags on the left and right neighbors.
2520 * Don't let extents get too large, even if the pieces are contiguous.
2522 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2523 left.br_startoff + left.br_blockcount == new->br_startoff &&
2524 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2525 state |= BMAP_LEFT_CONTIG;
2527 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2528 new->br_startoff + new->br_blockcount == right.br_startoff &&
2529 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2530 (!(state & BMAP_LEFT_CONTIG) ||
2531 (left.br_blockcount + new->br_blockcount +
2532 right.br_blockcount <= MAXEXTLEN)))
2533 state |= BMAP_RIGHT_CONTIG;
2536 * Switch out based on the contiguity flags.
2538 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2539 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2541 * New allocation is contiguous with delayed allocations
2542 * on the left and on the right.
2543 * Merge all three into a single extent record.
2545 temp = left.br_blockcount + new->br_blockcount +
2546 right.br_blockcount;
2548 oldlen = startblockval(left.br_startblock) +
2549 startblockval(new->br_startblock) +
2550 startblockval(right.br_startblock);
2551 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2553 left.br_startblock = nullstartblock(newlen);
2554 left.br_blockcount = temp;
2556 xfs_iext_remove(ip, icur, state);
2557 xfs_iext_prev(ifp, icur);
2558 xfs_iext_update_extent(ip, state, icur, &left);
2561 case BMAP_LEFT_CONTIG:
2563 * New allocation is contiguous with a delayed allocation on the left.
2565 * Merge the new allocation with the left neighbor.
2567 temp = left.br_blockcount + new->br_blockcount;
2569 oldlen = startblockval(left.br_startblock) +
2570 startblockval(new->br_startblock);
2571 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2573 left.br_blockcount = temp;
2574 left.br_startblock = nullstartblock(newlen);
2576 xfs_iext_prev(ifp, icur);
2577 xfs_iext_update_extent(ip, state, icur, &left);
2580 case BMAP_RIGHT_CONTIG:
2582 * New allocation is contiguous with a delayed allocation on the right.
2584 * Merge the new allocation with the right neighbor.
2586 temp = new->br_blockcount + right.br_blockcount;
2587 oldlen = startblockval(new->br_startblock) +
2588 startblockval(right.br_startblock);
2589 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2591 right.br_startoff = new->br_startoff;
2592 right.br_startblock = nullstartblock(newlen);
2593 right.br_blockcount = temp;
2594 xfs_iext_update_extent(ip, state, icur, &right);
2599 * New allocation is not contiguous with another
2600 * delayed allocation.
2601 * Insert a new entry.
2603 oldlen = newlen = 0;
2604 xfs_iext_insert(ip, icur, new, state);
2607 if (oldlen != newlen) {
2608 ASSERT(oldlen > newlen);
2609 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2612 * Nothing to do for disk quota accounting here.
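/*
 * A simplified illustrative sketch of the pattern shared by the merge cases
 * above: sum the block counts, recompute the worst-case indirect-block
 * reservation for the merged length, clamp it to what the pieces already
 * held, and return the difference to the free block counter via
 * xfs_mod_fdblocks().  The helper below is a user-space model; "worst"
 * stands in for xfs_bmap_worst_indlen() applied to the merged length.
 */
#if 0
static unsigned long long
delalloc_merge_indlen(
	unsigned long long	oldlen,	/* indlen held by the pieces being merged */
	unsigned long long	worst,	/* worst-case indlen for the merged extent */
	unsigned long long	*freed)	/* out: blocks returned to the free pool */
{
	unsigned long long	newlen = worst < oldlen ? worst : oldlen;

	*freed = oldlen - newlen;	/* never negative: newlen <= oldlen */
	return newlen;			/* stored via nullstartblock(newlen) */
}
#endif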
2618 * Convert a hole to a real allocation.
2620 STATIC int /* error */
2621 xfs_bmap_add_extent_hole_real(
2622 struct xfs_trans *tp,
2623 struct xfs_inode *ip,
2625 struct xfs_iext_cursor *icur,
2626 struct xfs_btree_cur **curp,
2627 struct xfs_bmbt_irec *new,
2631 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2632 struct xfs_mount *mp = ip->i_mount;
2633 struct xfs_btree_cur *cur = *curp;
2634 int error; /* error return value */
2635 int i; /* temp state */
2636 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2637 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2638 int rval=0; /* return value (logging flags) */
2639 int state = xfs_bmap_fork_to_state(whichfork);
2640 struct xfs_bmbt_irec old;
2642 ASSERT(!isnullstartblock(new->br_startblock));
2643 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2645 XFS_STATS_INC(mp, xs_add_exlist);
2648 * Check and set flags if this segment has a left neighbor.
2650 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2651 state |= BMAP_LEFT_VALID;
2652 if (isnullstartblock(left.br_startblock))
2653 state |= BMAP_LEFT_DELAY;
2657 * Check and set flags if this segment has a current value.
2658 * Not true if we're inserting into the "hole" at eof.
2660 if (xfs_iext_get_extent(ifp, icur, &right)) {
2661 state |= BMAP_RIGHT_VALID;
2662 if (isnullstartblock(right.br_startblock))
2663 state |= BMAP_RIGHT_DELAY;
2667 * We're inserting a real allocation between "left" and "right".
2668 * Set the contiguity flags. Don't let extents get too large.
2670 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2671 left.br_startoff + left.br_blockcount == new->br_startoff &&
2672 left.br_startblock + left.br_blockcount == new->br_startblock &&
2673 left.br_state == new->br_state &&
2674 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2675 state |= BMAP_LEFT_CONTIG;
2677 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2678 new->br_startoff + new->br_blockcount == right.br_startoff &&
2679 new->br_startblock + new->br_blockcount == right.br_startblock &&
2680 new->br_state == right.br_state &&
2681 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2682 (!(state & BMAP_LEFT_CONTIG) ||
2683 left.br_blockcount + new->br_blockcount +
2684 right.br_blockcount <= MAXEXTLEN))
2685 state |= BMAP_RIGHT_CONTIG;
2689 * Select which case we're in here, and implement it.
2691 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2692 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2694 * New allocation is contiguous with real allocations on the
2695 * left and on the right.
2696 * Merge all three into a single extent record.
2698 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2700 xfs_iext_remove(ip, icur, state);
2701 xfs_iext_prev(ifp, icur);
2702 xfs_iext_update_extent(ip, state, icur, &left);
2704 XFS_IFORK_NEXT_SET(ip, whichfork,
2705 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2707 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2709 rval = XFS_ILOG_CORE;
2710 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2713 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2714 error = xfs_btree_delete(cur, &i);
2717 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2718 error = xfs_btree_decrement(cur, 0, &i);
2721 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2722 error = xfs_bmbt_update(cur, &left);
2728 case BMAP_LEFT_CONTIG:
2730 * New allocation is contiguous with a real allocation on the left.
2732 * Merge the new allocation with the left neighbor.
2735 left.br_blockcount += new->br_blockcount;
2737 xfs_iext_prev(ifp, icur);
2738 xfs_iext_update_extent(ip, state, icur, &left);
2741 rval = xfs_ilog_fext(whichfork);
2744 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2747 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2748 error = xfs_bmbt_update(cur, &left);
2754 case BMAP_RIGHT_CONTIG:
2756 * New allocation is contiguous with a real allocation on the right.
2758 * Merge the new allocation with the right neighbor.
2762 right.br_startoff = new->br_startoff;
2763 right.br_startblock = new->br_startblock;
2764 right.br_blockcount += new->br_blockcount;
2765 xfs_iext_update_extent(ip, state, icur, &right);
2768 rval = xfs_ilog_fext(whichfork);
2771 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2774 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2775 error = xfs_bmbt_update(cur, &right);
2783 * New allocation is not contiguous with another real allocation.
2785 * Insert a new entry.
2787 xfs_iext_insert(ip, icur, new, state);
2788 XFS_IFORK_NEXT_SET(ip, whichfork,
2789 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2791 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2793 rval = XFS_ILOG_CORE;
2794 error = xfs_bmbt_lookup_eq(cur, new, &i);
2797 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2798 error = xfs_btree_insert(cur, &i);
2801 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2806 /* add reverse mapping unless caller opted out */
2807 if (!(flags & XFS_BMAPI_NORMAP)) {
2808 error = xfs_rmap_map_extent(tp, ip, whichfork, new);
2813 /* convert to a btree if necessary */
2814 if (xfs_bmap_needs_btree(ip, whichfork)) {
2815 int tmp_logflags; /* partial log flag return val */
2817 ASSERT(cur == NULL);
2818 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2819 &tmp_logflags, whichfork);
2820 *logflagsp |= tmp_logflags;
2826 /* clear out the allocated field, done with it now in any case. */
2828 cur->bc_private.b.allocated = 0;
2830 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
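/*
 * A simplified illustrative sketch of the contiguity test used by both
 * hole-filling paths above.  A neighbour can be merged only if the file
 * offsets and disk blocks butt up exactly, the written/unwritten state
 * matches, and the combined length stays within the on-disk extent length
 * limit (MAXEXTLEN in XFS, modelled here as a plain parameter).  The struct
 * and function names are stand-ins, not kernel types.
 */
#if 0
struct irec_sketch {
	unsigned long long	startoff;	/* file offset, in fs blocks */
	unsigned long long	startblock;	/* disk block number */
	unsigned long long	blockcount;	/* length in fs blocks */
	int			unwritten;	/* extent state */
};

static int
left_is_contiguous(
	const struct irec_sketch	*left,
	const struct irec_sketch	*new,
	unsigned long long		maxextlen)
{
	return left->startoff + left->blockcount == new->startoff &&
	       left->startblock + left->blockcount == new->startblock &&
	       left->unwritten == new->unwritten &&
	       left->blockcount + new->blockcount <= maxextlen;
}
#endif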
2837 * Functions used in the extent read, allocate and remove paths
2841 * Adjust the size of the new extent based on di_extsize and rt extsize.
2844 xfs_bmap_extsize_align(
2846 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2847 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2848 xfs_extlen_t extsz, /* align to this extent size */
2849 int rt, /* is this a realtime inode? */
2850 int eof, /* is extent at end-of-file? */
2851 int delay, /* creating delalloc extent? */
2852 int convert, /* overwriting unwritten extent? */
2853 xfs_fileoff_t *offp, /* in/out: aligned offset */
2854 xfs_extlen_t *lenp) /* in/out: aligned length */
2856 xfs_fileoff_t orig_off; /* original offset */
2857 xfs_extlen_t orig_alen; /* original length */
2858 xfs_fileoff_t orig_end; /* original off+len */
2859 xfs_fileoff_t nexto; /* next file offset */
2860 xfs_fileoff_t prevo; /* previous file offset */
2861 xfs_fileoff_t align_off; /* temp for offset */
2862 xfs_extlen_t align_alen; /* temp for length */
2863 xfs_extlen_t temp; /* temp for calculations */
2868 orig_off = align_off = *offp;
2869 orig_alen = align_alen = *lenp;
2870 orig_end = orig_off + orig_alen;
2873 * If this request overlaps an existing extent, then don't
2874 * attempt to perform any additional alignment.
2876 if (!delay && !eof &&
2877 (orig_off >= gotp->br_startoff) &&
2878 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2883 * If the file offset is unaligned vs. the extent size
2884 * we need to align it. This will be possible unless
2885 * the file was previously written with a kernel that didn't
2886 * perform this alignment, or if a truncate shot us in the
2889 div_u64_rem(orig_off, extsz, &temp);
2895 /* Same adjustment for the end of the requested area. */
2896 temp = (align_alen % extsz);
2898 align_alen += extsz - temp;
2901 * For large extent hint sizes, the aligned extent might be larger than
2902 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
2903 * the length back under MAXEXTLEN. The outer allocation loops handle
2904 * short allocation just fine, so it is safe to do this. We only want to
2905 * do it when we are forced to, though, because it means more allocation
2906 * operations are required.
2908 while (align_alen > MAXEXTLEN)
2909 align_alen -= extsz;
2910 ASSERT(align_alen <= MAXEXTLEN);
2913 * If the previous block overlaps with this proposed allocation
2914 * then move the start forward without adjusting the length.
2916 if (prevp->br_startoff != NULLFILEOFF) {
2917 if (prevp->br_startblock == HOLESTARTBLOCK)
2918 prevo = prevp->br_startoff;
2920 prevo = prevp->br_startoff + prevp->br_blockcount;
2923 if (align_off != orig_off && align_off < prevo)
2926 * If the next block overlaps with this proposed allocation
2927 * then move the start back without adjusting the length,
2928 * but not before offset 0.
2929 * This may of course make the start overlap previous block,
2930 * and if we hit the offset 0 limit then the next block
2931 * can still overlap too.
2933 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2934 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2935 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2936 nexto = gotp->br_startoff + gotp->br_blockcount;
2938 nexto = gotp->br_startoff;
2940 nexto = NULLFILEOFF;
2942 align_off + align_alen != orig_end &&
2943 align_off + align_alen > nexto)
2944 align_off = nexto > align_alen ? nexto - align_alen : 0;
2946 * If we're now overlapping the next or previous extent that
2947 * means we can't fit an extsz piece in this hole. Just move
2948 * the start forward to the first valid spot and set
2949 * the length so we hit the end.
2951 if (align_off != orig_off && align_off < prevo)
2953 if (align_off + align_alen != orig_end &&
2954 align_off + align_alen > nexto &&
2955 nexto != NULLFILEOFF) {
2956 ASSERT(nexto > prevo);
2957 align_alen = nexto - align_off;
2961 * If realtime, and the result isn't a multiple of the realtime
2962 * extent size we need to remove blocks until it is.
2964 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2966 * We're not covering the original request, or
2967 * we won't be able to once we fix the length.
2969 if (orig_off < align_off ||
2970 orig_end > align_off + align_alen ||
2971 align_alen - temp < orig_alen)
2974 * Try to fix it by moving the start up.
2976 if (align_off + temp <= orig_off) {
2981 * Try to fix it by moving the end in.
2983 else if (align_off + align_alen - temp >= orig_end)
2986 * Set the start to the minimum then trim the length.
2989 align_alen -= orig_off - align_off;
2990 align_off = orig_off;
2991 align_alen -= align_alen % mp->m_sb.sb_rextsize;
2994 * Result doesn't cover the request, fail it.
2996 if (orig_off < align_off || orig_end > align_off + align_alen)
2999 ASSERT(orig_off >= align_off);
3000 /* see MAXEXTLEN handling above */
3001 ASSERT(orig_end <= align_off + align_alen ||
3002 align_alen + extsz > MAXEXTLEN);
3006 if (!eof && gotp->br_startoff != NULLFILEOFF)
3007 ASSERT(align_off + align_alen <= gotp->br_startoff);
3008 if (prevp->br_startoff != NULLFILEOFF)
3009 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
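/*
 * A simplified illustrative sketch of the extent size hint alignment above,
 * stripped of the neighbour and realtime special cases: round the start
 * down, round the end up, then trim the length back under the extent length
 * cap one hint at a time.  Names are stand-ins, not kernel APIs.
 */
#if 0
static void
align_to_hint(
	unsigned long long	*off,	/* in/out: file offset, fs blocks */
	unsigned long long	*len,	/* in/out: length, fs blocks */
	unsigned long long	extsz,	/* extent size hint, fs blocks */
	unsigned long long	maxlen)	/* cap, MAXEXTLEN in the real code */
{
	unsigned long long	temp;

	temp = *off % extsz;		/* misalignment of the start */
	if (temp) {
		*off -= temp;		/* round the start down ... */
		*len += temp;		/* ... and grow the length to match */
	}
	temp = *len % extsz;		/* start is now aligned, so this is the end */
	if (temp)
		*len += extsz - temp;	/* round the end up */
	while (*len > maxlen)
		*len -= extsz;		/* stay under the extent length cap */
}
#endif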
3017 #define XFS_ALLOC_GAP_UNITS 4
3021 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3023 xfs_fsblock_t adjust; /* adjustment to block numbers */
3024 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3025 xfs_mount_t *mp; /* mount point structure */
3026 int nullfb; /* true if ap->firstblock isn't set */
3027 int rt; /* true if inode is realtime */
3029 #define ISVALID(x,y) \
3031 (x) < mp->m_sb.sb_rblocks : \
3032 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3033 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3034 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3036 mp = ap->ip->i_mount;
3037 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3038 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3039 xfs_alloc_is_userdata(ap->datatype);
3040 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3041 ap->tp->t_firstblock);
3043 * If allocating at eof, and there's a previous real block,
3044 * try to use its last block as our starting point.
3046 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3047 !isnullstartblock(ap->prev.br_startblock) &&
3048 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3049 ap->prev.br_startblock)) {
3050 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3052 * Adjust for the gap between prevp and us.
3054 adjust = ap->offset -
3055 (ap->prev.br_startoff + ap->prev.br_blockcount);
3057 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3058 ap->blkno += adjust;
3061 * If not at eof, then compare the two neighbor blocks.
3062 * Figure out whether either one gives us a good starting point,
3063 * and pick the better one.
3065 else if (!ap->eof) {
3066 xfs_fsblock_t gotbno; /* right side block number */
3067 xfs_fsblock_t gotdiff=0; /* right side difference */
3068 xfs_fsblock_t prevbno; /* left side block number */
3069 xfs_fsblock_t prevdiff=0; /* left side difference */
3072 * If there's a previous (left) block, select a requested
3073 * start block based on it.
3075 if (ap->prev.br_startoff != NULLFILEOFF &&
3076 !isnullstartblock(ap->prev.br_startblock) &&
3077 (prevbno = ap->prev.br_startblock +
3078 ap->prev.br_blockcount) &&
3079 ISVALID(prevbno, ap->prev.br_startblock)) {
3081 * Calculate gap to end of previous block.
3083 adjust = prevdiff = ap->offset -
3084 (ap->prev.br_startoff +
3085 ap->prev.br_blockcount);
3087 * Figure the startblock based on the previous block's
3088 * end and the gap size.
3090 * If the gap is large relative to the piece we're
3091 * allocating, or using it gives us an invalid block
3092 * number, then just use the end of the previous block.
3094 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3095 ISVALID(prevbno + prevdiff,
3096 ap->prev.br_startblock))
3101 * If the firstblock forbids it, can't use it, must use default.
3104 if (!rt && !nullfb &&
3105 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3106 prevbno = NULLFSBLOCK;
3109 * No previous block or can't follow it, just default.
3112 prevbno = NULLFSBLOCK;
3114 * If there's a following (right) block, select a requested
3115 * start block based on it.
3117 if (!isnullstartblock(ap->got.br_startblock)) {
3119 * Calculate gap to start of next block.
3121 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3123 * Figure the startblock based on the next block's
3124 * start and the gap size.
3126 gotbno = ap->got.br_startblock;
3129 * If the gap is large relative to the piece we're
3130 * allocating, or using it gives us an invalid block
3131 * number, then just use the start of the next block
3132 * offset by our length.
3134 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3135 ISVALID(gotbno - gotdiff, gotbno))
3137 else if (ISVALID(gotbno - ap->length, gotbno)) {
3138 gotbno -= ap->length;
3139 gotdiff += adjust - ap->length;
3143 * If the firstblock forbids it, can't use it, must use default.
3146 if (!rt && !nullfb &&
3147 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3148 gotbno = NULLFSBLOCK;
3151 * No next block, just default.
3154 gotbno = NULLFSBLOCK;
3156 * If both valid, pick the better one, else the only good
3157 * one, else ap->blkno is already set (to 0 or the inode block).
3159 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3160 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3161 else if (prevbno != NULLFSBLOCK)
3162 ap->blkno = prevbno;
3163 else if (gotbno != NULLFSBLOCK)
3170 xfs_bmap_longest_free_extent(
3171 struct xfs_trans *tp,
3176 struct xfs_mount *mp = tp->t_mountp;
3177 struct xfs_perag *pag;
3178 xfs_extlen_t longest;
3181 pag = xfs_perag_get(mp, ag);
3182 if (!pag->pagf_init) {
3183 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3187 if (!pag->pagf_init) {
3193 longest = xfs_alloc_longest_free_extent(pag,
3194 xfs_alloc_min_freelist(mp, pag),
3195 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3196 if (*blen < longest)
3205 xfs_bmap_select_minlen(
3206 struct xfs_bmalloca *ap,
3207 struct xfs_alloc_arg *args,
3211 if (notinit || *blen < ap->minlen) {
3213 * Since we did a BUF_TRYLOCK above, it is possible that
3214 * there is space for this request.
3216 args->minlen = ap->minlen;
3217 } else if (*blen < args->maxlen) {
3219 * If the best seen length is less than the request length,
3220 * use the best as the minimum.
3222 args->minlen = *blen;
3225 * Otherwise we've seen an extent as big as maxlen, use that
3228 args->minlen = args->maxlen;
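/*
 * A simplified illustrative sketch of the minlen policy above.  If the AG
 * headers were not all initialised, or the best free extent seen is smaller
 * than the caller's floor, fall back to the floor (the trylock scan may
 * simply have missed space); otherwise ask for the best length seen, capped
 * at the full request.  The helper name is a stand-in for this sketch only.
 */
#if 0
static unsigned int
select_minlen_sketch(
	unsigned int	minlen,		/* caller's hard minimum (ap->minlen) */
	unsigned int	maxlen,		/* full request (args->maxlen) */
	unsigned int	best,		/* longest free extent seen (*blen) */
	int		notinit)	/* some AG headers were not read */
{
	if (notinit || best < minlen)
		return minlen;
	if (best < maxlen)
		return best;
	return maxlen;
}
#endif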
3233 xfs_bmap_btalloc_nullfb(
3234 struct xfs_bmalloca *ap,
3235 struct xfs_alloc_arg *args,
3238 struct xfs_mount *mp = ap->ip->i_mount;
3239 xfs_agnumber_t ag, startag;
3243 args->type = XFS_ALLOCTYPE_START_BNO;
3244 args->total = ap->total;
3246 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3247 if (startag == NULLAGNUMBER)
3250 while (*blen < args->maxlen) {
3251 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3256 if (++ag == mp->m_sb.sb_agcount)
3262 xfs_bmap_select_minlen(ap, args, blen, notinit);
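/*
 * A simplified illustrative sketch of the AG scan above: walk every AG
 * starting at the inode's AG, remember the longest free extent seen, and
 * stop early once an extent at least maxlen long has been found.  Modelled
 * here over a plain array of per-AG "longest" values rather than perag
 * structures.
 */
#if 0
static unsigned int
best_free_extent(
	const unsigned int	*longest,	/* per-AG longest free extent */
	unsigned int		agcount,	/* number of AGs */
	unsigned int		startag,	/* AG to start scanning at */
	unsigned int		maxlen)		/* full allocation request */
{
	unsigned int		blen = 0;
	unsigned int		ag = startag;

	do {
		if (longest[ag] > blen)
			blen = longest[ag];
		if (blen >= maxlen)
			break;			/* cannot do better than maxlen */
		if (++ag == agcount)
			ag = 0;			/* wrap around to AG 0 */
	} while (ag != startag);

	return blen;
}
#endif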
3267 xfs_bmap_btalloc_filestreams(
3268 struct xfs_bmalloca *ap,
3269 struct xfs_alloc_arg *args,
3272 struct xfs_mount *mp = ap->ip->i_mount;
3277 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3278 args->total = ap->total;
3280 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3281 if (ag == NULLAGNUMBER)
3284 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init);
3288 if (*blen < args->maxlen) {
3289 error = xfs_filestream_new_ag(ap, &ag);
3293 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3300 xfs_bmap_select_minlen(ap, args, blen, notinit);
3303 * Set the failure fallback case to look in the selected AG as stream
3306 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3310 /* Update all inode and quota accounting for the allocation we just did. */
3312 xfs_bmap_btalloc_accounting(
3313 struct xfs_bmalloca *ap,
3314 struct xfs_alloc_arg *args)
3316 if (ap->flags & XFS_BMAPI_COWFORK) {
3318 * COW fork blocks are in-core only and thus are treated as
3319 * in-core quota reservation (like delalloc blocks) even when
3320 * converted to real blocks. The quota reservation is not
3321 * accounted to disk until blocks are remapped to the data
3322 * fork. So if these blocks were previously delalloc, we
3323 * already have quota reservation and there's nothing to do
3330 * Otherwise, we've allocated blocks in a hole. The transaction
3331 * has acquired in-core quota reservation for this extent.
3332 * Rather than account these as real blocks, however, we reduce
3333 * the transaction quota reservation based on the allocation.
3334 * This essentially transfers the transaction quota reservation
3335 * to that of a delalloc extent.
3337 ap->ip->i_delayed_blks += args->len;
3338 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3343 /* data/attr fork only */
3344 ap->ip->i_d.di_nblocks += args->len;
3345 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3347 ap->ip->i_delayed_blks -= args->len;
3348 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3349 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3355 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3357 xfs_mount_t *mp; /* mount point structure */
3358 xfs_alloctype_t atype = 0; /* type for allocation routines */
3359 xfs_extlen_t align = 0; /* minimum allocation alignment */
3360 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3362 xfs_alloc_arg_t args;
3363 xfs_fileoff_t orig_offset;
3364 xfs_extlen_t orig_length;
3366 xfs_extlen_t nextminlen = 0;
3367 int nullfb; /* true if ap->firstblock isn't set */
3374 orig_offset = ap->offset;
3375 orig_length = ap->length;
3377 mp = ap->ip->i_mount;
3379 /* stripe alignment for allocation is determined by mount parameters */
3381 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3382 stripe_align = mp->m_swidth;
3383 else if (mp->m_dalign)
3384 stripe_align = mp->m_dalign;
3386 if (ap->flags & XFS_BMAPI_COWFORK)
3387 align = xfs_get_cowextsz_hint(ap->ip);
3388 else if (xfs_alloc_is_userdata(ap->datatype))
3389 align = xfs_get_extsz_hint(ap->ip);
3391 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3392 align, 0, ap->eof, 0, ap->conv,
3393 &ap->offset, &ap->length);
3399 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3400 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3401 ap->tp->t_firstblock);
3403 if (xfs_alloc_is_userdata(ap->datatype) &&
3404 xfs_inode_is_filestream(ap->ip)) {
3405 ag = xfs_filestream_lookup_ag(ap->ip);
3406 ag = (ag != NULLAGNUMBER) ? ag : 0;
3407 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3409 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3412 ap->blkno = ap->tp->t_firstblock;
3414 xfs_bmap_adjacent(ap);
3417 * If allowed, use ap->blkno; otherwise must use firstblock since
3418 * it's in the right allocation group.
3420 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3423 ap->blkno = ap->tp->t_firstblock;
3425 * Normal allocation, done through xfs_alloc_vextent.
3427 tryagain = isaligned = 0;
3428 memset(&args, 0, sizeof(args));
3431 args.fsbno = ap->blkno;
3432 xfs_rmap_skip_owner_update(&args.oinfo);
3434 /* Trim the allocation back to the maximum an AG can fit. */
3435 args.maxlen = min(ap->length, mp->m_ag_max_usable);
3439 * Search for an allocation group with a single extent large
3440 * enough for the request. If one isn't found, then adjust
3441 * the minimum allocation size to the largest space found.
3443 if (xfs_alloc_is_userdata(ap->datatype) &&
3444 xfs_inode_is_filestream(ap->ip))
3445 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3447 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3450 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3451 if (xfs_inode_is_filestream(ap->ip))
3452 args.type = XFS_ALLOCTYPE_FIRST_AG;
3454 args.type = XFS_ALLOCTYPE_START_BNO;
3455 args.total = args.minlen = ap->minlen;
3457 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3458 args.total = ap->total;
3459 args.minlen = ap->minlen;
3461 /* apply extent size hints if obtained earlier */
3464 div_u64_rem(ap->offset, args.prod, &args.mod);
3466 args.mod = args.prod - args.mod;
3467 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3471 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3472 div_u64_rem(ap->offset, args.prod, &args.mod);
3474 args.mod = args.prod - args.mod;
3477 * If we are not low on available data blocks, and the
3478 * underlying logical volume manager is a stripe, and
3479 * the file offset is zero then try to allocate data
3480 * blocks on stripe unit boundary.
3481 * NOTE: ap->aeof is only set if the allocation length
3482 * is >= the stripe unit and the allocation offset is
3483 * at the end of file.
3485 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
3487 args.alignment = stripe_align;
3491 * Adjust for alignment
3493 if (blen > args.alignment && blen <= args.maxlen)
3494 args.minlen = blen - args.alignment;
3495 args.minalignslop = 0;
3498 * First try an exact bno allocation.
3499 * If it fails then do a near or start bno
3500 * allocation with alignment turned on.
3504 args.type = XFS_ALLOCTYPE_THIS_BNO;
3507 * Compute the minlen+alignment for the
3508 * next case. Set slop so that the value
3509 * of minlen+alignment+slop doesn't go up
3510 * between the calls.
3512 if (blen > stripe_align && blen <= args.maxlen)
3513 nextminlen = blen - stripe_align;
3515 nextminlen = args.minlen;
3516 if (nextminlen + stripe_align > args.minlen + 1)
3518 nextminlen + stripe_align -
3521 args.minalignslop = 0;
3525 args.minalignslop = 0;
3527 args.minleft = ap->minleft;
3528 args.wasdel = ap->wasdel;
3529 args.resv = XFS_AG_RESV_NONE;
3530 args.datatype = ap->datatype;
3531 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3534 error = xfs_alloc_vextent(&args);
3538 if (tryagain && args.fsbno == NULLFSBLOCK) {
3540 * Exact allocation failed. Now try with alignment turned on.
3544 args.fsbno = ap->blkno;
3545 args.alignment = stripe_align;
3546 args.minlen = nextminlen;
3547 args.minalignslop = 0;
3549 if ((error = xfs_alloc_vextent(&args)))
3552 if (isaligned && args.fsbno == NULLFSBLOCK) {
3554 * allocation failed, so turn off alignment and try again.
3558 args.fsbno = ap->blkno;
3560 if ((error = xfs_alloc_vextent(&args)))
3563 if (args.fsbno == NULLFSBLOCK && nullfb &&
3564 args.minlen > ap->minlen) {
3565 args.minlen = ap->minlen;
3566 args.type = XFS_ALLOCTYPE_START_BNO;
3567 args.fsbno = ap->blkno;
3568 if ((error = xfs_alloc_vextent(&args)))
3571 if (args.fsbno == NULLFSBLOCK && nullfb) {
3573 args.type = XFS_ALLOCTYPE_FIRST_AG;
3574 args.total = ap->minlen;
3575 if ((error = xfs_alloc_vextent(&args)))
3577 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3579 if (args.fsbno != NULLFSBLOCK) {
3581 * check the allocation happened at the same or higher AG than
3582 * the first block that was allocated.
3584 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
3585 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
3586 XFS_FSB_TO_AGNO(mp, args.fsbno));
3588 ap->blkno = args.fsbno;
3589 if (ap->tp->t_firstblock == NULLFSBLOCK)
3590 ap->tp->t_firstblock = args.fsbno;
3591 ASSERT(nullfb || fb_agno <= args.agno);
3592 ap->length = args.len;
3594 * If the extent size hint is active, we tried to round the
3595 * caller's allocation request offset down to extsz and the
3596 * length up to another extsz boundary. If we found a free
3597 * extent we mapped it in starting at this new offset. If the
3598 * newly mapped space isn't long enough to cover any of the
3599 * range of offsets that was originally requested, move the
3600 * mapping up so that we can fill as much of the caller's
3601 * original request as possible. Free space is apparently
3602 * very fragmented so we're unlikely to be able to satisfy the hints anyway.
3605 if (ap->length <= orig_length)
3606 ap->offset = orig_offset;
3607 else if (ap->offset + ap->length < orig_offset + orig_length)
3608 ap->offset = orig_offset + orig_length - ap->length;
3609 xfs_bmap_btalloc_accounting(ap, &args);
3611 ap->blkno = NULLFSBLOCK;
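/*
 * A simplified illustrative sketch of the offset fix-up described in the
 * comment above.  If the mapping is no longer than the caller's request it
 * can go back at the requested offset; if it is longer but still ends short
 * of the requested range, slide it up just far enough that its end covers
 * the end of the request.  The helper name is a stand-in for this sketch.
 */
#if 0
static unsigned long long
fixup_mapped_offset(
	unsigned long long	got_off,	/* offset chosen after alignment */
	unsigned long long	got_len,	/* length actually allocated */
	unsigned long long	orig_off,	/* offset the caller asked for */
	unsigned long long	orig_len)	/* length the caller asked for */
{
	if (got_len <= orig_len)
		return orig_off;
	if (got_off + got_len < orig_off + orig_len)
		return orig_off + orig_len - got_len;
	return got_off;
}
#endif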
3618 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3619 * It figures out where to ask the underlying allocator to put the new extent.
3623 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3625 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3626 xfs_alloc_is_userdata(ap->datatype))
3627 return xfs_bmap_rtalloc(ap);
3628 return xfs_bmap_btalloc(ap);
3631 /* Trim extent to fit a logical block range. */
3634 struct xfs_bmbt_irec *irec,
3638 xfs_fileoff_t distance;
3639 xfs_fileoff_t end = bno + len;
3641 if (irec->br_startoff + irec->br_blockcount <= bno ||
3642 irec->br_startoff >= end) {
3643 irec->br_blockcount = 0;
3647 if (irec->br_startoff < bno) {
3648 distance = bno - irec->br_startoff;
3649 if (isnullstartblock(irec->br_startblock))
3650 irec->br_startblock = DELAYSTARTBLOCK;
3651 if (irec->br_startblock != DELAYSTARTBLOCK &&
3652 irec->br_startblock != HOLESTARTBLOCK)
3653 irec->br_startblock += distance;
3654 irec->br_startoff += distance;
3655 irec->br_blockcount -= distance;
3658 if (end < irec->br_startoff + irec->br_blockcount) {
3659 distance = irec->br_startoff + irec->br_blockcount - end;
3660 irec->br_blockcount -= distance;
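/*
 * A simplified illustrative sketch of the same clamp written over a plain
 * [start, start + count) range.  Extents with no overlap collapse to zero
 * length; otherwise the front and the tail are clipped to the
 * [bno, bno + len) window.  Names are stand-ins, not kernel APIs.
 */
#if 0
static void
trim_to_window(
	unsigned long long	*start,		/* in/out: extent start */
	unsigned long long	*count,		/* in/out: extent length */
	unsigned long long	bno,		/* first block of the window */
	unsigned long long	len)		/* length of the window */
{
	unsigned long long	end = bno + len;

	if (*start + *count <= bno || *start >= end) {
		*count = 0;			/* no overlap at all */
		return;
	}
	if (*start < bno) {			/* clip the front */
		*count -= bno - *start;
		*start = bno;
	}
	if (end < *start + *count)		/* clip the tail */
		*count = end - *start;
}
#endif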
3664 /* trim extent to within eof */
3666 xfs_trim_extent_eof(
3667 struct xfs_bmbt_irec *irec,
3668 struct xfs_inode *ip)
3671 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
3672 i_size_read(VFS_I(ip))));
3676 * Trim the returned map to the required bounds
3680 struct xfs_bmbt_irec *mval,
3681 struct xfs_bmbt_irec *got,
3689 if ((flags & XFS_BMAPI_ENTIRE) ||
3690 got->br_startoff + got->br_blockcount <= obno) {
3692 if (isnullstartblock(got->br_startblock))
3693 mval->br_startblock = DELAYSTARTBLOCK;
3699 ASSERT((*bno >= obno) || (n == 0));
3701 mval->br_startoff = *bno;
3702 if (isnullstartblock(got->br_startblock))
3703 mval->br_startblock = DELAYSTARTBLOCK;
3705 mval->br_startblock = got->br_startblock +
3706 (*bno - got->br_startoff);
3708 * Return the minimum of what we got and what we asked for, for
3709 * the length. We can use the len variable here because it is
3710 * modified below and we could have been there before coming
3711 * here if the first part of the allocation didn't overlap what was asked for.
3714 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3715 got->br_blockcount - (*bno - got->br_startoff));
3716 mval->br_state = got->br_state;
3717 ASSERT(mval->br_blockcount <= len);
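/*
 * A simplified illustrative sketch of the overlap computation above: for a
 * request position *bno that lies inside "got", the piece handed back to
 * the caller starts at *bno, its disk block is offset by the same distance
 * into the extent, and its length is the smaller of what remains of the
 * request and what remains of the extent.  Names are stand-ins for this
 * sketch only.
 */
#if 0
struct map_piece {
	unsigned long long	off;		/* file offset of the piece */
	unsigned long long	block;		/* disk block of the piece */
	unsigned long long	len;		/* length of the piece */
};

static struct map_piece
overlap_piece(
	unsigned long long	got_off,	/* extent file offset */
	unsigned long long	got_block,	/* extent disk block */
	unsigned long long	got_len,	/* extent length */
	unsigned long long	bno,		/* current request position */
	unsigned long long	end)		/* end of the request */
{
	struct map_piece	m;
	unsigned long long	into = bno - got_off;	/* distance into extent */

	m.off = bno;
	m.block = got_block + into;
	m.len = end - bno < got_len - into ? end - bno : got_len - into;
	return m;
}
#endif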
3722 * Update and validate the extent map to return
3725 xfs_bmapi_update_map(
3726 struct xfs_bmbt_irec **map,
3734 xfs_bmbt_irec_t *mval = *map;
3736 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3737 ((mval->br_startoff + mval->br_blockcount) <= end));
3738 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3739 (mval->br_startoff < obno));
3741 *bno = mval->br_startoff + mval->br_blockcount;
3743 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3744 /* update previous map with new information */
3745 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3746 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3747 ASSERT(mval->br_state == mval[-1].br_state);
3748 mval[-1].br_blockcount = mval->br_blockcount;
3749 mval[-1].br_state = mval->br_state;
3750 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3751 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3752 mval[-1].br_startblock != HOLESTARTBLOCK &&
3753 mval->br_startblock == mval[-1].br_startblock +
3754 mval[-1].br_blockcount &&
3755 mval[-1].br_state == mval->br_state) {
3756 ASSERT(mval->br_startoff ==
3757 mval[-1].br_startoff + mval[-1].br_blockcount);
3758 mval[-1].br_blockcount += mval->br_blockcount;
3759 } else if (*n > 0 &&
3760 mval->br_startblock == DELAYSTARTBLOCK &&
3761 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3762 mval->br_startoff ==
3763 mval[-1].br_startoff + mval[-1].br_blockcount) {
3764 mval[-1].br_blockcount += mval->br_blockcount;
3765 mval[-1].br_state = mval->br_state;
3766 } else if (!((*n == 0) &&
3767 ((mval->br_startoff + mval->br_blockcount) <=
3776 * Map file blocks to filesystem blocks without allocation.
3780 struct xfs_inode *ip,
3783 struct xfs_bmbt_irec *mval,
3787 struct xfs_mount *mp = ip->i_mount;
3788 struct xfs_ifork *ifp;
3789 struct xfs_bmbt_irec got;
3792 struct xfs_iext_cursor icur;
3796 int whichfork = xfs_bmapi_whichfork(flags);
3799 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3800 XFS_BMAPI_COWFORK)));
3801 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3803 if (unlikely(XFS_TEST_ERROR(
3804 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3805 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
3806 mp, XFS_ERRTAG_BMAPIFORMAT))) {
3807 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
3808 return -EFSCORRUPTED;
3811 if (XFS_FORCED_SHUTDOWN(mp))
3814 XFS_STATS_INC(mp, xs_blk_mapr);
3816 ifp = XFS_IFORK_PTR(ip, whichfork);
3818 /* No CoW fork? Return a hole. */
3819 if (whichfork == XFS_COW_FORK && !ifp) {
3820 mval->br_startoff = bno;
3821 mval->br_startblock = HOLESTARTBLOCK;
3822 mval->br_blockcount = len;
3823 mval->br_state = XFS_EXT_NORM;
3828 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
3829 error = xfs_iread_extents(NULL, ip, whichfork);
3834 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3839 while (bno < end && n < *nmap) {
3840 /* Reading past eof, act as though there's a hole up to end. */
3842 got.br_startoff = end;
3843 if (got.br_startoff > bno) {
3844 /* Reading in a hole. */
3845 mval->br_startoff = bno;
3846 mval->br_startblock = HOLESTARTBLOCK;
3847 mval->br_blockcount =
3848 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3849 mval->br_state = XFS_EXT_NORM;
3850 bno += mval->br_blockcount;
3851 len -= mval->br_blockcount;
3857 /* set up the extent map to return. */
3858 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3859 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3861 /* If we're done, stop now. */
3862 if (bno >= end || n >= *nmap)
3865 /* Else go on to the next record. */
3866 if (!xfs_iext_next_extent(ifp, &icur, &got))
3874 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3875 * global pool and the extent inserted into the inode in-core extent tree.
3877 * On entry, got refers to the first extent beyond the offset of the extent to
3878 * allocate or eof is specified if no such extent exists. On return, got refers
3879 * to the extent record that was inserted to the inode fork.
3881 * Note that the allocated extent may have been merged with contiguous extents
3882 * during insertion into the inode fork. Thus, got does not reflect the current
3883 * state of the inode fork on return. If necessary, the caller can use icur to
3884 * look up the updated record in the inode fork.
3887 xfs_bmapi_reserve_delalloc(
3888 struct xfs_inode *ip,
3892 xfs_filblks_t prealloc,
3893 struct xfs_bmbt_irec *got,
3894 struct xfs_iext_cursor *icur,
3897 struct xfs_mount *mp = ip->i_mount;
3898 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3900 xfs_extlen_t indlen;
3902 xfs_fileoff_t aoff = off;
3905 * Cap the alloc length. Keep track of prealloc so we know whether to
3906 * tag the inode before we return.
3908 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
3910 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3911 if (prealloc && alen >= len)
3912 prealloc = alen - len;
3914 /* Figure out the extent size, adjust alen */
3915 if (whichfork == XFS_COW_FORK) {
3916 struct xfs_bmbt_irec prev;
3917 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
3919 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
3920 prev.br_startoff = NULLFILEOFF;
3922 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
3923 1, 0, &aoff, &alen);
3928 * Make a transaction-less quota reservation for delayed allocation
3929 * blocks. This number gets adjusted later. We return if we haven't
3930 * allocated blocks already inside this loop.
3932 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
3933 XFS_QMOPT_RES_REGBLKS);
3938 * Split changing sb for alen and indlen since they could be coming
3939 * from different places.
3941 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
3944 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
3946 goto out_unreserve_quota;
3948 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
3950 goto out_unreserve_blocks;
3953 ip->i_delayed_blks += alen;
3955 got->br_startoff = aoff;
3956 got->br_startblock = nullstartblock(indlen);
3957 got->br_blockcount = alen;
3958 got->br_state = XFS_EXT_NORM;
3960 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
3963 * Tag the inode if blocks were preallocated. Note that COW fork
3964 * preallocation can occur at the start or end of the extent, even when
3965 * prealloc == 0, so we must also check the aligned offset and length.
3967 if (whichfork == XFS_DATA_FORK && prealloc)
3968 xfs_inode_set_eofblocks_tag(ip);
3969 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
3970 xfs_inode_set_cowblocks_tag(ip);
3974 out_unreserve_blocks:
3975 xfs_mod_fdblocks(mp, alen, false);
3976 out_unreserve_quota:
3977 if (XFS_IS_QUOTA_ON(mp))
3978 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
3979 XFS_QMOPT_RES_REGBLKS);
3985 struct xfs_bmalloca *bma)
3987 struct xfs_mount *mp = bma->ip->i_mount;
3988 int whichfork = xfs_bmapi_whichfork(bma->flags);
3989 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
3990 int tmp_logflags = 0;
3993 ASSERT(bma->length > 0);
3996 * For the wasdelay case, we could also just allocate the stuff asked
3997 * for in this bmap call but that wouldn't be as good.
4000 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4001 bma->offset = bma->got.br_startoff;
4002 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
4004 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4006 bma->length = XFS_FILBLKS_MIN(bma->length,
4007 bma->got.br_startoff - bma->offset);
4011 * Set the data type being allocated. For the data fork, the first data
4012 * in the file is treated differently to all other allocations. For the
4013 * attribute fork, we only need to ensure the allocated range is not on the busy list.
4016 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4017 bma->datatype = XFS_ALLOC_NOBUSY;
4018 if (whichfork == XFS_DATA_FORK) {
4019 if (bma->offset == 0)
4020 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4022 bma->datatype |= XFS_ALLOC_USERDATA;
4024 if (bma->flags & XFS_BMAPI_ZERO)
4025 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4028 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4031 * Only want to do the alignment at the eof if it is userdata and
4032 * allocation length is larger than a stripe unit.
4034 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4035 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4036 error = xfs_bmap_isaeof(bma, whichfork);
4041 error = xfs_bmap_alloc(bma);
4045 if (bma->blkno == NULLFSBLOCK)
4047 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
4048 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4050 * Bump the number of extents we've allocated
4056 bma->cur->bc_private.b.flags =
4057 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4059 bma->got.br_startoff = bma->offset;
4060 bma->got.br_startblock = bma->blkno;
4061 bma->got.br_blockcount = bma->length;
4062 bma->got.br_state = XFS_EXT_NORM;
4065 * In the data fork, a wasdelay extent has been initialized, so
4066 * shouldn't be flagged as unwritten.
4068 * For the cow fork, however, we convert delalloc reservations
4069 * (extents allocated for speculative preallocation) to
4070 * allocated unwritten extents, and only convert the unwritten
4071 * extents to real extents when we're about to write the data.
4073 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4074 (bma->flags & XFS_BMAPI_PREALLOC) &&
4075 xfs_sb_version_hasextflgbit(&mp->m_sb))
4076 bma->got.br_state = XFS_EXT_UNWRITTEN;
4079 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4081 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4082 whichfork, &bma->icur, &bma->cur, &bma->got,
4083 &bma->logflags, bma->flags);
4085 bma->logflags |= tmp_logflags;
4090 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4091 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4092 * the neighbouring ones.
4094 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4096 ASSERT(bma->got.br_startoff <= bma->offset);
4097 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4098 bma->offset + bma->length);
4099 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4100 bma->got.br_state == XFS_EXT_UNWRITTEN);
4105 xfs_bmapi_convert_unwritten(
4106 struct xfs_bmalloca *bma,
4107 struct xfs_bmbt_irec *mval,
4111 int whichfork = xfs_bmapi_whichfork(flags);
4112 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4113 int tmp_logflags = 0;
4116 /* check if we need to do unwritten->real conversion */
4117 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4118 (flags & XFS_BMAPI_PREALLOC))
4121 /* check if we need to do real->unwritten conversion */
4122 if (mval->br_state == XFS_EXT_NORM &&
4123 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4124 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4128 * Modify (by adding) the state flag, if writing.
4130 ASSERT(mval->br_blockcount <= len);
4131 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4132 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4133 bma->ip, whichfork);
4135 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4136 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4139 * Before insertion into the bmbt, zero the range being converted
4142 if (flags & XFS_BMAPI_ZERO) {
4143 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4144 mval->br_blockcount);
4149 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4150 &bma->icur, &bma->cur, mval, &tmp_logflags);
4152 * Log the inode core unconditionally in the unwritten extent conversion
4153 * path because the conversion might not have done so (e.g., if the
4154 * extent count hasn't changed). We need to make sure the inode is dirty
4155 * in the transaction for the sake of fsync(), even if nothing has
4156 * changed, because fsync() will not force the log for this transaction
4157 * unless it sees the inode pinned.
4159 * Note: If we're only converting cow fork extents, there aren't
4160 * any on-disk updates to make, so we don't need to log anything.
4162 if (whichfork != XFS_COW_FORK)
4163 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4168 * Update our extent pointer, given that
4169 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4170 * of the neighbouring ones.
4172 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4175 * We may have combined previously unwritten space with written space,
4176 * so generate another request.
4178 if (mval->br_blockcount < len)
4184 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4185 * extent state if necessary. Detailed behaviour is controlled by the flags
4186 * parameter. Only allocates blocks from a single allocation group, to avoid locking problems.
4191 struct xfs_trans *tp, /* transaction pointer */
4192 struct xfs_inode *ip, /* incore inode */
4193 xfs_fileoff_t bno, /* starting file offs. mapped */
4194 xfs_filblks_t len, /* length to map in file */
4195 int flags, /* XFS_BMAPI_... */
4196 xfs_extlen_t total, /* total blocks needed */
4197 struct xfs_bmbt_irec *mval, /* output: map values */
4198 int *nmap) /* i/o: mval size/count */
4200 struct xfs_mount *mp = ip->i_mount;
4201 struct xfs_ifork *ifp;
4202 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4203 xfs_fileoff_t end; /* end of mapped file region */
4204 bool eof = false; /* after the end of extents */
4205 int error; /* error return */
4206 int n; /* current extent index */
4207 xfs_fileoff_t obno; /* old block number (offset) */
4208 int whichfork; /* data or attr fork */
4211 xfs_fileoff_t orig_bno; /* original block number value */
4212 int orig_flags; /* original flags arg value */
4213 xfs_filblks_t orig_len; /* original value of len arg */
4214 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4215 int orig_nmap; /* original value of *nmap */
4223 whichfork = xfs_bmapi_whichfork(flags);
4226 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4227 ASSERT(tp != NULL ||
4228 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4229 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4231 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4232 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4233 ASSERT(!(flags & XFS_BMAPI_REMAP));
4235 /* zeroing is currently only for data extents, not metadata */
4236 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4237 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4239 * we can allocate unwritten extents or pre-zero allocated blocks,
4240 * but it makes no sense to do both at once. This would result in
4241 * zeroing the unwritten extent twice, but it still being an
4242 * unwritten extent....
4244 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4245 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4247 if (unlikely(XFS_TEST_ERROR(
4248 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4249 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4250 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4251 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4252 return -EFSCORRUPTED;
4255 if (XFS_FORCED_SHUTDOWN(mp))
4258 ifp = XFS_IFORK_PTR(ip, whichfork);
4260 XFS_STATS_INC(mp, xs_blk_mapw);
4262 if (!tp || tp->t_firstblock == NULLFSBLOCK) {
4263 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4264 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4271 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4272 error = xfs_iread_extents(tp, ip, whichfork);
4281 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4283 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4284 bma.prev.br_startoff = NULLFILEOFF;
4290 while (bno < end && n < *nmap) {
4291 bool need_alloc = false, wasdelay = false;
4293 /* in hole or beyond EOF? */
4294 if (eof || bma.got.br_startoff > bno) {
4296 * CoW fork conversions should /never/ hit EOF or
4297 * holes. There should always be something for us to work on.
4300 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4301 (flags & XFS_BMAPI_COWFORK)));
4303 if (flags & XFS_BMAPI_DELALLOC) {
4305 * For the COW fork we can reasonably get a
4306 * request for converting an extent that races
4307 * with other threads already having converted
4308 * part of it, because converting COW to
4309 * regular blocks is not protected using the IOLOCK.
4312 ASSERT(flags & XFS_BMAPI_COWFORK);
4313 if (!(flags & XFS_BMAPI_COWFORK)) {
4318 if (eof || bno >= end)
4323 } else if (isnullstartblock(bma.got.br_startblock)) {
4328 * First, deal with the hole before the allocated space
4329 * that we found, if any.
4331 if ((need_alloc || wasdelay) &&
4332 !(flags & XFS_BMAPI_CONVERT_ONLY)) {
4334 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4335 bma.wasdel = wasdelay;
4340 * There's a 32/64 bit type mismatch between the
4341 * allocation length request (which can be 64 bits in
4342 * length) and the bma length request, which is
4343 * xfs_extlen_t and therefore 32 bits. Hence we have to
4344 * check for 32-bit overflows and handle them here.
4346 if (len > (xfs_filblks_t)MAXEXTLEN)
4347 bma.length = MAXEXTLEN;
4352 ASSERT(bma.length > 0);
4353 error = xfs_bmapi_allocate(&bma);
4356 if (bma.blkno == NULLFSBLOCK)
4360 * If this is a CoW allocation, record the data in
4361 * the refcount btree for orphan recovery.
4363 if (whichfork == XFS_COW_FORK) {
4364 error = xfs_refcount_alloc_cow_extent(tp,
4365 bma.blkno, bma.length);
4371 /* Deal with the allocated space we found. */
4372 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4375 /* Execute unwritten extent conversion if necessary */
4376 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4377 if (error == -EAGAIN)
4382 /* update the extent map to return */
4383 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4386 * If we're done, stop now. Stop when we've allocated
4387 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4388 * the transaction may get too big.
4390 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4393 /* Else go on to the next record. */
4395 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4401 * Transform from btree to extents, give it cur.
4403 if (xfs_bmap_wants_extents(ip, whichfork)) {
4404 int tmp_logflags = 0;
4407 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4408 &tmp_logflags, whichfork);
4409 bma.logflags |= tmp_logflags;
4414 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4415 XFS_IFORK_NEXTENTS(ip, whichfork) >
4416 XFS_IFORK_MAXEXT(ip, whichfork));
4420 * Log everything. Do this after conversion, there's no point in
4421 * logging the extent records if we've converted to btree format.
4423 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4424 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4425 bma.logflags &= ~xfs_ilog_fext(whichfork);
4426 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4427 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4428 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4430 * Log whatever the flags say, even if error. Otherwise we might miss
4431 * detecting a case where the data is changed, there's an error,
4432 and it's not logged so we don't shut down when we should.
4435 xfs_trans_log_inode(tp, ip, bma.logflags);
4438 xfs_btree_del_cursor(bma.cur, error);
4441 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4448 struct xfs_trans *tp,
4449 struct xfs_inode *ip,
4452 xfs_fsblock_t startblock,
4455 struct xfs_mount *mp = ip->i_mount;
4456 struct xfs_ifork *ifp;
4457 struct xfs_btree_cur *cur = NULL;
4458 struct xfs_bmbt_irec got;
4459 struct xfs_iext_cursor icur;
4460 int whichfork = xfs_bmapi_whichfork(flags);
4461 int logflags = 0, error;
4463 ifp = XFS_IFORK_PTR(ip, whichfork);
4465 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4466 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4467 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4468 XFS_BMAPI_NORMAP)));
4469 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4470 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4472 if (unlikely(XFS_TEST_ERROR(
4473 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4474 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4475 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4476 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
4477 return -EFSCORRUPTED;
4480 if (XFS_FORCED_SHUTDOWN(mp))
4483 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4484 error = xfs_iread_extents(tp, ip, whichfork);
4489 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4490 /* make sure we only reflink into a hole. */
4491 ASSERT(got.br_startoff > bno);
4492 ASSERT(got.br_startoff - bno >= len);
4495 ip->i_d.di_nblocks += len;
4496 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4498 if (ifp->if_flags & XFS_IFBROOT) {
4499 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4500 cur->bc_private.b.flags = 0;
4503 got.br_startoff = bno;
4504 got.br_startblock = startblock;
4505 got.br_blockcount = len;
4506 if (flags & XFS_BMAPI_PREALLOC)
4507 got.br_state = XFS_EXT_UNWRITTEN;
4509 got.br_state = XFS_EXT_NORM;
4511 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4512 &cur, &got, &logflags, flags);
4516 if (xfs_bmap_wants_extents(ip, whichfork)) {
4517 int tmp_logflags = 0;
4519 error = xfs_bmap_btree_to_extents(tp, ip, cur,
4520 &tmp_logflags, whichfork);
4521 logflags |= tmp_logflags;
4525 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4526 logflags &= ~XFS_ILOG_DEXT;
4527 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4528 logflags &= ~XFS_ILOG_DBROOT;
4531 xfs_trans_log_inode(tp, ip, logflags);
4533 xfs_btree_del_cursor(cur, error);
4538 * When a delalloc extent is split (e.g., due to a hole punch), the original
4539 * indlen reservation must be shared across the two new extents that are left
4542 * Given the original reservation and the worst case indlen for the two new
4543 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4544 * reservation fairly across the two new extents. If necessary, steal available
4545 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4546 * ores == 1). The number of stolen blocks is returned. The availability and
4547 * subsequent accounting of stolen blocks is the responsibility of the caller.
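/*
 * Worked example (illustrative numbers, not from the original source):
 * suppose ores = 5, *indlen1 = 4, *indlen2 = 4 and avail = 2. The new
 * worst-case total is nres = 8, so we steal min(8 - 5, 2) = 2 blocks,
 * leaving a reservation of 7 against a requirement of 8. The shortfall
 * is spread proportionally: resfactor = 7 * 100 / 8 = 87, so each
 * extent initially gets 4 * 87 / 100 = 3 blocks, and the one remaining
 * block is handed out by the loop below, yielding *indlen1 = 4,
 * *indlen2 = 3 and a return value of 2 stolen blocks.
 */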
4549 static xfs_filblks_t
4550 xfs_bmap_split_indlen(
4551 xfs_filblks_t ores, /* original res. */
4552 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4553 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4554 xfs_filblks_t avail) /* stealable blocks */
4556 xfs_filblks_t len1 = *indlen1;
4557 xfs_filblks_t len2 = *indlen2;
4558 xfs_filblks_t nres = len1 + len2; /* new total res. */
4559 xfs_filblks_t stolen = 0;
4560 xfs_filblks_t resfactor;
4563 * Steal as many blocks as we can to try and satisfy the worst case
4564 * indlen for both new extents.
4566 if (ores < nres && avail)
4567 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4570 /* nothing else to do if we've satisfied the new reservation */
4575 * We can't meet the total required reservation for the two extents.
4576 * Calculate the percentage of the required reservation we can actually
4577 * cover and apply that percentage to each of the requested indlen values.
4578 * This distributes the shortage fairly and reduces the chances that one
4579 * of the two extents is left with nothing when extents are repeatedly
4582 resfactor = (ores * 100);
4583 do_div(resfactor, nres);
4588 ASSERT(len1 + len2 <= ores);
4589 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4592 * Hand out the remainder to each extent. If one of the two reservations
4593 * is zero, we want to make sure that one gets a block first. The loop
4594 * below starts with len1, so hand len2 a block right off the bat if it
4597 ores -= (len1 + len2);
4598 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4599 if (ores && !len2 && *indlen2) {
4604 if (len1 < *indlen1) {
4610 if (len2 < *indlen2) {
4623 xfs_bmap_del_extent_delay(
4624 struct xfs_inode *ip,
4626 struct xfs_iext_cursor *icur,
4627 struct xfs_bmbt_irec *got,
4628 struct xfs_bmbt_irec *del)
4630 struct xfs_mount *mp = ip->i_mount;
4631 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4632 struct xfs_bmbt_irec new;
4633 int64_t da_old, da_new, da_diff = 0;
4634 xfs_fileoff_t del_endoff, got_endoff;
4635 xfs_filblks_t got_indlen, new_indlen, stolen;
4636 int state = xfs_bmap_fork_to_state(whichfork);
4640 XFS_STATS_INC(mp, xs_del_exlist);
4642 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4643 del_endoff = del->br_startoff + del->br_blockcount;
4644 got_endoff = got->br_startoff + got->br_blockcount;
4645 da_old = startblockval(got->br_startblock);
4648 ASSERT(del->br_blockcount > 0);
4649 ASSERT(got->br_startoff <= del->br_startoff);
4650 ASSERT(got_endoff >= del_endoff);
4653 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4655 do_div(rtexts, mp->m_sb.sb_rextsize);
4656 xfs_mod_frextents(mp, rtexts);
4660 * Update the inode delalloc counter now and wait to update the
4661 * sb counters as we might have to borrow some blocks for the
4662 * indirect block accounting.
4664 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4665 -((long)del->br_blockcount), 0,
4666 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4669 ip->i_delayed_blks -= del->br_blockcount;
4671 if (got->br_startoff == del->br_startoff)
4672 state |= BMAP_LEFT_FILLING;
4673 if (got_endoff == del_endoff)
4674 state |= BMAP_RIGHT_FILLING;
4676 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4677 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4679 * Matches the whole extent. Delete the entry.
4681 xfs_iext_remove(ip, icur, state);
4682 xfs_iext_prev(ifp, icur);
4684 case BMAP_LEFT_FILLING:
4686 * Deleting the first part of the extent.
4688 got->br_startoff = del_endoff;
4689 got->br_blockcount -= del->br_blockcount;
4690 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4691 got->br_blockcount), da_old);
4692 got->br_startblock = nullstartblock((int)da_new);
4693 xfs_iext_update_extent(ip, state, icur, got);
4695 case BMAP_RIGHT_FILLING:
4697 * Deleting the last part of the extent.
4699 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4700 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4701 got->br_blockcount), da_old);
4702 got->br_startblock = nullstartblock((int)da_new);
4703 xfs_iext_update_extent(ip, state, icur, got);
4707 * Deleting the middle of the extent.
4709 * Distribute the original indlen reservation across the two new
4710 * extents. Steal blocks from the deleted extent if necessary.
4711 * Stealing blocks simply fudges the fdblocks accounting below.
4712 * Warn if either of the new indlen reservations is zero as this
4713 * can lead to delalloc problems.
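/*
 * Example (hypothetical values, not from the original source): a delalloc
 * extent covering offsets [0, 100) with a hole punched at [40, 60) leaves
 * "got" describing [0, 40) and "new" describing [60, 100). Each piece gets
 * a fresh worst-case indlen, trimmed by xfs_bmap_split_indlen() to fit the
 * original reservation (plus any blocks stolen from the punched range), and
 * both startblocks are re-encoded with nullstartblock() since the pieces
 * remain delayed allocations.
 */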
4715 got->br_blockcount = del->br_startoff - got->br_startoff;
4716 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4718 new.br_blockcount = got_endoff - del_endoff;
4719 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4721 WARN_ON_ONCE(!got_indlen || !new_indlen);
4722 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4723 del->br_blockcount);
4725 got->br_startblock = nullstartblock((int)got_indlen);
4727 new.br_startoff = del_endoff;
4728 new.br_state = got->br_state;
4729 new.br_startblock = nullstartblock((int)new_indlen);
4731 xfs_iext_update_extent(ip, state, icur, got);
4732 xfs_iext_next(ifp, icur);
4733 xfs_iext_insert(ip, icur, &new, state);
4735 da_new = got_indlen + new_indlen - stolen;
4736 del->br_blockcount -= stolen;
4740 ASSERT(da_old >= da_new);
4741 da_diff = da_old - da_new;
4743 da_diff += del->br_blockcount;
4745 xfs_mod_fdblocks(mp, da_diff, false);
4750 xfs_bmap_del_extent_cow(
4751 struct xfs_inode *ip,
4752 struct xfs_iext_cursor *icur,
4753 struct xfs_bmbt_irec *got,
4754 struct xfs_bmbt_irec *del)
4756 struct xfs_mount *mp = ip->i_mount;
4757 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4758 struct xfs_bmbt_irec new;
4759 xfs_fileoff_t del_endoff, got_endoff;
4760 int state = BMAP_COWFORK;
4762 XFS_STATS_INC(mp, xs_del_exlist);
4764 del_endoff = del->br_startoff + del->br_blockcount;
4765 got_endoff = got->br_startoff + got->br_blockcount;
4767 ASSERT(del->br_blockcount > 0);
4768 ASSERT(got->br_startoff <= del->br_startoff);
4769 ASSERT(got_endoff >= del_endoff);
4770 ASSERT(!isnullstartblock(got->br_startblock));
4772 if (got->br_startoff == del->br_startoff)
4773 state |= BMAP_LEFT_FILLING;
4774 if (got_endoff == del_endoff)
4775 state |= BMAP_RIGHT_FILLING;
4777 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4778 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4780 * Matches the whole extent. Delete the entry.
4782 xfs_iext_remove(ip, icur, state);
4783 xfs_iext_prev(ifp, icur);
4785 case BMAP_LEFT_FILLING:
4787 * Deleting the first part of the extent.
4789 got->br_startoff = del_endoff;
4790 got->br_blockcount -= del->br_blockcount;
4791 got->br_startblock = del->br_startblock + del->br_blockcount;
4792 xfs_iext_update_extent(ip, state, icur, got);
4794 case BMAP_RIGHT_FILLING:
4796 * Deleting the last part of the extent.
4798 got->br_blockcount -= del->br_blockcount;
4799 xfs_iext_update_extent(ip, state, icur, got);
4803 * Deleting the middle of the extent.
4805 got->br_blockcount = del->br_startoff - got->br_startoff;
4807 new.br_startoff = del_endoff;
4808 new.br_blockcount = got_endoff - del_endoff;
4809 new.br_state = got->br_state;
4810 new.br_startblock = del->br_startblock + del->br_blockcount;
4812 xfs_iext_update_extent(ip, state, icur, got);
4813 xfs_iext_next(ifp, icur);
4814 xfs_iext_insert(ip, icur, &new, state);
4817 ip->i_delayed_blks -= del->br_blockcount;
4821 * Called by xfs_bmapi to update file extent records and the btree
4822 * after removing space.
4824 STATIC int /* error */
4825 xfs_bmap_del_extent_real(
4826 xfs_inode_t *ip, /* incore inode pointer */
4827 xfs_trans_t *tp, /* current transaction pointer */
4828 struct xfs_iext_cursor *icur,
4829 xfs_btree_cur_t *cur, /* if null, not a btree */
4830 xfs_bmbt_irec_t *del, /* data to remove from extents */
4831 int *logflagsp, /* inode logging flags */
4832 int whichfork, /* data or attr fork */
4833 int bflags) /* bmapi flags */
4835 xfs_fsblock_t del_endblock=0; /* first block past del */
4836 xfs_fileoff_t del_endoff; /* first offset past del */
4837 int do_fx; /* free extent at end of routine */
4838 int error; /* error return value */
4839 int flags = 0;/* inode logging flags */
4840 struct xfs_bmbt_irec got; /* current extent entry */
4841 xfs_fileoff_t got_endoff; /* first offset past got */
4842 int i; /* temp state */
4843 struct xfs_ifork *ifp; /* inode fork pointer */
4844 xfs_mount_t *mp; /* mount structure */
4845 xfs_filblks_t nblks; /* quota/sb block count */
4846 xfs_bmbt_irec_t new; /* new record to be inserted */
4848 uint qfield; /* quota field to update */
4849 int state = xfs_bmap_fork_to_state(whichfork);
4850 struct xfs_bmbt_irec old;
4853 XFS_STATS_INC(mp, xs_del_exlist);
4855 ifp = XFS_IFORK_PTR(ip, whichfork);
4856 ASSERT(del->br_blockcount > 0);
4857 xfs_iext_get_extent(ifp, icur, &got);
4858 ASSERT(got.br_startoff <= del->br_startoff);
4859 del_endoff = del->br_startoff + del->br_blockcount;
4860 got_endoff = got.br_startoff + got.br_blockcount;
4861 ASSERT(got_endoff >= del_endoff);
4862 ASSERT(!isnullstartblock(got.br_startblock));
4867 * If it's the case where the directory code is running with no block
4868 * reservation, and the deleted block is in the middle of its extent,
4869 * and the resulting insert of an extent would cause transformation to
4870 * btree format, then reject it. The calling code will then swap blocks
4871 * around instead. We have to do this now, rather than waiting for the
4872 * conversion to btree format, since the transaction will be dirty then.
4874 if (tp->t_blk_res == 0 &&
4875 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
4876 XFS_IFORK_NEXTENTS(ip, whichfork) >=
4877 XFS_IFORK_MAXEXT(ip, whichfork) &&
4878 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
4881 flags = XFS_ILOG_CORE;
4882 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4887 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
4890 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
4894 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
4898 nblks = len * mp->m_sb.sb_rextsize;
4899 qfield = XFS_TRANS_DQ_RTBCOUNT;
4902 nblks = del->br_blockcount;
4903 qfield = XFS_TRANS_DQ_BCOUNT;
4906 del_endblock = del->br_startblock + del->br_blockcount;
4908 error = xfs_bmbt_lookup_eq(cur, &got, &i);
4911 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4914 if (got.br_startoff == del->br_startoff)
4915 state |= BMAP_LEFT_FILLING;
4916 if (got_endoff == del_endoff)
4917 state |= BMAP_RIGHT_FILLING;
4919 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4920 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4922 * Matches the whole extent. Delete the entry.
4924 xfs_iext_remove(ip, icur, state);
4925 xfs_iext_prev(ifp, icur);
4926 XFS_IFORK_NEXT_SET(ip, whichfork,
4927 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
4928 flags |= XFS_ILOG_CORE;
4930 flags |= xfs_ilog_fext(whichfork);
4933 if ((error = xfs_btree_delete(cur, &i)))
4935 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4937 case BMAP_LEFT_FILLING:
4939 * Deleting the first part of the extent.
4941 got.br_startoff = del_endoff;
4942 got.br_startblock = del_endblock;
4943 got.br_blockcount -= del->br_blockcount;
4944 xfs_iext_update_extent(ip, state, icur, &got);
4946 flags |= xfs_ilog_fext(whichfork);
4949 error = xfs_bmbt_update(cur, &got);
4953 case BMAP_RIGHT_FILLING:
4955 * Deleting the last part of the extent.
4957 got.br_blockcount -= del->br_blockcount;
4958 xfs_iext_update_extent(ip, state, icur, &got);
4960 flags |= xfs_ilog_fext(whichfork);
4963 error = xfs_bmbt_update(cur, &got);
4969 * Deleting the middle of the extent.
4973 got.br_blockcount = del->br_startoff - got.br_startoff;
4974 xfs_iext_update_extent(ip, state, icur, &got);
4976 new.br_startoff = del_endoff;
4977 new.br_blockcount = got_endoff - del_endoff;
4978 new.br_state = got.br_state;
4979 new.br_startblock = del_endblock;
4981 flags |= XFS_ILOG_CORE;
4983 error = xfs_bmbt_update(cur, &got);
4986 error = xfs_btree_increment(cur, 0, &i);
4989 cur->bc_rec.b = new;
4990 error = xfs_btree_insert(cur, &i);
4991 if (error && error != -ENOSPC)
4994 * If we get no-space back from the btree insert, it tried a
4995 * split, and we have a zero block reservation. Fix up
4996 * our state and return the error.
4998 if (error == -ENOSPC) {
5000 * Reset the cursor, don't trust it after any
5003 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5006 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5008 * Update the btree record back
5009 * to the original value.
5011 error = xfs_bmbt_update(cur, &old);
5015 * Reset the extent record back
5016 * to the original value.
5018 xfs_iext_update_extent(ip, state, icur, &old);
5023 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5025 flags |= xfs_ilog_fext(whichfork);
5026 XFS_IFORK_NEXT_SET(ip, whichfork,
5027 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5028 xfs_iext_next(ifp, icur);
5029 xfs_iext_insert(ip, icur, &new, state);
5033 /* remove reverse mapping */
5034 error = xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5039 * If we need to, add to list of extents to delete.
5041 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5042 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5043 error = xfs_refcount_decrease_extent(tp, del);
5047 __xfs_bmap_add_free(tp, del->br_startblock,
5048 del->br_blockcount, NULL,
5049 (bflags & XFS_BMAPI_NODISCARD) ||
5050 del->br_state == XFS_EXT_UNWRITTEN);
5055 * Adjust inode # blocks in the file.
5058 ip->i_d.di_nblocks -= nblks;
5060 * Adjust quota data.
5062 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5063 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5071 * Unmap (remove) blocks from a file.
5072 * If nexts is nonzero then the number of extents to remove is limited to
5073 * that value. If not all extents in the block range can be removed then
5078 struct xfs_trans *tp, /* transaction pointer */
5079 struct xfs_inode *ip, /* incore inode */
5080 xfs_fileoff_t start, /* first file offset deleted */
5081 xfs_filblks_t *rlen, /* i/o: amount remaining */
5082 int flags, /* misc flags */
5083 xfs_extnum_t nexts) /* number of extents max */
5085 struct xfs_btree_cur *cur; /* bmap btree cursor */
5086 struct xfs_bmbt_irec del; /* extent being deleted */
5087 int error; /* error return value */
5088 xfs_extnum_t extno; /* extent number in list */
5089 struct xfs_bmbt_irec got; /* current extent record */
5090 struct xfs_ifork *ifp; /* inode fork pointer */
5091 int isrt; /* freeing in rt area */
5092 int logflags; /* transaction logging flags */
5093 xfs_extlen_t mod; /* rt extent offset */
5094 struct xfs_mount *mp; /* mount structure */
5095 int tmp_logflags; /* partial logging flags */
5096 int wasdel; /* was a delayed alloc extent */
5097 int whichfork; /* data or attribute fork */
5099 xfs_filblks_t len = *rlen; /* length to unmap in file */
5100 xfs_fileoff_t max_len;
5101 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5103 struct xfs_iext_cursor icur;
5106 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5108 whichfork = xfs_bmapi_whichfork(flags);
5109 ASSERT(whichfork != XFS_COW_FORK);
5110 ifp = XFS_IFORK_PTR(ip, whichfork);
5112 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5113 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5114 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5116 return -EFSCORRUPTED;
5119 if (XFS_FORCED_SHUTDOWN(mp))
5122 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5127 * Guesstimate how many blocks we can unmap without running the risk of
5128 * blowing out the transaction with a mix of EFIs and reflink
5131 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5132 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5136 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5137 (error = xfs_iread_extents(tp, ip, whichfork)))
5139 if (xfs_iext_count(ifp) == 0) {
5143 XFS_STATS_INC(mp, xs_blk_unmap);
5144 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5147 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5154 if (ifp->if_flags & XFS_IFBROOT) {
5155 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5156 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5157 cur->bc_private.b.flags = 0;
5163 * Synchronize by locking the bitmap inode.
5165 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5166 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5167 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5168 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5172 while (end != (xfs_fileoff_t)-1 && end >= start &&
5173 (nexts == 0 || extno < nexts) && max_len > 0) {
5175 * Is the found extent after a hole in which end lives?
5176 * Just back up to the previous extent, if so.
5178 if (got.br_startoff > end &&
5179 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5184 * Is the last block of this extent before the range
5185 * we're supposed to delete? If so, we're done.
5187 end = XFS_FILEOFF_MIN(end,
5188 got.br_startoff + got.br_blockcount - 1);
5192 * Then deal with the (possibly delayed) allocated space
5196 wasdel = isnullstartblock(del.br_startblock);
5199 * Make sure we don't touch multiple AGF headers out of order
5200 * in a single transaction, as that could cause AB-BA deadlocks.
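/*
 * Illustrative note (not from the original source): AGF buffers should
 * only be locked in ascending AG order within a transaction. If an
 * earlier pass through this loop freed blocks in, say, AG 5 and the
 * current extent lives in AG 3, continuing would lock AGF 3 after AGF 5
 * and could deadlock against a thread taking them in the opposite
 * order, so we stop here and let the caller come back for the rest.
 */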
5203 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5204 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5208 if (got.br_startoff < start) {
5209 del.br_startoff = start;
5210 del.br_blockcount -= start - got.br_startoff;
5212 del.br_startblock += start - got.br_startoff;
5214 if (del.br_startoff + del.br_blockcount > end + 1)
5215 del.br_blockcount = end + 1 - del.br_startoff;
5217 /* How much can we safely unmap? */
5218 if (max_len < del.br_blockcount) {
5219 del.br_startoff += del.br_blockcount - max_len;
5221 del.br_startblock += del.br_blockcount - max_len;
5222 del.br_blockcount = max_len;
5228 sum = del.br_startblock + del.br_blockcount;
5229 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
5232 * Realtime extent not lined up at the end.
5233 * The extent could have been split into written
5234 * and unwritten pieces, or we could just be
5235 * unmapping part of it. But we can't really
5236 * get rid of part of a realtime extent.
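/*
 * Worked example (hypothetical numbers, not from the original source):
 * with sb_rextsize = 4, a deletion whose end falls at startblock +
 * blockcount = 18 gives mod = 2, i.e. it ends two blocks into a
 * realtime extent. If the range is unwritten (or the filesystem has no
 * unwritten extent support) we simply skip that unaligned tail;
 * otherwise the code below trims "del" down to those trailing mod
 * blocks and converts them to unwritten instead of freeing them.
 */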
5238 if (del.br_state == XFS_EXT_UNWRITTEN ||
5239 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5241 * This piece is unwritten, or we're not
5242 * using unwritten extents. Skip over it.
5245 end -= mod > del.br_blockcount ?
5246 del.br_blockcount : mod;
5247 if (end < got.br_startoff &&
5248 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5255 * It's written, turn it unwritten.
5256 * This is better than zeroing it.
5258 ASSERT(del.br_state == XFS_EXT_NORM);
5259 ASSERT(tp->t_blk_res > 0);
5261 * If this spans a realtime extent boundary,
5262 * chop it back to the start of the one we end at.
5264 if (del.br_blockcount > mod) {
5265 del.br_startoff += del.br_blockcount - mod;
5266 del.br_startblock += del.br_blockcount - mod;
5267 del.br_blockcount = mod;
5269 del.br_state = XFS_EXT_UNWRITTEN;
5270 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5271 whichfork, &icur, &cur, &del,
5277 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
5280 * Realtime extent is lined up at the end but not
5281 * at the front. We'll get rid of full extents if
5284 mod = mp->m_sb.sb_rextsize - mod;
5285 if (del.br_blockcount > mod) {
5286 del.br_blockcount -= mod;
5287 del.br_startoff += mod;
5288 del.br_startblock += mod;
5289 } else if ((del.br_startoff == start &&
5290 (del.br_state == XFS_EXT_UNWRITTEN ||
5291 tp->t_blk_res == 0)) ||
5292 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5294 * Can't make it unwritten. There isn't
5295 * a full extent here so just skip it.
5297 ASSERT(end >= del.br_blockcount);
5298 end -= del.br_blockcount;
5299 if (got.br_startoff > end &&
5300 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5305 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5306 struct xfs_bmbt_irec prev;
5309 * This one is already unwritten.
5310 * It must have a written left neighbor.
5311 * Unwrite the killed part of that one and
5314 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5316 ASSERT(prev.br_state == XFS_EXT_NORM);
5317 ASSERT(!isnullstartblock(prev.br_startblock));
5318 ASSERT(del.br_startblock ==
5319 prev.br_startblock + prev.br_blockcount);
5320 if (prev.br_startoff < start) {
5321 mod = start - prev.br_startoff;
5322 prev.br_blockcount -= mod;
5323 prev.br_startblock += mod;
5324 prev.br_startoff = start;
5326 prev.br_state = XFS_EXT_UNWRITTEN;
5327 error = xfs_bmap_add_extent_unwritten_real(tp,
5328 ip, whichfork, &icur, &cur,
5334 ASSERT(del.br_state == XFS_EXT_NORM);
5335 del.br_state = XFS_EXT_UNWRITTEN;
5336 error = xfs_bmap_add_extent_unwritten_real(tp,
5337 ip, whichfork, &icur, &cur,
5347 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5350 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5351 &del, &tmp_logflags, whichfork,
5353 logflags |= tmp_logflags;
5359 max_len -= del.br_blockcount;
5360 end = del.br_startoff - 1;
5363 * If not done go on to the next (previous) record.
5365 if (end != (xfs_fileoff_t)-1 && end >= start) {
5366 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5367 (got.br_startoff > end &&
5368 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5375 if (done || end == (xfs_fileoff_t)-1 || end < start)
5378 *rlen = end - start + 1;
5381 * Convert to a btree if necessary.
5383 if (xfs_bmap_needs_btree(ip, whichfork)) {
5384 ASSERT(cur == NULL);
5385 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5386 &tmp_logflags, whichfork);
5387 logflags |= tmp_logflags;
5392 * transform from btree to extents, give it cur
5394 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5395 ASSERT(cur != NULL);
5396 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5398 logflags |= tmp_logflags;
5403 * transform from extents to local?
5408 * Log everything. Do this after conversion, there's no point in
5409 * logging the extent records if we've converted to btree format.
5411 if ((logflags & xfs_ilog_fext(whichfork)) &&
5412 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5413 logflags &= ~xfs_ilog_fext(whichfork);
5414 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5415 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5416 logflags &= ~xfs_ilog_fbroot(whichfork);
5418 * Log the inode even in the error case; if the transaction
5419 * is dirty we'll need to shut down the filesystem.
5422 xfs_trans_log_inode(tp, ip, logflags);
5425 cur->bc_private.b.allocated = 0;
5426 xfs_btree_del_cursor(cur, error);
5431 /* Unmap a range of a file. */
5435 struct xfs_inode *ip,
5444 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5450 * Determine whether an extent shift can be accomplished by a merge with the
5451 * extent that precedes the target hole of the shift.
5455 struct xfs_bmbt_irec *left, /* preceding extent */
5456 struct xfs_bmbt_irec *got, /* current extent to shift */
5457 xfs_fileoff_t shift) /* shift fsb */
5459 xfs_fileoff_t startoff;
5461 startoff = got->br_startoff - shift;
5464 * The extent, once shifted, must be adjacent in-file and on-disk with
5465 * the preceding extent.
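/*
 * Illustrative example (hypothetical values, not from the original
 * source): left = {startoff 0, startblock 100, blockcount 10} and
 * got = {startoff 15, startblock 110, blockcount 5} with shift = 5.
 * The shifted startoff is 10, which lines up exactly with the end of
 * "left" both in the file and on disk, so the two can be merged into a
 * single 15-block extent, provided the states match and the merged
 * length does not exceed MAXEXTLEN.
 */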
5467 if ((left->br_startoff + left->br_blockcount != startoff) ||
5468 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5469 (left->br_state != got->br_state) ||
5470 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5477 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5478 * hole in the file. If an extent shift would result in the extent being fully
5479 * adjacent to the extent that currently precedes the hole, we can merge with
5480 * the preceding extent rather than do the shift.
5482 * This function assumes the caller has verified a shift-by-merge is possible
5483 * with the provided extents via xfs_bmse_can_merge().
5487 struct xfs_trans *tp,
5488 struct xfs_inode *ip,
5490 xfs_fileoff_t shift, /* shift fsb */
5491 struct xfs_iext_cursor *icur,
5492 struct xfs_bmbt_irec *got, /* extent to shift */
5493 struct xfs_bmbt_irec *left, /* preceding extent */
5494 struct xfs_btree_cur *cur,
5495 int *logflags) /* output */
5497 struct xfs_bmbt_irec new;
5498 xfs_filblks_t blockcount;
5500 struct xfs_mount *mp = ip->i_mount;
5502 blockcount = left->br_blockcount + got->br_blockcount;
5504 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5505 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5506 ASSERT(xfs_bmse_can_merge(left, got, shift));
5509 new.br_blockcount = blockcount;
5512 * Update the on-disk extent count, the btree if necessary and log the
5515 XFS_IFORK_NEXT_SET(ip, whichfork,
5516 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5517 *logflags |= XFS_ILOG_CORE;
5519 *logflags |= XFS_ILOG_DEXT;
5523 /* lookup and remove the extent to merge */
5524 error = xfs_bmbt_lookup_eq(cur, got, &i);
5527 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5529 error = xfs_btree_delete(cur, &i);
5532 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5534 /* lookup and update size of the previous extent */
5535 error = xfs_bmbt_lookup_eq(cur, left, &i);
5538 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5540 error = xfs_bmbt_update(cur, &new);
5545 xfs_iext_remove(ip, icur, 0);
5546 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
5547 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5550 /* update reverse mapping. rmap functions merge the rmaps for us */
5551 error = xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5554 memcpy(&new, got, sizeof(new));
5555 new.br_startoff = left->br_startoff + left->br_blockcount;
5556 return xfs_rmap_map_extent(tp, ip, whichfork, &new);
5560 xfs_bmap_shift_update_extent(
5561 struct xfs_trans *tp,
5562 struct xfs_inode *ip,
5564 struct xfs_iext_cursor *icur,
5565 struct xfs_bmbt_irec *got,
5566 struct xfs_btree_cur *cur,
5568 xfs_fileoff_t startoff)
5570 struct xfs_mount *mp = ip->i_mount;
5571 struct xfs_bmbt_irec prev = *got;
5574 *logflags |= XFS_ILOG_CORE;
5576 got->br_startoff = startoff;
5579 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5582 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5584 error = xfs_bmbt_update(cur, got);
5588 *logflags |= XFS_ILOG_DEXT;
5591 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5594 /* update reverse mapping */
5595 error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5598 return xfs_rmap_map_extent(tp, ip, whichfork, got);
5602 xfs_bmap_collapse_extents(
5603 struct xfs_trans *tp,
5604 struct xfs_inode *ip,
5605 xfs_fileoff_t *next_fsb,
5606 xfs_fileoff_t offset_shift_fsb,
5609 int whichfork = XFS_DATA_FORK;
5610 struct xfs_mount *mp = ip->i_mount;
5611 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5612 struct xfs_btree_cur *cur = NULL;
5613 struct xfs_bmbt_irec got, prev;
5614 struct xfs_iext_cursor icur;
5615 xfs_fileoff_t new_startoff;
5619 if (unlikely(XFS_TEST_ERROR(
5620 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5621 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5622 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5623 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5624 return -EFSCORRUPTED;
5627 if (XFS_FORCED_SHUTDOWN(mp))
5630 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5632 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5633 error = xfs_iread_extents(tp, ip, whichfork);
5638 if (ifp->if_flags & XFS_IFBROOT) {
5639 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5640 cur->bc_private.b.flags = 0;
5643 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5647 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5650 new_startoff = got.br_startoff - offset_shift_fsb;
5651 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5652 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5657 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5658 error = xfs_bmse_merge(tp, ip, whichfork,
5659 offset_shift_fsb, &icur, &got, &prev,
5666 if (got.br_startoff < offset_shift_fsb) {
5672 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5673 cur, &logflags, new_startoff);
5678 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5683 *next_fsb = got.br_startoff;
5686 xfs_btree_del_cursor(cur, error);
5688 xfs_trans_log_inode(tp, ip, logflags);
5692 /* Make sure we won't be right-shifting an extent past the maximum bound. */
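/*
 * Illustrative note (not from the original source): the on-disk bmbt
 * record stores br_startoff in a field narrower than 64 bits, so the
 * check below masks the shifted offset with BMBT_STARTOFF_MASK. If the
 * last extent's start offset plus the shift wraps past that limit, the
 * masked value comes out smaller than the original startoff and the
 * insert-range shift is rejected.
 */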
5694 xfs_bmap_can_insert_extents(
5695 struct xfs_inode *ip,
5697 xfs_fileoff_t shift)
5699 struct xfs_bmbt_irec got;
5703 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5705 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5708 xfs_ilock(ip, XFS_ILOCK_EXCL);
5709 error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5710 if (!error && !is_empty && got.br_startoff >= off &&
5711 ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5713 xfs_iunlock(ip, XFS_ILOCK_EXCL);
5719 xfs_bmap_insert_extents(
5720 struct xfs_trans *tp,
5721 struct xfs_inode *ip,
5722 xfs_fileoff_t *next_fsb,
5723 xfs_fileoff_t offset_shift_fsb,
5725 xfs_fileoff_t stop_fsb)
5727 int whichfork = XFS_DATA_FORK;
5728 struct xfs_mount *mp = ip->i_mount;
5729 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5730 struct xfs_btree_cur *cur = NULL;
5731 struct xfs_bmbt_irec got, next;
5732 struct xfs_iext_cursor icur;
5733 xfs_fileoff_t new_startoff;
5737 if (unlikely(XFS_TEST_ERROR(
5738 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5739 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5740 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5741 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5742 return -EFSCORRUPTED;
5745 if (XFS_FORCED_SHUTDOWN(mp))
5748 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5750 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5751 error = xfs_iread_extents(tp, ip, whichfork);
5756 if (ifp->if_flags & XFS_IFBROOT) {
5757 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5758 cur->bc_private.b.flags = 0;
5761 if (*next_fsb == NULLFSBLOCK) {
5762 xfs_iext_last(ifp, &icur);
5763 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5764 stop_fsb > got.br_startoff) {
5769 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5774 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5777 if (stop_fsb >= got.br_startoff + got.br_blockcount) {
5782 new_startoff = got.br_startoff + offset_shift_fsb;
5783 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5784 if (new_startoff + got.br_blockcount > next.br_startoff) {
5790 * Unlike a left shift (which involves a hole punch), a right
5791 * shift does not modify extent neighbors in any way. We should
5792 * never find mergeable extents in this scenario. Check anyway
5793 * and warn if we encounter two extents that could be one.
5795 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
5799 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5800 cur, &logflags, new_startoff);
5804 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5805 stop_fsb >= got.br_startoff + got.br_blockcount) {
5810 *next_fsb = got.br_startoff;
5813 xfs_btree_del_cursor(cur, error);
5815 xfs_trans_log_inode(tp, ip, logflags);
5820 * Splits an extent into two extents at split_fsb block such that it is the
5821 * first block of the new second extent. @split_fsb is the block at which the
5822 * extent is split. If split_fsb lies in a hole or at the first block of an
5823 * extent, just return 0.
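/*
 * Illustrative example (hypothetical values, not from the original
 * source): splitting an extent {startoff 0, startblock 100,
 * blockcount 50} at split_fsb = 20 shrinks the existing record to
 * 20 blocks and inserts a new record {startoff 20, startblock 120,
 * blockcount 30} with the same extent state, bumping the extent count
 * by one.
 */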
5826 xfs_bmap_split_extent_at(
5827 struct xfs_trans *tp,
5828 struct xfs_inode *ip,
5829 xfs_fileoff_t split_fsb)
5831 int whichfork = XFS_DATA_FORK;
5832 struct xfs_btree_cur *cur = NULL;
5833 struct xfs_bmbt_irec got;
5834 struct xfs_bmbt_irec new; /* split extent */
5835 struct xfs_mount *mp = ip->i_mount;
5836 struct xfs_ifork *ifp;
5837 xfs_fsblock_t gotblkcnt; /* new block count for got */
5838 struct xfs_iext_cursor icur;
5843 if (unlikely(XFS_TEST_ERROR(
5844 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5845 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5846 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5847 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5848 XFS_ERRLEVEL_LOW, mp);
5849 return -EFSCORRUPTED;
5852 if (XFS_FORCED_SHUTDOWN(mp))
5855 ifp = XFS_IFORK_PTR(ip, whichfork);
5856 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5857 /* Read in all the extents */
5858 error = xfs_iread_extents(tp, ip, whichfork);
5864 * If there are no extents, or split_fsb lies in a hole, we are done.
5866 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
5867 got.br_startoff >= split_fsb)
5870 gotblkcnt = split_fsb - got.br_startoff;
5871 new.br_startoff = split_fsb;
5872 new.br_startblock = got.br_startblock + gotblkcnt;
5873 new.br_blockcount = got.br_blockcount - gotblkcnt;
5874 new.br_state = got.br_state;
5876 if (ifp->if_flags & XFS_IFBROOT) {
5877 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5878 cur->bc_private.b.flags = 0;
5879 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5882 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5885 got.br_blockcount = gotblkcnt;
5886 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
5889 logflags = XFS_ILOG_CORE;
5891 error = xfs_bmbt_update(cur, &got);
5895 logflags |= XFS_ILOG_DEXT;
5897 /* Add new extent */
5898 xfs_iext_next(ifp, &icur);
5899 xfs_iext_insert(ip, &icur, &new, 0);
5900 XFS_IFORK_NEXT_SET(ip, whichfork,
5901 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5904 error = xfs_bmbt_lookup_eq(cur, &new, &i);
5907 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
5908 error = xfs_btree_insert(cur, &i);
5911 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5915 * Convert to a btree if necessary.
5917 if (xfs_bmap_needs_btree(ip, whichfork)) {
5918 int tmp_logflags; /* partial log flag return val */
5920 ASSERT(cur == NULL);
5921 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5922 &tmp_logflags, whichfork);
5923 logflags |= tmp_logflags;
5928 cur->bc_private.b.allocated = 0;
5929 xfs_btree_del_cursor(cur, error);
5933 xfs_trans_log_inode(tp, ip, logflags);
5938 xfs_bmap_split_extent(
5939 struct xfs_inode *ip,
5940 xfs_fileoff_t split_fsb)
5942 struct xfs_mount *mp = ip->i_mount;
5943 struct xfs_trans *tp;
5946 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
5947 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
5951 xfs_ilock(ip, XFS_ILOCK_EXCL);
5952 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
5954 error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
5958 return xfs_trans_commit(tp);
5961 xfs_trans_cancel(tp);
5965 /* Deferred mapping is only for real extents in the data fork. */
5967 xfs_bmap_is_update_needed(
5968 struct xfs_bmbt_irec *bmap)
5970 return bmap->br_startblock != HOLESTARTBLOCK &&
5971 bmap->br_startblock != DELAYSTARTBLOCK;
5974 /* Record a bmap intent. */
5977 struct xfs_trans *tp,
5978 enum xfs_bmap_intent_type type,
5979 struct xfs_inode *ip,
5981 struct xfs_bmbt_irec *bmap)
5983 struct xfs_bmap_intent *bi;
5985 trace_xfs_bmap_defer(tp->t_mountp,
5986 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
5988 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
5989 ip->i_ino, whichfork,
5991 bmap->br_blockcount,
5994 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
5995 INIT_LIST_HEAD(&bi->bi_list);
5998 bi->bi_whichfork = whichfork;
5999 bi->bi_bmap = *bmap;
6001 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6005 /* Map an extent into a file. */
6007 xfs_bmap_map_extent(
6008 struct xfs_trans *tp,
6009 struct xfs_inode *ip,
6010 struct xfs_bmbt_irec *PREV)
6012 if (!xfs_bmap_is_update_needed(PREV))
6015 return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
6018 /* Unmap an extent out of a file. */
6020 xfs_bmap_unmap_extent(
6021 struct xfs_trans *tp,
6022 struct xfs_inode *ip,
6023 struct xfs_bmbt_irec *PREV)
6025 if (!xfs_bmap_is_update_needed(PREV))
6028 return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
6032 * Process one of the deferred bmap operations. We pass back the
6033 * btree cursor to maintain our lock on the bmapbt between calls.
6036 xfs_bmap_finish_one(
6037 struct xfs_trans *tp,
6038 struct xfs_inode *ip,
6039 enum xfs_bmap_intent_type type,
6041 xfs_fileoff_t startoff,
6042 xfs_fsblock_t startblock,
6043 xfs_filblks_t *blockcount,
6048 ASSERT(tp->t_firstblock == NULLFSBLOCK);
6050 trace_xfs_bmap_deferred(tp->t_mountp,
6051 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6052 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6053 ip->i_ino, whichfork, startoff, *blockcount, state);
6055 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6056 return -EFSCORRUPTED;
6058 if (XFS_TEST_ERROR(false, tp->t_mountp,
6059 XFS_ERRTAG_BMAP_FINISH_ONE))
6064 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6068 case XFS_BMAP_UNMAP:
6069 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6070 XFS_BMAPI_REMAP, 1);
6074 error = -EFSCORRUPTED;
6080 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6082 xfs_bmap_validate_extent(
6083 struct xfs_inode *ip,
6085 struct xfs_bmbt_irec *irec)
6087 struct xfs_mount *mp = ip->i_mount;
6088 xfs_fsblock_t endfsb;
6091 isrt = XFS_IS_REALTIME_INODE(ip);
6092 endfsb = irec->br_startblock + irec->br_blockcount - 1;
6094 if (!xfs_verify_rtbno(mp, irec->br_startblock))
6095 return __this_address;
6096 if (!xfs_verify_rtbno(mp, endfsb))
6097 return __this_address;
6099 if (!xfs_verify_fsbno(mp, irec->br_startblock))
6100 return __this_address;
6101 if (!xfs_verify_fsbno(mp, endfsb))
6102 return __this_address;
6103 if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
6104 XFS_FSB_TO_AGNO(mp, endfsb))
6105 return __this_address;
6107 if (irec->br_state != XFS_EXT_NORM) {
6108 if (whichfork != XFS_DATA_FORK)
6109 return __this_address;
6110 if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
6111 return __this_address;