1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2013 Red Hat, Inc.
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
17 #include "xfs_dir2_priv.h"
18 #include "xfs_trans.h"
20 #include "xfs_attr_leaf.h"
21 #include "xfs_error.h"
22 #include "xfs_trace.h"
23 #include "xfs_buf_item.h"
29 * Routines to implement directories as Btrees of hashed names.
32 /*========================================================================
33 * Function prototypes for the kernel.
34 *========================================================================*/
37 * Routines used for growing the Btree.
39 STATIC int xfs_da3_root_split(xfs_da_state_t *state,
40 xfs_da_state_blk_t *existing_root,
41 xfs_da_state_blk_t *new_child);
42 STATIC int xfs_da3_node_split(xfs_da_state_t *state,
43 xfs_da_state_blk_t *existing_blk,
44 xfs_da_state_blk_t *split_blk,
45 xfs_da_state_blk_t *blk_to_add,
48 STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
49 xfs_da_state_blk_t *node_blk_1,
50 xfs_da_state_blk_t *node_blk_2);
51 STATIC void xfs_da3_node_add(xfs_da_state_t *state,
52 xfs_da_state_blk_t *old_node_blk,
53 xfs_da_state_blk_t *new_node_blk);
56 * Routines used for shrinking the Btree.
58 STATIC int xfs_da3_root_join(xfs_da_state_t *state,
59 xfs_da_state_blk_t *root_blk);
60 STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
61 STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
62 xfs_da_state_blk_t *drop_blk);
63 STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
64 xfs_da_state_blk_t *src_node_blk,
65 xfs_da_state_blk_t *dst_node_blk);
70 STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
71 xfs_da_state_blk_t *drop_blk,
72 xfs_da_state_blk_t *save_blk);
75 kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
78 * Allocate a dir-state structure.
79 * We don't put them on the stack since they're large.
82 xfs_da_state_alloc(void)
84 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
88 * Kill the altpath contents of a da-state structure.
91 xfs_da_state_kill_altpath(xfs_da_state_t *state)
95 for (i = 0; i < state->altpath.active; i++)
96 state->altpath.blk[i].bp = NULL;
97 state->altpath.active = 0;
101 * Free a da-state structure.
104 xfs_da_state_free(xfs_da_state_t *state)
106 xfs_da_state_kill_altpath(state);
108 memset((char *)state, 0, sizeof(*state));
110 kmem_cache_free(xfs_da_state_zone, state);
113 static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
115 if (whichfork == XFS_DATA_FORK)
116 return mp->m_dir_geo->fsbcount;
117 return mp->m_attr_geo->fsbcount;
121 xfs_da3_node_hdr_from_disk(
122 struct xfs_mount *mp,
123 struct xfs_da3_icnode_hdr *to,
124 struct xfs_da_intnode *from)
126 if (xfs_sb_version_hascrc(&mp->m_sb)) {
127 struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from;
129 to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
130 to->back = be32_to_cpu(from3->hdr.info.hdr.back);
131 to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
132 to->count = be16_to_cpu(from3->hdr.__count);
133 to->level = be16_to_cpu(from3->hdr.__level);
134 to->btree = from3->__btree;
135 ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
137 to->forw = be32_to_cpu(from->hdr.info.forw);
138 to->back = be32_to_cpu(from->hdr.info.back);
139 to->magic = be16_to_cpu(from->hdr.info.magic);
140 to->count = be16_to_cpu(from->hdr.__count);
141 to->level = be16_to_cpu(from->hdr.__level);
142 to->btree = from->__btree;
143 ASSERT(to->magic == XFS_DA_NODE_MAGIC);
148 xfs_da3_node_hdr_to_disk(
149 struct xfs_mount *mp,
150 struct xfs_da_intnode *to,
151 struct xfs_da3_icnode_hdr *from)
153 if (xfs_sb_version_hascrc(&mp->m_sb)) {
154 struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to;
156 ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
157 to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
158 to3->hdr.info.hdr.back = cpu_to_be32(from->back);
159 to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
160 to3->hdr.__count = cpu_to_be16(from->count);
161 to3->hdr.__level = cpu_to_be16(from->level);
163 ASSERT(from->magic == XFS_DA_NODE_MAGIC);
164 to->hdr.info.forw = cpu_to_be32(from->forw);
165 to->hdr.info.back = cpu_to_be32(from->back);
166 to->hdr.info.magic = cpu_to_be16(from->magic);
167 to->hdr.__count = cpu_to_be16(from->count);
168 to->hdr.__level = cpu_to_be16(from->level);
173 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
174 * accessible on v5 filesystems. This header format is common across da node,
175 * attr leaf and dir leaf blocks.
178 xfs_da3_blkinfo_verify(
180 struct xfs_da3_blkinfo *hdr3)
182 struct xfs_mount *mp = bp->b_mount;
183 struct xfs_da_blkinfo *hdr = &hdr3->hdr;
185 if (!xfs_verify_magic16(bp, hdr->magic))
186 return __this_address;
188 if (xfs_sb_version_hascrc(&mp->m_sb)) {
189 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
190 return __this_address;
191 if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
192 return __this_address;
193 if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
194 return __this_address;
200 static xfs_failaddr_t
204 struct xfs_mount *mp = bp->b_mount;
205 struct xfs_da_intnode *hdr = bp->b_addr;
206 struct xfs_da3_icnode_hdr ichdr;
209 xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);
211 fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
215 if (ichdr.level == 0)
216 return __this_address;
217 if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
218 return __this_address;
219 if (ichdr.count == 0)
220 return __this_address;
223 * we don't know if the node is for an attribute or directory tree,
224 * so only fail if the count is outside both bounds
226 if (ichdr.count > mp->m_dir_geo->node_ents &&
227 ichdr.count > mp->m_attr_geo->node_ents)
228 return __this_address;
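	/*
	 * i.e. only reject the count when it exceeds both geometries:
	 * anything at or below max(dir node_ents, attr node_ents) gets the
	 * benefit of the doubt here.
	 */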
230 /* XXX: hash order check? */
236 xfs_da3_node_write_verify(
239 struct xfs_mount *mp = bp->b_mount;
240 struct xfs_buf_log_item *bip = bp->b_log_item;
241 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
244 fa = xfs_da3_node_verify(bp);
246 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
250 if (!xfs_sb_version_hascrc(&mp->m_sb))
254 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
256 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
260 * leaf/node format detection on trees is sketchy, so a node read can be done on
261 * leaf level blocks when detection identifies the tree as a node format tree
262 * incorrectly. In this case, we need to swap the verifier to match the correct
263 * format of the block being read.
266 xfs_da3_node_read_verify(
269 struct xfs_da_blkinfo *info = bp->b_addr;
272 switch (be16_to_cpu(info->magic)) {
273 case XFS_DA3_NODE_MAGIC:
274 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
275 xfs_verifier_error(bp, -EFSBADCRC,
280 case XFS_DA_NODE_MAGIC:
281 fa = xfs_da3_node_verify(bp);
283 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
285 case XFS_ATTR_LEAF_MAGIC:
286 case XFS_ATTR3_LEAF_MAGIC:
287 bp->b_ops = &xfs_attr3_leaf_buf_ops;
288 bp->b_ops->verify_read(bp);
290 case XFS_DIR2_LEAFN_MAGIC:
291 case XFS_DIR3_LEAFN_MAGIC:
292 bp->b_ops = &xfs_dir3_leafn_buf_ops;
293 bp->b_ops->verify_read(bp);
296 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
301 /* Verify the structure of a da3 block. */
302 static xfs_failaddr_t
303 xfs_da3_node_verify_struct(
306 struct xfs_da_blkinfo *info = bp->b_addr;
308 switch (be16_to_cpu(info->magic)) {
309 case XFS_DA3_NODE_MAGIC:
310 case XFS_DA_NODE_MAGIC:
311 return xfs_da3_node_verify(bp);
312 case XFS_ATTR_LEAF_MAGIC:
313 case XFS_ATTR3_LEAF_MAGIC:
314 bp->b_ops = &xfs_attr3_leaf_buf_ops;
315 return bp->b_ops->verify_struct(bp);
316 case XFS_DIR2_LEAFN_MAGIC:
317 case XFS_DIR3_LEAFN_MAGIC:
318 bp->b_ops = &xfs_dir3_leafn_buf_ops;
319 return bp->b_ops->verify_struct(bp);
321 return __this_address;
325 const struct xfs_buf_ops xfs_da3_node_buf_ops = {
326 .name = "xfs_da3_node",
327 .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
328 cpu_to_be16(XFS_DA3_NODE_MAGIC) },
329 .verify_read = xfs_da3_node_read_verify,
330 .verify_write = xfs_da3_node_write_verify,
331 .verify_struct = xfs_da3_node_verify_struct,
335 xfs_da3_node_set_type(
336 struct xfs_trans *tp,
339 struct xfs_da_blkinfo *info = bp->b_addr;
341 switch (be16_to_cpu(info->magic)) {
342 case XFS_DA_NODE_MAGIC:
343 case XFS_DA3_NODE_MAGIC:
344 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
346 case XFS_ATTR_LEAF_MAGIC:
347 case XFS_ATTR3_LEAF_MAGIC:
348 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_ATTR_LEAF_BUF);
350 case XFS_DIR2_LEAFN_MAGIC:
351 case XFS_DIR3_LEAFN_MAGIC:
352 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
355 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp,
356 info, sizeof(*info));
357 xfs_trans_brelse(tp, bp);
358 return -EFSCORRUPTED;
364 struct xfs_trans *tp,
365 struct xfs_inode *dp,
367 struct xfs_buf **bpp,
372 error = xfs_da_read_buf(tp, dp, bno, 0, bpp, whichfork,
373 &xfs_da3_node_buf_ops);
374 if (error || !*bpp || !tp)
376 return xfs_da3_node_set_type(tp, *bpp);
380 xfs_da3_node_read_mapped(
381 struct xfs_trans *tp,
382 struct xfs_inode *dp,
383 xfs_daddr_t mappedbno,
384 struct xfs_buf **bpp,
387 struct xfs_mount *mp = dp->i_mount;
390 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno,
391 XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0,
392 bpp, &xfs_da3_node_buf_ops);
396 if (whichfork == XFS_ATTR_FORK)
397 xfs_buf_set_ref(*bpp, XFS_ATTR_BTREE_REF);
399 xfs_buf_set_ref(*bpp, XFS_DIR_BTREE_REF);
403 return xfs_da3_node_set_type(tp, *bpp);
406 /*========================================================================
407 * Routines used for growing the Btree.
408 *========================================================================*/
411 * Create the initial contents of an intermediate node.
415 struct xfs_da_args *args,
418 struct xfs_buf **bpp,
421 struct xfs_da_intnode *node;
422 struct xfs_trans *tp = args->trans;
423 struct xfs_mount *mp = tp->t_mountp;
424 struct xfs_da3_icnode_hdr ichdr = {0};
427 struct xfs_inode *dp = args->dp;
429 trace_xfs_da_node_create(args);
430 ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
432 error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork);
435 bp->b_ops = &xfs_da3_node_buf_ops;
436 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
439 if (xfs_sb_version_hascrc(&mp->m_sb)) {
440 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
442 memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
443 ichdr.magic = XFS_DA3_NODE_MAGIC;
444 hdr3->info.blkno = cpu_to_be64(bp->b_bn);
445 hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
446 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
448 ichdr.magic = XFS_DA_NODE_MAGIC;
452 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
453 xfs_trans_log_buf(tp, bp,
454 XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));
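	/*
	 * A note on the macro (a sketch of its expansion): XFS_DA_LOGRANGE(base,
	 * ptr, size) evaluates to the first and last byte offsets of
	 * [ptr, ptr + size) relative to base, matching the (first, last)
	 * arguments of xfs_trans_log_buf(), so only the node header region
	 * of the buffer is dirtied in the log here.
	 */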
461 * Split a leaf node, rebalance, then possibly split
462 * intermediate nodes, rebalance, etc.
466 struct xfs_da_state *state)
468 struct xfs_da_state_blk *oldblk;
469 struct xfs_da_state_blk *newblk;
470 struct xfs_da_state_blk *addblk;
471 struct xfs_da_intnode *node;
477 trace_xfs_da_split(state->args);
480 * Walk back up the tree splitting/inserting/adjusting as necessary.
481 * If we need to insert and there isn't room, split the node, then
482 * decide which fragment to insert the new block from below into.
483 * Note that we may split the root this way, but we need more fixup.
485 max = state->path.active - 1;
486 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
487 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
488 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
490 addblk = &state->path.blk[max]; /* initial dummy value */
491 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
492 oldblk = &state->path.blk[i];
493 newblk = &state->altpath.blk[i];
496 * If a leaf node then
497 * Allocate a new leaf node, then rebalance across them.
498 * else if an intermediate node then
499 * We split on the last layer, must we split the node?
501 switch (oldblk->magic) {
502 case XFS_ATTR_LEAF_MAGIC:
503 error = xfs_attr3_leaf_split(state, oldblk, newblk);
504 if ((error != 0) && (error != -ENOSPC)) {
505 return error; /* GROT: attr is inconsistent */
512 * Entry wouldn't fit, split the leaf again. The new
513 * extrablk will be consumed by xfs_da3_node_split if the node is split.
516 state->extravalid = 1;
518 state->extraafter = 0; /* before newblk */
519 trace_xfs_attr_leaf_split_before(state->args);
520 error = xfs_attr3_leaf_split(state, oldblk,
523 state->extraafter = 1; /* after newblk */
524 trace_xfs_attr_leaf_split_after(state->args);
525 error = xfs_attr3_leaf_split(state, newblk,
529 return error; /* GROT: attr inconsistent */
532 case XFS_DIR2_LEAFN_MAGIC:
533 error = xfs_dir2_leafn_split(state, oldblk, newblk);
538 case XFS_DA_NODE_MAGIC:
539 error = xfs_da3_node_split(state, oldblk, newblk, addblk,
543 return error; /* GROT: dir is inconsistent */
545 * Record the newly split block for the next time through?
555 * Update the btree to show the new hashval for this child.
557 xfs_da3_fixhashpath(state, &state->path);
563 * xfs_da3_node_split() should have consumed any extra blocks we added
564 * during a double leaf split in the attr fork. This is guaranteed as
565 * we can't be here if the attr fork only has a single leaf block.
567 ASSERT(state->extravalid == 0 ||
568 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
571 * Split the root node.
573 ASSERT(state->path.active == 0);
574 oldblk = &state->path.blk[0];
575 error = xfs_da3_root_split(state, oldblk, addblk);
580 * Update pointers to the node which used to be block 0 and just got
581 * bumped because of the addition of a new root node. Note that the
582 * original block 0 could be at any position in the list of blocks in
585 * Note: the magic numbers and sibling pointers are in the same physical
586 * place for both v2 and v3 headers (by design). Hence it doesn't matter
587 * which version of the xfs_da_intnode structure we use here as the
588 * result will be the same using either structure.
590 node = oldblk->bp->b_addr;
591 if (node->hdr.info.forw) {
592 if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
593 xfs_buf_corruption_error(oldblk->bp);
594 error = -EFSCORRUPTED;
597 node = addblk->bp->b_addr;
598 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
599 xfs_trans_log_buf(state->args->trans, addblk->bp,
600 XFS_DA_LOGRANGE(node, &node->hdr.info,
601 sizeof(node->hdr.info)));
603 node = oldblk->bp->b_addr;
604 if (node->hdr.info.back) {
605 if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
606 xfs_buf_corruption_error(oldblk->bp);
607 error = -EFSCORRUPTED;
610 node = addblk->bp->b_addr;
611 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
612 xfs_trans_log_buf(state->args->trans, addblk->bp,
613 XFS_DA_LOGRANGE(node, &node->hdr.info,
614 sizeof(node->hdr.info)));
622 * Split the root. We have to create a new root and point to the two
623 * parts (the split old root) that we just created. Copy block zero to
624 * the EOF, extending the inode in process.
626 STATIC int /* error */
628 struct xfs_da_state *state,
629 struct xfs_da_state_blk *blk1,
630 struct xfs_da_state_blk *blk2)
632 struct xfs_da_intnode *node;
633 struct xfs_da_intnode *oldroot;
634 struct xfs_da_node_entry *btree;
635 struct xfs_da3_icnode_hdr nodehdr;
636 struct xfs_da_args *args;
638 struct xfs_inode *dp;
639 struct xfs_trans *tp;
640 struct xfs_dir2_leaf *leaf;
646 trace_xfs_da_root_split(state->args);
649 * Copy the existing (incorrect) block from the root node position
650 * to a free space somewhere.
653 error = xfs_da_grow_inode(args, &blkno);
659 error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork);
663 oldroot = blk1->bp->b_addr;
664 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
665 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
666 struct xfs_da3_icnode_hdr icnodehdr;
668 xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
669 btree = icnodehdr.btree;
670 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
671 level = icnodehdr.level;
674 * we are about to copy oldroot to bp, so set up the type
675 * of bp while we know exactly what it will be.
677 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
679 struct xfs_dir3_icleaf_hdr leafhdr;
681 leaf = (xfs_dir2_leaf_t *)oldroot;
682 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
684 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
685 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
686 size = (int)((char *)&leafhdr.ents[leafhdr.count] -
691 * we are about to copy oldroot to bp, so set up the type
692 * of bp while we know exactly what it will be.
694 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
698 * we can copy most of the information in the node from one block to
699 * another, but for CRC enabled headers we have to make sure that the
700 * block specific identifiers are kept intact. We update the buffer directly.
703 memcpy(node, oldroot, size);
704 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
705 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
706 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
708 node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
710 xfs_trans_log_buf(tp, bp, 0, size - 1);
712 bp->b_ops = blk1->bp->b_ops;
713 xfs_trans_buf_copy_type(bp, blk1->bp);
718 * Set up the new root node.
720 error = xfs_da3_node_create(args,
721 (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
722 level + 1, &bp, args->whichfork);
727 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
728 btree = nodehdr.btree;
729 btree[0].hashval = cpu_to_be32(blk1->hashval);
730 btree[0].before = cpu_to_be32(blk1->blkno);
731 btree[1].hashval = cpu_to_be32(blk2->hashval);
732 btree[1].before = cpu_to_be32(blk2->blkno);
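	/*
	 * Sketch of the result: the new root holds exactly two entries,
	 * each carrying the last hashval of the child it points at:
	 *
	 *   btree[0] = { blk1's last hashval, before = blk1->blkno }
	 *   btree[1] = { blk2's last hashval, before = blk2->blkno }
	 */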
734 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
737 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
738 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
739 ASSERT(blk1->blkno >= args->geo->leafblk &&
740 blk1->blkno < args->geo->freeblk);
741 ASSERT(blk2->blkno >= args->geo->leafblk &&
742 blk2->blkno < args->geo->freeblk);
746 /* Header is already logged by xfs_da_node_create */
747 xfs_trans_log_buf(tp, bp,
748 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
754 * Split the node, rebalance, then add the new entry.
756 STATIC int /* error */
758 struct xfs_da_state *state,
759 struct xfs_da_state_blk *oldblk,
760 struct xfs_da_state_blk *newblk,
761 struct xfs_da_state_blk *addblk,
765 struct xfs_da_intnode *node;
766 struct xfs_da3_icnode_hdr nodehdr;
771 struct xfs_inode *dp = state->args->dp;
773 trace_xfs_da_node_split(state->args);
775 node = oldblk->bp->b_addr;
776 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
779 * With V2 dirs the extra block is data or freespace.
781 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
782 newcount = 1 + useextra;
784 * Do we have to split the node?
786 if (nodehdr.count + newcount > state->args->geo->node_ents) {
788 * Allocate a new node, add to the doubly linked chain of
789 * nodes, then move some of our excess entries into it.
791 error = xfs_da_grow_inode(state->args, &blkno);
793 return error; /* GROT: dir is inconsistent */
795 error = xfs_da3_node_create(state->args, blkno, treelevel,
796 &newblk->bp, state->args->whichfork);
798 return error; /* GROT: dir is inconsistent */
799 newblk->blkno = blkno;
800 newblk->magic = XFS_DA_NODE_MAGIC;
801 xfs_da3_node_rebalance(state, oldblk, newblk);
802 error = xfs_da3_blk_link(state, oldblk, newblk);
811 * Insert the new entry(s) into the correct block
812 * (updating last hashval in the process).
814 * xfs_da3_node_add() inserts BEFORE the given index,
815 * and as a result of using node_lookup_int() we always
816 * point to a valid entry (not after one), but a split
817 * operation always results in a new block whose hashvals
818 * FOLLOW the current block.
820 * If we had double-split op below us, then add the extra block too.
822 node = oldblk->bp->b_addr;
823 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
824 if (oldblk->index <= nodehdr.count) {
826 xfs_da3_node_add(state, oldblk, addblk);
828 if (state->extraafter)
830 xfs_da3_node_add(state, oldblk, &state->extrablk);
831 state->extravalid = 0;
835 xfs_da3_node_add(state, newblk, addblk);
837 if (state->extraafter)
839 xfs_da3_node_add(state, newblk, &state->extrablk);
840 state->extravalid = 0;
848 * Balance the btree elements between two intermediate nodes,
849 * usually one full and one empty.
851 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
854 xfs_da3_node_rebalance(
855 struct xfs_da_state *state,
856 struct xfs_da_state_blk *blk1,
857 struct xfs_da_state_blk *blk2)
859 struct xfs_da_intnode *node1;
860 struct xfs_da_intnode *node2;
861 struct xfs_da_intnode *tmpnode;
862 struct xfs_da_node_entry *btree1;
863 struct xfs_da_node_entry *btree2;
864 struct xfs_da_node_entry *btree_s;
865 struct xfs_da_node_entry *btree_d;
866 struct xfs_da3_icnode_hdr nodehdr1;
867 struct xfs_da3_icnode_hdr nodehdr2;
868 struct xfs_trans *tp;
872 struct xfs_inode *dp = state->args->dp;
874 trace_xfs_da_node_rebalance(state->args);
876 node1 = blk1->bp->b_addr;
877 node2 = blk2->bp->b_addr;
878 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
879 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
880 btree1 = nodehdr1.btree;
881 btree2 = nodehdr2.btree;
884 * Figure out how many entries need to move, and in which direction.
885 * Swap the nodes around if that makes it simpler.
887 if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
888 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
889 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
890 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
894 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
895 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
896 btree1 = nodehdr1.btree;
897 btree2 = nodehdr2.btree;
901 count = (nodehdr1.count - nodehdr2.count) / 2;
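	/*
	 * e.g. counts of 10 and 2 give count = 4: four entries move from
	 * node1 to node2, leaving 6 and 6. A negative count means node2 is
	 * the fuller block and entries flow the other way.
	 */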
904 tp = state->args->trans;
906 * Two cases: high-to-low and low-to-high.
910 * Move elements in node2 up to make a hole.
912 tmp = nodehdr2.count;
914 tmp *= (uint)sizeof(xfs_da_node_entry_t);
915 btree_s = &btree2[0];
916 btree_d = &btree2[count];
917 memmove(btree_d, btree_s, tmp);
921 * Move the req'd B-tree elements from high in node1 to
924 nodehdr2.count += count;
925 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
926 btree_s = &btree1[nodehdr1.count - count];
927 btree_d = &btree2[0];
928 memcpy(btree_d, btree_s, tmp);
929 nodehdr1.count -= count;
932 * Move the req'd B-tree elements from low in node2 to
936 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
937 btree_s = &btree2[0];
938 btree_d = &btree1[nodehdr1.count];
939 memcpy(btree_d, btree_s, tmp);
940 nodehdr1.count += count;
942 xfs_trans_log_buf(tp, blk1->bp,
943 XFS_DA_LOGRANGE(node1, btree_d, tmp));
946 * Move elements in node2 down to fill the hole.
948 tmp = nodehdr2.count - count;
949 tmp *= (uint)sizeof(xfs_da_node_entry_t);
950 btree_s = &btree2[count];
951 btree_d = &btree2[0];
952 memmove(btree_d, btree_s, tmp);
953 nodehdr2.count -= count;
957 * Log header of node 1 and all current bits of node 2.
959 xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1);
960 xfs_trans_log_buf(tp, blk1->bp,
961 XFS_DA_LOGRANGE(node1, &node1->hdr,
962 state->args->geo->node_hdr_size));
964 xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2);
965 xfs_trans_log_buf(tp, blk2->bp,
966 XFS_DA_LOGRANGE(node2, &node2->hdr,
967 state->args->geo->node_hdr_size +
968 (sizeof(btree2[0]) * nodehdr2.count)));
971 * Record the last hashval from each block for upward propagation.
972 * (note: don't use the swapped node pointers)
975 node1 = blk1->bp->b_addr;
976 node2 = blk2->bp->b_addr;
977 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
978 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
979 btree1 = nodehdr1.btree;
980 btree2 = nodehdr2.btree;
982 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
983 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
986 * Adjust the expected index for insertion.
988 if (blk1->index >= nodehdr1.count) {
989 blk2->index = blk1->index - nodehdr1.count;
990 blk1->index = nodehdr1.count + 1; /* make it invalid */
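	/*
	 * Worked example: if blk1 held 12 entries and 4 moved out, an
	 * insertion index of 9 no longer fits in blk1 (count is now 8), so
	 * the target becomes index 9 - 8 = 1 in blk2, and blk1's index is
	 * poisoned so it can't be used by mistake.
	 */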
995 * Add a new entry to an intermediate node.
999 struct xfs_da_state *state,
1000 struct xfs_da_state_blk *oldblk,
1001 struct xfs_da_state_blk *newblk)
1003 struct xfs_da_intnode *node;
1004 struct xfs_da3_icnode_hdr nodehdr;
1005 struct xfs_da_node_entry *btree;
1007 struct xfs_inode *dp = state->args->dp;
1009 trace_xfs_da_node_add(state->args);
1011 node = oldblk->bp->b_addr;
1012 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1013 btree = nodehdr.btree;
1015 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
1016 ASSERT(newblk->blkno != 0);
1017 if (state->args->whichfork == XFS_DATA_FORK)
1018 ASSERT(newblk->blkno >= state->args->geo->leafblk &&
1019 newblk->blkno < state->args->geo->freeblk);
1022 * We may need to make some room before we insert the new node.
1025 if (oldblk->index < nodehdr.count) {
1026 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
1027 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
1029 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
1030 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
1031 xfs_trans_log_buf(state->args->trans, oldblk->bp,
1032 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
1033 tmp + sizeof(*btree)));
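	/*
	 * Worked example: inserting at index 2 of a node holding 5 entries
	 * moves entries 2..4 up one slot (tmp = 3 * sizeof(*btree)), then
	 * logs the shifted entries plus the new one as a single logged
	 * region of tmp + sizeof(*btree) bytes.
	 */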
1036 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1037 xfs_trans_log_buf(state->args->trans, oldblk->bp,
1038 XFS_DA_LOGRANGE(node, &node->hdr,
1039 state->args->geo->node_hdr_size));
1042 * Copy the last hash value from the oldblk to propagate upwards.
1044 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1047 /*========================================================================
1048 * Routines used for shrinking the Btree.
1049 *========================================================================*/
1052 * Deallocate an empty leaf node, remove it from its parent,
1053 * possibly deallocating that block, etc...
1057 struct xfs_da_state *state)
1059 struct xfs_da_state_blk *drop_blk;
1060 struct xfs_da_state_blk *save_blk;
1064 trace_xfs_da_join(state->args);
1066 drop_blk = &state->path.blk[ state->path.active-1 ];
1067 save_blk = &state->altpath.blk[ state->path.active-1 ];
1068 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
1069 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
1070 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
1073 * Walk back up the tree joining/deallocating as necessary.
1074 * When we stop dropping blocks, break out.
1076 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
1077 state->path.active--) {
1079 * See if we can combine the block with a neighbor.
1080 * (action == 0) => no options, just leave
1081 * (action == 1) => coalesce, then unlink
1082 * (action == 2) => block empty, unlink it
1084 switch (drop_blk->magic) {
1085 case XFS_ATTR_LEAF_MAGIC:
1086 error = xfs_attr3_leaf_toosmall(state, &action);
1091 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
1093 case XFS_DIR2_LEAFN_MAGIC:
1094 error = xfs_dir2_leafn_toosmall(state, &action);
1099 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
1101 case XFS_DA_NODE_MAGIC:
1103 * Remove the offending node, fixup hashvals,
1104 * check for a toosmall neighbor.
1106 xfs_da3_node_remove(state, drop_blk);
1107 xfs_da3_fixhashpath(state, &state->path);
1108 error = xfs_da3_node_toosmall(state, &action);
1113 xfs_da3_node_unbalance(state, drop_blk, save_blk);
1116 xfs_da3_fixhashpath(state, &state->altpath);
1117 error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
1118 xfs_da_state_kill_altpath(state);
1121 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1123 drop_blk->bp = NULL;
1128 * We joined all the way to the top. If it turns out that
1129 * we only have one entry in the root, make the child block
1132 xfs_da3_node_remove(state, drop_blk);
1133 xfs_da3_fixhashpath(state, &state->path);
1134 error = xfs_da3_root_join(state, &state->path.blk[0]);
1140 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1142 __be16 magic = blkinfo->magic;
1145 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1146 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1147 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1148 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1150 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1151 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1153 ASSERT(!blkinfo->forw);
1154 ASSERT(!blkinfo->back);
1157 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1161 * We have only one entry in the root. Copy the only remaining child of
1162 * the old root to block 0 as the new root node.
1166 struct xfs_da_state *state,
1167 struct xfs_da_state_blk *root_blk)
1169 struct xfs_da_intnode *oldroot;
1170 struct xfs_da_args *args;
1173 struct xfs_da3_icnode_hdr oldroothdr;
1175 struct xfs_inode *dp = state->args->dp;
1177 trace_xfs_da_root_join(state->args);
1179 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1182 oldroot = root_blk->bp->b_addr;
1183 xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot);
1184 ASSERT(oldroothdr.forw == 0);
1185 ASSERT(oldroothdr.back == 0);
1188 * If the root has more than one child, then don't do anything.
1190 if (oldroothdr.count > 1)
1194 * Read in the (only) child block, then copy those bytes into
1195 * the root block's buffer and free the original child block.
1197 child = be32_to_cpu(oldroothdr.btree[0].before);
1199 error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork);
1202 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1205 * This could be copying a leaf back into the root block in the case of
1206 * there only being a single leaf block left in the tree. Hence we have
1207 * to update the b_ops pointer as well to match the buffer type change
1208 * that could occur. For dir3 blocks we also need to update the block
1209 * number in the buffer header.
1211 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
1212 root_blk->bp->b_ops = bp->b_ops;
1213 xfs_trans_buf_copy_type(root_blk->bp, bp);
1214 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1215 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1216 da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
1218 xfs_trans_log_buf(args->trans, root_blk->bp, 0,
1219 args->geo->blksize - 1);
1220 error = xfs_da_shrink_inode(args, child, bp);
1225 * Check a node block and its neighbors to see if the block should be
1226 * collapsed into one or the other neighbor. Always keep the block
1227 * with the smaller block number.
1228 * If the current block is over 50% full, don't try to join it, return 0.
1229 * If the block is empty, fill in the state structure and return 2.
1230 * If it can be collapsed, fill in the state structure and return 1.
1231 * If nothing can be done, return 0.
1234 xfs_da3_node_toosmall(
1235 struct xfs_da_state *state,
1238 struct xfs_da_intnode *node;
1239 struct xfs_da_state_blk *blk;
1240 struct xfs_da_blkinfo *info;
1243 struct xfs_da3_icnode_hdr nodehdr;
1249 struct xfs_inode *dp = state->args->dp;
1251 trace_xfs_da_node_toosmall(state->args);
1254 * Check for the degenerate case of the block being over 50% full.
1255 * If so, it's not worth even looking to see if we might be able
1256 * to coalesce with a sibling.
1258 blk = &state->path.blk[ state->path.active-1 ];
1259 info = blk->bp->b_addr;
1260 node = (xfs_da_intnode_t *)info;
1261 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1262 if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
1263 *action = 0; /* blk over 50%, don't try to join */
1264 return 0; /* blk over 50%, don't try to join */
1268 * Check for the degenerate case of the block being empty.
1269 * If the block is empty, we'll simply delete it, no need to
1270 * coalesce it with a sibling block. We choose (arbitrarily)
1271 * to merge with the forward block unless it is NULL.
1273 if (nodehdr.count == 0) {
1275 * Make altpath point to the block we want to keep and
1276 * path point to the block we want to drop (this one).
1278 forward = (info->forw != 0);
1279 memcpy(&state->altpath, &state->path, sizeof(state->path));
1280 error = xfs_da3_path_shift(state, &state->altpath, forward,
1293 * Examine each sibling block to see if we can coalesce with
1294 * at least 25% free space to spare. We need to figure out
1295 * whether to merge with the forward or the backward block.
1296 * We prefer coalescing with the lower numbered sibling so as
1297 * to shrink a directory over time.
1299 count = state->args->geo->node_ents;
1300 count -= state->args->geo->node_ents >> 2;
1301 count -= nodehdr.count;
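	/*
	 * i.e. count is now the room left after reserving 25% slack: with
	 * node_ents = 256 and 100 entries here, count = 256 - 64 - 100 =
	 * 92, so a sibling with at most 92 entries can be merged in.
	 */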
1303 /* start with smaller blk num */
1304 forward = nodehdr.forw < nodehdr.back;
1305 for (i = 0; i < 2; forward = !forward, i++) {
1306 struct xfs_da3_icnode_hdr thdr;
1308 blkno = nodehdr.forw;
1310 blkno = nodehdr.back;
1313 error = xfs_da3_node_read(state->args->trans, dp, blkno, &bp,
1314 state->args->whichfork);
1319 xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
1320 xfs_trans_brelse(state->args->trans, bp);
1322 if (count - thdr.count >= 0)
1323 break; /* fits with at least 25% to spare */
1331 * Make altpath point to the block we want to keep (the lower
1332 * numbered block) and path point to the block we want to drop.
1334 memcpy(&state->altpath, &state->path, sizeof(state->path));
1335 if (blkno < blk->blkno) {
1336 error = xfs_da3_path_shift(state, &state->altpath, forward,
1339 error = xfs_da3_path_shift(state, &state->path, forward,
1353 * Pick up the last hashvalue from an intermediate node.
1356 xfs_da3_node_lasthash(
1357 struct xfs_inode *dp,
1361 struct xfs_da3_icnode_hdr nodehdr;
1363 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
1365 *count = nodehdr.count;
1368 return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
1372 * Walk back up the tree adjusting hash values as necessary,
1373 * when we stop making changes, return.
1376 xfs_da3_fixhashpath(
1377 struct xfs_da_state *state,
1378 struct xfs_da_state_path *path)
1380 struct xfs_da_state_blk *blk;
1381 struct xfs_da_intnode *node;
1382 struct xfs_da_node_entry *btree;
1383 xfs_dahash_t lasthash=0;
1386 struct xfs_inode *dp = state->args->dp;
1388 trace_xfs_da_fixhashpath(state->args);
1390 level = path->active-1;
1391 blk = &path->blk[ level ];
1392 switch (blk->magic) {
1393 case XFS_ATTR_LEAF_MAGIC:
1394 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1398 case XFS_DIR2_LEAFN_MAGIC:
1399 lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
1403 case XFS_DA_NODE_MAGIC:
1404 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1409 for (blk--, level--; level >= 0; blk--, level--) {
1410 struct xfs_da3_icnode_hdr nodehdr;
1412 node = blk->bp->b_addr;
1413 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1414 btree = nodehdr.btree;
1415 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1417 blk->hashval = lasthash;
1418 btree[blk->index].hashval = cpu_to_be32(lasthash);
1419 xfs_trans_log_buf(state->args->trans, blk->bp,
1420 XFS_DA_LOGRANGE(node, &btree[blk->index],
1423 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1428 * Remove an entry from an intermediate node.
1431 xfs_da3_node_remove(
1432 struct xfs_da_state *state,
1433 struct xfs_da_state_blk *drop_blk)
1435 struct xfs_da_intnode *node;
1436 struct xfs_da3_icnode_hdr nodehdr;
1437 struct xfs_da_node_entry *btree;
1440 struct xfs_inode *dp = state->args->dp;
1442 trace_xfs_da_node_remove(state->args);
1444 node = drop_blk->bp->b_addr;
1445 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1446 ASSERT(drop_blk->index < nodehdr.count);
1447 ASSERT(drop_blk->index >= 0);
1450 * Copy over the offending entry, or just zero it out.
1452 index = drop_blk->index;
1453 btree = nodehdr.btree;
1454 if (index < nodehdr.count - 1) {
1455 tmp = nodehdr.count - index - 1;
1456 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1457 memmove(&btree[index], &btree[index + 1], tmp);
1458 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1459 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1460 index = nodehdr.count - 1;
1462 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1463 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1464 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1466 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1467 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1468 XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));
1471 * Copy the last hash value from the block to propagate upwards.
1473 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1477 * Unbalance the elements between two intermediate nodes,
1478 * move all Btree elements from one node into another.
1481 xfs_da3_node_unbalance(
1482 struct xfs_da_state *state,
1483 struct xfs_da_state_blk *drop_blk,
1484 struct xfs_da_state_blk *save_blk)
1486 struct xfs_da_intnode *drop_node;
1487 struct xfs_da_intnode *save_node;
1488 struct xfs_da_node_entry *drop_btree;
1489 struct xfs_da_node_entry *save_btree;
1490 struct xfs_da3_icnode_hdr drop_hdr;
1491 struct xfs_da3_icnode_hdr save_hdr;
1492 struct xfs_trans *tp;
1495 struct xfs_inode *dp = state->args->dp;
1497 trace_xfs_da_node_unbalance(state->args);
1499 drop_node = drop_blk->bp->b_addr;
1500 save_node = save_blk->bp->b_addr;
1501 xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
1502 xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
1503 drop_btree = drop_hdr.btree;
1504 save_btree = save_hdr.btree;
1505 tp = state->args->trans;
1508 * If the dying block has lower hashvals, then move all the
1509 * elements in the remaining block up to make a hole.
1511 if ((be32_to_cpu(drop_btree[0].hashval) <
1512 be32_to_cpu(save_btree[0].hashval)) ||
1513 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1514 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1515 /* XXX: check this - is memmove dst correct? */
1516 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1517 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1520 xfs_trans_log_buf(tp, save_blk->bp,
1521 XFS_DA_LOGRANGE(save_node, &save_btree[0],
1522 (save_hdr.count + drop_hdr.count) *
1523 sizeof(xfs_da_node_entry_t)));
1525 sindex = save_hdr.count;
1526 xfs_trans_log_buf(tp, save_blk->bp,
1527 XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1528 drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1532 * Move all the B-tree elements from drop_blk to save_blk.
1534 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1535 memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1536 save_hdr.count += drop_hdr.count;
1538 xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr);
1539 xfs_trans_log_buf(tp, save_blk->bp,
1540 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1541 state->args->geo->node_hdr_size));
1544 * Save the last hashval in the remaining block for upward propagation.
1546 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1549 /*========================================================================
1550 * Routines used for finding things in the Btree.
1551 *========================================================================*/
1554 * Walk down the Btree looking for a particular filename, filling
1555 * in the state structure as we go.
1557 * We will set the state structure to point to each of the elements
1558 * in each of the nodes where either the hashval is or should be.
1560 * We support duplicate hashval's so for each entry in the current
1561 * node that could contain the desired hashval, descend. This is a
1562 * pruned depth-first tree search.
1565 xfs_da3_node_lookup_int(
1566 struct xfs_da_state *state,
1569 struct xfs_da_state_blk *blk;
1570 struct xfs_da_blkinfo *curr;
1571 struct xfs_da_intnode *node;
1572 struct xfs_da_node_entry *btree;
1573 struct xfs_da3_icnode_hdr nodehdr;
1574 struct xfs_da_args *args;
1576 xfs_dahash_t hashval;
1577 xfs_dahash_t btreehashval;
1583 unsigned int expected_level = 0;
1585 struct xfs_inode *dp = state->args->dp;
1590 * Descend thru the B-tree searching each level for the right
1591 * node to use, until the right hashval is found.
1593 blkno = args->geo->leafblk;
1594 for (blk = &state->path.blk[0], state->path.active = 1;
1595 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1596 blk++, state->path.active++) {
1598 * Read the next node down in the tree.
1601 error = xfs_da3_node_read(args->trans, args->dp, blkno,
1602 &blk->bp, args->whichfork);
1605 state->path.active--;
1608 curr = blk->bp->b_addr;
1609 magic = be16_to_cpu(curr->magic);
1611 if (magic == XFS_ATTR_LEAF_MAGIC ||
1612 magic == XFS_ATTR3_LEAF_MAGIC) {
1613 blk->magic = XFS_ATTR_LEAF_MAGIC;
1614 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1618 if (magic == XFS_DIR2_LEAFN_MAGIC ||
1619 magic == XFS_DIR3_LEAFN_MAGIC) {
1620 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1621 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
1626 if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
1627 xfs_buf_corruption_error(blk->bp);
1628 return -EFSCORRUPTED;
1631 blk->magic = XFS_DA_NODE_MAGIC;
1634 * Search an intermediate node for a match.
1636 node = blk->bp->b_addr;
1637 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1638 btree = nodehdr.btree;
1640 /* Tree taller than we can handle; bail out! */
1641 if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
1642 xfs_buf_corruption_error(blk->bp);
1643 return -EFSCORRUPTED;
1646 /* Check the level from the root. */
1647 if (blkno == args->geo->leafblk)
1648 expected_level = nodehdr.level - 1;
1649 else if (expected_level != nodehdr.level) {
1650 xfs_buf_corruption_error(blk->bp);
1651 return -EFSCORRUPTED;
1655 max = nodehdr.count;
1656 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1659 * Binary search. (note: small blocks will skip loop)
1661 probe = span = max / 2;
1662 hashval = args->hashval;
1665 btreehashval = be32_to_cpu(btree[probe].hashval);
1666 if (btreehashval < hashval)
1668 else if (btreehashval > hashval)
1673 ASSERT((probe >= 0) && (probe < max));
1674 ASSERT((span <= 4) ||
1675 (be32_to_cpu(btree[probe].hashval) == hashval));
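		/*
		 * Probe sequence sketch (assuming the usual halving loop the
		 * asserts above imply): with max = 64, probe starts at 32
		 * with span 32; span halves to 16, 8, then 4, at which point
		 * the binary search stops and the linear scans below settle
		 * on the first entry with a matching hashval.
		 */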
1678 * Since we may have duplicate hashval's, find the first
1679 * matching hashval in the node.
1682 be32_to_cpu(btree[probe].hashval) >= hashval) {
1685 while (probe < max &&
1686 be32_to_cpu(btree[probe].hashval) < hashval) {
1691 * Pick the right block to descend on.
1694 blk->index = max - 1;
1695 blkno = be32_to_cpu(btree[max - 1].before);
1698 blkno = be32_to_cpu(btree[probe].before);
1701 /* We can't point back to the root. */
1702 if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk))
1703 return -EFSCORRUPTED;
1706 if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
1707 return -EFSCORRUPTED;
1710 * A leaf block that ends in the hashval that we are interested in
1711 * (final hashval == search hashval) means that the next block may
1712 * contain more entries with the same hashval, shift upward to the
1713 * next leaf and keep searching.
1716 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1717 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1718 &blk->index, state);
1719 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1720 retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1721 blk->index = args->index;
1722 args->blkno = blk->blkno;
1725 return -EFSCORRUPTED;
1727 if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
1728 (blk->hashval == args->hashval)) {
1729 error = xfs_da3_path_shift(state, &state->path, 1, 1,
1735 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1736 /* path_shift() gives ENOENT */
1746 /*========================================================================
1748 *========================================================================*/
1751 * Compare two intermediate nodes for "order".
1755 struct xfs_inode *dp,
1756 struct xfs_buf *node1_bp,
1757 struct xfs_buf *node2_bp)
1759 struct xfs_da_intnode *node1;
1760 struct xfs_da_intnode *node2;
1761 struct xfs_da_node_entry *btree1;
1762 struct xfs_da_node_entry *btree2;
1763 struct xfs_da3_icnode_hdr node1hdr;
1764 struct xfs_da3_icnode_hdr node2hdr;
1766 node1 = node1_bp->b_addr;
1767 node2 = node2_bp->b_addr;
1768 xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1);
1769 xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2);
1770 btree1 = node1hdr.btree;
1771 btree2 = node2hdr.btree;
1773 if (node1hdr.count > 0 && node2hdr.count > 0 &&
1774 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1775 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1776 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1783 * Link a new block into a doubly linked list of blocks (of whatever type).
1787 struct xfs_da_state *state,
1788 struct xfs_da_state_blk *old_blk,
1789 struct xfs_da_state_blk *new_blk)
1791 struct xfs_da_blkinfo *old_info;
1792 struct xfs_da_blkinfo *new_info;
1793 struct xfs_da_blkinfo *tmp_info;
1794 struct xfs_da_args *args;
1798 struct xfs_inode *dp = state->args->dp;
1801 * Set up environment.
1804 ASSERT(args != NULL);
1805 old_info = old_blk->bp->b_addr;
1806 new_info = new_blk->bp->b_addr;
1807 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1808 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1809 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1811 switch (old_blk->magic) {
1812 case XFS_ATTR_LEAF_MAGIC:
1813 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1815 case XFS_DIR2_LEAFN_MAGIC:
1816 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1818 case XFS_DA_NODE_MAGIC:
1819 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1824 * Link blocks in appropriate order.
1828 * Link new block in before existing block.
1830 trace_xfs_da_link_before(args);
1831 new_info->forw = cpu_to_be32(old_blk->blkno);
1832 new_info->back = old_info->back;
1833 if (old_info->back) {
1834 error = xfs_da3_node_read(args->trans, dp,
1835 be32_to_cpu(old_info->back),
1836 &bp, args->whichfork);
1840 tmp_info = bp->b_addr;
1841 ASSERT(tmp_info->magic == old_info->magic);
1842 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1843 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1844 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1846 old_info->back = cpu_to_be32(new_blk->blkno);
1849 * Link new block in after existing block.
1851 trace_xfs_da_link_after(args);
1852 new_info->forw = old_info->forw;
1853 new_info->back = cpu_to_be32(old_blk->blkno);
1854 if (old_info->forw) {
1855 error = xfs_da3_node_read(args->trans, dp,
1856 be32_to_cpu(old_info->forw),
1857 &bp, args->whichfork);
1861 tmp_info = bp->b_addr;
1862 ASSERT(tmp_info->magic == old_info->magic);
1863 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1864 tmp_info->back = cpu_to_be32(new_blk->blkno);
1865 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1867 old_info->forw = cpu_to_be32(new_blk->blkno);
1870 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1871 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1876 * Unlink a block from a doubly linked list of blocks.
1878 STATIC int /* error */
1880 struct xfs_da_state *state,
1881 struct xfs_da_state_blk *drop_blk,
1882 struct xfs_da_state_blk *save_blk)
1884 struct xfs_da_blkinfo *drop_info;
1885 struct xfs_da_blkinfo *save_info;
1886 struct xfs_da_blkinfo *tmp_info;
1887 struct xfs_da_args *args;
1892 * Set up environment.
1895 ASSERT(args != NULL);
1896 save_info = save_blk->bp->b_addr;
1897 drop_info = drop_blk->bp->b_addr;
1898 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1899 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1900 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1901 ASSERT(save_blk->magic == drop_blk->magic);
1902 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1903 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1904 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1905 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1908 * Unlink the leaf block from the doubly linked chain of leaves.
1910 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1911 trace_xfs_da_unlink_back(args);
1912 save_info->back = drop_info->back;
1913 if (drop_info->back) {
1914 error = xfs_da3_node_read(args->trans, args->dp,
1915 be32_to_cpu(drop_info->back),
1916 &bp, args->whichfork);
1920 tmp_info = bp->b_addr;
1921 ASSERT(tmp_info->magic == save_info->magic);
1922 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1923 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1924 xfs_trans_log_buf(args->trans, bp, 0,
1925 sizeof(*tmp_info) - 1);
1928 trace_xfs_da_unlink_forward(args);
1929 save_info->forw = drop_info->forw;
1930 if (drop_info->forw) {
1931 error = xfs_da3_node_read(args->trans, args->dp,
1932 be32_to_cpu(drop_info->forw),
1933 &bp, args->whichfork);
1937 tmp_info = bp->b_addr;
1938 ASSERT(tmp_info->magic == save_info->magic);
1939 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1940 tmp_info->back = cpu_to_be32(save_blk->blkno);
1941 xfs_trans_log_buf(args->trans, bp, 0,
1942 sizeof(*tmp_info) - 1);
1946 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1951 * Move a path "forward" or "!forward" one block at the current level.
1953 * This routine will adjust a "path" to point to the next block
1954 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1955 * Btree, including updating pointers to the intermediate nodes between
1956 * the new bottom and the root.
1960 struct xfs_da_state *state,
1961 struct xfs_da_state_path *path,
1966 struct xfs_da_state_blk *blk;
1967 struct xfs_da_blkinfo *info;
1968 struct xfs_da_args *args;
1969 struct xfs_da_node_entry *btree;
1970 struct xfs_da3_icnode_hdr nodehdr;
1972 xfs_dablk_t blkno = 0;
1975 struct xfs_inode *dp = state->args->dp;
1977 trace_xfs_da_path_shift(state->args);
1980 * Roll up the Btree looking for the first block where our
1981 * current index is not at the edge of the block. Note that
1982 * we skip the bottom layer because we want the sibling block.
1985 ASSERT(args != NULL);
1986 ASSERT(path != NULL);
1987 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1988 level = (path->active-1) - 1; /* skip bottom layer in path */
1989 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1990 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
1993 if (forward && (blk->index < nodehdr.count - 1)) {
1995 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
1997 } else if (!forward && (blk->index > 0)) {
1999 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2004 *result = -ENOENT; /* we're out of our tree */
2005 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
2010 * Roll down the edge of the subtree until we reach the
2011 * same depth we were at originally.
2013 for (blk++, level++; level < path->active; blk++, level++) {
2015 * Read the next child block into a local buffer.
2017 error = xfs_da3_node_read(args->trans, dp, blkno, &bp,
2023 * Release the old block (if it's dirty, the trans doesn't
2024 * actually let go) and swap the local buffer into the path
2025 * structure. This ensures failure of the above read doesn't set
2026 * a NULL buffer in an active slot in the path.
2029 xfs_trans_brelse(args->trans, blk->bp);
2033 info = blk->bp->b_addr;
2034 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
2035 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
2036 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2037 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
2038 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
2039 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
2043 * Note: we flatten the magic number to a single type so we
2044 * don't have to compare against crc/non-crc types elsewhere.
2046 switch (be16_to_cpu(info->magic)) {
2047 case XFS_DA_NODE_MAGIC:
2048 case XFS_DA3_NODE_MAGIC:
2049 blk->magic = XFS_DA_NODE_MAGIC;
2050 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
2052 btree = nodehdr.btree;
2053 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
2057 blk->index = nodehdr.count - 1;
2058 blkno = be32_to_cpu(btree[blk->index].before);
2060 case XFS_ATTR_LEAF_MAGIC:
2061 case XFS_ATTR3_LEAF_MAGIC:
2062 blk->magic = XFS_ATTR_LEAF_MAGIC;
2063 ASSERT(level == path->active-1);
2065 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
2067 case XFS_DIR2_LEAFN_MAGIC:
2068 case XFS_DIR3_LEAFN_MAGIC:
2069 blk->magic = XFS_DIR2_LEAFN_MAGIC;
2070 ASSERT(level == path->active-1);
2072 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
2085 /*========================================================================
2087 *========================================================================*/
2090 * Implement a simple hash on a character string.
2091 * Rotate the hash value by 7 bits, then XOR each character in.
2092 * This is implemented with some source-level loop unrolling.
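 *
 * Worked example (a sketch): for the five-byte name "abcde", the first
 * loop pass packs the leading four bytes, hash = (a<<21) ^ (b<<14) ^
 * (c<<7) ^ d, and the tail switch then returns (e<<0) ^ rol32(hash, 7).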
2095 xfs_da_hashname(const uint8_t *name, int namelen)
2100 * Do four characters at a time as long as we can.
2102 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
2103 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
2104 (name[3] << 0) ^ rol32(hash, 7 * 4);
2107 * Now do the rest of the characters.
2111 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
2114 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
2116 return (name[0] << 0) ^ rol32(hash, 7 * 1);
2117 default: /* case 0: */
2124 struct xfs_da_args *args,
2125 const unsigned char *name,
2128 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
2129 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
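/*
 * Note: this default comparator only distinguishes exact matches from
 * different names; the case-insensitive directory variant can also
 * return XFS_CMP_CASE for a case-folded match.
 */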
2133 xfs_da_grow_inode_int(
2134 struct xfs_da_args *args,
2138 struct xfs_trans *tp = args->trans;
2139 struct xfs_inode *dp = args->dp;
2140 int w = args->whichfork;
2141 xfs_rfsblock_t nblks = dp->i_d.di_nblocks;
2142 struct xfs_bmbt_irec map, *mapp;
2143 int nmap, error, got, i, mapi;
2146 * Find a spot in the file space to put the new block.
2148 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2153 * Try mapping it in one filesystem block.
2156 error = xfs_bmapi_write(tp, dp, *bno, count,
2157 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2158 args->total, &map, &nmap);
2166 } else if (nmap == 0 && count > 1) {
2171 * If we didn't get it and the block might work if fragmented,
2172 * try without the CONTIG flag. Loop until we get it all.
2174 mapp = kmem_alloc(sizeof(*mapp) * count, 0);
2175 for (b = *bno, mapi = 0; b < *bno + count; ) {
2176 nmap = min(XFS_BMAP_MAX_NMAP, count);
2177 c = (int)(*bno + count - b);
2178 error = xfs_bmapi_write(tp, dp, b, c,
2179 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2180 args->total, &mapp[mapi], &nmap);
2186 b = mapp[mapi - 1].br_startoff +
2187 mapp[mapi - 1].br_blockcount;
2195 * Count the blocks we got, make sure it matches the total.
2197 for (i = 0, got = 0; i < mapi; i++)
2198 got += mapp[i].br_blockcount;
2199 if (got != count || mapp[0].br_startoff != *bno ||
2200 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2206 /* account for newly allocated blocks in reserved blocks total */
2207 args->total -= dp->i_d.di_nblocks - nblks;
2216 * Add a block to the btree ahead of the file.
2217 * Return the new block number to the caller.
2221 struct xfs_da_args *args,
2222 xfs_dablk_t *new_blkno)
2227 trace_xfs_da_grow_inode(args);
2229 bno = args->geo->leafblk;
2230 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2232 *new_blkno = (xfs_dablk_t)bno;
2237 * Ick. We need to always be able to remove a btree block, even
2238 * if there's no space reservation because the filesystem is full.
2239 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2240 * It swaps the target block with the last block in the file. The
2241 * last block in the file can always be removed since it can't cause
2242 * a bmap btree split to do that.
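 *
 * Rough sequence (sketch): copy the last block's contents over the dead
 * block, stitch the dead block's number into the moved block's former
 * left and right siblings, then walk down from the root to repoint the
 * parent entry that referenced the moved block.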
2245 xfs_da3_swap_lastblock(
2246 struct xfs_da_args *args,
2247 xfs_dablk_t *dead_blknop,
2248 struct xfs_buf **dead_bufp)
2250 struct xfs_da_blkinfo *dead_info;
2251 struct xfs_da_blkinfo *sib_info;
2252 struct xfs_da_intnode *par_node;
2253 struct xfs_da_intnode *dead_node;
2254 struct xfs_dir2_leaf *dead_leaf2;
2255 struct xfs_da_node_entry *btree;
2256 struct xfs_da3_icnode_hdr par_hdr;
2257 struct xfs_inode *dp;
2258 struct xfs_trans *tp;
2259 struct xfs_mount *mp;
2260 struct xfs_buf *dead_buf;
2261 struct xfs_buf *last_buf;
2262 struct xfs_buf *sib_buf;
2263 struct xfs_buf *par_buf;
2264 xfs_dahash_t dead_hash;
2265 xfs_fileoff_t lastoff;
2266 xfs_dablk_t dead_blkno;
2267 xfs_dablk_t last_blkno;
2268 xfs_dablk_t sib_blkno;
2269 xfs_dablk_t par_blkno;
	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, lastoff == 0))
		return -EFSCORRUPTED;
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr,
					    dead_leaf2);
		ents = leafhdr.ents;
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
		btree = deadhdr.btree;
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (XFS_IS_CORRUPT(mp,
				   be32_to_cpu(sib_info->forw) != last_blkno ||
				   sib_info->magic != dead_info->magic)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (XFS_IS_CORRUPT(mp,
				   be32_to_cpu(sib_info->back) != last_blkno ||
				   sib_info->magic != dead_info->magic)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
		if (XFS_IS_CORRUPT(mp,
				   level >= 0 && level != par_hdr.level + 1)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = par_hdr.btree;
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (XFS_IS_CORRUPT(mp, par_blkno == 0)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
		if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = par_hdr.btree;
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
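/*
 * Illustrative sketch (example only): the sibling fix-up above is the
 * classic "move the last element into the vacated slot" trick on a
 * doubly linked structure.  struct blk and move_last_into() are
 * hypothetical names for this example only.
 */
#if 0 /* example only */
#include <stddef.h>

struct blk {
	struct blk	*forw;		/* right sibling, or NULL */
	struct blk	*back;		/* left sibling, or NULL */
	int		payload;
};

/* Copy @last into @dead's storage and repoint @last's old siblings. */
static void move_last_into(struct blk *dead, struct blk *last)
{
	dead->payload = last->payload;	/* like the memcpy of the block */
	dead->forw = last->forw;
	dead->back = last->back;
	if (dead->back)
		dead->back->forw = dead;	/* left sibling points here now */
	if (dead->forw)
		dead->forw->back = dead;	/* right sibling points here now */
}
#endif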
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		dead_blkno,
	struct xfs_buf		*dead_buf)
{
	struct xfs_inode	*dp;
	int			done, error, w, count;
	struct xfs_trans	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
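/*
 * Illustrative sketch (example only) of the retry pattern above: run
 * the primary operation, and on the one recoverable failure retarget
 * and try again.  try_remove() and swap_with_last() are hypothetical
 * stand-ins for xfs_bunmapi() and xfs_da3_swap_lastblock().
 */
#if 0 /* example only */
#include <errno.h>

#define LAST_BLOCK	7UL

static int try_remove(unsigned long blkno)
{
	/* Pretend only the last block can be removed when space is tight. */
	return blkno == LAST_BLOCK ? 0 : -ENOSPC;
}

static int swap_with_last(unsigned long *blkno)
{
	*blkno = LAST_BLOCK;	/* target now occupies the last block */
	return 0;
}

static int remove_with_fallback(unsigned long blkno)
{
	int	error;

	for (;;) {
		error = try_remove(blkno);
		if (error != -ENOSPC)
			return error;
		error = swap_with_last(&blkno);
		if (error)
			return error;
	}
}
#endif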
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	int			whichfork,
	struct xfs_buf_map	**mapp,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb = xfs_dabuf_nfsb(mp, whichfork);
	struct xfs_bmbt_irec	irec, *irecs = &irec;
	struct xfs_buf_map	*map = *mapp;
	xfs_fileoff_t		off = bno;
	int			error = 0, nirecs, i;
	if (nfsb > 1)
		irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);

	nirecs = nfsb;
	error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
			xfs_bmapi_aflag(whichfork));
	if (error)
		goto out_free_irecs;
	/*
	 * Use the caller provided map for the single map case, else allocate a
	 * larger one that needs to be freed by the caller.
	 */
	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
		if (!map) {
			error = -ENOMEM;
			goto out_free_irecs;
		}
		*mapp = map;
	}
	for (i = 0; i < nirecs; i++) {
		if (irecs[i].br_startblock == HOLESTARTBLOCK ||
		    irecs[i].br_startblock == DELAYSTARTBLOCK)
			goto invalid_mapping;
		if (off != irecs[i].br_startoff)
			goto invalid_mapping;

		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
		off += irecs[i].br_blockcount;
	}

	if (off != bno + nfsb)
		goto invalid_mapping;

	*nmaps = nirecs;
out_free_irecs:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;

invalid_mapping:
	/* Caller ok with no mapping. */
	if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) {
		error = -EFSCORRUPTED;
		if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
			xfs_alert(mp, "%s: bno %u inode %llu",
					__func__, bno, dp->i_ino);

			for (i = 0; i < nirecs; i++) {
				xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
					i, irecs[i].br_startoff,
					irecs[i].br_startblock,
					irecs[i].br_blockcount,
					irecs[i].br_state);
			}
		}
	} else {
		*nmaps = 0;
	}
	goto out_free_irecs;
}
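/*
 * Illustrative sketch (example only): turning a list of file extents
 * into an I/O vector, the way the function above builds a
 * struct xfs_buf_map array for a possibly discontiguous dabuf.
 * struct ext, struct bmap, build_iov() and BB_PER_FSB are hypothetical
 * names; the conversion mirrors XFS_FSB_TO_DADDR()/XFS_FSB_TO_BB().
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stdint.h>

#define BB_PER_FSB	8	/* 4096-byte fs block / 512-byte sector */

struct ext {
	uint64_t	startoff;	/* file offset, in fs blocks */
	uint64_t	startblock;	/* disk location, in fs blocks */
	uint64_t	count;		/* length, in fs blocks */
};

struct bmap {
	uint64_t	daddr;		/* disk address, in sectors */
	uint64_t	len;		/* length, in sectors */
};

/* Build the vector; fail if the extents do not tile [bno, bno + nfsb). */
static bool build_iov(const struct ext *e, int n, uint64_t bno,
		      uint64_t nfsb, struct bmap *map)
{
	uint64_t	off = bno;
	int		i;

	for (i = 0; i < n; i++) {
		if (e[i].startoff != off)	/* hole in the mapping */
			return false;
		map[i].daddr = e[i].startblock * BB_PER_FSB;
		map[i].len = e[i].count * BB_PER_FSB;
		off += e[i].count;
	}
	return off == bno + nfsb;
}
#endif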
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	struct xfs_buf_map	map, *mapp = &map;
	int			nmap = 1;
	int			error;

	*bpp = NULL;
	error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, &nmap);
	if (error || nmap == 0)
		goto out_free;

	bp = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0);
	error = bp ? bp->b_error : -EIO;
	if (error) {
		if (bp)
			xfs_trans_brelse(tp, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	struct xfs_buf_map	map, *mapp = &map;
	int			nmap = 1;
	int			error;

	*bpp = NULL;
	error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
	if (error || !nmap)
		goto out_free;

	error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0,
			&bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Readahead the dir/attr block.
 */
int
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
	if (error || !nmap)
		goto out_free;

	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}