2 * block.c --- iterate over all blocks in an inode
4 * Copyright (C) 1993 Theodore Ts'o. This file may be redistributed
5 * under the terms of the GNU Public License.
16 #include <linux/ext2_fs.h>
/*
 * Shared state threaded through the recursive block walkers below.
 * Usage in this file shows it carries (at least): the filesystem
 * handle (ctx->fs), the user callback (ctx->func) and its opaque
 * argument (ctx->private), the iteration flags (ctx->flags), one
 * scratch buffer per indirection level (ctx->ind_buf, ctx->dind_buf,
 * ctx->tind_buf), a running logical-block counter (ctx->bcount), and
 * the last error code (ctx->errcode).
 * NOTE(review): the full member list is not visible in this chunk.
 */
20 struct block_context {
22 	int (*func)(ext2_filsys fs,
/*
 * Walk one indirect block: apply ctx->func to the indirect block
 * itself and to every 32-bit block number stored inside it.
 *
 * Callback ordering: unless BLOCK_FLAG_DEPTH_TRAVERSE or
 * BLOCK_FLAG_DATA_ONLY is set, ctx->func is invoked on the indirect
 * block *before* its contents; with BLOCK_FLAG_DEPTH_TRAVERSE it is
 * invoked *after* instead (and with DATA_ONLY not at all).
 *
 * Returns the OR of BLOCK_* flags accumulated from the callbacks; on
 * a range or I/O error, ctx->errcode is set and BLOCK_ERROR |
 * BLOCK_ABORT is returned.
 */
36 static int block_iterate_ind(blk_t *ind_block, struct block_context *ctx)
38 	int ret = 0, changed = 0;
/* Pre-order visit of the indirect block itself. */
42 	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
43 !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
44 ret = (*ctx->func)(ctx->fs, ind_block,
45 BLOCK_COUNT_IND, ctx->private);
/* Nothing to descend into for a hole, or once a callback aborts. */
46 if (!*ind_block || (ret & BLOCK_ABORT))
/* Reject block numbers outside the filesystem's valid data range. */
48 if (*ind_block >= ctx->fs->super->s_blocks_count ||
49 *ind_block < ctx->fs->super->s_first_data_block) {
50 ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
/* Read the indirect block into the per-level scratch buffer. */
54 ctx->errcode = io_channel_read_blk(ctx->fs->io, *ind_block,
/* Number of 32-bit block-number entries per filesystem block. */
60 limit = ctx->fs->blocksize >> 2;
/* Swap entries to host byte order if the fs was opened for swapping. */
61 if ((ctx->fs->flags & EXT2_SWAP_BYTES) ||
62 (ctx->fs->flags & EXT2_SWAP_BYTES_READ)) {
63 block_nr = (blk_t *) ctx->ind_buf;
64 for (i = 0; i < limit; i++, block_nr++)
65 *block_nr = ext2fs_swab32(*block_nr);
67 block_nr = (blk_t *) ctx->ind_buf;
/*
 * APPEND mode: call back on every slot (including unallocated ones)
 * so the caller may fill holes; bcount advances per slot either way.
 */
68 if (ctx->flags & BLOCK_FLAG_APPEND) {
69 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
70 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
73 if (flags & BLOCK_ABORT) {
/*
 * Normal mode loop.  NOTE(review): the per-entry guard between lines
 * 79 and 82 is not visible in this chunk — presumably it skips zero
 * (unallocated) entries; confirm against the full file.
 */
79 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
82 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
85 if (flags & BLOCK_ABORT) {
/*
 * If any callback modified an entry (BLOCK_CHANGED), swap back to
 * disk order if needed and rewrite the indirect block.
 */
91 if (changed & BLOCK_CHANGED) {
92 if ((ctx->fs->flags & EXT2_SWAP_BYTES) ||
93 (ctx->fs->flags & EXT2_SWAP_BYTES_WRITE)) {
94 block_nr = (blk_t *) ctx->ind_buf;
95 for (i = 0; i < limit; i++, block_nr++)
96 *block_nr = ext2fs_swab32(*block_nr);
98 ctx->errcode = io_channel_write_blk(ctx->fs->io, *ind_block,
101 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit for depth-first traversal, unless aborted. */
103 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
104 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
105 !(ret & BLOCK_ABORT))
106 ret |= (*ctx->func)(ctx->fs, ind_block,
107 BLOCK_COUNT_IND, ctx->private);
/*
 * Walk one doubly-indirect block.  Same structure as
 * block_iterate_ind(), one level up: the entries here are block
 * numbers of indirect blocks, so each non-skipped entry is handed to
 * block_iterate_ind() rather than directly to ctx->func.
 *
 * Callback ordering for the dind block itself (pre- vs post-order,
 * suppressed by BLOCK_FLAG_DATA_ONLY) matches the indirect walker.
 * Returns accumulated BLOCK_* flags; sets ctx->errcode and returns
 * BLOCK_ERROR | BLOCK_ABORT on range or I/O failure.
 */
111 static int block_iterate_dind(blk_t *dind_block, struct block_context *ctx)
113 int ret = 0, changed = 0;
/* Pre-order visit of the doubly-indirect block itself. */
117 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
118 !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
119 ret = (*ctx->func)(ctx->fs, dind_block,
120 BLOCK_COUNT_DIND, ctx->private);
/* Hole or abort: nothing further to do. */
121 if (!*dind_block || (ret & BLOCK_ABORT))
/* Range-check the dind block number. */
123 if (*dind_block >= ctx->fs->super->s_blocks_count ||
124 *dind_block < ctx->fs->super->s_first_data_block) {
125 ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
/* Read into the dedicated dind scratch buffer. */
129 ctx->errcode = io_channel_read_blk(ctx->fs->io, *dind_block,
135 limit = ctx->fs->blocksize >> 2;
/* Byte-swap entries on read if requested by the fs flags. */
136 if ((ctx->fs->flags & EXT2_SWAP_BYTES) ||
137 (ctx->fs->flags & EXT2_SWAP_BYTES_READ)) {
138 block_nr = (blk_t *) ctx->dind_buf;
139 for (i = 0; i < limit; i++, block_nr++)
140 *block_nr = ext2fs_swab32(*block_nr);
142 block_nr = (blk_t *) ctx->dind_buf;
/* APPEND mode: recurse into every slot, even unallocated ones. */
143 if (ctx->flags & BLOCK_FLAG_APPEND) {
144 for (i = 0; i < limit; i++, block_nr++) {
145 flags = block_iterate_ind(block_nr, ctx);
147 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
148 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
/*
 * Normal mode.  NOTE(review): the guard between lines 153 and 156 is
 * not visible here — presumably it skips zero entries (and must still
 * advance ctx->bcount for the skipped range); confirm in full file.
 */
153 for (i = 0; i < limit; i++, block_nr++) {
156 flags = block_iterate_ind(block_nr, ctx);
158 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
159 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
/* Write the block back (re-swapped) if any entry changed. */
164 if (changed & BLOCK_CHANGED) {
165 if ((ctx->fs->flags & EXT2_SWAP_BYTES) ||
166 (ctx->fs->flags & EXT2_SWAP_BYTES_WRITE)) {
167 block_nr = (blk_t *) ctx->dind_buf;
168 for (i = 0; i < limit; i++, block_nr++)
169 *block_nr = ext2fs_swab32(*block_nr);
171 ctx->errcode = io_channel_write_blk(ctx->fs->io, *dind_block,
174 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit for depth-first traversal. */
176 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
177 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
178 !(ret & BLOCK_ABORT))
179 ret |= (*ctx->func)(ctx->fs, dind_block,
180 BLOCK_COUNT_DIND, ctx->private);
/*
 * Walk one triply-indirect block.  Same structure as the other two
 * walkers, top level: each entry is the block number of a
 * doubly-indirect block and is handed to block_iterate_dind().
 *
 * Callback ordering for the tind block itself (pre- vs post-order,
 * suppressed by BLOCK_FLAG_DATA_ONLY) matches the lower levels.
 * Returns accumulated BLOCK_* flags; sets ctx->errcode and returns
 * BLOCK_ERROR | BLOCK_ABORT on range or I/O failure.
 */
184 static int block_iterate_tind(blk_t *tind_block, struct block_context *ctx)
186 int ret = 0, changed = 0;
/* Pre-order visit of the triply-indirect block itself. */
190 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
191 !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
192 ret = (*ctx->func)(ctx->fs, tind_block,
193 BLOCK_COUNT_TIND, ctx->private);
/* Hole or abort: nothing further to do. */
194 if (!*tind_block || (ret & BLOCK_ABORT))
/* Range-check the tind block number. */
196 if (*tind_block >= ctx->fs->super->s_blocks_count ||
197 *tind_block < ctx->fs->super->s_first_data_block) {
198 ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
/* Read into the dedicated tind scratch buffer. */
202 ctx->errcode = io_channel_read_blk(ctx->fs->io, *tind_block,
208 limit = ctx->fs->blocksize >> 2;
/* Byte-swap entries on read if requested by the fs flags. */
209 if ((ctx->fs->flags & EXT2_SWAP_BYTES) ||
210 (ctx->fs->flags & EXT2_SWAP_BYTES_READ)) {
211 block_nr = (blk_t *) ctx->tind_buf;
212 for (i = 0; i < limit; i++, block_nr++)
213 *block_nr = ext2fs_swab32(*block_nr);
215 block_nr = (blk_t *) ctx->tind_buf;
/* APPEND mode: recurse into every slot, even unallocated ones. */
216 if (ctx->flags & BLOCK_FLAG_APPEND) {
217 for (i = 0; i < limit; i++, block_nr++) {
218 flags = block_iterate_dind(block_nr, ctx);
220 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
221 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
/*
 * Normal mode.  NOTE(review): the guard between lines 226 and 229 is
 * not visible here — presumably it skips zero entries; confirm in
 * the full file.
 */
226 for (i = 0; i < limit; i++, block_nr++) {
229 flags = block_iterate_dind(block_nr, ctx);
231 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
232 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
/* Write the block back (re-swapped) if any entry changed. */
237 if (changed & BLOCK_CHANGED) {
238 if ((ctx->fs->flags & EXT2_SWAP_BYTES) ||
239 (ctx->fs->flags & EXT2_SWAP_BYTES_WRITE)) {
240 block_nr = (blk_t *) ctx->tind_buf;
241 for (i = 0; i < limit; i++, block_nr++)
242 *block_nr = ext2fs_swab32(*block_nr);
244 ctx->errcode = io_channel_write_blk(ctx->fs->io, *tind_block,
247 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit for depth-first traversal. */
249 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
250 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
251 !(ret & BLOCK_ABORT))
252 ret |= (*ctx->func)(ctx->fs, tind_block,
253 BLOCK_COUNT_TIND, ctx->private);
/*
 * ext2fs_block_iterate --- public entry point: invoke func on every
 * block of inode ino, in order: (HURD translator block, if any),
 * direct blocks, then the indirect, doubly-indirect and
 * triply-indirect trees via the static walkers above.
 *
 * If block_buf is non-NULL it is used as the scratch area for the
 * three indirection levels; otherwise a 3-block buffer is malloc'd.
 * If any callback returned BLOCK_CHANGED, the (possibly modified)
 * block array is copied back into the on-disk inode.  Returns 0 on
 * success, or ctx.errcode when a BLOCK_ERROR was raised.
 */
258 errcode_t ext2fs_block_iterate(ext2_filsys fs,
262 int (*func)(ext2_filsys fs,
271 struct block_context ctx;
272 blk_t blocks[EXT2_N_BLOCKS]; /* directory data blocks */
273 struct ext2_inode inode;
276 EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
/* Snapshot the inode's i_block array into a local copy. */
278 ret = ext2fs_get_blocks(fs, ino, blocks);
284 ctx.private = private;
/* Caller-supplied scratch space, or allocate 3 blocks' worth. */
288 ctx.ind_buf = block_buf;
290 ctx.ind_buf = malloc(fs->blocksize * 3);
294 ctx.dind_buf = ctx.ind_buf + fs->blocksize;
295 ctx.tind_buf = ctx.dind_buf + fs->blocksize;
298 * Iterate over the HURD translator block (if present)
/*
 * NOTE(review): as sampled here, the condition at line 302 reads
 * inode.osd1.hurd1.h_i_translator *before* ext2fs_read_inode() at
 * line 303 populates `inode` — apparently a read of an uninitialized
 * struct.  Intervening lines are missing from this view, so verify
 * (and if real, move the read before the test) against the full file.
 */
300 if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
301 !(flags & BLOCK_FLAG_DATA_ONLY) &&
302 inode.osd1.hurd1.h_i_translator) {
303 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
307 ret |= (*func)(fs, &inode.osd1.hurd1.h_i_translator,
308 BLOCK_COUNT_TRANSLATOR, private);
309 if (ret & BLOCK_ABORT)
314 * Iterate over normal data blocks
/* Direct blocks: call back on each, skipping holes unless APPEND. */
316 for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
317 if (blocks[i] || (flags & BLOCK_FLAG_APPEND)) {
318 ret |= (*func)(fs, &blocks[i], ctx.bcount, private);
319 if (ret & BLOCK_ABORT)
/* Indirect tree (descended even for a hole in APPEND mode). */
323 if (*(blocks + EXT2_IND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
324 ret |= block_iterate_ind(blocks + EXT2_IND_BLOCK, &ctx);
325 if (ret & BLOCK_ABORT)
/* Doubly-indirect tree. */
328 if (*(blocks + EXT2_DIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
329 ret |= block_iterate_dind(blocks + EXT2_DIND_BLOCK, &ctx);
330 if (ret & BLOCK_ABORT)
/* Triply-indirect tree. */
333 if (*(blocks + EXT2_TIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
334 ret |= block_iterate_tind(blocks + EXT2_TIND_BLOCK, &ctx);
335 if (ret & BLOCK_ABORT)
/*
 * A callback changed some block number: re-read the inode, copy the
 * updated array back into i_block, and write the inode out.
 */
340 if (ret & BLOCK_CHANGED) {
342 retval = ext2fs_read_inode(fs, ino, &inode);
346 for (i=0; i < EXT2_N_BLOCKS; i++)
347 inode.i_block[i] = blocks[i];
348 retval = ext2fs_write_inode(fs, ino, &inode);
/* BLOCK_ERROR maps to the saved errcode; otherwise success. */
356 return (ret & BLOCK_ERROR) ? ctx.errcode : 0;