/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ext4_utils.h"

#include <sparse/sparse.h>

#include <stdio.h>
#include <stdlib.h>
/* Node of the singly-linked list (rooted at aux_info.xattrs) that maps an
 * inode to its extended-attribute block. */
struct xattr_list_element {
	struct ext4_inode *inode;
	struct ext4_xattr_header *header;
	struct xattr_list_element *next;
};
31 struct block_allocation *create_allocation()
33 struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
34 alloc->list.first = NULL;
35 alloc->list.last = NULL;
36 alloc->oob_list.first = NULL;
37 alloc->oob_list.last = NULL;
38 alloc->list.iter = NULL;
39 alloc->list.partial_iter = 0;
40 alloc->oob_list.iter = NULL;
41 alloc->oob_list.partial_iter = 0;
42 alloc->filename = NULL;
47 static struct ext4_xattr_header *xattr_list_find(struct ext4_inode *inode)
49 struct xattr_list_element *element;
50 for (element = aux_info.xattrs; element != NULL; element = element->next) {
51 if (element->inode == inode)
52 return element->header;
57 static void xattr_list_insert(struct ext4_inode *inode, struct ext4_xattr_header *header)
59 struct xattr_list_element *element = malloc(sizeof(struct xattr_list_element));
60 element->inode = inode;
61 element->header = header;
62 element->next = aux_info.xattrs;
63 aux_info.xattrs = element;
66 static void region_list_remove(struct region_list *list, struct region *reg)
69 reg->prev->next = reg->next;
72 reg->next->prev = reg->prev;
74 if (list->first == reg)
75 list->first = reg->next;
77 if (list->last == reg)
78 list->last = reg->prev;
84 void region_list_append(struct region_list *list, struct region *reg)
86 if (list->first == NULL) {
90 list->partial_iter = 0;
93 list->last->next = reg;
94 reg->prev = list->last;
100 void region_list_merge(struct region_list *list1, struct region_list *list2)
102 if (list1->first == NULL) {
103 list1->first = list2->first;
104 list1->last = list2->last;
105 list1->iter = list2->first;
106 list1->partial_iter = 0;
107 list1->first->prev = NULL;
109 list1->last->next = list2->first;
110 list2->first->prev = list1->last;
111 list1->last = list2->last;
115 static void dump_starting_from(struct region *reg)
117 for (; reg; reg = reg->next) {
118 printf("%p: Blocks %d-%d (%d)\n", reg,
119 reg->block, reg->block + reg->len - 1, reg->len)
123 static void dump_region_lists(struct block_allocation *alloc) {
125 printf("Main list:\n");
126 dump_starting_from(alloc->list.first);
128 printf("OOB list:\n");
129 dump_starting_from(alloc->oob_list.first);
133 void print_blocks(FILE* f, struct block_allocation *alloc, char separator)
137 for (reg = alloc->list.first; reg; reg = reg->next) {
139 fprintf(f, "%d", reg->block);
141 fprintf(f, "%d-%d", reg->block, reg->block + reg->len - 1);
148 void append_region(struct block_allocation *alloc,
149 u32 block, u32 len, int bg_num)
152 reg = malloc(sizeof(struct region));
158 region_list_append(&alloc->list, reg);
161 static void allocate_bg_inode_table(struct block_group_info *bg)
163 if (bg->inode_table != NULL)
166 u32 block = bg->first_block + 2;
168 if (bg->has_superblock)
169 block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;
171 bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
172 if (bg->inode_table == NULL)
173 critical_error_errno("calloc");
175 sparse_file_add_data(ext4_sparse_file, bg->inode_table,
176 aux_info.inode_table_blocks * info.block_size, block);
178 bg->flags &= ~EXT4_BG_INODE_UNINIT;
181 static int bitmap_set_bit(u8 *bitmap, u32 bit)
183 if (bitmap[bit / 8] & 1 << (bit % 8))
186 bitmap[bit / 8] |= 1 << (bit % 8);
190 static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
192 int ret = bitmap[bit / 8];
193 bitmap[bit / 8] = 0xFF;
197 /* Marks a the first num_blocks blocks in a block group as used, and accounts
198 for them in the block group free block info. */
199 static int reserve_blocks(struct block_group_info *bg, u32 bg_num, u32 start, u32 num)
204 for (i = 0; i < num && block % 8 != 0; i++, block++) {
205 if (bitmap_set_bit(bg->block_bitmap, block)) {
206 error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
211 for (; i + 8 <= (num & ~7); i += 8, block += 8) {
212 if (bitmap_set_8_bits(bg->block_bitmap, block)) {
213 error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
218 for (; i < num; i++, block++) {
219 if (bitmap_set_bit(bg->block_bitmap, block)) {
220 error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
225 bg->free_blocks -= num;
230 static void free_blocks(struct block_group_info *bg, u32 block, u32 num_blocks)
233 for (i = 0; i < num_blocks; i++, block--)
234 bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
235 bg->free_blocks += num_blocks;
236 for (i = bg->chunk_count; i > 0 ;) {
238 if (bg->chunks[i].len >= num_blocks && bg->chunks[i].block <= block) {
239 if (bg->chunks[i].block == block) {
240 bg->chunks[i].block += num_blocks;
241 bg->chunks[i].len -= num_blocks;
242 } else if (bg->chunks[i].block + bg->chunks[i].len - 1 == block + num_blocks) {
243 bg->chunks[i].len -= num_blocks;
250 /* Reduces an existing allocation by len blocks by return the last blocks
251 to the free pool in their block group. Assumes that the blocks being
252 returned were the last ones allocated out of the block group */
253 void reduce_allocation(struct block_allocation *alloc, u32 len)
256 struct region *last_reg = alloc->list.last;
257 struct block_group_info *bg = &aux_info.bgs[last_reg->bg];
259 if (last_reg->len > len) {
260 free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, len);
261 last_reg->len -= len;
264 struct region *reg = alloc->list.last->prev;
265 free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, last_reg->len);
266 len -= last_reg->len;
270 alloc->list.first = NULL;
271 alloc->list.last = NULL;
272 alloc->list.iter = NULL;
273 alloc->list.partial_iter = 0;
280 static void init_bg(struct block_group_info *bg, unsigned int i)
282 int header_blocks = 2 + aux_info.inode_table_blocks;
284 bg->has_superblock = ext4_bg_has_super_block(i);
286 if (bg->has_superblock)
287 header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
289 bg->bitmaps = calloc(info.block_size, 2);
290 bg->block_bitmap = bg->bitmaps;
291 bg->inode_bitmap = bg->bitmaps + info.block_size;
293 bg->header_blocks = header_blocks;
294 bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;
296 u32 block = bg->first_block;
297 if (bg->has_superblock)
298 block += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
299 sparse_file_add_data(ext4_sparse_file, bg->bitmaps, 2 * info.block_size,
302 bg->data_blocks_used = 0;
303 bg->free_blocks = info.blocks_per_group;
304 bg->free_inodes = info.inodes_per_group;
305 bg->first_free_inode = 1;
306 bg->flags = EXT4_BG_INODE_UNINIT;
309 bg->max_chunk_count = 1;
310 bg->chunks = (struct region*) calloc(bg->max_chunk_count, sizeof(struct region));
312 if (reserve_blocks(bg, i, 0, bg->header_blocks) < 0)
313 error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
314 // Add empty starting delimiter chunk
315 reserve_bg_chunk(i, bg->header_blocks, 0);
317 if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
318 u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
319 reserve_blocks(bg, i, info.blocks_per_group - overrun, overrun);
320 // Add empty ending delimiter chunk
321 reserve_bg_chunk(i, info.blocks_per_group - overrun, 0);
323 reserve_bg_chunk(i, info.blocks_per_group - 1, 0);
328 void block_allocator_init()
332 aux_info.bgs = calloc(sizeof(struct block_group_info), aux_info.groups);
333 if (aux_info.bgs == NULL)
334 critical_error_errno("calloc");
336 for (i = 0; i < aux_info.groups; i++)
337 init_bg(&aux_info.bgs[i], i);
340 void block_allocator_free()
344 for (i = 0; i < aux_info.groups; i++) {
345 free(aux_info.bgs[i].bitmaps);
346 free(aux_info.bgs[i].inode_table);
351 /* Allocate a single block and return its block number */
355 struct block_allocation *blk_alloc = allocate_blocks(1);
357 return EXT4_ALLOCATE_FAILED;
359 block = blk_alloc->list.first->block;
360 free_alloc(blk_alloc);
364 static struct region *ext4_allocate_best_fit_partial(u32 len)
367 unsigned int found_bg = 0, found_prev_chunk = 0, found_block = 0;
368 u32 found_allocate_len = 0;
369 bool minimize = false;
370 struct block_group_info *bgs = aux_info.bgs;
373 for (i = 0; i < aux_info.groups; i++) {
374 for (j = 1; j < bgs[i].chunk_count; j++) {
375 u32 hole_start, hole_size;
376 hole_start = bgs[i].chunks[j-1].block + bgs[i].chunks[j-1].len;
377 hole_size = bgs[i].chunks[j].block - hole_start;
378 if (hole_size == len) {
379 // Perfect fit i.e. right between 2 chunks no need to keep searching
381 found_prev_chunk = j - 1;
382 found_block = hole_start;
383 found_allocate_len = hole_size;
385 } else if (hole_size > len && (found_allocate_len == 0 || (found_allocate_len > hole_size))) {
387 found_prev_chunk = j - 1;
388 found_block = hole_start;
389 found_allocate_len = hole_size;
391 } else if (!minimize) {
392 if (found_allocate_len < hole_size) {
394 found_prev_chunk = j - 1;
395 found_block = hole_start;
396 found_allocate_len = hole_size;
402 if (found_allocate_len == 0) {
403 error("failed to allocate %u blocks, out of space?", len);
406 if (found_allocate_len > len) found_allocate_len = len;
408 // reclaim allocated space in chunk
409 bgs[found_bg].chunks[found_prev_chunk].len += found_allocate_len;
410 if (reserve_blocks(&bgs[found_bg],
413 found_allocate_len) < 0) {
414 error("failed to reserve %u blocks in block group %u\n", found_allocate_len, found_bg);
417 bgs[found_bg].data_blocks_used += found_allocate_len;
418 reg = malloc(sizeof(struct region));
419 reg->block = found_block + bgs[found_bg].first_block;
420 reg->len = found_allocate_len;
427 static struct region *ext4_allocate_best_fit(u32 len)
429 struct region *first_reg = NULL;
430 struct region *prev_reg = NULL;
434 reg = ext4_allocate_best_fit_partial(len);
438 if (first_reg == NULL)
442 prev_reg->next = reg;
443 reg->prev = prev_reg;
453 /* Allocate len blocks. The blocks may be spread across multiple block groups,
454 and are returned in a linked list of the blocks in each block group. The
455 allocation algorithm is:
456 1. If the remaining allocation is larger than any available contiguous region,
457 allocate the largest contiguous region and loop
458 2. Otherwise, allocate the smallest contiguous region that it fits in
460 struct block_allocation *allocate_blocks(u32 len)
462 struct region *reg = ext4_allocate_best_fit(len);
467 struct block_allocation *alloc = create_allocation();
468 alloc->list.first = reg;
469 while (reg->next != NULL)
471 alloc->list.last = reg;
472 alloc->list.iter = alloc->list.first;
473 alloc->list.partial_iter = 0;
477 /* Returns the number of discontiguous regions used by an allocation */
478 int block_allocation_num_regions(struct block_allocation *alloc)
481 struct region *reg = alloc->list.first;
483 for (i = 0; reg != NULL; reg = reg->next)
489 int block_allocation_len(struct block_allocation *alloc)
492 struct region *reg = alloc->list.first;
494 for (i = 0; reg != NULL; reg = reg->next)
500 /* Returns the block number of the block'th block in an allocation */
501 u32 get_block(struct block_allocation *alloc, u32 block)
503 struct region *reg = alloc->list.iter;
504 block += alloc->list.partial_iter;
506 for (; reg; reg = reg->next) {
507 if (block < reg->len)
508 return reg->block + block;
511 return EXT4_ALLOCATE_FAILED;
514 u32 get_oob_block(struct block_allocation *alloc, u32 block)
516 struct region *reg = alloc->oob_list.iter;
517 block += alloc->oob_list.partial_iter;
519 for (; reg; reg = reg->next) {
520 if (block < reg->len)
521 return reg->block + block;
524 return EXT4_ALLOCATE_FAILED;
527 /* Gets the starting block and length in blocks of the first region
529 void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
531 *block = alloc->list.iter->block;
532 *len = alloc->list.iter->len - alloc->list.partial_iter;
535 /* Move to the next region in an allocation */
536 void get_next_region(struct block_allocation *alloc)
538 alloc->list.iter = alloc->list.iter->next;
539 alloc->list.partial_iter = 0;
542 /* Returns the number of free blocks in a block group */
543 u32 get_free_blocks(u32 bg)
545 return aux_info.bgs[bg].free_blocks;
548 int last_region(struct block_allocation *alloc)
550 return (alloc->list.iter == NULL);
553 void rewind_alloc(struct block_allocation *alloc)
555 alloc->list.iter = alloc->list.first;
556 alloc->list.partial_iter = 0;
559 static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
561 struct region *reg = alloc->list.iter;
565 while (reg && len >= reg->len) {
570 if (reg == NULL && len > 0)
574 new = malloc(sizeof(struct region));
577 new->block = reg->block + len;
578 new->len = reg->len - len;
579 new->next = reg->next;
585 tmp = alloc->list.iter;
586 alloc->list.iter = new;
593 /* Splits an allocation into two allocations. The returned allocation will
594 point to the first half, and the original allocation ptr will point to the
596 static struct region *split_allocation(struct block_allocation *alloc, u32 len)
598 /* First make sure there is a split at the current ptr */
599 do_split_allocation(alloc, alloc->list.partial_iter);
601 /* Then split off len blocks */
602 struct region *middle = do_split_allocation(alloc, len);
603 alloc->list.partial_iter = 0;
607 /* Reserve the next blocks for oob data (indirect or extent blocks) */
608 int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
610 struct region *oob = split_allocation(alloc, blocks);
616 while (oob && oob != alloc->list.iter) {
618 region_list_remove(&alloc->list, oob);
619 region_list_append(&alloc->oob_list, oob);
626 static int advance_list_ptr(struct region_list *list, int blocks)
628 struct region *reg = list->iter;
630 while (reg != NULL && blocks > 0) {
631 if (reg->len > list->partial_iter + blocks) {
632 list->partial_iter += blocks;
636 blocks -= (reg->len - list->partial_iter);
637 list->partial_iter = 0;
647 /* Move the allocation pointer forward */
648 int advance_blocks(struct block_allocation *alloc, int blocks)
650 return advance_list_ptr(&alloc->list, blocks);
653 int advance_oob_blocks(struct block_allocation *alloc, int blocks)
655 return advance_list_ptr(&alloc->oob_list, blocks);
658 int append_oob_allocation(struct block_allocation *alloc, u32 len)
660 struct region *reg = ext4_allocate_best_fit(len);
663 error("failed to allocate %d blocks", len);
667 for (; reg; reg = reg->next)
668 region_list_append(&alloc->oob_list, reg);
673 /* Returns an ext4_inode structure for an inode number */
674 struct ext4_inode *get_inode(u32 inode)
677 int bg = inode / info.inodes_per_group;
678 inode %= info.inodes_per_group;
680 allocate_bg_inode_table(&aux_info.bgs[bg]);
681 return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
685 struct ext4_xattr_header *get_xattr_block_for_inode(struct ext4_inode *inode)
687 struct ext4_xattr_header *block = xattr_list_find(inode);
691 u32 block_num = allocate_block();
692 block = calloc(info.block_size, 1);
694 error("get_xattr: failed to allocate %d", info.block_size);
698 block->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
699 block->h_refcount = cpu_to_le32(1);
700 block->h_blocks = cpu_to_le32(1);
701 inode->i_blocks_lo = cpu_to_le32(le32_to_cpu(inode->i_blocks_lo) + (info.block_size / 512));
702 inode->i_file_acl_lo = cpu_to_le32(block_num);
704 int result = sparse_file_add_data(ext4_sparse_file, block, info.block_size, block_num);
706 error("get_xattr: sparse_file_add_data failure %d", result);
710 xattr_list_insert(inode, block);
714 /* Mark the first len inodes in a block group as used */
715 u32 reserve_inodes(int bg, u32 num)
720 if (get_free_inodes(bg) < num)
721 return EXT4_ALLOCATE_FAILED;
723 for (i = 0; i < num; i++) {
724 inode = aux_info.bgs[bg].first_free_inode + i - 1;
725 aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
728 inode = aux_info.bgs[bg].first_free_inode;
730 aux_info.bgs[bg].first_free_inode += num;
731 aux_info.bgs[bg].free_inodes -= num;
736 /* Returns the first free inode number
737 TODO: Inodes should be allocated in the block group of the data? */
743 for (bg = 0; bg < aux_info.groups; bg++) {
744 inode = reserve_inodes(bg, 1);
745 if (inode != EXT4_ALLOCATE_FAILED)
746 return bg * info.inodes_per_group + inode;
749 return EXT4_ALLOCATE_FAILED;
752 /* Returns the number of free inodes in a block group */
753 u32 get_free_inodes(u32 bg)
755 return aux_info.bgs[bg].free_inodes;
758 /* Increments the directory count of the block group that contains inode */
759 void add_directory(u32 inode)
761 int bg = (inode - 1) / info.inodes_per_group;
762 aux_info.bgs[bg].used_dirs += 1;
765 /* Returns the number of inodes in a block group that are directories */
766 u16 get_directories(int bg)
768 return aux_info.bgs[bg].used_dirs;
771 /* Returns the flags for a block group */
772 u16 get_bg_flags(int bg)
774 return aux_info.bgs[bg].flags;
777 /* Frees the memory used by a linked list of allocation regions */
778 void free_alloc(struct block_allocation *alloc)
782 reg = alloc->list.first;
784 struct region *next = reg->next;
789 reg = alloc->oob_list.first;
791 struct region *next = reg->next;
799 void reserve_bg_chunk(int bg, u32 start_block, u32 size) {
800 struct block_group_info *bgs = aux_info.bgs;
802 if (bgs[bg].chunk_count == bgs[bg].max_chunk_count) {
803 bgs[bg].max_chunk_count *= 2;
804 bgs[bg].chunks = realloc(bgs[bg].chunks, bgs[bg].max_chunk_count * sizeof(struct region));
806 critical_error("realloc failed");
808 chunk_count = bgs[bg].chunk_count;
809 bgs[bg].chunks[chunk_count].block = start_block;
810 bgs[bg].chunks[chunk_count].len = size;
811 bgs[bg].chunks[chunk_count].bg = bg;
812 bgs[bg].chunk_count++;
815 int reserve_blocks_for_allocation(struct block_allocation *alloc) {
817 struct block_group_info *bgs = aux_info.bgs;
819 if (!alloc) return 0;
820 reg = alloc->list.first;
821 while (reg != NULL) {
822 if (reserve_blocks(&bgs[reg->bg], reg->bg, reg->block - bgs[reg->bg].first_block, reg->len) < 0) {