/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sparse/sparse.h>

#include <stdio.h>
#include <stdlib.h>

#include "ext4_utils/ext4_utils.h"
/* Node in the singly-linked list (aux_info.xattrs) that maps an inode to
   the xattr block allocated for it, so repeated lookups reuse one block. */
struct xattr_list_element {
	struct ext4_inode *inode;
	struct ext4_xattr_header *header;
	struct xattr_list_element *next;
};
32 struct block_allocation *create_allocation()
34 struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
35 alloc->list.first = NULL;
36 alloc->list.last = NULL;
37 alloc->oob_list.first = NULL;
38 alloc->oob_list.last = NULL;
39 alloc->list.iter = NULL;
40 alloc->list.partial_iter = 0;
41 alloc->oob_list.iter = NULL;
42 alloc->oob_list.partial_iter = 0;
43 alloc->filename = NULL;
48 static struct ext4_xattr_header *xattr_list_find(struct ext4_inode *inode)
50 struct xattr_list_element *element;
51 for (element = aux_info.xattrs; element != NULL; element = element->next) {
52 if (element->inode == inode)
53 return element->header;
58 static void xattr_list_insert(struct ext4_inode *inode, struct ext4_xattr_header *header)
60 struct xattr_list_element *element = malloc(sizeof(struct xattr_list_element));
61 element->inode = inode;
62 element->header = header;
63 element->next = aux_info.xattrs;
64 aux_info.xattrs = element;
67 static void region_list_remove(struct region_list *list, struct region *reg)
70 reg->prev->next = reg->next;
73 reg->next->prev = reg->prev;
75 if (list->first == reg)
76 list->first = reg->next;
78 if (list->last == reg)
79 list->last = reg->prev;
85 void region_list_append(struct region_list *list, struct region *reg)
87 if (list->first == NULL) {
91 list->partial_iter = 0;
94 list->last->next = reg;
95 reg->prev = list->last;
101 void region_list_merge(struct region_list *list1, struct region_list *list2)
103 if (list1->first == NULL) {
104 list1->first = list2->first;
105 list1->last = list2->last;
106 list1->iter = list2->first;
107 list1->partial_iter = 0;
108 list1->first->prev = NULL;
110 list1->last->next = list2->first;
111 list2->first->prev = list1->last;
112 list1->last = list2->last;
116 static void dump_starting_from(struct region *reg)
118 for (; reg; reg = reg->next) {
119 printf("%p: Blocks %d-%d (%d)\n", reg,
120 reg->block, reg->block + reg->len - 1, reg->len)
124 static void dump_region_lists(struct block_allocation *alloc) {
126 printf("Main list:\n");
127 dump_starting_from(alloc->list.first);
129 printf("OOB list:\n");
130 dump_starting_from(alloc->oob_list.first);
134 void print_blocks(FILE* f, struct block_allocation *alloc, char separator)
138 for (reg = alloc->list.first; reg; reg = reg->next) {
140 fprintf(f, "%d", reg->block);
142 fprintf(f, "%d-%d", reg->block, reg->block + reg->len - 1);
149 void append_region(struct block_allocation *alloc,
150 u32 block, u32 len, int bg_num)
153 reg = malloc(sizeof(struct region));
159 region_list_append(&alloc->list, reg);
162 static void allocate_bg_inode_table(struct block_group_info *bg)
164 if (bg->inode_table != NULL)
167 u32 block = bg->first_block + 2;
169 if (bg->has_superblock)
170 block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;
172 bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
173 if (bg->inode_table == NULL)
174 critical_error_errno("calloc");
176 sparse_file_add_data(ext4_sparse_file, bg->inode_table,
177 aux_info.inode_table_blocks * info.block_size, block);
179 bg->flags &= ~EXT4_BG_INODE_UNINIT;
182 static int bitmap_set_bit(u8 *bitmap, u32 bit)
184 if (bitmap[bit / 8] & 1 << (bit % 8))
187 bitmap[bit / 8] |= 1 << (bit % 8);
191 static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
193 int ret = bitmap[bit / 8];
194 bitmap[bit / 8] = 0xFF;
198 /* Marks a the first num_blocks blocks in a block group as used, and accounts
199 for them in the block group free block info. */
200 static int reserve_blocks(struct block_group_info *bg, u32 bg_num, u32 start, u32 num)
205 for (i = 0; i < num && block % 8 != 0; i++, block++) {
206 if (bitmap_set_bit(bg->block_bitmap, block)) {
207 error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
212 for (; i + 8 <= (num & ~7); i += 8, block += 8) {
213 if (bitmap_set_8_bits(bg->block_bitmap, block)) {
214 error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
219 for (; i < num; i++, block++) {
220 if (bitmap_set_bit(bg->block_bitmap, block)) {
221 error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
226 bg->free_blocks -= num;
231 static void free_blocks(struct block_group_info *bg, u32 block, u32 num_blocks)
234 for (i = 0; i < num_blocks; i++, block--)
235 bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
236 bg->free_blocks += num_blocks;
237 for (i = bg->chunk_count; i > 0 ;) {
239 if (bg->chunks[i].len >= num_blocks && bg->chunks[i].block <= block) {
240 if (bg->chunks[i].block == block) {
241 bg->chunks[i].block += num_blocks;
242 bg->chunks[i].len -= num_blocks;
243 } else if (bg->chunks[i].block + bg->chunks[i].len - 1 == block + num_blocks) {
244 bg->chunks[i].len -= num_blocks;
251 /* Reduces an existing allocation by len blocks by return the last blocks
252 to the free pool in their block group. Assumes that the blocks being
253 returned were the last ones allocated out of the block group */
254 void reduce_allocation(struct block_allocation *alloc, u32 len)
257 struct region *last_reg = alloc->list.last;
258 struct block_group_info *bg = &aux_info.bgs[last_reg->bg];
260 if (last_reg->len > len) {
261 free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, len);
262 last_reg->len -= len;
265 struct region *reg = alloc->list.last->prev;
266 free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, last_reg->len);
267 len -= last_reg->len;
271 alloc->list.first = NULL;
272 alloc->list.last = NULL;
273 alloc->list.iter = NULL;
274 alloc->list.partial_iter = 0;
281 static void init_bg(struct block_group_info *bg, unsigned int i)
283 int header_blocks = 2 + aux_info.inode_table_blocks;
285 bg->has_superblock = ext4_bg_has_super_block(i);
287 if (bg->has_superblock)
288 header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
290 bg->bitmaps = calloc(info.block_size, 2);
291 bg->block_bitmap = bg->bitmaps;
292 bg->inode_bitmap = bg->bitmaps + info.block_size;
294 bg->header_blocks = header_blocks;
295 bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;
297 u32 block = bg->first_block;
298 if (bg->has_superblock)
299 block += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
300 sparse_file_add_data(ext4_sparse_file, bg->bitmaps, 2 * info.block_size,
303 bg->data_blocks_used = 0;
304 bg->free_blocks = info.blocks_per_group;
305 bg->free_inodes = info.inodes_per_group;
306 bg->first_free_inode = 1;
307 bg->flags = EXT4_BG_INODE_UNINIT;
310 bg->max_chunk_count = 1;
311 bg->chunks = (struct region*) calloc(bg->max_chunk_count, sizeof(struct region));
313 if (reserve_blocks(bg, i, 0, bg->header_blocks) < 0)
314 error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
315 // Add empty starting delimiter chunk
316 reserve_bg_chunk(i, bg->header_blocks, 0);
318 if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
319 u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
320 reserve_blocks(bg, i, info.blocks_per_group - overrun, overrun);
321 // Add empty ending delimiter chunk
322 reserve_bg_chunk(i, info.blocks_per_group - overrun, 0);
324 reserve_bg_chunk(i, info.blocks_per_group - 1, 0);
329 void block_allocator_init()
333 aux_info.bgs = calloc(sizeof(struct block_group_info), aux_info.groups);
334 if (aux_info.bgs == NULL)
335 critical_error_errno("calloc");
337 for (i = 0; i < aux_info.groups; i++)
338 init_bg(&aux_info.bgs[i], i);
341 void block_allocator_free()
345 for (i = 0; i < aux_info.groups; i++) {
346 free(aux_info.bgs[i].bitmaps);
347 free(aux_info.bgs[i].inode_table);
352 /* Allocate a single block and return its block number */
356 struct block_allocation *blk_alloc = allocate_blocks(1);
358 return EXT4_ALLOCATE_FAILED;
360 block = blk_alloc->list.first->block;
361 free_alloc(blk_alloc);
365 static struct region *ext4_allocate_best_fit_partial(u32 len)
369 unsigned int found_bg = 0, found_prev_chunk = 0, found_block = 0;
370 u32 found_allocate_len = 0;
371 bool minimize = false;
372 struct block_group_info *bgs = aux_info.bgs;
375 for (i = 0; i < aux_info.groups; i++) {
376 for (j = 1; j < bgs[i].chunk_count; j++) {
377 u32 hole_start, hole_size;
378 hole_start = bgs[i].chunks[j-1].block + bgs[i].chunks[j-1].len;
379 hole_size = bgs[i].chunks[j].block - hole_start;
380 if (hole_size == len) {
381 // Perfect fit i.e. right between 2 chunks no need to keep searching
383 found_prev_chunk = j - 1;
384 found_block = hole_start;
385 found_allocate_len = hole_size;
387 } else if (hole_size > len && (found_allocate_len == 0 || (found_allocate_len > hole_size))) {
389 found_prev_chunk = j - 1;
390 found_block = hole_start;
391 found_allocate_len = hole_size;
393 } else if (!minimize) {
394 if (found_allocate_len < hole_size) {
396 found_prev_chunk = j - 1;
397 found_block = hole_start;
398 found_allocate_len = hole_size;
404 if (found_allocate_len == 0) {
405 error("failed to allocate %u blocks, out of space?", len);
408 if (found_allocate_len > len) found_allocate_len = len;
410 // reclaim allocated space in chunk
411 bgs[found_bg].chunks[found_prev_chunk].len += found_allocate_len;
412 if (reserve_blocks(&bgs[found_bg],
415 found_allocate_len) < 0) {
416 error("failed to reserve %u blocks in block group %u\n", found_allocate_len, found_bg);
419 bgs[found_bg].data_blocks_used += found_allocate_len;
420 reg = malloc(sizeof(struct region));
421 reg->block = found_block + bgs[found_bg].first_block;
422 reg->len = found_allocate_len;
429 static struct region *ext4_allocate_best_fit(u32 len)
431 struct region *first_reg = NULL;
432 struct region *prev_reg = NULL;
436 reg = ext4_allocate_best_fit_partial(len);
440 if (first_reg == NULL)
444 prev_reg->next = reg;
445 reg->prev = prev_reg;
455 /* Allocate len blocks. The blocks may be spread across multiple block groups,
456 and are returned in a linked list of the blocks in each block group. The
457 allocation algorithm is:
458 1. If the remaining allocation is larger than any available contiguous region,
459 allocate the largest contiguous region and loop
460 2. Otherwise, allocate the smallest contiguous region that it fits in
462 struct block_allocation *allocate_blocks(u32 len)
464 struct region *reg = ext4_allocate_best_fit(len);
469 struct block_allocation *alloc = create_allocation();
470 alloc->list.first = reg;
471 while (reg->next != NULL)
473 alloc->list.last = reg;
474 alloc->list.iter = alloc->list.first;
475 alloc->list.partial_iter = 0;
479 /* Returns the number of discontiguous regions used by an allocation */
480 int block_allocation_num_regions(struct block_allocation *alloc)
483 struct region *reg = alloc->list.first;
485 for (i = 0; reg != NULL; reg = reg->next)
491 int block_allocation_len(struct block_allocation *alloc)
494 struct region *reg = alloc->list.first;
496 for (i = 0; reg != NULL; reg = reg->next)
502 /* Returns the block number of the block'th block in an allocation */
503 u32 get_block(struct block_allocation *alloc, u32 block)
505 struct region *reg = alloc->list.iter;
506 block += alloc->list.partial_iter;
508 for (; reg; reg = reg->next) {
509 if (block < reg->len)
510 return reg->block + block;
513 return EXT4_ALLOCATE_FAILED;
516 u32 get_oob_block(struct block_allocation *alloc, u32 block)
518 struct region *reg = alloc->oob_list.iter;
519 block += alloc->oob_list.partial_iter;
521 for (; reg; reg = reg->next) {
522 if (block < reg->len)
523 return reg->block + block;
526 return EXT4_ALLOCATE_FAILED;
529 /* Gets the starting block and length in blocks of the first region
531 void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
533 *block = alloc->list.iter->block;
534 *len = alloc->list.iter->len - alloc->list.partial_iter;
537 /* Move to the next region in an allocation */
538 void get_next_region(struct block_allocation *alloc)
540 alloc->list.iter = alloc->list.iter->next;
541 alloc->list.partial_iter = 0;
544 /* Returns the number of free blocks in a block group */
545 u32 get_free_blocks(u32 bg)
547 return aux_info.bgs[bg].free_blocks;
550 int last_region(struct block_allocation *alloc)
552 return (alloc->list.iter == NULL);
555 void rewind_alloc(struct block_allocation *alloc)
557 alloc->list.iter = alloc->list.first;
558 alloc->list.partial_iter = 0;
561 static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
563 struct region *reg = alloc->list.iter;
567 while (reg && len >= reg->len) {
572 if (reg == NULL && len > 0)
576 new = malloc(sizeof(struct region));
579 new->block = reg->block + len;
580 new->len = reg->len - len;
581 new->next = reg->next;
587 tmp = alloc->list.iter;
588 alloc->list.iter = new;
595 /* Splits an allocation into two allocations. The returned allocation will
596 point to the first half, and the original allocation ptr will point to the
598 static struct region *split_allocation(struct block_allocation *alloc, u32 len)
600 /* First make sure there is a split at the current ptr */
601 do_split_allocation(alloc, alloc->list.partial_iter);
603 /* Then split off len blocks */
604 struct region *middle = do_split_allocation(alloc, len);
605 alloc->list.partial_iter = 0;
609 /* Reserve the next blocks for oob data (indirect or extent blocks) */
610 int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
612 struct region *oob = split_allocation(alloc, blocks);
618 while (oob && oob != alloc->list.iter) {
620 region_list_remove(&alloc->list, oob);
621 region_list_append(&alloc->oob_list, oob);
628 static int advance_list_ptr(struct region_list *list, int blocks)
630 struct region *reg = list->iter;
632 while (reg != NULL && blocks > 0) {
633 if (reg->len > list->partial_iter + blocks) {
634 list->partial_iter += blocks;
638 blocks -= (reg->len - list->partial_iter);
639 list->partial_iter = 0;
649 /* Move the allocation pointer forward */
650 int advance_blocks(struct block_allocation *alloc, int blocks)
652 return advance_list_ptr(&alloc->list, blocks);
655 int advance_oob_blocks(struct block_allocation *alloc, int blocks)
657 return advance_list_ptr(&alloc->oob_list, blocks);
660 int append_oob_allocation(struct block_allocation *alloc, u32 len)
662 struct region *reg = ext4_allocate_best_fit(len);
665 error("failed to allocate %d blocks", len);
669 for (; reg; reg = reg->next)
670 region_list_append(&alloc->oob_list, reg);
675 /* Returns an ext4_inode structure for an inode number */
676 struct ext4_inode *get_inode(u32 inode)
679 int bg = inode / info.inodes_per_group;
680 inode %= info.inodes_per_group;
682 allocate_bg_inode_table(&aux_info.bgs[bg]);
683 return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
687 struct ext4_xattr_header *get_xattr_block_for_inode(struct ext4_inode *inode)
689 struct ext4_xattr_header *block = xattr_list_find(inode);
693 u32 block_num = allocate_block();
694 block = calloc(info.block_size, 1);
696 error("get_xattr: failed to allocate %d", info.block_size);
700 block->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
701 block->h_refcount = cpu_to_le32(1);
702 block->h_blocks = cpu_to_le32(1);
703 inode->i_blocks_lo = cpu_to_le32(le32_to_cpu(inode->i_blocks_lo) + (info.block_size / 512));
704 inode->i_file_acl_lo = cpu_to_le32(block_num);
706 int result = sparse_file_add_data(ext4_sparse_file, block, info.block_size, block_num);
708 error("get_xattr: sparse_file_add_data failure %d", result);
712 xattr_list_insert(inode, block);
716 /* Mark the first len inodes in a block group as used */
717 u32 reserve_inodes(int bg, u32 num)
722 if (get_free_inodes(bg) < num)
723 return EXT4_ALLOCATE_FAILED;
725 for (i = 0; i < num; i++) {
726 inode = aux_info.bgs[bg].first_free_inode + i - 1;
727 aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
730 inode = aux_info.bgs[bg].first_free_inode;
732 aux_info.bgs[bg].first_free_inode += num;
733 aux_info.bgs[bg].free_inodes -= num;
738 /* Returns the first free inode number
739 TODO: Inodes should be allocated in the block group of the data? */
745 for (bg = 0; bg < aux_info.groups; bg++) {
746 inode = reserve_inodes(bg, 1);
747 if (inode != EXT4_ALLOCATE_FAILED)
748 return bg * info.inodes_per_group + inode;
751 return EXT4_ALLOCATE_FAILED;
754 /* Returns the number of free inodes in a block group */
755 u32 get_free_inodes(u32 bg)
757 return aux_info.bgs[bg].free_inodes;
760 /* Increments the directory count of the block group that contains inode */
761 void add_directory(u32 inode)
763 int bg = (inode - 1) / info.inodes_per_group;
764 aux_info.bgs[bg].used_dirs += 1;
767 /* Returns the number of inodes in a block group that are directories */
768 u16 get_directories(int bg)
770 return aux_info.bgs[bg].used_dirs;
773 /* Returns the flags for a block group */
774 u16 get_bg_flags(int bg)
776 return aux_info.bgs[bg].flags;
779 /* Frees the memory used by a linked list of allocation regions */
780 void free_alloc(struct block_allocation *alloc)
784 reg = alloc->list.first;
786 struct region *next = reg->next;
791 reg = alloc->oob_list.first;
793 struct region *next = reg->next;
801 void reserve_bg_chunk(int bg, u32 start_block, u32 size) {
802 struct block_group_info *bgs = aux_info.bgs;
804 if (bgs[bg].chunk_count == bgs[bg].max_chunk_count) {
805 bgs[bg].max_chunk_count *= 2;
806 bgs[bg].chunks = realloc(bgs[bg].chunks, bgs[bg].max_chunk_count * sizeof(struct region));
808 critical_error("realloc failed");
810 chunk_count = bgs[bg].chunk_count;
811 bgs[bg].chunks[chunk_count].block = start_block;
812 bgs[bg].chunks[chunk_count].len = size;
813 bgs[bg].chunks[chunk_count].bg = bg;
814 bgs[bg].chunk_count++;
817 int reserve_blocks_for_allocation(struct block_allocation *alloc) {
819 struct block_group_info *bgs = aux_info.bgs;
821 if (!alloc) return 0;
822 reg = alloc->list.first;
823 while (reg != NULL) {
824 if (reserve_blocks(&bgs[reg->bg], reg->bg, reg->block - bgs[reg->bg].first_block, reg->len) < 0) {