/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>

#include "ext4_utils.h"
#include "allocate.h"
#include "backed_block.h"
#include "ext4.h"

struct region_list {
	struct region *first;
	struct region *last;
	struct region *iter;
	u32 partial_iter;
};

struct block_allocation {
	struct region_list list;
	struct region_list oob_list;
};

struct region {
	u32 block;
	u32 len;
	int bg;
	struct region *next;
	struct region *prev;
};

struct block_group_info {
	u32 first_block;
	int header_blocks;
	int data_blocks_used;
	int has_superblock;
	u8 *bitmaps;
	u8 *block_bitmap;
	u8 *inode_bitmap;
	u8 *inode_table;
	u32 free_blocks;
	u32 first_free_block;
	u32 free_inodes;
	u32 first_free_inode;
	u16 used_dirs;
};

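/* Allocates and initializes an empty block allocation with empty in-band and
   out-of-band region lists */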
struct block_allocation *create_allocation()
{
	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
	alloc->list.first = NULL;
	alloc->list.last = NULL;
	alloc->oob_list.first = NULL;
	alloc->oob_list.last = NULL;
	alloc->list.iter = NULL;
	alloc->list.partial_iter = 0;
	alloc->oob_list.iter = NULL;
	alloc->oob_list.partial_iter = 0;
	return alloc;
}

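/* Unlinks a region from a region list, fixing up the list's first and last
   pointers as needed */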
static void region_list_remove(struct region_list *list, struct region *reg)
{
	if (reg->prev)
		reg->prev->next = reg->next;

	if (reg->next)
		reg->next->prev = reg->prev;

	if (list->first == reg)
		list->first = reg->next;

	if (list->last == reg)
		list->last = reg->prev;

	reg->next = NULL;
	reg->prev = NULL;
}

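/* Appends a region to the end of a region list, initializing the iteration
   state if the list was empty */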
static void region_list_append(struct region_list *list, struct region *reg)
{
	if (list->first == NULL) {
		list->first = reg;
		list->last = reg;
		list->iter = reg;
		list->partial_iter = 0;
		reg->prev = NULL;
	} else {
		list->last->next = reg;
		reg->prev = list->last;
		list->last = reg;
	}
	reg->next = NULL;
}

#if 0
static void dump_starting_from(struct region *reg)
{
	for (; reg; reg = reg->next) {
		printf("%p: Blocks %d-%d (%d)\n", reg,
				reg->bg * info.blocks_per_group + reg->block,
				reg->bg * info.blocks_per_group + reg->block + reg->len - 1,
				reg->len);
	}
}

static void dump_region_lists(struct block_allocation *alloc) {

	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
#endif

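/* Appends a new region of len blocks starting at block in block group bg_num
   to the main region list of an allocation */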
void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	region_list_append(&alloc->list, reg);
}

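/* Lazily allocates the in-memory inode table for a block group and queues it
   to be written out at the group's inode table location */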
static void allocate_bg_inode_table(struct block_group_info *bg)
{
	if (bg->inode_table != NULL)
		return;

	u32 block = bg->first_block + 2;

	if (bg->has_superblock)
		block += aux_info.bg_desc_blocks + aux_info.bg_desc_reserve_blocks + 1;

	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
	if (bg->inode_table == NULL)
		critical_error_errno("calloc");

	queue_data_block(bg->inode_table, aux_info.inode_table_blocks
			* info.block_size, block);
}

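/* Sets a single bit in a bitmap.  Returns 1 if the bit was already set */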
static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	if (bitmap[bit / 8] & 1 << (bit % 8))
		return 1;

	bitmap[bit / 8] |= 1 << (bit % 8);
	return 0;
}

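/* Sets all eight bits of the byte containing bit.  Returns nonzero if any of
   those bits were already set */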
static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	int ret = bitmap[bit / 8];
	bitmap[bit / 8] = 0xFF;
	return ret;
}

/* Marks num blocks starting at start in a block group as used, and accounts
   for them in the block group free block info. */
static int reserve_blocks(struct block_group_info *bg, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	if (num > bg->free_blocks)
		return -1;

	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	bg->free_blocks -= num;
	if (start == bg->first_free_block)
		bg->first_free_block = start + num;

	return 0;
}

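/* Returns the last num_blocks allocated blocks of a block group to its free
   pool, clearing their bits in the block bitmap */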
static void free_blocks(struct block_group_info *bg, u32 num_blocks)
{
	unsigned int i;
	u32 block = bg->first_free_block - 1;
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
	bg->first_free_block -= num_blocks;
}

/* Reduces an existing allocation by len blocks by returning the last blocks
   to the free pool in their block group.  Assumes that the blocks being
   returned were the last ones allocated out of the block group */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;

		if (last_reg->len > len) {
			free_blocks(&aux_info.bgs[last_reg->bg], len);
			last_reg->len -= len;
			len = 0;
		} else {
			struct region *reg = alloc->list.last->prev;
			free_blocks(&aux_info.bgs[last_reg->bg], last_reg->len);
			len -= last_reg->len;
			if (reg)
				reg->next = NULL;
			else
				alloc->list.first = NULL;
			/* Update the list tail before freeing the old last
			   region so the next iteration doesn't touch freed
			   memory */
			alloc->list.last = reg;
			free(last_reg);
		}
	}
}

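/* Initializes a block group: allocates its block and inode bitmaps, queues
   them for writing, and reserves the group's header blocks (superblock copy,
   group descriptors, bitmaps and inode table) */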
static void init_bg(struct block_group_info *bg, unsigned int i)
{
	int header_blocks = 2 + aux_info.inode_table_blocks;

	bg->has_superblock = ext4_bg_has_super_block(i);

	if (bg->has_superblock)
		header_blocks += 1 + aux_info.bg_desc_blocks + aux_info.bg_desc_reserve_blocks;

	bg->bitmaps = calloc(info.block_size, 2);
	bg->block_bitmap = bg->bitmaps;
	bg->inode_bitmap = bg->bitmaps + info.block_size;

	bg->header_blocks = header_blocks;
	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;

	u32 block = bg->first_block;
	if (bg->has_superblock)
		block += 1 + aux_info.bg_desc_blocks + aux_info.bg_desc_reserve_blocks;
	queue_data_block(bg->bitmaps, 2 * info.block_size, block);

	bg->data_blocks_used = 0;
	bg->free_blocks = info.blocks_per_group;
	bg->first_free_block = 0;
	bg->free_inodes = info.inodes_per_group;
	bg->first_free_inode = 1;

	if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
		error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);

	/* Reserve any blocks that fall past the end of the filesystem.  Guard
	   the unsigned subtraction so overrun cannot wrap when the group fits
	   entirely within the filesystem. */
	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
		reserve_blocks(bg, info.blocks_per_group - overrun, overrun);
	}
}

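/* Allocates the per-block-group bookkeeping array and initializes every
   block group in the filesystem */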
void block_allocator_init()
{
	unsigned int i;

	aux_info.bgs = calloc(sizeof(struct block_group_info), aux_info.groups);
	if (aux_info.bgs == NULL)
		critical_error_errno("calloc");

	for (i = 0; i < aux_info.groups; i++)
		init_bg(&aux_info.bgs[i], i);
}

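/* Frees the bitmaps, inode tables and block group array allocated by the
   block allocator */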
void block_allocator_free()
{
	unsigned int i;

	for (i = 0; i < aux_info.groups; i++) {
		free(aux_info.bgs[i].bitmaps);
		free(aux_info.bgs[i].inode_table);
	}
	free(aux_info.bgs);
}

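/* Reserves len contiguous blocks from the start of the free area of a single
   block group.  Returns the filesystem block number of the first reserved
   block, or EXT4_ALLOCATE_FAILED if the group has fewer than len free
   blocks */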
static u32 ext4_allocate_blocks_from_block_group(u32 len, int bg_num)
{
	if (get_free_blocks(bg_num) < len)
		return EXT4_ALLOCATE_FAILED;

	u32 block = aux_info.bgs[bg_num].first_free_block;
	struct block_group_info *bg = &aux_info.bgs[bg_num];
	if (reserve_blocks(bg, bg->first_free_block, len) < 0) {
		error("failed to reserve %u blocks in block group %u\n", len, bg_num);
		return EXT4_ALLOCATE_FAILED;
	}

	aux_info.bgs[bg_num].data_blocks_used += len;

	return bg->first_block + block;
}

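/* Allocates len contiguous blocks from the first block group that can hold
   them.  Returns a single newly allocated region, or NULL if no block group
   has enough free space */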
static struct region *ext4_allocate_contiguous_blocks(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(len, i);

		if (block != EXT4_ALLOCATE_FAILED) {
			reg = malloc(sizeof(struct region));
			reg->block = block;
			reg->len = len;
			reg->next = NULL;
			reg->prev = NULL;
			reg->bg = i;
			return reg;
		}
	}

	return NULL;
}

/* Allocate a single block and return its block number */
u32 allocate_block()
{
	unsigned int i;
	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(1, i);

		if (block != EXT4_ALLOCATE_FAILED)
			return block;
	}

	return EXT4_ALLOCATE_FAILED;
}

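/* Allocates up to len blocks from the first completely unused block group.
   If the remaining request fits within a single group, falls back to the
   contiguous allocator; otherwise takes all of the empty group's free
   blocks */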
static struct region *ext4_allocate_partial(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		if (aux_info.bgs[i].data_blocks_used == 0) {
			u32 bg_len = aux_info.bgs[i].free_blocks;
			u32 block;

			if (len <= bg_len) {
				/* If the requested length would fit in a block group,
				   use the regular allocator to try to fit it in a partially
				   used block group */
				bg_len = len;
				reg = ext4_allocate_contiguous_blocks(len);
			} else {
				block = ext4_allocate_blocks_from_block_group(bg_len, i);

				if (block == EXT4_ALLOCATE_FAILED) {
					error("failed to allocate %d blocks in block group %d", bg_len, i);
					return NULL;
				}

				reg = malloc(sizeof(struct region));
				reg->block = block;
				reg->len = bg_len;
				reg->next = NULL;
				reg->prev = NULL;
				reg->bg = i;
			}

			return reg;
		}
	}
	return NULL;
}

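/* Satisfies a request for len blocks with multiple regions, repeatedly taking
   the largest partial allocation available until the request is filled.
   Returns the head of a linked list of regions, or NULL on failure */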
static struct region *ext4_allocate_multiple_contiguous_blocks(u32 len)
{
	struct region *first_reg = NULL;
	struct region *prev_reg = NULL;
	struct region *reg;

	while (len > 0) {
		reg = ext4_allocate_partial(len);
		if (reg == NULL)
			return NULL;

		if (first_reg == NULL)
			first_reg = reg;

		if (prev_reg) {
			prev_reg->next = reg;
			reg->prev = prev_reg;
		}

		prev_reg = reg;
		len -= reg->len;
	}

	return first_reg;
}

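/* Allocates len blocks, first as a single contiguous region and, failing
   that, as a chain of regions spread across block groups */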
struct region *do_allocate(u32 len)
{
	struct region *reg = ext4_allocate_contiguous_blocks(len);

	if (reg == NULL)
		reg = ext4_allocate_multiple_contiguous_blocks(len);

	return reg;
}

/* Allocates len blocks.  The blocks may be spread across multiple block
   groups, and are returned as a linked list of the regions allocated in each
   block group.  The allocation algorithm is:
      1.  Try to allocate the entire length contiguously within a single
          block group
      2.  If the request doesn't fit in any block group, allocate as many
          blocks as possible into each block group that is completely empty
      3.  Put the last set of blocks in the first block group they fit in
*/
struct block_allocation *allocate_blocks(u32 len)
{
	struct region *reg = do_allocate(len);

	if (reg == NULL)
		return NULL;

	struct block_allocation *alloc = create_allocation();
	alloc->list.first = reg;
	alloc->list.last = reg;
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
	return alloc;
}
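
/* Illustrative sketch, not part of the allocator: one plausible way a caller
   might walk the regions of an allocation using the iterator API in this
   file.  The function name below is hypothetical, and the block is disabled
   with the same #if 0 convention used for the debug dump helpers above. */
#if 0
static void print_allocation_regions(struct block_allocation *alloc)
{
	u32 block;
	u32 len;

	rewind_alloc(alloc);
	while (!last_region(alloc)) {
		/* get_region reports the current region, adjusted for any
		   partial advance; get_next_region moves on to the next one */
		get_region(alloc, &block, &len);
		printf("region: start block %u, %u blocks\n", block, len);
		get_next_region(alloc);
	}
}
#endif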

/* Returns the number of discontiguous regions used by an allocation */
int block_allocation_num_regions(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i++;

	return i;
}

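/* Returns the total length in blocks of an allocation */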
int block_allocation_len(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i += reg->len;

	return i;
}

/* Returns the block number of the block'th block in an allocation */
u32 get_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->list.iter;
	block += alloc->list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

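/* Returns the block number of the block'th out-of-band block in an
   allocation */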
u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->oob_list.iter;
	block += alloc->oob_list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

/* Gets the starting block and length in blocks of the current region
   of an allocation */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	*block = alloc->list.iter->block;
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}

/* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}

/* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	return aux_info.bgs[bg].free_blocks;
}

int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}

void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}

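/* Walks len blocks forward from the current iteration pointer and, if that
   point falls inside a region, splits the region in two there.  Returns the
   region the iteration pointer referred to before the split (or the region
   at the boundary when no split was needed), or NULL if len runs past the
   end of the list */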
static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		return reg;
	}
}

/* Splits an allocation into two allocations. The returned allocation will
   point to the first half, and the original allocation ptr will point to the
   second half. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}

/* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}

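/* Advances a region list's iteration state by blocks, recording a partial
   offset into the region where the walk stops.  Returns -1 if the list does
   not contain that many more blocks */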
static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		if (reg->len > list->partial_iter + blocks) {
			list->partial_iter += blocks;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
	}

	if (blocks > 0)
		return -1;

	return 0;
}

/* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->list, blocks);
}

int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}

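/* Allocates len more blocks and appends them to the out-of-band list of an
   allocation */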
int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = do_allocate(len);

	if (reg == NULL) {
		error("failed to allocate %d blocks", len);
		return -1;
	}

	for (; reg; reg = reg->next)
		region_list_append(&alloc->oob_list, reg);

	return 0;
}

/* Returns an ext4_inode structure for an inode number */
struct ext4_inode *get_inode(u32 inode)
{
	inode -= 1;
	int bg = inode / info.inodes_per_group;
	inode %= info.inodes_per_group;

	allocate_bg_inode_table(&aux_info.bgs[bg]);
	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode
		* info.inode_size);
}

/* Marks the first num inodes in a block group as used */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}

/* Returns the first free inode number
   TODO: Inodes should be allocated in the block group of the data? */
u32 allocate_inode()
{
	unsigned int bg;
	u32 inode;

	for (bg = 0; bg < aux_info.groups; bg++) {
		inode = reserve_inodes(bg, 1);
		if (inode != EXT4_ALLOCATE_FAILED)
			return bg * info.inodes_per_group + inode;
	}

	return EXT4_ALLOCATE_FAILED;
}

/* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	return aux_info.bgs[bg].free_inodes;
}

/* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}

/* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}

/* Frees the memory used by a linked list of allocation regions */
void free_alloc(struct block_allocation *alloc)
{
	struct region *reg;

	reg = alloc->list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	reg = alloc->oob_list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	free(alloc);
}