blob: d3a77b0af7afa5c9e67c1c02db00727e098efc83 [file] [log] [blame]
Colin Crossec0a2e82010-06-11 14:21:37 -07001/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
Mohamad Ayyash95791982016-02-20 03:46:00 +00008 * http://www.apache.org/licenses/LICENSE-2.0
Colin Crossec0a2e82010-06-11 14:21:37 -07009 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Colin Crossec0a2e82010-06-11 14:21:37 -070017#include "allocate.h"
Colin Crossec0a2e82010-06-11 14:21:37 -070018
Colin Cross33f96c62010-12-22 18:40:14 -080019#include <stdio.h>
20#include <stdlib.h>
21
Tao Bao06ca8112016-10-05 12:44:18 -070022#include <sparse/sparse.h>
23
24#include "ext4_utils/ext4_utils.h"
25
/* Node in the singly-linked cache that maps an inode to its external
 * xattr block; maintained by xattr_list_insert() and searched by
 * xattr_list_find().  The list head lives in aux_info.xattrs. */
struct xattr_list_element {
	struct ext4_inode *inode;
	struct ext4_xattr_header *header;
	struct xattr_list_element *next;
};
31
Colin Crossec0a2e82010-06-11 14:21:37 -070032struct block_allocation *create_allocation()
33{
Colin Cross8aef66d2010-06-20 23:22:12 -070034 struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
35 alloc->list.first = NULL;
36 alloc->list.last = NULL;
37 alloc->oob_list.first = NULL;
38 alloc->oob_list.last = NULL;
39 alloc->list.iter = NULL;
40 alloc->list.partial_iter = 0;
41 alloc->oob_list.iter = NULL;
42 alloc->oob_list.partial_iter = 0;
Doug Zongkerbec598e2014-08-12 11:35:37 -070043 alloc->filename = NULL;
44 alloc->next = NULL;
Colin Cross8aef66d2010-06-20 23:22:12 -070045 return alloc;
Colin Crossec0a2e82010-06-11 14:21:37 -070046}
47
Nick Kralevich4df62f32013-02-07 14:21:34 -080048static struct ext4_xattr_header *xattr_list_find(struct ext4_inode *inode)
49{
50 struct xattr_list_element *element;
51 for (element = aux_info.xattrs; element != NULL; element = element->next) {
52 if (element->inode == inode)
53 return element->header;
54 }
55 return NULL;
56}
57
58static void xattr_list_insert(struct ext4_inode *inode, struct ext4_xattr_header *header)
59{
60 struct xattr_list_element *element = malloc(sizeof(struct xattr_list_element));
61 element->inode = inode;
62 element->header = header;
63 element->next = aux_info.xattrs;
64 aux_info.xattrs = element;
65}
66
Colin Crossec0a2e82010-06-11 14:21:37 -070067static void region_list_remove(struct region_list *list, struct region *reg)
68{
69 if (reg->prev)
70 reg->prev->next = reg->next;
71
72 if (reg->next)
73 reg->next->prev = reg->prev;
74
75 if (list->first == reg)
76 list->first = reg->next;
77
78 if (list->last == reg)
79 list->last = reg->prev;
80
81 reg->next = NULL;
82 reg->prev = NULL;
83}
84
Mohamad Ayyash95791982016-02-20 03:46:00 +000085void region_list_append(struct region_list *list, struct region *reg)
Colin Crossec0a2e82010-06-11 14:21:37 -070086{
87 if (list->first == NULL) {
88 list->first = reg;
89 list->last = reg;
90 list->iter = reg;
91 list->partial_iter = 0;
92 reg->prev = NULL;
93 } else {
94 list->last->next = reg;
95 reg->prev = list->last;
96 list->last = reg;
97 }
98 reg->next = NULL;
99}
100
Mohamad Ayyashf7124d62016-04-29 11:14:02 -0700101void region_list_merge(struct region_list *list1, struct region_list *list2)
102{
103 if (list1->first == NULL) {
104 list1->first = list2->first;
105 list1->last = list2->last;
106 list1->iter = list2->first;
107 list1->partial_iter = 0;
108 list1->first->prev = NULL;
109 } else {
110 list1->last->next = list2->first;
111 list2->first->prev = list1->last;
112 list1->last = list2->last;
113 }
114}
Colin Crossec0a2e82010-06-11 14:21:37 -0700115#if 0
116static void dump_starting_from(struct region *reg)
117{
118 for (; reg; reg = reg->next) {
119 printf("%p: Blocks %d-%d (%d)\n", reg,
Doug Zongkerbec598e2014-08-12 11:35:37 -0700120 reg->block, reg->block + reg->len - 1, reg->len)
Colin Crossec0a2e82010-06-11 14:21:37 -0700121 }
122}
123
124static void dump_region_lists(struct block_allocation *alloc) {
125
126 printf("Main list:\n");
127 dump_starting_from(alloc->list.first);
128
129 printf("OOB list:\n");
130 dump_starting_from(alloc->oob_list.first);
131}
132#endif
133
Mohamad Ayyash95791982016-02-20 03:46:00 +0000134void print_blocks(FILE* f, struct block_allocation *alloc, char separator)
Doug Zongkerbec598e2014-08-12 11:35:37 -0700135{
136 struct region *reg;
Mohamad Ayyash95791982016-02-20 03:46:00 +0000137 fputc(' ', f);
Doug Zongkerbec598e2014-08-12 11:35:37 -0700138 for (reg = alloc->list.first; reg; reg = reg->next) {
139 if (reg->len == 1) {
Mohamad Ayyash95791982016-02-20 03:46:00 +0000140 fprintf(f, "%d", reg->block);
Doug Zongkerbec598e2014-08-12 11:35:37 -0700141 } else {
Mohamad Ayyash95791982016-02-20 03:46:00 +0000142 fprintf(f, "%d-%d", reg->block, reg->block + reg->len - 1);
Doug Zongkerbec598e2014-08-12 11:35:37 -0700143 }
Mohamad Ayyash95791982016-02-20 03:46:00 +0000144 fputc(separator, f);
Doug Zongkerbec598e2014-08-12 11:35:37 -0700145 }
146 fputc('\n', f);
147}
148
Colin Crossec0a2e82010-06-11 14:21:37 -0700149void append_region(struct block_allocation *alloc,
150 u32 block, u32 len, int bg_num)
151{
Colin Cross8aef66d2010-06-20 23:22:12 -0700152 struct region *reg;
153 reg = malloc(sizeof(struct region));
154 reg->block = block;
155 reg->len = len;
156 reg->bg = bg_num;
157 reg->next = NULL;
Colin Crossec0a2e82010-06-11 14:21:37 -0700158
Colin Cross8aef66d2010-06-20 23:22:12 -0700159 region_list_append(&alloc->list, reg);
Colin Crossec0a2e82010-06-11 14:21:37 -0700160}
161
/* Lazily allocates block group bg's in-memory inode table and registers
 * it with the global sparse file image.  No-op if already allocated.
 * Also clears EXT4_BG_INODE_UNINIT since the table now exists. */
static void allocate_bg_inode_table(struct block_group_info *bg)
{
	if (bg->inode_table != NULL)
		return;

	/* The inode table follows the two bitmap blocks at the start of the
	   group's data area. */
	u32 block = bg->first_block + 2;

	if (bg->has_superblock)
		block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;

	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
	if (bg->inode_table == NULL)
		critical_error_errno("calloc");

	/* The sparse file keeps a reference to this buffer; it is freed in
	   block_allocator_free(). */
	sparse_file_add_data(ext4_sparse_file, bg->inode_table,
			aux_info.inode_table_blocks * info.block_size, block);

	bg->flags &= ~EXT4_BG_INODE_UNINIT;
}
181
Colin Crossec0a2e82010-06-11 14:21:37 -0700182static int bitmap_set_bit(u8 *bitmap, u32 bit)
183{
184 if (bitmap[bit / 8] & 1 << (bit % 8))
185 return 1;
186
187 bitmap[bit / 8] |= 1 << (bit % 8);
188 return 0;
189}
190
191static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
192{
193 int ret = bitmap[bit / 8];
194 bitmap[bit / 8] = 0xFF;
195 return ret;
196}
197
/* Marks num blocks starting at group-relative block 'start' as used in
   bg's block bitmap and deducts them from the group's free-block count.
   Returns 0 on success, -1 (with an error logged naming bg_num) if any
   block in the range was already reserved. */
static int reserve_blocks(struct block_group_info *bg, u32 bg_num, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	/* Set bits one at a time until 'block' reaches a byte boundary (or
	   the range is exhausted). */
	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
			return -1;
		}
	}

	/* Fast path: whole bytes at a time while at least 8 aligned bits
	   remain (num & ~7 rounds num down to a multiple of 8). */
	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
			return -1;
		}
	}

	/* Trailing bits that don't fill a byte. */
	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
			return -1;
		}
	}

	bg->free_blocks -= num;

	return 0;
}
230
/* Returns num_blocks blocks to block group bg's free pool.  NOTE: the
   'block' argument is the group-relative index of the LAST block to
   free; bits are cleared walking downward from it.  The chunk list is
   then adjusted so the freed range is no longer covered by a reserved
   chunk (scanned from the newest chunk backwards, since frees normally
   undo the most recent reservation). */
static void free_blocks(struct block_group_info *bg, u32 block, u32 num_blocks)
{
	unsigned int i;

	if (num_blocks == 0)
		return;
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
	/* The loop above overshot by one; 'block' is now the first freed
	   block of the range. */
	block++;
	for (i = bg->chunk_count; i > 0 ;) {
		--i;
		if (bg->chunks[i].len >= num_blocks && bg->chunks[i].block <= block) {
			if (bg->chunks[i].block == block) {
				/* Freed range is at the front of the chunk: advance it. */
				bg->chunks[i].block += num_blocks;
				bg->chunks[i].len -= num_blocks;
			} else if (bg->chunks[i].block + bg->chunks[i].len == block + num_blocks) {
				/* Freed range is at the tail of the chunk: shrink it. */
				bg->chunks[i].len -= num_blocks;
			}
			break;
		}
	}
}
254
/* Reduces an existing allocation by len blocks by returning the last
   blocks to the free pool in their block group.  Assumes that the blocks
   being returned were the last ones allocated out of the block group. */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;
		struct block_group_info *bg = &aux_info.bgs[last_reg->bg];

		if (last_reg->len > len) {
			/* Trim only the tail of the last region.  free_blocks()
			   takes the group-relative index of the LAST block to free
			   and walks downward, hence the "+ len - 1" here. */
			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, len);
			last_reg->len -= len;
			len = 0;
		} else {
			/* The whole last region is returned: free its blocks, then
			   unlink and release the region node. */
			struct region *reg = alloc->list.last->prev;
			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, last_reg->len);
			len -= last_reg->len;
			if (reg) {
				reg->next = NULL;
				alloc->list.last = reg;
			} else {
				/* List is now empty; reset all list state. */
				alloc->list.first = NULL;
				alloc->list.last = NULL;
				alloc->list.iter = NULL;
				alloc->list.partial_iter = 0;
			}
			free(last_reg);
		}
	}
}
285
286static void init_bg(struct block_group_info *bg, unsigned int i)
287{
288 int header_blocks = 2 + aux_info.inode_table_blocks;
289
290 bg->has_superblock = ext4_bg_has_super_block(i);
291
292 if (bg->has_superblock)
Colin Cross22742ce2010-12-22 16:01:52 -0800293 header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
Colin Crossec0a2e82010-06-11 14:21:37 -0700294
295 bg->bitmaps = calloc(info.block_size, 2);
296 bg->block_bitmap = bg->bitmaps;
297 bg->inode_bitmap = bg->bitmaps + info.block_size;
298
299 bg->header_blocks = header_blocks;
300 bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;
301
302 u32 block = bg->first_block;
303 if (bg->has_superblock)
Colin Cross22742ce2010-12-22 16:01:52 -0800304 block += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
Colin Cross782879a2014-01-23 13:08:16 -0800305 sparse_file_add_data(ext4_sparse_file, bg->bitmaps, 2 * info.block_size,
Colin Crossf0ee37f2012-04-24 17:48:43 -0700306 block);
Colin Crossec0a2e82010-06-11 14:21:37 -0700307
308 bg->data_blocks_used = 0;
309 bg->free_blocks = info.blocks_per_group;
Colin Crossec0a2e82010-06-11 14:21:37 -0700310 bg->free_inodes = info.inodes_per_group;
311 bg->first_free_inode = 1;
Mohamad Ayyashdedf8f92016-04-14 19:43:31 -0700312 bg->flags = EXT4_BG_INODE_UNINIT;
Colin Crossec0a2e82010-06-11 14:21:37 -0700313
Mohamad Ayyash95791982016-02-20 03:46:00 +0000314 bg->chunk_count = 0;
315 bg->max_chunk_count = 1;
316 bg->chunks = (struct region*) calloc(bg->max_chunk_count, sizeof(struct region));
317
Mohamad Ayyasha6866eb2016-03-02 21:10:45 -0800318 if (reserve_blocks(bg, i, 0, bg->header_blocks) < 0)
Colin Crossec0a2e82010-06-11 14:21:37 -0700319 error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
Mohamad Ayyash95791982016-02-20 03:46:00 +0000320 // Add empty starting delimiter chunk
321 reserve_bg_chunk(i, bg->header_blocks, 0);
Colin Crossec0a2e82010-06-11 14:21:37 -0700322
Ken Sumrall17de6542013-08-15 19:06:29 -0700323 if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
324 u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
Mohamad Ayyasha6866eb2016-03-02 21:10:45 -0800325 reserve_blocks(bg, i, info.blocks_per_group - overrun, overrun);
Mohamad Ayyash95791982016-02-20 03:46:00 +0000326 // Add empty ending delimiter chunk
327 reserve_bg_chunk(i, info.blocks_per_group - overrun, 0);
328 } else {
329 reserve_bg_chunk(i, info.blocks_per_group - 1, 0);
Ken Sumrall17de6542013-08-15 19:06:29 -0700330 }
Mohamad Ayyash95791982016-02-20 03:46:00 +0000331
Colin Crossec0a2e82010-06-11 14:21:37 -0700332}
333
334void block_allocator_init()
335{
336 unsigned int i;
337
338 aux_info.bgs = calloc(sizeof(struct block_group_info), aux_info.groups);
339 if (aux_info.bgs == NULL)
340 critical_error_errno("calloc");
341
342 for (i = 0; i < aux_info.groups; i++)
343 init_bg(&aux_info.bgs[i], i);
344}
345
346void block_allocator_free()
347{
348 unsigned int i;
349
350 for (i = 0; i < aux_info.groups; i++) {
351 free(aux_info.bgs[i].bitmaps);
352 free(aux_info.bgs[i].inode_table);
353 }
354 free(aux_info.bgs);
355}
356
Colin Crossec0a2e82010-06-11 14:21:37 -0700357/* Allocate a single block and return its block number */
358u32 allocate_block()
359{
Mohamad Ayyash95791982016-02-20 03:46:00 +0000360 u32 block;
361 struct block_allocation *blk_alloc = allocate_blocks(1);
362 if (!blk_alloc) {
363 return EXT4_ALLOCATE_FAILED;
Colin Crossec0a2e82010-06-11 14:21:37 -0700364 }
Mohamad Ayyash95791982016-02-20 03:46:00 +0000365 block = blk_alloc->list.first->block;
366 free_alloc(blk_alloc);
367 return block;
Colin Crossec0a2e82010-06-11 14:21:37 -0700368}
369
Colin Crosseb3915a2014-01-21 00:04:04 -0800370static struct region *ext4_allocate_best_fit_partial(u32 len)
Colin Crossec0a2e82010-06-11 14:21:37 -0700371{
Dmitry Shmidtcd205302016-11-18 15:38:47 -0800372 unsigned int i;
373 int j;
Mohamad Ayyash95791982016-02-20 03:46:00 +0000374 unsigned int found_bg = 0, found_prev_chunk = 0, found_block = 0;
375 u32 found_allocate_len = 0;
376 bool minimize = false;
377 struct block_group_info *bgs = aux_info.bgs;
378 struct region *reg;
Mohamad Ayyash18785a82016-02-19 21:16:34 +0000379
Colin Crossec0a2e82010-06-11 14:21:37 -0700380 for (i = 0; i < aux_info.groups; i++) {
Mohamad Ayyash95791982016-02-20 03:46:00 +0000381 for (j = 1; j < bgs[i].chunk_count; j++) {
382 u32 hole_start, hole_size;
383 hole_start = bgs[i].chunks[j-1].block + bgs[i].chunks[j-1].len;
384 hole_size = bgs[i].chunks[j].block - hole_start;
385 if (hole_size == len) {
386 // Perfect fit i.e. right between 2 chunks no need to keep searching
387 found_bg = i;
388 found_prev_chunk = j - 1;
389 found_block = hole_start;
390 found_allocate_len = hole_size;
391 goto done;
392 } else if (hole_size > len && (found_allocate_len == 0 || (found_allocate_len > hole_size))) {
393 found_bg = i;
394 found_prev_chunk = j - 1;
395 found_block = hole_start;
396 found_allocate_len = hole_size;
397 minimize = true;
398 } else if (!minimize) {
399 if (found_allocate_len < hole_size) {
400 found_bg = i;
401 found_prev_chunk = j - 1;
402 found_block = hole_start;
403 found_allocate_len = hole_size;
404 }
405 }
Colin Crossec0a2e82010-06-11 14:21:37 -0700406 }
407 }
Colin Crosseb3915a2014-01-21 00:04:04 -0800408
Mohamad Ayyash95791982016-02-20 03:46:00 +0000409 if (found_allocate_len == 0) {
Colin Crosseb3915a2014-01-21 00:04:04 -0800410 error("failed to allocate %u blocks, out of space?", len);
Mohamad Ayyash95791982016-02-20 03:46:00 +0000411 return NULL;
Colin Crosseb3915a2014-01-21 00:04:04 -0800412 }
Mohamad Ayyash95791982016-02-20 03:46:00 +0000413 if (found_allocate_len > len) found_allocate_len = len;
414done:
415 // reclaim allocated space in chunk
416 bgs[found_bg].chunks[found_prev_chunk].len += found_allocate_len;
417 if (reserve_blocks(&bgs[found_bg],
Mohamad Ayyasha6866eb2016-03-02 21:10:45 -0800418 found_bg,
Mohamad Ayyash95791982016-02-20 03:46:00 +0000419 found_block,
420 found_allocate_len) < 0) {
421 error("failed to reserve %u blocks in block group %u\n", found_allocate_len, found_bg);
422 return NULL;
423 }
424 bgs[found_bg].data_blocks_used += found_allocate_len;
425 reg = malloc(sizeof(struct region));
426 reg->block = found_block + bgs[found_bg].first_block;
427 reg->len = found_allocate_len;
428 reg->next = NULL;
429 reg->prev = NULL;
430 reg->bg = found_bg;
431 return reg;
Colin Crossec0a2e82010-06-11 14:21:37 -0700432}
433
Colin Crosseb3915a2014-01-21 00:04:04 -0800434static struct region *ext4_allocate_best_fit(u32 len)
Colin Crossec0a2e82010-06-11 14:21:37 -0700435{
436 struct region *first_reg = NULL;
437 struct region *prev_reg = NULL;
438 struct region *reg;
439
440 while (len > 0) {
Colin Crosseb3915a2014-01-21 00:04:04 -0800441 reg = ext4_allocate_best_fit_partial(len);
Colin Crossec0a2e82010-06-11 14:21:37 -0700442 if (reg == NULL)
443 return NULL;
444
445 if (first_reg == NULL)
446 first_reg = reg;
447
448 if (prev_reg) {
449 prev_reg->next = reg;
450 reg->prev = prev_reg;
451 }
452
453 prev_reg = reg;
454 len -= reg->len;
455 }
456
457 return first_reg;
458}
459
Colin Crossec0a2e82010-06-11 14:21:37 -0700460/* Allocate len blocks. The blocks may be spread across multiple block groups,
461 and are returned in a linked list of the blocks in each block group. The
462 allocation algorithm is:
Mohamad Ayyash95791982016-02-20 03:46:00 +0000463 1. If the remaining allocation is larger than any available contiguous region,
464 allocate the largest contiguous region and loop
465 2. Otherwise, allocate the smallest contiguous region that it fits in
Colin Crossec0a2e82010-06-11 14:21:37 -0700466*/
467struct block_allocation *allocate_blocks(u32 len)
468{
Colin Crosseb3915a2014-01-21 00:04:04 -0800469 struct region *reg = ext4_allocate_best_fit(len);
Colin Crossec0a2e82010-06-11 14:21:37 -0700470
471 if (reg == NULL)
472 return NULL;
473
474 struct block_allocation *alloc = create_allocation();
475 alloc->list.first = reg;
Mohamad Ayyash95791982016-02-20 03:46:00 +0000476 while (reg->next != NULL)
477 reg = reg->next;
Colin Cross8aef66d2010-06-20 23:22:12 -0700478 alloc->list.last = reg;
Colin Crossec0a2e82010-06-11 14:21:37 -0700479 alloc->list.iter = alloc->list.first;
480 alloc->list.partial_iter = 0;
481 return alloc;
482}
483
484/* Returns the number of discontiguous regions used by an allocation */
485int block_allocation_num_regions(struct block_allocation *alloc)
486{
487 unsigned int i;
488 struct region *reg = alloc->list.first;
489
490 for (i = 0; reg != NULL; reg = reg->next)
491 i++;
492
493 return i;
494}
495
496int block_allocation_len(struct block_allocation *alloc)
497{
498 unsigned int i;
Colin Cross8aef66d2010-06-20 23:22:12 -0700499 struct region *reg = alloc->list.first;
Colin Crossec0a2e82010-06-11 14:21:37 -0700500
501 for (i = 0; reg != NULL; reg = reg->next)
502 i += reg->len;
503
504 return i;
505}
506
507/* Returns the block number of the block'th block in an allocation */
508u32 get_block(struct block_allocation *alloc, u32 block)
509{
Colin Cross8aef66d2010-06-20 23:22:12 -0700510 struct region *reg = alloc->list.iter;
511 block += alloc->list.partial_iter;
Colin Crossec0a2e82010-06-11 14:21:37 -0700512
Colin Cross8aef66d2010-06-20 23:22:12 -0700513 for (; reg; reg = reg->next) {
Colin Crossec0a2e82010-06-11 14:21:37 -0700514 if (block < reg->len)
515 return reg->block + block;
516 block -= reg->len;
517 }
518 return EXT4_ALLOCATE_FAILED;
519}
520
521u32 get_oob_block(struct block_allocation *alloc, u32 block)
522{
Colin Cross8aef66d2010-06-20 23:22:12 -0700523 struct region *reg = alloc->oob_list.iter;
524 block += alloc->oob_list.partial_iter;
Colin Crossec0a2e82010-06-11 14:21:37 -0700525
Colin Cross8aef66d2010-06-20 23:22:12 -0700526 for (; reg; reg = reg->next) {
Colin Crossec0a2e82010-06-11 14:21:37 -0700527 if (block < reg->len)
528 return reg->block + block;
529 block -= reg->len;
530 }
531 return EXT4_ALLOCATE_FAILED;
532}
533
534/* Gets the starting block and length in blocks of the first region
535 of an allocation */
536void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
537{
538 *block = alloc->list.iter->block;
539 *len = alloc->list.iter->len - alloc->list.partial_iter;
540}
541
/* Move to the next region in an allocation, resetting the partial
   (within-region) offset.  iter becomes NULL at the end of the list. */
void get_next_region(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}
548
/* Returns the number of free blocks remaining in block group bg. */
u32 get_free_blocks(u32 bg)
{
	return aux_info.bgs[bg].free_blocks;
}
554
555int last_region(struct block_allocation *alloc)
556{
557 return (alloc->list.iter == NULL);
558}
559
/* Resets the allocation's iterator to the first region, with no partial
   offset consumed. */
void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}
565
/* Ensures a region boundary exists exactly len blocks past the current
   iterator region.  If the point falls inside a region, that region is
   cut in two, list.iter is advanced to the second piece, and the region
   that was list.iter on entry is returned.  If the point falls exactly
   on an existing boundary, the list (and iter) are left unchanged and
   the region beginning at that boundary is returned.  Returns NULL if
   the allocation is shorter than len.
   NOTE(review): the malloc below is unchecked, matching the original;
   callers would crash on OOM. */
static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	/* Consume whole regions until the split point is inside reg. */
	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));

		/* 'new' takes the tail [block+len, block+reg->len) of reg. */
		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		return reg;
	}
}
599
/* Splits an allocation into two allocations. The returned region chain
   points to the first half (len blocks from the current iterator), and
   the allocation's iterator will point to the second half. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr: cut off any
	   partially-consumed prefix of the iterator region. */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}
613
614/* Reserve the next blocks for oob data (indirect or extent blocks) */
615int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
616{
617 struct region *oob = split_allocation(alloc, blocks);
Colin Cross8aef66d2010-06-20 23:22:12 -0700618 struct region *next;
Colin Crossec0a2e82010-06-11 14:21:37 -0700619
620 if (oob == NULL)
621 return -1;
622
623 while (oob && oob != alloc->list.iter) {
624 next = oob->next;
625 region_list_remove(&alloc->list, oob);
626 region_list_append(&alloc->oob_list, oob);
627 oob = next;
628 }
629
630 return 0;
631}
632
/* Advances a region list's iterator (iter plus the within-region offset
   partial_iter) by 'blocks' blocks, stepping to following regions as
   needed.  Returns 0 on success, -1 if the list ends before 'blocks'
   blocks are consumed (the iterator is then left at the end). */
static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		/* Does the advance finish inside the current region? */
		if (reg->len > list->partial_iter + blocks) {
			list->partial_iter += blocks;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
	}

	if (blocks > 0)
		return -1;

	return 0;
}
653
/* Move the main-list allocation pointer forward by 'blocks' blocks.
   Returns 0 on success, -1 if the list is exhausted first. */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->list, blocks);
}
659
/* Move the out-of-band list's allocation pointer forward by 'blocks'
   blocks.  Returns 0 on success, -1 if the list is exhausted first. */
int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}
664
665int append_oob_allocation(struct block_allocation *alloc, u32 len)
666{
Colin Crosseb3915a2014-01-21 00:04:04 -0800667 struct region *reg = ext4_allocate_best_fit(len);
Colin Crossec0a2e82010-06-11 14:21:37 -0700668
669 if (reg == NULL) {
670 error("failed to allocate %d blocks", len);
671 return -1;
672 }
673
674 for (; reg; reg = reg->next)
675 region_list_append(&alloc->oob_list, reg);
676
677 return 0;
678}
679
680/* Returns an ext4_inode structure for an inode number */
681struct ext4_inode *get_inode(u32 inode)
682{
683 inode -= 1;
684 int bg = inode / info.inodes_per_group;
685 inode %= info.inodes_per_group;
686
687 allocate_bg_inode_table(&aux_info.bgs[bg]);
Colin Cross8aef66d2010-06-20 23:22:12 -0700688 return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
689 info.inode_size);
Colin Crossec0a2e82010-06-11 14:21:37 -0700690}
691
Nick Kralevich4df62f32013-02-07 14:21:34 -0800692struct ext4_xattr_header *get_xattr_block_for_inode(struct ext4_inode *inode)
693{
694 struct ext4_xattr_header *block = xattr_list_find(inode);
695 if (block != NULL)
696 return block;
697
698 u32 block_num = allocate_block();
699 block = calloc(info.block_size, 1);
700 if (block == NULL) {
701 error("get_xattr: failed to allocate %d", info.block_size);
702 return NULL;
703 }
704
705 block->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
706 block->h_refcount = cpu_to_le32(1);
707 block->h_blocks = cpu_to_le32(1);
708 inode->i_blocks_lo = cpu_to_le32(le32_to_cpu(inode->i_blocks_lo) + (info.block_size / 512));
709 inode->i_file_acl_lo = cpu_to_le32(block_num);
710
Colin Cross782879a2014-01-23 13:08:16 -0800711 int result = sparse_file_add_data(ext4_sparse_file, block, info.block_size, block_num);
Nick Kralevich4df62f32013-02-07 14:21:34 -0800712 if (result != 0) {
713 error("get_xattr: sparse_file_add_data failure %d", result);
714 free(block);
715 return NULL;
716 }
717 xattr_list_insert(inode, block);
718 return block;
719}
720
/* Marks the next num free inodes in block group bg as used and returns
   the 1-based (group-relative) number of the first one, or
   EXT4_ALLOCATE_FAILED if the group does not have num free inodes. */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		/* first_free_inode is 1-based; subtract 1 for the bitmap index. */
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}
742
743/* Returns the first free inode number
744 TODO: Inodes should be allocated in the block group of the data? */
745u32 allocate_inode()
746{
747 unsigned int bg;
748 u32 inode;
749
750 for (bg = 0; bg < aux_info.groups; bg++) {
751 inode = reserve_inodes(bg, 1);
752 if (inode != EXT4_ALLOCATE_FAILED)
753 return bg * info.inodes_per_group + inode;
754 }
755
756 return EXT4_ALLOCATE_FAILED;
757}
758
/* Returns the number of free inodes remaining in block group bg. */
u32 get_free_inodes(u32 bg)
{
	return aux_info.bgs[bg].free_inodes;
}
764
/* Increments the directory count of the block group that contains the
   given (1-based) inode number. */
void add_directory(u32 inode)
{
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}
771
/* Returns the number of inodes in block group bg that are directories. */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}
777
/* Returns the EXT4_BG_* flags for block group bg. */
u16 get_bg_flags(int bg)
{
	return aux_info.bgs[bg].flags;
}
783
Colin Crossec0a2e82010-06-11 14:21:37 -0700784/* Frees the memory used by a linked list of allocation regions */
785void free_alloc(struct block_allocation *alloc)
786{
787 struct region *reg;
788
789 reg = alloc->list.first;
790 while (reg) {
791 struct region *next = reg->next;
792 free(reg);
793 reg = next;
794 }
795
796 reg = alloc->oob_list.first;
797 while (reg) {
798 struct region *next = reg->next;
799 free(reg);
800 reg = next;
801 }
802
803 free(alloc);
804}
Mohamad Ayyash95791982016-02-20 03:46:00 +0000805
806void reserve_bg_chunk(int bg, u32 start_block, u32 size) {
807 struct block_group_info *bgs = aux_info.bgs;
808 int chunk_count;
809 if (bgs[bg].chunk_count == bgs[bg].max_chunk_count) {
810 bgs[bg].max_chunk_count *= 2;
811 bgs[bg].chunks = realloc(bgs[bg].chunks, bgs[bg].max_chunk_count * sizeof(struct region));
812 if (!bgs[bg].chunks)
813 critical_error("realloc failed");
814 }
815 chunk_count = bgs[bg].chunk_count;
816 bgs[bg].chunks[chunk_count].block = start_block;
817 bgs[bg].chunks[chunk_count].len = size;
818 bgs[bg].chunks[chunk_count].bg = bg;
819 bgs[bg].chunk_count++;
820}
821
822int reserve_blocks_for_allocation(struct block_allocation *alloc) {
823 struct region *reg;
824 struct block_group_info *bgs = aux_info.bgs;
825
826 if (!alloc) return 0;
827 reg = alloc->list.first;
828 while (reg != NULL) {
Mohamad Ayyasha6866eb2016-03-02 21:10:45 -0800829 if (reserve_blocks(&bgs[reg->bg], reg->bg, reg->block - bgs[reg->bg].first_block, reg->len) < 0) {
Mohamad Ayyash95791982016-02-20 03:46:00 +0000830 return -1;
Mohamad Ayyasha6866eb2016-03-02 21:10:45 -0800831 }
Mohamad Ayyash95791982016-02-20 03:46:00 +0000832 reg = reg->next;
833 }
834 return 0;
835}
836