about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid Anderson <dvander@google.com>2019-12-06 17:05:54 -0800
committerTheodore Ts'o <tytso@mit.edu>2021-07-21 21:09:09 -0400
commit83782a687791f98237598d1ef9ff161a8b3a3c13 (patch)
tree475cf752839d073d1dc688e53f6c75f24e0a2568
parent942b00cb9d2f2b52f4c58877d523145ee59a89b0 (diff)
downloade2fsprogs-83782a687791f98237598d1ef9ff161a8b3a3c13.tar.gz
AOSP: e2fsdroid: Don't over-reserve blocks for files in Base FS.
If a large number of blocks move from one file to another file, e2fsdroid will inadvertently reserve the moved blocks for the source file. If the larger file is visited first, it can fail to acquire blocks because they're reserved by the smaller file.

This patch only reserves the first N blocks of a file in Base FS, where N is large enough to satisfy the |st_size| property from lstat(2). We only consider "owned" blocks rather than deduplicated blocks, in case the new file cannot be deduplicated.

Google-Bug-Id: 145316683
Test: e2fsdroid with dynamic partitions
Change-Id: I32e255a19550d52d90342c21d7218981108a71b1

From AOSP commit: 2985b26cdbe674084fa02b211503eef9bf970023
-rw-r--r-- contrib/android/basefs_allocator.c | 39
1 files changed, 30 insertions, 9 deletions
diff --git a/contrib/android/basefs_allocator.c b/contrib/android/basefs_allocator.c
index 5c92ddc2d..4f9f5c158 100644
--- a/contrib/android/basefs_allocator.c
+++ b/contrib/android/basefs_allocator.c
@@ -96,38 +96,59 @@ static void basefs_allocator_free(ext2_filsys fs,
*
* The dedup set is not removed from fs->block_map. This allows us to re-use
* dedup blocks separately and not have them be allocated outside of file data.
+ *
+ * This function returns non-zero if the block was owned, and 0 otherwise.
*/
-static void fs_reserve_block(ext2_filsys fs,
- struct base_fs_allocator *allocator,
- blk64_t block)
+static int fs_reserve_block(ext2_filsys fs,
+ struct base_fs_allocator *allocator,
+ blk64_t block)
{
ext2fs_block_bitmap exclusive_map = allocator->exclusive_block_map;
ext2fs_block_bitmap dedup_map = allocator->dedup_block_map;
if (block >= ext2fs_blocks_count(fs->super))
- return;
+ return 0;
if (ext2fs_test_block_bitmap2(fs->block_map, block)) {
if (!ext2fs_test_block_bitmap2(exclusive_map, block))
- return;
+ return 0;
ext2fs_unmark_block_bitmap2(exclusive_map, block);
ext2fs_mark_block_bitmap2(dedup_map, block);
+ return 0;
} else {
ext2fs_mark_block_bitmap2(fs->block_map, block);
ext2fs_mark_block_bitmap2(exclusive_map, block);
+ return 1;
}
}
+/*
+ * Walk the requested block list and reserve blocks, either into the owned
+ * pool or the dedup pool as appropriate. We stop once the file has enough
+ * owned blocks to satisfy |file_size|. This allows any extra blocks to be
+ * re-used, since otherwise a large block movement between files could
+ * trigger block allocation errors.
+ */
static void fs_reserve_blocks_range(ext2_filsys fs,
struct base_fs_allocator *allocator,
- struct block_range_list *list)
+ struct block_range_list *list,
+ off_t file_size)
{
blk64_t block;
+ off_t blocks_needed;
+ off_t blocks_acquired = 0;
struct block_range *blocks = list->head;
+ blocks_needed = file_size + (fs->blocksize - 1);
+ blocks_needed /= fs->blocksize;
+
while (blocks) {
- for (block = blocks->start; block <= blocks->end; block++)
- fs_reserve_block(fs, allocator, block);
+ for (block = blocks->start; block <= blocks->end; block++) {
+ if (fs_reserve_block(fs, allocator, block))
+ blocks_acquired++;
+ if (blocks_acquired >= blocks_needed)
+ return;
+ }
blocks = blocks->next;
}
}
@@ -163,7 +184,7 @@ static errcode_t fs_reserve_blocks(ext2_filsys fs,
return ENAMETOOLONG;
if (lstat(full_path, &st) || !S_ISREG(st.st_mode))
continue;
- fs_reserve_blocks_range(fs, allocator, &e->blocks);
+ fs_reserve_blocks_range(fs, allocator, &e->blocks, st.st_size);
}
return 0;
}