author     David Sterba <dsterba@suse.com>	2020-02-05 19:09:33 +0100
committer  David Sterba <dsterba@suse.com>	2020-02-11 18:42:14 +0100
commit     6990553ba73817ad13bde178420ff59d42dfc016
tree       7af96dbff35713f5ad326a6b1275e92a08d321d1
parent     cf6f1f5a7cb2fecb42624feb6c5402fda5260f67
btrfs: drop argument tree from btrfs_lock_and_flush_ordered_range

The tree pointer can be safely read from the inode so we can drop the
redundant argument from btrfs_lock_and_flush_ordered_range.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
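For orientation, a short sketch of the interface change as it appears in the
hunks below (the start/end variable names are illustrative, not taken from any
one call site):

    /* old prototype: callers had to pass the io_tree alongside its inode */
    void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
                                            struct btrfs_inode *inode, u64 start,
                                            u64 end,
                                            struct extent_state **cached_state);

    /* new prototype: the tree is read from inode->io_tree inside the helper */
    void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                            u64 end,
                                            struct extent_state **cached_state);

    /* typical call site, before and after */
    btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, start, end, NULL);
    btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

Since every caller already asserted tree == &inode->io_tree, dropping the
argument removes a redundancy without changing behaviour.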
 fs/btrfs/extent_io.c    |  4 ++--
 fs/btrfs/file.c         |  2 +-
 fs/btrfs/inode.c        |  2 +-
 fs/btrfs/ordered-data.c | 10 +++-------
 fs/btrfs/ordered-data.h |  3 +--
 5 files changed, 8 insertions(+), 13 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4538d450b02fa3..9b47207e9404cc 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3294,7 +3294,7 @@ static inline void contiguous_readpages(struct extent_io_tree *tree,
ASSERT(tree == &inode->io_tree);
- btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
+ btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
for (index = 0; index < nr_pages; index++) {
__do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
@@ -3317,7 +3317,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
ASSERT(tree == &inode->io_tree);
- btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
+ btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
bio_flags, read_flags, NULL);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5935b7a2761119..a37feadd93441f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1561,7 +1561,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
lockend = round_up(pos + *write_bytes,
fs_info->sectorsize) - 1;
- btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, lockstart,
+ btrfs_lock_and_flush_ordered_range(inode, lockstart,
lockend, NULL);
num_bytes = lockend - lockstart + 1;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 84e649724549e7..c7b9c3f8a6375a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4611,7 +4611,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
if (size <= hole_start)
return 0;
- btrfs_lock_and_flush_ordered_range(io_tree, BTRFS_I(inode), hole_start,
+ btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), hole_start,
block_end - 1, &cached_state);
cur_offset = hole_start;
while (1) {
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index bfd409522468d1..2f42186793f4eb 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -830,7 +830,6 @@ out:
* btrfs_flush_ordered_range - Lock the passed range and ensures all pending
* ordered extents in it are run to completion.
*
- * @tree: IO tree used for locking out other users of the range
* @inode: Inode whose ordered tree is to be searched
* @start: Beginning of range to flush
* @end: Last byte of range to lock
@@ -840,8 +839,7 @@ out:
* This function always returns with the given range locked, ensuring after it's
* called no order extent can be pending.
*/
-void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
- struct btrfs_inode *inode, u64 start,
+void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
u64 end,
struct extent_state **cached_state)
{
@@ -849,13 +847,11 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
struct extent_state *cache = NULL;
struct extent_state **cachedp = &cache;
- ASSERT(tree == &inode->io_tree);
-
if (cached_state)
cachedp = cached_state;
while (1) {
- lock_extent_bits(tree, start, end, cachedp);
+ lock_extent_bits(&inode->io_tree, start, end, cachedp);
ordered = btrfs_lookup_ordered_range(inode, start,
end - start + 1);
if (!ordered) {
@@ -868,7 +864,7 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
refcount_dec(&cache->refs);
break;
}
- unlock_extent_cached(tree, start, end, cachedp);
+ unlock_extent_cached(&inode->io_tree, start, end, cachedp);
btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index a46f319d9ae0c9..c01c9698250b9a 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -183,8 +183,7 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len);
-void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
- struct btrfs_inode *inode, u64 start,
+void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
u64 end,
struct extent_state **cached_state);
int __init ordered_data_init(void);