author     Josef Bacik <josef@redhat.com>    2012-01-26 08:33:32 -0500
committer  Josef Bacik <josef@redhat.com>    2012-03-02 15:17:14 -0500
commit     f3d3c46e01b1caf987c2344dc2a346c502bdbcb3 (patch)
tree       28a1934e08b10c9c80786ffe942d9fdb5102a717
parent     a8e0a3055f8f07f83a6bb7665bcaac6ded6716f2 (diff)
Btrfs: manage metadata cache ourselves (own-caching)
This patch moves the management of the metadata cache from pagecache to our
own internal caching, which can choose to evict things based on what is no
longer in use.

Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
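For orientation, the core of the change is that extent buffers become refcounted
objects tracked in btrfs's own radix tree and LRU (the eb_tree, eb_lru and
eb_shrinker fields this patch adds to struct btrfs_fs_info), with a shrinker
evicting idle buffers instead of relying on pagecache reclaim, and with
free_extent_buffer_stale() dropping just-freed blocks immediately. What follows
is only a minimal user-space sketch of that refcount/LRU/stale-release
behaviour; the helper names and the simplified shrink pass are illustrative
assumptions, not the patch's actual code.

/*
 * Toy user-space model of the scheme (an illustration, not the patch's code):
 * extent buffers are refcounted; dropping the last ref parks the buffer on an
 * LRU instead of freeing it, and a shrinker-style pass evicts from the LRU
 * tail.  A "stale" release drops the buffer immediately, the way the patch
 * uses free_extent_buffer_stale() for blocks it has just freed or cowed away.
 */
#include <stdio.h>
#include <stdlib.h>

struct eb {
        unsigned long long start;       /* block number, used as the cache key */
        int refs;                       /* active references */
        struct eb *lru_prev, *lru_next; /* LRU linkage (NULL = not on the LRU) */
};

static struct eb *lru_head, *lru_tail;  /* most / least recently parked */
static int lru_nr;

static void lru_add(struct eb *eb)
{
        eb->lru_prev = NULL;
        eb->lru_next = lru_head;
        if (lru_head)
                lru_head->lru_prev = eb;
        lru_head = eb;
        if (!lru_tail)
                lru_tail = eb;
        lru_nr++;
}

static void lru_del(struct eb *eb)
{
        if (eb->lru_prev)
                eb->lru_prev->lru_next = eb->lru_next;
        else
                lru_head = eb->lru_next;
        if (eb->lru_next)
                eb->lru_next->lru_prev = eb->lru_prev;
        else
                lru_tail = eb->lru_prev;
        eb->lru_prev = eb->lru_next = NULL;
        lru_nr--;
}

/* Drop a reference; an unused buffer is parked on the LRU, not freed. */
static void eb_put(struct eb *eb)
{
        if (--eb->refs == 0)
                lru_add(eb);
}

/* Drop a reference on a block we know is dead: skip the LRU entirely. */
static void eb_put_stale(struct eb *eb)
{
        if (--eb->refs == 0)
                free(eb);
}

/* Shrinker-style pass: evict up to nr buffers from the cold end of the LRU. */
static int shrink_ebs(int nr)
{
        int evicted = 0;

        while (nr-- > 0 && lru_tail) {
                struct eb *victim = lru_tail;
                lru_del(victim);
                free(victim);
                evicted++;
        }
        return evicted;
}

int main(void)
{
        struct eb *a = calloc(1, sizeof(*a));
        struct eb *b = calloc(1, sizeof(*b));

        a->start = 4096;  a->refs = 1;
        b->start = 8192;  b->refs = 1;

        eb_put(a);         /* a is idle: parked on the LRU */
        eb_put_stale(b);   /* b's block was just freed: drop it immediately */

        printf("cached idle buffers: %d\n", lru_nr);
        printf("evicted under pressure: %d\n", shrink_ebs(16));
        return 0;
}

In the patch itself the cache is keyed by the buffer's start bytenr in
fs_info->eb_tree and eviction is driven by the registered eb_shrinker
(shrink_ebs), but the park-on-LRU versus drop-immediately distinction is the
same one the ctree.c hunks below exercise via free_extent_buffer() and
free_extent_buffer_stale().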
-rw-r--r--   Documentation/sysctl/vm.txt     |   30
-rw-r--r--   fs/btrfs/backref.c              |    4
-rw-r--r--   fs/btrfs/ctree.c                |   50
-rw-r--r--   fs/btrfs/ctree.h                |   18
-rw-r--r--   fs/btrfs/disk-io.c              | 1255
-rw-r--r--   fs/btrfs/disk-io.h              |    3
-rw-r--r--   fs/btrfs/extent-tree.c          |   12
-rw-r--r--   fs/btrfs/extent_io.c            |  687
-rw-r--r--   fs/btrfs/extent_io.h            |   96
-rw-r--r--   fs/btrfs/inode.c                |    8
-rw-r--r--   fs/btrfs/relocation.c           |    3
-rw-r--r--   fs/btrfs/super.c                |    2
-rw-r--r--   fs/btrfs/transaction.c          |   62
-rw-r--r--   fs/btrfs/transaction.h          |    1
-rw-r--r--   fs/btrfs/tree-log.c             |   45
-rw-r--r--   fs/btrfs/volumes.c              |    2
-rw-r--r--   include/linux/oom.h             |    2
-rw-r--r--   include/trace/events/btrfs.h    |    2
-rw-r--r--   kernel/sysctl.c                 |   16
-rw-r--r--   mm/oom_kill.c                   |   13
-rw-r--r--   mm/slab.c                       |   72
-rw-r--r--   mm/slub.c                       |   52
22 files changed, 1496 insertions, 939 deletions
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 96f0ee825bed3e..a0da8b3dd28c57 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -498,6 +498,36 @@ this is causing problems for your system/application.
==============================================================
+oom_dump_slabs
+
+Enables a system-wide slab cache dump to be produced when the kernel
+performs an OOM-killing and includes, per slab cache, such information as
+active objects, total objects, object size, cache name, and cache size.
+This is helpful for determining the top slab cache memory users, as well as
+for identifying how much each of them contributed to this OOM-killer occurrence.
+
+If this is set to zero, this information is suppressed.
+
+If this is set to non-zero, this information is shown whenever the
+OOM killer actually kills a memory-hogging task.
+
+The default value is 1 (enabled).
+
+==============================================================
+
+oom_dump_slabs_ratio
+
+Adjusts the per-cache size cutoff for oom_dump_slabs reports, expressed as a
+percentage of the total system memory dedicated to the slab cache. If this is
+set to a non-zero 'N', only caches bigger than N% of the total memory committed
+to slab will be dumped out when the OOM-killer is invoked.
+
+When set to 0, all slab caches will be listed in the dump report.
+
+The default value is 10.
+
+==============================================================
+
oom_dump_tasks
Enables a system-wide task dump (excluding kernel threads) to be
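As a usage illustration only (assuming this patch is applied so that the two
sysctls above actually exist, and with write_vm_knob() being a hypothetical
helper, not a kernel or libc API), the knobs show up as ordinary files under
/proc/sys/vm/ and can be set like any other vm tunable:

/*
 * Hypothetical userspace helper for the proposed oom_dump_slabs knobs.
 * These files only exist if the sysctl additions from this patch are built in.
 */
#include <stdio.h>

static int write_vm_knob(const char *name, const char *val)
{
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/vm/%s", name);
        f = fopen(path, "w");
        if (!f)
                return -1;      /* knob missing or insufficient privileges */
        fputs(val, f);
        fclose(f);
        return 0;
}

int main(void)
{
        /* keep the slab dump enabled, but only report caches >= 5% of slab memory */
        write_vm_knob("oom_dump_slabs", "1");
        write_vm_knob("oom_dump_slabs_ratio", "5");
        return 0;
}

The same effect can of course be had by echoing the values into the two files
from a shell.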
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index b9a843226de859..04cf24d0250322 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -904,7 +904,7 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
eb = path->nodes[0];
/* make sure we can use eb after releasing the path */
if (eb != eb_in)
- atomic_inc(&eb->refs);
+ extent_buffer_get(eb);
btrfs_release_path(path);
iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
@@ -1251,7 +1251,7 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
slot = path->slots[0];
eb = path->nodes[0];
/* make sure we can use eb after releasing the path */
- atomic_inc(&eb->refs);
+ extent_buffer_get(eb);
btrfs_release_path(path);
item = btrfs_item_nr(eb, slot);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0639a555e16ed1..51c1f2fd114141 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -137,7 +137,10 @@ noinline void btrfs_release_path(struct btrfs_path *p)
btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
p->locks[i] = 0;
}
- free_extent_buffer(p->nodes[i]);
+ if (unlikely(p->search_commit_root))
+ free_extent_buffer_stale(p->nodes[i]);
+ else
+ free_extent_buffer(p->nodes[i]);
p->nodes[i] = NULL;
}
}
@@ -418,6 +421,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
int level;
int last_ref = 0;
int unlock_orig = 0;
+ bool root_node = false;
u64 parent_start;
if (*cow_ret == buf)
@@ -473,6 +477,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
btrfs_reloc_cow_block(trans, root, buf, cow);
if (buf == root->node) {
+ root_node = true;
WARN_ON(parent && parent != buf);
if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
@@ -504,7 +509,19 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
}
if (unlock_orig)
btrfs_tree_unlock(buf);
- free_extent_buffer(buf);
+// printk(KERN_ERR "marking %p dirty, freeing %p\n", cow, buf);
+
+ /*
+ * So we need to not free the old root node immediately because some
+ * other reader could come in and get a ref on the previous root node
+ * and then end up either freeing the node twice or much worse, so we
+ * need to go through the normal LRU stuff for cow'ing root nodes so we
+ * are safe.
+ */
+ if (root_node)
+ free_extent_buffer(buf);
+ else
+ free_extent_buffer_stale(buf);
btrfs_mark_buffer_dirty(cow);
*cow_ret = cow;
return 0;
@@ -874,6 +891,7 @@ static void root_sub_used(struct btrfs_root *root, u32 size)
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
struct extent_buffer *parent, int slot)
{
+ struct btrfs_trans_handle *trans = current->journal_info;
int level = btrfs_header_level(parent);
if (slot < 0)
return NULL;
@@ -959,7 +977,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
root_sub_used(root, mid->len);
btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
/* once for the root ptr */
- free_extent_buffer(mid);
+ free_extent_buffer_stale(mid);
return 0;
}
if (btrfs_header_nritems(mid) >
@@ -1016,7 +1034,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
ret = wret;
root_sub_used(root, right->len);
btrfs_free_tree_block(trans, root, right, 0, 1, 0);
- free_extent_buffer(right);
+ free_extent_buffer_stale(right);
right = NULL;
} else {
struct btrfs_disk_key right_key;
@@ -1056,7 +1074,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
ret = wret;
root_sub_used(root, mid->len);
btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
- free_extent_buffer(mid);
+ free_extent_buffer_stale(mid);
mid = NULL;
} else {
/* update the parent key to reflect our changes */
@@ -1309,6 +1327,7 @@ static void reada_for_search(struct btrfs_root *root,
static noinline int reada_for_balance(struct btrfs_root *root,
struct btrfs_path *path, int level)
{
+ struct btrfs_trans_handle *trans = current->journal_info;
int slot;
int nritems;
struct extent_buffer *parent;
@@ -1354,15 +1373,18 @@ static noinline int reada_for_balance(struct btrfs_root *root,
readahead_tree_block(root, block1, blocksize, 0);
if (block2)
readahead_tree_block(root, block2, blocksize, 0);
-
+/*
if (block1) {
+ atomic_inc(&trans->transaction->balance_read_count);
eb = read_tree_block(root, block1, blocksize, 0);
free_extent_buffer(eb);
}
if (block2) {
+ atomic_inc(&trans->transaction->balance_read_count);
eb = read_tree_block(root, block2, blocksize, 0);
free_extent_buffer(eb);
}
+*/
}
return ret;
}
@@ -1458,12 +1480,19 @@ read_block_for_search(struct btrfs_trans_handle *trans,
struct extent_buffer **eb_ret, int level, int slot,
struct btrfs_key *key)
{
+ struct btrfs_transaction *cur_trans;
u64 blocknr;
u64 gen;
u32 blocksize;
struct extent_buffer *b = *eb_ret;
struct extent_buffer *tmp;
int ret;
+ static int count = 0;
+
+ if (trans)
+ cur_trans = trans->transaction;
+ else
+ cur_trans = root->fs_info->running_transaction;
blocknr = btrfs_node_blockptr(b, slot);
gen = btrfs_node_ptr_generation(b, slot);
@@ -1489,7 +1518,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
*/
free_extent_buffer(tmp);
btrfs_set_path_blocking(p);
-
+ WARN_ON(1);
tmp = read_tree_block(root, blocknr, blocksize, gen);
if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
*eb_ret = tmp;
@@ -1520,6 +1549,11 @@ read_block_for_search(struct btrfs_trans_handle *trans,
ret = -EAGAIN;
tmp = read_tree_block(root, blocknr, blocksize, 0);
if (tmp) {
+// if (root == root->fs_info->extent_root)
+// printk(KERN_ERR "reading blocknr %Lu\n", blocknr);
+ count++;
+// if (!(count % 50))
+// WARN_ON(1);
/*
* If the read above didn't mark this buffer up to date,
* it will never end up being up to date. Set ret to EIO now
@@ -3781,7 +3815,9 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
root_sub_used(root, leaf->len);
+ extent_buffer_get(leaf);
btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
+ free_extent_buffer_stale(leaf);
return 0;
}
/*
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9d6f59c3749d14..072c62f8ebad9a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1003,6 +1003,18 @@ struct btrfs_fs_info {
spinlock_t fs_roots_radix_lock;
struct radix_tree_root fs_roots_radix;
+ /* eb caching stuff */
+ spinlock_t eb_tree_lock;
+ spinlock_t eb_lru_lock;
+ spinlock_t eb_dirty_lock;
+ struct list_head eb_lru;
+ struct list_head dirty_leaves;
+ struct list_head dirty_nodes;
+ int eb_lru_nr;
+ struct radix_tree_root eb_tree;
+ struct shrinker eb_shrinker;
+ wait_queue_head_t writeback_ebs;
+
/* block group cache stuff */
spinlock_t block_group_cache_lock;
struct rb_root block_group_cache_tree;
@@ -1055,13 +1067,13 @@ struct btrfs_fs_info {
struct btrfs_super_block *super_for_commit;
struct block_device *__bdev;
struct super_block *sb;
- struct inode *btree_inode;
struct backing_dev_info bdi;
struct mutex tree_log_mutex;
struct mutex transaction_kthread_mutex;
struct mutex cleaner_mutex;
struct mutex chunk_mutex;
struct mutex volume_mutex;
+ struct mutex metadata_flusher_mutex;
/*
* this protects the ordered operations list only while we are
* processing all of the entries on it. This way we make
@@ -1097,6 +1109,9 @@ struct btrfs_fs_info {
atomic_t nr_async_bios;
atomic_t async_delalloc_pages;
atomic_t open_ioctl_trans;
+ atomic_t nr_ebs;
+ atomic_t dirty_ebs;
+ atomic_t inflight_ebs;
/*
* this is used by the balancing code to wait for all the pending
@@ -1156,6 +1171,7 @@ struct btrfs_fs_info {
struct btrfs_workers delayed_workers;
struct task_struct *transaction_kthread;
struct task_struct *cleaner_kthread;
+ struct task_struct *metadata_flusher_kthread;
int thread_pool_size;
struct kobject super_kobj;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 33114bc5849bd4..0ed346db11e74c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -45,7 +45,6 @@
#include "inode-map.h"
#include "check-integrity.h"
-static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@@ -86,6 +85,7 @@ struct end_io_wq {
*/
struct async_submit_bio {
struct inode *inode;
+ struct btrfs_root *root;
struct bio *bio;
struct list_head list;
extent_submit_bio_hook_t *submit_bio_start;
@@ -180,66 +180,6 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
#endif
-/*
- * extents on the btree inode are pretty simple, there's one extent
- * that covers the entire device
- */
-static struct extent_map *btree_get_extent(struct inode *inode,
- struct page *page, size_t pg_offset, u64 start, u64 len,
- int create)
-{
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- struct extent_map *em;
- int ret;
-
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, len);
- if (em) {
- em->bdev =
- BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
- read_unlock(&em_tree->lock);
- goto out;
- }
- read_unlock(&em_tree->lock);
-
- em = alloc_extent_map();
- if (!em) {
- em = ERR_PTR(-ENOMEM);
- goto out;
- }
- em->start = 0;
- em->len = (u64)-1;
- em->block_len = (u64)-1;
- em->block_start = 0;
- em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
-
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- if (ret == -EEXIST) {
- u64 failed_start = em->start;
- u64 failed_len = em->len;
-
- free_extent_map(em);
- em = lookup_extent_mapping(em_tree, start, len);
- if (em) {
- ret = 0;
- } else {
- em = lookup_extent_mapping(em_tree, failed_start,
- failed_len);
- ret = -EIO;
- }
- } else if (ret) {
- free_extent_map(em);
- em = NULL;
- }
- write_unlock(&em_tree->lock);
-
- if (ret)
- em = ERR_PTR(ret);
-out:
- return em;
-}
-
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
return crc32c(seed, data, len);
@@ -322,18 +262,20 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
* detect blocks that either didn't get written at all or got written
* in the wrong place.
*/
-static int verify_parent_transid(struct extent_io_tree *io_tree,
- struct extent_buffer *eb, u64 parent_transid)
+static int verify_parent_transid(struct extent_buffer *eb, u64 parent_transid)
{
- struct extent_state *cached_state = NULL;
int ret;
if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
return 0;
+ /*
+ *
+ * do we need locking here? maybe iolock
lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
0, &cached_state, GFP_NOFS);
- if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
+ */
+ if (extent_buffer_uptodate(eb) &&
btrfs_header_generation(eb) == parent_transid) {
ret = 0;
goto out;
@@ -344,10 +286,12 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
(unsigned long long)parent_transid,
(unsigned long long)btrfs_header_generation(eb));
ret = 1;
- clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
+ clear_extent_buffer_uptodate(eb);
out:
+ /*
unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
&cached_state, GFP_NOFS);
+ */
return ret;
}
@@ -359,18 +303,14 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
struct extent_buffer *eb,
u64 start, u64 parent_transid)
{
- struct extent_io_tree *io_tree;
int ret;
int num_copies = 0;
int mirror_num = 0;
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
- io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
while (1) {
- ret = read_extent_buffer_pages(io_tree, eb, start,
- WAIT_COMPLETE,
- btree_get_extent, mirror_num);
- if (!ret && !verify_parent_transid(io_tree, eb, parent_transid))
+ ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
+ if (!ret && !verify_parent_transid(eb, parent_transid))
return ret;
/*
@@ -400,45 +340,73 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
- struct extent_io_tree *tree;
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 found_start;
- unsigned long len;
struct extent_buffer *eb;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
-
- if (page->private == EXTENT_PAGE_PRIVATE)
- goto out;
if (!page->private) {
WARN_ON(1);
goto out;
}
- len = page->private >> 2;
- WARN_ON(len == 0);
- eb = find_extent_buffer(tree, start, len);
+ eb = (struct extent_buffer *)page->private;
+ if (page != eb->pages[0])
+ goto out;
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
WARN_ON(1);
- goto err;
- }
- if (eb->pages[0] != page) {
- WARN_ON(1);
- goto err;
- }
- if (!PageUptodate(page)) {
- WARN_ON(1);
- goto err;
+ goto out;
}
csum_tree_block(root, eb, 0);
-err:
- free_extent_buffer(eb);
out:
return 0;
}
+static void eb_write_endio(struct bio *bio, int err)
+{
+ struct bio_vec *bvec = bio->bi_io_vec;
+ int bio_index = 0;
+ int uptodate = err == 0;
+ struct extent_buffer *eb, *prev_eb = NULL;
+
+ while (bio_index < bio->bi_vcnt) {
+ int iounlock = 0;
+
+ eb = (struct extent_buffer *)bvec->bv_page->private;
+ if (!uptodate) {
+ clear_extent_buffer_uptodate(eb);
+ set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ WARN_ON(1);
+ }
+
+ spin_lock(&eb->eb_lock);
+ iounlock = (--eb->io_pages == 0);
+ spin_unlock(&eb->eb_lock);
+
+ if (iounlock)
+ extent_buffer_iounlock(eb);
+ if (prev_eb != eb) {
+ if (prev_eb) {
+ atomic_dec(&eb->root->fs_info->inflight_ebs);
+ wake_up(&eb->root->fs_info->writeback_ebs);
+ }
+ free_extent_buffer(prev_eb);
+ }
+ prev_eb = eb;
+ bio_index++;
+ bvec++;
+ }
+
+ if (prev_eb) {
+ atomic_dec(&eb->root->fs_info->inflight_ebs);
+ wake_up(&eb->root->fs_info->writeback_ebs);
+ }
+ free_extent_buffer(prev_eb);
+ //WARN_ON(atomic_read(&bio->bi_cnt) > 1);
+ bio_put(bio);
+}
+
static int check_tree_block_fsid(struct btrfs_root *root,
struct extent_buffer *eb)
{
@@ -525,66 +493,13 @@ static noinline int check_leaf(struct btrfs_root *root,
return 0;
}
-struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
- struct page *page, int max_walk)
-{
- struct extent_buffer *eb;
- u64 start = page_offset(page);
- u64 target = start;
- u64 min_start;
-
- if (start < max_walk)
- min_start = 0;
- else
- min_start = start - max_walk;
-
- while (start >= min_start) {
- eb = find_extent_buffer(tree, start, 0);
- if (eb) {
- /*
- * we found an extent buffer and it contains our page
- * horray!
- */
- if (eb->start <= target &&
- eb->start + eb->len > target)
- return eb;
-
- /* we found an extent buffer that wasn't for us */
- free_extent_buffer(eb);
- return NULL;
- }
- if (start == 0)
- break;
- start -= PAGE_CACHE_SIZE;
- }
- return NULL;
-}
-
-static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
- struct extent_state *state)
+static void process_eb_read(struct extent_buffer *eb)
{
- struct extent_io_tree *tree;
u64 found_start;
int found_level;
- unsigned long len;
- struct extent_buffer *eb;
- struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
- int ret = 0;
- int reads_done;
-
- if (!page->private)
- goto out;
+ int ret = -EIO;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- len = page->private >> 2;
-
- eb = find_eb_for_page(tree, page, max(root->leafsize, root->nodesize));
- if (!eb) {
- ret = -EIO;
- goto out;
- }
- reads_done = atomic_dec_and_test(&eb->pages_reading);
- if (!reads_done)
+ if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags))
goto err;
found_start = btrfs_header_bytenr(eb);
@@ -593,13 +508,11 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
"%llu %llu\n",
(unsigned long long)found_start,
(unsigned long long)eb->start);
- ret = -EIO;
goto err;
}
- if (check_tree_block_fsid(root, eb)) {
+ if (check_tree_block_fsid(eb->root, eb)) {
printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
(unsigned long long)eb->start);
- ret = -EIO;
goto err;
}
found_level = btrfs_header_level(eb);
@@ -607,35 +520,108 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
eb, found_level);
- ret = csum_tree_block(root, eb, 1);
- if (ret) {
- ret = -EIO;
+ if (csum_tree_block(eb->root, eb, 1))
goto err;
- }
/*
* If this is a leaf block and it is corrupt, set the corrupt bit so
* that we don't try and read the other copies of this block, just
* return -EIO.
*/
- if (found_level == 0 && check_leaf(root, eb)) {
+ if (found_level == 0 && check_leaf(eb->root, eb))
set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
- ret = -EIO;
- }
+ else
+ ret = 0;
err:
+/*
+ * do this later
+ if (!uptodate) {
+ int failed_mirror;
+ failed_mirror = (int)(unsigned long)bio->bi_bdev;
+ *
+ * The generic bio_readpage_error handles errors the
+ * following way: If possible, new read requests are
+ * created and submitted and will end up in
+ * end_bio_extent_readpage as well (if we're lucky, not
+ * in the !uptodate case). In that case it returns 0 and
+ * we just go on with the next page in our bio. If it
+ * can't handle the error it will return -EIO and we
+ * remain responsible for that page.
+ *
+ if (!bio_readpage_error(bio, page, start, end, failed_mirror,
+ NULL)) {
+ uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ if (err)
+ uptodate = 0;
+ continue;
+ }
+ }
+*/
+
+ /*
+ * We have to check to make sure that we don't have IOERR set since this
+ * eb could have been split up between multiple bios and one of the
+ * other ones may have failed.
+ */
+ if (!ret && !test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+ set_extent_buffer_uptodate(eb);
+ } else if (ret) {
+ clear_extent_buffer_uptodate(eb);
+ set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ }
+
if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
- btree_readahead_hook(root, eb, eb->start, ret);
+ btree_readahead_hook(eb->root, eb, eb->start, ret);
}
- if (ret && eb)
- clear_extent_buffer_uptodate(tree, eb, NULL);
- free_extent_buffer(eb);
-out:
- return ret;
+ extent_buffer_iounlock(eb);
+}
+
+static void eb_read_endio(struct bio *bio, int err)
+{
+ struct extent_buffer *eb, *prev_eb = NULL;
+ int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct bio_vec *bvec = bio->bi_io_vec;
+ int bio_index = 0;
+ int ret = -EIO;
+
+ WARN_ON(in_interrupt());
+ if (err)
+ uptodate = 0;
+
+ eb = bio->bi_private;
+
+ while (bio_index < bio->bi_vcnt) {
+ int process = 0;
+ eb = (struct extent_buffer *)bvec->bv_page->private;
+ if (!uptodate)
+ set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ spin_lock(&eb->eb_lock);
+ process = (--eb->io_pages == 0);
+ //printk(KERN_ERR "eb %p pages is %d, process is %d\n", eb, eb->io_pages, process);
+ spin_unlock(&eb->eb_lock);
+ if (process)
+ process_eb_read(eb);
+
+ if (prev_eb != eb)
+ free_extent_buffer(prev_eb);
+ prev_eb = eb;
+ bvec++;
+ bio_index++;
+ }
+
+ free_extent_buffer(prev_eb);
+ bio_put(bio);
+}
+
+struct extent_buffer *find_eb_for_page(struct page *page)
+{
+ return (struct extent_buffer *)page->private;
}
+/*
static int btree_io_failed_hook(struct bio *failed_bio,
struct page *page, u64 start, u64 end,
int mirror_num, struct extent_state *state)
@@ -665,8 +651,9 @@ static int btree_io_failed_hook(struct bio *failed_bio,
free_extent_buffer(eb);
out:
- return -EIO; /* we fixed nothing */
+ return -EIO; * we fixed nothing
}
+*/
static void end_workqueue_bio(struct bio *bio, int err)
{
@@ -823,10 +810,12 @@ static int btree_csum_one_bio(struct bio *bio)
struct bio_vec *bvec = bio->bi_io_vec;
int bio_index = 0;
struct btrfs_root *root;
+ struct extent_buffer *eb;
WARN_ON(bio->bi_vcnt <= 0);
while (bio_index < bio->bi_vcnt) {
- root = BTRFS_I(bvec->bv_page->mapping->host)->root;
+ eb = (struct extent_buffer *)bvec->bv_page->private;
+ root = eb->root;
csum_dirty_buffer(root, bvec->bv_page);
bio_index++;
bvec++;
@@ -834,38 +823,89 @@ static int btree_csum_one_bio(struct bio *bio)
return 0;
}
-static int __btree_submit_bio_start(struct inode *inode, int rw,
- struct bio *bio, int mirror_num,
- unsigned long bio_flags,
- u64 bio_offset)
+static void __btree_submit_bio_start(struct btrfs_work *work)
{
+ struct async_submit_bio *async;
+
+ async = container_of(work, struct async_submit_bio, work);
/*
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- btree_csum_one_bio(bio);
- return 0;
+ btree_csum_one_bio(async->bio);
}
-static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
- int mirror_num, unsigned long bio_flags,
- u64 bio_offset)
+static void __btree_submit_bio_done(struct btrfs_work *work)
{
+ struct async_submit_bio *async;
+ struct btrfs_fs_info *fs_info;
+ int limit;
+
+ async = container_of(work, struct async_submit_bio, work);
+ fs_info = async->root->fs_info;
+
+ limit = btrfs_async_submit_limit(fs_info);
+ limit = limit * 2 / 3;
+
+ atomic_dec(&fs_info->nr_async_submits);
+
+ if (atomic_read(&fs_info->nr_async_submits) < limit &&
+ waitqueue_active(&fs_info->async_submit_wait))
+ wake_up(&fs_info->async_submit_wait);
+
/*
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
+ btrfs_map_bio(async->root, async->rw, async->bio,
+ async->mirror_num, 1);
+}
+
+int btrfs_wq_btree_submit_bio(struct btrfs_root *root, int rw,
+ struct bio *bio, int mirror_num)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct async_submit_bio *async;
+
+ async = kmalloc(sizeof(*async), GFP_NOFS);
+ if (!async)
+ return -ENOMEM;
+
+ async->root = root;
+ async->rw = rw;
+ async->bio = bio;
+ async->mirror_num = mirror_num;
+
+ async->work.func = __btree_submit_bio_start;
+ async->work.ordered_func = __btree_submit_bio_done;
+ async->work.ordered_free = run_one_async_free;
+
+ async->work.flags = 0;
+
+ atomic_inc(&fs_info->nr_async_submits);
+
+ if (rw & REQ_SYNC)
+ btrfs_set_work_high_prio(&async->work);
+
+ btrfs_queue_worker(&fs_info->workers, &async->work);
+
+ while (atomic_read(&fs_info->async_submit_draining) &&
+ atomic_read(&fs_info->nr_async_submits)) {
+ wait_event(fs_info->async_submit_wait,
+ (atomic_read(&fs_info->nr_async_submits) == 0));
+ }
+
+ return 0;
}
-static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
- int mirror_num, unsigned long bio_flags,
- u64 bio_offset)
+static int btree_submit_bio(struct btrfs_root *root, int rw, struct bio *bio,
+ int mirror_num)
{
int ret;
- ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
- bio, 1);
+ bio_get(bio);
+
+ ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
BUG_ON(ret);
if (!(rw & REQ_WRITE)) {
@@ -873,160 +913,323 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
*/
- return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
- mirror_num, 0);
+ ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
+ } else {
+
+ /*
+ * kthread helpers are used to submit writes so that
+ * checksumming can happen in parallel across all CPUs
+ */
+ ret = btrfs_wq_btree_submit_bio(root, rw, bio, mirror_num);
}
- /*
- * kthread helpers are used to submit writes so that checksumming
- * can happen in parallel across all CPUs
- */
- return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
- inode, rw, bio, mirror_num, 0,
- bio_offset,
- __btree_submit_bio_start,
- __btree_submit_bio_done);
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ bio_put(bio);
+ return ret;
}
-#ifdef CONFIG_MIGRATION
-static int btree_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode)
+static struct bio *eb_setup_bio(struct extent_buffer *eb,
+ bio_end_io_t end_io_func, u64 start,
+ int single_bio)
{
- /*
- * we can't safely write a btree page from here,
- * we haven't done the locking hook
- */
- if (PageDirty(page))
- return -EAGAIN;
- /*
- * Buffers may be managed in a filesystem specific way.
- * We must have no buffers or drop them.
- */
- if (page_has_private(page) &&
- !try_to_release_page(page, GFP_KERNEL))
- return -EAGAIN;
- return migrate_page(mapping, newpage, page, mode);
+ struct block_device *bdev = eb->root->fs_info->fs_devices->latest_bdev;
+ struct bio *bio;
+ int nr_vecs = bio_get_nr_vecs(bdev);
+
+ if (single_bio)
+ nr_vecs = min(nr_vecs, num_extent_pages(eb->start, eb->len));
+
+ bio = btrfs_bio_alloc(bdev, start >> 9, nr_vecs,
+ GFP_NOFS | __GFP_HIGH);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
+ bio->bi_end_io = end_io_func;
+ bio->bi_private = eb;
+ return bio;
}
-#endif
-static int btree_writepage(struct page *page, struct writeback_control *wbc)
+int merge_bio(struct btrfs_root *root, size_t size, struct bio *bio)
{
- struct extent_io_tree *tree;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
+ struct btrfs_mapping_tree *map_tree;
+ u64 logical = (u64)bio->bi_sector << 9;
+ u64 length = 0;
+ u64 map_length;
+ int ret;
+
+ length = bio->bi_size;
+ map_tree = &root->fs_info->mapping_tree;
+ map_length = length;
+ ret = btrfs_map_block(map_tree, READ, logical,
+ &map_length, NULL, 0);
+
+ if (map_length < length + size)
+ return 1;
+ return ret;
+}
+/*
+ * This will handle unlocking the eb if there is an error.
+ */
+static int submit_extent_buffer(int rw, struct extent_buffer *eb,
+ bio_end_io_t end_io_func, struct bio **bio_ret,
+ int mirror_num, int debug)
+{
+ struct btrfs_root *root = eb->root;
+ struct bio *bio = NULL;
+ struct page *page;
+ u64 start = eb->start;
+ unsigned long num_pages;
+ unsigned long i;
+ unsigned long bio_flags = 0;
+ size_t page_size = PAGE_CACHE_SIZE;
+ int submitted = 0;
+ int need_ref = 1;
+ int ret = 0;
+ int single_bio = (bio_ret) ? 0 : 1;
+
+ num_pages = num_extent_pages(eb->start, eb->len);
+ spin_lock(&eb->eb_lock);
+ eb->io_pages = num_pages;
+ //printk(KERN_ERR "doing eb %p with %d num_pages\n", eb, num_pages);
+ spin_unlock(&eb->eb_lock);
+ for (i = 0; i < num_pages; i++) {
+ int new = 0;
+
+ if (!bio && bio_ret && *bio_ret)
+ bio = *bio_ret;
+new_bio:
+ if (!bio) {
+ if (debug)
+ printk(KERN_ERR "allocing new bio for %p\n", eb);
+ new = 1;
+ need_ref = 1;
+ bio = eb_setup_bio(eb, end_io_func, start, single_bio);
+ if (IS_ERR(bio)) {
+ ret = PTR_ERR(bio);
+ bio = NULL;
+ set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ spin_lock(&eb->eb_lock);
+ eb->io_pages -= (num_pages - i);
+ spin_unlock(&eb->eb_lock);
+ break;
+ }
+ }
+ page = extent_buffer_page(eb, i);
+ if ((!new && merge_bio(root, page_size, bio)) ||
+ bio_add_page(bio, page, page_size, 0) < page_size) {
+ if (debug)
+ printk(KERN_ERR "submitting bio for %p\n", eb);
+ ret = btree_submit_bio(root, rw, bio, mirror_num);
+ bio = NULL;
+ submitted++;
+ goto new_bio;
+ }
- if (!(current->flags & PF_MEMALLOC)) {
- return extent_write_full_page(tree, page,
- btree_get_extent, wbc);
+ if (need_ref) {
+ extent_buffer_get(eb);
+ need_ref = 0;
+ }
+ start += page_size;
}
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
+ if (bio_ret)
+ *bio_ret = bio;
+ else {
+ if (debug)
+ printk(KERN_ERR "new submitting bio for %p\n", eb);
+ ret = btree_submit_bio(root, rw, bio, mirror_num);
+ }
+
+ if (ret && !submitted)
+ extent_buffer_iounlock(eb);
+
+ return ret;
}
-static int btree_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
+ int mirror_num)
{
- struct extent_io_tree *tree;
- tree = &BTRFS_I(mapping->host)->io_tree;
- if (wbc->sync_mode == WB_SYNC_NONE) {
- struct btrfs_root *root = BTRFS_I(mapping->host)->root;
- u64 num_dirty;
- unsigned long thresh = 32 * 1024 * 1024;
+ unsigned long i;
+ int err;
+ int ret = 0;
+ int locked_pages = 0;
+ int all_uptodate = 1;
+ unsigned long num_pages;
+ unsigned long num_reads = 0;
+ size_t page_size = PAGE_CACHE_SIZE;
+ int rw = READ;
+// int rw = (wait == WAIT_COMPLETE) ? READ_SYNC : READ;
+ bool submitted = false;
+
+ if (extent_buffer_uptodate(eb))
+ return 0;
- if (wbc->for_kupdate)
+ if (!extent_buffer_tryiolock(eb)) {
+ if (wait == WAIT_NONE)
return 0;
+ extent_buffer_iolock(eb);
+ }
- /* this is a bit racy, but that's ok */
- num_dirty = root->fs_info->dirty_metadata_bytes;
- if (num_dirty < thresh)
- return 0;
+ if (extent_buffer_uptodate(eb)) {
+ extent_buffer_iounlock(eb);
+ goto out;
}
- return extent_writepages(tree, mapping, btree_get_extent, wbc);
-}
-static int btree_readpage(struct file *file, struct page *page)
-{
- struct extent_io_tree *tree;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- return extent_read_full_page(tree, page, btree_get_extent, 0);
+ clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ ret = submit_extent_buffer(rw, eb, eb_read_endio, NULL, mirror_num, 0);
+ if (ret || wait != WAIT_COMPLETE)
+ return ret;
+
+ wait_on_extent_buffer(eb);
+ if (!extent_buffer_uptodate(eb))
+ ret = -EIO;
+out:
+ return ret;
}
-static int btree_releasepage(struct page *page, gfp_t gfp_flags)
+/* Returns 1 if we can write this eb out, 0 if not */
+int write_iolock_eb(struct extent_buffer *eb, int wait, int write_list)
{
- struct extent_io_tree *tree;
- struct extent_map_tree *map;
- struct extent_buffer *eb;
- struct btrfs_root *root;
- int ret;
+ int ret = 0;
- if (PageWriteback(page) || PageDirty(page))
- return 0;
+ if (!extent_buffer_tryiolock(eb)) {
+ if (wait == WAIT_NONE)
+ return ret;
+ extent_buffer_iolock(eb);
+ }
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- map = &BTRFS_I(page->mapping->host)->extent_tree;
+ /* We'd like to avoid taking the tree lock if at all possible */
+ if (!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
+ goto out;
- root = BTRFS_I(page->mapping->host)->root;
- if (page->private == EXTENT_PAGE_PRIVATE) {
- eb = find_eb_for_page(tree, page, max(root->leafsize, root->nodesize));
- free_extent_buffer(eb);
- if (eb)
- return 0;
+ btrfs_tree_lock(eb);
+
+ /*
+ * We need this to make sure that if a buffer is written out in a
+ * transaction and then we need to modify it in the same transaction
+ * again we know that we need to re-cow it.
+ */
+ if (clear_extent_buffer_dirty(eb)) {
+ struct btrfs_fs_info *fs_info = eb->root->fs_info;
+
+ btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+ spin_lock(&fs_info->delalloc_lock);
+ if (fs_info->dirty_metadata_bytes >= eb->len)
+ fs_info->dirty_metadata_bytes -= eb->len;
+ else
+ WARN_ON(1);
+ spin_unlock(&fs_info->delalloc_lock);
+ ret = 1;
+// printk(KERN_ERR "writing buf %Lu, level %d\n", eb->start, btrfs_header_level(eb));
+ atomic_inc(&eb->root->fs_info->inflight_ebs);
+ }
+
+ btrfs_tree_unlock(eb);
+out:
+ if (write_list) {
+ spin_lock(&eb->root->fs_info->eb_dirty_lock);
+ if (test_and_clear_bit(EXTENT_BUFFER_WRITE, &eb->bflags))
+ list_del_init(&eb->dirty_list);
+ spin_unlock(&eb->root->fs_info->eb_dirty_lock);
}
- ret = try_release_extent_state(map, tree, page, gfp_flags);
if (!ret)
+ extent_buffer_iounlock(eb);
+
+ return ret;
+}
+
+static int __write_one_extent_buffer(struct extent_buffer *eb, int wait,
+ int mirror_num, int debug)
+{
+// int rw = (wait == WAIT_COMPLETE) ? WRITE_SYNC : WRITE;
+ int rw = WRITE;
+ int ret = 0;
+
+ if (!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
return 0;
- ret = try_release_extent_buffer(tree, page);
- if (ret == 1) {
- ClearPagePrivate(page);
- set_page_private(page, 0);
- page_cache_release(page);
- }
+ if (!write_iolock_eb(eb, wait, 0))
+ return 0;
+
+ clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ ret = submit_extent_buffer(rw, eb, eb_write_endio, NULL, mirror_num, debug);
+
+ if (ret || wait != WAIT_COMPLETE)
+ return ret;
+
+ wait_on_extent_buffer(eb);
+ if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags))
+ ret = -EIO;
return ret;
}
-static void btree_invalidatepage(struct page *page, unsigned long offset)
+int write_one_extent_buffer(struct extent_buffer *eb, int wait,
+ int mirror_num)
{
- struct extent_io_tree *tree;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- extent_invalidatepage(tree, page, offset);
- btree_releasepage(page, GFP_NOFS);
- if (PagePrivate(page)) {
- printk(KERN_WARNING "btrfs warning page private not zero "
- "on page %llu\n", (unsigned long long)page_offset(page));
- ClearPagePrivate(page);
- set_page_private(page, 0);
- page_cache_release(page);
- }
-}
-
-static const struct address_space_operations btree_aops = {
- .readpage = btree_readpage,
- .writepage = btree_writepage,
- .writepages = btree_writepages,
- .releasepage = btree_releasepage,
- .invalidatepage = btree_invalidatepage,
-#ifdef CONFIG_MIGRATION
- .migratepage = btree_migratepage,
-#endif
-};
+ return __write_one_extent_buffer(eb, wait, mirror_num, 0);
+}
+
+int write_extent_buffer_range(struct btrfs_root *root, u64 start,
+ u64 end, int wait)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_buffer *eb;
+ struct bio *bio = NULL;
+ int rw = WRITE_SYNC;
+ int ret = 0;
+ int submit;
+
+ while (start < end && !ret) {
+ submit = 0;
+
+ eb = find_extent_buffer_no_ref(fs_info, start);
+ if (!eb) {
+ start += PAGE_CACHE_SIZE;
+ if (bio) {
+ ret = btree_submit_bio(root, rw, bio, 0);
+ bio = NULL;
+ }
+ continue;
+ }
+
+ if (eb->root != eb->root->fs_info->extent_root)
+ set_bit(EXTENT_BUFFER_REFERENCED, &eb->bflags);
+ if (!write_iolock_eb(eb, wait, 0)) {
+ submit = 1;
+ goto next;
+ }
+
+ clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ ret = submit_extent_buffer(rw, eb, eb_write_endio, &bio, 0, 0);
+next:
+ if (submit && bio) {
+ ret = btree_submit_bio(root, rw, bio, 0);
+ bio = NULL;
+ }
+ start = eb->start + eb->len;
+ free_extent_buffer(eb);
+ cond_resched();
+ }
+
+ if (bio)
+ ret = btree_submit_bio(root, rw, bio, 0);
+
+ return ret;
+}
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
u64 parent_transid)
{
struct extent_buffer *buf = NULL;
- struct inode *btree_inode = root->fs_info->btree_inode;
int ret = 0;
buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
if (!buf)
return 0;
- read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
- buf, 0, WAIT_NONE, btree_get_extent, 0);
+ read_extent_buffer_pages(buf, WAIT_NONE, 0);
free_extent_buffer(buf);
return ret;
}
@@ -1035,8 +1238,6 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
int mirror_num, struct extent_buffer **eb)
{
struct extent_buffer *buf = NULL;
- struct inode *btree_inode = root->fs_info->btree_inode;
- struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
int ret;
buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
@@ -1045,8 +1246,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
- ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
- btree_get_extent, mirror_num);
+ ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num);
if (ret) {
free_extent_buffer(buf);
return ret;
@@ -1055,7 +1255,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
free_extent_buffer(buf);
return -EIO;
- } else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
+ } else if (extent_buffer_uptodate(buf)) {
*eb = buf;
} else {
free_extent_buffer(buf);
@@ -1066,36 +1266,15 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize)
{
- struct inode *btree_inode = root->fs_info->btree_inode;
- struct extent_buffer *eb;
- eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
- bytenr, blocksize);
- return eb;
+ return find_extent_buffer(root->fs_info, bytenr);
}
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize)
{
- struct inode *btree_inode = root->fs_info->btree_inode;
- struct extent_buffer *eb;
-
- eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
- bytenr, blocksize);
- return eb;
-}
-
-
-int btrfs_write_tree_block(struct extent_buffer *buf)
-{
- return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
- buf->start + buf->len - 1);
+ return alloc_extent_buffer(root, bytenr, blocksize);
}
-int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
-{
- return filemap_fdatawait_range(buf->pages[0]->mapping,
- buf->start, buf->start + buf->len - 1);
-}
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
u32 blocksize, u64 parent_transid)
@@ -1108,9 +1287,6 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
return NULL;
ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
-
- if (ret == 0)
- set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
return buf;
}
@@ -1118,12 +1294,11 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf)
{
- struct inode *btree_inode = root->fs_info->btree_inode;
if (btrfs_header_generation(buf) ==
root->fs_info->running_transaction->transid) {
btrfs_assert_tree_locked(buf);
- if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
+ if (clear_extent_buffer_dirty(buf)) {
spin_lock(&root->fs_info->delalloc_lock);
if (root->fs_info->dirty_metadata_bytes >= buf->len)
root->fs_info->dirty_metadata_bytes -= buf->len;
@@ -1131,11 +1306,6 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
WARN_ON(1);
spin_unlock(&root->fs_info->delalloc_lock);
}
-
- /* ugh, clear_extent_buffer_dirty needs to lock the page */
- btrfs_set_lock_blocking(buf);
- clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
- buf);
}
return 0;
}
@@ -1183,8 +1353,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->log_batch = 0;
root->log_transid = 0;
root->last_log_commit = 0;
- extent_io_tree_init(&root->dirty_log_pages,
- fs_info->btree_inode->i_mapping);
+ extent_io_tree_init(&root->dirty_log_pages, NULL);
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
@@ -1541,6 +1710,168 @@ static void end_workqueue_fn(struct btrfs_work *work)
bio_endio(bio, error);
}
+/*
+ * Must already be holding ref's to all the eb's in the list and must be in
+ * sequential order.
+ */
+static void write_eb_list(struct list_head *list, int wait)
+{
+ struct extent_buffer *eb, *prev_eb = NULL;
+ struct bio *bio = NULL;
+ struct btrfs_root *root = NULL;
+// int rw = (wait == WAIT_COMPLETE) ? WRITE_SYNC : WRITE;
+ int rw = WRITE;
+ int orig_wait = wait;
+ int submit;
+
+ while (!list_empty(list)) {
+ submit = 0;
+ eb = list_first_entry(list, struct extent_buffer, dirty_list);
+ root = eb->root;
+
+ /*
+ * If we couldn't get the lock last time lets go ahead and wait
+ * for the lock this time so we don't end up looping on the same
+ * eb over and over again.
+ */
+ if (eb == prev_eb && wait == WAIT_NONE)
+ wait = WAIT_PAGE_LOCK;
+
+ if (!write_iolock_eb(eb, wait, 1)) {
+ submit = 1;
+ goto next;
+ }
+
+ clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ submit_extent_buffer(rw, eb, eb_write_endio, &bio, 0, 0);
+next:
+ if (submit && bio) {
+ btree_submit_bio(root, rw, bio, 0);
+ if (wait == WAIT_COMPLETE)
+ wait_on_extent_buffer(prev_eb);
+ bio = NULL;
+ }
+
+ /*
+ * If we can't get the io lock on the eb we could loop on it
+ * twice, so we need to make sure not to free it until we're
+ * sure we either don't need it anymore or it's on a bio.
+ */
+ if (prev_eb != eb)
+ free_extent_buffer(prev_eb);
+ else
+ printk(KERN_ERR "this would have bitten us\n");
+ prev_eb = eb;
+ cond_resched();
+ wait = orig_wait;
+ }
+
+ if (bio)
+ btree_submit_bio(root, rw, bio, 0);
+
+ if (wait == WAIT_COMPLETE && prev_eb)
+ wait_on_extent_buffer(prev_eb);
+ free_extent_buffer(prev_eb);
+}
+
+static int write_dirty_list_nr(struct btrfs_fs_info *fs_info, int wait, int nr)
+{
+ struct extent_buffer *eb;
+ struct list_head *process_list;
+ LIST_HEAD(my_list);
+ u64 start = 0;
+ int count = 0;
+ int list_switch = 0;
+ int written = 0;
+ int orig = nr;
+
+// nr = max(nr, 32);
+ process_list = &fs_info->dirty_leaves;
+ WARN_ON(in_interrupt());
+again:
+ spin_lock(&fs_info->eb_dirty_lock);
+ while (!list_empty(process_list) && nr-- >= 0) {
+ eb = list_first_entry(process_list,
+ struct extent_buffer,
+ dirty_list);
+ if ((!start || eb->start == start) && count < 128) {
+ start = eb->start + eb->len;
+ extent_buffer_get(eb);
+ set_bit(EXTENT_BUFFER_WRITE, &eb->bflags);
+ list_move_tail(&eb->dirty_list, &my_list);
+ count++;
+ written++;
+ continue;
+ }
+ spin_unlock(&fs_info->eb_dirty_lock);
+ write_eb_list(&my_list, wait);
+ spin_lock(&fs_info->eb_dirty_lock);
+ start = 0;
+ count = 0;
+ }
+ spin_unlock(&fs_info->eb_dirty_lock);
+
+ if (!list_empty(&my_list)) {
+// printk(KERN_ERR "Would have leaked %d eb's\n", count);
+ write_eb_list(&my_list, wait);
+ }
+
+ if (nr > 0 && !list_switch) {
+ start = 0;
+ list_switch = 1;
+ printk(KERN_ERR "writing out nodes? still have %d to write out, orig was %d\n", nr, orig);
+ process_list = &fs_info->dirty_nodes;
+ goto again;
+ }
+
+ return written;
+}
+
+#define EB_WRITE_CHUNK 16 * 1024 * 1024
+static int metadata_flusher_kthread(void *arg)
+{
+ struct btrfs_root *root = arg;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u64 thresh = 64 * 1024 * 1024;
+ int write_nr = div64_u64(EB_WRITE_CHUNK, root->leafsize);
+
+ do {
+ int written = 0;
+ int wait = WAIT_NONE;
+
+ vfs_check_frozen(fs_info->sb, SB_FREEZE_WRITE);
+
+ mutex_lock(&fs_info->metadata_flusher_mutex);
+
+ if (fs_info->dirty_metadata_bytes < thresh)
+ goto sleep;
+ written = write_dirty_list_nr(fs_info, wait, write_nr);
+sleep:
+ mutex_unlock(&fs_info->metadata_flusher_mutex);
+
+ /*
+ * If we wrote the entire chunk we can just end up flooding the
+ * system with eb's in IO and not making any progress at
+ * reducing memory pressure, so if we wrote out as much or more
+ * than what we wanted to wait for inflight to go down some and
+ * then try and write out some more.
+ */
+ if (written) {
+ wait_event(fs_info->writeback_ebs, atomic_read(&fs_info->inflight_ebs) <
+ atomic_read(&fs_info->dirty_ebs));
+ continue;
+ }
+
+ if (!try_to_freeze()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop())
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+ } while (!kthread_should_stop());
+ return 0;
+}
+
static int cleaner_kthread(void *arg)
{
struct btrfs_root *root = arg;
@@ -1900,15 +2231,8 @@ int open_ctree(struct super_block *sb,
goto fail_srcu;
}
- fs_info->btree_inode = new_inode(sb);
- if (!fs_info->btree_inode) {
- err = -ENOMEM;
- goto fail_bdi;
- }
-
- mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
-
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
+ INIT_RADIX_TREE(&fs_info->eb_tree, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->delayed_iputs);
@@ -1916,6 +2240,9 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
INIT_LIST_HEAD(&fs_info->ordered_operations);
INIT_LIST_HEAD(&fs_info->caching_block_groups);
+ INIT_LIST_HEAD(&fs_info->eb_lru);
+ INIT_LIST_HEAD(&fs_info->dirty_leaves);
+ INIT_LIST_HEAD(&fs_info->dirty_nodes);
spin_lock_init(&fs_info->delalloc_lock);
spin_lock_init(&fs_info->trans_lock);
spin_lock_init(&fs_info->ref_cache_lock);
@@ -1923,6 +2250,9 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->delayed_iput_lock);
spin_lock_init(&fs_info->defrag_inodes_lock);
spin_lock_init(&fs_info->free_chunk_lock);
+ spin_lock_init(&fs_info->eb_tree_lock);
+ spin_lock_init(&fs_info->eb_lru_lock);
+ spin_lock_init(&fs_info->eb_dirty_lock);
mutex_init(&fs_info->reloc_mutex);
init_completion(&fs_info->kobj_unregister);
@@ -1940,12 +2270,19 @@ int open_ctree(struct super_block *sb,
atomic_set(&fs_info->async_submit_draining, 0);
atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->defrag_running, 0);
+ atomic_set(&fs_info->nr_ebs, 0);
+ atomic_set(&fs_info->dirty_ebs, 0);
+ atomic_set(&fs_info->inflight_ebs, 0);
fs_info->sb = sb;
fs_info->max_inline = 8192 * 1024;
fs_info->metadata_ratio = 0;
fs_info->defrag_inodes = RB_ROOT;
fs_info->trans_no_join = 0;
fs_info->free_chunk_space = 0;
+ fs_info->eb_lru_nr = 0;
+ fs_info->eb_shrinker.seeks = 1;
+// fs_info->eb_shrinker.batch = 256;
+ fs_info->eb_shrinker.shrink = shrink_ebs;
/* readahead state */
INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
@@ -1988,37 +2325,11 @@ int open_ctree(struct super_block *sb,
sb->s_blocksize_bits = blksize_bits(4096);
sb->s_bdi = &fs_info->bdi;
- fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
- set_nlink(fs_info->btree_inode, 1);
- /*
- * we set the i_size on the btree inode to the max possible int.
- * the real end of the address space is determined by all of
- * the devices in the system
- */
- fs_info->btree_inode->i_size = OFFSET_MAX;
- fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
- fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
-
- RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
- extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
- fs_info->btree_inode->i_mapping);
- extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
-
- BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
-
- BTRFS_I(fs_info->btree_inode)->root = tree_root;
- memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
- sizeof(struct btrfs_key));
- BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
- insert_inode_hash(fs_info->btree_inode);
-
spin_lock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT;
- extent_io_tree_init(&fs_info->freed_extents[0],
- fs_info->btree_inode->i_mapping);
- extent_io_tree_init(&fs_info->freed_extents[1],
- fs_info->btree_inode->i_mapping);
+ extent_io_tree_init(&fs_info->freed_extents[0], NULL);
+ extent_io_tree_init(&fs_info->freed_extents[1], NULL);
fs_info->pinned_extents = &fs_info->freed_extents[0];
fs_info->do_barriers = 1;
@@ -2029,6 +2340,7 @@ int open_ctree(struct super_block *sb,
mutex_init(&fs_info->transaction_kthread_mutex);
mutex_init(&fs_info->cleaner_mutex);
mutex_init(&fs_info->volume_mutex);
+ mutex_init(&fs_info->metadata_flusher_mutex);
init_rwsem(&fs_info->extent_commit_sem);
init_rwsem(&fs_info->cleanup_work_sem);
init_rwsem(&fs_info->subvol_sem);
@@ -2040,6 +2352,7 @@ int open_ctree(struct super_block *sb,
init_waitqueue_head(&fs_info->transaction_wait);
init_waitqueue_head(&fs_info->transaction_blocked_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
+ init_waitqueue_head(&fs_info->writeback_ebs);
__setup_root(4096, 4096, 4096, 4096, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID);
@@ -2176,6 +2489,8 @@ int open_ctree(struct super_block *sb,
fs_info->endio_meta_write_workers.idle_thresh = 2;
fs_info->readahead_workers.idle_thresh = 2;
+ register_shrinker(&fs_info->eb_shrinker);
+
/*
* btrfs_start_workers can really only fail because of ENOMEM so just
* return -ENOMEM if any of these fail.
@@ -2325,6 +2640,12 @@ retry_root_backup:
if (IS_ERR(fs_info->transaction_kthread))
goto fail_cleaner;
+ fs_info->metadata_flusher_kthread =
+ kthread_run(metadata_flusher_kthread, tree_root,
+ "btrfs-metadata-flusher");
+ if (IS_ERR(fs_info->metadata_flusher_kthread))
+ goto fail_trans_kthread;
+
if (!btrfs_test_opt(tree_root, SSD) &&
!btrfs_test_opt(tree_root, NOSSD) &&
!fs_info->fs_devices->rotating) {
@@ -2355,7 +2676,7 @@ retry_root_backup:
printk(KERN_WARNING "Btrfs log replay required "
"on RO media\n");
err = -EIO;
- goto fail_trans_kthread;
+ goto fail_flusher_kthread;
}
blocksize =
btrfs_level_size(tree_root,
@@ -2364,7 +2685,7 @@ retry_root_backup:
log_tree_root = btrfs_alloc_root(fs_info);
if (!log_tree_root) {
err = -ENOMEM;
- goto fail_trans_kthread;
+ goto fail_flusher_kthread;
}
__setup_root(nodesize, leafsize, sectorsize, stripesize,
@@ -2394,7 +2715,7 @@ retry_root_backup:
printk(KERN_WARNING
"btrfs: failed to recover relocation\n");
err = -EINVAL;
- goto fail_trans_kthread;
+ goto fail_flusher_kthread;
}
}
@@ -2404,10 +2725,10 @@ retry_root_backup:
fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
if (!fs_info->fs_root)
- goto fail_trans_kthread;
+ goto fail_flusher_kthread;
if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root);
- goto fail_trans_kthread;
+ goto fail_flusher_kthread;
}
if (!(sb->s_flags & MS_RDONLY)) {
@@ -2428,18 +2749,13 @@ retry_root_backup:
return 0;
+fail_flusher_kthread:
+ kthread_stop(fs_info->metadata_flusher_kthread);
fail_trans_kthread:
kthread_stop(fs_info->transaction_kthread);
fail_cleaner:
kthread_stop(fs_info->cleaner_kthread);
- /*
- * make sure we're done with the btree inode before we stop our
- * kthreads
- */
- filemap_write_and_wait(fs_info->btree_inode->i_mapping);
- invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
-
fail_block_groups:
btrfs_free_block_groups(fs_info);
@@ -2447,6 +2763,7 @@ fail_tree_roots:
free_root_pointers(fs_info, 1);
fail_sb_buffer:
+ unregister_shrinker(&fs_info->eb_shrinker);
btrfs_stop_workers(&fs_info->generic_worker);
btrfs_stop_workers(&fs_info->readahead_workers);
btrfs_stop_workers(&fs_info->fixup_workers);
@@ -2464,9 +2781,6 @@ fail_alloc:
fail_iput:
btrfs_mapping_tree_free(&fs_info->mapping_tree);
- invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
- iput(fs_info->btree_inode);
-fail_bdi:
bdi_destroy(&fs_info->bdi);
fail_srcu:
cleanup_srcu_struct(&fs_info->subvol_srcu);
@@ -3009,6 +3323,7 @@ int close_ctree(struct btrfs_root *root)
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
+ kthread_stop(fs_info->metadata_flusher_kthread);
fs_info->closing = 2;
smp_mb();
@@ -3037,8 +3352,6 @@ int close_ctree(struct btrfs_root *root)
del_fs_roots(fs_info);
- iput(fs_info->btree_inode);
-
btrfs_stop_workers(&fs_info->generic_worker);
btrfs_stop_workers(&fs_info->fixup_workers);
btrfs_stop_workers(&fs_info->delalloc_workers);
@@ -3060,6 +3373,8 @@ int close_ctree(struct btrfs_root *root)
btrfs_close_devices(fs_info->fs_devices);
btrfs_mapping_tree_free(&fs_info->mapping_tree);
+ unregister_shrinker(&fs_info->eb_shrinker);
+ btrfs_destroy_eb_cache(fs_info);
bdi_destroy(&fs_info->bdi);
cleanup_srcu_struct(&fs_info->subvol_srcu);
@@ -3070,71 +3385,44 @@ int close_ctree(struct btrfs_root *root)
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
int ret;
- struct inode *btree_inode = buf->pages[0]->mapping->host;
- ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
- NULL);
+ ret = extent_buffer_uptodate(buf);
if (!ret)
return ret;
- ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
- parent_transid);
+ ret = verify_parent_transid(buf, parent_transid);
return !ret;
}
-int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
-{
- struct inode *btree_inode = buf->pages[0]->mapping->host;
- return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
- buf);
-}
-
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
- struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+ struct btrfs_fs_info *fs_info = buf->root->fs_info;
u64 transid = btrfs_header_generation(buf);
- struct inode *btree_inode = root->fs_info->btree_inode;
int was_dirty;
+ BUG_ON(!buf);
btrfs_assert_tree_locked(buf);
- if (transid != root->fs_info->generation) {
+ if (transid != fs_info->generation) {
printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
"found %llu running %llu\n",
(unsigned long long)buf->start,
(unsigned long long)transid,
- (unsigned long long)root->fs_info->generation);
+ (unsigned long long)fs_info->generation);
WARN_ON(1);
}
- was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
- buf);
+ was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty) {
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->dirty_metadata_bytes += buf->len;
- spin_unlock(&root->fs_info->delalloc_lock);
+ spin_lock(&fs_info->delalloc_lock);
+ fs_info->dirty_metadata_bytes += buf->len;
+ spin_unlock(&fs_info->delalloc_lock);
}
}
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
- /*
- * looks as though older kernels can get into trouble with
- * this code, they end up stuck in balance_dirty_pages forever
- */
- u64 num_dirty;
- unsigned long thresh = 32 * 1024 * 1024;
-
- if (current->flags & PF_MEMALLOC)
- return;
-
btrfs_balance_delayed_items(root);
- num_dirty = root->fs_info->dirty_metadata_bytes;
-
- if (num_dirty > thresh) {
- balance_dirty_pages_ratelimited_nr(
- root->fs_info->btree_inode->i_mapping, 1);
- }
- return;
+ __btrfs_btree_balance_dirty(root, nr);
}
void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
@@ -3144,71 +3432,37 @@ void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
* this code, they end up stuck in balance_dirty_pages forever
*/
u64 num_dirty;
- unsigned long thresh = 32 * 1024 * 1024;
+ int nr_writeback;
+ int nr_dirty;
+ unsigned long thresh = 64 * 1024 * 1024;
+
+ num_dirty = root->fs_info->dirty_metadata_bytes;
+ nr_writeback = atomic_read(&root->fs_info->inflight_ebs);
+ nr_dirty = atomic_read(&root->fs_info->dirty_ebs);
+
+ if (num_dirty > thresh)
+ wake_up_process(root->fs_info->metadata_flusher_kthread);
- if (current->flags & PF_MEMALLOC)
+ if (num_dirty < thresh * 2)
return;
- num_dirty = root->fs_info->dirty_metadata_bytes;
+ if (nr_writeback > nr_dirty) {
+ wait_event_killable(root->fs_info->writeback_ebs, atomic_read(&root->fs_info->inflight_ebs) <
+ atomic_read(&root->fs_info->dirty_ebs));
+ return;
+ }
- if (num_dirty > thresh) {
- balance_dirty_pages_ratelimited_nr(
- root->fs_info->btree_inode->i_mapping, 1);
+ if (num_dirty >= thresh * 4) {
+ wait_event_killable(root->fs_info->writeback_ebs, atomic_read(&root->fs_info->dirty_ebs) <
+ thresh * 4);
}
- return;
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
- struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
- int ret;
- ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
- if (ret == 0)
- set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
- return ret;
-}
-
-static int btree_lock_page_hook(struct page *page, void *data,
- void (*flush_fn)(void *))
-{
- struct inode *inode = page->mapping->host;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct extent_buffer *eb;
- unsigned long len;
- u64 bytenr = page_offset(page);
-
- if (page->private == EXTENT_PAGE_PRIVATE)
- goto out;
-
- len = page->private >> 2;
- eb = find_extent_buffer(io_tree, bytenr, len);
- if (!eb)
- goto out;
+ struct btrfs_root *root = buf->root;
- if (!btrfs_try_tree_write_lock(eb)) {
- flush_fn(data);
- btrfs_tree_lock(eb);
- }
- btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
-
- if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
- spin_lock(&root->fs_info->delalloc_lock);
- if (root->fs_info->dirty_metadata_bytes >= eb->len)
- root->fs_info->dirty_metadata_bytes -= eb->len;
- else
- WARN_ON(1);
- spin_unlock(&root->fs_info->delalloc_lock);
- }
-
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
-out:
- if (!trylock_page(page)) {
- flush_fn(data);
- lock_page(page);
- }
- return 0;
+ return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@@ -3407,12 +3661,10 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
{
int ret;
struct page *page;
- struct inode *btree_inode = root->fs_info->btree_inode;
struct extent_buffer *eb;
u64 start = 0;
u64 end;
u64 offset;
- unsigned long index;
while (1) {
ret = find_first_extent_bit(dirty_pages, start, &start, &end,
@@ -3422,38 +3674,16 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
while (start <= end) {
- index = start >> PAGE_CACHE_SHIFT;
- start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
- page = find_get_page(btree_inode->i_mapping, index);
- if (!page)
+ eb = find_extent_buffer(root->fs_info, start);
+ if (!eb) {
+ start += 1 << PAGE_CACHE_SHIFT;
continue;
- offset = page_offset(page);
-
- spin_lock(&dirty_pages->buffer_lock);
- eb = radix_tree_lookup(
- &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
- offset >> PAGE_CACHE_SHIFT);
- spin_unlock(&dirty_pages->buffer_lock);
- if (eb) {
- ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
- &eb->bflags);
- atomic_set(&eb->refs, 1);
- }
- if (PageWriteback(page))
- end_page_writeback(page);
-
- lock_page(page);
- if (PageDirty(page)) {
- clear_page_dirty_for_io(page);
- spin_lock_irq(&page->mapping->tree_lock);
- radix_tree_tag_clear(&page->mapping->page_tree,
- page_index(page),
- PAGECACHE_TAG_DIRTY);
- spin_unlock_irq(&page->mapping->tree_lock);
}
-
- page->mapping->a_ops->invalidatepage(page, 0);
- unlock_page(page);
+ clear_extent_buffer_dirty(eb);
+ spin_lock(&eb->eb_lock);
+ eb->refs = 1;
+ spin_unlock(&eb->eb_lock);
+ free_extent_buffer(eb);
}
}
@@ -3559,12 +3789,3 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
return 0;
}
-
-static struct extent_io_ops btree_extent_io_ops = {
- .write_cache_pages_lock_hook = btree_lock_page_hook,
- .readpage_end_io_hook = btree_readpage_end_io_hook,
- .readpage_io_failed_hook = btree_io_failed_hook,
- .submit_bio_hook = btree_submit_bio_hook,
- /* note we're sharing with inode.c for the merge bio hook */
- .merge_bio_hook = btrfs_merge_bio_hook,
-};
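
The rewritten __btrfs_btree_balance_dirty() above stops calling into balance_dirty_pages and throttles against the filesystem's own counters instead: wake the metadata flusher past 64MB of dirty metadata, return below twice that, wait for in-flight writeback to catch up when it outnumbers dirty buffers, and block outright past four times the threshold. A minimal userspace sketch of that decision ladder follows; the names and the stub wake_flusher() are illustrative only, not the kernel API.

#include <stdio.h>

enum throttle { NONE, WAIT_FOR_WRITEBACK, WAIT_FOR_DIRTY };

static void wake_flusher(void)
{
	/* stands in for wake_up_process(fs_info->metadata_flusher_kthread) */
}

static enum throttle balance_dirty(unsigned long long dirty_bytes,
				   int inflight_ebs, int dirty_ebs)
{
	const unsigned long long thresh = 64ULL * 1024 * 1024;

	if (dirty_bytes > thresh)
		wake_flusher();			/* always kick background writeback */
	if (dirty_bytes < thresh * 2)
		return NONE;			/* light load: caller carries on */
	if (inflight_ebs > dirty_ebs)
		return WAIT_FOR_WRITEBACK;	/* throttle on IO completions */
	if (dirty_bytes >= thresh * 4)
		return WAIT_FOR_DIRTY;		/* block until dirty buffers drain */
	return NONE;
}

int main(void)
{
	printf("%d %d %d\n",
	       balance_dirty(32ULL << 20, 0, 10),	/* below thresh: 0 */
	       balance_dirty(200ULL << 20, 50, 10),	/* writeback ahead: 1 */
	       balance_dirty(300ULL << 20, 10, 50));	/* hard wait: 2 */
	return 0;
}
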
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e4bc4741319bd3..21e1aedfc624bf 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -67,7 +67,6 @@ void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
-int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
@@ -79,8 +78,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
extent_submit_bio_hook_t *submit_bio_start,
extent_submit_bio_hook_t *submit_bio_done);
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
-int btrfs_write_tree_block(struct extent_buffer *buf);
-int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e5111d53679b0a..c480ceb8918950 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5012,9 +5012,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
BUG_ON(ret);
} else {
- invalidate_mapping_pages(info->btree_inode->i_mapping,
- bytenr >> PAGE_CACHE_SHIFT,
- (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
+// invalidate_mapping_pages(info->btree_inode->i_mapping,
+// bytenr >> PAGE_CACHE_SHIFT,
+// (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
}
ret = update_block_group(trans, root, bytenr, num_bytes, 0);
@@ -5132,10 +5132,10 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
goto out;
}
- if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+// if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
pin_down_extent(root, cache, buf->start, buf->len, 1);
goto out;
- }
+// }
WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
@@ -6013,7 +6013,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
clean_tree_block(trans, root, buf);
btrfs_set_lock_blocking(buf);
- btrfs_set_buffer_uptodate(buf);
+ set_extent_buffer_uptodate(buf);
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
/*
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d4795fc651565b..a8ecc5ed5e0437 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -12,6 +12,7 @@
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
+#include <linux/ratelimit.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
@@ -33,6 +34,9 @@ static DEFINE_SPINLOCK(leak_lock);
#define BUFFER_LRU_MAX 64
+static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head);
+static int release_extent_buffer(struct extent_buffer *eb);
+
struct tree_entry {
u64 start;
u64 end;
@@ -60,10 +64,14 @@ int __init extent_io_init(void)
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!extent_state_cache)
return -ENOMEM;
-
extent_buffer_cache = kmem_cache_create("extent_buffers",
sizeof(struct extent_buffer), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+/*
+ extent_buffer_cache = kmem_cache_create("extent_buffers",
+ sizeof(struct extent_buffer), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER, NULL);
+*/
if (!extent_buffer_cache)
goto free_state_cache;
return 0;
@@ -94,7 +102,7 @@ void extent_io_exit(void)
eb = list_entry(buffers.next, struct extent_buffer, leak_list);
printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
"refs %d\n", (unsigned long long)eb->start,
- eb->len, atomic_read(&eb->refs));
+ eb->len, eb->refs);
list_del(&eb->leak_list);
kmem_cache_free(extent_buffer_cache, eb);
}
@@ -2471,12 +2479,6 @@ void set_page_extent_mapped(struct page *page)
}
}
-static void set_page_extent_head(struct page *page, unsigned long len)
-{
- WARN_ON(!PagePrivate(page));
- set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
-}
-
/*
* basic readpage implementation. Locked extent state structs are inserted
* into the tree that are removed when the IO is done (by the end_io
@@ -3539,7 +3541,7 @@ inline unsigned long num_extent_pages(u64 start, u64 len)
(start >> PAGE_CACHE_SHIFT);
}
-static void __free_extent_buffer(struct extent_buffer *eb)
+static void __free_extent_buffer(struct extent_buffer *eb, int count)
{
#if LEAK_DEBUG
unsigned long flags;
@@ -3547,12 +3549,13 @@ static void __free_extent_buffer(struct extent_buffer *eb)
list_del(&eb->leak_list);
spin_unlock_irqrestore(&leak_lock, flags);
#endif
+ BUG_ON(test_bit(EXTENT_BUFFER_WRITE, &eb->bflags));
if (eb->pages && eb->pages != eb->inline_pages)
kfree(eb->pages);
kmem_cache_free(extent_buffer_cache, eb);
}
-static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
+static struct extent_buffer *__alloc_extent_buffer(struct btrfs_root *root,
u64 start,
unsigned long len,
gfp_t mask)
@@ -3565,6 +3568,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
eb = kmem_cache_zalloc(extent_buffer_cache, mask);
if (eb == NULL)
return NULL;
+// printk(KERN_ERR "allocing %Lu\n", start);
eb->start = start;
eb->len = len;
rwlock_init(&eb->lock);
@@ -3575,16 +3579,22 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
atomic_set(&eb->spinning_readers, 0);
atomic_set(&eb->spinning_writers, 0);
eb->lock_nested = 0;
+ eb->root = root;
+ BUG_ON(!eb->root);
+ BUG_ON(!eb->root->fs_info);
init_waitqueue_head(&eb->write_lock_wq);
init_waitqueue_head(&eb->read_lock_wq);
+ spin_lock_init(&eb->eb_lock);
#if LEAK_DEBUG
spin_lock_irqsave(&leak_lock, flags);
list_add(&eb->leak_list, &buffers);
spin_unlock_irqrestore(&leak_lock, flags);
#endif
- atomic_set(&eb->refs, 1);
- atomic_set(&eb->pages_reading, 0);
+ eb->refs = 1;
+ eb->io_pages = 0;
+ INIT_LIST_HEAD(&eb->lru);
+ INIT_LIST_HEAD(&eb->dirty_list);
if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
struct page **pages;
@@ -3592,7 +3602,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
PAGE_CACHE_SHIFT;
pages = kzalloc(num_pages, mask);
if (!pages) {
- __free_extent_buffer(eb);
+ __free_extent_buffer(eb, 0);
return NULL;
}
eb->pages = pages;
@@ -3600,6 +3610,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
eb->pages = eb->inline_pages;
}
+ atomic_inc(&root->fs_info->nr_ebs);
return eb;
}
@@ -3619,257 +3630,425 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
do {
index--;
page = extent_buffer_page(eb, index);
- if (page)
- page_cache_release(page);
+ if (page) {
+// printk(KERN_ERR "freeing page\n");
+ BUG_ON(atomic_read(&page->_count) != 1);
+ __free_page(page);
+ }
} while (index != start_idx);
}
/*
* Helper for releasing the extent buffer.
*/
-static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
+static inline void btrfs_release_extent_buffer(struct extent_buffer *eb, int count)
{
+// printk(KERN_ERR "actually freeing %p\n", eb);
btrfs_release_extent_buffer_page(eb, 0);
- __free_extent_buffer(eb);
+ __free_extent_buffer(eb, count);
}
-struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+static inline int get_extent_buffer_if_nonzero(struct extent_buffer *eb)
+{
+ int ret = 1;
+
+ spin_lock(&eb->eb_lock);
+ if (eb->refs)
+ eb->refs++;
+ else
+ ret = 0;
+ spin_unlock(&eb->eb_lock);
+
+ return ret;
+}
+
+struct extent_buffer *alloc_extent_buffer(struct btrfs_root *root,
u64 start, unsigned long len)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
unsigned long num_pages = num_extent_pages(start, len);
unsigned long i;
unsigned long index = start >> PAGE_CACHE_SHIFT;
+ unsigned long flags;
struct extent_buffer *eb;
struct extent_buffer *exists = NULL;
struct page *p;
- struct address_space *mapping = tree->mapping;
- int uptodate = 1;
int ret;
-
+again:
rcu_read_lock();
- eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
- if (eb && atomic_inc_not_zero(&eb->refs)) {
+ eb = radix_tree_lookup(&fs_info->eb_tree, start >> PAGE_CACHE_SHIFT);
+ if (eb && get_extent_buffer_if_nonzero(eb)) {
+ /*
+ * This can happen if we free the extent and reallocate
+ * it for a different root before the eb is evicted
+ * from the cache.
+ */
+ if (unlikely(eb->root != root))
+ eb->root = root;
+// printk(KERN_ERR "alloc found %p\n", eb);
+ set_bit(EXTENT_BUFFER_REFERENCED, &eb->bflags);
rcu_read_unlock();
- mark_page_accessed(eb->pages[0]);
return eb;
}
+new:
rcu_read_unlock();
- eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
+ eb = __alloc_extent_buffer(root, start, len, GFP_NOFS);
if (!eb)
return NULL;
+ set_bit(EXTENT_BUFFER_NEW, &eb->bflags);
for (i = 0; i < num_pages; i++, index++) {
- p = find_or_create_page(mapping, index, GFP_NOFS);
+ p = alloc_page(GFP_NOFS);
if (!p) {
WARN_ON(1);
goto free_eb;
}
- mark_page_accessed(p);
+ set_page_private(p, (unsigned long)eb);
+ p->index = index;
eb->pages[i] = p;
- if (!PageUptodate(p))
- uptodate = 0;
-
- /*
- * see below about how we avoid a nasty race with release page
- * and why we unlock later
- */
}
- if (uptodate)
- set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
if (ret)
goto free_eb;
- spin_lock(&tree->buffer_lock);
- ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
+ spin_lock_irqsave(&fs_info->eb_tree_lock, flags);
+ ret = radix_tree_insert(&fs_info->eb_tree, start >> PAGE_CACHE_SHIFT, eb);
if (ret == -EEXIST) {
- exists = radix_tree_lookup(&tree->buffer,
+ exists = radix_tree_lookup(&fs_info->eb_tree,
start >> PAGE_CACHE_SHIFT);
+// printk(KERN_ERR "Found an existing eb %p\n", exists);
/* add one reference for the caller */
- atomic_inc(&exists->refs);
- spin_unlock(&tree->buffer_lock);
+ if (!get_extent_buffer_if_nonzero(exists)) {
+ printk(KERN_ERR "would have gotten a bad eb, start is %Lu, eb start is %Lu, refs %d, releasing set %s, "
+ "lru releasing set %s, stale %d, new %d\n",
+ start, exists->start, exists->refs, test_bit(EXTENT_BUFFER_RELEASING, &exists->bflags) ? "yes" : "no",
+ test_bit(EXTENT_BUFFER_LRU_RELEASE, &exists->bflags) ? "yes" : "no", test_bit(EXTENT_BUFFER_STALE, &exists->bflags),
+ test_bit(EXTENT_BUFFER_NEW, &exists->bflags));
+ spin_unlock_irqrestore(&fs_info->eb_tree_lock, flags);
+ radix_tree_preload_end();
+ btrfs_release_extent_buffer(eb, 1);
+ /*
+ * We free eb's via rcu, so we need to synchronize the
+ * rcu here to make sure we don't loop back around and
+ * find the same thing again over and over.
+ */
+ synchronize_rcu();
+ goto again;
+ }
+ spin_unlock_irqrestore(&fs_info->eb_tree_lock, flags);
radix_tree_preload_end();
+ set_bit(EXTENT_BUFFER_REFERENCED, &exists->bflags);
goto free_eb;
}
- /* add one reference for the tree */
- atomic_inc(&eb->refs);
- spin_unlock(&tree->buffer_lock);
+// printk(KERN_ERR "allocated eb %p\n", eb);
+ spin_unlock_irqrestore(&fs_info->eb_tree_lock, flags);
radix_tree_preload_end();
+ set_bit(EXTENT_BUFFER_REFERENCED, &eb->bflags);
- /*
- * there is a race where release page may have
- * tried to find this extent buffer in the radix
- * but failed. It will tell the VM it is safe to
- * reclaim the, and it will clear the page private bit.
- * We must make sure to set the page private bit properly
- * after the extent buffer is in the radix tree so
- * it doesn't get lost
- */
- set_page_extent_mapped(eb->pages[0]);
- set_page_extent_head(eb->pages[0], eb->len);
- SetPageChecked(eb->pages[0]);
- for (i = 1; i < num_pages; i++) {
- p = extent_buffer_page(eb, i);
- set_page_extent_mapped(p);
- ClearPageChecked(p);
- unlock_page(p);
- }
- unlock_page(eb->pages[0]);
return eb;
free_eb:
- for (i = 0; i < num_pages; i++) {
- if (eb->pages[i])
- unlock_page(eb->pages[i]);
- }
-
- if (!atomic_dec_and_test(&eb->refs))
- return exists;
- btrfs_release_extent_buffer(eb);
+ btrfs_release_extent_buffer(eb, 1);
return exists;
}
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
- u64 start, unsigned long len)
+static struct extent_buffer *
+__find_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, int ref)
{
struct extent_buffer *eb;
rcu_read_lock();
- eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
- if (eb && atomic_inc_not_zero(&eb->refs)) {
+ eb = radix_tree_lookup(&fs_info->eb_tree, start >> PAGE_CACHE_SHIFT);
+ if (eb && get_extent_buffer_if_nonzero(eb)) {
+ if (ref)
+ set_bit(EXTENT_BUFFER_REFERENCED, &eb->bflags);
+// printk(KERN_ERR "find found %p\n", eb);
rcu_read_unlock();
- mark_page_accessed(eb->pages[0]);
return eb;
}
+out:
rcu_read_unlock();
return NULL;
}
+struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
+ u64 start)
+{
+ return __find_extent_buffer(fs_info, start, 1);
+}
+
+struct extent_buffer *find_extent_buffer_no_ref(struct btrfs_fs_info *fs_info,
+ u64 start)
+{
+ return __find_extent_buffer(fs_info, start, 0);
+}
+
+static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
+{
+ struct extent_buffer *eb =
+ container_of(head, struct extent_buffer, rcu_head);
+
+ btrfs_release_extent_buffer(eb, 1);
+}
+
+static void add_lru(struct extent_buffer *eb)
+{
+ struct btrfs_fs_info *fs_info = eb->root->fs_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fs_info->eb_lru_lock, flags);
+ if (list_empty(&eb->lru)) {
+ fs_info->eb_lru_nr++;
+ list_add_tail(&eb->lru, &fs_info->eb_lru);
+ eb->refs++;
+ }
+ spin_unlock_irqrestore(&fs_info->eb_lru_lock, flags);
+}
+
+static void del_lru(struct extent_buffer *eb)
+{
+ struct btrfs_fs_info *fs_info = eb->root->fs_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fs_info->eb_lru_lock, flags);
+ if (!list_empty(&eb->lru)) {
+ list_del_init(&eb->lru);
+ fs_info->eb_lru_nr--;
+ }
+ spin_unlock_irqrestore(&fs_info->eb_lru_lock, flags);
+}
+
+static int release_extent_buffer(struct extent_buffer *eb)
+{
+ struct btrfs_fs_info *fs_info = eb->root->fs_info;
+ unsigned long flags;
+
+ BUG_ON(!eb->root);
+ if (!fs_info) {
+ printk(KERN_ERR "no fs_info for eb %Lu, root %Lu, refs %d, pointer %p\n",
+ eb->start, eb->root->objectid, eb->refs, eb);
+ BUG();
+ }
+ BUG_ON(!eb->refs);
+ if (--eb->refs == 0) {
+// if (eb->root == fs_info->extent_root) {
+// printk(KERN_ERR "releasing %Lu, %p, stale? %d, lru ? %d\n", eb->start, eb,
+// test_bit(EXTENT_BUFFER_STALE, &eb->bflags), !list_empty(&eb->lru));
+// WARN_ON(!test_bit(EXTENT_BUFFER_STALE, &eb->bflags));
+// }
+ spin_unlock(&eb->eb_lock);
+ del_lru(eb);
+ BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+ BUG_ON(test_bit(EXTENT_BUFFER_WRITE, &eb->bflags));
+ WARN_ON(!list_empty(&eb->dirty_list));
+ spin_lock_irqsave(&fs_info->eb_tree_lock, flags);
+ radix_tree_delete(&fs_info->eb_tree,
+ eb->start >> PAGE_CACHE_SHIFT);
+ spin_unlock_irqrestore(&fs_info->eb_tree_lock, flags);
+ BUG_ON(test_and_set_bit(EXTENT_BUFFER_RELEASING, &eb->bflags));
+ call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
+ return 1;
+ }
+ spin_unlock(&eb->eb_lock);
+ return 0;
+}
+
void free_extent_buffer(struct extent_buffer *eb)
{
+ struct btrfs_fs_info *fs_info;
+
if (!eb)
return;
- if (!atomic_dec_and_test(&eb->refs))
- return;
+ clear_bit(EXTENT_BUFFER_NEW, &eb->bflags);
+ fs_info = eb->root->fs_info;
+ spin_lock(&eb->eb_lock);
+ if (eb->refs == 1 &&
+ !test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
+// printk(KERN_ERR "lru'ing %Lu\n", eb->start);
+// set_bit(EXTENT_BUFFER_REFERENCED, &eb->bflags);
+ add_lru(eb);
+ }
- WARN_ON(1);
+ release_extent_buffer(eb);
}
-int clear_extent_buffer_dirty(struct extent_io_tree *tree,
- struct extent_buffer *eb)
+void free_extent_buffer_stale(struct extent_buffer *eb)
{
- unsigned long i;
- unsigned long num_pages;
- struct page *page;
+ struct btrfs_fs_info *fs_info;
+ unsigned long flags;
- num_pages = num_extent_pages(eb->start, eb->len);
+ if (!eb)
+ return;
- for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- if (!PageDirty(page))
- continue;
+ fs_info = eb->root->fs_info;
+ set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
+ spin_lock(&eb->eb_lock);
+ spin_lock_irqsave(&fs_info->eb_lru_lock, flags);
+ /*
+ * Only pull the eb off the LRU (and drop its LRU ref) if the
+ * shrinker hasn't already started removing it.
+ */
+ if (!list_empty(&eb->lru) &&
+ !test_bit(EXTENT_BUFFER_LRU_RELEASE, &eb->bflags)) {
+ list_del_init(&eb->lru);
+ fs_info->eb_lru_nr--;
+ eb->refs--;
+ BUG_ON(!eb->refs);
+ }
+ spin_unlock_irqrestore(&fs_info->eb_lru_lock, flags);
+ release_extent_buffer(eb);
+}
- lock_page(page);
- WARN_ON(!PagePrivate(page));
+int shrink_ebs(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct btrfs_fs_info *fs_info = container_of(shrinker,
+ struct btrfs_fs_info,
+ eb_shrinker);
+ struct extent_buffer *eb, *tmp;
+ unsigned long flags;
+ LIST_HEAD(dispose);
+ int nr_to_scan = sc->nr_to_scan;
+ int list_to_scan;
+ int total_freed = 0;
+ int referenced = 0, dirty = 0, tried_to_free = 0, extent_root = 0;
+ int nr_scanned = 0;
+ if (!nr_to_scan)
+ return fs_info->eb_lru_nr;
+
+ spin_lock_irqsave(&fs_info->eb_lru_lock, flags);
+ while (!list_empty(&fs_info->eb_lru)) {
+ if (nr_to_scan-- <= 0)
+ break;
+ eb = list_first_entry(&fs_info->eb_lru, struct extent_buffer,
+ lru);
+ nr_scanned++;
+ if (test_and_clear_bit(EXTENT_BUFFER_REFERENCED,
+ &eb->bflags)) {
+ referenced++;
+ list_move_tail(&eb->lru, &fs_info->eb_lru);
+ continue;
+ } else if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+ dirty++;
+ list_move_tail(&eb->lru, &fs_info->eb_lru);
+ continue;
+// } else if (eb->root == fs_info->extent_root) {
+// extent_root++;
+// list_move_tail(&eb->lru, &fs_info->eb_lru);
+// continue;
+ }
- set_page_extent_mapped(page);
- if (i == 0)
- set_page_extent_head(page, eb->len);
+ fs_info->eb_lru_nr--;
+ set_bit(EXTENT_BUFFER_LRU_RELEASE, &eb->bflags);
+ list_move(&eb->lru, &dispose);
+ }
+ spin_unlock_irqrestore(&fs_info->eb_lru_lock, flags);
- clear_page_dirty_for_io(page);
- spin_lock_irq(&page->mapping->tree_lock);
- if (!PageDirty(page)) {
- radix_tree_tag_clear(&page->mapping->page_tree,
- page_index(page),
- PAGECACHE_TAG_DIRTY);
- }
- spin_unlock_irq(&page->mapping->tree_lock);
- ClearPageError(page);
- unlock_page(page);
+ while (!list_empty(&dispose)) {
+ eb = list_first_entry(&dispose, struct extent_buffer, lru);
+ list_del_init(&eb->lru);
+ spin_lock(&eb->eb_lock);
+ total_freed += release_extent_buffer(eb);
+ tried_to_free++;
}
- return 0;
+/*
+ spin_lock_irqsave(&fs_info->eb_lru_lock, flags);
+ printk(KERN_ERR "freed=%d, lru=%u, referenced=%d, dirty=%d, tried_to_free=%d, nr_scanned=%d, total=%d, actually dirty=%d, inflight=%d, extent_root=%d\n", total_freed,
+ fs_info->eb_lru_nr, referenced, dirty, tried_to_free, nr_scanned, atomic_read(&fs_info->nr_ebs), atomic_read(&fs_info->dirty_ebs), atomic_read(&fs_info->inflight_ebs), extent_root);
+ spin_unlock_irqrestore(&fs_info->eb_lru_lock, flags);
+*/
+ return fs_info->eb_lru_nr;
}
-int set_extent_buffer_dirty(struct extent_io_tree *tree,
- struct extent_buffer *eb)
+void btrfs_destroy_eb_cache(struct btrfs_fs_info *fs_info)
{
- unsigned long i;
- unsigned long num_pages;
- int was_dirty = 0;
-
- was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
- num_pages = num_extent_pages(eb->start, eb->len);
- for (i = 0; i < num_pages; i++)
- __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
- return was_dirty;
+ struct extent_buffer *eb;
+ int count = 0;
+
+ synchronize_rcu();
+ printk(KERN_ERR "we have %d on the lru, %d total, inflight %d\n", fs_info->eb_lru_nr, atomic_read(&fs_info->nr_ebs), atomic_read(&fs_info->inflight_ebs));
+ while (!list_empty(&fs_info->eb_lru)) {
+ eb = list_first_entry(&fs_info->eb_lru, struct extent_buffer,
+ lru);
+ list_del_init(&eb->lru);
+ BUG_ON(eb->refs > 1);
+ btrfs_release_extent_buffer(eb, 0);
+ atomic_dec(&fs_info->nr_ebs);
+ count++;
+ }
+ printk(KERN_ERR "freed %d, total left %d\n", count, atomic_read(&fs_info->nr_ebs));
}
-static int __eb_straddles_pages(u64 start, u64 len)
+int clear_extent_buffer_dirty(struct extent_buffer *eb)
{
- if (len < PAGE_CACHE_SIZE)
- return 1;
- if (start & (PAGE_CACHE_SIZE - 1))
- return 1;
- if ((start + len) & (PAGE_CACHE_SIZE - 1))
+ WARN_ON(in_interrupt());
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+ spin_lock(&eb->root->fs_info->eb_dirty_lock);
+ if (!test_bit(EXTENT_BUFFER_WRITE, &eb->bflags)) {
+ WARN_ON(list_empty(&eb->dirty_list));
+ list_del_init(&eb->dirty_list);
+ }
+ spin_unlock(&eb->root->fs_info->eb_dirty_lock);
+ free_extent_buffer(eb);
+ atomic_dec(&eb->root->fs_info->dirty_ebs);
return 1;
+ }
return 0;
}
-static int eb_straddles_pages(struct extent_buffer *eb)
+int set_extent_buffer_dirty(struct extent_buffer *eb)
{
- return __eb_straddles_pages(eb->start, eb->len);
+ struct list_head *list;
+ int level = btrfs_header_level(eb);
+
+ WARN_ON(in_interrupt());
+// if (level)
+// list = &eb->root->fs_info->dirty_nodes;
+// else
+ list = &eb->root->fs_info->dirty_leaves;
+
+ if (!test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+ spin_lock(&eb->root->fs_info->eb_dirty_lock);
+ WARN_ON(!list_empty(&eb->dirty_list));
+ WARN_ON(test_bit(EXTENT_BUFFER_WRITE, &eb->bflags));
+ list_add_tail(&eb->dirty_list, list);
+ spin_unlock(&eb->root->fs_info->eb_dirty_lock);
+ /* We hold a ref on this buffer until it's written out */
+ extent_buffer_get(eb);
+ atomic_inc(&eb->root->fs_info->dirty_ebs);
+ wake_up(&eb->root->fs_info->writeback_ebs);
+ return 0;
+// } else if (level) {
+// spin_lock(&eb->root->fs_info->eb_dirty_lock);
+// WARN_ON(list_empty(&eb->dirty_list));
+// if (!test_bit(EXTENT_BUFFER_WRITE, &eb->bflags))
+// list_move_tail(&eb->dirty_list, list);
+// spin_unlock(&eb->root->fs_info->eb_dirty_lock);
+ }
+ return 1;
}
-int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb,
- struct extent_state **cached_state)
+void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
- unsigned long i;
- struct page *page;
- unsigned long num_pages;
-
- num_pages = num_extent_pages(eb->start, eb->len);
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-
- if (eb_straddles_pages(eb)) {
- clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- cached_state, GFP_NOFS);
- }
- for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- if (page)
- ClearPageUptodate(page);
- }
- return 0;
}
-int set_extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb)
+void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
- unsigned long i;
- struct page *page;
- unsigned long num_pages;
-
- num_pages = num_extent_pages(eb->start, eb->len);
+ set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+}
- if (eb_straddles_pages(eb)) {
- set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- NULL, GFP_NOFS);
- }
- for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
- ((i == num_pages - 1) &&
- ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
- check_page_uptodate(tree, page);
- continue;
- }
- SetPageUptodate(page);
- }
+static int __eb_straddles_pages(u64 start, u64 len)
+{
+ if (len < PAGE_CACHE_SIZE)
+ return 1;
+ if (start & (PAGE_CACHE_SIZE - 1))
+ return 1;
+ if ((start + len) & (PAGE_CACHE_SIZE - 1))
+ return 1;
return 0;
}
@@ -3902,137 +4081,11 @@ int extent_range_uptodate(struct extent_io_tree *tree,
return pg_uptodate;
}
-int extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb,
- struct extent_state *cached_state)
+int extent_buffer_uptodate(struct extent_buffer *eb)
{
- int ret = 0;
- unsigned long num_pages;
- unsigned long i;
- struct page *page;
- int pg_uptodate = 1;
-
- if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
- return 1;
-
- if (eb_straddles_pages(eb)) {
- ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1, cached_state);
- if (ret)
- return ret;
- }
-
- num_pages = num_extent_pages(eb->start, eb->len);
- for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- if (!PageUptodate(page)) {
- pg_uptodate = 0;
- break;
- }
- }
- return pg_uptodate;
+ return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}
-int read_extent_buffer_pages(struct extent_io_tree *tree,
- struct extent_buffer *eb, u64 start, int wait,
- get_extent_t *get_extent, int mirror_num)
-{
- unsigned long i;
- unsigned long start_i;
- struct page *page;
- int err;
- int ret = 0;
- int locked_pages = 0;
- int all_uptodate = 1;
- unsigned long num_pages;
- unsigned long num_reads = 0;
- struct bio *bio = NULL;
- unsigned long bio_flags = 0;
-
- if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
- return 0;
-
- if (eb_straddles_pages(eb)) {
- if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1, NULL)) {
- return 0;
- }
- }
-
- if (start) {
- WARN_ON(start < eb->start);
- start_i = (start >> PAGE_CACHE_SHIFT) -
- (eb->start >> PAGE_CACHE_SHIFT);
- } else {
- start_i = 0;
- }
-
- num_pages = num_extent_pages(eb->start, eb->len);
- for (i = start_i; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- if (wait == WAIT_NONE) {
- if (!trylock_page(page))
- goto unlock_exit;
- } else {
- lock_page(page);
- }
- locked_pages++;
- if (!PageUptodate(page)) {
- num_reads++;
- all_uptodate = 0;
- }
- }
- if (all_uptodate) {
- if (start_i == 0)
- set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- goto unlock_exit;
- }
-
- atomic_set(&eb->pages_reading, num_reads);
- for (i = start_i; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- set_page_extent_mapped(page);
- if (i == 0)
- set_page_extent_head(page, eb->len);
- if (!PageUptodate(page)) {
- ClearPageError(page);
- err = __extent_read_full_page(tree, page,
- get_extent, &bio,
- mirror_num, &bio_flags);
- if (err)
- ret = err;
- } else {
- unlock_page(page);
- }
- }
-
- if (bio)
- submit_one_bio(READ, bio, mirror_num, bio_flags);
-
- if (ret || wait != WAIT_COMPLETE)
- return ret;
-
- for (i = start_i; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- wait_on_page_locked(page);
- if (!PageUptodate(page))
- ret = -EIO;
- }
-
- if (!ret)
- set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- return ret;
-
-unlock_exit:
- i = start_i;
- while (locked_pages > 0) {
- page = extent_buffer_page(eb, i);
- i++;
- unlock_page(page);
- locked_pages--;
- }
- return ret;
-}
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
unsigned long start,
@@ -4158,7 +4211,6 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
while (len > 0) {
page = extent_buffer_page(eb, i);
- WARN_ON(!PageUptodate(page));
cur = min(len, PAGE_CACHE_SIZE - offset);
kaddr = page_address(page);
@@ -4186,9 +4238,9 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
+ WARN_ON(!extent_buffer_uptodate(eb));
while (len > 0) {
page = extent_buffer_page(eb, i);
- WARN_ON(!PageUptodate(page));
cur = min(len, PAGE_CACHE_SIZE - offset);
kaddr = page_address(page);
@@ -4217,9 +4269,9 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
offset = (start_offset + dst_offset) &
((unsigned long)PAGE_CACHE_SIZE - 1);
+ WARN_ON(!extent_buffer_uptodate(dst));
while (len > 0) {
page = extent_buffer_page(dst, i);
- WARN_ON(!PageUptodate(page));
cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
@@ -4370,48 +4422,3 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
len -= cur;
}
}
-
-static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
-{
- struct extent_buffer *eb =
- container_of(head, struct extent_buffer, rcu_head);
-
- btrfs_release_extent_buffer(eb);
-}
-
-int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
-{
- u64 start = page_offset(page);
- struct extent_buffer *eb;
- int ret = 1;
-
- spin_lock(&tree->buffer_lock);
- eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
- if (!eb) {
- spin_unlock(&tree->buffer_lock);
- return ret;
- }
-
- if (atomic_read(&eb->refs) > 1 ||
- test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
- ret = 0;
- goto out;
- }
-
- /*
- * set @eb->refs to 0 if it is already 1, and then release the @eb.
- * Or go back.
- */
- if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
- ret = 0;
- goto out;
- }
- radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-out:
- spin_unlock(&tree->buffer_lock);
-
- /* at this point we can safely release the extent buffer */
- if (atomic_read(&eb->refs) == 0)
- call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
- return ret;
-}
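
In the extent_io.c hunks above, eb->refs changes from an atomic_t to a plain int guarded by the new eb->eb_lock spinlock, so that get_extent_buffer_if_nonzero() can refuse to resurrect a buffer whose count already hit zero and release_extent_buffer() can unhash and RCU-free it under the same decision. Below is a minimal userspace sketch of that lookup/release pattern, using a pthread mutex where the kernel uses a spinlock; the names are illustrative and nothing here is part of the patch.

/* build: cc -pthread refcount_sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	pthread_mutex_t lock;
	int refs;
};

/* Take a reference only if the object has not already hit zero. */
static int get_if_nonzero(struct buf *b)
{
	int ok = 0;

	pthread_mutex_lock(&b->lock);
	if (b->refs) {
		b->refs++;
		ok = 1;
	}
	pthread_mutex_unlock(&b->lock);
	return ok;
}

/* Drop a reference; the caller that hits zero is the one that frees. */
static int put(struct buf *b)
{
	int last;

	pthread_mutex_lock(&b->lock);
	last = (--b->refs == 0);
	pthread_mutex_unlock(&b->lock);
	if (last) {
		/* kernel side: unhash from the radix tree, then call_rcu() */
		pthread_mutex_destroy(&b->lock);
		free(b);
	}
	return last;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	pthread_mutex_init(&b->lock, NULL);
	b->refs = 1;
	printf("got extra ref: %d\n", get_if_nonzero(b));
	put(b);
	printf("freed on last put: %d\n", put(b));
	return 0;
}
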
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 9b6c8f3db32426..4ac5d8b365d3ce 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -35,6 +35,14 @@
#define EXTENT_BUFFER_DIRTY 2
#define EXTENT_BUFFER_CORRUPT 3
#define EXTENT_BUFFER_READAHEAD 4 /* this got triggered by readahead */
+#define EXTENT_BUFFER_IOLOCK 5
+#define EXTENT_BUFFER_IOERR 6
+#define EXTENT_BUFFER_REFERENCED 7
+#define EXTENT_BUFFER_STALE 8
+#define EXTENT_BUFFER_WRITE 9
+#define EXTENT_BUFFER_RELEASING 10
+#define EXTENT_BUFFER_LRU_RELEASE 11
+#define EXTENT_BUFFER_NEW 12
/* these are flags for extent_clear_unlock_delalloc */
#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -127,9 +135,13 @@ struct extent_buffer {
unsigned long map_start;
unsigned long map_len;
unsigned long bflags;
- atomic_t refs;
- atomic_t pages_reading;
+ struct btrfs_root *root;
+ spinlock_t eb_lock;
+ int refs;
+ int io_pages;
struct list_head leak_list;
+ struct list_head lru;
+ struct list_head dirty_list;
struct rcu_head rcu_head;
pid_t lock_owner;
@@ -170,7 +182,43 @@ static inline int extent_compress_type(unsigned long bio_flags)
return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
+static inline int extent_buffer_tryiolock(struct extent_buffer *eb)
+{
+ return likely(!test_and_set_bit_lock(EXTENT_BUFFER_IOLOCK,
+ &eb->bflags));
+}
+
+static int sleep_eb(void *word)
+{
+ io_schedule();
+ return 0;
+}
+
+static inline void wait_on_extent_buffer(struct extent_buffer *eb)
+{
+ might_sleep();
+ if (test_bit(EXTENT_BUFFER_IOLOCK, &eb->bflags))
+ wait_on_bit(&eb->bflags, EXTENT_BUFFER_IOLOCK, sleep_eb,
+ TASK_UNINTERRUPTIBLE);
+}
+
+static inline void extent_buffer_iolock(struct extent_buffer *eb)
+{
+ might_sleep();
+ if (!extent_buffer_tryiolock(eb))
+ wait_on_bit_lock(&eb->bflags, EXTENT_BUFFER_IOLOCK, sleep_eb,
+ TASK_UNINTERRUPTIBLE);
+}
+
+static inline void extent_buffer_iounlock(struct extent_buffer *eb)
+{
+ clear_bit_unlock(EXTENT_BUFFER_IOLOCK, &eb->bflags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_IOLOCK);
+}
+
struct extent_map_tree;
+struct btrfs_fs_info;
typedef struct extent_map *(get_extent_t)(struct inode *inode,
struct page *page,
@@ -183,7 +231,8 @@ void extent_io_tree_init(struct extent_io_tree *tree,
int try_release_extent_mapping(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
-int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page);
+int try_release_extent_buffer(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb);
int try_release_extent_state(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
@@ -255,23 +304,33 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
void set_page_extent_mapped(struct page *page);
-struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *alloc_extent_buffer(struct btrfs_root *root,
u64 start, unsigned long len);
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
- u64 start, unsigned long len);
+struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
+ u64 start);
+struct extent_buffer *find_extent_buffer_no_ref(struct btrfs_fs_info *fs_info,
+ u64 start);
void free_extent_buffer(struct extent_buffer *eb);
+void free_extent_buffer_stale(struct extent_buffer *eb);
+int shrink_ebs(struct shrinker *shrinker, struct shrink_control *sc);
+void btrfs_destroy_eb_cache(struct btrfs_fs_info *fs_info);
#define WAIT_NONE 0
#define WAIT_COMPLETE 1
#define WAIT_PAGE_LOCK 2
-int read_extent_buffer_pages(struct extent_io_tree *tree,
- struct extent_buffer *eb, u64 start, int wait,
- get_extent_t *get_extent, int mirror_num);
+int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
+ int mirror_num);
+int write_one_extent_buffer(struct extent_buffer *eb, int wait,
+ int mirror_num);
+int write_extent_buffer_range(struct btrfs_root *root, u64 start,
+ u64 end, int wait);
unsigned long num_extent_pages(u64 start, u64 len);
struct page *extent_buffer_page(struct extent_buffer *eb, unsigned long i);
static inline void extent_buffer_get(struct extent_buffer *eb)
{
- atomic_inc(&eb->refs);
+ spin_lock(&eb->eb_lock);
+ eb->refs++;
+ spin_unlock(&eb->eb_lock);
}
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
@@ -292,18 +351,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len);
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
-int clear_extent_buffer_dirty(struct extent_io_tree *tree,
- struct extent_buffer *eb);
-int set_extent_buffer_dirty(struct extent_io_tree *tree,
- struct extent_buffer *eb);
-int set_extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb);
-int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb,
- struct extent_state **cached_state);
-int extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb,
- struct extent_state *cached_state);
+int clear_extent_buffer_dirty(struct extent_buffer *eb);
+int set_extent_buffer_dirty(struct extent_buffer *eb);
+void set_extent_buffer_uptodate(struct extent_buffer *eb);
+void clear_extent_buffer_uptodate(struct extent_buffer *eb);
+int extent_buffer_uptodate(struct extent_buffer *eb);
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
unsigned long min_len, char **map,
unsigned long *map_start,
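
shrink_ebs(), declared above and implemented in extent_io.c, walks the per-fs LRU and gives anything carrying EXTENT_BUFFER_REFERENCED a second chance by clearing the bit and rotating it to the tail, leaves dirty buffers alone, and frees the rest. A small self-contained sketch of that second-chance scan over a doubly linked list is shown below; it is plain userspace C with illustrative names, and no kernel API is implied.

#include <stdbool.h>
#include <stdio.h>

struct eb {
	bool referenced;
	bool dirty;
	struct eb *prev, *next;
};

static void list_del_init_(struct eb *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->prev = e->next = e;
}

static void list_move_tail_(struct eb *head, struct eb *e)
{
	list_del_init_(e);
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

/* Scan up to nr_to_scan entries from the head of the LRU, second-chance style. */
static int shrink(struct eb *head, int nr_to_scan)
{
	int freed = 0;

	while (head->next != head && nr_to_scan-- > 0) {
		struct eb *e = head->next;

		if (e->referenced) {
			e->referenced = false;		/* recently used: spare it once */
			list_move_tail_(head, e);
			continue;
		}
		if (e->dirty) {
			list_move_tail_(head, e);	/* writeback still owns it */
			continue;
		}
		list_del_init_(e);			/* cold and clean: reclaim */
		freed++;
	}
	return freed;
}

int main(void)
{
	struct eb bufs[6] = { 0 };
	struct eb head = { .prev = &head, .next = &head };
	int i;

	for (i = 0; i < 6; i++) {
		bufs[i].prev = bufs[i].next = &bufs[i];
		bufs[i].referenced = (i % 2 == 0);	/* half were touched recently */
		bufs[i].dirty = (i == 3);		/* one still dirty, never reclaimed */
		list_move_tail_(&head, &bufs[i]);
	}
	printf("pass 1 freed %d\n", shrink(&head, 6));	/* only the cold, clean buffers */
	printf("pass 2 freed %d\n", shrink(&head, 6));	/* second chances used up */
	return 0;
}
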
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c59376238a451f..1f9b0e8f6f9b5d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5946,6 +5946,12 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int async_submit = 0;
int write = rw & REQ_WRITE;
+ if (orig_bio->bi_size <= root->sectorsize) {
+ /* Small bio, don't worry about doing async csum */
+ bio = orig_bio;
+ goto submit;
+ }
+
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
&map_length, NULL, 0);
@@ -5954,12 +5960,12 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
return -EIO;
}
+ async_submit = 1;
if (map_length >= orig_bio->bi_size) {
bio = orig_bio;
goto submit;
}
- async_submit = 1;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
if (!bio)
return -ENOMEM;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8c1aae2c845d49..d94ea802ce6f89 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4053,11 +4053,12 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
}
}
+ /*
filemap_write_and_wait_range(fs_info->btree_inode->i_mapping,
rc->block_group->key.objectid,
rc->block_group->key.objectid +
rc->block_group->key.offset - 1);
-
+ */
WARN_ON(rc->block_group->pinned > 0);
WARN_ON(rc->block_group->reserved > 0);
WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3ce97b217cbeae..e9d728ad7ad9a3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -689,7 +689,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
trace_btrfs_sync_fs(wait);
if (!wait) {
- filemap_flush(fs_info->btree_inode->i_mapping);
+// filemap_flush(fs_info->btree_inode->i_mapping);
return 0;
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 287a6728b1ad6d..f7f9605fa83c21 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -91,6 +91,7 @@ loop:
}
atomic_set(&cur_trans->num_writers, 1);
+
cur_trans->num_joined = 0;
init_waitqueue_head(&cur_trans->writer_wait);
init_waitqueue_head(&cur_trans->commit_wait);
@@ -118,8 +119,7 @@ loop:
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
- extent_io_tree_init(&cur_trans->dirty_pages,
- root->fs_info->btree_inode->i_mapping);
+ extent_io_tree_init(&cur_trans->dirty_pages, NULL);
root->fs_info->generation++;
cur_trans->transid = root->fs_info->generation;
root->fs_info->running_transaction = cur_trans;
@@ -576,7 +576,6 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
{
int err = 0;
int werr = 0;
- struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
u64 start = 0;
u64 end;
@@ -584,11 +583,9 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
mark)) {
convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
GFP_NOFS);
- err = filemap_fdatawrite_range(mapping, start, end);
+ err = write_extent_buffer_range(root, start, end, WAIT_PAGE_LOCK);
if (err)
- werr = err;
- cond_resched();
- start = end + 1;
+ break;
}
if (err)
werr = err;
@@ -605,23 +602,33 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages, int mark)
{
int err = 0;
- int werr = 0;
- struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
u64 start = 0;
u64 end;
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
EXTENT_NEED_WAIT)) {
clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
- err = filemap_fdatawait_range(mapping, start, end);
- if (err)
- werr = err;
- cond_resched();
- start = end + 1;
+ while (start < end) {
+ struct extent_buffer *eb;
+
+ eb = find_extent_buffer_no_ref(root->fs_info, start);
+ if (!eb) {
+ /*
+ * This could happen if the eb got free'd up
+ * after it was written out by the shrinker.
+ */
+ start += PAGE_CACHE_SIZE;
+ continue;
+ }
+ wait_on_extent_buffer(eb);
+ if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags))
+ err = -EIO;
+ start = eb->start + eb->len;
+ free_extent_buffer(eb);
+ cond_resched();
+ }
}
- if (err)
- werr = err;
- return werr;
+ return err;
}
/*
@@ -648,11 +655,8 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- if (!trans || !trans->transaction) {
- struct inode *btree_inode;
- btree_inode = root->fs_info->btree_inode;
- return filemap_write_and_wait(btree_inode->i_mapping);
- }
+ if (!trans || !trans->transaction)
+ return 0;
return btrfs_write_and_wait_marked_extents(root,
&trans->transaction->dirty_pages,
EXTENT_DIRTY);
@@ -1258,6 +1262,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
*/
mutex_lock(&root->fs_info->reloc_mutex);
+ /*
+ * This makes sure the metadata flusher kthread stops so it's not
+ * writing out metadata while we're trying to.
+ */
+// mutex_lock(&root->fs_info->metadata_flusher_mutex);
+
ret = btrfs_run_delayed_items(trans, root);
BUG_ON(ret);
@@ -1333,8 +1343,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
wake_up(&root->fs_info->transaction_wait);
+// printk(KERN_ERR "going to write and wait transaction\n");
ret = btrfs_write_and_wait_transaction(trans, root);
BUG_ON(ret);
+
+ /*
+ * Done writing out the dirty metadata, we can let the background
+ * flusher run now if it wants to.
+ */
+// mutex_unlock(&root->fs_info->metadata_flusher_mutex);
+
write_ctree_super(trans, root, 0);
/*
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 02564e6230acd6..4afd986845714d 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -29,7 +29,6 @@ struct btrfs_transaction {
*/
atomic_t num_writers;
atomic_t use_count;
-
unsigned long num_joined;
spinlock_t commit_lock;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index cb877e0886a71b..cc26d62022dc5f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -275,16 +275,21 @@ static int process_one_buffer(struct btrfs_root *log,
struct extent_buffer *eb,
struct walk_control *wc, u64 gen)
{
+ int wait = WAIT_PAGE_LOCK;
+
if (wc->pin)
btrfs_pin_extent_for_log_replay(wc->trans,
log->fs_info->extent_root,
eb->start, eb->len);
if (btrfs_buffer_uptodate(eb, gen)) {
- if (wc->write)
- btrfs_write_tree_block(eb);
- if (wc->wait)
- btrfs_wait_tree_block_writeback(eb);
+ if (wc->write) {
+ if (wc->wait)
+ wait = WAIT_COMPLETE;
+ write_one_extent_buffer(eb, wait, 0);
+ } else if (wc->wait) {
+ wait_on_extent_buffer(eb);
+ }
}
return 0;
}
@@ -1756,23 +1761,30 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
clean_tree_block(trans, root, next);
- btrfs_wait_tree_block_writeback(next);
+ wait_on_extent_buffer(next);
btrfs_tree_unlock(next);
WARN_ON(root_owner !=
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(root,
bytenr, blocksize);
+// printk(KERN_ERR "freeing %p\n", next);
BUG_ON(ret);
+ free_extent_buffer_stale(next);
+ } else {
+ free_extent_buffer(next);
}
- free_extent_buffer(next);
continue;
}
btrfs_read_buffer(next, ptr_gen);
WARN_ON(*level <= 0);
- if (path->nodes[*level-1])
- free_extent_buffer(path->nodes[*level-1]);
+ if (path->nodes[*level-1]) {
+ if (wc->free)
+ free_extent_buffer_stale(path->nodes[*level-1]);
+ else
+ free_extent_buffer(path->nodes[*level-1]);
+ }
path->nodes[*level-1] = next;
*level = btrfs_header_level(next);
path->slots[*level] = 0;
@@ -1825,16 +1837,19 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
clean_tree_block(trans, root, next);
- btrfs_wait_tree_block_writeback(next);
+ wait_on_extent_buffer(next);
btrfs_tree_unlock(next);
WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(root,
path->nodes[*level]->start,
path->nodes[*level]->len);
+// printk(KERN_ERR "freeing %p\n", path->nodes[*level]);
BUG_ON(ret);
+ free_extent_buffer_stale(path->nodes[*level]);
+ } else {
+ free_extent_buffer(path->nodes[*level]);
}
- free_extent_buffer(path->nodes[*level]);
path->nodes[*level] = NULL;
*level = i + 1;
}
@@ -1893,7 +1908,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
clean_tree_block(trans, log, next);
- btrfs_wait_tree_block_writeback(next);
+ wait_on_extent_buffer(next);
btrfs_tree_unlock(next);
WARN_ON(log->root_key.objectid !=
@@ -1906,7 +1921,10 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
for (i = 0; i <= orig_level; i++) {
if (path->nodes[i]) {
- free_extent_buffer(path->nodes[i]);
+ if (wc->free)
+ free_extent_buffer_stale(path->nodes[i]);
+ else
+ free_extent_buffer(path->nodes[i]);
path->nodes[i] = NULL;
}
}
@@ -2183,7 +2201,8 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
}
- free_extent_buffer(log->node);
+// printk(KERN_ERR "freeing log node %p\n", log->node);
+ free_extent_buffer_stale(log->node);
kfree(log);
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0b4e2af7954d3c..4fbdf556aafd26 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4354,7 +4354,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
BTRFS_SUPER_INFO_SIZE);
if (!sb)
return -ENOMEM;
- btrfs_set_buffer_uptodate(sb);
+ set_extent_buffer_uptodate(sb);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 552fba9c7d5a5a..8af3863228c1d0 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -69,6 +69,8 @@ extern struct task_struct *find_lock_task_mm(struct task_struct *p);
/* sysctls */
extern int sysctl_oom_dump_tasks;
+extern int sysctl_oom_dump_slabs;
+extern int sysctl_oom_dump_slabs_ratio;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __KERNEL__*/
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 84f3001a568d9e..b4b04e79e1adb4 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -618,7 +618,7 @@ TRACE_EVENT(btrfs_cow_block,
TP_fast_assign(
__entry->root_objectid = root->root_key.objectid;
__entry->buf_start = buf->start;
- __entry->refs = atomic_read(&buf->refs);
+ __entry->refs = buf->refs;
__entry->cow_start = cow->start;
__entry->buf_level = btrfs_header_level(buf);
__entry->cow_level = btrfs_header_level(cow);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f487f257e05e4f..a9da3d8695e3f8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1040,6 +1040,22 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
+ {
+ .procname = "oom_dump_slabs",
+ .data = &sysctl_oom_dump_slabs,
+ .maxlen = sizeof(sysctl_oom_dump_slabs),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "oom_dump_slabs_ratio",
+ .data = &sysctl_oom_dump_slabs_ratio,
+ .maxlen = sizeof(sysctl_oom_dump_slabs_ratio),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
{
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2958fd8e7c9abc..4de948d00908c4 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -38,9 +38,20 @@
#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
+extern void oom_dump_slabs(int ratio);
+#else
+static void oom_dump_slabs(int ratio)
+{
+}
+#endif
+
+
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
+int sysctl_oom_dump_slabs = 1;
+int sysctl_oom_dump_slabs_ratio = 10;
static DEFINE_SPINLOCK(zone_scan_lock);
/*
@@ -429,6 +440,8 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
dump_stack();
mem_cgroup_print_oom_info(memcg, p);
show_mem(SHOW_MEM_FILTER_NODES);
+ if (sysctl_oom_dump_slabs)
+ oom_dump_slabs(sysctl_oom_dump_slabs_ratio);
if (sysctl_oom_dump_tasks)
dump_tasks(memcg, nodemask);
}
diff --git a/mm/slab.c b/mm/slab.c
index f0bd7857ab3bed..c2b5d14cb0d32e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4629,3 +4629,75 @@ size_t ksize(const void *objp)
return obj_size(virt_to_cache(objp));
}
EXPORT_SYMBOL(ksize);
+
+/**
+ * oom_dump_slabs - dump top slab cache users
+ * @ratio: memory committed ratio between a cache size and the total slab size
+ *
+ * Dumps the current memory state of all eligible slab caches.
+ * State information includes cache's active objects, total objects,
+ * object size, cache name, and cache size.
+ */
+void oom_dump_slabs(int ratio)
+{
+ struct kmem_cache *cachep;
+ struct kmem_list3 *l3;
+ struct slab *slabp;
+ unsigned long active_objs, num_objs, free_objects, cache_size;
+ unsigned long active_slabs, num_slabs, slab_total_mem;
+ int node;
+
+ slab_total_mem = (global_page_state(NR_SLAB_RECLAIMABLE) +
+ global_page_state(NR_SLAB_UNRECLAIMABLE)) << PAGE_SHIFT;
+
+ if (ratio < 0)
+ ratio = 0;
+
+ if (ratio > 100)
+ ratio = 100;
+
+ pr_info("--- oom_dump_slabs:\n");
+ pr_info("<active_objs> <num_objs> <objsize> <cache_name>\n");
+ mutex_lock(&cache_chain_mutex);
+ list_for_each_entry(cachep, &cache_chain, next) {
+ num_objs = 0;
+ num_slabs = 0;
+ active_objs = 0;
+ free_objects = 0;
+ active_slabs = 0;
+
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (!l3)
+ continue;
+
+ check_irq_on();
+ spin_lock_irq(&l3->list_lock);
+
+ list_for_each_entry(slabp, &l3->slabs_full, list) {
+ active_objs += cachep->num;
+ active_slabs++;
+ }
+ list_for_each_entry(slabp, &l3->slabs_partial, list) {
+ active_objs += slabp->inuse;
+ active_slabs++;
+ }
+ list_for_each_entry(slabp, &l3->slabs_free, list)
+ num_slabs++;
+
+ free_objects += l3->free_objects;
+ spin_unlock_irq(&l3->list_lock);
+ }
+ num_slabs += active_slabs;
+ num_objs = num_slabs * cachep->num;
+ cache_size = (cachep->buffer_size * num_objs);
+
+ if (cache_size >= (slab_total_mem * ratio / 100))
+ pr_info("%12lu %12lu %12u %-20s : %9lu kB\n",
+ active_objs, num_objs, cachep->buffer_size,
+ cachep->name, cache_size >> 10);
+ }
+ mutex_unlock(&cache_chain_mutex);
+ pr_info("---\n");
+}
+EXPORT_SYMBOL(oom_dump_slabs);
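
Both oom_dump_slabs() implementations apply the same cutoff: a cache is printed only when its footprint reaches oom_dump_slabs_ratio percent of total slab memory. Below is a worked example of that arithmetic with made-up numbers (2 GiB of slab overall, the default ratio of 10, one 256 MiB cache); it only illustrates the comparison, nothing more.

#include <stdio.h>

int main(void)
{
	unsigned long long slab_total_mem = 2048ULL << 20;	/* pretend 2 GiB of slab overall */
	unsigned long long cache_size = 256ULL << 20;		/* one cache using 256 MiB */
	unsigned int ratio = 10;				/* default oom_dump_slabs_ratio */
	unsigned long long cutoff = slab_total_mem * ratio / 100;

	/* same test the kernel applies: cache_size >= slab_total_mem * ratio / 100 */
	if (cache_size >= cutoff)
		printf("reported: %llu kB cache vs %llu kB cutoff\n",
		       cache_size >> 10, cutoff >> 10);
	else
		printf("suppressed: below the %llu kB cutoff\n", cutoff >> 10);
	return 0;
}
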
diff --git a/mm/slub.c b/mm/slub.c
index 4907563ef7ff7e..7719f9264bfd08 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5480,3 +5480,55 @@ static int __init slab_proc_init(void)
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */
+
+/**
+ * oom_dump_slabs - dump top slab cache users
+ * @ratio: memory committed ratio between a cache size and the total slab size
+ *
+ * Dumps the current memory state of all eligible slab caches.
+ * State information includes cache's active objects, total objects,
+ * object size, cache name, and cache size.
+ */
+void oom_dump_slabs(int ratio)
+{
+ unsigned long cache_size, slab_total_mem;
+ unsigned long nr_objs, nr_free, nr_inuse;
+ struct kmem_cache *cachep;
+ int node;
+
+ slab_total_mem = (global_page_state(NR_SLAB_RECLAIMABLE) +
+ global_page_state(NR_SLAB_UNRECLAIMABLE)) << PAGE_SHIFT;
+
+ if (ratio < 0)
+ ratio = 0;
+
+ if (ratio > 100)
+ ratio = 100;
+
+ pr_info("--- oom_dump_slabs:\n");
+ pr_info("<active_objs> <num_objs> <objsize> <cache_name>\n");
+ down_read(&slub_lock);
+ list_for_each_entry(cachep, &slab_caches, list) {
+ nr_objs = 0;
+ nr_free = 0;
+
+ for_each_online_node(node) {
+ struct kmem_cache_node *n = get_node(cachep, node);
+ if (!n)
+ continue;
+
+ nr_objs += atomic_long_read(&n->total_objects);
+ nr_free += count_partial(n, count_free);
+ }
+ nr_inuse = nr_objs - nr_free;
+ cache_size = (cachep->size * nr_objs);
+
+ if (cache_size >= (slab_total_mem * ratio / 100))
+ pr_info("%12lu %12lu %12u %-20s : %9lu kB\n",
+ nr_inuse, nr_objs, cachep->size,
+ cachep->name, cache_size >> 10);
+ }
+ up_read(&slub_lock);
+ pr_info("---\n");
+}
+EXPORT_SYMBOL(oom_dump_slabs);