From d658621e016718eb3d26855ecf1f2c15c78bc9e7 Mon Sep 17 00:00:00 2001
From: OGAWA Hirofumi
Date: Mon, 22 Jul 2013 16:41:06 +0900
Subject: tux3: Rename from "rollup" to "unify"

We change our terminology from "rollup" to "unify".

Signed-off-by: OGAWA Hirofumi
---
 fs/tux3/balloc.c              |   2 +-
 fs/tux3/btree.c               |  52 +++++++++----------
 fs/tux3/buffer.c              |   4 +-
 fs/tux3/buffer.h              |   2 +-
 fs/tux3/buffer_writeback.c    |   4 +-
 fs/tux3/commit.c              | 114 +++++++++++++++++++++---------------------
 fs/tux3/commit_flusher.c      |   6 +--
 fs/tux3/commit_flusher_hack.c |  12 ++---
 fs/tux3/dirty-buffer.h        |  12 ++---
 fs/tux3/filemap.c             |  10 ++--
 fs/tux3/inode.c               |   6 +--
 fs/tux3/log.c                 |  62 +++++++++++------------
 fs/tux3/orphan.c              |   8 +--
 fs/tux3/replay.c              |  62 +++++++++++------------
 fs/tux3/super.c               |  12 ++---
 fs/tux3/tux3.h                |  36 ++++++-------
 fs/tux3/writeback.c           |   8 +--
 17 files changed, 206 insertions(+), 206 deletions(-)

diff --git a/fs/tux3/balloc.c b/fs/tux3/balloc.c
index c70d3bdc2b09c0..f2602fbe5e80f5 100644
--- a/fs/tux3/balloc.c
+++ b/fs/tux3/balloc.c
@@ -127,7 +127,7 @@ static int bitmap_modify_bits(struct sb *sb, struct buffer_head *buffer,
	 * The bitmap is modified only by backend.
	 * blockdirty() should never return -EAGAIN.
	 */
-	clone = blockdirty(buffer, sb->rollup);
+	clone = blockdirty(buffer, sb->unify);
	if (IS_ERR(clone)) {
		int err = PTR_ERR(clone);
		assert(err != -EAGAIN);
diff --git a/fs/tux3/btree.c b/fs/tux3/btree.c
index 372ef24edeb938..e1e58f822a6cd7 100644
--- a/fs/tux3/btree.c
+++ b/fs/tux3/btree.c
@@ -93,7 +93,7 @@ static struct buffer_head *new_node(struct btree *btree)

	if (!IS_ERR(buffer)) {
		bnode_buffer_init(buffer);
-		mark_buffer_rollup_atomic(buffer);
+		mark_buffer_unify_atomic(buffer);
	}
	return buffer;
 }
@@ -552,8 +552,8 @@ static int leaf_need_redirect(struct sb *sb, struct buffer_head *buffer)

 static int bnode_need_redirect(struct sb *sb, struct buffer_head *buffer)
 {
-	/* If this is not re-dirty for sb->rollup, we need to redirect */
-	return !buffer_already_dirty(buffer, sb->rollup);
+	/* If this is not re-dirty for sb->unify, we need to redirect */
+	return !buffer_already_dirty(buffer, sb->unify);
 }

 /*
@@ -604,9 +604,9 @@ int cursor_redirect(struct cursor *cursor)
			defer_bfree(&sb->defree, oldblock, 1);
		} else {
			/* This is bnode buffer */
-			mark_buffer_rollup_atomic(clone);
+			mark_buffer_unify_atomic(clone);
			log_bnode_redirect(sb, oldblock, newblock);
-			defer_bfree(&sb->derollup, oldblock, 1);
+			defer_bfree(&sb->deunify, oldblock, 1);
		}

		trace("update parent");
@@ -668,7 +668,7 @@ static void adjust_parent_sep(struct cursor *cursor, int level, __be64 newsep)
		      be64_to_cpu(parent->key), be64_to_cpu(newsep));
		parent->key = newsep;
-		mark_buffer_rollup_non(parent_at->buffer);
+		mark_buffer_unify_non(parent_at->buffer);

		if (parent != level_node(cursor, level)->entries)
			break;
@@ -697,7 +697,7 @@ static void remove_index(struct cursor *cursor, struct chopped_index_info *cii)
	/* Remove an index */
	bnode_remove_index(node, cursor->path[level].next - 1, 1);
	--(cursor->path[level].next);
-	mark_buffer_rollup_non(cursor->path[level].buffer);
+	mark_buffer_unify_non(cursor->path[level].buffer);

	/*
	 * Climb up to common parent and update separating key.
@@ -903,8 +903,8 @@ keep_prev_leaf:
		if (try_bnode_merge(sb, prev[level], buf)) {
			trace(">>> can merge node %p into node %p", buf, prev[level]);
			remove_index(cursor, cii);
-			mark_buffer_rollup_non(prev[level]);
-			blockput_free_rollup(sb, buf);
+			mark_buffer_unify_non(prev[level]);
+			blockput_free_unify(sb, buf);
			goto keep_prev_node;
		}
		blockput(prev[level]);
@@ -941,7 +941,7 @@ chop_root:
		 */
		bfree(sb, bufindex(prev[0]), 1);
		log_bnode_free(sb, bufindex(prev[0]));
-		blockput_free_rollup(sb, prev[0]);
+		blockput_free_unify(sb, prev[0]);

		vecmove(prev, prev + 1, btree->root.depth);
	}
@@ -1027,7 +1027,7 @@ static int insert_leaf(struct cursor *cursor, tuxkey_t childkey, struct buffer_h
		if (!keep)
			at->next++;
		log_bnode_add(sb, bufindex(parentbuf), childblock, childkey);
-		mark_buffer_rollup_non(parentbuf);
+		mark_buffer_unify_non(parentbuf);
		cursor_check(cursor);
		return 0;
	}
@@ -1048,20 +1048,20 @@ static int insert_leaf(struct cursor *cursor, tuxkey_t childkey, struct buffer_h
		int child_is_left = at->next <= parent->entries + half;
		if (!child_is_left) {
			struct index_entry *newnext;
-			mark_buffer_rollup_non(parentbuf);
+			mark_buffer_unify_non(parentbuf);
			newnext = newnode->entries + (at->next - &parent->entries[half]);
			get_bh(newbuf);
			level_replace_blockput(cursor, level, newbuf, newnext);
			parentbuf = newbuf;
			parent = newnode;
		} else
-			mark_buffer_rollup_non(newbuf);
+			mark_buffer_unify_non(newbuf);

		bnode_add_index(parent, at->next, childblock, childkey);
		if (!keep)
			at->next++;
		log_bnode_add(sb, bufindex(parentbuf), childblock, childkey);
-		mark_buffer_rollup_non(parentbuf);
+		mark_buffer_unify_non(parentbuf);

		childkey = newkey;
		childblock = bufindex(newbuf);
@@ -1093,7 +1093,7 @@ static int insert_leaf(struct cursor *cursor, tuxkey_t childkey, struct buffer_h

	btree->root.block = newrootblock;
	btree->root.depth++;
-	mark_buffer_rollup_non(newbuf);
+	mark_buffer_unify_non(newbuf);

	tux3_mark_btree_dirty(btree);
	cursor_check(cursor);
@@ -1222,7 +1222,7 @@ int alloc_empty_btree(struct btree *btree)
	log_bnode_root(sb, rootblock, 1, leafblock, 0, 0);
	log_balloc(sb, leafblock, 1);

-	mark_buffer_rollup_non(rootbuf);
+	mark_buffer_unify_non(rootbuf);
	blockput(rootbuf);
	mark_buffer_dirty_non(leafbuf);
	blockput(leafbuf);
@@ -1290,10 +1290,10 @@ int free_empty_btree(struct btree *btree)
		 */
		bfree(sb, bufindex(rootbuf), 1);
		log_bnode_free(sb, bufindex(rootbuf));
-		blockput_free_rollup(sb, rootbuf);
+		blockput_free_unify(sb, rootbuf);
	} else {
-		defer_bfree(&sb->derollup, bufindex(rootbuf), 1);
-		log_bfree_on_rollup(sb, bufindex(rootbuf), 1);
+		defer_bfree(&sb->deunify, bufindex(rootbuf), 1);
+		log_bfree_on_unify(sb, bufindex(rootbuf), 1);
		blockput(rootbuf);
	}

@@ -1319,7 +1319,7 @@ int replay_bnode_redirect(struct replay *rp, block_t oldblock, block_t newblock)

	assert(bnode_sniff(bufdata(oldbuf)));
	memcpy(bufdata(newbuf), bufdata(oldbuf), bufsize(newbuf));
-	mark_buffer_rollup_atomic(newbuf);
+	mark_buffer_unify_atomic(newbuf);
	blockput(oldbuf);

error_put_newbuf:
@@ -1341,7 +1341,7 @@ int replay_bnode_root(struct replay *rp, block_t root, unsigned count,

	bnode_init_root(bufdata(rootbuf), count, left, right, rkey);

-	mark_buffer_rollup_atomic(rootbuf);
+	mark_buffer_unify_atomic(rootbuf);
	blockput(rootbuf);

	return 0;
@@ -1373,8 +1373,8 @@ int replay_bnode_split(struct replay *rp, block_t src, unsigned pos,

	bnode_split(bufdata(srcbuf), pos, bufdata(dstbuf));

-	mark_buffer_rollup_non(srcbuf);
-	mark_buffer_rollup_atomic(dstbuf);
+	mark_buffer_unify_non(srcbuf);
+	mark_buffer_unify_atomic(dstbuf);
	blockput(dstbuf);

error_put_srcbuf:
@@ -1400,7 +1400,7 @@ static int replay_bnode_change(struct sb *sb, block_t bnodeblock,
	struct bnode *bnode = bufdata(bnodebuf);

	change(bnode, val1, val2);
-	mark_buffer_rollup_non(bnodebuf);
+	mark_buffer_unify_non(bnodebuf);
	blockput(bnodebuf);

	return 0;
@@ -1452,8 +1452,8 @@ int replay_bnode_merge(struct replay *rp, block_t src, block_t dst)

	ret = bnode_merge_nodes(sb, bufdata(dstbuf), bufdata(srcbuf));
	assert(ret == 1);
-	mark_buffer_rollup_non(dstbuf);
-	mark_buffer_rollup_non(srcbuf);
+	mark_buffer_unify_non(dstbuf);
+	mark_buffer_unify_non(srcbuf);
	blockput(dstbuf);

error_put_srcbuf:
diff --git a/fs/tux3/buffer.c b/fs/tux3/buffer.c
index 144dfb38504769..b84b42a6e9f39f 100644
--- a/fs/tux3/buffer.c
+++ b/fs/tux3/buffer.c
@@ -185,9 +185,9 @@ void blockput_free(struct sb *sb, struct buffer_head *buffer)
	__blockput_free(sb, buffer, TUX3_INIT_DELTA);
 }

-void blockput_free_rollup(struct sb *sb, struct buffer_head *buffer)
+void blockput_free_unify(struct sb *sb, struct buffer_head *buffer)
 {
-	__blockput_free(sb, buffer, sb->rollup);
+	__blockput_free(sb, buffer, sb->unify);
 }

 /* Copied from fs/buffer.c */
diff --git a/fs/tux3/buffer.h b/fs/tux3/buffer.h
index e6c31a63fc3ce2..1333b2d129605b 100644
--- a/fs/tux3/buffer.h
+++ b/fs/tux3/buffer.h
@@ -71,7 +71,7 @@ void tux3_set_buffer_dirty(struct address_space *mapping,
			   struct buffer_head *buffer, int delta);
 void tux3_clear_buffer_dirty(struct buffer_head *buffer, unsigned delta);
 void blockput_free(struct sb *sb, struct buffer_head *buffer);
-void blockput_free_rollup(struct sb *sb, struct buffer_head *buffer);
+void blockput_free_unify(struct sb *sb, struct buffer_head *buffer);
 void tux3_invalidate_buffer(struct buffer_head *buffer);

 /* buffer_writeback.c */
diff --git a/fs/tux3/buffer_writeback.c b/fs/tux3/buffer_writeback.c
index 6ee25b04278bd7..ebc114ecca12e8 100644
--- a/fs/tux3/buffer_writeback.c
+++ b/fs/tux3/buffer_writeback.c
@@ -160,9 +160,9 @@ static void bufvec_submit_bio(int rw, struct bufvec *bufvec)
 * We flush all buffers on this page?
 *
 * The page may have the dirty buffer for both of "delta" and
- * "rollup", and we may flush only dirty buffers for "delta". So, if
+ * "unify", and we may flush only dirty buffers for "delta". So, if
 * the page still has the dirty buffer, we should still keep the page
- * dirty for "rollup".
+ * dirty for "unify".
 */
 static int keep_page_dirty(struct bufvec *bufvec, struct page *page)
 {
diff --git a/fs/tux3/commit.c b/fs/tux3/commit.c
index 69e9d193cedffb..0b953ceb577788 100644
--- a/fs/tux3/commit.c
+++ b/fs/tux3/commit.c
@@ -18,10 +18,10 @@ static void schedule_flush_delta(struct sb *sb);

 /*
 * Need frontend modification of backend buffers. (modification
- * after latest delta commit and before rollup).
+ * after latest delta commit and before unify).
 *
 * E.g. frontend modified backend buffers, stage_delta() of when
- * rollup is called.
+ * unify is called.
 */

 #define ALLOW_FRONTEND_MODIFY
@@ -41,8 +41,8 @@ static void init_sb(struct sb *sb)
	INIT_LIST_HEAD(&sb->orphan_add);
	INIT_LIST_HEAD(&sb->orphan_del);
	stash_init(&sb->defree);
-	stash_init(&sb->derollup);
-	INIT_LIST_HEAD(&sb->rollup_buffers);
+	stash_init(&sb->deunify);
+	INIT_LIST_HEAD(&sb->unify_buffers);
	INIT_LIST_HEAD(&sb->alloc_inodes);
	spin_lock_init(&sb->forked_buffers_lock);
@@ -71,7 +71,7 @@ static loff_t calc_maxbytes(loff_t blocksize)

 static void __setup_sb(struct sb *sb, struct disksuper *super)
 {
	sb->next_delta = TUX3_INIT_DELTA;
-	sb->rollup = TUX3_INIT_DELTA;
+	sb->unify = TUX3_INIT_DELTA;
	sb->marshal_delta = TUX3_INIT_DELTA - 1;
	sb->committed_delta = TUX3_INIT_DELTA - 1;
@@ -172,7 +172,7 @@ static int relog_as_bfree(struct sb *sb, u64 val)
	return stash_value(&sb->defree, val);
 }

-/* Obsolete the old rollup, then start the log of new rollup */
+/* Obsolete the old unify, then start the log of new unify */
 static void new_cycle_log(struct sb *sb)
 {
 #if 0 /* ALLOW_FRONTEND_MODIFY */
@@ -186,24 +186,24 @@ static void new_cycle_log(struct sb *sb)
	log_finish(sb);
	log_finish_cycle(sb, 1);
 #endif
-	/* Initialize logcount to count log blocks on new rollup cycle. */
+	/* Initialize logcount to count log blocks on new unify cycle. */
	sb->super.logcount = 0;
 }

 /*
 * Flush a snapshot of the allocation map to disk. Physical blocks for
 * the bitmaps and new or redirected bitmap btree nodes may be allocated
- * during the rollup. Any bitmap blocks that are (re)dirtied by these
- * allocations will be written out in the next rollup cycle.
+ * during the unify. Any bitmap blocks that are (re)dirtied by these
+ * allocations will be written out in the next unify cycle.
 */
-static int rollup_log(struct sb *sb)
+static int unify_log(struct sb *sb)
 {
	/* further block allocations belong to the next cycle */
-	unsigned rollup = sb->rollup++;
+	unsigned unify = sb->unify++;
	LIST_HEAD(orphan_add);
	LIST_HEAD(orphan_del);

-	trace(">>>>>>>>> commit rollup %u", rollup);
+	trace(">>>>>>>>> commit unify %u", unify);

	/*
	 * Orphan inodes are still living, or orphan inodes in
@@ -214,10 +214,10 @@ static int rollup_log(struct sb *sb)
	list_splice_init(&sb->orphan_add, &orphan_add);
	list_splice_init(&sb->orphan_del, &orphan_del);

-	/* This is starting the new rollup cycle of the log */
+	/* This is starting the new unify cycle of the log */
	new_cycle_log(sb);
-	/* Add rollup log as mark of new rollup cycle. */
-	log_rollup(sb);
+	/* Add unify log as mark of new unify cycle. */
+	log_unify(sb);
	/* Log to store freeblocks for flushing bitmap data */
	log_freeblocks(sb, sb->freeblocks);
 #ifdef ALLOW_FRONTEND_MODIFY
@@ -229,34 +229,34 @@ static int rollup_log(struct sb *sb)
	stash_walk(sb, &sb->defree, relog_frontend_defer_as_bfree);
 #endif
	/*
-	 * Re-logging defered bfree blocks after rollup as defered
+	 * Re-logging defered bfree blocks after unify as defered
	 * bfree (LOG_BFREE_RELOG) after delta. With this, we can
-	 * obsolete log records on previous rollup.
+	 * obsolete log records on previous unify.
	 */
-	unstash(sb, &sb->derollup, relog_as_bfree);
+	unstash(sb, &sb->deunify, relog_as_bfree);

	/*
	 * Merge the dirty bnode buffers to volmap dirty list, and
-	 * clean ->rollup_buffers up before dirtying bnode buffers on
-	 * this rollup. Later, bnode blocks will be flushed via
+	 * clean ->unify_buffers up before dirtying bnode buffers on
+	 * this unify. Later, bnode blocks will be flushed via
	 * volmap with leaves.
	 */
-	list_splice_init(&sb->rollup_buffers,
+	list_splice_init(&sb->unify_buffers,
			 tux3_dirty_buffers(sb->volmap, TUX3_INIT_DELTA));

	/*
-	 * tux3_mark_buffer_rollup() doesn't dirty inode, so we make
-	 * sure volmap is dirty for rollup buffers, now.
+	 * tux3_mark_buffer_unify() doesn't dirty inode, so we make
+	 * sure volmap is dirty for unify buffers, now.
	 *
-	 * See comment in tux3_mark_buffer_rollup().
+	 * See comment in tux3_mark_buffer_unify().
	 */
	__tux3_mark_inode_dirty(sb->volmap, I_DIRTY_PAGES);

	/* Flush bitmap */
-	trace("> flush bitmap %u", rollup);
-	tux3_flush_inode_internal(sb->bitmap, rollup);
-	trace("< done bitmap %u", rollup);
+	trace("> flush bitmap %u", unify);
+	tux3_flush_inode_internal(sb->bitmap, unify);
+	trace("< done bitmap %u", unify);

-	trace("> apply orphan inodes %u", rollup);
+	trace("> apply orphan inodes %u", unify);
	{
		int err;
@@ -265,21 +265,21 @@ static int rollup_log(struct sb *sb)
		 * It should be done before adding new orphan, because
		 * orphan_add may have same inum in orphan_del.
		 */
-		err = tux3_rollup_orphan_del(sb, &orphan_del);
+		err = tux3_unify_orphan_del(sb, &orphan_del);
		if (err)
			return err;

		/*
		 * This apply orphan inodes to sb->otree after flushed bitmap.
		 */
-		err = tux3_rollup_orphan_add(sb, &orphan_add);
+		err = tux3_unify_orphan_add(sb, &orphan_add);
		if (err)
			return err;
	}
-	trace("< apply orphan inodes %u", rollup);
+	trace("< apply orphan inodes %u", unify);
	assert(list_empty(&orphan_add));
	assert(list_empty(&orphan_del));

-	trace("<<<<<<<<< commit rollup done %u", rollup);
+	trace("<<<<<<<<< commit unify done %u", unify);
	return 0;
 }
@@ -310,7 +310,7 @@ static int stage_delta(struct sb *sb, unsigned delta)
 static int write_btree(struct sb *sb, unsigned delta)
 {
	/*
-	 * Flush leaves (and if there is rollup, bnodes too) blocks.
+	 * Flush leaves (and if there is unify, bnodes too) blocks.
	 * FIXME: Now we are using TUX3_INIT_DELTA for leaves. Do
	 * we need to per delta dirty buffers?
	 */
@@ -355,13 +355,13 @@ static void post_commit(struct sb *sb, unsigned delta)
	tux3_clear_dirty_inodes(sb, delta);
 }

-static int need_rollup(struct sb *sb)
+static int need_unify(struct sb *sb)
 {
	static unsigned crudehack;
	return !(++crudehack % 3);
 }

-enum rollup_flags { NO_ROLLUP, ALLOW_ROLLUP, FORCE_ROLLUP, };
+enum unify_flags { NO_UNIFY, ALLOW_UNIFY, FORCE_UNIFY, };

 /* For debugging */
 void tux3_start_backend(struct sb *sb)
@@ -382,7 +382,7 @@ int tux3_under_backend(struct sb *sb)
	return current->journal_info == sb;
 }

-static int do_commit(struct sb *sb, enum rollup_flags rollup_flag)
+static int do_commit(struct sb *sb, enum unify_flags unify_flag)
 {
	unsigned delta = sb->marshal_delta;
	struct iowait iowait;
@@ -401,23 +401,23 @@ static int do_commit(struct sb *sb, enum rollup_flags rollup_flag)

	/*
	 * NOTE: This works like modification from frontend. (i.e. this
-	 * may generate defree log which is not committed yet at rollup.)
+	 * may generate defree log which is not committed yet at unify.)
	 *
-	 * - this is before rollup to merge modifications to this
-	 *   rollup, and flush at once for optimization.
+	 * - this is before unify to merge modifications to this
+	 *   unify, and flush at once for optimization.
	 *
	 * - this is required to prevent unexpected buffer state for
	 *   cursor_redirect(). If we applied modification after
-	 *   rollup_log, it made unexpected dirty state (i.e. leaf is
+	 *   unify_log, it made unexpected dirty state (i.e. leaf is
	 *   still dirty, but parent was already cleaned.)
	 */
	err = stage_delta(sb, delta);
	if (err)
		return err;

-	if ((rollup_flag == ALLOW_ROLLUP && need_rollup(sb)) ||
-	    rollup_flag == FORCE_ROLLUP) {
-		err = rollup_log(sb);
+	if ((unify_flag == ALLOW_UNIFY && need_unify(sb)) ||
+	    unify_flag == FORCE_UNIFY) {
+		err = unify_log(sb);
		if (err)
			return err;
@@ -457,15 +457,15 @@
 static int flush_delta(struct sb *sb)
 {
	unsigned delta = sb->marshal_delta;
	int err;
-#ifndef ROLLUP_DEBUG
-	enum rollup_flags rollup_flag = ALLOW_ROLLUP;
+#ifndef UNIFY_DEBUG
+	enum unify_flags unify_flag = ALLOW_UNIFY;
 #else
	struct delta_ref *delta_ref = sb->pending_delta;
-	enum rollup_flags rollup_flag = delta_ref->rollup_flag;
+	enum unify_flags unify_flag = delta_ref->unify_flag;
	sb->pending_delta = NULL;
 #endif

-	err = do_commit(sb, rollup_flag);
+	err = do_commit(sb, unify_flag);

	sb->committed_delta = delta;
	clear_bit(TUX3_COMMIT_RUNNING_BIT, &sb->backend_state);
@@ -521,8 +521,8 @@ static void __delta_transition(struct sb *sb, struct delta_ref *delta_ref)
	atomic_set(&delta_ref->refcount, 1);
	/* Assign the delta number */
	delta_ref->delta = sb->next_delta++;
-#ifdef ROLLUP_DEBUG
-	delta_ref->rollup_flag = ALLOW_ROLLUP;
+#ifdef UNIFY_DEBUG
+	delta_ref->unify_flag = ALLOW_UNIFY;
 #endif

	/*
@@ -558,7 +558,7 @@ static void delta_transition(struct sb *sb)

	/* Set delta for marshal */
	sb->marshal_delta = prev->delta;
-#ifdef ROLLUP_DEBUG
+#ifdef UNIFY_DEBUG
	sb->pending_delta = prev;
 #endif

@@ -584,14 +584,14 @@ static void delta_transition(struct sb *sb)
 #include "commit_flusher.c"
 #include "commit_flusher_hack.c"

-int force_rollup(struct sb *sb)
+int force_unify(struct sb *sb)
 {
-	return sync_current_delta(sb, FORCE_ROLLUP);
+	return sync_current_delta(sb, FORCE_UNIFY);
 }

 int force_delta(struct sb *sb)
 {
-	return sync_current_delta(sb, NO_ROLLUP);
+	return sync_current_delta(sb, NO_UNIFY);
 }

 unsigned tux3_get_current_delta(void)
@@ -601,7 +601,7 @@ unsigned tux3_get_current_delta(void)
	return delta_ref->delta;
 }

-/* Choice sb->delta or sb->rollup from inode */
+/* Choice sb->delta or sb->unify from inode */
 unsigned tux3_inode_delta(struct inode *inode)
 {
	unsigned delta;
@@ -611,15 +611,15 @@ unsigned tux3_inode_delta(struct inode *inode)
	case TUX_LOGMAP_INO:
		/*
		 * Note: volmap are special, and has both of
-		 * TUX3_INIT_DELTA and sb->rollup. So TUX3_INIT_DELTA
+		 * TUX3_INIT_DELTA and sb->unify. So TUX3_INIT_DELTA
		 * can be incorrect if delta was used for buffer.
		 * Note: logmap is similar to volmap, but it doesn't
-		 * have sb->rollup buffers.
+		 * have sb->unify buffers.
		 */
		delta = TUX3_INIT_DELTA;
		break;
	case TUX_BITMAP_INO:
-		delta = tux_sb(inode->i_sb)->rollup;
+		delta = tux_sb(inode->i_sb)->unify;
		break;
	default:
		delta = tux3_get_current_delta();
diff --git a/fs/tux3/commit_flusher.c b/fs/tux3/commit_flusher.c
index 25d8ac30e0c272..3baa9103932e84 100644
--- a/fs/tux3/commit_flusher.c
+++ b/fs/tux3/commit_flusher.c
@@ -163,7 +163,7 @@ static int wait_for_commit(struct sb *sb, unsigned delta)
			  try_flush_pending_until_delta(sb, delta));
 }

-static int sync_current_delta(struct sb *sb, enum rollup_flags rollup_flag)
+static int sync_current_delta(struct sb *sb, enum unify_flags unify_flag)
 {
	struct delta_ref *delta_ref;
	unsigned delta;
@@ -174,8 +174,8 @@ static int sync_current_delta(struct sb *sb, enum rollup_flags rollup_flag)
 #endif
	/* Get delta that have to write */
	delta_ref = delta_get(sb);
-#ifdef ROLLUP_DEBUG
-	delta_ref->rollup_flag = rollup_flag;
+#ifdef UNIFY_DEBUG
+	delta_ref->unify_flag = unify_flag;
 #endif
	delta = delta_ref->delta;
	delta_put(sb, delta_ref);
diff --git a/fs/tux3/commit_flusher_hack.c b/fs/tux3/commit_flusher_hack.c
index 651b693fc35c14..47da09fd23d14b 100644
--- a/fs/tux3/commit_flusher_hack.c
+++ b/fs/tux3/commit_flusher_hack.c
@@ -124,9 +124,9 @@ static long tux3_wb_writeback(struct bdi_writeback *wb,

	/* Get delta that have to write */
	delta_ref = delta_get(sb);
-#ifdef ROLLUP_DEBUG
-	/* NO_ROLLUP and FORCE_ROLLUP are not supported for now */
-	delta_ref->rollup_flag = ALLOW_ROLLUP;
+#ifdef UNIFY_DEBUG
+	/* NO_UNIFY and FORCE_UNIFY are not supported for now */
+	delta_ref->unify_flag = ALLOW_UNIFY;
 #endif
	delta = delta_ref->delta;
	delta_put(sb, delta_ref);
@@ -467,10 +467,10 @@ static void try_delta_transition(struct sb *sb)
 #endif
 }

-static int sync_current_delta(struct sb *sb, enum rollup_flags rollup_flag)
+static int sync_current_delta(struct sb *sb, enum unify_flags unify_flag)
 {
-	/* FORCE_ROLLUP is not supported */
-	WARN_ON(rollup_flag == FORCE_ROLLUP);
+	/* FORCE_UNIFY is not supported */
+	WARN_ON(unify_flag == FORCE_UNIFY);
	/* This is called only for fsync, so we can take ->s_umount here */
	down_read(&vfs_sb(sb)->s_umount);
	sync_inodes_sb(vfs_sb(sb));
diff --git a/fs/tux3/dirty-buffer.h b/fs/tux3/dirty-buffer.h
index 94433b6d142f50..fd131a9e9becaf 100644
--- a/fs/tux3/dirty-buffer.h
+++ b/fs/tux3/dirty-buffer.h
@@ -7,7 +7,7 @@
 * FIXME: this is for debug and information until complete
 * atomic-commit. Remove this after atomic-commit
 *
- * FIXME: mark_buffer_rollup() would be bad name
+ * FIXME: mark_buffer_unify() would be bad name
 */

 /* mark buffer dirty if atomic-commit */
@@ -22,14 +22,14 @@ static inline void mark_buffer_dirty_non(struct buffer_head *buffer)
	assert(buffer_dirty(buffer));
 }

-/* mark buffer dirty for rollup cycle if atomic-commit style */
-static inline void mark_buffer_rollup_atomic(struct buffer_head *buffer)
+/* mark buffer dirty for unify cycle if atomic-commit style */
+static inline void mark_buffer_unify_atomic(struct buffer_head *buffer)
 {
-	tux3_mark_buffer_rollup(buffer);
+	tux3_mark_buffer_unify(buffer);
 }

-/* mark buffer dirty for rollup cycle if non atomic-commit style */
-static inline void mark_buffer_rollup_non(struct buffer_head *buffer)
+/* mark buffer dirty for unify cycle if non atomic-commit style */
+static inline void mark_buffer_unify_non(struct buffer_head *buffer)
 {
	assert(buffer_dirty(buffer));
 }
diff --git a/fs/tux3/filemap.c b/fs/tux3/filemap.c
index 3de6bb98078614..5a5dddba4e7d16 100644
--- a/fs/tux3/filemap.c
+++ b/fs/tux3/filemap.c
@@ -14,9 +14,9 @@
 * down_write(itree: btree->lock) (alloc_inum, save_inode, purge_inode)
 * down_read(itree: btree->lock) (open_inode)
 *
- * down_write(otree: btree->lock) (tux3_rollup_orphan_add,
- *                                 tux3_rollup_orphan_del,
- *                                 load_otree_orphan)
+ * down_write(otree: btree->lock) (tux3_unify_orphan_add,
+ *                                 tux3_unify_orphan_del,
+ *                                 load_otree_orphan)
 *
 * down_write(inode: btree->lock) (btree_chop, map_region for write)
 * down_read(inode: btree->lock) (map_region for read)
@@ -74,8 +74,8 @@ static int map_bfree(struct inode *inode, block_t block, unsigned count)
 {
	struct sb *sb = tux_sb(inode->i_sb);
	if (inode == sb->bitmap) {
-		log_bfree_on_rollup(sb, block, count);
-		defer_bfree(&sb->derollup, block, count);
+		log_bfree_on_unify(sb, block, count);
+		defer_bfree(&sb->deunify, block, count);
	} else {
		log_bfree(sb, block, count);
		defer_bfree(&sb->defree, block, count);
diff --git a/fs/tux3/inode.c b/fs/tux3/inode.c
index ec0c04cf67111c..923742074094c4 100644
--- a/fs/tux3/inode.c
+++ b/fs/tux3/inode.c
@@ -1039,12 +1039,12 @@ static void tux_setup_inode(struct inode *inode)

		/*
		 * FIXME: volmap inode is not always dirty. Because
-		 * tux3_mark_buffer_rollup() doesn't mark tuxnode->flags
+		 * tux3_mark_buffer_unify() doesn't mark tuxnode->flags
		 * as dirty. But, it marks inode->i_state as dirty,
		 * so this is called to prevent to add inode into
-		 * dirty list by replay for rollup.
+		 * dirty list by replay for unify.
		 *
-		 * See, FIXME in tux3_mark_buffer_rollup().
+		 * See, FIXME in tux3_mark_buffer_unify().
		 */
		if (inum == TUX_BITMAP_INO || inum == TUX_VOLMAP_INO)
			tux3_set_inode_always_dirty(inode);
diff --git a/fs/tux3/log.c b/fs/tux3/log.c
index 9f06f72b5734f7..8932c53b4f1097 100644
--- a/fs/tux3/log.c
+++ b/fs/tux3/log.c
@@ -23,7 +23,7 @@
 * instead there is a reverse chain starting from sb->logchain. Log blocks
 * are read only at replay on mount and written only at delta transition.
 *
- * - sb->super.logcount: count of log blocks in rollup cycle
+ * - sb->super.logcount: count of log blocks in unify cycle
 * - sb->lognext: Logmap index of next log block in delta cycle
 * - sb->logpos/logtop: Pointer/limit to write next log entry
 * - sb->logbuf: Cached log block referenced by logpos/logtop
@@ -34,14 +34,14 @@
 *
 * - At delta commit, count of log blocks is recorded in superblock
 *   (later, metablock) which are the log blocks for the current
-  *   rollup cycle.
+  *   unify cycle.
 *
- * - On delta completion, if log was rolluped in current delta then log blocks
- *   are freed for reuse. Log blocks to be freed are recorded in sb->derollup,
+ * - On delta completion, if log was unified in current delta then log blocks
+ *   are freed for reuse. Log blocks to be freed are recorded in sb->deunify,
 *   which is appended to sb->defree, the per-delta deferred free list at log
 *   flush time.
 *
- * - On replay, sb.super->logcount log blocks for current rollup cycle are
+ * - On replay, sb.super->logcount log blocks for current unify cycle are
 *   loaded in reverse order into logmap, using the log block reverse chain
 *   pointers.
 *
@@ -66,7 +66,7 @@
 unsigned log_size[] = {
	[LOG_BALLOC] = 11,
	[LOG_BFREE] = 11,
-	[LOG_BFREE_ON_ROLLUP] = 11,
+	[LOG_BFREE_ON_UNIFY] = 11,
	[LOG_BFREE_RELOG] = 11,
	[LOG_LEAF_REDIRECT] = 13,
	[LOG_LEAF_FREE] = 7,
@@ -82,7 +82,7 @@ unsigned log_size[] = {
	[LOG_ORPHAN_ADD] = 9,
	[LOG_ORPHAN_DEL] = 9,
	[LOG_FREEBLOCKS] = 7,
-	[LOG_ROLLUP] = 1,
+	[LOG_UNIFY] = 1,
	[LOG_DELTA] = 1,
 };

@@ -211,12 +211,12 @@ int tux3_logmap_io(int rw, struct bufvec *bufvec)
		}

		/*
-		 * We can obsolete the log blocks after next rollup
+		 * We can obsolete the log blocks after next unify
		 * by LOG_BFREE_RELOG.
		 */
-		defer_bfree(&sb->derollup, seg.block, seg.count);
+		defer_bfree(&sb->deunify, seg.block, seg.count);

-		/* Add count of log on this delta to rollup logcount */
+		/* Add count of log on this delta to unify logcount */
		be32_add_cpu(&sb->super.logcount, seg.count);

		count -= seg.count;
@@ -284,7 +284,7 @@ static void log_u48_u48_u48(struct sb *sb, u8 intent, u64 v1, u64 v2, u64 v3)
	log_end(sb, encode48(data, v3));
 }

-/* balloc() until next rollup */
+/* balloc() until next unify */
 void log_balloc(struct sb *sb, block_t block, unsigned count)
 {
	/* FIXME: 32bits count is too big? */
@@ -298,14 +298,14 @@ void log_bfree(struct sb *sb, block_t block, unsigned count)
	log_u32_u48(sb, LOG_BFREE, count, block);
 }

-/* Defered bfree() until after next rollup */
-void log_bfree_on_rollup(struct sb *sb, block_t block, unsigned count)
+/* Defered bfree() until after next unify */
+void log_bfree_on_unify(struct sb *sb, block_t block, unsigned count)
 {
	/* FIXME: 32bits count is too big? */
-	log_u32_u48(sb, LOG_BFREE_ON_ROLLUP, count, block);
+	log_u32_u48(sb, LOG_BFREE_ON_UNIFY, count, block);
 }

-/* Same with log_bfree() (re-logged log_bfree_on_rollup() on rollup) */
+/* Same with log_bfree() (re-logged log_bfree_on_unify() on unify) */
 void log_bfree_relog(struct sb *sb, block_t block, unsigned count)
 {
	/* FIXME: 32bits count is too big? */
@@ -313,7 +313,7 @@ void log_bfree_relog(struct sb *sb, block_t block, unsigned count)
 }

 /*
- * 1. balloc(newblock) until next rollup
+ * 1. balloc(newblock) until next unify
 * 2. bfree(oldblock)
 */
 void log_leaf_redirect(struct sb *sb, block_t oldblock, block_t newblock)
@@ -329,8 +329,8 @@ void log_leaf_free(struct sb *sb, block_t leaf)
 /*
 * 1. Redirect from oldblock to newblock
- * 2. balloc(newblock) until next rollup
- * 2. Defered bfree(oldblock) until after next rollup
+ * 2. balloc(newblock) until next unify
+ * 2. Defered bfree(oldblock) until after next unify
 */
 void log_bnode_redirect(struct sb *sb, block_t oldblock, block_t newblock)
 {
	log_u48_u48(sb, LOG_BNODE_REDIRECT, oldblock, newblock);
@@ -338,8 +338,8 @@ void log_bnode_redirect(struct sb *sb, block_t oldblock, block_t newblock)
 }

 /*
- * 1. Construct root buffer until next rollup
- * 2. balloc(root) until next rollup
+ * 1. Construct root buffer until next unify
+ * 2. balloc(root) until next unify
 */
 /* The left key should always be 0 on new root */
 void log_bnode_root(struct sb *sb, block_t root, unsigned count,
@@ -356,8 +356,8 @@ void log_bnode_root(struct sb *sb, block_t root, unsigned count,
 }

 /*
- * 1. Split bnode from src to dst until next rollup
- * 2. balloc(dst) until next rollup
+ * 1. Split bnode from src to dst until next unify
+ * 2. balloc(dst) until next unify
 * (src buffer must be dirty already)
 */
 void log_bnode_split(struct sb *sb, block_t src, unsigned pos, block_t dst)
@@ -366,7 +366,7 @@ void log_bnode_split(struct sb *sb, block_t src, unsigned pos, block_t dst)
 }

 /*
- * Insert new index (child, key) to parent until next rollup
+ * Insert new index (child, key) to parent until next unify
 * (parent buffer must be dirty already)
 */
 void log_bnode_add(struct sb *sb, block_t parent, block_t child, tuxkey_t key)
@@ -375,7 +375,7 @@ void log_bnode_add(struct sb *sb, block_t parent, block_t child, tuxkey_t key)
 }

 /*
- * Update ->block of "key" index by child on parent until next rollup
+ * Update ->block of "key" index by child on parent until next unify
 * (parent buffer must be dirty already)
 */
 void log_bnode_update(struct sb *sb, block_t parent, block_t child, tuxkey_t key)
@@ -384,7 +384,7 @@ void log_bnode_update(struct sb *sb, block_t parent, block_t child, tuxkey_t key
 }

 /*
- * 1. Merge btree nodes from src to dst until next rollup
+ * 1. Merge btree nodes from src to dst until next unify
 * 2. bfree(src) (but this is for canceling log_bnode_redirect())
 * 3. Clear dirty of src buffer
 * (src and dst buffers must be dirty already)
@@ -395,7 +395,7 @@ void log_bnode_merge(struct sb *sb, block_t src, block_t dst)
 }

 /*
- * Delete indexes specified by (key, count) in bnode until next rollup
+ * Delete indexes specified by (key, count) in bnode until next unify
 * (bnode buffer must be dirty already)
 */
 void log_bnode_del(struct sb *sb, block_t bnode, tuxkey_t key, unsigned count)
@@ -404,7 +404,7 @@ void log_bnode_del(struct sb *sb, block_t bnode, tuxkey_t key, unsigned count)
 }

 /*
- * Adjust ->key of index specified by "from" to "to" until next rollup
+ * Adjust ->key of index specified by "from" to "to" until next unify
 * (bnode buffer must be dirty already)
 */
 void log_bnode_adjust(struct sb *sb, block_t bnode, tuxkey_t from, tuxkey_t to)
@@ -440,16 +440,16 @@ void log_orphan_del(struct sb *sb, unsigned version, tuxkey_t inum)
	log_u16_u48(sb, LOG_ORPHAN_DEL, version, inum);
 }

-/* Current freeblocks on rollup */
+/* Current freeblocks on unify */
 void log_freeblocks(struct sb *sb, block_t freeblocks)
 {
	log_u48(sb, LOG_FREEBLOCKS, freeblocks);
 }

-/* Log to know where is new rollup cycle */
-void log_rollup(struct sb *sb)
+/* Log to know where is new unify cycle */
+void log_unify(struct sb *sb)
 {
-	log_intent(sb, LOG_ROLLUP);
+	log_intent(sb, LOG_UNIFY);
 }

 /* Just add log record as delta mark (for debugging) */
diff --git a/fs/tux3/orphan.c b/fs/tux3/orphan.c
index 4e9bd6c5b13526..bb43192b44fd90 100644
--- a/fs/tux3/orphan.c
+++ b/fs/tux3/orphan.c
@@ -7,7 +7,7 @@
 * by this.
 *
 * However, if the orphan is long life, it can make log blocks too long.
- * So, to prevent it, if orphan inodes are still living until rollup, we
+ * So, to prevent it, if orphan inodes are still living until unify, we
 * store those inum into sb->otree. With it, we can obsolete log blocks.
 *
 * On replay, we can know the inum of orphan inodes yet not destroyed by
@@ -89,7 +89,7 @@ struct ileaf_attr_ops oattr_ops = {
 };

 /* Add inum into sb->otree */
-int tux3_rollup_orphan_add(struct sb *sb, struct list_head *orphan_add)
+int tux3_unify_orphan_add(struct sb *sb, struct list_head *orphan_add)
 {
	struct btree *otree = otree_btree(sb);
	struct cursor *cursor;
@@ -143,7 +143,7 @@ out:
 }

 /* Delete inum from sb->otree */
-int tux3_rollup_orphan_del(struct sb *sb, struct list_head *orphan_del)
+int tux3_unify_orphan_del(struct sb *sb, struct list_head *orphan_del)
 {
	struct btree *otree = otree_btree(sb);
	int err;
@@ -168,7 +168,7 @@ int tux3_rollup_orphan_del(struct sb *sb, struct list_head *orphan_del)

 /*
 * Make inode as orphan, and logging it. Then if orphan is living until
- * rollup, orphan will be written to sb->otree.
+ * unify, orphan will be written to sb->otree.
 */
 int tux3_make_orphan_add(struct inode *inode)
 {
diff --git a/fs/tux3/replay.c b/fs/tux3/replay.c
index 41b83bebdd3abb..d0ccc5f6e2fd25 100644
--- a/fs/tux3/replay.c
+++ b/fs/tux3/replay.c
@@ -12,7 +12,7 @@ static const char *log_name[] = {
 #define X(x)	[x] = #x
	X(LOG_BALLOC),
	X(LOG_BFREE),
-	X(LOG_BFREE_ON_ROLLUP),
+	X(LOG_BFREE_ON_UNIFY),
	X(LOG_BFREE_RELOG),
	X(LOG_LEAF_REDIRECT),
	X(LOG_LEAF_FREE),
@@ -28,7 +28,7 @@ static const char *log_name[] = {
	X(LOG_ORPHAN_ADD),
	X(LOG_ORPHAN_DEL),
	X(LOG_FREEBLOCKS),
-	X(LOG_ROLLUP),
+	X(LOG_UNIFY),
	X(LOG_DELTA),
 #undef X
 };
@@ -42,8 +42,8 @@ static struct replay *alloc_replay(struct sb *sb, unsigned logcount)
		return ERR_PTR(-ENOMEM);

	rp->sb = sb;
-	rp->rollup_pos = NULL;
-	rp->rollup_index = -1;
+	rp->unify_pos = NULL;
+	rp->unify_index = -1;
	memset(rp->blocknrs, 0, logcount * sizeof(block_t));

	INIT_LIST_HEAD(&rp->log_orphan_add);
@@ -77,13 +77,13 @@ static int replay_check_log(struct replay *rp, struct buffer_head *logbuf)
	while (data < log->data + be16_to_cpu(log->bytes)) {
		u8 code = *data;

-		/* Find latest rollup. */
-		if (code == LOG_ROLLUP && rp->rollup_index == -1) {
-			rp->rollup_pos = data;
+		/* Find latest unify. */
+		if (code == LOG_UNIFY && rp->unify_index == -1) {
+			rp->unify_pos = data;
			/* FIXME: index is unnecessary to use. We just
-			 * want to know whether before or after rollup
+			 * want to know whether before or after unify
			 * mark. */
-			rp->rollup_index = bufindex(logbuf);
+			rp->unify_index = bufindex(logbuf);
		}

		if (log_size[code] == 0) {
@@ -188,13 +188,13 @@ static int replay_log_stage1(struct replay *rp, struct buffer_head *logbuf)
	/* Check whether array is uptodate */
	BUILD_BUG_ON(ARRAY_SIZE(log_name) != LOG_TYPES);

-	/* If log is before latest rollup, those were already applied to FS. */
-	if (bufindex(logbuf) < rp->rollup_index) {
+	/* If log is before latest unify, those were already applied to FS. */
+	if (bufindex(logbuf) < rp->unify_index) {
		// assert(0);	/* older logs should already be freed */
		return 0;
	}
-	if (bufindex(logbuf) == rp->rollup_index)
-		data = rp->rollup_pos;
+	if (bufindex(logbuf) == rp->unify_index)
+		data = rp->unify_pos;

	while (data < log->data + be16_to_cpu(log->bytes)) {
		u8 code = *data++;
@@ -309,14 +309,14 @@ static int replay_log_stage1(struct replay *rp, struct buffer_head *logbuf)
		}
		case LOG_BALLOC:
		case LOG_BFREE:
-		case LOG_BFREE_ON_ROLLUP:
+		case LOG_BFREE_ON_UNIFY:
		case LOG_BFREE_RELOG:
		case LOG_LEAF_REDIRECT:
		case LOG_LEAF_FREE:
		case LOG_BNODE_FREE:
		case LOG_ORPHAN_ADD:
		case LOG_ORPHAN_DEL:
-		case LOG_ROLLUP:
+		case LOG_UNIFY:
		case LOG_DELTA:
			data += log_size[code] - sizeof(code);
			break;
@@ -339,31 +339,31 @@ static int replay_log_stage2(struct replay *rp, struct buffer_head *logbuf)

	/*
	 * Log block address itself works as balloc log, and adjust
-	 * bitmap and derollup even if logblocks is before latest
-	 * rollup, to prevent to be overwritten. (This must be after
+	 * bitmap and deunify even if logblocks is before latest
+	 * unify, to prevent to be overwritten. (This must be after
	 * LOG_FREEBLOCKS replay if there is it.)
	 */
	trace("LOG BLOCK: logblock %Lx", blocknr);
	err = replay_update_bitmap(rp, blocknr, 1, 1);
	if (err)
		return err;

-	/* Mark log block as derollup block */
-	defer_bfree(&sb->derollup, blocknr, 1);
+	/* Mark log block as deunify block */
+	defer_bfree(&sb->deunify, blocknr, 1);

-	/* If log is before latest rollup, those were already applied to FS. */
-	if (bufindex(logbuf) < rp->rollup_index) {
+	/* If log is before latest unify, those were already applied to FS. */
+	if (bufindex(logbuf) < rp->unify_index) {
		// assert(0);	/* older logs should already be freed */
		return 0;
	}
-	if (bufindex(logbuf) == rp->rollup_index)
-		data = rp->rollup_pos;
+	if (bufindex(logbuf) == rp->unify_index)
+		data = rp->unify_pos;

	while (data < log->data + be16_to_cpu(log->bytes)) {
		u8 code = *data++;
		switch (code) {
		case LOG_BALLOC:
		case LOG_BFREE:
-		case LOG_BFREE_ON_ROLLUP:
+		case LOG_BFREE_ON_UNIFY:
		case LOG_BFREE_RELOG: {
			u64 block;
@@ -376,8 +376,8 @@ static int replay_log_stage2(struct replay *rp, struct buffer_head *logbuf)
			err = 0;
			if (code == LOG_BALLOC)
				err = replay_update_bitmap(rp, block, count, 1);
-			else if (code == LOG_BFREE_ON_ROLLUP)
-				defer_bfree(&sb->derollup, block, count);
+			else if (code == LOG_BFREE_ON_UNIFY)
+				defer_bfree(&sb->deunify, block, count);
			else
				err = replay_update_bitmap(rp, block, count, 0);
			if (err)
@@ -401,7 +401,7 @@ static int replay_log_stage2(struct replay *rp, struct buffer_head *logbuf)
					return err;
			} else {
				/* newblock is not flushing yet */
-				defer_bfree(&sb->derollup, oldblock, 1);
+				defer_bfree(&sb->deunify, oldblock, 1);
			}
			break;
		}
@@ -418,7 +418,7 @@ static int replay_log_stage2(struct replay *rp, struct buffer_head *logbuf)
			if (code == LOG_BNODE_FREE) {
				struct buffer_head *buffer =
					vol_find_get_block(sb, block);
-				blockput_free_rollup(sb, buffer);
+				blockput_free_unify(sb, buffer);
			}
			break;
		}
@@ -464,7 +464,7 @@ static int replay_log_stage2(struct replay *rp, struct buffer_head *logbuf)
			if (err)
				return err;

-			blockput_free_rollup(sb, vol_find_get_block(sb, src));
+			blockput_free_unify(sb, vol_find_get_block(sb, src));
			break;
		}
		case LOG_ORPHAN_ADD:
@@ -489,7 +489,7 @@ static int replay_log_stage2(struct replay *rp, struct buffer_head *logbuf)
		case LOG_BNODE_UPDATE:
		case LOG_BNODE_DEL:
		case LOG_BNODE_ADJUST:
-		case LOG_ROLLUP:
+		case LOG_UNIFY:
		case LOG_DELTA:
			data += log_size[code] - sizeof(code);
			break;
@@ -510,7 +510,7 @@ static int replay_logblocks(struct replay *rp, replay_log_t replay_log_func)

	sb->lognext = 0;
	while (sb->lognext < logcount) {
-		trace("log block %i, blocknr %Lx, rollup %Lx", sb->lognext, rp->blocknrs[sb->lognext], rp->rollup_index);
+		trace("log block %i, blocknr %Lx, unify %Lx", sb->lognext, rp->blocknrs[sb->lognext], rp->unify_index);
		log_next(sb);
		err = replay_log_func(rp, sb->logbuf);
		log_drop(sb);
diff --git a/fs/tux3/super.c b/fs/tux3/super.c
index 7dc70025318630..cad9e49812fca9 100644
--- a/fs/tux3/super.c
+++ b/fs/tux3/super.c
@@ -59,21 +59,21 @@ static void cleanup_dirty_inode(struct inode *inode)
 */
 static void cleanup_dirty_for_umount(struct sb *sb)
 {
-	unsigned rollup = sb->rollup;
+	unsigned unify = sb->unify;

	/*
	 * Pinned buffer and bitmap are not flushing always, it is
	 * normal. So, this clean those for unmount.
	 */
	if (sb->bitmap) {
-		struct list_head *head = tux3_dirty_buffers(sb->bitmap, rollup);
-		cleanup_dirty_buffers(sb->bitmap, head, rollup);
+		struct list_head *head = tux3_dirty_buffers(sb->bitmap, unify);
+		cleanup_dirty_buffers(sb->bitmap, head, unify);
		cleanup_dirty_inode(sb->bitmap);
	}
	if (sb->volmap) {
-		cleanup_dirty_buffers(sb->volmap, &sb->rollup_buffers, rollup);
+		cleanup_dirty_buffers(sb->volmap, &sb->unify_buffers, unify);
		/*
-		 * FIXME: mark_buffer_dirty() for rollup buffers marks
+		 * FIXME: mark_buffer_dirty() for unify buffers marks
		 * volmap as I_DIRTY_PAGES (we don't need I_DIRTY_PAGES
		 * actually) without changing tuxnode->flags.
		 *
@@ -98,7 +98,7 @@ static void __tux3_put_super(struct sb *sbi)
	/* All forked buffers should be freed here */
	free_forked_buffers(sbi, NULL, 1);

-	destroy_defer_bfree(&sbi->derollup);
+	destroy_defer_bfree(&sbi->deunify);
	destroy_defer_bfree(&sbi->defree);

	iput(sbi->rootdir);
diff --git a/fs/tux3/tux3.h b/fs/tux3/tux3.h
index 145364a81db216..be511584b5148a 100644
--- a/fs/tux3/tux3.h
+++ b/fs/tux3/tux3.h
@@ -216,8 +216,8 @@ struct stash { struct flink_head head; u64 *pos, *top; };

 struct delta_ref {
	atomic_t refcount;
	unsigned delta;
-#ifdef ROLLUP_DEBUG
-	int rollup_flag;	/* FIXME: is there better way? */
+#ifdef UNIFY_DEBUG
+	int unify_flag;		/* FIXME: is there better way? */
 #endif
 };

@@ -239,12 +239,12 @@ struct sb {
	struct delta_ref __rcu *current_delta;	/* current delta */
	struct delta_ref delta_refs[TUX3_MAX_DELTA];
	unsigned next_delta;		/* delta commit cycle */
-	unsigned rollup;		/* log rollup cycle */
+	unsigned unify;			/* log unify cycle */

 #define TUX3_COMMIT_RUNNING_BIT		0
 #define TUX3_COMMIT_PENDING_BIT		1
	unsigned long backend_state;	/* delta state */
-#ifdef ROLLUP_DEBUG
+#ifdef UNIFY_DEBUG
	struct delta_ref *pending_delta; /* pending delta for commit */
 #endif
	unsigned marshal_delta;		/* marshaling delta */
@@ -292,9 +292,9 @@ struct sb {
	struct list_head orphan_del;	/* defered orphan inode del list */

	struct stash defree;		/* defer extent frees until after delta */
-	struct stash derollup;		/* defer extent frees until after rollup */
+	struct stash deunify;		/* defer extent frees until after unify */

-	struct list_head rollup_buffers; /* dirty metadata flushed at rollup */
+	struct list_head unify_buffers;	/* dirty metadata flushed at unify */

	struct iowait *iowait;		/* helper for waiting I/O */

@@ -346,8 +346,8 @@ struct logblock {
 enum {
	LOG_BALLOC = 0x33,	/* Log of block allocation */
	LOG_BFREE,		/* Log of freeing block after delta */
-	LOG_BFREE_ON_ROLLUP,	/* Log of freeing block after rollup */
-	LOG_BFREE_RELOG,	/* LOG_BFREE, but re-log of free after rollup */
+	LOG_BFREE_ON_UNIFY,	/* Log of freeing block after unify */
+	LOG_BFREE_RELOG,	/* LOG_BFREE, but re-log of free after unify */
	LOG_LEAF_REDIRECT,	/* Log of leaf redirect */
	LOG_LEAF_FREE,		/* Log of freeing leaf */
	LOG_BNODE_REDIRECT,	/* Log of bnode redirect */
@@ -361,8 +361,8 @@ enum {
	LOG_BNODE_FREE,		/* Log of freeing bnode */
	LOG_ORPHAN_ADD,		/* Log of adding orphan inode */
	LOG_ORPHAN_DEL,		/* Log of deleting orphan inode */
-	LOG_FREEBLOCKS,		/* Log of freeblocks in bitmap on rollup */
-	LOG_ROLLUP,		/* Log of marking rollup */
+	LOG_FREEBLOCKS,		/* Log of freeblocks in bitmap on unify */
+	LOG_UNIFY,		/* Log of marking unify */
	LOG_DELTA,		/* just for debugging */
	LOG_TYPES
 };
@@ -591,8 +591,8 @@ struct replay {
	struct list_head orphan_in_otree; /* Orphan inodes in sb->otree */

	/* For replay.c */
-	void *rollup_pos;	/* position of rollup log in a log block */
-	block_t rollup_index;	/* index of a log block including rollup log */
+	void *unify_pos;	/* position of unify log in a log block */
+	block_t unify_index;	/* index of a log block including unify log */
	block_t blocknrs[];	/* block address of log blocks */
 };

@@ -744,7 +744,7 @@ int apply_defered_bfree(struct sb *sb, u64 val);
 void tux3_start_backend(struct sb *sb);
 void tux3_end_backend(void);
 int tux3_under_backend(struct sb *sb);
-int force_rollup(struct sb *sb);
+int force_unify(struct sb *sb);
 int force_delta(struct sb *sb);
 unsigned tux3_get_current_delta(void);
 unsigned tux3_inode_delta(struct inode *inode);
@@ -840,7 +840,7 @@ void log_finish_cycle(struct sb *sb, int discard);
 int tux3_logmap_io(int rw, struct bufvec *bufvec);
 void log_balloc(struct sb *sb, block_t block, unsigned count);
 void log_bfree(struct sb *sb, block_t block, unsigned count);
-void log_bfree_on_rollup(struct sb *sb, block_t block, unsigned count);
+void log_bfree_on_unify(struct sb *sb, block_t block, unsigned count);
 void log_bfree_relog(struct sb *sb, block_t block, unsigned count);
 void log_leaf_redirect(struct sb *sb, block_t oldblock, block_t newblock);
 void log_leaf_free(struct sb *sb, block_t leaf);
@@ -859,7 +859,7 @@ void log_orphan_add(struct sb *sb, unsigned version, tuxkey_t inum);
 void log_orphan_del(struct sb *sb, unsigned version, tuxkey_t inum);
 void log_freeblocks(struct sb *sb, block_t freeblocks);
 void log_delta(struct sb *sb);
-void log_rollup(struct sb *sb);
+void log_unify(struct sb *sb);

 typedef int (*unstash_t)(struct sb *sb, u64 val);
 void stash_init(struct stash *stash);
@@ -872,8 +872,8 @@ void destroy_defer_bfree(struct stash *defree);
 /* orphan.c */
 void clean_orphan_list(struct list_head *head);
 extern struct ileaf_attr_ops oattr_ops;
-int tux3_rollup_orphan_add(struct sb *sb, struct list_head *orphan_add);
-int tux3_rollup_orphan_del(struct sb *sb, struct list_head *orphan_del);
+int tux3_unify_orphan_add(struct sb *sb, struct list_head *orphan_add);
+int tux3_unify_orphan_del(struct sb *sb, struct list_head *orphan_del);
 int tux3_make_orphan_add(struct inode *inode);
 int tux3_make_orphan_del(struct inode *inode);
 int replay_orphan_add(struct replay *rp, unsigned version, inum_t inum);
@@ -934,7 +934,7 @@ void tux3_xattr_read_and_clear(struct inode *inode);
 void tux3_clear_dirty_inode(struct inode *inode);
 void __tux3_mark_buffer_dirty(struct buffer_head *buffer, unsigned delta);
 void tux3_mark_buffer_dirty(struct buffer_head *buffer);
-void tux3_mark_buffer_rollup(struct buffer_head *buffer);
+void tux3_mark_buffer_unify(struct buffer_head *buffer);
 void tux3_mark_inode_orphan(struct tux3_inode *tuxnode);
 int tux3_inode_is_orphan(struct tux3_inode *tuxnode);
 int tux3_flush_inode_internal(struct inode *inode, unsigned delta);
diff --git a/fs/tux3/writeback.c b/fs/tux3/writeback.c
index 935aa320324314..f2ba557e030567 100644
--- a/fs/tux3/writeback.c
+++ b/fs/tux3/writeback.c
@@ -343,12 +343,12 @@ void tux3_mark_buffer_dirty(struct buffer_head *buffer)
 }

 /*
- * Mark buffer as dirty to flush at rollup flush
+ * Mark buffer as dirty to flush at unify flush
 *
 * Specified buffer must be for volmap (i.e. no buffer fork, and
 * page->mapping is valid). Otherwise this will race with buffer fork.
 */
-void tux3_mark_buffer_rollup(struct buffer_head *buffer)
+void tux3_mark_buffer_unify(struct buffer_head *buffer)
 {
	struct sb *sb;
	struct inode *inode;
@@ -369,8 +369,8 @@ void tux3_mark_buffer_rollup(struct buffer_head *buffer)
	sb = tux_sb(inode->i_sb);
	assert(inode == sb->volmap);	/* must be volmap */

-	tux3_set_buffer_dirty_list(mapping(inode), buffer, sb->rollup,
-				   &sb->rollup_buffers);
+	tux3_set_buffer_dirty_list(mapping(inode), buffer, sb->unify,
+				   &sb->unify_buffers);
	/*
	 * FIXME: we don't call __tux3_mark_buffer_dirty() here, but
	 * mark_buffer_dirty() marks inode as I_DIRTY_PAGES. This
--
cgit 1.2.3-korg