Implement the designed locking around journal->j_free. Things get a lot better here, too. 25-akpm/fs/jbd/checkpoint.c | 26 +++++++++++++++++--------- 25-akpm/fs/jbd/journal.c | 14 ++++++++++---- 25-akpm/fs/jbd/transaction.c | 21 ++++++++++----------- 25-akpm/include/linux/jbd.h | 7 ++++--- 4 files changed, 41 insertions(+), 27 deletions(-) diff -puN fs/jbd/checkpoint.c~jbd-210-j_free-locking fs/jbd/checkpoint.c --- 25/fs/jbd/checkpoint.c~jbd-210-j_free-locking Thu Jun 5 15:14:27 2003 +++ 25-akpm/fs/jbd/checkpoint.c Thu Jun 5 15:14:27 2003 @@ -70,25 +70,33 @@ static int __try_to_free_cp_buf(struct j } /* - * log_wait_for_space: wait until there is space in the journal. + * __log_wait_for_space: wait until there is space in the journal. * - * Called with the journal already locked, but it will be unlocked if we have - * to wait for a checkpoint to free up some space in the log. + * Called under j_state_lock *only*. It will be unlocked if we have to wait + * for a checkpoint to free up some space in the log. 
*/ -void log_wait_for_space(journal_t *journal, int nblocks) +void __log_wait_for_space(journal_t *journal, int nblocks) { - while (log_space_left(journal) < nblocks) { + assert_spin_locked(&journal->j_state_lock); + + while (__log_space_left(journal) < nblocks) { if (journal->j_flags & JFS_ABORT) return; unlock_journal(journal); + spin_unlock(&journal->j_state_lock); down(&journal->j_checkpoint_sem); lock_journal(journal); - /* Test again, another process may have checkpointed - * while we were waiting for the checkpoint lock */ - if (log_space_left(journal) < nblocks) { + /* + * Test again, another process may have checkpointed while we + * were waiting for the checkpoint lock + */ + spin_lock(&journal->j_state_lock); + if (__log_space_left(journal) < nblocks) { + spin_unlock(&journal->j_state_lock); log_do_checkpoint(journal, nblocks); + spin_lock(&journal->j_state_lock); } up(&journal->j_checkpoint_sem); } @@ -275,7 +283,7 @@ static int __flush_buffer(journal_t *jou * Perform an actual checkpoint. We don't write out only enough to * satisfy the current blocked requests: rather we submit a reasonably * sized chunk of the outstanding data to disk at once for - * efficiency. log_wait_for_space() will retry if we didn't free enough. + * efficiency. __log_wait_for_space() will retry if we didn't free enough. * * However, we _do_ take into account the amount requested so that once * the IO has been queued, we can return as soon as enough of it has diff -puN fs/jbd/journal.c~jbd-210-j_free-locking fs/jbd/journal.c --- 25/fs/jbd/journal.c~jbd-210-j_free-locking Thu Jun 5 15:14:27 2003 +++ 25-akpm/fs/jbd/journal.c Thu Jun 5 15:14:27 2003 @@ -392,17 +392,23 @@ repeat: */ /* - * log_space_left: Return the number of free blocks left in the journal. + * __log_space_left: Return the number of free blocks left in the journal. * * Called with the journal already locked. 
+ * + * Called under j_state_lock */ -int log_space_left (journal_t *journal) +int __log_space_left(journal_t *journal) { int left = journal->j_free; - /* Be pessimistic here about the number of those free blocks - * which might be required for log descriptor control blocks. */ + assert_spin_locked(&journal->j_state_lock); + + /* + * Be pessimistic here about the number of those free blocks which + * might be required for log descriptor control blocks. + */ #define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */ diff -puN fs/jbd/transaction.c~jbd-210-j_free-locking fs/jbd/transaction.c --- 25/fs/jbd/transaction.c~jbd-210-j_free-locking Thu Jun 5 15:14:27 2003 +++ 25-akpm/fs/jbd/transaction.c Thu Jun 5 15:14:27 2003 @@ -219,12 +219,10 @@ repeat_locked: needed += journal->j_committing_transaction-> t_outstanding_credits; - if (log_space_left(journal) < needed) { + if (__log_space_left(journal) < needed) { jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle); spin_unlock(&transaction->t_handle_lock); - spin_unlock(&journal->j_state_lock); - log_wait_for_space(journal, needed); - spin_lock(&journal->j_state_lock); + __log_wait_for_space(journal, needed); goto repeat_locked; } @@ -237,7 +235,7 @@ repeat_locked: transaction->t_handle_count++; jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n", handle, nblocks, transaction->t_outstanding_credits, - log_space_left(journal)); + __log_space_left(journal)); spin_unlock(&transaction->t_handle_lock); spin_unlock(&journal->j_state_lock); unlock_journal(journal); @@ -330,14 +328,16 @@ int journal_extend(handle_t *handle, int int result; int wanted; - lock_journal (journal); + lock_journal(journal); result = -EIO; if (is_handle_aborted(handle)) goto error_out; result = 1; - + + spin_lock(&journal->j_state_lock); + /* Don't extend a locked-down transaction! 
*/ if (handle->h_transaction->t_state != T_RUNNING) { jbd_debug(3, "denied handle %p %d blocks: " @@ -345,7 +345,6 @@ int journal_extend(handle_t *handle, int goto error_out; } - lock_kernel(); spin_lock(&transaction->t_handle_lock); wanted = transaction->t_outstanding_credits + nblocks; @@ -355,7 +354,7 @@ int journal_extend(handle_t *handle, int goto unlock; } - if (wanted > log_space_left(journal)) { + if (wanted > __log_space_left(journal)) { jbd_debug(3, "denied handle %p %d blocks: " "insufficient log space\n", handle, nblocks); goto unlock; @@ -368,9 +367,9 @@ int journal_extend(handle_t *handle, int jbd_debug(3, "extended handle %p by %d\n", handle, nblocks); unlock: spin_unlock(&transaction->t_handle_lock); - unlock_kernel(); error_out: - unlock_journal (journal); + spin_unlock(&journal->j_state_lock); + unlock_journal(journal); return result; } diff -puN include/linux/jbd.h~jbd-210-j_free-locking include/linux/jbd.h --- 25/include/linux/jbd.h~jbd-210-j_free-locking Thu Jun 5 15:14:27 2003 +++ 25-akpm/include/linux/jbd.h Thu Jun 5 15:14:27 2003 @@ -1000,18 +1000,19 @@ extern int journal_test_revoke(journa extern void journal_clear_revoke(journal_t *); extern void journal_brelse_array(struct buffer_head *b[], int n); -/* The log thread user interface: +/* + * The log thread user interface: * * Request space in the current transaction, and force transaction commit * transitions on demand. */ -extern int log_space_left (journal_t *); /* Called with journal locked */ +int __log_space_left(journal_t *); /* Called with journal locked */ extern tid_t log_start_commit (journal_t *, transaction_t *); extern int log_wait_commit (journal_t *, tid_t); extern int log_do_checkpoint (journal_t *, int); -extern void log_wait_for_space(journal_t *, int nblocks); +void __log_wait_for_space(journal_t *, int nblocks); extern void __journal_drop_transaction(journal_t *, transaction_t *); extern int cleanup_journal_tail(journal_t *); _