bk://linux-ntfs.bkbits.net/ntfs-2.6-devel aia21@cantab.net|ChangeSet|20050125104235|43384 aia21 # This is a BitKeeper generated diff -Nru style patch. # # ChangeSet # 2005/01/25 10:42:35+00:00 aia21@cantab.net # Merge cantab.net:/home/src/ntfs-2.6 # into cantab.net:/home/src/ntfs-2.6-devel # # fs/ntfs/ChangeLog # 2005/01/25 10:42:30+00:00 aia21@cantab.net +7 -7 # Auto merged # # ChangeSet # 2005/01/22 15:46:22-08:00 akpm@bix.(none) # Merge bk://linux-ntfs.bkbits.net/ntfs-2.6-devel # into bix.(none):/usr/src/bk-ntfs # # fs/ntfs/compress.c # 2005/01/22 15:46:18-08:00 akpm@bix.(none) +0 -0 # Auto merged # # fs/ntfs/aops.c # 2005/01/22 15:46:18-08:00 akpm@bix.(none) +0 -0 # Auto merged # # ChangeSet # 2005/01/22 03:55:37+00:00 ntfs@flatcap.org # Merge flatcap.org:/home/flatcap/backup/bk/ntfs-2.6 # into flatcap.org:/home/flatcap/backup/bk/ntfs-2.6-devel # # fs/ntfs/compress.c # 2005/01/22 03:55:30+00:00 ntfs@flatcap.org +0 -0 # Auto merged # # fs/ntfs/aops.c # 2005/01/22 03:55:30+00:00 ntfs@flatcap.org +0 -0 # Auto merged # # ChangeSet # 2005/01/21 11:20:01-08:00 akpm@bix.(none) # Merge bix.(none):/usr/src/bk25 into bix.(none):/usr/src/bk-ntfs # # fs/ntfs/compress.c # 2005/01/21 11:19:57-08:00 akpm@bix.(none) +0 -0 # Auto merged # # fs/ntfs/aops.c # 2005/01/21 11:19:56-08:00 akpm@bix.(none) +0 -0 # Auto merged # # ChangeSet # 2005/01/13 16:03:50+00:00 aia21@cantab.net # NTFS: Optimise/reorganise some error handling code in fs/ntfs/aops.c. # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/aops.c # 2005/01/13 16:03:38+00:00 aia21@cantab.net +6 -11 # Optimise/reorganise some error handling code. # # ChangeSet # 2005/01/13 15:26:38+00:00 aia21@cantab.net # NTFS: Fixup the resident attribute resizing code in # fs/ntfs/aops.c::ntfs_{prepare,commit}_write()() and re-enable it. # It should be safe now. (Famous last words...) # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/aops.c # 2005/01/13 15:26:29+00:00 aia21@cantab.net +14 -7 # Fixup the resident attribute resizing code in ntfs_{prepare,commit}_write() # and re-enable it. # # fs/ntfs/ChangeLog # 2005/01/13 15:26:29+00:00 aia21@cantab.net +5 -0 # Update # # ChangeSet # 2005/01/13 11:04:48+00:00 aia21@cantab.net # NTFS: Fix stupid bug in fs/ntfs/mft.c introduced in last changeset. # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/mft.c # 2005/01/13 11:04:39+00:00 aia21@cantab.net +4 -7 # Fix stupid bug introduced in last changeset. # # ChangeSet # 2005/01/12 13:52:39+00:00 aia21@cantab.net # NTFS: Repeat a failed ntfs_truncate() in fs/ntfs/aops.c::ntfs_writepage() # and abort if it fails again. # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/aops.c # 2005/01/12 13:52:30+00:00 aia21@cantab.net +21 -10 # Repeat a failed ntfs_truncate() in fs/ntfs/aops.c::ntfs_writepage() and # abort if it fails again. # # ChangeSet # 2005/01/12 13:08:35+00:00 aia21@cantab.net # NTFS: Use i_size_{read,write}() in fs/ntfs/{aops.c,mft.c} and protect # access to the i_size and other size fields using the size_lock. # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/mft.c # 2005/01/12 13:08:26+00:00 aia21@cantab.net +113 -44 # Use i_size_{read,write}() in fs/ntfs/mft.c and protect access # to the i_size and other size fields using the size_lock. # # fs/ntfs/aops.c # 2005/01/12 13:08:26+00:00 aia21@cantab.net +41 -15 # Use i_size_{read,write}() in fs/ntfs/aops.c and protect access # to the i_size and other size fields using the size_lock. 
# # fs/ntfs/ChangeLog # 2005/01/12 13:08:26+00:00 aia21@cantab.net +2 -0 # Update # # ChangeSet # 2005/01/05 12:38:06+00:00 aia21@cantab.net # Merge cantab.net:/home/src/bklinux-2.6 # into cantab.net:/home/src/ntfs-2.6-devel # # fs/ntfs/super.c # 2005/01/05 12:38:01+00:00 aia21@cantab.net +0 -0 # Auto merged # # ChangeSet # 2004/11/19 22:19:09+00:00 aia21@cantab.net # NTFS: Use i_size_read() in fs/ntfs/inode.c once and then use the cached value # afterwards when reading the size of the bitmap inode. # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/ChangeLog # 2004/11/19 22:16:00+00:00 aia21@cantab.net +2 -0 # Update # # fs/ntfs/inode.c # 2004/11/19 22:13:47+00:00 aia21@cantab.net +14 -11 # Use i_size_read() once and then use the cache value for the bitmap # inode. # # ChangeSet # 2004/11/18 20:46:25+00:00 aia21@cantab.net # NTFS: - Use i_size_read() in fs/ntfs/super.c once and then use the cached # value afterwards. Cache the initialized_size in the same way and # protect access to the two sizes using the size_lock. # - Minor optimization to fs/ntfs/super.c::ntfs_statfs() and its helpers. # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/super.c # 2004/11/18 20:43:38+00:00 aia21@cantab.net +54 -48 # - Use i_size_read() in fs/ntfs/super.c once and then use the cached # value afterwards. Cache the initialized_size in the same way and # protect access to the two sizes using the size_lock. # - Minor optimization to fs/ntfs/super.c::ntfs_statfs() and its helpers. # # fs/ntfs/ChangeLog # 2004/11/18 20:34:59+00:00 aia21@cantab.net +4 -0 # Update # # ChangeSet # 2004/11/18 15:02:37+00:00 aia21@cantab.net # NTFS: In fs/ntfs/dir.c, use i_size_read() once and then the cached value # afterwards. # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/ChangeLog # 2004/11/18 15:01:06+00:00 aia21@cantab.net +2 -0 # Update # # fs/ntfs/dir.c # 2004/11/18 15:00:44+00:00 aia21@cantab.net +7 -6 # Use i_size_read() once and then the cached value afterwards. # # ChangeSet # 2004/11/18 13:50:22+00:00 aia21@cantab.net # NTFS: - In fs/ntfs/compress.c, use i_size_read() at the start and then use the # cached value everywhere. Cache the initialized_size in the same way # and protect the critical region where the two sizes are read using the # new size_lock of the ntfs inode. # - Add the new size_lock to the ntfs_inode structure (fs/ntfs/inode.h) # and initialize it (fs/ntfs/inode.c). # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/ChangeLog # 2004/11/18 13:46:45+00:00 aia21@cantab.net +7 -0 # Update # # fs/ntfs/compress.c # 2004/11/18 13:38:39+00:00 aia21@cantab.net +28 -18 # Use i_size_read() at the start and then use the cached # value everywhere. Cache the initialized_size in the # same way and protect the critical region where the two # sizes are read using the new size_lock of the ntfs inode. # # fs/ntfs/inode.h # 2004/11/18 13:38:14+00:00 aia21@cantab.net +1 -0 # Add the new size_lock to the ntfs_inode structure. # # fs/ntfs/inode.c # 2004/11/18 13:37:51+00:00 aia21@cantab.net +1 -0 # Initialized the new size_lock. # # ChangeSet # 2004/11/17 15:46:56+00:00 aia21@cantab.net # NTFS: Use i_size_read() in fs/ntfs/file.c::ntfs_file_open(). # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/ChangeLog # 2004/11/17 15:45:08+00:00 aia21@cantab.net +1 -0 # Update # # fs/ntfs/file.c # 2004/11/17 15:44:09+00:00 aia21@cantab.net +1 -1 # Use i_size_read() in ntfs_file_open(). 
# # ChangeSet # 2004/11/11 12:42:47+00:00 aia21@cantab.net # NTFS: Use i_size_read() once and then use the cached value in # fs/ntfs/lcnalloc.c::ntfs_cluster_alloc(). # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/lcnalloc.c # 2004/11/11 12:42:38+00:00 aia21@cantab.net +5 -3 # Use i_size_read() once and then use the cached value in ntfs_cluster_alloc(). # # fs/ntfs/ChangeLog # 2004/11/11 12:42:38+00:00 aia21@cantab.net +2 -0 # Update # # ChangeSet # 2004/11/11 12:34:10+00:00 aia21@cantab.net # NTFS: Use i_size_read() in fs/ntfs/logfile.c::ntfs_{check,empty}_logfile(). # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/logfile.c # 2004/11/11 12:34:01+00:00 aia21@cantab.net +3 -2 # Use i_size_read() when accessing the $LogFile inode->i_size in # ntfs_check_logfile() and ntfs_empty_logfile(). # # fs/ntfs/ChangeLog # 2004/11/11 12:34:00+00:00 aia21@cantab.net +12 -13 # Update # # ChangeSet # 2004/11/11 11:18:20+00:00 aia21@cantab.net # NTFS: Use i_size_read() in fs/ntfs/attrib.c::ntfs_attr_set(). # # Signed-off-by: Anton Altaparmakov # # fs/ntfs/attrib.c # 2004/11/11 11:18:10+00:00 aia21@cantab.net +5 -1 # Use i_size_read() in ntfs_attr_set(). # # fs/ntfs/Makefile # 2004/11/11 11:18:10+00:00 aia21@cantab.net +1 -1 # Start 2.1.23-WIP. # # fs/ntfs/ChangeLog # 2004/11/11 11:18:10+00:00 aia21@cantab.net +4 -0 # Update # diff -Nru a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog --- a/fs/ntfs/ChangeLog 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/ChangeLog 2005-01-28 14:26:11 -08:00 @@ -2,20 +2,18 @@ - Find and fix bugs. - Checkpoint or disable the user space journal ($UsnJrnl). - In between ntfs_prepare/commit_write, need exclusion between - simultaneous file extensions. Need perhaps an NInoResizeUnderway() - flag which we can set in ntfs_prepare_write() and clear again in - ntfs_commit_write(). Just have to be careful in readpage/writepage, - as well as in truncate, that we play nice... We might need to have - a data_size field in the ntfs_inode to store the real attribute - length. Also need to be careful with initialized_size extention in + simultaneous file extensions. This is given to us by holding i_sem on + the inode. The only places in the kernel when a file is resized are + prepare/commit write and truncate for both of which i_sem is held. + Just have to be careful in readpage/writepage and all other helpers + not running under i_sem that we play nice... + Also need to be careful with initialized_size extention in ntfs_prepare_write. Basically, just be _very_ careful in this code... - OTOH, perhaps i_sem, which is held accross generic_file_write is - sufficient for synchronisation here. We then just need to make sure - ntfs_readpage/writepage/truncate interoperate properly with us. - UPDATE: The above is all ok as it is due to i_sem held. The only - thing that needs to be checked is ntfs_writepage() which does not - hold i_sem. It cannot change i_size but it needs to cope with a - concurrent i_size change. + UPDATE: The onlythingis that need to be checked are read/writepage + which do not hold i_sem. Note writepage cannot change i_size but it + needs to cope with a concurrent i_size change, just like readpage. + Also both need to cope with concurrent the other sizes, i.e. + initialized/allocated/compressed size changing as well. - Implement mft.c::sync_mft_mirror_umount(). We currently will just leave the volume dirty on umount if the final iput(vol->mft_ino) causes a write of any mirrored mft records due to the mft mirror @@ -24,6 +22,36 @@ the problem. 
- Enable the code for setting the NT4 compatibility flag when we start making NTFS 1.2 specific modifications. + +2.1.23-WIP + + - Use i_size_read() in fs/ntfs/attrib.c::ntfs_attr_set(). + - Use i_size_read() in fs/ntfs/logfile.c::ntfs_{check,empty}_logfile(). + - Use i_size_read() once and then use the cached value in + fs/ntfs/lcnalloc.c::ntfs_cluster_alloc(). + - Use i_size_read() in fs/ntfs/file.c::ntfs_file_open(). + - Add size_lock to the ntfs_inode structure. This is an rw spinlock + and it locks against access to the inode sizes. Note, ->size_lock + is also accessed from irq context so you must use the _irqsave and + _irqrestore lock and unlock functions, respectively. + - Use i_size_read() in fs/ntfs/compress.c at the start of the read and + use the cached value afterwards. Cache the initialized_size in the + same way and protect access to the two sizes using the size_lock. + - Use i_size_read() in fs/ntfs/dir.c once and then use the cached + value afterwards. + - Use i_size_read() in fs/ntfs/super.c once and then use the cached + value afterwards. Cache the initialized_size in the same way and + protect access to the two sizes using the size_lock. + - Minor optimization to fs/ntfs/super.c::ntfs_statfs() and its helpers. + - Use i_size_read() in fs/ntfs/inode.c once and then use the cached + value afterwards when reading the size of the bitmap inode. + - Use i_size_{read,write}() in fs/ntfs/{aops.c,mft.c} and protect + access to the i_size and other size fields using the size_lock. + - Implement extension of resident files in the regular file write code + paths (fs/ntfs/aops.c::ntfs_{prepare,commit}_write()). At present + this only works until the data attribute becomes too big for the mft + record after which we abort the write returning -EOPNOTSUPP from + ntfs_prepare_write(). 2.1.23-WIP diff -Nru a/fs/ntfs/Makefile b/fs/ntfs/Makefile --- a/fs/ntfs/Makefile 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/Makefile 2005-01-28 14:26:11 -08:00 @@ -6,7 +6,7 @@ index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \ unistr.o upcase.o -EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.22\" +EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.23-WIP\" ifeq ($(CONFIG_NTFS_DEBUG),y) EXTRA_CFLAGS += -DDEBUG diff -Nru a/fs/ntfs/aops.c b/fs/ntfs/aops.c --- a/fs/ntfs/aops.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/aops.c 2005-01-28 14:26:11 -08:00 @@ -66,19 +66,22 @@ ni = NTFS_I(page->mapping->host); if (likely(uptodate)) { - s64 file_ofs; + s64 file_ofs, initialized_size; set_buffer_uptodate(bh); file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); + read_lock_irqsave(&ni->size_lock, flags); + initialized_size = ni->initialized_size; + read_unlock_irqrestore(&ni->size_lock, flags); /* Check for the current buffer head overflowing. 
*/ - if (file_ofs + bh->b_size > ni->initialized_size) { + if (file_ofs + bh->b_size > initialized_size) { char *addr; int ofs = 0; - if (file_ofs < ni->initialized_size) - ofs = ni->initialized_size - file_ofs; + if (file_ofs < initialized_size) + ofs = initialized_size - file_ofs; addr = kmap_atomic(page, KM_BIO_SRC_IRQ); memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); flush_dcache_page(page); @@ -168,6 +171,7 @@ runlist_element *rl; struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; sector_t iblock, lblock, zblock; + unsigned long flags; unsigned int blocksize, vcn_ofs; int i, nr; unsigned char blocksize_bits; @@ -190,8 +194,10 @@ } iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); + read_lock_irqsave(&ni->size_lock, flags); lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits; + read_unlock_irqrestore(&ni->size_lock, flags); /* Loop through all the buffers in the page. */ rl = NULL; @@ -463,12 +469,15 @@ { VCN vcn; LCN lcn; + s64 initialized_size; + loff_t i_size; sector_t block, dblock, iblock; struct inode *vi; ntfs_inode *ni; ntfs_volume *vol; runlist_element *rl; struct buffer_head *bh, *head; + unsigned long flags; unsigned int blocksize, vcn_ofs; int err; BOOL need_end_writeback; @@ -510,11 +519,16 @@ /* The first block in the page. */ block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); + read_lock_irqsave(&ni->size_lock, flags); + i_size = i_size_read(vi); + initialized_size = ni->initialized_size; + read_unlock_irqrestore(&ni->size_lock, flags); + /* The first out of bounds block for the data size. */ - dblock = (vi->i_size + blocksize - 1) >> blocksize_bits; + dblock = (i_size + blocksize - 1) >> blocksize_bits; /* The last (fully or partially) initialized block. */ - iblock = ni->initialized_size >> blocksize_bits; + iblock = initialized_size >> blocksize_bits; /* * Be very careful. We have no exclusion from __set_page_dirty_buffers @@ -559,7 +573,7 @@ /* Make sure we have enough initialized size. */ if (unlikely((block >= iblock) && - (ni->initialized_size < vi->i_size))) { + (initialized_size < i_size))) { /* * If this page is fully outside initialized size, zero * out all pages between the current initialized size @@ -846,7 +860,7 @@ (PAGE_CACHE_SHIFT - bh_size_bits); /* The first out of bounds block for the data size. */ - dblock = (vi->i_size + bh_size - 1) >> bh_size_bits; + dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; rl = NULL; err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0; @@ -858,6 +872,7 @@ if (likely(block < rec_block)) { if (unlikely(block >= dblock)) { clear_buffer_dirty(bh); + set_buffer_uptodate(bh); continue; } /* @@ -1223,19 +1238,30 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc) { loff_t i_size; - struct inode *vi; - ntfs_inode *ni, *base_ni; + struct inode *vi = page->mapping->host; + ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi); char *kaddr; - ntfs_attr_search_ctx *ctx; - MFT_RECORD *m; + ntfs_attr_search_ctx *ctx = NULL; + MFT_RECORD *m = NULL; u32 attr_len; int err; BUG_ON(!PageLocked(page)); - - vi = page->mapping->host; + /* + * If a previous ntfs_truncate() failed, repeat it and abort if it + * fails again. 
+ */ + if (unlikely(NInoTruncateFailed(ni))) { + down_write(&vi->i_alloc_sem); + err = ntfs_truncate(vi); + up_write(&vi->i_alloc_sem); + if (err || NInoTruncateFailed(ni)) { + if (!err) + err = -EIO; + goto err_out; + } + } i_size = i_size_read(vi); - /* Is the page fully outside i_size? (truncate in progress) */ if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)) { @@ -1248,8 +1274,6 @@ ntfs_debug("Write outside i_size - truncated?"); return 0; } - ni = NTFS_I(vi); - /* NInoNonResident() == NInoIndexAllocPresent() */ if (NInoNonResident(ni)) { /* @@ -1367,15 +1391,12 @@ */ attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); - i_size = i_size_read(VFS_I(ni)); - kaddr = kmap_atomic(page, KM_USER0); + i_size = i_size_read(vi); if (unlikely(attr_len > i_size)) { - /* Zero out of bounds area in the mft record. */ - memset((u8*)ctx->attr + le16_to_cpu( - ctx->attr->data.resident.value_offset) + - i_size, 0, attr_len - i_size); attr_len = i_size; + ctx->attr->data.resident.value_length = cpu_to_le32(attr_len); } + kaddr = kmap_atomic(page, KM_USER0); /* Copy the data from the page to the mft record. */ memcpy((u8*)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset), @@ -1405,8 +1426,10 @@ err = 0; } else { ntfs_error(vi->i_sb, "Resident attribute write failed with " - "error %i. Setting page error flag.", err); + "error %i.", err); SetPageError(page); + NVolSetErrors(ni->vol); + make_bad_inode(vi); } unlock_page(page); if (ctx) @@ -1425,12 +1448,15 @@ { VCN vcn; LCN lcn; + s64 initialized_size; + loff_t i_size; sector_t block, ablock, iblock; struct inode *vi; ntfs_inode *ni; ntfs_volume *vol; runlist_element *rl; struct buffer_head *bh, *head, *wait[2], **wait_bh = wait; + unsigned long flags; unsigned int vcn_ofs, block_start, block_end, blocksize; int err; BOOL is_retry; @@ -1462,6 +1488,7 @@ /* The first block in the page. */ block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); + read_lock_irqsave(&ni->size_lock, flags); /* * The first out of bounds block for the allocated size. No need to * round up as allocated_size is in multiples of cluster size and the @@ -1470,8 +1497,12 @@ */ ablock = ni->allocated_size >> blocksize_bits; + i_size = i_size_read(vi); + initialized_size = ni->initialized_size; + read_unlock_irqrestore(&ni->size_lock, flags); + /* The last (fully or partially) initialized block. */ - iblock = ni->initialized_size >> blocksize_bits; + iblock = initialized_size >> blocksize_bits; /* Loop through all the buffers in the page. */ block_start = 0; @@ -1518,7 +1549,7 @@ * request, i.e. block < ablock is true. */ if (unlikely((block >= iblock) && - (ni->initialized_size < vi->i_size))) { + (initialized_size < i_size))) { /* * If this page is fully outside initialized size, zero * out all pages between the current initialized size @@ -1797,6 +1828,7 @@ unsigned from, unsigned to) { s64 new_size; + loff_t i_size; struct inode *vi = page->mapping->host; ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi); ntfs_volume *vol = ni->vol; @@ -1868,14 +1900,8 @@ BUG_ON(page_has_buffers(page)); new_size = ((s64)page->index << PAGE_CACHE_SHIFT) + to; /* If we do not need to resize the attribute allocation we are done. */ - if (new_size <= vi->i_size) + if (new_size <= i_size_read(vi)) goto done; - - // FIXME: We abort for now as this code is not safe. - ntfs_error(vi->i_sb, "Changing the file size is not supported yet. " - "Sorry."); - return -EOPNOTSUPP; - /* Map, pin, and lock the (base) mft record. 
*/ if (!NInoAttr(ni)) base_ni = ni; @@ -1904,7 +1930,15 @@ a = ctx->attr; /* The total length of the attribute value. */ attr_len = le32_to_cpu(a->data.resident.value_length); - BUG_ON(vi->i_size != attr_len); + /* Fix an eventual previous failure of ntfs_commit_write(). */ + i_size = i_size_read(vi); + if (unlikely(attr_len > i_size)) { + attr_len = i_size; + a->data.resident.value_length = cpu_to_le32(attr_len); + } + /* If we do not need to resize the attribute allocation we are done. */ + if (new_size <= attr_len) + goto done_unm; /* Check if new size is allowed in $AttrDef. */ err = ntfs_attr_size_bounds_check(vol, ni->type, new_size); if (unlikely(err)) { @@ -1962,6 +1996,7 @@ } flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); +done_unm: ntfs_attr_put_search_ctx(ctx); unmap_mft_record(base_ni); /* @@ -2047,7 +2082,7 @@ * now we know ntfs_prepare_write() would have failed in the write * exceeds i_size case, so this will never trigger which is fine. */ - if (pos > vi->i_size) { + if (pos > i_size_read(vi)) { ntfs_error(vi->i_sb, "Writing beyond the existing file size is " "not supported yet. Sorry."); return -EOPNOTSUPP; @@ -2183,9 +2218,13 @@ } kunmap_atomic(kaddr, KM_USER0); /* Update i_size if necessary. */ - if (vi->i_size < attr_len) { + if (i_size_read(vi) < attr_len) { + unsigned long flags; + + write_lock_irqsave(&ni->size_lock, flags); ni->allocated_size = ni->initialized_size = attr_len; i_size_write(vi, attr_len); + write_unlock_irqrestore(&ni->size_lock, flags); } /* Mark the mft record dirty, so it gets written back. */ flush_dcache_mft_record_page(ctx->ntfs_ino); diff -Nru a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c --- a/fs/ntfs/attrib.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/attrib.c 2005-01-28 14:26:11 -08:00 @@ -1127,6 +1127,10 @@ * byte offset @ofs inside the attribute with the constant byte @val. * * This function is effectively like memset() applied to an ntfs attribute. + * Note thie function actually only operates on the page cache pages belonging + * to the ntfs attribute and it marks them dirty after doing the memset(). + * Thus it relies on the vm dirty page write code paths to cause the modified + * pages to be written to the mft record/disk. * * Return 0 on success and -errno on error. An error code of -ESPIPE means * that @ofs + @cnt were outside the end of the attribute and no write was @@ -1155,7 +1159,7 @@ end = ofs + cnt; end_ofs = end & ~PAGE_CACHE_MASK; /* If the end is outside the inode size return -ESPIPE. */ - if (unlikely(end > VFS_I(ni)->i_size)) { + if (unlikely(end > i_size_read(VFS_I(ni)))) { ntfs_error(vol->sb, "Request exceeds end of attribute."); return -ESPIPE; } diff -Nru a/fs/ntfs/compress.c b/fs/ntfs/compress.c --- a/fs/ntfs/compress.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/compress.c 2005-01-28 14:26:11 -08:00 @@ -96,13 +96,14 @@ /** * zero_partial_compressed_page - zero out of bounds compressed page region */ -static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page) +static void zero_partial_compressed_page(struct page *page, + const s64 initialized_size) { u8 *kp = page_address(page); unsigned int kp_ofs; ntfs_debug("Zeroing page region outside initialized size."); - if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) { + if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) { /* * FIXME: Using clear_page() will become wrong when we get * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem. 
@@ -110,7 +111,7 @@ clear_page(kp); return; } - kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK; + kp_ofs = initialized_size & ~PAGE_CACHE_MASK; memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs); return; } @@ -118,12 +119,12 @@ /** * handle_bounds_compressed_page - test for&handle out of bounds compressed page */ -static inline void handle_bounds_compressed_page(ntfs_inode *ni, - struct page *page) +static inline void handle_bounds_compressed_page(struct page *page, + const loff_t i_size, const s64 initialized_size) { - if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) && - (ni->initialized_size < VFS_I(ni)->i_size)) - zero_partial_compressed_page(ni, page); + if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) && + (initialized_size < i_size)) + zero_partial_compressed_page(page, initialized_size); return; } @@ -138,6 +139,8 @@ * @xpage_done: set to 1 if xpage was completed successfully (IN/OUT) * @cb_start: compression block to decompress (IN) * @cb_size: size of compression block @cb_start in bytes (IN) + * @i_size: file size when we started the read (IN) + * @initialized_size: initialized file size when we started the read (IN) * * The caller must have disabled preemption. ntfs_decompress() reenables it when * the critical section is finished. @@ -165,7 +168,8 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index, int *dest_ofs, const int dest_max_index, const int dest_max_ofs, const int xpage, char *xpage_done, u8 *const cb_start, - const u32 cb_size) + const u32 cb_size, const loff_t i_size, + const s64 initialized_size) { /* * Pointers into the compressed data, i.e. the compression block (cb), @@ -219,9 +223,6 @@ spin_unlock(&ntfs_cb_lock); /* Second stage: finalize completed pages. */ if (nr_completed_pages > 0) { - struct page *page = dest_pages[completed_pages[0]]; - ntfs_inode *ni = NTFS_I(page->mapping->host); - for (i = 0; i < nr_completed_pages; i++) { int di = completed_pages[i]; @@ -230,7 +231,8 @@ * If we are outside the initialized size, zero * the out of bounds page range. */ - handle_bounds_compressed_page(ni, dp); + handle_bounds_compressed_page(dp, i_size, + initialized_size); flush_dcache_page(dp); kunmap(dp); SetPageUptodate(dp); @@ -478,12 +480,14 @@ */ int ntfs_read_compressed_block(struct page *page) { + loff_t i_size; + s64 initialized_size; struct address_space *mapping = page->mapping; ntfs_inode *ni = NTFS_I(mapping->host); ntfs_volume *vol = ni->vol; struct super_block *sb = vol->sb; runlist_element *rl; - unsigned long block_size = sb->s_blocksize; + unsigned long flags, block_size = sb->s_blocksize; unsigned char block_size_bits = sb->s_blocksize_bits; u8 *cb, *cb_pos, *cb_end; struct buffer_head **bhs; @@ -552,8 +556,12 @@ * The remaining pages need to be allocated and inserted into the page * cache, alignment guarantees keep all the below much simpler. (-8 */ - max_page = ((VFS_I(ni)->i_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT) - offset; + read_lock_irqsave(&ni->size_lock, flags); + i_size = i_size_read(VFS_I(ni)); + initialized_size = ni->initialized_size; + read_unlock_irqrestore(&ni->size_lock, flags); + max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - + offset; if (nr_pages < max_page) max_page = nr_pages; for (i = 0; i < max_page; i++, offset++) { @@ -824,7 +832,8 @@ * If we are outside the initialized size, zero * the out of bounds page range. 
*/ - handle_bounds_compressed_page(ni, page); + handle_bounds_compressed_page(page, i_size, + initialized_size); flush_dcache_page(page); kunmap(page); SetPageUptodate(page); @@ -847,7 +856,8 @@ ntfs_debug("Found compressed compression block."); err = ntfs_decompress(pages, &cur_page, &cur_ofs, cb_max_page, cb_max_ofs, xpage, &xpage_done, - cb_pos, cb_size - (cb_pos - cb)); + cb_pos, cb_size - (cb_pos - cb), i_size, + initialized_size); /* * We can sleep from now on, lock already dropped by * ntfs_decompress(). diff -Nru a/fs/ntfs/dir.c b/fs/ntfs/dir.c --- a/fs/ntfs/dir.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/dir.c 2005-01-28 14:26:11 -08:00 @@ -1101,7 +1101,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { s64 ia_pos, ia_start, prev_ia_pos, bmp_pos; - loff_t fpos; + loff_t fpos, i_size; struct inode *bmp_vi, *vdir = filp->f_dentry->d_inode; struct super_block *sb = vdir->i_sb; ntfs_inode *ndir = NTFS_I(vdir); @@ -1122,7 +1122,8 @@ vdir->i_ino, fpos); rc = err = 0; /* Are we at end of dir yet? */ - if (fpos >= vdir->i_size + vol->mft_record_size) + i_size = i_size_read(vdir); + if (fpos >= i_size + vol->mft_record_size) goto done; /* Emulate . and .. for all directories. */ if (!fpos) { @@ -1264,7 +1265,7 @@ bmp_mapping = bmp_vi->i_mapping; /* Get the starting bitmap bit position and sanity check it. */ bmp_pos = ia_pos >> ndir->itype.index.block_size_bits; - if (unlikely(bmp_pos >> 3 >= bmp_vi->i_size)) { + if (unlikely(bmp_pos >> 3 >= i_size_read(bmp_vi))) { ntfs_error(sb, "Current index allocation position exceeds " "index bitmap size."); goto err_out; @@ -1301,7 +1302,7 @@ goto get_next_bmp_page; } /* If we have reached the end of the bitmap, we are done. */ - if (unlikely(((bmp_pos + cur_bmp_pos) >> 3) >= vdir->i_size)) + if (unlikely(((bmp_pos + cur_bmp_pos) >> 3) >= i_size)) goto unm_EOD; ia_pos = (bmp_pos + cur_bmp_pos) << ndir->itype.index.block_size_bits; @@ -1441,7 +1442,7 @@ ntfs_unmap_page(bmp_page); EOD: /* We are finished, set fpos to EOD. */ - fpos = vdir->i_size + vol->mft_record_size; + fpos = i_size + vol->mft_record_size; abort: kfree(name); done: @@ -1495,7 +1496,7 @@ static int ntfs_dir_open(struct inode *vi, struct file *filp) { if (sizeof(unsigned long) < 8) { - if (vi->i_size > MAX_LFS_FILESIZE) + if (i_size_read(vi) > MAX_LFS_FILESIZE) return -EFBIG; } return 0; diff -Nru a/fs/ntfs/file.c b/fs/ntfs/file.c --- a/fs/ntfs/file.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/file.c 2005-01-28 14:26:11 -08:00 @@ -47,7 +47,7 @@ static int ntfs_file_open(struct inode *vi, struct file *filp) { if (sizeof(unsigned long) < 8) { - if (vi->i_size > MAX_LFS_FILESIZE) + if (i_size_read(vi) > MAX_LFS_FILESIZE) return -EFBIG; } return generic_file_open(vi, filp); diff -Nru a/fs/ntfs/inode.c b/fs/ntfs/inode.c --- a/fs/ntfs/inode.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/inode.c 2005-01-28 14:26:11 -08:00 @@ -174,7 +174,7 @@ vi = iget5_locked(sb, mft_no, (test_t)ntfs_test_inode, (set_t)ntfs_init_locked_inode, &na); - if (!vi) + if (unlikely(!vi)) return ERR_PTR(-ENOMEM); err = 0; @@ -188,7 +188,7 @@ * There is no point in keeping bad inodes around if the failure was * due to ENOMEM. We want to be able to retry again later. 
*/ - if (err == -ENOMEM) { + if (unlikely(err == -ENOMEM)) { iput(vi); vi = ERR_PTR(err); } @@ -235,7 +235,7 @@ vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode, (set_t)ntfs_init_locked_inode, &na); - if (!vi) + if (unlikely(!vi)) return ERR_PTR(-ENOMEM); err = 0; @@ -250,7 +250,7 @@ * simplifies things in that we never need to check for bad attribute * inodes elsewhere. */ - if (err) { + if (unlikely(err)) { iput(vi); vi = ERR_PTR(err); } @@ -290,7 +290,7 @@ vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode, (set_t)ntfs_init_locked_inode, &na); - if (!vi) + if (unlikely(!vi)) return ERR_PTR(-ENOMEM); err = 0; @@ -305,7 +305,7 @@ * simplifies things in that we never need to check for bad index * inodes elsewhere. */ - if (err) { + if (unlikely(err)) { iput(vi); vi = ERR_PTR(err); } @@ -376,6 +376,7 @@ void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni) { ntfs_debug("Entering."); + rwlock_init(&ni->size_lock); ni->initialized_size = ni->allocated_size = 0; ni->seq_no = 0; atomic_set(&ni->count, 1); @@ -741,6 +742,7 @@ * in ntfs_ino->attr_list and it is ntfs_ino->attr_list_size bytes. */ if (S_ISDIR(vi->i_mode)) { + loff_t bvi_size; struct inode *bvi; ntfs_inode *bni; INDEX_ROOT *ir; @@ -958,11 +960,12 @@ goto unm_err_out; } /* Consistency check bitmap size vs. index allocation size. */ - if ((bvi->i_size << 3) < (vi->i_size >> + bvi_size = i_size_read(bvi); + if ((bvi_size << 3) < (vi->i_size >> ni->itype.index.block_size_bits)) { ntfs_error(vi->i_sb, "Index bitmap too small (0x%llx) " "for index allocation (0x%llx).", - bvi->i_size << 3, vi->i_size); + bvi_size << 3, vi->i_size); goto unm_err_out; } skip_large_dir_stuff: @@ -1429,6 +1432,7 @@ */ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi) { + loff_t bvi_size; ntfs_volume *vol = NTFS_SB(vi->i_sb); ntfs_inode *ni, *base_ni, *bni; struct inode *bvi; @@ -1632,10 +1636,10 @@ goto iput_unm_err_out; } /* Consistency check bitmap size vs. index allocation size. */ - if ((bvi->i_size << 3) < (vi->i_size >> - ni->itype.index.block_size_bits)) { + bvi_size = i_size_read(bvi); + if ((bvi_size << 3) < (vi->i_size >> ni->itype.index.block_size_bits)) { ntfs_error(vi->i_sb, "Index bitmap too small (0x%llx) for " - "index allocation (0x%llx).", bvi->i_size << 3, + "index allocation (0x%llx).", bvi_size << 3, vi->i_size); goto iput_unm_err_out; } diff -Nru a/fs/ntfs/inode.h b/fs/ntfs/inode.h --- a/fs/ntfs/inode.h 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/inode.h 2005-01-28 14:26:11 -08:00 @@ -44,6 +44,7 @@ * fields already provided in the VFS inode. */ struct _ntfs_inode { + rwlock_t size_lock; /* Lock serializing access to inode sizes. */ s64 initialized_size; /* Copy from the attribute record. */ s64 allocated_size; /* Copy from the attribute record. */ unsigned long state; /* NTFS specific flags describing this inode. 
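The size_lock added to struct _ntfs_inode above is the rw spinlock described in the ChangeLog entry: it serializes access to initialized_size, allocated_size and (together with i_size_read()/i_size_write()) i_size, and because it is also taken from I/O completion context (see the buffer completion hunk in aops.c above) it must always be used with the _irqsave/_irqrestore variants. Below is a minimal sketch of the reader-side convention the rest of this patch follows; ntfs_snapshot_sizes() is a hypothetical helper shown only for illustration, while the field names and NTFS_I()/VFS_I() are the driver's own:

	/*
	 * Sketch only: illustrates the reader-side locking convention used
	 * throughout this patch.  ntfs_snapshot_sizes() is not a real driver
	 * function.
	 */
	#include <linux/fs.h>
	#include <linux/spinlock.h>
	#include "inode.h"	/* ntfs_inode, NTFS_I(), VFS_I(), size_lock */

	static void ntfs_snapshot_sizes(ntfs_inode *ni, s64 *init_size,
			s64 *alloc_size, loff_t *isize)
	{
		unsigned long flags;

		/*
		 * size_lock is also taken in irq context (buffer i/o
		 * completion), hence the _irqsave/_irqrestore variants.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		*init_size = ni->initialized_size;
		*alloc_size = ni->allocated_size;
		*isize = i_size_read(VFS_I(ni));
		read_unlock_irqrestore(&ni->size_lock, flags);
	}

Callers then work only with the cached values for the remainder of the operation, exactly as ntfs_read_block(), ntfs_writepage() and ntfs_read_compressed_block() do in the hunks above, so a concurrent resize cannot leave one code path using a mixture of old and new sizes.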
diff -Nru a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c --- a/fs/ntfs/lcnalloc.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/lcnalloc.c 2005-01-28 14:26:11 -08:00 @@ -140,6 +140,7 @@ LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn; LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size; s64 clusters; + loff_t i_size; struct inode *lcnbmp_vi; runlist_element *rl = NULL; struct address_space *mapping; @@ -249,6 +250,7 @@ clusters = count; rlpos = rlsize = 0; mapping = lcnbmp_vi->i_mapping; + i_size = i_size_read(lcnbmp_vi); while (1) { ntfs_debug("Start of outer while loop: done_zones 0x%x, " "search_zone %i, pass %i, zone_start 0x%llx, " @@ -263,7 +265,7 @@ last_read_pos = bmp_pos >> 3; ntfs_debug("last_read_pos 0x%llx.", (unsigned long long)last_read_pos); - if (last_read_pos > lcnbmp_vi->i_size) { + if (last_read_pos > i_size) { ntfs_debug("End of attribute reached. " "Skipping to zone_pass_done."); goto zone_pass_done; @@ -287,8 +289,8 @@ buf_size = last_read_pos & ~PAGE_CACHE_MASK; buf = page_address(page) + buf_size; buf_size = PAGE_CACHE_SIZE - buf_size; - if (unlikely(last_read_pos + buf_size > lcnbmp_vi->i_size)) - buf_size = lcnbmp_vi->i_size - last_read_pos; + if (unlikely(last_read_pos + buf_size > i_size)) + buf_size = i_size - last_read_pos; buf_size <<= 3; lcn = bmp_pos & 7; bmp_pos &= ~7; diff -Nru a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c --- a/fs/ntfs/logfile.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/logfile.c 2005-01-28 14:26:11 -08:00 @@ -443,7 +443,7 @@ /* An empty $LogFile must have been clean before it got emptied. */ if (NVolLogFileEmpty(vol)) goto is_empty; - size = log_vi->i_size; + size = i_size_read(log_vi); /* Make sure the file doesn't exceed the maximum allowed size. */ if (size > MaxLogFileSize) size = MaxLogFileSize; @@ -689,7 +689,8 @@ if (!NVolLogFileEmpty(vol)) { int err; - err = ntfs_attr_set(NTFS_I(log_vi), 0, log_vi->i_size, 0xff); + err = ntfs_attr_set(NTFS_I(log_vi), 0, i_size_read(log_vi), + 0xff); if (unlikely(err)) { ntfs_error(vol->sb, "Failed to fill $LogFile with " "0xff bytes (error code %i).", err); diff -Nru a/fs/ntfs/mft.c b/fs/ntfs/mft.c --- a/fs/ntfs/mft.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/mft.c 2005-01-28 14:26:11 -08:00 @@ -45,6 +45,7 @@ */ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni) { + loff_t i_size; ntfs_volume *vol = ni->vol; struct inode *mft_vi = vol->mft_ino; struct page *page; @@ -60,13 +61,14 @@ index = ni->mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; + i_size = i_size_read(mft_vi); /* The maximum valid index into the page cache for $MFT's data. */ - end_index = mft_vi->i_size >> PAGE_CACHE_SHIFT; + end_index = i_size >> PAGE_CACHE_SHIFT; /* If the wanted index is out of bounds the mft record doesn't exist. */ if (unlikely(index >= end_index)) { - if (index > end_index || (mft_vi->i_size & ~PAGE_CACHE_MASK) < - ofs + vol->mft_record_size) { + if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs + + vol->mft_record_size) { page = ERR_PTR(-ENOENT); ntfs_error(vol->sb, "Attemt to read mft record 0x%lx, " "which is beyond the end of the mft. " @@ -1121,6 +1123,7 @@ ntfs_inode *base_ni) { s64 pass_end, ll, data_pos, pass_start, ofs, bit; + unsigned long flags; struct address_space *mftbmp_mapping; u8 *buf, *byte; struct page *page; @@ -1134,9 +1137,13 @@ * Set the end of the pass making sure we do not overflow the mft * bitmap. 
*/ + read_lock_irqsave(&NTFS_I(vol->mft_ino)->size_lock, flags); pass_end = NTFS_I(vol->mft_ino)->allocated_size >> vol->mft_record_size_bits; + read_unlock_irqrestore(&NTFS_I(vol->mft_ino)->size_lock, flags); + read_lock_irqsave(&NTFS_I(vol->mftbmp_ino)->size_lock, flags); ll = NTFS_I(vol->mftbmp_ino)->initialized_size << 3; + read_unlock_irqrestore(&NTFS_I(vol->mftbmp_ino)->size_lock, flags); if (pass_end > ll) pass_end = ll; pass = 1; @@ -1263,6 +1270,7 @@ { LCN lcn; s64 ll; + unsigned long flags; struct page *page; ntfs_inode *mft_ni, *mftbmp_ni; runlist_element *rl, *rl2 = NULL; @@ -1286,8 +1294,10 @@ * mft bitmap cannot be zero so we are ok to do this. * ntfs_find_vcn() returns the runlist locked on success. */ - rl = ntfs_find_vcn(mftbmp_ni, (mftbmp_ni->allocated_size - 1) >> - vol->cluster_size_bits, TRUE); + read_lock_irqsave(&mftbmp_ni->size_lock, flags); + ll = mftbmp_ni->allocated_size; + read_unlock_irqrestore(&mftbmp_ni->size_lock, flags); + rl = ntfs_find_vcn(mftbmp_ni, (ll - 1) >> vol->cluster_size_bits, TRUE); if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) { ntfs_error(vol->sb, "Failed to determine last allocated " "cluster of mft bitmap attribute."); @@ -1458,9 +1468,11 @@ } a = ctx->attr; } + write_lock_irqsave(&mftbmp_ni->size_lock, flags); mftbmp_ni->allocated_size += vol->cluster_size; a->data.non_resident.allocated_size = cpu_to_sle64(mftbmp_ni->allocated_size); + write_unlock_irqrestore(&mftbmp_ni->size_lock, flags); /* Ensure the changes make it to disk. */ flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); @@ -1476,7 +1488,9 @@ 0, ctx)) { ntfs_error(vol->sb, "Failed to find last attribute extent of " "mft bitmap attribute.%s", es); + write_lock_irqsave(&mftbmp_ni->size_lock, flags); mftbmp_ni->allocated_size += vol->cluster_size; + write_unlock_irqrestore(&mftbmp_ni->size_lock, flags); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(mft_ni); up_write(&mftbmp_ni->runlist.lock); @@ -1550,6 +1564,7 @@ static int ntfs_mft_bitmap_extend_initialized_nolock(ntfs_volume *vol) { s64 old_data_size, old_initialized_size; + unsigned long flags; struct inode *mftbmp_vi; ntfs_inode *mft_ni, *mftbmp_ni; ntfs_attr_search_ctx *ctx; @@ -1583,7 +1598,8 @@ goto put_err_out; } a = ctx->attr; - old_data_size = mftbmp_vi->i_size; + write_lock_irqsave(&mftbmp_ni->size_lock, flags); + old_data_size = i_size_read(mftbmp_vi); old_initialized_size = mftbmp_ni->initialized_size; /* * We can simply update the initialized_size before filling the space @@ -1593,11 +1609,12 @@ mftbmp_ni->initialized_size += 8; a->data.non_resident.initialized_size = cpu_to_sle64(mftbmp_ni->initialized_size); - if (mftbmp_ni->initialized_size > mftbmp_vi->i_size) { - mftbmp_vi->i_size = mftbmp_ni->initialized_size; + if (mftbmp_ni->initialized_size > old_data_size) { + i_size_write(mftbmp_vi, mftbmp_ni->initialized_size); a->data.non_resident.data_size = - cpu_to_sle64(mftbmp_vi->i_size); + cpu_to_sle64(mftbmp_ni->initialized_size); } + write_unlock_irqrestore(&mftbmp_ni->size_lock, flags); /* Ensure the changes make it to disk. 
*/ flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); @@ -1636,22 +1653,28 @@ goto err_out; } a = ctx->attr; + write_lock_irqsave(&mftbmp_ni->size_lock, flags); mftbmp_ni->initialized_size = old_initialized_size; a->data.non_resident.initialized_size = cpu_to_sle64(old_initialized_size); - if (mftbmp_vi->i_size != old_data_size) { - mftbmp_vi->i_size = old_data_size; + if (i_size_read(mftbmp_vi) != old_data_size) { + i_size_write(mftbmp_vi, old_data_size); a->data.non_resident.data_size = cpu_to_sle64(old_data_size); } + write_unlock_irqrestore(&mftbmp_ni->size_lock, flags); flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(mft_ni); +#ifdef DEBUG + read_lock_irqsave(&mftbmp_ni->size_lock, flags); ntfs_debug("Restored status of mftbmp: allocated_size 0x%llx, " "data_size 0x%llx, initialized_size 0x%llx.", (long long)mftbmp_ni->allocated_size, - (long long)mftbmp_vi->i_size, + (long long)i_size_read(mftbmp_vi), (long long)mftbmp_ni->initialized_size); + read_unlock_irqrestore(&mftbmp_ni->size_lock, flags); +#endif /* DEBUG */ err_out: return ret; } @@ -1679,7 +1702,8 @@ { LCN lcn; VCN old_last_vcn; - s64 min_nr, nr, ll = 0; + s64 min_nr, nr, ll; + unsigned long flags; ntfs_inode *mft_ni; runlist_element *rl, *rl2; ntfs_attr_search_ctx *ctx = NULL; @@ -1697,8 +1721,10 @@ * attribute cannot be zero so we are ok to do this. * ntfs_find_vcn() returns the runlist locked on success. */ - rl = ntfs_find_vcn(mft_ni, (mft_ni->allocated_size - 1) >> - vol->cluster_size_bits, TRUE); + read_lock_irqsave(&mft_ni->size_lock, flags); + ll = mft_ni->allocated_size; + read_unlock_irqrestore(&mft_ni->size_lock, flags); + rl = ntfs_find_vcn(mft_ni, (ll - 1) >> vol->cluster_size_bits, TRUE); if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) { ntfs_error(vol->sb, "Failed to determine last allocated " "cluster of mft data attribute."); @@ -1710,8 +1736,7 @@ return ret; } lcn = rl->lcn + rl->length; - ntfs_debug("Last lcn of mft data attribute is 0x%llx.", - (long long)lcn); + ntfs_debug("Last lcn of mft data attribute is 0x%llx.", (long long)lcn); /* Minimum allocation is one mft record worth of clusters. */ min_nr = vol->mft_record_size >> vol->cluster_size_bits; if (!min_nr) @@ -1721,12 +1746,13 @@ if (!nr) nr = min_nr; /* Ensure we do not go above 2^32-1 mft records. */ - if (unlikely((mft_ni->allocated_size + - (nr << vol->cluster_size_bits)) >> + read_lock_irqsave(&mft_ni->size_lock, flags); + ll = mft_ni->allocated_size; + read_unlock_irqrestore(&mft_ni->size_lock, flags); + if (unlikely((ll + (nr << vol->cluster_size_bits)) >> vol->mft_record_size_bits >= (1ll << 32))) { nr = min_nr; - if (unlikely((mft_ni->allocated_size + - (nr << vol->cluster_size_bits)) >> + if (unlikely((ll + (nr << vol->cluster_size_bits)) >> vol->mft_record_size_bits >= (1ll << 32))) { ntfs_warning(vol->sb, "Cannot allocate mft record " "because the maximum number of inodes " @@ -1875,9 +1901,11 @@ } a = ctx->attr; } + write_lock_irqsave(&mft_ni->size_lock, flags); mft_ni->allocated_size += nr << vol->cluster_size_bits; a->data.non_resident.allocated_size = cpu_to_sle64(mft_ni->allocated_size); + write_unlock_irqrestore(&mft_ni->size_lock, flags); /* Ensure the changes make it to disk. 
*/ flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); @@ -1892,7 +1920,9 @@ CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx)) { ntfs_error(vol->sb, "Failed to find last attribute extent of " "mft data attribute.%s", es); + write_lock_irqsave(&mft_ni->size_lock, flags); mft_ni->allocated_size += nr << vol->cluster_size_bits; + write_unlock_irqrestore(&mft_ni->size_lock, flags); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(mft_ni); up_write(&mft_ni->runlist.lock); @@ -2036,6 +2066,7 @@ */ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no) { + loff_t i_size; struct inode *mft_vi = vol->mft_ino; struct page *page; MFT_RECORD *m; @@ -2051,10 +2082,11 @@ index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; /* The maximum valid index into the page cache for $MFT's data. */ - end_index = mft_vi->i_size >> PAGE_CACHE_SHIFT; + i_size = i_size_read(mft_vi); + end_index = i_size >> PAGE_CACHE_SHIFT; if (unlikely(index >= end_index)) { if (unlikely(index > end_index || ofs + vol->mft_record_size >= - (mft_vi->i_size & ~PAGE_CACHE_MASK))) { + (i_size & ~PAGE_CACHE_MASK))) { ntfs_error(vol->sb, "Tried to format non-existing mft " "record 0x%llx.", (long long)mft_no); return -ENOENT; @@ -2188,6 +2220,7 @@ ntfs_inode *base_ni, MFT_RECORD **mrec) { s64 ll, bit, old_data_initialized, old_data_size; + unsigned long flags; struct inode *vi; struct page *page; ntfs_inode *mft_ni, *mftbmp_ni, *ni; @@ -2237,9 +2270,13 @@ * the first 24 mft records as they are special and whilst they may not * be in use, we do not allocate from them. */ + read_lock_irqsave(&mft_ni->size_lock, flags); ll = mft_ni->initialized_size >> vol->mft_record_size_bits; - if (mftbmp_ni->initialized_size << 3 > ll && - mftbmp_ni->initialized_size > 3) { + read_unlock_irqrestore(&mft_ni->size_lock, flags); + read_lock_irqsave(&mftbmp_ni->size_lock, flags); + old_data_initialized = mftbmp_ni->initialized_size; + read_unlock_irqrestore(&mftbmp_ni->size_lock, flags); + if (old_data_initialized << 3 > ll && old_data_initialized > 3) { bit = ll; if (bit < 24) bit = 24; @@ -2254,15 +2291,18 @@ * mft record that we can allocate. * Note: The smallest mft record we allocate is mft record 24. */ - bit = mftbmp_ni->initialized_size << 3; + bit = old_data_initialized << 3; if (unlikely(bit >= (1ll << 32))) goto max_err_out; + read_lock_irqsave(&mftbmp_ni->size_lock, flags); + old_data_size = mftbmp_ni->allocated_size; ntfs_debug("Status of mftbmp before extension: allocated_size 0x%llx, " "data_size 0x%llx, initialized_size 0x%llx.", - (long long)mftbmp_ni->allocated_size, - (long long)vol->mftbmp_ino->i_size, - (long long)mftbmp_ni->initialized_size); - if (mftbmp_ni->initialized_size + 8 > mftbmp_ni->allocated_size) { + (long long)old_data_size, + (long long)i_size_read(vol->mftbmp_ino), + (long long)old_data_initialized); + read_unlock_irqrestore(&mftbmp_ni->size_lock, flags); + if (old_data_initialized + 8 > old_data_size) { /* Need to extend bitmap by one more cluster. 
*/ ntfs_debug("mftbmp: initialized_size + 8 > allocated_size."); err = ntfs_mft_bitmap_extend_allocation_nolock(vol); @@ -2270,12 +2310,16 @@ up_write(&vol->mftbmp_lock); goto err_out; } +#ifdef DEBUG + read_lock_irqsave(&mftbmp_ni->size_lock, flags); ntfs_debug("Status of mftbmp after allocation extension: " "allocated_size 0x%llx, data_size 0x%llx, " "initialized_size 0x%llx.", (long long)mftbmp_ni->allocated_size, - (long long)vol->mftbmp_ino->i_size, + (long long)i_size_read(vol->mftbmp_ino), (long long)mftbmp_ni->initialized_size); + read_unlock_irqrestore(&mftbmp_ni->size_lock, flags); +#endif /* DEBUG */ } /* * We now have sufficient allocated space, extend the initialized_size @@ -2287,12 +2331,16 @@ up_write(&vol->mftbmp_lock); goto err_out; } +#ifdef DEBUG + read_lock_irqsave(&mftbmp_ni->size_lock, flags); ntfs_debug("Status of mftbmp after initialized extention: " "allocated_size 0x%llx, data_size 0x%llx, " "initialized_size 0x%llx.", (long long)mftbmp_ni->allocated_size, - (long long)vol->mftbmp_ino->i_size, + (long long)i_size_read(vol->mftbmp_ino), (long long)mftbmp_ni->initialized_size); + read_unlock_irqrestore(&mftbmp_ni->size_lock, flags); +#endif /* DEBUG */ ntfs_debug("Found free record (#3), bit 0x%llx.", (long long)bit); found_free_rec: /* @bit is the found free mft record, allocate it in the mft bitmap. */ @@ -2314,7 +2362,10 @@ * parallel allocation could allocate the same mft record as this one. */ ll = (bit + 1) << vol->mft_record_size_bits; - if (ll <= mft_ni->initialized_size) { + read_lock_irqsave(&mft_ni->size_lock, flags); + old_data_initialized = mft_ni->initialized_size; + read_unlock_irqrestore(&mft_ni->size_lock, flags); + if (ll <= old_data_initialized) { ntfs_debug("Allocated mft record already initialized."); goto mft_rec_already_initialized; } @@ -2325,26 +2376,30 @@ * actually traversed more than once when a freshly formatted volume is * first written to so it optimizes away nicely in the common case. */ + read_lock_irqsave(&mft_ni->size_lock, flags); ntfs_debug("Status of mft data before extension: " "allocated_size 0x%llx, data_size 0x%llx, " "initialized_size 0x%llx.", (long long)mft_ni->allocated_size, - (long long)vol->mft_ino->i_size, + (long long)i_size_read(vol->mft_ino), (long long)mft_ni->initialized_size); while (ll > mft_ni->allocated_size) { + read_unlock_irqrestore(&mft_ni->size_lock, flags); err = ntfs_mft_data_extend_allocation_nolock(vol); if (unlikely(err)) { ntfs_error(vol->sb, "Failed to extend mft data " "allocation."); goto undo_mftbmp_alloc_nolock; } + read_lock_irqsave(&mft_ni->size_lock, flags); ntfs_debug("Status of mft data after allocation extension: " "allocated_size 0x%llx, data_size 0x%llx, " "initialized_size 0x%llx.", (long long)mft_ni->allocated_size, - (long long)vol->mft_ino->i_size, + (long long)i_size_read(vol->mft_ino), (long long)mft_ni->initialized_size); } + read_unlock_irqrestore(&mft_ni->size_lock, flags); /* * Extend mft data initialized size (and data size of course) to reach * the allocated mft record, formatting the mft records allong the way. @@ -2352,6 +2407,7 @@ * needed by ntfs_mft_record_format(). We will update the attribute * record itself in one fell swoop later on. 
*/ + write_lock_irqsave(&mft_ni->size_lock, flags); old_data_initialized = mft_ni->initialized_size; old_data_size = vol->mft_ino->i_size; while (ll > mft_ni->initialized_size) { @@ -2360,8 +2416,9 @@ new_initialized_size = mft_ni->initialized_size + vol->mft_record_size; mft_no = mft_ni->initialized_size >> vol->mft_record_size_bits; - if (new_initialized_size > vol->mft_ino->i_size) - vol->mft_ino->i_size = new_initialized_size; + if (new_initialized_size > i_size_read(vol->mft_ino)) + i_size_write(vol->mft_ino, new_initialized_size); + write_unlock_irqrestore(&mft_ni->size_lock, flags); ntfs_debug("Initializing mft record 0x%llx.", (long long)mft_no); err = ntfs_mft_record_format(vol, mft_no); @@ -2369,8 +2426,10 @@ ntfs_error(vol->sb, "Failed to format mft record."); goto undo_data_init; } + write_lock_irqsave(&mft_ni->size_lock, flags); mft_ni->initialized_size = new_initialized_size; } + write_unlock_irqrestore(&mft_ni->size_lock, flags); record_formatted = TRUE; /* Update the mft data attribute record to reflect the new sizes. */ m = map_mft_record(mft_ni); @@ -2396,22 +2455,27 @@ goto undo_data_init; } a = ctx->attr; + read_lock_irqsave(&mft_ni->size_lock, flags); a->data.non_resident.initialized_size = cpu_to_sle64(mft_ni->initialized_size); - a->data.non_resident.data_size = cpu_to_sle64(vol->mft_ino->i_size); + a->data.non_resident.data_size = + cpu_to_sle64(i_size_read(vol->mft_ino)); + read_unlock_irqrestore(&mft_ni->size_lock, flags); /* Ensure the changes make it to disk. */ flush_dcache_mft_record_page(ctx->ntfs_ino); mark_mft_record_dirty(ctx->ntfs_ino); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(mft_ni); + read_lock_irqsave(&mft_ni->size_lock, flags); ntfs_debug("Status of mft data after mft record initialization: " "allocated_size 0x%llx, data_size 0x%llx, " "initialized_size 0x%llx.", (long long)mft_ni->allocated_size, - (long long)vol->mft_ino->i_size, + (long long)i_size_read(vol->mft_ino), (long long)mft_ni->initialized_size); - BUG_ON(vol->mft_ino->i_size > mft_ni->allocated_size); - BUG_ON(mft_ni->initialized_size > vol->mft_ino->i_size); + BUG_ON(i_size_read(vol->mft_ino) > mft_ni->allocated_size); + BUG_ON(mft_ni->initialized_size > i_size_read(vol->mft_ino)); + read_unlock_irqrestore(&mft_ni->size_lock, flags); mft_rec_already_initialized: /* * We can finally drop the mft bitmap lock as the mft data attribute @@ -2652,8 +2716,10 @@ *mrec = m; return ni; undo_data_init: + write_lock_irqsave(&mft_ni->size_lock, flags); mft_ni->initialized_size = old_data_initialized; - vol->mft_ino->i_size = old_data_size; + i_size_write(vol->mft_ino, old_data_size); + write_unlock_irqrestore(&mft_ni->size_lock, flags); goto undo_mftbmp_alloc_nolock; undo_mftbmp_alloc: down_write(&vol->mftbmp_lock); diff -Nru a/fs/ntfs/super.c b/fs/ntfs/super.c --- a/fs/ntfs/super.c 2005-01-28 14:26:11 -08:00 +++ b/fs/ntfs/super.c 2005-01-28 14:26:11 -08:00 @@ -990,12 +990,12 @@ */ static BOOL check_mft_mirror(ntfs_volume *vol) { - unsigned long index; struct super_block *sb = vol->sb; ntfs_inode *mirr_ni; struct page *mft_page, *mirr_page; u8 *kmft, *kmirr; runlist_element *rl, rl2[2]; + pgoff_t index; int mrecs_per_page, i; ntfs_debug("Entering."); @@ -1205,10 +1205,11 @@ */ static BOOL load_and_init_attrdef(ntfs_volume *vol) { + loff_t i_size; struct super_block *sb = vol->sb; struct inode *ino; struct page *page; - unsigned long index, max_index; + pgoff_t index, max_index; unsigned int size; ntfs_debug("Entering."); @@ -1220,13 +1221,14 @@ goto failed; } /* The size of FILE_AttrDef must 
be above 0 and fit inside 31 bits. */ - if (!ino->i_size || ino->i_size > 0x7fffffff) + i_size = i_size_read(ino); + if (i_size <= 0 || i_size > 0x7fffffff) goto iput_failed; - vol->attrdef = (ATTR_DEF*)ntfs_malloc_nofs(ino->i_size); + vol->attrdef = (ATTR_DEF*)ntfs_malloc_nofs(i_size); if (!vol->attrdef) goto iput_failed; index = 0; - max_index = ino->i_size >> PAGE_CACHE_SHIFT; + max_index = i_size >> PAGE_CACHE_SHIFT; size = PAGE_CACHE_SIZE; while (index < max_index) { /* Read the attrdef table and copy it into the linear buffer. */ @@ -1239,12 +1241,12 @@ ntfs_unmap_page(page); }; if (size == PAGE_CACHE_SIZE) { - size = ino->i_size & ~PAGE_CACHE_MASK; + size = i_size & ~PAGE_CACHE_MASK; if (size) goto read_partial_attrdef_page; } - vol->attrdef_size = ino->i_size; - ntfs_debug("Read %llu bytes from $AttrDef.", ino->i_size); + vol->attrdef_size = i_size; + ntfs_debug("Read %llu bytes from $AttrDef.", i_size); iput(ino); return TRUE; free_iput_failed: @@ -1267,10 +1269,11 @@ */ static BOOL load_and_init_upcase(ntfs_volume *vol) { + loff_t i_size; struct super_block *sb = vol->sb; struct inode *ino; struct page *page; - unsigned long index, max_index; + pgoff_t index, max_index; unsigned int size; int i, max; @@ -1286,14 +1289,15 @@ * The upcase size must not be above 64k Unicode characters, must not * be zero and must be a multiple of sizeof(ntfschar). */ - if (!ino->i_size || ino->i_size & (sizeof(ntfschar) - 1) || - ino->i_size > 64ULL * 1024 * sizeof(ntfschar)) + i_size = i_size_read(ino); + if (!i_size || i_size & (sizeof(ntfschar) - 1) || + i_size > 64ULL * 1024 * sizeof(ntfschar)) goto iput_upcase_failed; - vol->upcase = (ntfschar*)ntfs_malloc_nofs(ino->i_size); + vol->upcase = (ntfschar*)ntfs_malloc_nofs(i_size); if (!vol->upcase) goto iput_upcase_failed; index = 0; - max_index = ino->i_size >> PAGE_CACHE_SHIFT; + max_index = i_size >> PAGE_CACHE_SHIFT; size = PAGE_CACHE_SIZE; while (index < max_index) { /* Read the upcase table and copy it into the linear buffer. */ @@ -1306,13 +1310,13 @@ ntfs_unmap_page(page); }; if (size == PAGE_CACHE_SIZE) { - size = ino->i_size & ~PAGE_CACHE_MASK; + size = i_size & ~PAGE_CACHE_MASK; if (size) goto read_partial_upcase_page; } - vol->upcase_len = ino->i_size >> UCHAR_T_SIZE_BITS; + vol->upcase_len = i_size >> UCHAR_T_SIZE_BITS; ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).", - ino->i_size, 64 * 1024 * sizeof(ntfschar)); + i_size, 64 * 1024 * sizeof(ntfschar)); iput(ino); down(&ntfs_lock); if (!default_upcase) { @@ -1435,7 +1439,7 @@ iput(vol->lcnbmp_ino); goto bitmap_failed; } - if ((vol->nr_clusters + 7) >> 3 > vol->lcnbmp_ino->i_size) { + if ((vol->nr_clusters + 7) >> 3 > i_size_read(vol->lcnbmp_ino)) { iput(vol->lcnbmp_ino); bitmap_failed: ntfs_error(sb, "Failed to load $Bitmap."); @@ -1959,8 +1963,7 @@ struct address_space *mapping = vol->lcnbmp_ino->i_mapping; filler_t *readpage = (filler_t*)mapping->a_ops->readpage; struct page *page; - unsigned long index, max_index; - unsigned int max_size; + pgoff_t index, max_index; ntfs_debug("Entering."); /* Serialize accesses to the cluster bitmap. */ @@ -1972,11 +1975,10 @@ */ max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - /* Use multiples of 4 bytes. */ - max_size = PAGE_CACHE_SIZE >> 2; - ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%x.", - max_index, max_size); - for (index = 0UL; index < max_index; index++) { + /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. 
*/ + ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", + max_index, PAGE_CACHE_SIZE / 4); + for (index = 0; index < max_index; index++) { unsigned int i; /* * Read the page from page cache, getting it from backing store @@ -2008,7 +2010,7 @@ * the result as all out of range bytes are set to zero by * ntfs_readpage(). */ - for (i = 0; i < max_size; i++) + for (i = 0; i < PAGE_CACHE_SIZE / 4; i++) nr_free -= (s64)hweight32(kaddr[i]); kunmap_atomic(kaddr, KM_USER0); page_cache_release(page); @@ -2031,6 +2033,8 @@ /** * __get_nr_free_mft_records - return the number of free inodes on a volume * @vol: ntfs volume for which to obtain free inode count + * @nr_free: number of mft records in file system + * @max_index: maximum number of pages containing set bits * * Calculate the number of free mft records (inodes) on the mounted NTFS * volume @vol. We actually calculate the number of mft records in use instead @@ -2043,32 +2047,20 @@ * * NOTE: Caller must hold mftbmp_lock rw_semaphore for reading or writing. */ -static unsigned long __get_nr_free_mft_records(ntfs_volume *vol) +static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, + s64 nr_free, const pgoff_t max_index) { - s64 nr_free; u32 *kaddr; struct address_space *mapping = vol->mftbmp_ino->i_mapping; filler_t *readpage = (filler_t*)mapping->a_ops->readpage; struct page *page; - unsigned long index, max_index; - unsigned int max_size; + pgoff_t index; ntfs_debug("Entering."); - /* Number of mft records in file system (at this point in time). */ - nr_free = vol->mft_ino->i_size >> vol->mft_record_size_bits; - /* - * Convert the maximum number of set bits into bytes rounded up, then - * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we - * have one full and one partial page max_index = 2. - */ - max_index = ((((NTFS_I(vol->mft_ino)->initialized_size >> - vol->mft_record_size_bits) + 7) >> 3) + - PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - /* Use multiples of 4 bytes. */ - max_size = PAGE_CACHE_SIZE >> 2; + /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " - "0x%x.", max_index, max_size); - for (index = 0UL; index < max_index; index++) { + "0x%lx.", max_index, PAGE_CACHE_SIZE / 4); + for (index = 0; index < max_index; index++) { unsigned int i; /* * Read the page from page cache, getting it from backing store @@ -2100,7 +2092,7 @@ * the result as all out of range bytes are set to zero by * ntfs_readpage(). */ - for (i = 0; i < max_size; i++) + for (i = 0; i < PAGE_CACHE_SIZE / 4; i++) nr_free -= (s64)hweight32(kaddr[i]); kunmap_atomic(kaddr, KM_USER0); page_cache_release(page); @@ -2134,8 +2126,11 @@ */ static int ntfs_statfs(struct super_block *sb, struct kstatfs *sfs) { - ntfs_volume *vol = NTFS_SB(sb); s64 size; + ntfs_volume *vol = NTFS_SB(sb); + ntfs_inode *mft_ni = NTFS_I(vol->mft_ino); + pgoff_t max_index; + unsigned long flags; ntfs_debug("Entering."); /* Type of filesystem. */ @@ -2158,10 +2153,20 @@ sfs->f_bavail = sfs->f_bfree = size; /* Serialize accesses to the inode bitmap. */ down_read(&vol->mftbmp_lock); + read_lock_irqsave(&mft_ni->size_lock, flags); + size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits; + /* + * Convert the maximum number of set bits into bytes rounded up, then + * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we + * have one full and one partial page max_index = 2. 
+ */ + max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits) + + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + read_unlock_irqrestore(&mft_ni->size_lock, flags); /* Number of inodes in file system (at this point in time). */ - sfs->f_files = vol->mft_ino->i_size >> vol->mft_record_size_bits; + sfs->f_files = size; /* Free inodes in fs (based on current total count). */ - sfs->f_ffree = __get_nr_free_mft_records(vol); + sfs->f_ffree = __get_nr_free_mft_records(vol, size, max_index); up_read(&vol->mftbmp_lock); /* * File system id. This is extremely *nix flavour dependent and even @@ -2347,7 +2352,8 @@ } /* Get the size of the device in units of NTFS_BLOCK_SIZE bytes. */ - vol->nr_blocks = sb->s_bdev->bd_inode->i_size >> NTFS_BLOCK_SIZE_BITS; + vol->nr_blocks = i_size_read(sb->s_bdev->bd_inode) >> + NTFS_BLOCK_SIZE_BITS; /* Read the boot sector and return unlocked buffer head to it. */ if (!(bh = read_ntfs_boot_sector(sb, silent))) {
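
For reference, here is the writer side of the same convention, modelled on the resident-attribute path of ntfs_commit_write() above: the ntfs_inode size fields and i_size are updated together under the write lock so that readers taking size_lock never observe a partially updated set. ntfs_grow_resident_sizes() is a hypothetical name used only to illustrate the pattern; the body mirrors the hunk in aops.c that updates allocated_size, initialized_size and i_size:

	/*
	 * Sketch only, modelled on the resident attribute path of
	 * ntfs_commit_write().  Not a real driver function.
	 */
	#include <linux/fs.h>
	#include <linux/spinlock.h>
	#include "inode.h"	/* ntfs_inode, NTFS_I(), size_lock */

	static void ntfs_grow_resident_sizes(struct inode *vi, u32 attr_len)
	{
		ntfs_inode *ni = NTFS_I(vi);
		unsigned long flags;

		/* Callers hold i_sem, so i_size cannot change under us here. */
		if (i_size_read(vi) >= attr_len)
			return;
		write_lock_irqsave(&ni->size_lock, flags);
		/* For a resident attribute all three sizes equal the value length. */
		ni->allocated_size = ni->initialized_size = attr_len;
		i_size_write(vi, attr_len);
		write_unlock_irqrestore(&ni->size_lock, flags);
	}

Writers that can race with the irq-context readers in aops.c, such as the mft and mft bitmap extension code in mft.c, follow the same pattern, taking size_lock with write_lock_irqsave() around every update of allocated_size, initialized_size and the data size.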