bk://linux-ntfs.bkbits.net/ntfs-2.6-devel
ntfs@flatcap.org|ChangeSet|20050102231810|50060 ntfs
# This is a BitKeeper generated diff -Nru style patch.
#
# ChangeSet
#   2004/11/18 15:02:37+00:00 aia21@cantab.net
#   NTFS: In fs/ntfs/dir.c, use i_size_read() once and then the cached value
#   afterwards.
#
#   Signed-off-by: Anton Altaparmakov
#
# fs/ntfs/ChangeLog
#   2004/11/18 15:01:06+00:00 aia21@cantab.net +2 -0
#   Update
#
# fs/ntfs/dir.c
#   2004/11/18 15:00:44+00:00 aia21@cantab.net +7 -6
#   Use i_size_read() once and then the cached value afterwards.
#
# ChangeSet
#   2004/11/18 13:50:22+00:00 aia21@cantab.net
#   NTFS: - In fs/ntfs/compress.c, use i_size_read() at the start and then use the
#   cached value everywhere. Cache the initialized_size in the same way
#   and protect the critical region where the two sizes are read using the
#   new size_lock of the ntfs inode.
#   - Add the new size_lock to the ntfs_inode structure (fs/ntfs/inode.h)
#   and initialize it (fs/ntfs/inode.c).
#
#   Signed-off-by: Anton Altaparmakov
#
# fs/ntfs/ChangeLog
#   2004/11/18 13:46:45+00:00 aia21@cantab.net +7 -0
#   Update
#
# fs/ntfs/compress.c
#   2004/11/18 13:38:39+00:00 aia21@cantab.net +28 -18
#   Use i_size_read() at the start and then use the cached
#   value everywhere. Cache the initialized_size in the
#   same way and protect the critical region where the two
#   sizes are read using the new size_lock of the ntfs inode.
#
# fs/ntfs/inode.h
#   2004/11/18 13:38:14+00:00 aia21@cantab.net +1 -0
#   Add the new size_lock to the ntfs_inode structure.
#
# fs/ntfs/inode.c
#   2004/11/18 13:37:51+00:00 aia21@cantab.net +1 -0
#   Initialized the new size_lock.
#
# ChangeSet
#   2004/11/17 15:46:56+00:00 aia21@cantab.net
#   NTFS: Use i_size_read() in fs/ntfs/file.c::ntfs_file_open().
#
#   Signed-off-by: Anton Altaparmakov
#
# fs/ntfs/ChangeLog
#   2004/11/17 15:45:08+00:00 aia21@cantab.net +1 -0
#   Update
#
# fs/ntfs/file.c
#   2004/11/17 15:44:09+00:00 aia21@cantab.net +1 -1
#   Use i_size_read() in ntfs_file_open().
#
# ChangeSet
#   2004/11/11 12:42:47+00:00 aia21@cantab.net
#   NTFS: Use i_size_read() once and then use the cached value in
#   fs/ntfs/lcnalloc.c::ntfs_cluster_alloc().
#
#   Signed-off-by: Anton Altaparmakov
#
# fs/ntfs/lcnalloc.c
#   2004/11/11 12:42:38+00:00 aia21@cantab.net +5 -3
#   Use i_size_read() once and then use the cached value in ntfs_cluster_alloc().
#
# fs/ntfs/ChangeLog
#   2004/11/11 12:42:38+00:00 aia21@cantab.net +2 -0
#   Update
#
# ChangeSet
#   2004/11/11 12:34:10+00:00 aia21@cantab.net
#   NTFS: Use i_size_read() in fs/ntfs/logfile.c::ntfs_{check,empty}_logfile().
#
#   Signed-off-by: Anton Altaparmakov
#
# fs/ntfs/logfile.c
#   2004/11/11 12:34:01+00:00 aia21@cantab.net +3 -2
#   Use i_size_read() when accessing the $LogFile inode->i_size in
#   ntfs_check_logfile() and ntfs_empty_logfile().
#
# fs/ntfs/ChangeLog
#   2004/11/11 12:34:00+00:00 aia21@cantab.net +12 -13
#   Update
#
# ChangeSet
#   2004/11/11 11:18:20+00:00 aia21@cantab.net
#   NTFS: Use i_size_read() in fs/ntfs/attrib.c::ntfs_attr_set().
#
#   Signed-off-by: Anton Altaparmakov
#
# fs/ntfs/attrib.c
#   2004/11/11 11:18:10+00:00 aia21@cantab.net +5 -1
#   Use i_size_read() in ntfs_attr_set().
#
# fs/ntfs/Makefile
#   2004/11/11 11:18:10+00:00 aia21@cantab.net +1 -1
#   Start 2.1.23-WIP.
#
# fs/ntfs/ChangeLog
#   2004/11/11 11:18:10+00:00 aia21@cantab.net +4 -0
#   Update
#
diff -Nru a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
--- a/fs/ntfs/ChangeLog	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/ChangeLog	2005-01-02 23:06:04 -08:00
@@ -2,20 +2,18 @@
 	- Find and fix bugs.
 	- Checkpoint or disable the user space journal ($UsnJrnl).
 	- In between ntfs_prepare/commit_write, need exclusion between
-	  simultaneous file extensions. Need perhaps an NInoResizeUnderway()
-	  flag which we can set in ntfs_prepare_write() and clear again in
-	  ntfs_commit_write(). Just have to be careful in readpage/writepage,
-	  as well as in truncate, that we play nice... We might need to have
-	  a data_size field in the ntfs_inode to store the real attribute
-	  length. Also need to be careful with initialized_size extention in
+	  simultaneous file extensions. This is given to us by holding i_sem on
+	  the inode. The only places in the kernel where a file is resized are
+	  prepare/commit write and truncate, for both of which i_sem is held.
+	  Just have to be careful in readpage/writepage and all other helpers
+	  not running under i_sem that we play nice...
+	  Also need to be careful with initialized_size extension in
 	  ntfs_prepare_write. Basically, just be _very_ careful in this code...
-	  OTOH, perhaps i_sem, which is held accross generic_file_write is
-	  sufficient for synchronisation here. We then just need to make sure
-	  ntfs_readpage/writepage/truncate interoperate properly with us.
-	  UPDATE: The above is all ok as it is due to i_sem held. The only
-	  thing that needs to be checked is ntfs_writepage() which does not
-	  hold i_sem. It cannot change i_size but it needs to cope with a
-	  concurrent i_size change.
+	  UPDATE: The only things that need to be checked are read/writepage
+	  which do not hold i_sem. Note writepage cannot change i_size but it
+	  needs to cope with a concurrent i_size change, just like readpage.
+	  Also both need to cope with concurrent changes to the other sizes,
+	  i.e. initialized/allocated/compressed size changing as well.
 	- Implement mft.c::sync_mft_mirror_umount(). We currently will just
 	  leave the volume dirty on umount if the final iput(vol->mft_ino)
 	  causes a write of any mirrored mft records due to the mft mirror
@@ -24,6 +22,23 @@
 	  the problem.
 	- Enable the code for setting the NT4 compatibility flag when we start
 	  making NTFS 1.2 specific modifications.
+
+2.1.23-WIP
+
+	- Use i_size_read() in fs/ntfs/attrib.c::ntfs_attr_set().
+	- Use i_size_read() in fs/ntfs/logfile.c::ntfs_{check,empty}_logfile().
+	- Use i_size_read() once and then use the cached value in
+	  fs/ntfs/lcnalloc.c::ntfs_cluster_alloc().
+	- Use i_size_read() in fs/ntfs/file.c::ntfs_file_open().
+	- Add size_lock to the ntfs_inode structure. This is an rw spinlock
+	  and it locks against access to the inode sizes. Note, ->size_lock
+	  is also accessed from irq context so you must use the _irqsave and
+	  _irqrestore lock and unlock functions, respectively.
+	- Use i_size_read() in fs/ntfs/compress.c at the start of the read and
+	  use the cached value afterwards. Cache the initialized_size in the
+	  same way and protect access to the two sizes using the size_lock.
+	- Use i_size_read() in fs/ntfs/dir.c once and then use the cached
+	  value afterwards.
 
 2.1.22 - Many bug and race fixes and error handling improvements.
 
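The size_lock added by the 2.1.23-WIP entry above is what makes the cached sizes trustworthy: it is an rw spinlock guarding i_size together with the NTFS-specific initialized/allocated/compressed sizes, and because it can also be taken from irq context the _irqsave/_irqrestore variants are mandatory. The fragment below is only an illustrative sketch, not part of the patch (ntfs_sample_sizes() is an invented name); the fields and locking calls, however, are the ones the compress.c hunks further down actually use.

#include <linux/fs.h>		/* i_size_read() */
#include <linux/spinlock.h>	/* read_lock_irqsave() and friends */
#include "inode.h"		/* ntfs_inode, VFS_I() */
#include "debug.h"		/* ntfs_debug() */

/* Illustration only: take one consistent snapshot of the inode sizes. */
static void ntfs_sample_sizes(ntfs_inode *ni)
{
	unsigned long flags;
	loff_t i_size;
	s64 init_size;

	/* ->size_lock may be taken from irq context, hence _irqsave. */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(VFS_I(ni));
	init_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);

	/* Only the cached copies are used from here on. */
	if (init_size < i_size)
		ntfs_debug("0x%llx bytes beyond initialized_size.",
				(unsigned long long)(i_size - init_size));
}

The compress.c change below is this same pattern inlined at the start of ntfs_read_compressed_block(), with the two cached values then handed down to ntfs_decompress() and handle_bounds_compressed_page().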
diff -Nru a/fs/ntfs/Makefile b/fs/ntfs/Makefile
--- a/fs/ntfs/Makefile	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/Makefile	2005-01-02 23:06:04 -08:00
@@ -6,7 +6,7 @@
 	     index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
 	     unistr.o upcase.o
 
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.22\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.23-WIP\"
 
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff -Nru a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
--- a/fs/ntfs/attrib.c	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/attrib.c	2005-01-02 23:06:04 -08:00
@@ -1127,6 +1127,10 @@
  * byte offset @ofs inside the attribute with the constant byte @val.
  *
  * This function is effectively like memset() applied to an ntfs attribute.
+ * Note this function actually only operates on the page cache pages belonging
+ * to the ntfs attribute and it marks them dirty after doing the memset().
+ * Thus it relies on the vm dirty page write code paths to cause the modified
+ * pages to be written to the mft record/disk.
  *
  * Return 0 on success and -errno on error. An error code of -ESPIPE means
  * that @ofs + @cnt were outside the end of the attribute and no write was
@@ -1155,7 +1159,7 @@
 	end = ofs + cnt;
 	end_ofs = end & ~PAGE_CACHE_MASK;
 	/* If the end is outside the inode size return -ESPIPE. */
-	if (unlikely(end > VFS_I(ni)->i_size)) {
+	if (unlikely(end > i_size_read(VFS_I(ni)))) {
 		ntfs_error(vol->sb, "Request exceeds end of attribute.");
 		return -ESPIPE;
 	}
diff -Nru a/fs/ntfs/compress.c b/fs/ntfs/compress.c
--- a/fs/ntfs/compress.c	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/compress.c	2005-01-02 23:06:04 -08:00
@@ -96,13 +96,14 @@
 /**
  * zero_partial_compressed_page - zero out of bounds compressed page region
  */
-static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
+static void zero_partial_compressed_page(struct page *page,
+		const s64 initialized_size)
 {
 	u8 *kp = page_address(page);
 	unsigned int kp_ofs;
 
 	ntfs_debug("Zeroing page region outside initialized size.");
-	if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
+	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
 		/*
 		 * FIXME: Using clear_page() will become wrong when we get
 		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
@@ -110,7 +111,7 @@
 		clear_page(kp);
 		return;
 	}
-	kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
+	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
 	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
 	return;
 }
@@ -118,12 +119,12 @@
 /**
  * handle_bounds_compressed_page - test for&handle out of bounds compressed page
  */
-static inline void handle_bounds_compressed_page(ntfs_inode *ni,
-		struct page *page)
+static inline void handle_bounds_compressed_page(struct page *page,
+		const loff_t i_size, const s64 initialized_size)
 {
-	if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
-			(ni->initialized_size < VFS_I(ni)->i_size))
-		zero_partial_compressed_page(ni, page);
+	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
+			(initialized_size < i_size))
+		zero_partial_compressed_page(page, initialized_size);
 	return;
 }
 
@@ -138,6 +139,8 @@
  * @xpage_done:	set to 1 if xpage was completed successfully (IN/OUT)
  * @cb_start:	compression block to decompress (IN)
  * @cb_size:	size of compression block @cb_start in bytes (IN)
+ * @i_size:	file size when we started the read (IN)
+ * @initialized_size:	initialized file size when we started the read (IN)
  *
  * The caller must have disabled preemption. ntfs_decompress() reenables it when
  * the critical section is finished.
@@ -165,7 +168,8 @@
 static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
 		const int xpage, char *xpage_done, u8 *const cb_start,
-		const u32 cb_size)
+		const u32 cb_size, const loff_t i_size,
+		const s64 initialized_size)
 {
 	/*
 	 * Pointers into the compressed data, i.e. the compression block (cb),
@@ -219,9 +223,6 @@
 	spin_unlock(&ntfs_cb_lock);
 
 	/* Second stage: finalize completed pages. */
 	if (nr_completed_pages > 0) {
-		struct page *page = dest_pages[completed_pages[0]];
-		ntfs_inode *ni = NTFS_I(page->mapping->host);
-
 		for (i = 0; i < nr_completed_pages; i++) {
 			int di = completed_pages[i];
@@ -230,7 +231,8 @@
 			 * If we are outside the initialized size, zero
 			 * the out of bounds page range.
 			 */
-			handle_bounds_compressed_page(ni, dp);
+			handle_bounds_compressed_page(dp, i_size,
+					initialized_size);
 			flush_dcache_page(dp);
 			kunmap(dp);
 			SetPageUptodate(dp);
@@ -478,12 +480,14 @@
  */
 int ntfs_read_compressed_block(struct page *page)
 {
+	loff_t i_size;
+	s64 initialized_size;
 	struct address_space *mapping = page->mapping;
 	ntfs_inode *ni = NTFS_I(mapping->host);
 	ntfs_volume *vol = ni->vol;
 	struct super_block *sb = vol->sb;
 	runlist_element *rl;
-	unsigned long block_size = sb->s_blocksize;
+	unsigned long flags, block_size = sb->s_blocksize;
 	unsigned char block_size_bits = sb->s_blocksize_bits;
 	u8 *cb, *cb_pos, *cb_end;
 	struct buffer_head **bhs;
@@ -552,8 +556,12 @@
 	 * The remaining pages need to be allocated and inserted into the page
 	 * cache, alignment guarantees keep all the below much simpler. (-8
 	 */
-	max_page = ((VFS_I(ni)->i_size + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT) - offset;
+	read_lock_irqsave(&ni->size_lock, flags);
+	i_size = i_size_read(VFS_I(ni));
+	initialized_size = ni->initialized_size;
+	read_unlock_irqrestore(&ni->size_lock, flags);
+	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+			offset;
 	if (nr_pages < max_page)
 		max_page = nr_pages;
 	for (i = 0; i < max_page; i++, offset++) {
@@ -824,7 +832,8 @@
 				 * If we are outside the initialized size, zero
 				 * the out of bounds page range.
 				 */
-				handle_bounds_compressed_page(ni, page);
+				handle_bounds_compressed_page(page, i_size,
+						initialized_size);
 				flush_dcache_page(page);
 				kunmap(page);
 				SetPageUptodate(page);
@@ -847,7 +856,8 @@
 		ntfs_debug("Found compressed compression block.");
 		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
 				cb_max_page, cb_max_ofs, xpage, &xpage_done,
-				cb_pos, cb_size - (cb_pos - cb));
+				cb_pos, cb_size - (cb_pos - cb), i_size,
+				initialized_size);
 		/*
 		 * We can sleep from now on, lock already dropped by
 		 * ntfs_decompress().
diff -Nru a/fs/ntfs/dir.c b/fs/ntfs/dir.c
--- a/fs/ntfs/dir.c	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/dir.c	2005-01-02 23:06:04 -08:00
@@ -1101,7 +1101,7 @@
 static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
 	s64 ia_pos, ia_start, prev_ia_pos, bmp_pos;
-	loff_t fpos;
+	loff_t fpos, i_size;
 	struct inode *bmp_vi, *vdir = filp->f_dentry->d_inode;
 	struct super_block *sb = vdir->i_sb;
 	ntfs_inode *ndir = NTFS_I(vdir);
@@ -1122,7 +1122,8 @@
 			vdir->i_ino, fpos);
 	rc = err = 0;
 	/* Are we at end of dir yet? */
-	if (fpos >= vdir->i_size + vol->mft_record_size)
+	i_size = i_size_read(vdir);
+	if (fpos >= i_size + vol->mft_record_size)
 		goto done;
 	/* Emulate . and .. for all directories. */
 	if (!fpos) {
@@ -1264,7 +1265,7 @@
 	bmp_mapping = bmp_vi->i_mapping;
 	/* Get the starting bitmap bit position and sanity check it. */
 	bmp_pos = ia_pos >> ndir->itype.index.block_size_bits;
-	if (unlikely(bmp_pos >> 3 >= bmp_vi->i_size)) {
+	if (unlikely(bmp_pos >> 3 >= i_size_read(bmp_vi))) {
 		ntfs_error(sb, "Current index allocation position exceeds "
 				"index bitmap size.");
 		goto err_out;
@@ -1301,7 +1302,7 @@
 		goto get_next_bmp_page;
 	}
 	/* If we have reached the end of the bitmap, we are done. */
-	if (unlikely(((bmp_pos + cur_bmp_pos) >> 3) >= vdir->i_size))
+	if (unlikely(((bmp_pos + cur_bmp_pos) >> 3) >= i_size))
 		goto unm_EOD;
 	ia_pos = (bmp_pos + cur_bmp_pos) <<
 			ndir->itype.index.block_size_bits;
@@ -1441,7 +1442,7 @@
 	ntfs_unmap_page(bmp_page);
 EOD:
 	/* We are finished, set fpos to EOD. */
-	fpos = vdir->i_size + vol->mft_record_size;
+	fpos = i_size + vol->mft_record_size;
 abort:
 	kfree(name);
 done:
@@ -1495,7 +1496,7 @@
 static int ntfs_dir_open(struct inode *vi, struct file *filp)
 {
 	if (sizeof(unsigned long) < 8) {
-		if (vi->i_size > MAX_LFS_FILESIZE)
+		if (i_size_read(vi) > MAX_LFS_FILESIZE)
 			return -EFBIG;
 	}
 	return 0;
diff -Nru a/fs/ntfs/file.c b/fs/ntfs/file.c
--- a/fs/ntfs/file.c	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/file.c	2005-01-02 23:06:04 -08:00
@@ -47,7 +47,7 @@
 static int ntfs_file_open(struct inode *vi, struct file *filp)
 {
 	if (sizeof(unsigned long) < 8) {
-		if (vi->i_size > MAX_LFS_FILESIZE)
+		if (i_size_read(vi) > MAX_LFS_FILESIZE)
 			return -EFBIG;
 	}
 	return generic_file_open(vi, filp);
diff -Nru a/fs/ntfs/inode.c b/fs/ntfs/inode.c
--- a/fs/ntfs/inode.c	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/inode.c	2005-01-02 23:06:04 -08:00
@@ -376,6 +376,7 @@
 void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
 {
 	ntfs_debug("Entering.");
+	rwlock_init(&ni->size_lock);
 	ni->initialized_size = ni->allocated_size = 0;
 	ni->seq_no = 0;
 	atomic_set(&ni->count, 1);
diff -Nru a/fs/ntfs/inode.h b/fs/ntfs/inode.h
--- a/fs/ntfs/inode.h	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/inode.h	2005-01-02 23:06:04 -08:00
@@ -44,6 +44,7 @@
  * fields already provided in the VFS inode.
  */
 struct _ntfs_inode {
+	rwlock_t size_lock;	/* Lock serializing access to inode sizes. */
 	s64 initialized_size;	/* Copy from the attribute record. */
 	s64 allocated_size;	/* Copy from the attribute record. */
 	unsigned long state;	/* NTFS specific flags describing this inode.
diff -Nru a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
--- a/fs/ntfs/lcnalloc.c	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/lcnalloc.c	2005-01-02 23:06:04 -08:00
@@ -140,6 +140,7 @@
 	LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn;
 	LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size;
 	s64 clusters;
+	loff_t i_size;
 	struct inode *lcnbmp_vi;
 	runlist_element *rl = NULL;
 	struct address_space *mapping;
@@ -249,6 +250,7 @@
 	clusters = count;
 	rlpos = rlsize = 0;
 	mapping = lcnbmp_vi->i_mapping;
+	i_size = i_size_read(lcnbmp_vi);
 	while (1) {
 		ntfs_debug("Start of outer while loop: done_zones 0x%x, "
 				"search_zone %i, pass %i, zone_start 0x%llx, "
@@ -263,7 +265,7 @@
 		last_read_pos = bmp_pos >> 3;
 		ntfs_debug("last_read_pos 0x%llx.",
 				(unsigned long long)last_read_pos);
-		if (last_read_pos > lcnbmp_vi->i_size) {
+		if (last_read_pos > i_size) {
 			ntfs_debug("End of attribute reached. "
 					"Skipping to zone_pass_done.");
 			goto zone_pass_done;
 		}
@@ -287,8 +289,8 @@
 		buf_size = last_read_pos & ~PAGE_CACHE_MASK;
 		buf = page_address(page) + buf_size;
 		buf_size = PAGE_CACHE_SIZE - buf_size;
-		if (unlikely(last_read_pos + buf_size > lcnbmp_vi->i_size))
-			buf_size = lcnbmp_vi->i_size - last_read_pos;
+		if (unlikely(last_read_pos + buf_size > i_size))
+			buf_size = i_size - last_read_pos;
 		buf_size <<= 3;
 		lcn = bmp_pos & 7;
 		bmp_pos &= ~7;
diff -Nru a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
--- a/fs/ntfs/logfile.c	2005-01-02 23:06:04 -08:00
+++ b/fs/ntfs/logfile.c	2005-01-02 23:06:04 -08:00
@@ -443,7 +443,7 @@
 	/* An empty $LogFile must have been clean before it got emptied. */
 	if (NVolLogFileEmpty(vol))
 		goto is_empty;
-	size = log_vi->i_size;
+	size = i_size_read(log_vi);
 	/* Make sure the file doesn't exceed the maximum allowed size. */
 	if (size > MaxLogFileSize)
 		size = MaxLogFileSize;
@@ -689,7 +689,8 @@
 	if (!NVolLogFileEmpty(vol)) {
 		int err;
 
-		err = ntfs_attr_set(NTFS_I(log_vi), 0, log_vi->i_size, 0xff);
+		err = ntfs_attr_set(NTFS_I(log_vi), 0, i_size_read(log_vi),
+				0xff);
 		if (unlikely(err)) {
 			ntfs_error(vol->sb, "Failed to fill $LogFile with "
 					"0xff bytes (error code %i).", err);