diff -urN o_direct/fs/block_dev.c blkdevpagecache/fs/block_dev.c
--- o_direct/fs/block_dev.c	Wed May  2 10:11:38 2001
+++ blkdevpagecache/fs/block_dev.c	Wed May  9 03:23:59 2001
@@ -14,301 +14,307 @@
 #include
 #include
 #include
+#include
+#include
 #include
 
 extern int *blk_size[];
 extern int *blksize_size[];
 
-#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
-#define NBUF 64
-
-ssize_t block_write(struct file * filp, const char * buf,
-		    size_t count, loff_t *ppos)
+static int FASTCALL(blkdev_get_block(struct inode *, long, struct buffer_head *));
+static int blkdev_get_block(struct inode * inode, long iblock, struct buffer_head * bh_result)
 {
-	struct inode * inode = filp->f_dentry->d_inode;
-	ssize_t blocksize, blocksize_bits, i, buffercount, write_error;
-	ssize_t block, blocks;
-	loff_t offset;
-	ssize_t chars;
-	ssize_t written;
-	struct buffer_head * bhlist[NBUF];
-	size_t size;
 	kdev_t dev = inode->i_rdev;
-	struct buffer_head * bh, *bufferlist[NBUF];
-	register char * p;
+	int err;
 
-	if (is_read_only(dev))
-		return -EPERM;
+	err = -EIO;
+	if (iblock >= (blk_size[MAJOR(dev)][MINOR(dev)] >> (BUFFERED_BLOCKSIZE_BITS - BLOCK_SIZE_BITS)))
+		goto out;
 
-	written = write_error = buffercount = 0;
-	blocksize = BLOCK_SIZE;
-	if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)])
-		blocksize = blksize_size[MAJOR(dev)][MINOR(dev)];
-
-	i = blocksize;
-	blocksize_bits = 0;
-	while(i != 1) {
-		blocksize_bits++;
-		i >>= 1;
-	}
-
-	block = *ppos >> blocksize_bits;
-	offset = *ppos & (blocksize-1);
-
-	if (blk_size[MAJOR(dev)])
-		size = ((loff_t) blk_size[MAJOR(dev)][MINOR(dev)] << BLOCK_SIZE_BITS) >> blocksize_bits;
-	else
-		size = INT_MAX;
-	while (count>0) {
-		if (block >= size)
-			return written ? written : -ENOSPC;
-		chars = blocksize - offset;
-		if (chars > count)
-			chars=count;
-
-#if 0
-		/* get the buffer head */
-		{
-			struct buffer_head * (*fn)(kdev_t, int, int) = getblk;
-			if (chars != blocksize)
-				fn = bread;
-			bh = fn(dev, block, blocksize);
-			if (!bh)
-				return written ? written : -EIO;
-			if (!buffer_uptodate(bh))
-				wait_on_buffer(bh);
-		}
-#else
-		bh = getblk(dev, block, blocksize);
-		if (!bh)
-			return written ? written : -EIO;
+	bh_result->b_dev = dev;
+	bh_result->b_blocknr = iblock;
+	bh_result->b_state |= 1UL << BH_Mapped;
+	err = 0;
 
-		if (!buffer_uptodate(bh))
-		{
-			if (chars == blocksize)
-				wait_on_buffer(bh);
-			else
-			{
-				bhlist[0] = bh;
-				if (!filp->f_reada || !read_ahead[MAJOR(dev)]) {
-					/* We do this to force the read of a single buffer */
-					blocks = 1;
-				} else {
-					/* Read-ahead before write */
-					blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9) / 2;
-					if (block + blocks > size) blocks = size - block;
-					if (blocks > NBUF) blocks=NBUF;
-					if (!blocks) blocks = 1;
-					for(i=1; i<blocks; i++) {
-						bhlist[i] = getblk (dev, block+i, blocksize);
-						if (!bhlist[i]) {
-							while (i >= 0) brelse(bhlist[i--]);
-							return written ? written : -EIO;
-						}
-					}
-				}
-				ll_rw_block(READ, blocks, bhlist);
-				for(i=1; i<blocks; i++) brelse(bhlist[i]);
-				wait_on_buffer(bh);
-				if (!buffer_uptodate(bh)) {
-					brelse(bh);
-					return written ? written : -EIO;
-				}
-			};
-		}
-#endif
+ out:
+	return err;
+}
+
+static int blkdev_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize)
+{
+	int i, nr_blocks, retval;
+	unsigned long * blocks = iobuf->blocks;
+
+	if (blocksize != BUFFERED_BLOCKSIZE)
+		BUG();
+
+	nr_blocks = iobuf->length >> BUFFERED_BLOCKSIZE_BITS;
+	/* build the blocklist */
+	for (i = 0; i < nr_blocks; i++, blocknr++) {
+		struct buffer_head bh;
+
+		bh.b_state = 0;
+		bh.b_dev = inode->i_rdev;
+		bh.b_size = BUFFERED_BLOCKSIZE;
+
+		retval = blkdev_get_block(inode, blocknr, &bh);
+		if (retval)
+			goto out;
+
+		blocks[i] = bh.b_blocknr;
+	}
+
+	retval = brw_kiovec(rw, 1, &iobuf, inode->i_rdev, iobuf->blocks, blocksize);
+
+ out:
+	return retval;
+}
+
+static int blkdev_writepage(struct page * page)
+{
+	int err, i;
+	unsigned long block;
+	struct buffer_head *bh, *head;
+	struct inode *inode = page->mapping->host;
+
+	if (!PageLocked(page))
+		BUG();
+
+	if (!page->buffers)
+		create_empty_buffers(page, inode->i_dev, BUFFERED_BLOCKSIZE);
+	head = page->buffers;
+
+	block = page->index << (PAGE_CACHE_SHIFT - BUFFERED_BLOCKSIZE_BITS);
+
+	bh = head;
+	i = 0;
+
+	/* Stage 1: make sure we have all the buffers mapped! */
+	do {
+		/*
+		 * If the buffer isn't up-to-date, we can't be sure
+		 * that the buffer has been initialized with the proper
+		 * block number information etc..
+		 *
+		 * Leave it to the low-level FS to make all those
+		 * decisions (block #0 may actually be a valid block)
+		 */
+		if (!buffer_mapped(bh)) {
+			err = blkdev_get_block(inode, block, bh);
+			if (err)
+				goto out;
+		}
+		bh = bh->b_this_page;
 		block++;
-		p = offset + bh->b_data;
-		offset = 0;
-		*ppos += chars;
-		written += chars;
-		count -= chars;
-		copy_from_user(p,buf,chars);
-		p += chars;
-		buf += chars;
-		mark_buffer_uptodate(bh, 1);
-		mark_buffer_dirty(bh);
-		if (filp->f_flags & O_SYNC)
-			bufferlist[buffercount++] = bh;
-		else
-			brelse(bh);
-		if (buffercount == NBUF){
-			ll_rw_block(WRITE, buffercount, bufferlist);
-			for(i=0; i<buffercount; i++){
-				wait_on_buffer(bufferlist[i]);
-				if (!buffer_uptodate(bufferlist[i]))
-					write_error=1;
-				brelse(bufferlist[i]);
+	} while (bh != head);
+
+	/* Stage 2: lock the buffers, mark them clean */
+	do {
+		lock_buffer(bh);
+		set_buffer_async_io(bh);
+		atomic_inc(&bh->b_count);
+		set_bit(BH_Uptodate, &bh->b_state);
+		clear_bit(BH_Dirty, &bh->b_state);
+		bh = bh->b_this_page;
+	} while (bh != head);
+
+	/* Stage 3: submit the IO */
+	do {
+		submit_bh(WRITE, bh);
+		bh = bh->b_this_page;
+	} while (bh != head);
+
+	/* Done - end_buffer_io_async will unlock */
+	SetPageUptodate(page);
+	return 0;
+
+out:
+	ClearPageUptodate(page);
+	UnlockPage(page);
+	return err;
+}
+
+static int blkdev_readpage(struct file * file, struct page * page)
+{
+	struct inode *inode = page->mapping->host;
+	unsigned long iblock, lblock;
+	struct buffer_head *bh, *head, *arr[1 << (PAGE_SHIFT - BUFFERED_BLOCKSIZE_BITS)];
+	unsigned int blocks;
+	int nr, i;
+	kdev_t dev = inode->i_rdev;
+
+	if (!PageLocked(page))
+		PAGE_BUG(page);
+	if (!page->buffers)
+		create_empty_buffers(page, inode->i_dev, BUFFERED_BLOCKSIZE);
+	head = page->buffers;
+
+	blocks = PAGE_CACHE_SIZE >> BUFFERED_BLOCKSIZE_BITS;
+	iblock = page->index << (PAGE_CACHE_SHIFT - BUFFERED_BLOCKSIZE_BITS);
+	lblock = blk_size[MAJOR(dev)][MINOR(dev)] >> (BUFFERED_BLOCKSIZE_BITS - BLOCK_SIZE_BITS);
+	bh = head;
+	nr = 0;
+	i = 0;
+
+	do {
+		if (buffer_uptodate(bh))
+			continue;
+
+		if (!buffer_mapped(bh)) {
+			if (iblock < lblock) {
+				if (blkdev_get_block(inode, iblock, bh))
+					continue;
+			}
+			if (!buffer_mapped(bh)) {
+				memset(kmap(page) + i * BUFFERED_BLOCKSIZE, 0, BUFFERED_BLOCKSIZE);
+				flush_dcache_page(page);
+				kunmap(page);
+				set_bit(BH_Uptodate, &bh->b_state);
+				continue;
 			}
-			buffercount=0;
+			/* get_block() might have updated the buffer synchronously */
+			if (buffer_uptodate(bh))
+				continue;
 		}
-		balance_dirty(dev);
-		if (write_error)
-			break;
-	}
-	if ( buffercount ){
-		ll_rw_block(WRITE, buffercount, bufferlist);
-		for(i=0; i<buffercount; i++){
-			wait_on_buffer(bufferlist[i]);
-			if (!buffer_uptodate(bufferlist[i]))
-				write_error=1;
-			brelse(bufferlist[i]);
-		}
-	}
-	filp->f_reada = 1;
-	if(write_error)
-		return -EIO;
-	return written;
-}
-
-ssize_t block_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
-{
-	struct inode * inode = filp->f_dentry->d_inode;
-	size_t block;
-	loff_t offset;
-	ssize_t blocksize;
-	ssize_t blocksize_bits, i;
-	size_t blocks, rblocks, left;
-	int bhrequest, uptodate;
-	struct buffer_head ** bhb, ** bhe;
-	struct buffer_head * buflist[NBUF];
-	struct buffer_head * bhreq[NBUF];
-	unsigned int chars;
-	loff_t size;
-	kdev_t dev;
-	ssize_t read;
-	dev = inode->i_rdev;
-	blocksize = BLOCK_SIZE;
-	if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)])
-		blocksize = blksize_size[MAJOR(dev)][MINOR(dev)];
-	i = blocksize;
-	blocksize_bits = 0;
-	while (i != 1) {
-		blocksize_bits++;
-		i >>= 1;
-	}
-
-	offset = *ppos;
-	if (blk_size[MAJOR(dev)])
-		size = (loff_t) blk_size[MAJOR(dev)][MINOR(dev)] << BLOCK_SIZE_BITS;
-	else
-		size = (loff_t) INT_MAX << BLOCK_SIZE_BITS;
-
-	if (offset > size)
-		left = 0;
-	/* size - offset might not fit into left, so check explicitly. */
-	else if (size - offset > INT_MAX)
-		left = INT_MAX;
-	else
-		left = size - offset;
-	if (left > count)
-		left = count;
-	if (left <= 0)
+		arr[nr] = bh;
+		nr++;
+	} while (i++, iblock++, (bh = bh->b_this_page) != head);
+
+	if (!nr) {
+		/*
+		 * all buffers are uptodate - we can set the page
+		 * uptodate as well.
+		 */
+		SetPageUptodate(page);
+		UnlockPage(page);
 		return 0;
-	read = 0;
-	block = offset >> blocksize_bits;
-	offset &= blocksize-1;
-	size >>= blocksize_bits;
-	rblocks = blocks = (left + offset + blocksize - 1) >> blocksize_bits;
-	bhb = bhe = buflist;
-	if (filp->f_reada) {
-		if (blocks < read_ahead[MAJOR(dev)] / (blocksize >> 9))
-			blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9);
-		if (rblocks > blocks)
-			blocks = rblocks;
-
-	}
-	if (block + blocks > size) {
-		blocks = size - block;
-		if (blocks == 0)
-			return 0;
-	}
-
-	/* We do this in a two stage process.  We first try to request
-	   as many blocks as we can, then we wait for the first one to
-	   complete, and then we try to wrap up as many as are actually
-	   done.  This routine is rather generic, in that it can be used
-	   in a filesystem by substituting the appropriate function in
-	   for getblk.
+	}
 
-	   This routine is optimized to make maximum use of the various
-	   buffers and caches. */
+	/* Stage two: lock the buffers */
+	for (i = 0; i < nr; i++) {
+		struct buffer_head * bh = arr[i];
+		lock_buffer(bh);
+		set_buffer_async_io(bh);
+		atomic_inc(&bh->b_count);
+	}
 
-	do {
-		bhrequest = 0;
-		uptodate = 1;
-		while (blocks) {
-			--blocks;
-			*bhb = getblk(dev, block++, blocksize);
-			if (*bhb && !buffer_uptodate(*bhb)) {
-				uptodate = 0;
-				bhreq[bhrequest++] = *bhb;
-			}
+	/* Stage 3: start the IO */
+	for (i = 0; i < nr; i++)
+		submit_bh(READ, arr[i]);
 
-			if (++bhb == &buflist[NBUF])
-				bhb = buflist;
+	return 0;
+}
 
-			/* If the block we have on hand is uptodate, go ahead
-			   and complete processing. */
-			if (uptodate)
-				break;
-			if (bhb == bhe)
-				break;
-		}
-
-		/* Now request them all */
-		if (bhrequest) {
-			ll_rw_block(READ, bhrequest, bhreq);
-		}
-
-		do { /* Finish off all I/O that has actually completed */
-			if (*bhe) {
-				wait_on_buffer(*bhe);
-				if (!buffer_uptodate(*bhe)) {	/* read error? */
-					brelse(*bhe);
-					if (++bhe == &buflist[NBUF])
-						bhe = buflist;
-					left = 0;
-					break;
-				}
-			}
-			if (left < blocksize - offset)
-				chars = left;
-			else
-				chars = blocksize - offset;
-			*ppos += chars;
-			left -= chars;
-			read += chars;
-			if (*bhe) {
-				copy_to_user(buf,offset+(*bhe)->b_data,chars);
-				brelse(*bhe);
-				buf += chars;
-			} else {
-				while (chars-- > 0)
-					put_user(0,buf++);
-			}
-			offset = 0;
-			if (++bhe == &buflist[NBUF])
-				bhe = buflist;
-		} while (left > 0 && bhe != bhb && (!*bhe || !buffer_locked(*bhe)));
-		if (bhe == bhb && !blocks)
+static int __blkdev_prepare_write(struct inode *inode, struct page *page,
+				  unsigned from, unsigned to)
+{
+	unsigned block_start, block_end;
+	unsigned long block;
+	int err = 0;
+	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
+	kmap(page);
+
+	if (!page->buffers)
+		create_empty_buffers(page, inode->i_dev, BUFFERED_BLOCKSIZE);
+	head = page->buffers;
+
+	block = page->index << (PAGE_CACHE_SHIFT - BUFFERED_BLOCKSIZE_BITS);
+
+	for(bh = head, block_start = 0; bh != head || !block_start;
+	    block++, block_start=block_end, bh = bh->b_this_page) {
+		if (!bh)
+			BUG();
+		block_end = block_start + BUFFERED_BLOCKSIZE;
+		if (block_end <= from)
+			continue;
+		if (block_start >= to)
 			break;
-	} while (left > 0);
+		if (!buffer_mapped(bh)) {
+			err = blkdev_get_block(inode, block, bh);
+			if (err)
+				goto out;
+		}
+		if (Page_Uptodate(page)) {
+			set_bit(BH_Uptodate, &bh->b_state);
+			continue;
+		}
+		if (!buffer_uptodate(bh) &&
+		     (block_start < from || block_end > to)) {
+			ll_rw_block(READ, 1, &bh);
+			*wait_bh++=bh;
+		}
+	}
+	/*
+	 * If we issued read requests - let them complete.
+	 */
+	while(wait_bh > wait) {
+		wait_on_buffer(*--wait_bh);
+		err = -EIO;
+		if (!buffer_uptodate(*wait_bh))
+			goto out;
+	}
+	return 0;
+out:
+	return err;
+}
 
-/* Release the read-ahead blocks */
-	while (bhe != bhb) {
-		brelse(*bhe);
-		if (++bhe == &buflist[NBUF])
-			bhe = buflist;
-	};
-	if (!read)
-		return -EIO;
-	filp->f_reada = 1;
-	return read;
+static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	int err = __blkdev_prepare_write(inode, page, from, to);
+	if (err) {
+		ClearPageUptodate(page);
+		kunmap(page);
+	}
+	return err;
+}
+
+static int __blkdev_commit_write(struct inode *inode, struct page *page,
+				 unsigned from, unsigned to)
+{
+	unsigned block_start, block_end;
+	int partial = 0, need_balance_dirty = 0;
+	struct buffer_head *bh, *head;
+
+	for(bh = head = page->buffers, block_start = 0;
+	    bh != head || !block_start;
+	    block_start=block_end, bh = bh->b_this_page) {
+		block_end = block_start + BUFFERED_BLOCKSIZE;
+		if (block_end <= from || block_start >= to) {
+			if (!buffer_uptodate(bh))
+				partial = 1;
+		} else {
+			set_bit(BH_Uptodate, &bh->b_state);
+			if (!atomic_set_buffer_dirty(bh)) {
+				__mark_dirty(bh);
+				buffer_insert_inode_data_queue(bh, inode);
+				need_balance_dirty = 1;
+			}
+		}
+	}
+
+	if (need_balance_dirty)
+		balance_dirty(bh->b_dev);
+	/*
+	 * If this is a partial write that happened to make all buffers
+	 * uptodate then we can optimize away a bogus readpage() for
+	 * the next read(). Here we 'discover' whether the page went
+	 * uptodate as a result of this (potentially partial) write.
+	 */
+	if (!partial)
+		SetPageUptodate(page);
+	return 0;
+}
+
+static int blkdev_commit_write(struct file *file, struct page *page,
+			       unsigned from, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	__blkdev_commit_write(inode,page,from,to);
+	kunmap(page);
+	return 0;
 }
 
 /*
@@ -650,7 +656,7 @@
 	ret = 0;
 	if (bdev->bd_op->open)
 		ret = bdev->bd_op->open(inode,filp);
-	if (!ret)
+	if (!ret)
 		atomic_inc(&bdev->bd_openers);
 	else if (!atomic_read(&bdev->bd_openers))
 		bdev->bd_op = NULL;
@@ -667,9 +673,9 @@
 	down(&bdev->bd_sem);
 	/* syncing will go here */
 	lock_kernel();
-	if (kind == BDEV_FILE || kind == BDEV_FS)
+	if (kind == BDEV_FILE || kind == BDEV_FS) {
 		fsync_dev(rdev);
-	if (atomic_dec_and_test(&bdev->bd_openers)) {
+	} if (atomic_dec_and_test(&bdev->bd_openers)) {
 		/* invalidating buffers will go here */
 		invalidate_buffers(rdev);
 	}
@@ -692,6 +698,10 @@
 
 static int blkdev_close(struct inode * inode, struct file * filp)
 {
+	filemap_fdatasync(inode->i_mapping);
+	fsync_dev(inode->i_rdev);
+	filemap_fdatawait(inode->i_mapping);
+	invalidate_inode_pages2(inode->i_mapping);
 	return blkdev_put(inode->i_bdev, BDEV_FILE);
 }
 
@@ -703,12 +713,22 @@
 	return -EINVAL;
 }
 
+struct address_space_operations def_blk_aops = {
+	readpage: blkdev_readpage,
+	writepage: blkdev_writepage,
+	sync_page: block_sync_page,
+	prepare_write: blkdev_prepare_write,
+	commit_write: blkdev_commit_write,
+	direct_IO: blkdev_direct_IO,
+};
+
 struct file_operations def_blk_fops = {
 	open:		blkdev_open,
 	release:	blkdev_close,
 	llseek:		block_llseek,
-	read:		block_read,
-	write:		block_write,
+	read:		generic_file_read,
+	write:		generic_file_write,
+	mmap:		generic_file_mmap,
 	fsync:		block_fsync,
 	ioctl:		blkdev_ioctl,
 };
diff -urN o_direct/fs/buffer.c blkdevpagecache/fs/buffer.c
--- o_direct/fs/buffer.c	Wed May  9 03:22:07 2001
+++ blkdevpagecache/fs/buffer.c	Wed May  9 03:23:59 2001
@@ -1147,7 +1147,7 @@
 	wakeup_bdflush(state);
 }
 
-static __inline__ void __mark_dirty(struct buffer_head *bh)
+inline void __mark_dirty(struct buffer_head *bh)
 {
 	bh->b_flushtime = jiffies + bdf_prm.b_un.age_buffer;
 	refile_buffer(bh);
@@ -1495,7 +1495,7 @@
 	return 1;
 }
 
-static void create_empty_buffers(struct page *page, kdev_t dev, unsigned long blocksize)
+void create_empty_buffers(struct page *page, kdev_t dev, unsigned long blocksize)
 {
 	struct buffer_head *bh, *head, *tail;
 
diff -urN o_direct/fs/devices.c blkdevpagecache/fs/devices.c
--- o_direct/fs/devices.c	Thu Nov 16 15:37:32 2000
+++ blkdevpagecache/fs/devices.c	Wed May  9 03:23:59 2001
@@ -205,6 +205,7 @@
 		inode->i_rdev = to_kdev_t(rdev);
 	} else if (S_ISBLK(mode)) {
 		inode->i_fop = &def_blk_fops;
+		inode->i_mapping->a_ops = &def_blk_aops;
 		inode->i_rdev = to_kdev_t(rdev);
 		inode->i_bdev = bdget(rdev);
 	} else if (S_ISFIFO(mode))
diff -urN o_direct/include/linux/fs.h blkdevpagecache/include/linux/fs.h
--- o_direct/include/linux/fs.h	Wed May  9 03:22:07 2001
+++ blkdevpagecache/include/linux/fs.h	Wed May  9 03:23:59 2001
@@ -46,6 +46,10 @@
 #define BLOCK_SIZE_BITS 10
 #define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
+
+/* buffer header fixed size for the blkdev I/O through pagecache */
+#define BUFFERED_BLOCKSIZE_BITS 10
+#define BUFFERED_BLOCKSIZE (1 << BUFFERED_BLOCKSIZE_BITS)
b_state)
@@ -1291,6 +1298,7 @@
 int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
 extern int generic_direct_IO(int, struct inode *, struct kiobuf *, unsigned long, int, get_block_t *);
+extern void create_empty_buffers(struct page *, kdev_t, unsigned long);
 
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
diff -urN o_direct/kernel/ksyms.c blkdevpagecache/kernel/ksyms.c
--- o_direct/kernel/ksyms.c	Wed May  9 03:22:07 2001
+++ blkdevpagecache/kernel/ksyms.c	Wed May  9 03:23:59 2001
@@ -276,8 +276,6 @@
 EXPORT_SYMBOL(tty_std_termios);
 
 /* block device driver support */
-EXPORT_SYMBOL(block_read);
-EXPORT_SYMBOL(block_write);
 EXPORT_SYMBOL(blksize_size);
 EXPORT_SYMBOL(hardsect_size);
 EXPORT_SYMBOL(blk_size);
diff -urN o_direct/mm/filemap.c blkdevpagecache/mm/filemap.c
--- o_direct/mm/filemap.c	Wed May  9 03:22:07 2001
+++ blkdevpagecache/mm/filemap.c	Wed May  9 03:23:59 2001
@@ -998,16 +998,42 @@
 	return max_readahead[MAJOR(inode->i_dev)][MINOR(inode->i_dev)];
 }
 
+static inline unsigned long calc_end_index(struct inode * inode)
+{
+	unsigned long end_index;
+
+	if (!S_ISBLK(inode->i_mode))
+		end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+	else
+		end_index = blk_size[MAJOR(inode->i_rdev)][MINOR(inode->i_rdev)] >> (PAGE_CACHE_SHIFT - BLOCK_SIZE_BITS);
+
+	return end_index;
+}
+
+static inline loff_t calc_rsize(struct inode * inode)
+{
+	loff_t rsize;
+
+	if (!S_ISBLK(inode->i_mode))
+		rsize = inode->i_size;
+	else
+		rsize = (loff_t) blk_size[MAJOR(inode->i_rdev)][MINOR(inode->i_rdev)] << BLOCK_SIZE_BITS;
+
+	return rsize;
+}
+
 static void generic_file_readahead(int reada_ok,
 	struct file * filp, struct inode * inode,
 	struct page * page)
 {
-	unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+	unsigned long end_index;
 	unsigned long index = page->index;
 	unsigned long max_ahead, ahead;
 	unsigned long raend;
 	int max_readahead = get_max_readahead(inode);
 
+	end_index = calc_end_index(inode);
+
 	raend = filp->f_raend;
 	max_ahead = 0;
 
@@ -1172,13 +1198,17 @@
 		struct page *page, **hash;
 		unsigned long end_index, nr, ret;
 
-		end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+		end_index = calc_end_index(inode);
+
 		if (index > end_index)
 			break;
 		nr = PAGE_CACHE_SIZE;
 		if (index == end_index) {
-			nr = inode->i_size & ~PAGE_CACHE_MASK;
-			if (nr <= offset)
+			if (!S_ISBLK(inode->i_mode)) {
+				nr = inode->i_size & ~PAGE_CACHE_MASK;
+				if (nr <= offset)
+					break;
+			} else
 				break;
 		}
 
@@ -1338,9 +1368,14 @@
 		new_iobuf = 1;
 	}
 
-	blocksize = inode->i_sb->s_blocksize;
+	if (!S_ISBLK(inode->i_mode)) {
+		blocksize = inode->i_sb->s_blocksize;
+		blocksize_bits = inode->i_sb->s_blocksize_bits;
+	} else {
+		blocksize = BUFFERED_BLOCKSIZE;
+		blocksize_bits = BUFFERED_BLOCKSIZE_BITS;
+	}
 	blocksize_mask = blocksize - 1;
-	blocksize_bits = inode->i_sb->s_blocksize_bits;
 	chunk_size = KIO_MAX_ATOMIC_IO << 10;
 
 	retval = -EINVAL;
@@ -1454,11 +1489,13 @@
 
  o_direct:
 	{
-		loff_t pos = *ppos;
+		loff_t pos = *ppos, size;
 		struct inode * inode = filp->f_dentry->d_inode;
 
-		if (pos + count > inode->i_size)
-			count = inode->i_size - pos;
+		size = calc_rsize(inode);
+		if (pos + count > size)
+			count = size - pos;
+
 		retval = generic_file_direct_IO(READ, filp, buf, count, pos);
 		if (retval > 0)
 			*ppos = pos + retval;
@@ -1649,6 +1686,7 @@
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page, **hash, *old_page;
 	unsigned long size, pgoff;
+	loff_t rsize;
 
 	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
 
@@ -1657,7 +1695,8 @@
 	 * An external ptracer can access pages that normally aren't
 	 * accessible..
 	 */
-	size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	rsize = calc_rsize(inode);
+	size = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if ((pgoff >= size) && (area->vm_mm == current->mm))
 		return NULL;
 
@@ -1895,22 +1934,7 @@
 	return error;
 }
 
-/*
- * Shared mappings need to be able to do the right thing at
- * close/unmap/sync. They will also use the private file as
- * backing-store for swapping..
- */
-static struct vm_operations_struct file_shared_mmap = {
-	nopage:		filemap_nopage,
-};
-
-/*
- * Private mappings just need to be able to load in the map.
- *
- * (This is actually used for shared mappings as well, if we
- * know they can't ever get write permissions..)
- */
-static struct vm_operations_struct file_private_mmap = {
+static struct vm_operations_struct generic_file_vm_ops = {
 	nopage:		filemap_nopage,
 };
 
@@ -1918,21 +1942,18 @@
 
 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
-	struct vm_operations_struct * ops;
 	struct inode *inode = file->f_dentry->d_inode;
 
-	ops = &file_private_mmap;
 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
 		if (!inode->i_mapping->a_ops->writepage)
 			return -EINVAL;
-		ops = &file_shared_mmap;
 	}
 	if (!inode->i_sb || !S_ISREG(inode->i_mode))
 		return -EACCES;
 	if (!inode->i_mapping->a_ops->readpage)
 		return -ENOEXEC;
 	UPDATE_ATIME(inode);
-	vma->vm_ops = ops;
+	vma->vm_ops = &generic_file_vm_ops;
 	return 0;
 }
 
@@ -2167,13 +2188,14 @@
 	long error = -EBADF;
 	struct file * file;
 	unsigned long size, rlim_rss;
+	loff_t rsize;
 
 	/* Doesn't work if there's no mapped file. */
 	if (!vma->vm_file)
 		return error;
 	file = vma->vm_file;
-	size = (file->f_dentry->d_inode->i_size + PAGE_CACHE_SIZE - 1) >>
-							PAGE_CACHE_SHIFT;
+	rsize = calc_rsize(file->f_dentry->d_inode);
+	size = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
 	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 	if (end > vma->vm_end)
@@ -2686,8 +2708,11 @@
 
 	written = 0;
 
-	if (file->f_flags & O_APPEND)
+	if (file->f_flags & O_APPEND) {
+		if (S_ISBLK(inode->i_mode))
+			goto out;
 		pos = inode->i_size;
+	}
 
 	/*
 	 * Check whether we've reached the file size limit.
@@ -2843,7 +2868,7 @@
 	err = generic_file_direct_IO(WRITE, file, (char *) buf, count, pos);
 	if (err > 0) {
 		loff_t end = pos + err;
-		if (end > inode->i_size) {
+		if (end > inode->i_size && !S_ISBLK(inode->i_mode)) {
 			inode->i_size = end;
 			mark_inode_dirty(inode);
 		}