Signed-off-by: Robert Love

 drivers/char/Makefile   |    2
 drivers/char/inotify.c  | 1082 ++++++++++++++++++++++++++++++++++++++++++++++++
 fs/attr.c               |   32 +
 fs/inode.c              |    1
 fs/namei.c              |   21
 fs/open.c               |    5
 fs/read_write.c         |   17
 fs/super.c              |    2
 include/linux/fs.h      |    4
 include/linux/inotify.h |  100 ++++
 10 files changed, 1258 insertions(+), 8 deletions(-)

diff -urN linux-2.6.9-rc2/drivers/char/inotify.c linux/drivers/char/inotify.c
--- linux-2.6.9-rc2/drivers/char/inotify.c	1969-12-31 19:00:00.000000000 -0500
+++ linux/drivers/char/inotify.c	2004-09-24 17:07:09.626952248 -0400
@@ -0,0 +1,1082 @@
+/*
+ * Inode based directory notifications for Linux.
+ *
+ * Copyright (C) 2004 John McCutchan
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/* TODO:
+ * rename inotify_watcher to inotify_watch
+ * need a way to connect MOVED_TO/MOVED_FROM events in user space
+ * use b-tree so looking up watch by WD is faster
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/writeback.h>
+#include <linux/miscdevice.h>
+#include <linux/inotify.h>
+#include <asm/uaccess.h>
+
+#define INOTIFY_VERSION			"0.10.0"
+
+#define MAX_INOTIFY_DEVS		8	/* max open inotify devices */
+#define MAX_INOTIFY_DEV_WATCHERS	8192	/* max total watches */
+#define MAX_INOTIFY_QUEUED_EVENTS	256	/* max events queued on the dev */
+
+#define INOTIFY_DEV_TIMER_TIME		(jiffies + (HZ/4))
+
+static atomic_t watcher_count;
+static kmem_cache_t *watcher_cache;
+static kmem_cache_t *kevent_cache;
+
+/* For debugging */
+static int event_object_count;
+static int watcher_object_count;
+static int inode_ref_count;
+static int inotify_debug_flags;
+#define iprintk(f, str...) if (inotify_debug_flags & f) printk (KERN_ALERT str)
+
+/*
+ * struct inotify_device - represents an open instance of an inotify device
+ *
+ * For each inotify device, we need to keep track of the events queued on it,
+ * a list of the inodes that we are watching, and so on.
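+ *
+ * bitmask tracks which watch descriptors are in use, events is the FIFO of
+ * queued inotify_kernel_events, watchers links every inotify_watcher owned
+ * by this device, and lock protects all of the above.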
+ */ +struct inotify_device { + unsigned long bitmask[MAX_INOTIFY_DEV_WATCHERS/BITS_PER_LONG]; + struct timer_list timer; + wait_queue_head_t wait; + struct list_head events; + struct list_head watchers; + spinlock_t lock; + unsigned int event_count; + int nr_watches; +}; + +struct inotify_watcher { + int wd; // watcher descriptor + unsigned long mask; + struct inode * inode; + struct inotify_device * dev; + struct list_head d_list; // device list + struct list_head i_list; // inode list + struct list_head u_list; // unmount list +}; +#define inotify_watcher_d_list(pos) list_entry((pos), struct inotify_watcher, d_list) +#define inotify_watcher_i_list(pos) list_entry((pos), struct inotify_watcher, i_list) +#define inotify_watcher_u_list(pos) list_entry((pos), struct inotify_watcher, u_list) + +/* + * A list of these is attached to each instance of the driver + * when the drivers read() gets called, this list is walked and + * all events that can fit in the buffer get delivered + */ +struct inotify_kernel_event { + struct list_head list; + struct inotify_event event; +}; +#define list_to_inotify_kernel_event(pos) list_entry((pos), struct inotify_kernel_event, list) + +static int find_inode(const char __user *dirname, struct inode **inode) +{ + struct nameidata nd; + int error; + + error = __user_walk(dirname, LOOKUP_FOLLOW, &nd); + if (error) { + iprintk(INOTIFY_DEBUG_INODE, "could not find inode\n"); + goto out; + } + + *inode = nd.dentry->d_inode; + __iget(*inode); + iprintk(INOTIFY_DEBUG_INODE, "ref'd inode\n"); + inode_ref_count++; + path_release(&nd); +out: + return error; +} + +static void unref_inode(struct inode *inode) +{ + inode_ref_count--; + iprintk(INOTIFY_DEBUG_INODE, "unref'd inode\n"); + iput(inode); +} + +struct inotify_kernel_event *kernel_event(int wd, int mask, + const char *filename) +{ + struct inotify_kernel_event *kevent; + + kevent = kmem_cache_alloc(kevent_cache, GFP_ATOMIC); + if (!kevent) { + iprintk(INOTIFY_DEBUG_ALLOC, + "failed to alloc kevent (%d,%d)\n", wd, mask); + goto out; + } + + iprintk(INOTIFY_DEBUG_ALLOC, "alloced kevent %p (%d,%d)\n", + kevent, wd, mask); + + kevent->event.wd = wd; + kevent->event.mask = mask; + INIT_LIST_HEAD(&kevent->list); + + if (filename) { + iprintk(INOTIFY_DEBUG_FILEN, + "filename for event was %p %s\n", filename, filename); + strncpy(kevent->event.filename, filename, + INOTIFY_FILENAME_MAX); + kevent->event.filename[INOTIFY_FILENAME_MAX-1] = '\0'; + iprintk(INOTIFY_DEBUG_FILEN, + "filename after copying %s\n", kevent->event.filename); + } else { + iprintk(INOTIFY_DEBUG_FILEN, "no filename for event\n"); + kevent->event.filename[0] = '\0'; + } + + event_object_count++; + +out: + return kevent; +} + +void delete_kernel_event(struct inotify_kernel_event *kevent) +{ + if (!kevent) + return; + + event_object_count--; + INIT_LIST_HEAD(&kevent->list); + kevent->event.wd = -1; + kevent->event.mask = 0; + + iprintk(INOTIFY_DEBUG_ALLOC, "free'd kevent %p\n", kevent); + kmem_cache_free(kevent_cache, kevent); +} + +#define inotify_dev_has_events(dev) (!list_empty(&dev->events)) +#define inotify_dev_get_event(dev) (list_to_inotify_kernel_event(dev->events.next)) +/* Does this events mask get sent to the watcher ? */ +#define event_and(event_mask,watchers_mask) ((event_mask == IN_UNMOUNT) || \ + (event_mask == IN_IGNORED) || \ + (event_mask & watchers_mask)) + +/* + * inotify_dev_queue_event - add a new event to the given device + * + * Caller must hold dev->lock. 
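+ *
+ * Events that duplicate the event at the head of the queue are dropped.
+ * Once MAX_INOTIFY_QUEUED_EVENTS events are queued, a single IN_Q_OVERFLOW
+ * event is appended and any further events are discarded.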
+ */ +static void inotify_dev_queue_event(struct inotify_device *dev, + struct inotify_watcher *watcher, int mask, + const char *filename) +{ + struct inotify_kernel_event *kevent; + + /* + * Check if the new event is a duplicate of the last event queued. + */ + if (dev->event_count && + inotify_dev_get_event(dev)->event.mask == mask && + inotify_dev_get_event(dev)->event.wd == watcher->wd) { + + /* Check if the filenames match */ + if (!filename && inotify_dev_get_event(dev)->event.filename[0] == '\0') + return; + if (filename && !strcmp(inotify_dev_get_event(dev)->event.filename, filename)) + return; + } + /* + * the queue has already overflowed and we have already sent the + * Q_OVERFLOW event + */ + if (dev->event_count > MAX_INOTIFY_QUEUED_EVENTS) { + iprintk(INOTIFY_DEBUG_EVENTS, + "event queue for %p overflowed\n", dev); + return; + } + + /* the queue has just overflowed and we need to notify user space */ + if (dev->event_count == MAX_INOTIFY_QUEUED_EVENTS) { + dev->event_count++; + kevent = kernel_event(-1, IN_Q_OVERFLOW, NULL); + iprintk(INOTIFY_DEBUG_EVENTS, "sending IN_Q_OVERFLOW to %p\n", + dev); + goto add_event_to_queue; + } + + if (!event_and(mask, watcher->inode->watchers_mask) || + !event_and(mask, watcher->mask)) + return; + + dev->event_count++; + kevent = kernel_event(watcher->wd, mask, filename); + +add_event_to_queue: + if (!kevent) { + iprintk(INOTIFY_DEBUG_EVENTS, "failed to queue event for %p" + " -- could not allocate kevent\n", dev); + dev->event_count--; + return; + } + + list_add_tail(&kevent->list, &dev->events); + iprintk(INOTIFY_DEBUG_EVENTS, + "queued event %x for %p\n", kevent->event.mask, dev); +} + +/* + * inotify_dev_event_dequeue - destroy an event on the given device + * + * Caller must hold dev->lock. + */ +static void inotify_dev_event_dequeue(struct inotify_device *dev) +{ + struct inotify_kernel_event *kevent; + + if (!inotify_dev_has_events(dev)) + return; + + kevent = inotify_dev_get_event(dev); + list_del(&kevent->list); + dev->event_count--; + delete_kernel_event(kevent); + + iprintk(INOTIFY_DEBUG_EVENTS, "dequeued event on %p\n", dev); +} + +/* + * inotify_dev_get_wd - returns the next WD for use by the given dev + * + * Caller must hold dev->lock before calling. + */ +static int inotify_dev_get_wd(struct inotify_device *dev) +{ + int wd; + + if (!dev || dev->nr_watches == MAX_INOTIFY_DEV_WATCHERS) + return -1; + + dev->nr_watches++; + wd = find_first_zero_bit(dev->bitmask, MAX_INOTIFY_DEV_WATCHERS); + set_bit (wd, dev->bitmask); + + return wd; +} + +/* + * inotify_dev_put_wd - release the given WD on the given device + * + * Caller must hold dev->lock. + */ +static int inotify_dev_put_wd(struct inotify_device *dev, int wd) +{ + if (!dev || wd < 0) + return -1; + + dev->nr_watches--; + clear_bit(wd, dev->bitmask); + + return 0; +} + +/* + * create_watcher - creates a watcher on the given device. + * + * Grabs dev->lock, so the caller must not hold it. 
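+ *
+ * Returns NULL if the watcher cannot be allocated or if no watch descriptor
+ * is available on this device.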
+ */ +static struct inotify_watcher *create_watcher(struct inotify_device *dev, + int mask, struct inode *inode) +{ + struct inotify_watcher *watcher; + + watcher = kmem_cache_alloc(watcher_cache, GFP_KERNEL); + if (!watcher) { + iprintk(INOTIFY_DEBUG_ALLOC, + "failed to allocate watcher (%p,%d)\n", inode, mask); + return NULL; + } + + watcher->wd = -1; + watcher->mask = mask; + watcher->inode = inode; + watcher->dev = dev; + INIT_LIST_HEAD(&watcher->d_list); + INIT_LIST_HEAD(&watcher->i_list); + INIT_LIST_HEAD(&watcher->u_list); + + spin_lock(&dev->lock); + watcher->wd = inotify_dev_get_wd(dev); + spin_unlock(&dev->lock); + + if (watcher->wd < 0) { + iprintk(INOTIFY_DEBUG_ERRORS, + "Could not get wd for watcher %p\n", watcher); + iprintk(INOTIFY_DEBUG_ALLOC, "free'd watcher %p\n", watcher); + kmem_cache_free(watcher_cache, watcher); + return NULL; + } + + watcher_object_count++; + return watcher; +} + +/* + * delete_watcher - removes the given 'watcher' from the given 'dev' + * + * Caller must hold dev->lock. + */ +static void delete_watcher(struct inotify_device *dev, + struct inotify_watcher *watcher) +{ + inotify_dev_put_wd(dev, watcher->wd); + iprintk(INOTIFY_DEBUG_ALLOC, "free'd watcher %p\n", watcher); + kmem_cache_free(watcher_cache, watcher); + watcher_object_count--; +} + +/* + * inotify_find_dev - find the watcher associated with the given inode and dev + * + * Caller must hold dev->lock. + */ +static struct inotify_watcher *inode_find_dev(struct inode *inode, + struct inotify_device *dev) +{ + struct inotify_watcher *watcher; + + list_for_each_entry(watcher, &inode->watchers, i_list) { + if (watcher->dev == dev) + return watcher; + } + + return NULL; +} + +static struct inotify_watcher *dev_find_wd(struct inotify_device *dev, int wd) +{ + struct inotify_watcher *watcher; + + list_for_each_entry(watcher, &dev->watchers, d_list) { + if (watcher->wd == wd) + return watcher; + } + return NULL; +} + +static int inotify_dev_is_watching_inode(struct inotify_device *dev, + struct inode *inode) +{ + struct inotify_watcher *watcher; + + list_for_each_entry(watcher, &dev->watchers, d_list) { + if (watcher->inode == inode) + return 1; + } + + return 0; +} + +static int inotify_dev_add_watcher(struct inotify_device *dev, + struct inotify_watcher *watcher) +{ + int error; + + error = 0; + + if (!dev || !watcher) { + error = -EINVAL; + goto out; + } + + if (dev_find_wd (dev, watcher->wd)) { + error = -EINVAL; + goto out; + } + + if (dev->nr_watches == MAX_INOTIFY_DEV_WATCHERS) { + error = -ENOSPC; + goto out; + } + + list_add(&watcher->d_list, &dev->watchers); +out: + return error; +} + +/* + * inotify_dev_rm_watcher - remove the given watch from the given device + * + * Caller must hold dev->lock because we call inotify_dev_queue_event(). + */ +static int inotify_dev_rm_watcher(struct inotify_device *dev, + struct inotify_watcher *watcher) +{ + int error; + + error = -EINVAL; + if (watcher) { + inotify_dev_queue_event(dev, watcher, IN_IGNORED, NULL); + list_del(&watcher->d_list); + error = 0; + } + + return error; +} + +void inode_update_watchers_mask(struct inode *inode) +{ + struct inotify_watcher *watcher; + unsigned long new_mask; + + new_mask = 0; + list_for_each_entry(watcher, &inode->watchers, i_list) + new_mask |= watcher->mask; + + inode->watchers_mask = new_mask; +} + +/* + * inode_add_watcher - add a watcher to the given inode + * + * Callers must hold dev->lock, because we call inode_find_dev(). 
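+ *
+ * Returns -EINVAL if the inode is invalid or is already watched by this
+ * watcher's device.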
+ */ +static int inode_add_watcher(struct inode *inode, + struct inotify_watcher *watcher) +{ + if (!inode || !watcher || inode_find_dev(inode, watcher->dev)) + return -EINVAL; + + list_add(&watcher->i_list, &inode->watchers); + inode->watcher_count++; + inode_update_watchers_mask(inode); + + return 0; +} + +static int inode_rm_watcher(struct inode *inode, + struct inotify_watcher *watcher) +{ + if (!inode || !watcher) + return -EINVAL; + + list_del(&watcher->i_list); + inode->watcher_count--; + + inode_update_watchers_mask(inode); + + return 0; +} + +/* Kernel API */ + +void inotify_inode_queue_event(struct inode *inode, unsigned long mask, + const char *filename) +{ + struct inotify_watcher *watcher; + + spin_lock(&inode->i_lock); + + list_for_each_entry(watcher, &inode->watchers, i_list) { + spin_lock(&watcher->dev->lock); + inotify_dev_queue_event(watcher->dev, watcher, mask, filename); + spin_unlock(&watcher->dev->lock); + } + + spin_unlock(&inode->i_lock); +} +EXPORT_SYMBOL_GPL(inotify_inode_queue_event); + +void inotify_inode_queue_event_pair(struct inode *inode1, unsigned long mask1, + const char *filename1, + struct inode *inode2, unsigned long mask2, + const char *filename2) +{ + struct inotify_watcher *watcher; + + spin_lock(&inode1->i_lock); + spin_lock(&inode2->i_lock); + + list_for_each_entry(watcher, &inode1->watchers, i_list) { + spin_lock(&watcher->dev->lock); + inotify_dev_queue_event(watcher->dev, watcher, + mask1, filename1); + spin_unlock(&watcher->dev->lock); + } + + list_for_each_entry(watcher, &inode2->watchers, i_list) { + spin_lock(&watcher->dev->lock); + inotify_dev_queue_event(watcher->dev, watcher, + mask2, filename2); + spin_unlock(&watcher->dev->lock); + } + + spin_unlock(&inode2->i_lock); + spin_unlock(&inode1->i_lock); +} +EXPORT_SYMBOL_GPL(inotify_inode_queue_event_pair); + +void inotify_dentry_parent_queue_event(struct dentry *dentry, + unsigned long mask, + const char *filename) +{ + struct dentry *parent; + + spin_lock(&dentry->d_lock); + dget(dentry->d_parent); + parent = dentry->d_parent; + inotify_inode_queue_event(parent->d_inode, mask, filename); + dput(parent); + spin_unlock(&dentry->d_lock); +} +EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event); + +static void ignore_helper(struct inotify_watcher *watcher, int event) +{ + struct inotify_device *dev; + struct inode *inode; + + spin_lock(&watcher->dev->lock); + spin_lock(&watcher->inode->i_lock); + + inode = watcher->inode; + dev = watcher->dev; + + if (event) + inotify_dev_queue_event(dev, watcher, event, NULL); + + inode_rm_watcher(inode, watcher); + inotify_dev_rm_watcher(watcher->dev, watcher); + list_del(&watcher->u_list); + + spin_unlock(&inode->i_lock); + delete_watcher(dev, watcher); + spin_unlock(&dev->lock); + + unref_inode(inode); +} + +static void process_umount_list(struct list_head *umount) { + struct inotify_watcher *watcher, *next; + + list_for_each_entry_safe(watcher, next, umount, u_list) + ignore_helper(watcher, IN_UNMOUNT); +} + +static void build_umount_list(struct list_head *head, struct super_block *sb, + struct list_head *umount) +{ + struct inode * inode; + + list_for_each_entry(inode, head, i_list) { + struct inotify_watcher *watcher; + + if (inode->i_sb != sb) + continue; + + spin_lock(&inode->i_lock); + + list_for_each_entry(watcher, &inode->watchers, i_list) + list_add(&watcher->u_list, umount); + + spin_unlock(&inode->i_lock); + } +} + +void inotify_super_block_umount(struct super_block *sb) +{ + struct list_head umount; + + INIT_LIST_HEAD(&umount); + + 
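+	/*
+	 * Two passes: collect every watcher on this super block while
+	 * holding inode_lock, then send IN_UNMOUNT/IN_IGNORED and tear the
+	 * watchers down once the lock has been dropped.
+	 */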
+	spin_lock(&inode_lock);
+	build_umount_list(&inode_in_use, sb, &umount);
+	spin_unlock(&inode_lock);
+
+	process_umount_list(&umount);
+}
+EXPORT_SYMBOL_GPL(inotify_super_block_umount);
+
+void inotify_inode_is_dead(struct inode *inode)
+{
+	struct inotify_watcher *watcher, *next;
+
+	list_for_each_entry_safe(watcher, next, &inode->watchers, i_list) {
+		ignore_helper(watcher, 0);
+	}
+}
+EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
+
+/* The driver interface is implemented below */
+
+static unsigned int inotify_poll(struct file *file, poll_table *wait)
+{
+	struct inotify_device *dev;
+
+	dev = file->private_data;
+
+	poll_wait(file, &dev->wait, wait);
+
+	if (inotify_dev_has_events(dev))
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+#define MAX_EVENTS_AT_ONCE 20
+static ssize_t inotify_read(struct file *file, __user char *buf,
+			    size_t count, loff_t *pos)
+{
+	ssize_t out;
+	struct inotify_event *eventbuf;
+	struct inotify_kernel_event *kevent;
+	struct inotify_device *dev;
+	char __user *obuf;
+	int err;
+	int events;
+	int event_count;
+
+	DECLARE_WAITQUEUE(wait, current);
+
+	out = -ENOMEM;
+	eventbuf = kmalloc(sizeof(struct inotify_event) * MAX_EVENTS_AT_ONCE,
+			   GFP_KERNEL);
+	if (!eventbuf)
+		goto out;
+
+	events = 0;
+	out = 0;
+	err = 0;
+	obuf = buf;
+
+	dev = file->private_data;
+
+	/* We only hand out full inotify events */
+	if (count < sizeof(struct inotify_event)) {
+		out = -EINVAL;
+		goto out;
+	}
+
+	events = count / sizeof(struct inotify_event);
+	if (events > MAX_EVENTS_AT_ONCE)
+		events = MAX_EVENTS_AT_ONCE;
+
+	if (!inotify_dev_has_events(dev)) {
+		if (file->f_flags & O_NONBLOCK) {
+			out = -EAGAIN;
+			goto out;
+		}
+	}
+
+	spin_lock(&dev->lock);
+
+	add_wait_queue(&dev->wait, &wait);
+repeat:
+	if (signal_pending(current)) {
+		spin_unlock(&dev->lock);
+		out = -ERESTARTSYS;
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&dev->wait, &wait);
+		goto out;
+	}
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (!inotify_dev_has_events(dev)) {
+		spin_unlock(&dev->lock);
+		schedule();
+		spin_lock(&dev->lock);
+		goto repeat;
+	}
+
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&dev->wait, &wait);
+
+	err = !access_ok(VERIFY_WRITE, (void *)buf,
+			 sizeof(struct inotify_event));
+	if (err) {
+		spin_unlock(&dev->lock);
+		out = -EFAULT;
+		goto out;
+	}
+
+	/* Copy all the events we can to the event buffer */
+	for (event_count = 0; event_count < events; event_count++) {
+		if (!inotify_dev_has_events(dev))
+			break;
+		kevent = inotify_dev_get_event(dev);
+		eventbuf[event_count] = kevent->event;
+		inotify_dev_event_dequeue(dev);
+	}
+
+	spin_unlock(&dev->lock);
+
+	/* Send the event buffer to user space */
+	err = copy_to_user(buf, eventbuf,
+			   event_count * sizeof(struct inotify_event));
+	if (err) {
+		out = -EFAULT;
+		goto out;
+	}
+
+	buf += sizeof(struct inotify_event) * event_count;
+
+	out = buf - obuf;
+
+out:
+	kfree(eventbuf);
+	return out;
+}
+
+static void inotify_dev_timer(unsigned long data)
+{
+	struct inotify_device *dev;
+
+	if (!data)
+		return;
+	dev = (struct inotify_device *) data;
+
+	/* reset the timer */
+	mod_timer(&dev->timer, INOTIFY_DEV_TIMER_TIME);
+
+	/* wake up anyone blocked on the device */
+	if (inotify_dev_has_events(dev))
+		wake_up_interruptible(&dev->wait);
+}
+
+static int inotify_open(struct inode *inode, struct file *file)
+{
+	struct inotify_device *dev;
+
+	if (atomic_read(&watcher_count) == MAX_INOTIFY_DEVS)
+		return -ENODEV;
+
+	atomic_inc(&watcher_count);
+
+	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
+	if (!dev) {
+		atomic_dec(&watcher_count);
+		return -ENOMEM;
+	}
+
+	memset(dev->bitmask, 0,
+	       sizeof(unsigned long) * MAX_INOTIFY_DEV_WATCHERS / BITS_PER_LONG);
+
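+	/*
+	 * Start with an empty event queue and watcher list; the timer armed
+	 * below fires every HZ/4 ticks and wakes any reader sleeping in
+	 * inotify_read() once events have been queued.
+	 */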
INIT_LIST_HEAD(&dev->events); + INIT_LIST_HEAD(&dev->watchers); + init_timer(&dev->timer); + init_waitqueue_head(&dev->wait); + + dev->event_count = 0; + dev->nr_watches = 0; + dev->lock = SPIN_LOCK_UNLOCKED; + + file->private_data = dev; + + dev->timer.data = (unsigned long) dev; + dev->timer.function = inotify_dev_timer; + dev->timer.expires = INOTIFY_DEV_TIMER_TIME; + + add_timer(&dev->timer); + + printk(KERN_ALERT "inotify device opened\n"); + + return 0; +} + +static void inotify_release_all_watchers(struct inotify_device *dev) +{ + struct inotify_watcher *watcher,*next; + + list_for_each_entry_safe(watcher, next, &dev->watchers, d_list) + ignore_helper(watcher, 0); +} + +/* + * inotify_release_all_events - destroy all of the events on a given device + */ +static void inotify_release_all_events(struct inotify_device *dev) +{ + spin_lock(&dev->lock); + while (inotify_dev_has_events(dev)) + inotify_dev_event_dequeue(dev); + spin_unlock(&dev->lock); +} + + +static int inotify_release(struct inode *inode, struct file *file) +{ + if (file->private_data) { + struct inotify_device *dev; + + dev = (struct inotify_device *) file->private_data; + del_timer_sync(&dev->timer); + inotify_release_all_watchers(dev); + inotify_release_all_events(dev); + kfree(dev); + } + + printk(KERN_ALERT "inotify device released\n"); + + atomic_dec(&watcher_count); + return 0; +} + +static int inotify_watch(struct inotify_device *dev, + struct inotify_watch_request *request) +{ + int err; + struct inode *inode; + struct inotify_watcher *watcher; + err = 0; + + err = find_inode(request->dirname, &inode); + if (err) + goto exit; + + if (!S_ISDIR(inode->i_mode)) + iprintk(INOTIFY_DEBUG_ERRORS, "watching file\n"); + + spin_lock(&dev->lock); + spin_lock(&inode->i_lock); + + /* + * This handles the case of re-adding a directory we are already + * watching, we just update the mask and return 0 + */ + if (inotify_dev_is_watching_inode(dev, inode)) { + struct inotify_watcher *owatcher; /* the old watcher */ + + iprintk(INOTIFY_DEBUG_ERRORS, + "adjusting event mask for inode %p\n", inode); + + owatcher = inode_find_dev(inode, dev); + owatcher->mask = request->mask; + inode_update_watchers_mask(inode); + spin_unlock(&inode->i_lock); + spin_unlock(&dev->lock); + unref_inode(inode); + + return 0; + } + + spin_unlock(&inode->i_lock); + spin_unlock(&dev->lock); + + watcher = create_watcher(dev, request->mask, inode); + if (!watcher) { + unref_inode(inode); + return -ENOSPC; + } + + spin_lock(&dev->lock); + spin_lock(&inode->i_lock); + + /* We can't add anymore watchers to this device */ + if (inotify_dev_add_watcher(dev, watcher) == -ENOSPC) { + iprintk(INOTIFY_DEBUG_ERRORS, + "can't add watcher dev is full\n"); + spin_unlock(&inode->i_lock); + delete_watcher(dev, watcher); + spin_unlock(&dev->lock); + + unref_inode(inode); + return -ENOSPC; + } + + inode_add_watcher(inode, watcher); + + /* we keep a reference on the inode */ + if (!err) + err = watcher->wd; + + spin_unlock(&inode->i_lock); + spin_unlock(&dev->lock); +exit: + return err; +} + +static int inotify_ignore(struct inotify_device *dev, int wd) +{ + struct inotify_watcher *watcher; + + watcher = dev_find_wd(dev, wd); + if (!watcher) + return -EINVAL; + ignore_helper(watcher, 0); + + return 0; +} + +static void inotify_print_stats(struct inotify_device *dev) +{ + int sizeof_inotify_watcher; + int sizeof_inotify_device; + int sizeof_inotify_kernel_event; + + sizeof_inotify_watcher = sizeof (struct inotify_watcher); + sizeof_inotify_device = sizeof (struct 
inotify_device);
+	sizeof_inotify_kernel_event = sizeof (struct inotify_kernel_event);
+
+	printk(KERN_ALERT "GLOBAL INOTIFY STATS\n");
+	printk(KERN_ALERT "watcher_count = %d\n", atomic_read(&watcher_count));
+	printk(KERN_ALERT "event_object_count = %d\n", event_object_count);
+	printk(KERN_ALERT "watcher_object_count = %d\n", watcher_object_count);
+	printk(KERN_ALERT "inode_ref_count = %d\n", inode_ref_count);
+
+	printk(KERN_ALERT "sizeof(struct inotify_watcher) = %d\n",
+	       sizeof_inotify_watcher);
+	printk(KERN_ALERT "sizeof(struct inotify_device) = %d\n",
+	       sizeof_inotify_device);
+	printk(KERN_ALERT "sizeof(struct inotify_kernel_event) = %d\n",
+	       sizeof_inotify_kernel_event);
+
+	spin_lock(&dev->lock);
+
+	printk(KERN_ALERT "inotify device: %p\n", dev);
+	printk(KERN_ALERT "inotify event_count: %u\n", dev->event_count);
+	printk(KERN_ALERT "inotify watch_count: %d\n", dev->nr_watches);
+
+	spin_unlock(&dev->lock);
+}
+
+static int inotify_ioctl(struct inode *ip, struct file *fp,
+			 unsigned int cmd, unsigned long arg)
+{
+	int err;
+	struct inotify_device *dev;
+	struct inotify_watch_request request;
+	int wd;
+
+	dev = fp->private_data;
+	err = 0;
+
+	if (_IOC_TYPE(cmd) != INOTIFY_IOCTL_MAGIC)
+		return -EINVAL;
+	if (_IOC_NR(cmd) > INOTIFY_IOCTL_MAXNR)
+		return -EINVAL;
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		err = !access_ok(VERIFY_READ, (void *) arg, _IOC_SIZE(cmd));
+
+	if (err) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE)
+		err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
+
+	if (err) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = -EINVAL;
+
+	switch (cmd) {
+	case INOTIFY_WATCH:
+		iprintk(INOTIFY_DEBUG_ERRORS, "INOTIFY_WATCH ioctl\n");
+		if (copy_from_user(&request, (void *) arg,
+				   sizeof(struct inotify_watch_request))) {
+			err = -EFAULT;
+			goto out;
+		}
+
+		err = inotify_watch(dev, &request);
+		break;
+	case INOTIFY_IGNORE:
+		iprintk(INOTIFY_DEBUG_ERRORS, "INOTIFY_IGNORE ioctl\n");
+		if (copy_from_user(&wd, (void *)arg, sizeof (int))) {
+			err = -EFAULT;
+			goto out;
+		}
+
+		err = inotify_ignore(dev, wd);
+		break;
+	case INOTIFY_STATS:
+		iprintk(INOTIFY_DEBUG_ERRORS, "INOTIFY_STATS ioctl\n");
+		inotify_print_stats(dev);
+		err = 0;
+		break;
+	case INOTIFY_SETDEBUG:
+		iprintk(INOTIFY_DEBUG_ERRORS, "INOTIFY_SETDEBUG ioctl\n");
+		if (copy_from_user(&inotify_debug_flags, (void *) arg,
+				   sizeof (int))) {
+			err = -EFAULT;
+			goto out;
+		}
+		break;
+	}
+
+out:
+	return err;
+}
+
+static struct file_operations inotify_fops = {
+	.owner		= THIS_MODULE,
+	.poll		= inotify_poll,
+	.read		= inotify_read,
+	.open		= inotify_open,
+	.release	= inotify_release,
+	.ioctl		= inotify_ioctl,
+};
+
+struct miscdevice inotify_device = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "inotify",
+	.fops	= &inotify_fops,
+};
+
+static int __init inotify_init(void)
+{
+	int ret;
+
+	ret = misc_register(&inotify_device);
+	if (ret)
+		goto out;
+
+	inotify_debug_flags = INOTIFY_DEBUG_NONE;
+
+	watcher_cache = kmem_cache_create("watcher_cache",
+			sizeof(struct inotify_watcher), 0, SLAB_PANIC,
+			NULL, NULL);
+
+	kevent_cache = kmem_cache_create("kevent_cache",
+			sizeof(struct inotify_kernel_event), 0,
+			SLAB_PANIC, NULL, NULL);
+
+	printk(KERN_ALERT "inotify %s minor=%d\n", INOTIFY_VERSION,
+	       inotify_device.minor);
+out:
+	return ret;
+}
+
+static void inotify_exit(void)
+{
+	kmem_cache_destroy(kevent_cache);
+	kmem_cache_destroy(watcher_cache);
+	misc_deregister(&inotify_device);
+	printk(KERN_ALERT "inotify shutdown ec=%d wc=%d ic=%d\n",
+	       event_object_count, watcher_object_count,
inode_ref_count); +} + +MODULE_AUTHOR("John McCutchan "); +MODULE_DESCRIPTION("Inode event driver"); +MODULE_LICENSE("GPL"); + +module_init(inotify_init); +module_exit(inotify_exit); diff -urN linux-2.6.9-rc2/drivers/char/Makefile linux/drivers/char/Makefile --- linux-2.6.9-rc2/drivers/char/Makefile 2004-09-20 17:03:56.000000000 -0400 +++ linux/drivers/char/Makefile 2004-09-24 15:28:28.692071168 -0400 @@ -7,7 +7,7 @@ # FONTMAPFILE = cp437.uni -obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o +obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o inotify.o obj-$(CONFIG_LEGACY_PTYS) += pty.o obj-$(CONFIG_UNIX98_PTYS) += pty.o diff -urN linux-2.6.9-rc2/fs/attr.c linux/fs/attr.c --- linux-2.6.9-rc2/fs/attr.c 2004-08-14 06:54:50.000000000 -0400 +++ linux/fs/attr.c 2004-09-24 15:28:19.651445552 -0400 @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -128,6 +129,29 @@ return dn_mask; } +int setattr_mask_inotify(unsigned int ia_valid) +{ + unsigned long dn_mask = 0; + + if (ia_valid & ATTR_UID) + dn_mask |= IN_ATTRIB; + if (ia_valid & ATTR_GID) + dn_mask |= IN_ATTRIB; + if (ia_valid & ATTR_SIZE) + dn_mask |= IN_MODIFY; + /* both times implies a utime(s) call */ + if ((ia_valid & (ATTR_ATIME|ATTR_MTIME)) == (ATTR_ATIME|ATTR_MTIME)) + dn_mask |= IN_ATTRIB; + else if (ia_valid & ATTR_ATIME) + dn_mask |= IN_ACCESS; + else if (ia_valid & ATTR_MTIME) + dn_mask |= IN_MODIFY; + if (ia_valid & ATTR_MODE) + dn_mask |= IN_ATTRIB; + return dn_mask; +} + + int notify_change(struct dentry * dentry, struct iattr * attr) { struct inode *inode = dentry->d_inode; @@ -185,8 +209,14 @@ } if (!error) { unsigned long dn_mask = setattr_mask(ia_valid); - if (dn_mask) + unsigned long in_mask = setattr_mask_inotify(ia_valid); + if (dn_mask) { dnotify_parent(dentry, dn_mask); + } + if (in_mask) { + inotify_inode_queue_event (dentry->d_inode, in_mask, NULL); + inotify_dentry_parent_queue_event (dentry, in_mask, dentry->d_name.name); + } } return error; } diff -urN linux-2.6.9-rc2/fs/inode.c linux/fs/inode.c --- linux-2.6.9-rc2/fs/inode.c 2004-09-20 17:04:00.000000000 -0400 +++ linux/fs/inode.c 2004-09-24 15:28:19.650445704 -0400 @@ -114,6 +114,7 @@ if (inode) { struct address_space * const mapping = &inode->i_data; + INIT_LIST_HEAD (&inode->watchers); inode->i_sb = sb; inode->i_blkbits = sb->s_blocksize_bits; inode->i_flags = 0; diff -urN linux-2.6.9-rc2/fs/namei.c linux/fs/namei.c --- linux-2.6.9-rc2/fs/namei.c 2004-09-20 17:04:01.000000000 -0400 +++ linux/fs/namei.c 2004-09-24 15:28:19.649445856 -0400 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -1226,6 +1227,7 @@ error = dir->i_op->create(dir, dentry, mode, nd); if (!error) { inode_dir_notify(dir, DN_CREATE); + inotify_inode_queue_event(dir, IN_CREATE_FILE, dentry->d_name.name); security_inode_post_create(dir, dentry, mode); } return error; @@ -1540,6 +1542,7 @@ error = dir->i_op->mknod(dir, dentry, mode, dev); if (!error) { inode_dir_notify(dir, DN_CREATE); + inotify_inode_queue_event(dir, IN_CREATE_FILE, dentry->d_name.name); security_inode_post_mknod(dir, dentry, mode, dev); } return error; @@ -1613,6 +1616,7 @@ error = dir->i_op->mkdir(dir, dentry, mode); if (!error) { inode_dir_notify(dir, DN_CREATE); + inotify_inode_queue_event(dir, IN_CREATE_SUBDIR, dentry->d_name.name); security_inode_post_mkdir(dir,dentry, mode); } return error; @@ -1708,6 +1712,9 @@ up(&dentry->d_inode->i_sem); if (!error) { inode_dir_notify(dir, DN_DELETE); + inotify_inode_queue_event(dir, IN_DELETE_SUBDIR, 
dentry->d_name.name); + inotify_inode_queue_event(dentry->d_inode, IN_DELETE_SELF, NULL); + inotify_inode_is_dead (dentry->d_inode); d_delete(dentry); } dput(dentry); @@ -1780,8 +1787,11 @@ /* We don't d_delete() NFS sillyrenamed files--they still exist. */ if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { - d_delete(dentry); inode_dir_notify(dir, DN_DELETE); + inotify_inode_queue_event(dir, IN_DELETE_FILE, dentry->d_name.name); + inotify_inode_queue_event(dentry->d_inode, IN_DELETE_SELF, NULL); + inotify_inode_is_dead (dentry->d_inode); + d_delete(dentry); } return error; } @@ -1858,6 +1868,7 @@ error = dir->i_op->symlink(dir, dentry, oldname); if (!error) { inode_dir_notify(dir, DN_CREATE); + inotify_inode_queue_event(dir, IN_CREATE_FILE, dentry->d_name.name); security_inode_post_symlink(dir, dentry, oldname); } return error; @@ -1931,6 +1942,7 @@ up(&old_dentry->d_inode->i_sem); if (!error) { inode_dir_notify(dir, DN_CREATE); + inotify_inode_queue_event(dir, IN_CREATE_FILE, new_dentry->d_name.name); security_inode_post_link(old_dentry, dir, new_dentry); } return error; @@ -2120,12 +2132,15 @@ else error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry); if (!error) { - if (old_dir == new_dir) + if (old_dir == new_dir) { inode_dir_notify(old_dir, DN_RENAME); - else { + } else { inode_dir_notify(old_dir, DN_DELETE); inode_dir_notify(new_dir, DN_CREATE); } + + inotify_inode_queue_event (old_dir, IN_MOVED_FROM, old_dentry->d_name.name); + inotify_inode_queue_event (new_dir, IN_MOVED_TO, new_dentry->d_name.name); } return error; } diff -urN linux-2.6.9-rc2/fs/open.c linux/fs/open.c --- linux-2.6.9-rc2/fs/open.c 2004-08-14 06:54:48.000000000 -0400 +++ linux/fs/open.c 2004-09-24 15:28:19.646446312 -0400 @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -955,6 +956,8 @@ error = PTR_ERR(f); if (IS_ERR(f)) goto out_error; + inotify_inode_queue_event (f->f_dentry->d_inode, IN_OPEN, NULL); + inotify_dentry_parent_queue_event (f->f_dentry, IN_OPEN, f->f_dentry->d_name.name); fd_install(fd, f); } out: @@ -1034,6 +1037,8 @@ FD_CLR(fd, files->close_on_exec); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); + inotify_dentry_parent_queue_event (filp->f_dentry, IN_CLOSE, filp->f_dentry->d_name.name); + inotify_inode_queue_event (filp->f_dentry->d_inode, IN_CLOSE, NULL); return filp_close(filp, files); out_unlock: diff -urN linux-2.6.9-rc2/fs/read_write.c linux/fs/read_write.c --- linux-2.6.9-rc2/fs/read_write.c 2004-08-14 06:55:35.000000000 -0400 +++ linux/fs/read_write.c 2004-09-24 15:28:19.645446464 -0400 @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -216,8 +217,11 @@ ret = file->f_op->read(file, buf, count, pos); else ret = do_sync_read(file, buf, count, pos); - if (ret > 0) + if (ret > 0) { dnotify_parent(file->f_dentry, DN_ACCESS); + inotify_dentry_parent_queue_event(file->f_dentry, IN_ACCESS, file->f_dentry->d_name.name); + inotify_inode_queue_event (file->f_dentry->d_inode, IN_ACCESS, NULL); + } } } @@ -260,8 +264,11 @@ ret = file->f_op->write(file, buf, count, pos); else ret = do_sync_write(file, buf, count, pos); - if (ret > 0) + if (ret > 0) { dnotify_parent(file->f_dentry, DN_MODIFY); + inotify_dentry_parent_queue_event(file->f_dentry, IN_MODIFY, file->f_dentry->d_name.name); + inotify_inode_queue_event (file->f_dentry->d_inode, IN_MODIFY, NULL); + } } } @@ -493,9 +500,13 @@ out: if (iov != iovstack) kfree(iov); - if ((ret + (type == READ)) > 0) + if ((ret + (type == READ)) > 0) { 
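+		/*
+		 * For reads a return of zero still counts as an access;
+		 * writes only count when data was actually transferred.
+		 */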
dnotify_parent(file->f_dentry, (type == READ) ? DN_ACCESS : DN_MODIFY); + inotify_dentry_parent_queue_event(file->f_dentry, + (type == READ) ? IN_ACCESS : IN_MODIFY, file->f_dentry->d_name.name); + inotify_inode_queue_event (file->f_dentry->d_inode, (type == READ) ? IN_ACCESS : IN_MODIFY, NULL); + } return ret; } diff -urN linux-2.6.9-rc2/fs/super.c linux/fs/super.c --- linux-2.6.9-rc2/fs/super.c 2004-08-14 06:55:22.000000000 -0400 +++ linux/fs/super.c 2004-09-24 15:28:19.644446616 -0400 @@ -36,6 +36,7 @@ #include /* for the emergency remount stuff */ #include #include +#include void get_filesystem(struct file_system_type *fs); @@ -204,6 +205,7 @@ if (root) { sb->s_root = NULL; + inotify_super_block_umount (sb); shrink_dcache_parent(root); shrink_dcache_anon(&sb->s_anon); dput(root); diff -urN linux-2.6.9-rc2/include/linux/fs.h linux/include/linux/fs.h --- linux-2.6.9-rc2/include/linux/fs.h 2004-09-20 17:04:07.000000000 -0400 +++ linux/include/linux/fs.h 2004-09-24 15:28:19.643446768 -0400 @@ -462,6 +462,10 @@ unsigned long i_dnotify_mask; /* Directory notify events */ struct dnotify_struct *i_dnotify; /* for directory notifications */ + struct list_head watchers; + unsigned long watchers_mask; + int watcher_count; + unsigned long i_state; unsigned long dirtied_when; /* jiffies of first dirtying */ diff -urN linux-2.6.9-rc2/include/linux/inotify.h linux/include/linux/inotify.h --- linux-2.6.9-rc2/include/linux/inotify.h 1969-12-31 19:00:00.000000000 -0500 +++ linux/include/linux/inotify.h 2004-09-24 15:28:19.641447072 -0400 @@ -0,0 +1,100 @@ +/* + * Inode based directory notification for Linux + * + * Copyright (C) 2004 John McCutchan + * + * Signed-off-by: John McCutchan ttb@tentacle.dhs.org + */ + +#ifndef _LINUX_INOTIFY_H +#define _LINUX_INOTIFY_H + +#include + +/* this size could limit things, since technically we could need PATH_MAX */ +#define INOTIFY_FILENAME_MAX 256 + +/* + * struct inotify_event - structure read from the inotify device for each event + * + * When you are watching a directory, you will receive the filename for events + * such as IN_CREATE, IN_DELETE, IN_OPEN, IN_CLOSE, ... 
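+ *
+ * The cookie field is intended to let user space pair IN_MOVED_FROM and
+ * IN_MOVED_TO events (see the TODO at the top of inotify.c); this version
+ * of the patch never fills it in.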
+ *
+ * Note: When reading from the device you must provide a buffer that is a
+ * multiple of sizeof(struct inotify_event)
+ */
+struct inotify_event {
+	int wd;
+	int mask;
+	int cookie;
+	char filename[INOTIFY_FILENAME_MAX];
+};
+
+/* the following are legal, implemented events */
+#define IN_ACCESS		0x00000001	/* File was accessed */
+#define IN_MODIFY		0x00000002	/* File was modified */
+#define IN_ATTRIB		0x00000004	/* File changed attributes */
+#define IN_CLOSE		0x00000008	/* File was closed */
+#define IN_OPEN			0x00000010	/* File was opened */
+#define IN_MOVED_FROM		0x00000020	/* File was moved from X */
+#define IN_MOVED_TO		0x00000040	/* File was moved to Y */
+#define IN_DELETE_SUBDIR	0x00000080	/* Subdir was deleted */
+#define IN_DELETE_FILE		0x00000100	/* Subfile was deleted */
+#define IN_CREATE_SUBDIR	0x00000200	/* Subdir was created */
+#define IN_CREATE_FILE		0x00000400	/* Subfile was created */
+#define IN_DELETE_SELF		0x00000800	/* Self was deleted */
+#define IN_UNMOUNT		0x00001000	/* Backing filesystem was unmounted */
+#define IN_Q_OVERFLOW		0x00002000	/* The event queue overflowed */
+#define IN_IGNORED		0x00004000	/* File was ignored */
+
+/* special flags */
+#define IN_ALL_EVENTS		0xffffffff	/* All the events */
+
+/*
+ * struct inotify_watch_request - represents a watch request
+ *
+ * Pass to the inotify device via the INOTIFY_WATCH ioctl
+ */
+struct inotify_watch_request {
+	char *dirname;		/* directory name */
+	unsigned long mask;	/* event mask */
+};
+
+#define INOTIFY_IOCTL_MAGIC	'Q'
+#define INOTIFY_IOCTL_MAXNR	4
+
+#define INOTIFY_WATCH		_IOR(INOTIFY_IOCTL_MAGIC, 1, struct inotify_watch_request)
+#define INOTIFY_IGNORE		_IOR(INOTIFY_IOCTL_MAGIC, 2, int)
+#define INOTIFY_STATS		_IOR(INOTIFY_IOCTL_MAGIC, 3, int)
+#define INOTIFY_SETDEBUG	_IOR(INOTIFY_IOCTL_MAGIC, 4, int)
+
+#define INOTIFY_DEBUG_NONE	0x00000000
+#define INOTIFY_DEBUG_ALLOC	0x00000001
+#define INOTIFY_DEBUG_EVENTS	0x00000002
+#define INOTIFY_DEBUG_INODE	0x00000004
+#define INOTIFY_DEBUG_ERRORS	0x00000008
+#define INOTIFY_DEBUG_FILEN	0x00000010
+#define INOTIFY_DEBUG_ALL	0xffffffff
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include <linux/fs.h>
+
+/* Adds event to all watchers on inode that are interested in mask */
+void inotify_inode_queue_event (struct inode *inode, unsigned long mask,
+		const char *filename);
+
+/* Same as above but uses dentry's inode */
+void inotify_dentry_parent_queue_event (struct dentry *dentry,
+		unsigned long mask, const char *filename);
+
+/* This will remove all watchers from all inodes on the superblock */
+void inotify_super_block_umount (struct super_block *sb);
+
+/* Call this when an inode is dead, and inotify should ignore it */
+void inotify_inode_is_dead (struct inode *inode);
+
+#endif	/* __KERNEL__ */
+
+#endif	/* _LINUX_INOTIFY_H */
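
A small test program, for reference. It is not part of the patch and makes two
assumptions: that the misc device registered above shows up as /dev/inotify
(the dynamic minor is printed at init time), and that the header is visible to
user space as <linux/inotify.h>.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/inotify.h>

int main(int argc, char *argv[])
{
	struct inotify_watch_request req;
	struct inotify_event ev;
	int fd, wd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}

	/* Assumes the device node is /dev/inotify. */
	fd = open("/dev/inotify", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	req.dirname = argv[1];
	req.mask = IN_ALL_EVENTS;

	/* INOTIFY_WATCH returns the watch descriptor on success. */
	wd = ioctl(fd, INOTIFY_WATCH, &req);
	if (wd < 0) {
		perror("INOTIFY_WATCH");
		return 1;
	}

	/* Reads must be a multiple of sizeof(struct inotify_event). */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("wd=%d mask=0x%x filename=%s\n",
		       ev.wd, (unsigned) ev.mask, ev.filename);

	ioctl(fd, INOTIFY_IGNORE, &wd);
	close(fd);
	return 0;
}

Reads only ever return whole events, and the driver hands back at most 20
(MAX_EVENTS_AT_ONCE) per read, so a larger buffer just means fewer wakeups.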