From: Gerd Knorr

This is a bunch of fixes for the regparm stuff: for all fastcall
functions, the declaration and the definition must match, in both cases
(with and without CONFIG_REGPARM).  Current gcc fails due to mismatches.
Other architectures which have private FASTCALL definitions probably
need similar adaptations; I've done i386 and uml only for now.
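[Editorial illustration, not part of the patch.]  To make the failure
mode concrete, here is a sketch of the mismatch gcc trips over.
do_stuff() is a made-up function; the two macros mirror the
include/asm-i386/linkage.h definitions added below:

	#define FASTCALL(x)	x __attribute__((regparm(3)))
	#define fastcall	__attribute__((regparm(3)))

	/* Declaration: the prototype carries regparm(3). */
	extern void FASTCALL(do_stuff(int arg));

	/* BROKEN: a definition without the attribute disagrees with
	 * the declaration about the calling convention, and current
	 * gcc refuses to compile the mismatch:
	 *
	 *	void do_stuff(int arg) { }
	 */

	/* FIXED: "fastcall" expands to the same attribute, so both
	 * sides match.  On architectures without a private FASTCALL,
	 * include/linux/linkage.h makes both macros no-ops, and the
	 * pair still matches. */
	void fastcall do_stuff(int arg)
	{
		(void)arg;
	}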
---

 25-akpm/arch/i386/kernel/process.c  |    2 +-
 25-akpm/arch/i386/kernel/signal.c   |    2 +-
 25-akpm/arch/i386/kernel/vm86.c     |    2 +-
 25-akpm/drivers/net/ns83820.c       |    8 ++++----
 25-akpm/fs/aio.c                    |   16 ++++++++--------
 25-akpm/fs/buffer.c                 |    4 ++--
 25-akpm/fs/fcntl.c                  |    2 +-
 25-akpm/fs/file_table.c             |    8 ++++----
 25-akpm/fs/namei.c                  |    8 ++++----
 25-akpm/fs/open.c                   |    4 ++--
 25-akpm/include/asm-i386/linkage.h  |    1 +
 25-akpm/include/asm-um/linkage.h    |    1 +
 25-akpm/include/linux/linkage.h     |    1 +
 25-akpm/kernel/exit.c               |    4 ++--
 25-akpm/kernel/fork.c               |   14 +++++++-------
 25-akpm/kernel/pid.c                |   10 +++++-----
 25-akpm/kernel/rcupdate.c           |    2 +-
 25-akpm/kernel/sched.c              |   30 +++++++++++++++---------------
 25-akpm/kernel/signal.c             |    2 +-
 25-akpm/kernel/softirq.c            |    8 ++++----
 25-akpm/kernel/timer.c              |    2 +-
 25-akpm/kernel/workqueue.c          |   10 +++++-----
 25-akpm/lib/rwsem-spinlock.c        |   16 ++++++++--------
 25-akpm/lib/rwsem.c                 |    8 ++++----
 25-akpm/mm/filemap.c                |    8 ++++----
 25-akpm/mm/highmem.c                |    4 ++--
 25-akpm/mm/memory.c                 |    6 +++---
 25-akpm/mm/page_alloc.c             |   16 ++++++++--------
 25-akpm/mm/rmap.c                   |   10 +++++-----
 25-akpm/mm/slab.c                   |    2 +-
 25-akpm/mm/swap.c                   |   10 +++++-----
 25-akpm/net/bluetooth/rfcomm/core.c |    4 ++--
 32 files changed, 114 insertions(+), 111 deletions(-)

diff -puN arch/i386/kernel/process.c~fastcall-warning-fixes arch/i386/kernel/process.c
--- 25/arch/i386/kernel/process.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/arch/i386/kernel/process.c	Mon Mar 1 15:59:45 2004
@@ -493,7 +493,7 @@ int dump_task_regs(struct task_struct *t
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
-struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
diff -puN arch/i386/kernel/signal.c~fastcall-warning-fixes arch/i386/kernel/signal.c
--- 25/arch/i386/kernel/signal.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/arch/i386/kernel/signal.c	Mon Mar 1 15:59:45 2004
@@ -551,7 +551,7 @@ handle_signal(unsigned long sig, siginfo
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
 {
	siginfo_t info;
	int signr;
diff -puN arch/i386/kernel/vm86.c~fastcall-warning-fixes arch/i386/kernel/vm86.c
--- 25/arch/i386/kernel/vm86.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/arch/i386/kernel/vm86.c	Mon Mar 1 15:59:45 2004
@@ -95,7 +95,7 @@
 #define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)

 struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
-struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
+struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 {
	struct tss_struct *tss;
	struct pt_regs *ret;
diff -puN drivers/net/ns83820.c~fastcall-warning-fixes drivers/net/ns83820.c
--- 25/drivers/net/ns83820.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/drivers/net/ns83820.c	Mon Mar 1 15:59:45 2004
@@ -598,7 +598,7 @@ static inline int rx_refill(struct net_d
 }

 static void FASTCALL(rx_refill_atomic(struct net_device *ndev));
-static void rx_refill_atomic(struct net_device *ndev)
+static void fastcall rx_refill_atomic(struct net_device *ndev)
 {
	rx_refill(ndev, GFP_ATOMIC);
 }
@@ -620,7 +620,7 @@ static inline void clear_rx_desc(struct
 }

 static void FASTCALL(phy_intr(struct net_device *ndev));
-static void phy_intr(struct net_device *ndev)
+static void fastcall phy_intr(struct net_device *ndev)
 {
	struct ns83820 *dev = PRIV(ndev);
	static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
@@ -807,7 +807,7 @@ static void ns83820_cleanup_rx(struct ns
 }

 static void FASTCALL(ns83820_rx_kick(struct net_device *ndev));
-static void ns83820_rx_kick(struct net_device *ndev)
+static void fastcall ns83820_rx_kick(struct net_device *ndev)
 {
	struct ns83820 *dev = PRIV(ndev);
	/*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ {
@@ -829,7 +829,7 @@ static void ns83820_rx_kick(struct net_d
 *
 */
 static void FASTCALL(rx_irq(struct net_device *ndev));
-static void rx_irq(struct net_device *ndev)
+static void fastcall rx_irq(struct net_device *ndev)
 {
	struct ns83820 *dev = PRIV(ndev);
	struct rx_info *info = &dev->rx_info;
diff -puN fs/aio.c~fastcall-warning-fixes fs/aio.c
--- 25/fs/aio.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/fs/aio.c	Mon Mar 1 15:59:45 2004
@@ -312,7 +312,7 @@ void wait_for_all_aios(struct kioctx *ct
 /* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
-ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
 {
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -331,7 +331,7 @@ ssize_t wait_on_sync_kiocb(struct kiocb
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
-void exit_aio(struct mm_struct *mm)
+void fastcall exit_aio(struct mm_struct *mm)
 {
	struct kioctx *ctx = mm->ioctx_list;
	mm->ioctx_list = NULL;
@@ -356,7 +356,7 @@ void exit_aio(struct mm_struct *mm)
 * Called when the last user of an aio context has gone away,
 * and the struct needs to be freed.
 */
-void __put_ioctx(struct kioctx *ctx)
+void fastcall __put_ioctx(struct kioctx *ctx)
 {
	unsigned nr_events = ctx->max_reqs;
@@ -383,7 +383,7 @@ void __put_ioctx(struct kioctx *ctx)
 * req (after submitting it) and aio_complete() freeing the req.
 */
 static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
-static struct kiocb *__aio_get_req(struct kioctx *ctx)
+static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 {
	struct kiocb *req = NULL;
	struct aio_ring *ring;
@@ -509,7 +509,7 @@ static int __aio_put_req(struct kioctx *
 * Returns true if this put was the last user of the kiocb,
 * false if the request is still in use.
 */
-int aio_put_req(struct kiocb *req)
+int fastcall aio_put_req(struct kiocb *req)
 {
	struct kioctx *ctx = req->ki_ctx;
	int ret;
@@ -596,7 +596,7 @@ static void aio_kick_handler(void *data)
	unuse_mm(ctx->mm);
 }

-void kick_iocb(struct kiocb *iocb)
+void fastcall kick_iocb(struct kiocb *iocb)
 {
	struct kioctx *ctx = iocb->ki_ctx;
@@ -622,7 +622,7 @@ void kick_iocb(struct kiocb *iocb)
 * Returns true if this is the last user of the request.  The
 * only other user of the request can be the cancellation code.
 */
-int aio_complete(struct kiocb *iocb, long res, long res2)
+int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 {
	struct kioctx *ctx = iocb->ki_ctx;
	struct aio_ring_info *info;
@@ -985,7 +985,7 @@ asmlinkage long sys_io_destroy(aio_conte
	return -EINVAL;
 }

-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb)
 {
	struct kiocb *req;
diff -puN fs/buffer.c~fastcall-warning-fixes fs/buffer.c
--- 25/fs/buffer.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/fs/buffer.c	Mon Mar 1 15:59:45 2004
@@ -97,7 +97,7 @@ void wake_up_buffer(struct buffer_head *
 }
 EXPORT_SYMBOL(wake_up_buffer);

-void unlock_buffer(struct buffer_head *bh)
+void fastcall unlock_buffer(struct buffer_head *bh)
 {
	/*
	 * unlock_buffer against a zero-count bh is a bug, if the page
@@ -1256,7 +1256,7 @@ __getblk_slow(struct block_device *bdev,
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->page_lock and the global inode_lock.
 */
-void mark_buffer_dirty(struct buffer_head *bh)
+void fastcall mark_buffer_dirty(struct buffer_head *bh)
 {
	if (!buffer_uptodate(bh))
		buffer_error();
diff -puN fs/fcntl.c~fastcall-warning-fixes fs/fcntl.c
--- 25/fs/fcntl.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/fs/fcntl.c	Mon Mar 1 15:59:45 2004
@@ -19,7 +19,7 @@
 #include
 #include

-void set_close_on_exec(unsigned int fd, int flag)
+void fastcall set_close_on_exec(unsigned int fd, int flag)
 {
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
diff -puN fs/file_table.c~fastcall-warning-fixes fs/file_table.c
--- 25/fs/file_table.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/fs/file_table.c	Mon Mar 1 15:59:45 2004
@@ -152,7 +152,7 @@ void close_private_file(struct file *fil

 EXPORT_SYMBOL(close_private_file);

-void fput(struct file *file)
+void fastcall fput(struct file *file)
 {
	if (atomic_dec_and_test(&file->f_count))
		__fput(file);
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(fput);

 /* __fput is called from task context when aio completion releases the last
 * last use of a struct file *.  Do not use otherwise.
 */
-void __fput(struct file *file)
+void fastcall __fput(struct file *file)
 {
	struct dentry *dentry = file->f_dentry;
	struct vfsmount *mnt = file->f_vfsmnt;
@@ -192,7 +192,7 @@ void __fput(struct file *file)
	mntput(mnt);
 }

-struct file *fget(unsigned int fd)
+struct file fastcall *fget(unsigned int fd)
 {
	struct file *file;
	struct files_struct *files = current->files;
@@ -214,7 +214,7 @@ EXPORT_SYMBOL(fget);
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
-struct file *fget_light(unsigned int fd, int *fput_needed)
+struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
 {
	struct file *file;
	struct files_struct *files = current->files;
diff -puN fs/namei.c~fastcall-warning-fixes fs/namei.c
--- 25/fs/namei.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/fs/namei.c	Mon Mar 1 15:59:45 2004
@@ -571,7 +571,7 @@ fail:
 *
 * We expect 'base' to be positive and a directory.
 */
-int link_path_walk(const char * name, struct nameidata *nd)
+int fastcall link_path_walk(const char * name, struct nameidata *nd)
 {
	struct path next;
	struct inode *inode;
@@ -771,7 +771,7 @@ return_err:
	return err;
 }

-int path_walk(const char * name, struct nameidata *nd)
+int fastcall path_walk(const char * name, struct nameidata *nd)
 {
	current->total_link_count = 0;
	return link_path_walk(name, nd);
@@ -858,7 +858,7 @@ walk_init_root(const char *name, struct
	return 1;
 }

-int path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
+int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
 {
	nd->last_type = LAST_ROOT; /* if there are only slashes... */
	nd->flags = flags;
@@ -971,7 +971,7 @@ access:
 * that namei follows links, while lnamei does not.
 * SMP-safe
 */
-int __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
+int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
 {
	char *tmp = getname(name);
	int err = PTR_ERR(tmp);
diff -puN fs/open.c~fastcall-warning-fixes fs/open.c
--- 25/fs/open.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/fs/open.c	Mon Mar 1 15:59:45 2004
@@ -890,7 +890,7 @@ static inline void __put_unused_fd(struc
	files->next_fd = fd;
 }

-void put_unused_fd(unsigned int fd)
+void fastcall put_unused_fd(unsigned int fd)
 {
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
@@ -913,7 +913,7 @@ EXPORT_SYMBOL(put_unused_fd);
 * will follow.
 */
-void fd_install(unsigned int fd, struct file * file)
+void fastcall fd_install(unsigned int fd, struct file * file)
 {
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
diff -puN include/asm-i386/linkage.h~fastcall-warning-fixes include/asm-i386/linkage.h
--- 25/include/asm-i386/linkage.h~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/include/asm-i386/linkage.h	Mon Mar 1 15:59:45 2004
@@ -3,6 +3,7 @@

 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
 #define FASTCALL(x)	x __attribute__((regparm(3)))
+#define fastcall	__attribute__((regparm(3)))

 #ifdef CONFIG_X86_ALIGNMENT_16
 #define __ALIGN .align 16,0x90
diff -puN include/asm-um/linkage.h~fastcall-warning-fixes include/asm-um/linkage.h
--- 25/include/asm-um/linkage.h~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/include/asm-um/linkage.h	Mon Mar 1 15:59:45 2004
@@ -2,5 +2,6 @@
 #define __ASM_LINKAGE_H

 #define FASTCALL(x) x __attribute__((regparm(3)))
+#define fastcall __attribute__((regparm(3)))

 #endif
diff -puN include/linux/linkage.h~fastcall-warning-fixes include/linux/linkage.h
--- 25/include/linux/linkage.h~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/include/linux/linkage.h	Mon Mar 1 15:59:45 2004
@@ -37,6 +37,7 @@

 #ifndef FASTCALL
 #define FASTCALL(x) x
+#define fastcall
 #endif

 #endif
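[Editorial note, not part of the patch.]  The hunks above establish the
pattern: prototypes keep the FASTCALL() wrapper, definitions use the
bare fastcall keyword, and include/linux/linkage.h turns both into
no-ops on architectures without their own definitions.  The one
subtlety is functions that return a pointer -- there the patch places
fastcall inside the return type, before the '*', as in the fget() and
__switch_to() hunks.  A self-contained sketch (lookup_thing() and
struct thing are invented for the example):

	#include <stddef.h>

	#define FASTCALL(x)	x __attribute__((regparm(3)))
	#define fastcall	__attribute__((regparm(3)))

	struct thing;	/* opaque, made up for the example */

	/* declaration, as it would appear in a header */
	struct thing * FASTCALL(lookup_thing(unsigned int id));

	/* matching definition: the attribute sits between the return
	 * type and the "*", mirroring the fget() hunk above */
	struct thing fastcall *lookup_thing(unsigned int id)
	{
		return NULL;	/* stub body, illustration only */
	}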
diff -puN kernel/exit.c~fastcall-warning-fixes kernel/exit.c
--- 25/kernel/exit.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/kernel/exit.c	Mon Mar 1 15:59:45 2004
@@ -386,7 +386,7 @@ static inline void close_files(struct fi
	}
 }

-void put_files_struct(struct files_struct *files)
+void fastcall put_files_struct(struct files_struct *files)
 {
	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
@@ -810,7 +810,7 @@ asmlinkage long sys_exit(int error_code)
	do_exit((error_code&0xff)<<8);
 }

-task_t *next_thread(task_t *p)
+task_t fastcall *next_thread(task_t *p)
 {
	struct pid_link *link = p->pids + PIDTYPE_TGID;
	struct list_head *tmp, *head = &link->pidptr->task_list;
diff -puN kernel/fork.c~fastcall-warning-fixes kernel/fork.c
--- 25/kernel/fork.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/kernel/fork.c	Mon Mar 1 15:59:45 2004
@@ -91,7 +91,7 @@ void __put_task_struct(struct task_struc
	free_task(tsk);
 }

-void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 {
	unsigned long flags;
@@ -103,7 +103,7 @@ void add_wait_queue(wait_queue_head_t *q

 EXPORT_SYMBOL(add_wait_queue);

-void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
 {
	unsigned long flags;
@@ -115,7 +115,7 @@ void add_wait_queue_exclusive(wait_queue

 EXPORT_SYMBOL(add_wait_queue_exclusive);

-void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 {
	unsigned long flags;
@@ -139,7 +139,7 @@ EXPORT_SYMBOL(remove_wait_queue);
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the the critical region).
 */
-void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
	unsigned long flags;
@@ -153,7 +153,7 @@ void prepare_to_wait(wait_queue_head_t *

 EXPORT_SYMBOL(prepare_to_wait);

-void
+void fastcall
 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
	unsigned long flags;
@@ -168,7 +168,7 @@ prepare_to_wait_exclusive(wait_queue_hea

 EXPORT_SYMBOL(prepare_to_wait_exclusive);

-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 {
	unsigned long flags;
@@ -418,7 +418,7 @@ struct mm_struct * mm_alloc(void)
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
-void __mmdrop(struct mm_struct *mm)
+void fastcall __mmdrop(struct mm_struct *mm)
 {
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
diff -puN kernel/pid.c~fastcall-warning-fixes kernel/pid.c
--- 25/kernel/pid.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/kernel/pid.c	Mon Mar 1 15:59:45 2004
@@ -57,7 +57,7 @@ static pidmap_t *map_limit = pidmap_arra

 static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

-inline void free_pidmap(int pid)
+fastcall void free_pidmap(int pid)
 {
	pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;
@@ -146,7 +146,7 @@ failure:
	return -1;
 }

-inline struct pid *find_pid(enum pid_type type, int nr)
+fastcall struct pid *find_pid(enum pid_type type, int nr)
 {
	struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)];
	struct pid *pid;
@@ -159,14 +159,14 @@ inline struct pid *find_pid(enum pid_typ
	return NULL;
 }

-void link_pid(task_t *task, struct pid_link *link, struct pid *pid)
+void fastcall link_pid(task_t *task, struct pid_link *link, struct pid *pid)
 {
	atomic_inc(&pid->count);
	list_add_tail(&link->pid_chain, &pid->task_list);
	link->pidptr = pid;
 }

-int attach_pid(task_t *task, enum pid_type type, int nr)
+int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
 {
	struct pid *pid = find_pid(type, nr);
@@ -209,7 +209,7 @@ static void _detach_pid(task_t *task, en
	__detach_pid(task, type);
 }

-void detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(task_t *task, enum pid_type type)
 {
	int nr = __detach_pid(task, type);
diff -puN kernel/rcupdate.c~fastcall-warning-fixes kernel/rcupdate.c
--- 25/kernel/rcupdate.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/kernel/rcupdate.c	Mon Mar 1 15:59:45 2004
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct tasklet_str
 * The read-side of critical section that use call_rcu() for updation must
 * be protected by rcu_read_lock()/rcu_read_unlock().
 */
-void call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
+void fastcall call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
 {
	int cpu;
	unsigned long flags;
diff -puN kernel/sched.c~fastcall-warning-fixes kernel/sched.c
--- 25/kernel/sched.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/kernel/sched.c	Mon Mar 1 15:59:45 2004
@@ -700,7 +700,7 @@ repeat_lock_task:
	return success;
 }

-int wake_up_process(task_t * p)
+int fastcall wake_up_process(task_t * p)
 {
	return try_to_wake_up(p, TASK_STOPPED |
		TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
@@ -708,7 +708,7 @@ int wake_up_process(task_t * p)

 EXPORT_SYMBOL(wake_up_process);

-int wake_up_state(task_t *p, unsigned int state)
+int fastcall wake_up_state(task_t *p, unsigned int state)
 {
	return try_to_wake_up(p, state, 0);
 }
@@ -717,7 +717,7 @@ int wake_up_state(task_t *p, unsigned in
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 */
-void sched_fork(task_t *p)
+void fastcall sched_fork(task_t *p)
 {
	/*
	 * We mark the process as running here, but have not actually
@@ -773,7 +773,7 @@ void sched_fork(task_t *p)
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created process.
 */
-void wake_up_forked_process(task_t * p)
+void fastcall wake_up_forked_process(task_t * p)
 {
	unsigned long flags;
	runqueue_t *rq = task_rq_lock(current, &flags);
@@ -817,7 +817,7 @@ void wake_up_forked_process(task_t * p)
 * artificially, because any timeslice recovered here
 * was given away by the parent in the first place.)
 */
-void sched_exit(task_t * p)
+void fastcall sched_exit(task_t * p)
 {
	unsigned long flags;
	runqueue_t *rq;
@@ -1796,7 +1796,7 @@ static void __wake_up_common(wait_queue_
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 */
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
	unsigned long flags;
@@ -1810,7 +1810,7 @@ EXPORT_SYMBOL(__wake_up);
 /*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
	__wake_up_common(q, mode, 1, 0);
 }
@@ -1828,7 +1828,7 @@ void __wake_up_locked(wait_queue_head_t
 *
 * On UP it can prevent extra preemption.
 */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
	unsigned long flags;
@@ -1845,7 +1845,7 @@ void __wake_up_sync(wait_queue_head_t *q

 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

-void complete(struct completion *x)
+void fastcall complete(struct completion *x)
 {
	unsigned long flags;
@@ -1858,7 +1858,7 @@ void complete(struct completion *x)

 EXPORT_SYMBOL(complete);

-void complete_all(struct completion *x)
+void fastcall complete_all(struct completion *x)
 {
	unsigned long flags;
@@ -1869,7 +1869,7 @@ void complete_all(struct completion *x)
	spin_unlock_irqrestore(&x->wait.lock, flags);
 }

-void wait_for_completion(struct completion *x)
+void fastcall wait_for_completion(struct completion *x)
 {
	might_sleep();
	spin_lock_irq(&x->wait.lock);
@@ -1907,7 +1907,7 @@ EXPORT_SYMBOL(wait_for_completion);
	__remove_wait_queue(q, &wait);			\
	spin_unlock_irqrestore(&q->lock, flags);

-void interruptible_sleep_on(wait_queue_head_t *q)
+void fastcall interruptible_sleep_on(wait_queue_head_t *q)
 {
	SLEEP_ON_VAR
@@ -1920,7 +1920,7 @@ void interruptible_sleep_on(wait_queue_h

 EXPORT_SYMBOL(interruptible_sleep_on);

-long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
	SLEEP_ON_VAR
@@ -1935,7 +1935,7 @@ long interruptible_sleep_on_timeout(wait

 EXPORT_SYMBOL(interruptible_sleep_on_timeout);

-void sleep_on(wait_queue_head_t *q)
+void fastcall sleep_on(wait_queue_head_t *q)
 {
	SLEEP_ON_VAR
@@ -1948,7 +1948,7 @@ void sleep_on(wait_queue_head_t *q)

 EXPORT_SYMBOL(sleep_on);

-long sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
	SLEEP_ON_VAR
diff -puN kernel/signal.c~fastcall-warning-fixes kernel/signal.c
--- 25/kernel/signal.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/kernel/signal.c	Mon Mar 1 15:59:45 2004
@@ -213,7 +213,7 @@ static inline int has_pending_signals(si

 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

-inline void recalc_sigpending_tsk(struct task_struct *t)
+fastcall void recalc_sigpending_tsk(struct task_struct *t)
 {
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
diff -puN kernel/softirq.c~fastcall-warning-fixes kernel/softirq.c
--- 25/kernel/softirq.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/kernel/softirq.c	Mon Mar 1 15:59:45 2004
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(local_bh_enable);
 /*
 * This function must run with irqs disabled!
 */
-inline void raise_softirq_irqoff(unsigned int nr)
+inline fastcall void raise_softirq_irqoff(unsigned int nr)
 {
	__raise_softirq_irqoff(nr);
@@ -149,7 +149,7 @@ inline void raise_softirq_irqoff(unsigne

 EXPORT_SYMBOL(raise_softirq_irqoff);

-void raise_softirq(unsigned int nr)
+void fastcall raise_softirq(unsigned int nr)
 {
	unsigned long flags;
@@ -179,7 +179,7 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

-void __tasklet_schedule(struct tasklet_struct *t)
+void fastcall __tasklet_schedule(struct tasklet_struct *t)
 {
	unsigned long flags;
@@ -192,7 +192,7 @@ void __tasklet_schedule(struct tasklet_s

 EXPORT_SYMBOL(__tasklet_schedule);

-void __tasklet_hi_schedule(struct tasklet_struct *t)
+void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
 {
	unsigned long flags;
diff -puN kernel/timer.c~fastcall-warning-fixes kernel/timer.c
--- 25/kernel/timer.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/kernel/timer.c	Mon Mar 1 15:59:45 2004
@@ -997,7 +997,7 @@ static void process_timeout(unsigned lon
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
-signed long schedule_timeout(signed long timeout)
+fastcall signed long schedule_timeout(signed long timeout)
 {
	struct timer_list timer;
	unsigned long expire;
diff -puN kernel/workqueue.c~fastcall-warning-fixes kernel/workqueue.c
--- 25/kernel/workqueue.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/kernel/workqueue.c	Mon Mar 1 15:59:45 2004
@@ -78,7 +78,7 @@ static void __queue_work(struct cpu_work
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
-int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
	int ret = 0, cpu = get_cpu();
@@ -99,7 +99,7 @@ static void delayed_work_timer_fn(unsign
	__queue_work(wq->cpu_wq + smp_processor_id(), work);
 }

-int queue_delayed_work(struct workqueue_struct *wq,
+int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
 {
	int ret = 0;
@@ -203,7 +203,7 @@ static int worker_thread(void *__cwq)
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
-void flush_workqueue(struct workqueue_struct *wq)
+void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
	struct cpu_workqueue_struct *cwq;
	int cpu;
@@ -310,12 +310,12 @@ void destroy_workqueue(struct workqueue_

 static struct workqueue_struct *keventd_wq;

-int schedule_work(struct work_struct *work)
+int fastcall schedule_work(struct work_struct *work)
 {
	return queue_work(keventd_wq, work);
 }

-int schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
 {
	return queue_delayed_work(keventd_wq, work, delay);
 }
diff -puN lib/rwsem.c~fastcall-warning-fixes lib/rwsem.c
--- 25/lib/rwsem.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/lib/rwsem.c	Mon Mar 1 15:59:45 2004
@@ -162,7 +162,7 @@ static inline struct rw_semaphore *rwsem
 /*
 * wait for the read lock to be granted
 */
-struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
	struct rwsem_waiter waiter;
@@ -178,7 +178,7 @@ struct rw_semaphore *rwsem_down_read_fai
 /*
 * wait for the write lock to be granted
 */
-struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
	struct rwsem_waiter waiter;
@@ -195,7 +195,7 @@ struct rw_semaphore *rwsem_down_write_fa
 * handle waking up a waiter on the semaphore
 * - up_read has decremented the active part of the count if we come here
 */
-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
	rwsemtrace(sem,"Entering rwsem_wake");
@@ -217,7 +217,7 @@ struct rw_semaphore *rwsem_wake(struct r
 * - caller incremented waiting part of count, and discovered it to be still negative
 * - just wake up any readers at the front of the queue
 */
-struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
	rwsemtrace(sem,"Entering rwsem_downgrade_wake");
diff -puN lib/rwsem-spinlock.c~fastcall-warning-fixes lib/rwsem-spinlock.c
--- 25/lib/rwsem-spinlock.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/lib/rwsem-spinlock.c	Mon Mar 1 15:59:45 2004
@@ -29,7 +29,7 @@ void rwsemtrace(struct rw_semaphore *sem
 /*
 * initialise the semaphore
 */
-void init_rwsem(struct rw_semaphore *sem)
+void fastcall init_rwsem(struct rw_semaphore *sem)
 {
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
@@ -117,7 +117,7 @@ static inline struct rw_semaphore *__rws
 /*
 * get a read lock on the semaphore
 */
-void __down_read(struct rw_semaphore *sem)
+void fastcall __down_read(struct rw_semaphore *sem)
 {
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
@@ -162,7 +162,7 @@ void __down_read(struct rw_semaphore *se
 /*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
-int __down_read_trylock(struct rw_semaphore *sem)
+int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
	int ret = 0;
	rwsemtrace(sem,"Entering __down_read_trylock");
@@ -185,7 +185,7 @@ int __down_read_trylock(struct rw_semaph
 * get a write lock on the semaphore
 * - note that we increment the waiting count anyway to indicate an exclusive lock
 */
-void __down_write(struct rw_semaphore *sem)
+void fastcall __down_write(struct rw_semaphore *sem)
 {
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
@@ -230,7 +230,7 @@ void __down_write(struct rw_semaphore *s
 /*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
-int __down_write_trylock(struct rw_semaphore *sem)
+int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
	int ret = 0;
	rwsemtrace(sem,"Entering __down_write_trylock");
@@ -252,7 +252,7 @@ int __down_write_trylock(struct rw_semap
 /*
 * release a read lock on the semaphore
 */
-void __up_read(struct rw_semaphore *sem)
+void fastcall __up_read(struct rw_semaphore *sem)
 {
	rwsemtrace(sem,"Entering __up_read");
@@ -269,7 +269,7 @@ void __up_read(struct rw_semaphore *sem)
 /*
 * release a write lock on the semaphore
 */
-void __up_write(struct rw_semaphore *sem)
+void fastcall __up_write(struct rw_semaphore *sem)
 {
	rwsemtrace(sem,"Entering __up_write");
@@ -288,7 +288,7 @@ void __up_write(struct rw_semaphore *sem
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
-void __downgrade_write(struct rw_semaphore *sem)
+void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
	rwsemtrace(sem,"Entering __downgrade_write");
diff -puN mm/filemap.c~fastcall-warning-fixes mm/filemap.c
--- 25/mm/filemap.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/mm/filemap.c	Mon Mar 1 15:59:45 2004
@@ -292,7 +292,7 @@ static wait_queue_head_t *page_waitqueue
	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 }

-void wait_on_page_bit(struct page *page, int bit_nr)
+void fastcall wait_on_page_bit(struct page *page, int bit_nr)
 {
	wait_queue_head_t *waitqueue = page_waitqueue(page);
	DEFINE_WAIT(wait);
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
-void unlock_page(struct page *page)
+void fastcall unlock_page(struct page *page)
 {
	wait_queue_head_t *waitqueue = page_waitqueue(page);
	smp_mb__before_clear_bit();
@@ -365,7 +365,7 @@ EXPORT_SYMBOL(end_page_writeback);
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
-void __lock_page(struct page *page)
+void fastcall __lock_page(struct page *page)
 {
	wait_queue_head_t *wqh = page_waitqueue(page);
	DEFINE_WAIT(wait);
@@ -953,7 +953,7 @@ asmlinkage ssize_t sys_readahead(int fd,
 * and schedules an I/O to read in its contents from disk.
 */
 static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
-static int page_cache_read(struct file * file, unsigned long offset)
+static int fastcall page_cache_read(struct file * file, unsigned long offset)
 {
	struct address_space *mapping = file->f_mapping;
	struct page *page;
diff -puN mm/highmem.c~fastcall-warning-fixes mm/highmem.c
--- 25/mm/highmem.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/mm/highmem.c	Mon Mar 1 15:59:45 2004
@@ -147,7 +147,7 @@ start:
	return vaddr;
 }

-void *kmap_high(struct page *page)
+void fastcall *kmap_high(struct page *page)
 {
	unsigned long vaddr;
@@ -170,7 +170,7 @@ void *kmap_high(struct page *page)

 EXPORT_SYMBOL(kmap_high);

-void kunmap_high(struct page *page)
+void fastcall kunmap_high(struct page *page)
 {
	unsigned long vaddr;
	unsigned long nr;
diff -puN mm/memory.c~fastcall-warning-fixes mm/memory.c
--- 25/mm/memory.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/mm/memory.c	Mon Mar 1 15:59:45 2004
@@ -145,7 +145,7 @@ void clear_page_tables(struct mmu_gather
	} while (--nr);
 }

-pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
	if (!pmd_present(*pmd)) {
		struct page *new;
@@ -171,7 +171,7 @@ out:
	return pte_offset_map(pmd, address);
 }

-pte_t * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
	if (!pmd_present(*pmd)) {
		pte_t *new;
@@ -1646,7 +1646,7 @@ int handle_mm_fault(struct mm_struct *mm
 * On a two-level page table, this ends up actually being entirely
 * optimized away.
 */
-pmd_t *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
	pmd_t *new;
diff -puN mm/page_alloc.c~fastcall-warning-fixes mm/page_alloc.c
--- 25/mm/page_alloc.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/mm/page_alloc.c	Mon Mar 1 15:59:45 2004
@@ -443,7 +443,7 @@ void drain_local_pages(void)
 * Free a 0-order page
 */
 static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
-static void free_hot_cold_page(struct page *page, int cold)
+static void fastcall free_hot_cold_page(struct page *page, int cold)
 {
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
@@ -462,12 +462,12 @@ static void free_hot_cold_page(struct pa
	put_cpu();
 }

-void free_hot_page(struct page *page)
+void fastcall free_hot_page(struct page *page)
 {
	free_hot_cold_page(page, 0);
 }

-void free_cold_page(struct page *page)
+void fastcall free_cold_page(struct page *page)
 {
	free_hot_cold_page(page, 1);
 }
@@ -532,7 +532,7 @@ static struct page *buffered_rmqueue(str
 * sized machine, GFP_HIGHMEM and GFP_KERNEL requests basically leave the DMA
 * zone untouched.
 */
-struct page *
+struct page * fastcall
 __alloc_pages(unsigned int gfp_mask, unsigned int order,
		struct zonelist *zonelist)
 {
@@ -685,7 +685,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
 * Common helper functions.
 */
-unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
 {
	struct page * page;
@@ -697,7 +697,7 @@ unsigned long __get_free_pages(unsigned

 EXPORT_SYMBOL(__get_free_pages);

-unsigned long get_zeroed_page(unsigned int gfp_mask)
+fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
 {
	struct page * page;
@@ -726,7 +726,7 @@ void __pagevec_free(struct pagevec *pvec
		free_hot_cold_page(pvec->pages[i], pvec->cold);
 }

-void __free_pages(struct page *page, unsigned int order)
+fastcall void __free_pages(struct page *page, unsigned int order)
 {
	if (!PageReserved(page) && put_page_testzero(page)) {
		if (order == 0)
@@ -738,7 +738,7 @@ void __free_pages(struct page *page, uns

 EXPORT_SYMBOL(__free_pages);

-void free_pages(unsigned long addr, unsigned int order)
+fastcall void free_pages(unsigned long addr, unsigned int order)
 {
	if (addr != 0) {
		BUG_ON(!virt_addr_valid(addr));
diff -puN mm/rmap.c~fastcall-warning-fixes mm/rmap.c
--- 25/mm/rmap.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/mm/rmap.c	Mon Mar 1 15:59:45 2004
@@ -112,7 +112,7 @@ pte_chain_encode(struct pte_chain *pte_c
 * If the page has a single-entry pte_chain, collapse that back to a PageDirect
 * representation. This way, it's only done under memory pressure.
 */
-int page_referenced(struct page * page)
+int fastcall page_referenced(struct page * page)
 {
	struct pte_chain *pc;
	int referenced = 0;
@@ -165,7 +165,7 @@ int page_referenced(struct page * page)
 * Add a new pte reverse mapping to a page.
 * The caller needs to hold the mm->page_table_lock.
 */
-struct pte_chain *
+struct pte_chain * fastcall
 page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
 {
	pte_addr_t pte_paddr = ptep_to_paddr(ptep);
@@ -221,7 +221,7 @@ out:
 * the page.
 * Caller needs to hold the mm->page_table_lock.
 */
-void page_remove_rmap(struct page *page, pte_t *ptep)
+void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
 {
	pte_addr_t pte_paddr = ptep_to_paddr(ptep);
	struct pte_chain *pc;
@@ -293,7 +293,7 @@ out_unlock:
 *		mm->page_table_lock	try_to_unmap_one(), trylock
 */
 static int FASTCALL(try_to_unmap_one(struct page *, pte_addr_t));
-static int try_to_unmap_one(struct page * page, pte_addr_t paddr)
+static int fastcall try_to_unmap_one(struct page * page, pte_addr_t paddr)
 {
	pte_t *ptep = rmap_ptep_map(paddr);
	unsigned long address = ptep_to_address(ptep);
@@ -382,7 +382,7 @@ out_unlock:
 * SWAP_AGAIN	- we missed a trylock, try again later
 * SWAP_FAIL	- the page is unswappable
 */
-int try_to_unmap(struct page * page)
+int fastcall try_to_unmap(struct page * page)
 {
	struct pte_chain *pc, *next_pc, *start;
	int ret = SWAP_SUCCESS;
diff -puN mm/slab.c~fastcall-warning-fixes mm/slab.c
--- 25/mm/slab.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/mm/slab.c	Mon Mar 1 15:59:45 2004
@@ -2134,7 +2134,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 *
 * Currently only used for dentry validation.
 */
-int kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
 {
	unsigned long addr = (unsigned long) ptr;
	unsigned long min_addr = PAGE_OFFSET;
diff -puN mm/swap.c~fastcall-warning-fixes mm/swap.c
--- 25/mm/swap.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/mm/swap.c	Mon Mar 1 15:59:45 2004
@@ -76,7 +76,7 @@ int rotate_reclaimable_page(struct page
 /*
 * FIXME: speed this up?
 */
-void activate_page(struct page *page)
+void fastcall activate_page(struct page *page)
 {
	struct zone *zone = page_zone(page);
@@ -97,7 +97,7 @@ void activate_page(struct page *page)
 * inactive,referenced	->	active,unreferenced
 * active,unreferenced	->	active,referenced
 */
-void mark_page_accessed(struct page *page)
+void fastcall mark_page_accessed(struct page *page)
 {
	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(mark_page_accessed);
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
 static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };

-void lru_cache_add(struct page *page)
+void fastcall lru_cache_add(struct page *page)
 {
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
@@ -126,7 +126,7 @@ void lru_cache_add(struct page *page)
	put_cpu_var(lru_add_pvecs);
 }

-void lru_cache_add_active(struct page *page)
+void fastcall lru_cache_add_active(struct page *page)
 {
	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
@@ -152,7 +152,7 @@ void lru_add_drain(void)
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
-void __page_cache_release(struct page *page)
+void fastcall __page_cache_release(struct page *page)
 {
	unsigned long flags;
	struct zone *zone = page_zone(page);
diff -puN net/bluetooth/rfcomm/core.c~fastcall-warning-fixes net/bluetooth/rfcomm/core.c
--- 25/net/bluetooth/rfcomm/core.c~fastcall-warning-fixes	Mon Mar 1 15:59:45 2004
+++ 25-akpm/net/bluetooth/rfcomm/core.c	Mon Mar 1 15:59:45 2004
@@ -409,7 +409,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d
	return len;
 }

-void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
+void fastcall __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
 {
	BT_DBG("dlc %p state %ld", d, d->state);
@@ -420,7 +420,7 @@ void __rfcomm_dlc_throttle(struct rfcomm
	rfcomm_schedule(RFCOMM_SCHED_TX);
 }

-void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
+void fastcall __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
 {
	BT_DBG("dlc %p state %ld", d, d->state);
_
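
[A closing editorial note, not part of the patch.]  The reason the
attribute must agree on both sides: with regparm(3) on i386, the first
three integer-class arguments are passed in %eax, %edx and %ecx instead
of on the stack.  A caller compiled against an unattributed prototype
pushes the arguments while the callee reads registers, so the mismatch
is not cosmetic.  A minimal demonstration (add3() is made up):

	#define fastcall __attribute__((regparm(3)))

	/* Callee takes a, b, c in %eax, %edx, %ecx. */
	int fastcall add3(int a, int b, int c)
	{
		return a + b + c;
	}

	int call_it(void)
	{
		/* Correct only because the call site sees the
		 * fastcall definition above; against a plain
		 * "int add3(int, int, int);" prototype the caller
		 * would push the arguments on the stack and add3()
		 * would read garbage from the registers. */
		return add3(1, 2, 3);
	}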