From: Prasanna S Panchamukhi

Minor changes to the kprobes code to provide memory allocation for the x86_64
architecture outside the kprobes spinlock.  The x86_64 instruction slots are
allocated with GFP_KERNEL/__vmalloc(), which may sleep, so the allocation is
now done before kprobe_lock is taken and only the copy of the original
instruction happens under the lock.  (An illustrative sketch of the resulting
register_kprobe() ordering follows the diff.)

Signed-off-by: Prasanna S Panchamukhi
Signed-off-by: Andrew Morton
---

 25-akpm/arch/i386/kernel/kprobes.c    |    6 +++++-
 25-akpm/arch/ppc64/kernel/kprobes.c   |   10 ++++++++--
 25-akpm/arch/sparc64/kernel/kprobes.c |    6 +++++-
 25-akpm/arch/x86_64/kernel/kprobes.c  |   16 +++++++++++++---
 25-akpm/include/linux/kprobes.h       |    1 +
 25-akpm/kernel/kprobes.c              |   13 ++++++++-----
 6 files changed, 40 insertions(+), 12 deletions(-)

diff -puN arch/i386/kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes arch/i386/kernel/kprobes.c
--- 25/arch/i386/kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes	Wed Jan 19 15:35:39 2005
+++ 25-akpm/arch/i386/kernel/kprobes.c	Wed Jan 19 15:35:39 2005
@@ -62,10 +62,14 @@ static inline int is_IF_modifier(kprobe_
 
 int arch_prepare_kprobe(struct kprobe *p)
 {
-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 	return 0;
 }
 
+void arch_copy_kprobe(struct kprobe *p)
+{
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+}
+
 void arch_remove_kprobe(struct kprobe *p)
 {
 }
diff -puN arch/ppc64/kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes arch/ppc64/kernel/kprobes.c
--- 25/arch/ppc64/kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes	Wed Jan 19 15:35:39 2005
+++ 25-akpm/arch/ppc64/kernel/kprobes.c	Wed Jan 19 15:35:39 2005
@@ -45,13 +45,19 @@ static struct pt_regs jprobe_saved_regs;
 
 int arch_prepare_kprobe(struct kprobe *p)
 {
-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-	if (IS_MTMSRD(p->ainsn.insn[0]) || IS_RFID(p->ainsn.insn[0]))
+	kprobe_opcode_t insn = *p->addr;
+
+	if (IS_MTMSRD(insn) || IS_RFID(insn))
 		/* cannot put bp on RFID/MTMSRD */
 		return 1;
 	return 0;
 }
 
+void arch_copy_kprobe(struct kprobe *p)
+{
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+}
+
 void arch_remove_kprobe(struct kprobe *p)
 {
 }
diff -puN arch/sparc64/kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes arch/sparc64/kernel/kprobes.c
--- 25/arch/sparc64/kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes	Wed Jan 19 15:35:39 2005
+++ 25-akpm/arch/sparc64/kernel/kprobes.c	Wed Jan 19 15:35:39 2005
@@ -40,9 +40,13 @@
 
 int arch_prepare_kprobe(struct kprobe *p)
 {
+	return 0;
+}
+
+void arch_copy_kprobe(struct kprobe *p)
+{
 	p->ainsn.insn[0] = *p->addr;
 	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
-	return 0;
 }
 
 void arch_remove_kprobe(struct kprobe *p)
diff -puN arch/x86_64/kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes arch/x86_64/kernel/kprobes.c
--- 25/arch/x86_64/kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes	Wed Jan 19 15:35:39 2005
+++ 25-akpm/arch/x86_64/kernel/kprobes.c	Wed Jan 19 15:35:39 2005
@@ -39,6 +39,8 @@
 #include
 #include
 
+static DECLARE_MUTEX(kprobe_mutex);
+
 /* kprobe_status settings */
 #define KPROBE_HIT_ACTIVE	0x00000001
 #define KPROBE_HIT_SS		0x00000002
@@ -75,17 +77,25 @@ static inline int is_IF_modifier(kprobe_
 int arch_prepare_kprobe(struct kprobe *p)
 {
 	/* insn: must be on special executable page on x86_64. */
+	up(&kprobe_mutex);
 	p->ainsn.insn = get_insn_slot();
+	down(&kprobe_mutex);
 	if (!p->ainsn.insn) {
 		return -ENOMEM;
 	}
-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
 	return 0;
 }
 
+void arch_copy_kprobe(struct kprobe *p)
+{
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
+}
+
 void arch_remove_kprobe(struct kprobe *p)
 {
+	up(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
+	down(&kprobe_mutex);
 }
 
 static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
@@ -425,12 +435,12 @@ static kprobe_opcode_t *get_insn_slot(vo
 	}
 
 	/* All out of space. Need to allocate a new page. Use slot 0.*/
-	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_ATOMIC);
+	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
 	if (!kip) {
 		return NULL;
 	}
 	kip->insns = (kprobe_opcode_t*) __vmalloc(PAGE_SIZE,
-		GFP_ATOMIC|__GFP_HIGHMEM, __pgprot(__PAGE_KERNEL_EXEC));
+		GFP_KERNEL|__GFP_HIGHMEM, __pgprot(__PAGE_KERNEL_EXEC));
 	if (!kip->insns) {
 		kfree(kip);
 		return NULL;
diff -puN include/linux/kprobes.h~kprobes-x86_64-memory-allocation-changes include/linux/kprobes.h
--- 25/include/linux/kprobes.h~kprobes-x86_64-memory-allocation-changes	Wed Jan 19 15:35:39 2005
+++ 25-akpm/include/linux/kprobes.h	Wed Jan 19 15:35:39 2005
@@ -95,6 +95,7 @@ static inline int kprobe_running(void)
 }
 
 extern int arch_prepare_kprobe(struct kprobe *p);
+extern void arch_copy_kprobe(struct kprobe *p);
 extern void arch_remove_kprobe(struct kprobe *p);
 extern void show_registers(struct pt_regs *regs);
 
diff -puN kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes kernel/kprobes.c
--- 25/kernel/kprobes.c~kprobes-x86_64-memory-allocation-changes	Wed Jan 19 15:35:39 2005
+++ 25-akpm/kernel/kprobes.c	Wed Jan 19 15:35:39 2005
@@ -76,18 +76,19 @@ struct kprobe *get_kprobe(void *addr)
 int register_kprobe(struct kprobe *p)
 {
 	int ret = 0;
-	unsigned long flags;
+	unsigned long flags = 0;
 
+	if ((ret = arch_prepare_kprobe(p)) != 0) {
+		goto out;
+	}
 	spin_lock_irqsave(&kprobe_lock, flags);
 	INIT_HLIST_NODE(&p->hlist);
 	if (get_kprobe(p->addr)) {
 		ret = -EEXIST;
 		goto out;
 	}
+	arch_copy_kprobe(p);
 
-	if ((ret = arch_prepare_kprobe(p)) != 0) {
-		goto out;
-	}
 	hlist_add_head(&p->hlist,
 		&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
@@ -97,14 +98,16 @@ int register_kprobe(struct kprobe *p)
 		(unsigned long) p->addr + sizeof(kprobe_opcode_t));
 out:
 	spin_unlock_irqrestore(&kprobe_lock, flags);
+	if (ret == -EEXIST)
+		arch_remove_kprobe(p);
 	return ret;
 }
 
 void unregister_kprobe(struct kprobe *p)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&kprobe_lock, flags);
 	arch_remove_kprobe(p);
+	spin_lock_irqsave(&kprobe_lock, flags);
 	*p->addr = p->opcode;
 	hlist_del(&p->hlist);
 	flush_icache_range((unsigned long) p->addr, _
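
For readers who want the ordering in isolation: GFP_KERNEL and __vmalloc()
allocations may sleep, so they cannot run while kprobe_lock is held with
interrupts disabled; hence register_kprobe() now calls arch_prepare_kprobe()
before taking the lock and only arch_copy_kprobe() under it.  What follows is
a minimal user-space sketch of that pattern, not kernel code; the names
(fake_probe, fake_prepare, fake_copy, fake_register, probe_lock) are invented
for illustration.

/*
 * Not part of the patch: a user-space analogue of the locking order the
 * patch establishes.  All names below are made up for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_INSN_SIZE 16

struct fake_probe {
	unsigned char *insn;		/* out-of-line copy of the probed instruction */
	const unsigned char *addr;	/* address being probed */
};

static pthread_spinlock_t probe_lock;

/* stands in for arch_prepare_kprobe(): allocation may sleep, so call it unlocked */
static int fake_prepare(struct fake_probe *p)
{
	p->insn = malloc(MAX_INSN_SIZE);
	return p->insn ? 0 : -1;
}

/* stands in for arch_copy_kprobe(): a plain copy, safe while the lock is held */
static void fake_copy(struct fake_probe *p)
{
	memcpy(p->insn, p->addr, MAX_INSN_SIZE);
}

/* mirrors the reordered register_kprobe(): prepare first, then lock, then copy */
static int fake_register(struct fake_probe *p)
{
	int ret = fake_prepare(p);

	if (ret)
		return ret;
	pthread_spin_lock(&probe_lock);
	fake_copy(p);
	pthread_spin_unlock(&probe_lock);
	return 0;
}

int main(void)
{
	static const unsigned char code[MAX_INSN_SIZE] = { 0x90 };	/* stand-in bytes */
	struct fake_probe p = { .addr = code };

	pthread_spin_init(&probe_lock, PTHREAD_PROCESS_PRIVATE);
	printf("fake_register: %d\n", fake_register(&p));
	free(p.insn);
	pthread_spin_destroy(&probe_lock);
	return 0;
}

Build with e.g. "cc -O2 sketch.c -lpthread" if you want to try the sketch.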