aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordavem <davem>2002-01-11 08:45:32 +0000
committerdavem <davem>2002-01-11 08:45:32 +0000
commit52ecf43dd4fcb56825978b3c389e84116ac1dee8 (patch)
tree459f454d7f7c06e30878f2ca1aea9a010228aaac
parentdb2f40212a362285c3221229ce941c06972b381b (diff)
downloadnetdev-vger-cvs-52ecf43dd4fcb56825978b3c389e84116ac1dee8.tar.gz
Known broken commit of 2.5.2-pre10 + sched-01-2.5.2-pre10-E1
I want to move my debugging over to a machine other than my workstation :-)
-rw-r--r--CREDITS17
-rw-r--r--Documentation/Configure.help10
-rw-r--r--Documentation/usb/auerswald.txt30
-rw-r--r--MAINTAINERS9
-rw-r--r--Makefile2
-rw-r--r--arch/arm/kernel/debug.S (renamed from arch/arm/kernel/debug-armv.S)0
-rw-r--r--arch/arm/kernel/head.S (renamed from arch/arm/kernel/head-armv.S)0
-rw-r--r--arch/arm/kernel/irq-arch.c41
-rw-r--r--arch/arm/mach-arc/small_page.c (renamed from arch/arm/mm/small_page.c)0
-rw-r--r--arch/arm/mach-footbridge/dc21285.c (renamed from arch/arm/kernel/dec21285.c)0
-rw-r--r--arch/arm/mach-footbridge/dma.c (renamed from arch/arm/kernel/dma-footbridge.c)0
-rw-r--r--arch/arm/mach-ftvpci/leds.c (renamed from arch/arm/kernel/leds-ftvpci.c)0
-rw-r--r--arch/arm/mach-ftvpci/pci.c (renamed from arch/arm/kernel/ftv-pci.c)0
-rw-r--r--arch/arm/mach-rpc/dma.c (renamed from arch/arm/kernel/dma-rpc.c)0
-rw-r--r--arch/cris/drivers/usb-host.c10
-rw-r--r--arch/i386/defconfig1
-rw-r--r--arch/i386/kernel/apic.c6
-rw-r--r--arch/i386/kernel/nmi.c2
-rw-r--r--arch/i386/kernel/process.c3
-rw-r--r--arch/i386/kernel/smp.c14
-rw-r--r--arch/i386/kernel/smpboot.c15
-rw-r--r--arch/i386/mm/fault.c6
-rw-r--r--arch/sparc/kernel/process.c8
-rw-r--r--arch/sparc/kernel/sun4d_smp.c10
-rw-r--r--arch/sparc/kernel/sun4m_smp.c8
-rw-r--r--arch/sparc/kernel/trampoline.S4
-rw-r--r--arch/sparc64/defconfig1
-rw-r--r--arch/sparc64/kernel/ioctl32.c22
-rw-r--r--arch/sparc64/kernel/irq.c18
-rw-r--r--arch/sparc64/kernel/process.c12
-rw-r--r--arch/sparc64/kernel/rtrap.S4
-rw-r--r--arch/sparc64/kernel/smp.c23
-rw-r--r--arch/sparc64/kernel/trampoline.S4
-rw-r--r--arch/sparc64/kernel/traps.c13
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/bluetooth/hci_usb.c12
-rw-r--r--drivers/char/joystick/iforce.c10
-rw-r--r--drivers/char/mwave/mwavedd.c1
-rw-r--r--drivers/ide/ataraid.c14
-rw-r--r--drivers/isdn/avmb1/b1pci.c23
-rw-r--r--drivers/isdn/avmb1/c4.c99
-rw-r--r--drivers/isdn/avmb1/t1pci.c81
-rw-r--r--drivers/isdn/hisax/st5481.h2
-rw-r--r--drivers/isdn/hisax/st5481_usb.c30
-rw-r--r--drivers/md/md.c12
-rw-r--r--drivers/net/slip.c6
-rw-r--r--drivers/scsi/scsicam.c28
-rw-r--r--drivers/usb/Config.in4
-rw-r--r--drivers/usb/Makefile11
-rw-r--r--drivers/usb/acm.c6
-rw-r--r--drivers/usb/auerswald.c2156
-rw-r--r--drivers/usb/bluetooth.c14
-rw-r--r--drivers/usb/catc.c14
-rw-r--r--drivers/usb/devio.c38
-rw-r--r--drivers/usb/hcd.c10
-rw-r--r--drivers/usb/hcd/ehci-q.c6
-rw-r--r--drivers/usb/hid-core.c12
-rw-r--r--drivers/usb/hid.h2
-rw-r--r--drivers/usb/kaweth.c24
-rw-r--r--drivers/usb/pegasus.c40
-rw-r--r--drivers/usb/pegasus.h2
-rw-r--r--drivers/usb/scanner.c20
-rw-r--r--drivers/usb/scanner.h2
-rw-r--r--drivers/usb/serial/belkin_sa.c3
-rw-r--r--drivers/usb/serial/cyberjack.c4
-rw-r--r--drivers/usb/serial/digi_acceleport.c12
-rw-r--r--drivers/usb/serial/empeg.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/io_edgeport.c5
-rw-r--r--drivers/usb/serial/io_tables.h4
-rw-r--r--drivers/usb/serial/ir-usb.c3
-rw-r--r--drivers/usb/serial/keyspan.c5
-rw-r--r--drivers/usb/serial/keyspan.h15
-rw-r--r--drivers/usb/serial/keyspan_pda.c17
-rw-r--r--drivers/usb/serial/mct_u232.c3
-rw-r--r--drivers/usb/serial/omninet.c4
-rw-r--r--drivers/usb/serial/pl2303.c3
-rw-r--r--drivers/usb/serial/whiteheat.c5
-rw-r--r--drivers/usb/storage/transport.c14
-rw-r--r--drivers/usb/uhci.c16
-rw-r--r--drivers/usb/usb-ohci.c10
-rw-r--r--drivers/usb/usb-skeleton.c38
-rw-r--r--drivers/usb/usb-uhci.c16
-rw-r--r--drivers/usb/usb.c22
-rw-r--r--drivers/usb/usbkbd.c12
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/block_dev.c3
-rw-r--r--fs/buffer.c15
-rw-r--r--fs/ext3/super.c25
-rw-r--r--fs/jbd/journal.c43
-rw-r--r--fs/jbd/recovery.c11
-rw-r--r--fs/jbd/revoke.c5
-rw-r--r--fs/jbd/transaction.c4
-rw-r--r--fs/jffs2/background.c2
-rw-r--r--fs/locks.c3
-rw-r--r--fs/nfs/inode.c3
-rw-r--r--fs/nfs/pagelist.c3
-rw-r--r--fs/proc/array.c10
-rw-r--r--fs/proc/proc_misc.c10
-rw-r--r--fs/reiserfs/buffer2.c12
-rw-r--r--fs/reiserfs/journal.c3
-rw-r--r--fs/reiserfs/namei.c4
-rw-r--r--fs/reiserfs/stree.c3
-rw-r--r--fs/ufs/truncate.c5
-rw-r--r--include/asm-i386/bitops.h8
-rw-r--r--include/asm-i386/mmu_context.h22
-rw-r--r--include/asm-i386/pgalloc.h1
-rw-r--r--include/asm-i386/smp.h15
-rw-r--r--include/asm-i386/smplock.h21
-rw-r--r--include/asm-sparc/mmu_context.h23
-rw-r--r--include/asm-sparc/smp.h2
-rw-r--r--include/asm-sparc/smplock.h46
-rw-r--r--include/asm-sparc64/mmu_context.h23
-rw-r--r--include/asm-sparc64/smp.h10
-rw-r--r--include/asm-sparc64/smplock.h37
-rw-r--r--include/linux/fs.h36
-rw-r--r--include/linux/jbd.h7
-rw-r--r--include/linux/kernel_stat.h3
-rw-r--r--include/linux/list.h2
-rw-r--r--include/linux/sched.h181
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/usb.h27
-rw-r--r--include/linux/usbdevice_fs.h10
-rw-r--r--include/net/bluetooth/hci_usb.h2
-rw-r--r--init/do_mounts.c6
-rw-r--r--init/main.c20
-rw-r--r--kernel/capability.c2
-rw-r--r--kernel/exit.c146
-rw-r--r--kernel/fork.c51
-rw-r--r--kernel/ksyms.c7
-rw-r--r--kernel/printk.c1
-rw-r--r--kernel/ptrace.c15
-rw-r--r--kernel/sched.c1527
-rw-r--r--kernel/signal.c9
-rw-r--r--kernel/softirq.c28
-rw-r--r--kernel/sys.c6
-rw-r--r--kernel/timer.c94
-rw-r--r--mm/oom_kill.c8
-rw-r--r--mm/page_alloc.c4
-rw-r--r--net/ipv4/tcp_output.c5
-rw-r--r--net/sched/sch_generic.c6
-rw-r--r--net/socket.c3
-rw-r--r--net/sunrpc/sched.c9
-rw-r--r--net/unix/af_unix.c8
144 files changed, 4162 insertions, 1632 deletions
diff --git a/CREDITS b/CREDITS
index 382a935bf..8a002fc77 100644
--- a/CREDITS
+++ b/CREDITS
@@ -527,6 +527,16 @@ S: 14509 NE 39th Street #1096
S: Bellevue, Washington 98007
S: USA
+N: Christopher L. Cheney
+E: ccheney@debian.org
+E: ccheney@cheney.cx
+W: http://www.cheney.cx
+P: 1024D/8E384AF2 2D31 1927 87D7 1F24 9FF9 1BC5 D106 5AB3 8E38 4AF2
+D: Vista Imaging usb webcam driver
+S: 314 Prince of Wales
+S: Conroe, TX 77304
+S: USA
+
N: Stuart Cheshire
E: cheshire@cs.stanford.edu
D: Author of Starmode Radio IP (STRIP) driver
@@ -1963,9 +1973,10 @@ S: 64289 Darmstadt
S: Germany
N: Mark W. McClelland
-E: mwm@i.am
+E: mmcclell@bigfoot.com
E: mark@alpha.dyndns.org
W: http://alpha.dyndns.org/ov511/
+P: 1024D/357375CC 317C 58AC 1B39 2AB0 AB96 EB38 0B6F 731F 3573 75CC
D: OV511 driver
S: (address available on request)
S: USA
@@ -2124,6 +2135,10 @@ S: 22 Seaview St
S: Fullarton 5063
S: South Australia
+N: Wolfgang Muees
+E: wmues@nexgo.de
+D: Auerswald USB driver
+
N: Ian A. Murdock
E: imurdock@gnu.ai.mit.edu
D: Creator of Debian distribution
diff --git a/Documentation/Configure.help b/Documentation/Configure.help
index d70d37324..3305fac7c 100644
--- a/Documentation/Configure.help
+++ b/Documentation/Configure.help
@@ -13488,6 +13488,16 @@ CONFIG_USB_RIO500
The module will be called rio500.o. If you want to compile it as
a module, say M here and read <file:Documentation/modules.txt>.
+USB Auerswald ISDN device support
+CONFIG_USB_AUERSWALD
+ Say Y here if you want to connect an Auerswald USB ISDN Device
+ to your computer's USB port.
+
+ This code is also available as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called auerswald.o. If you want to compile it as
+ a module, say M here and read <file:Documentation/modules.txt>.
+
D-Link DSB-R100 FM radio support
CONFIG_USB_DSBR
Say Y here if you want to connect this type of radio to your
diff --git a/Documentation/usb/auerswald.txt b/Documentation/usb/auerswald.txt
new file mode 100644
index 000000000..fb66b3783
--- /dev/null
+++ b/Documentation/usb/auerswald.txt
@@ -0,0 +1,30 @@
+ Auerswald USB kernel driver
+ ===========================
+
+What is it? What can I do with it?
+==================================
+The auerswald USB kernel driver connects your linux 2.4.x
+system to the auerswald usb-enabled devices.
+
+There are two types of auerswald usb devices:
+a) small PBX systems (ISDN)
+b) COMfort system telephones (ISDN)
+
+The driver installation creates the devices
+/dev/usb/auer0..15. These devices carry a vendor-
+specific protocol. You may run all auerswald java
+software on it. The java software needs a native
+library "libAuerUsbJNINative.so" installed on
+your system. This library is available from
+auerswald and shipped as part of the java software.
+
+You may create the devices with:
+ mknod -m 666 /dev/usb/auer0 c 180 80
+ ...
+ mknod -m 666 /dev/usb/auer15 c 180 95
+
+Future plans
+============
+- Connection to ISDN4LINUX (the hisax interface)
+
+The maintainer of this driver is wmues@nexgo.de
diff --git a/MAINTAINERS b/MAINTAINERS
index b2d05b640..936f12205 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1608,7 +1608,7 @@ S: Maintained
USB OV511 DRIVER
P: Mark McClelland
-M: mwm@i.am
+M: mmcclell@bigfoot.com
L: linux-usb-users@lists.sourceforge.net
L: linux-usb-devel@lists.sourceforge.net
W: http://alpha.dyndns.org/ov511/
@@ -1666,6 +1666,13 @@ L: linux-usb-users@lists.sourceforge.net
L: linux-usb-devel@lists.sourceforge.net
S: Supported
+USB AUERSWALD DRIVER
+P: Wolfgang Muees
+M: wmues@nexgo.de
+L: linux-usb-users@lists.sourceforge.net
+L: linux-usb-devel@lists.sourceforge.net
+S: Maintained
+
USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER
P: Gary Brubaker
M: xavyer@ix.netcom.com
diff --git a/Makefile b/Makefile
index 8ce3734e4..322ec68e9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 2
-EXTRAVERSION =-pre9
+EXTRAVERSION =-pre10
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
diff --git a/arch/arm/kernel/debug-armv.S b/arch/arm/kernel/debug.S
index 43953f66e..43953f66e 100644
--- a/arch/arm/kernel/debug-armv.S
+++ b/arch/arm/kernel/debug.S
diff --git a/arch/arm/kernel/head-armv.S b/arch/arm/kernel/head.S
index 36e7f1420..36e7f1420 100644
--- a/arch/arm/kernel/head-armv.S
+++ b/arch/arm/kernel/head.S
diff --git a/arch/arm/kernel/irq-arch.c b/arch/arm/kernel/irq-arch.c
deleted file mode 100644
index 5cd9c998b..000000000
--- a/arch/arm/kernel/irq-arch.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * linux/arch/arm/kernel/irq-arch.c
- *
- * Copyright (C) 1995-2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * We contain the architecture-specific parts of interrupt handling
- * in this file. In 2.5, it will move into the various arch/arm/mach-*
- * directories.
- */
-#include <linux/ptrace.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-
-#include <asm/hardware.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-#include <asm/mach/irq.h>
-
-/*
- * Get architecture specific interrupt handlers
- * and interrupt initialisation.
- */
-#include <asm/arch/irq.h>
-
-void __init genarch_init_irq(void)
-{
- irq_init_irq();
-}
-
diff --git a/arch/arm/mm/small_page.c b/arch/arm/mach-arc/small_page.c
index a848bd68f..a848bd68f 100644
--- a/arch/arm/mm/small_page.c
+++ b/arch/arm/mach-arc/small_page.c
diff --git a/arch/arm/kernel/dec21285.c b/arch/arm/mach-footbridge/dc21285.c
index 3d08c2e7f..3d08c2e7f 100644
--- a/arch/arm/kernel/dec21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
diff --git a/arch/arm/kernel/dma-footbridge.c b/arch/arm/mach-footbridge/dma.c
index 1674a007a..1674a007a 100644
--- a/arch/arm/kernel/dma-footbridge.c
+++ b/arch/arm/mach-footbridge/dma.c
diff --git a/arch/arm/kernel/leds-ftvpci.c b/arch/arm/mach-ftvpci/leds.c
index a1cf22dd8..a1cf22dd8 100644
--- a/arch/arm/kernel/leds-ftvpci.c
+++ b/arch/arm/mach-ftvpci/leds.c
diff --git a/arch/arm/kernel/ftv-pci.c b/arch/arm/mach-ftvpci/pci.c
index 11369bc58..11369bc58 100644
--- a/arch/arm/kernel/ftv-pci.c
+++ b/arch/arm/mach-ftvpci/pci.c
diff --git a/arch/arm/kernel/dma-rpc.c b/arch/arm/mach-rpc/dma.c
index 402b71cdb..402b71cdb 100644
--- a/arch/arm/kernel/dma-rpc.c
+++ b/arch/arm/mach-rpc/dma.c
diff --git a/arch/cris/drivers/usb-host.c b/arch/cris/drivers/usb-host.c
index 233fdbbae..b2a93ce4d 100644
--- a/arch/cris/drivers/usb-host.c
+++ b/arch/cris/drivers/usb-host.c
@@ -1981,7 +1981,7 @@ static int etrax_rh_submit_urb(urb_t *urb)
struct usb_device *usb_dev = urb->dev;
etrax_hc_t *hc = usb_dev->bus->hcpriv;
unsigned int pipe = urb->pipe;
- devrequest *cmd = (devrequest *) urb->setup_packet;
+ struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
void *data = urb->transfer_buffer;
int leni = urb->transfer_buffer_length;
int len = 0;
@@ -2009,10 +2009,10 @@ static int etrax_rh_submit_urb(urb_t *urb)
return 0;
}
- bmRType_bReq = cmd->requesttype | cmd->request << 8;
- wValue = le16_to_cpu(cmd->value);
- wIndex = le16_to_cpu(cmd->index);
- wLength = le16_to_cpu(cmd->length);
+ bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
+ wValue = le16_to_cpu(cmd->wValue);
+ wIndex = le16_to_cpu(cmd->wIndex);
+ wLength = le16_to_cpu(cmd->wLength);
dbg_rh("bmRType_bReq : 0x%04X (%d)", bmRType_bReq, bmRType_bReq);
dbg_rh("wValue : 0x%04X (%d)", wValue, wValue);
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 88b531035..b95477775 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -825,6 +825,7 @@ CONFIG_USB_STORAGE=y
# USB Miscellaneous drivers
#
# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_AUERSWALD is not set
#
# Kernel hacking
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index dbd211d6a..d65b28f1e 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -792,8 +792,7 @@ void setup_APIC_timer(void * data)
*/
slice = clocks / (smp_num_cpus+1);
- printk("cpu: %d, clocks: %d, slice: %d\n",
- smp_processor_id(), clocks, slice);
+ printk("cpu: %d, clocks: %d, slice: %d\n", smp_processor_id(), clocks, slice);
/*
* Wait for IRQ0's slice:
@@ -816,8 +815,7 @@ void setup_APIC_timer(void * data)
__setup_APIC_LVTT(clocks);
- printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n",
- smp_processor_id(), t0, t1, delta, slice, clocks);
+ printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n", smp_processor_id(), t0, t1, delta, slice, clocks);
__restore_flags(flags);
}
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 39087b9d4..a73237983 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -283,7 +283,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
* to get a message out.
*/
bust_spinlocks(1);
- printk("NMI Watchdog detected LOCKUP on CPU%d, registers:\n", cpu);
+ printk("NMI Watchdog detected LOCKUP on CPU%d, eip %08lx, registers:\n", cpu, regs->eip);
show_registers(regs);
printk("console shuts up ...\n");
console_silent();
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index c212e1da6..e17b07ecd 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -123,9 +123,6 @@ static void poll_idle (void)
void cpu_idle (void)
{
/* endless idle loop with no priority at all */
- init_idle();
- current->nice = 20;
-
while (1) {
void (*idle)(void) = pm_idle;
if (!idle)
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index d04a4306c..c0b3a94a1 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -105,7 +105,7 @@
/* The 'big kernel lock' */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};
+struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};
/*
* the following functions deal with sending IPIs between CPUs.
@@ -490,13 +490,23 @@ void flush_tlb_all(void)
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-
void smp_send_reschedule(int cpu)
{
send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}
/*
+ * this function sends a reschedule IPI to all (other) CPUs.
+ * This should only be used if some 'global' task became runnable,
+ * such as a RT task, that must be handled now. The first CPU
+ * that manages to grab the task will run it.
+ */
+void smp_send_reschedule_all(void)
+{
+ send_IPI_allbutself(RESCHEDULE_VECTOR);
+}
+
+/*
* Structure and data for smp_call_function(). This is designed to minimise
* static memory requirements. It also looks cleaner.
*/
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 73727329a..97663b228 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -308,14 +308,14 @@ static void __init synchronize_tsc_bp (void)
if (tsc_values[i] < avg)
realdelta = -realdelta;
- printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
- i, realdelta);
+ printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n", i, realdelta);
}
sum += delta;
}
if (!buggy)
printk("passed.\n");
+ ;
}
static void __init synchronize_tsc_ap (void)
@@ -365,7 +365,7 @@ void __init smp_callin(void)
* (This works even if the APIC is not enabled.)
*/
phys_id = GET_APIC_ID(apic_read(APIC_ID));
- cpuid = current->processor;
+ cpuid = smp_processor_id();
if (test_and_set_bit(cpuid, &cpu_online_map)) {
printk("huh, phys CPU#%d, CPU#%d already present??\n",
phys_id, cpuid);
@@ -471,6 +471,7 @@ int __init start_secondary(void *unused)
*/
local_flush_tlb();
+ init_idle();
return cpu_idle();
}
@@ -803,16 +804,13 @@ static void __init do_boot_cpu (int apicid)
if (!idle)
panic("No idle process for CPU %d", cpu);
- idle->processor = cpu;
- idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */
+ idle->cpu = cpu;
map_cpu_to_boot_apicid(cpu, apicid);
idle->thread.eip = (unsigned long) start_secondary;
- del_from_runqueue(idle);
unhash_process(idle);
- init_tasks[cpu] = idle;
/* start_eip had better be page-aligned! */
start_eip = setup_trampoline();
@@ -1020,8 +1018,7 @@ void __init smp_boot_cpus(void)
map_cpu_to_boot_apicid(0, boot_cpu_apicid);
global_irq_holder = 0;
- current->processor = 0;
- init_idle();
+ current->cpu = 0;
smp_tune_scheduling();
/*
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 89011a376..c1f8bca8f 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -88,8 +88,7 @@ bad_area:
out_of_memory:
if (current->pid == 1) {
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
goto survive;
}
goto bad_area;
@@ -344,8 +343,7 @@ no_context:
out_of_memory:
up_read(&mm->mmap_sem);
if (tsk->pid == 1) {
- tsk->policy |= SCHED_YIELD;
- schedule();
+ yield();
down_read(&mm->mmap_sem);
goto survive;
}
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
index d81137570..8d7d1689b 100644
--- a/arch/sparc/kernel/process.c
+++ b/arch/sparc/kernel/process.c
@@ -1,4 +1,4 @@
-/* $Id: process.c,v 1.159 2002-01-08 16:00:14 davem Exp $
+/* $Id: process.c,v 1.160 2002-01-11 08:45:38 davem Exp $
* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -60,9 +60,6 @@ int cpu_idle(void)
goto out;
/* endless idle loop with no priority at all */
- current->nice = 20;
- init_idle();
-
for (;;) {
if (ARCH_SUN4C_SUN4) {
static int count = HZ;
@@ -108,9 +105,6 @@ out:
int cpu_idle(void)
{
/* endless idle loop with no priority at all */
- current->nice = 20;
- init_idle();
-
while(1) {
if(current->need_resched) {
schedule();
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index e8f5fb37b..a63ffb067 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -127,7 +127,7 @@ void __init smp4d_callin(void)
while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
barrier();
- while(current_set[cpuid]->processor != cpuid)
+ while(current_set[cpuid]->cpu != cpuid)
barrier();
/* Fix idle thread fields. */
@@ -197,10 +197,9 @@ void __init smp4d_boot_cpus(void)
mid_xlate[i] = i;
__cpu_number_map[boot_cpu_id] = 0;
__cpu_logical_map[0] = boot_cpu_id;
- current->processor = boot_cpu_id;
+ current->cpu = boot_cpu_id;
smp_store_cpu_info(boot_cpu_id);
smp_setup_percpu_timer();
- init_idle();
local_flush_cache_all();
if(linux_num_cpus == 1)
return; /* Not an MP box. */
@@ -222,14 +221,11 @@ void __init smp4d_boot_cpus(void)
cpucount++;
p = init_task.prev_task;
- init_tasks[i] = p;
- p->processor = i;
- p->cpus_runnable = 1 << i; /* we schedule the first task manually */
+ p->cpu = i;
current_set[i] = p;
- del_from_runqueue(p);
unhash_process(p);
for (no = 0; no < linux_num_cpus; no++)
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 676b9f261..4cd8f8bb5 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -170,12 +170,11 @@ void __init smp4m_boot_cpus(void)
mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
__cpu_number_map[boot_cpu_id] = 0;
__cpu_logical_map[0] = boot_cpu_id;
- current->processor = boot_cpu_id;
+ current->cpu = boot_cpu_id;
smp_store_cpu_info(boot_cpu_id);
set_irq_udt(mid_xlate[boot_cpu_id]);
smp_setup_percpu_timer();
- init_idle();
local_flush_cache_all();
if(linux_num_cpus == 1)
return; /* Not an MP box. */
@@ -195,14 +194,11 @@ void __init smp4m_boot_cpus(void)
cpucount++;
p = init_task.prev_task;
- init_tasks[i] = p;
- p->processor = i;
- p->cpus_runnable = 1 << i; /* we schedule the first task manually */
+ p->cpu = i;
current_set[i] = p;
- del_from_runqueue(p);
unhash_process(p);
/* See trampoline.S for details... */
diff --git a/arch/sparc/kernel/trampoline.S b/arch/sparc/kernel/trampoline.S
index b74c1912d..262d78bb5 100644
--- a/arch/sparc/kernel/trampoline.S
+++ b/arch/sparc/kernel/trampoline.S
@@ -1,4 +1,4 @@
-/* $Id: trampoline.S,v 1.13 1999-08-04 03:19:15 davem Exp $
+/* $Id: trampoline.S,v 1.14 2002-01-11 08:45:38 davem Exp $
* trampoline.S: SMP cpu boot-up trampoline code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -88,6 +88,8 @@ cpu3_startup:
.align 4
smp_do_cpu_idle:
+ call C_LABEL(init_idle)
+ nop
call C_LABEL(cpu_idle)
mov 0, %o0
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 7fa81bfca..3ff8f9448 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -840,6 +840,7 @@ CONFIG_USB_SERIAL_OMNINET=m
# USB Miscellaneous drivers
#
CONFIG_USB_RIO500=m
+CONFIG_USB_AUERSWALD=m
#
# Bluetooth support
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index c5d732a1b..c9ef8d5a6 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -1,4 +1,4 @@
-/* $Id: ioctl32.c,v 1.134 2002-01-04 21:07:15 davem Exp $
+/* $Id: ioctl32.c,v 1.135 2002-01-11 08:45:38 davem Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
@@ -3427,11 +3427,11 @@ static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
}
struct usbdevfs_ctrltransfer32 {
- __u8 requesttype;
- __u8 request;
- __u16 value;
- __u16 index;
- __u16 length;
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
__u32 timeout; /* in milliseconds */
__u32 data;
};
@@ -3461,14 +3461,14 @@ static int do_usbdevfs_control(unsigned int fd, unsigned int cmd, unsigned long
/* In usbdevice_fs, it limits the control buffer to a page,
* for simplicity so do we.
*/
- if (!uptr || kctrl.length > PAGE_SIZE)
+ if (!uptr || kctrl.wLength > PAGE_SIZE)
return -EINVAL;
kptr = (void *)__get_free_page(GFP_KERNEL);
- if ((kctrl.requesttype & 0x80) == 0) {
+ if ((kctrl.bRequestType & 0x80) == 0) {
err = -EFAULT;
- if (copy_from_user(kptr, uptr, kctrl.length))
+ if (copy_from_user(kptr, uptr, kctrl.wLength))
goto out;
}
@@ -3480,8 +3480,8 @@ static int do_usbdevfs_control(unsigned int fd, unsigned int cmd, unsigned long
set_fs(old_fs);
if (err >= 0 &&
- ((kctrl.requesttype & 0x80) != 0)) {
- if (copy_to_user(uptr, kptr, kctrl.length))
+ ((kctrl.bRequestType & 0x80) != 0)) {
+ if (copy_to_user(uptr, kptr, kctrl.wLength))
err = -EFAULT;
}
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 003fad140..9e8648a10 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -1,4 +1,4 @@
-/* $Id: irq.c,v 1.113 2001-12-11 04:55:51 davem Exp $
+/* $Id: irq.c,v 1.114 2002-01-11 08:45:38 davem Exp $
* irq.c: UltraSparc IRQ handling/init/registry.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -163,7 +163,7 @@ void enable_irq(unsigned int irq)
tid = ((tid & UPA_CONFIG_MID) << 9);
tid &= IMAP_TID_UPA;
} else {
- tid = (starfire_translate(imap, current->processor) << 26);
+ tid = (starfire_translate(imap, smp_processor_id()) << 26);
tid &= IMAP_TID_UPA;
}
@@ -1253,19 +1253,15 @@ void enable_prom_timer(void)
prom_timers->count0 = 0;
}
+/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
- static int called = 0;
-
- if (called == 0) {
- called = 1;
- map_prom_timers();
- kill_prom_timer();
- memset(&ivector_table[0], 0, sizeof(ivector_table));
+ map_prom_timers();
+ kill_prom_timer();
+ memset(&ivector_table[0], 0, sizeof(ivector_table));
#ifndef CONFIG_SMP
- memset(&__up_workvec[0], 0, sizeof(__up_workvec));
+ memset(&__up_workvec[0], 0, sizeof(__up_workvec));
#endif
- }
/* We need to clear any IRQ's pending in the soft interrupt
* registers, a spurious one could be left around from the
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 6355d71c1..6cf8e54b9 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -1,4 +1,4 @@
-/* $Id: process.c,v 1.127 2002-01-08 16:00:14 davem Exp $
+/* $Id: process.c,v 1.128 2002-01-11 08:45:38 davem Exp $
* arch/sparc64/kernel/process.c
*
* Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -53,9 +53,6 @@ int cpu_idle(void)
return -EPERM;
/* endless idle loop with no priority at all */
- current->nice = 20;
- init_idle();
-
for (;;) {
/* If current->need_resched is zero we should really
* setup for a system wakup event and execute a shutdown
@@ -78,13 +75,10 @@ int cpu_idle(void)
/*
* the idle loop on a UltraMultiPenguin...
*/
-#define idle_me_harder() (cpu_data[current->processor].idle_volume += 1)
-#define unidle_me() (cpu_data[current->processor].idle_volume = 0)
+#define idle_me_harder() (cpu_data[smp_processor_id()].idle_volume += 1)
+#define unidle_me() (cpu_data[smp_processor_id()].idle_volume = 0)
int cpu_idle(void)
{
- current->nice = 20;
- init_idle();
-
while(1) {
if (current->need_resched != 0) {
unidle_me();
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 71d071141..a8735015f 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -1,4 +1,4 @@
-/* $Id: rtrap.S,v 1.58 2001-12-24 04:33:02 davem Exp $
+/* $Id: rtrap.S,v 1.59 2002-01-11 08:45:38 davem Exp $
* rtrap.S: Preparing for return from trap on Sparc V9.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -141,7 +141,7 @@ __handle_signal:
.align 64
.globl rtrap_clr_l6, rtrap, irqsz_patchme
rtrap_clr_l6: clr %l6
-rtrap: lduw [%g6 + AOFF_task_processor], %l0
+rtrap: lduw [%g6 + AOFF_task_cpu], %l0
sethi %hi(irq_stat), %l2 ! &softirq_active
or %l2, %lo(irq_stat), %l2 ! &softirq_active
irqsz_patchme: sllx %l0, 0, %l0
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index a8f2a0994..f5a9badf7 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -220,20 +220,8 @@ void __init smp_callin(void)
while (!smp_threads_ready)
membar("#LoadLoad");
-}
-
-extern int cpu_idle(void);
-extern void init_IRQ(void);
-
-void initialize_secondary(void)
-{
-}
-int start_secondary(void *unused)
-{
- trap_init();
- init_IRQ();
- return cpu_idle();
+ init_idle();
}
void cpu_panic(void)
@@ -259,7 +247,6 @@ void __init smp_boot_cpus(void)
printk("Entering UltraSMPenguin Mode...\n");
__sti();
smp_store_cpu_info(boot_cpu_id);
- init_idle();
if (linux_num_cpus == 1)
return;
@@ -278,16 +265,13 @@ void __init smp_boot_cpus(void)
int no;
prom_printf("Starting CPU %d... ", i);
- kernel_thread(start_secondary, NULL, CLONE_PID);
+ kernel_thread(NULL, NULL, CLONE_PID);
cpucount++;
p = init_task.prev_task;
- init_tasks[cpucount] = p;
- p->processor = i;
- p->cpus_runnable = 1UL << i; /* we schedule the first task manually */
+ p->cpu = i;
- del_from_runqueue(p);
unhash_process(p);
callin_flag = 0;
@@ -1139,7 +1123,6 @@ void __init smp_tick_init(void)
__cpu_number_map[boot_cpu_id] = 0;
prom_cpu_nodes[boot_cpu_id] = linux_cpus[0].prom_node;
__cpu_logical_map[0] = boot_cpu_id;
- current->processor = boot_cpu_id;
prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index f7944851e..58b122929 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -1,4 +1,4 @@
-/* $Id: trampoline.S,v 1.24 2001-11-16 21:59:20 davem Exp $
+/* $Id: trampoline.S,v 1.25 2002-01-11 08:45:38 davem Exp $
* trampoline.S: Jump start slave processors on sparc64.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -262,7 +262,7 @@ startup_continue:
wrpr %o1, PSTATE_IG, %pstate
/* Get our UPA MID. */
- lduw [%o2 + AOFF_task_processor], %g1
+ lduw [%o2 + AOFF_task_cpu], %g1
sethi %hi(cpu_data), %g5
or %g5, %lo(cpu_data), %g5
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 6f8c98c1c..7d61a0fba 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1,4 +1,4 @@
-/* $Id: traps.c,v 1.82 2001-11-18 00:12:56 davem Exp $
+/* $Id: traps.c,v 1.83 2002-01-11 08:45:38 davem Exp $
* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -1660,13 +1660,16 @@ void do_getpsr(struct pt_regs *regs)
}
}
+/* Only invoked on boot processor. */
void trap_init(void)
{
- /* Attach to the address space of init_task. */
+ /* Attach to the address space of init_task. On SMP we
+ * do this in smp.c:smp_callin for other cpus.
+ */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- /* NOTE: Other cpus have this done as they are started
- * up on SMP.
- */
+#ifdef CONFIG_SMP
+ current->cpu = hard_smp_processor_id();
+#endif
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 2ef6fc465..c69633139 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -517,7 +517,7 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
ret = do_bio_blockbacked(lo, bio, rbh);
- bio_endio(rbh, !ret, bio_sectors(bio));
+ bio_endio(rbh, !ret, bio_sectors(rbh));
loop_put_buffer(bio);
}
}
@@ -543,8 +543,7 @@ static int loop_thread(void *data)
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
- current->policy = SCHED_OTHER;
- current->nice = -20;
+ set_user_nice(current, -20);
spin_lock_irq(&lo->lo_lock);
lo->lo_state = Lo_bound;
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 8e87a8995..4c511001e 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -437,18 +437,18 @@ resubmit:
static int hci_usb_ctrl_msg(struct hci_usb *husb, struct sk_buff *skb)
{
struct urb *urb = husb->ctrl_urb;
- devrequest *dr = &husb->dev_req;
+ struct usb_ctrlrequest *dr = &husb->dev_req;
int pipe, status;
DBG("%s len %d", husb->hdev.name, skb->len);
pipe = usb_sndctrlpipe(husb->udev, 0);
- dr->requesttype = HCI_CTRL_REQ;
- dr->request = 0;
- dr->index = 0;
- dr->value = 0;
- dr->length = cpu_to_le16(skb->len);
+ dr->bRequestType = HCI_CTRL_REQ;
+ dr->bRequest = 0;
+ dr->wIndex = 0;
+ dr->wValue = 0;
+ dr->wLength = cpu_to_le16(skb->len);
FILL_CONTROL_URB(urb, husb->udev, pipe, (void*)dr, skb->data, skb->len,
hci_usb_ctrl, skb);
diff --git a/drivers/char/joystick/iforce.c b/drivers/char/joystick/iforce.c
index f428c868a..4998a1a7b 100644
--- a/drivers/char/joystick/iforce.c
+++ b/drivers/char/joystick/iforce.c
@@ -134,7 +134,7 @@ struct iforce {
#ifdef IFORCE_USB
struct usb_device *usbdev; /* USB transfer */
struct urb irq, out, ctrl;
- devrequest dr;
+ struct usb_ctrlrequest dr;
#endif
/* Force Feedback */
wait_queue_head_t wait;
@@ -283,7 +283,7 @@ static int get_id_packet(struct iforce *iforce, char *packet)
#ifdef IFORCE_USB
case IFORCE_USB:
- iforce->dr.request = packet[0];
+ iforce->dr.bRequest = packet[0];
iforce->ctrl.dev = iforce->usbdev;
set_current_state(TASK_INTERRUPTIBLE);
@@ -1027,9 +1027,9 @@ static void *iforce_usb_probe(struct usb_device *dev, unsigned int ifnum,
iforce->bus = IFORCE_USB;
iforce->usbdev = dev;
- iforce->dr.requesttype = USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE;
- iforce->dr.index = 0;
- iforce->dr.length = 16;
+ iforce->dr.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE;
+ iforce->dr.wIndex = 0;
+ iforce->dr.wLength = 16;
FILL_INT_URB(&iforce->irq, dev, usb_rcvintpipe(dev, epirq->bEndpointAddress),
iforce->data, 16, iforce_usb_irq, iforce, epirq->bInterval);
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 9ca09941c..e22ec9668 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -279,7 +279,6 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
pDrvData->IPCs[ipcnum].bIsHere = FALSE;
pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
- current->nice = -20; /* boost to provide priority timing */
#else
current->priority = 0x28; /* boost to provide priority timing */
#endif
diff --git a/drivers/ide/ataraid.c b/drivers/ide/ataraid.c
index 3c10b9fac..0170bbc53 100644
--- a/drivers/ide/ataraid.c
+++ b/drivers/ide/ataraid.c
@@ -121,11 +121,8 @@ struct buffer_head *ataraid_get_bhead(void)
void *ptr = NULL;
while (!ptr) {
ptr=kmalloc(sizeof(struct buffer_head),GFP_NOIO);
- if (!ptr) {
- __set_current_state(TASK_RUNNING);
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ if (!ptr)
+ yield();
}
return ptr;
}
@@ -137,11 +134,8 @@ struct ataraid_bh_private *ataraid_get_private(void)
void *ptr = NULL;
while (!ptr) {
ptr=kmalloc(sizeof(struct ataraid_bh_private),GFP_NOIO);
- if (!ptr) {
- __set_current_state(TASK_RUNNING);
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ if (!ptr)
+ yield();
}
return ptr;
}
diff --git a/drivers/isdn/avmb1/b1pci.c b/drivers/isdn/avmb1/b1pci.c
index 7c378f031..5894c35b1 100644
--- a/drivers/isdn/avmb1/b1pci.c
+++ b/drivers/isdn/avmb1/b1pci.c
@@ -30,7 +30,7 @@ static char *revision = "$Revision: 1.1.4.1.2.1 $";
/* ------------------------------------------------------------- */
-static struct pci_device_id b1pci_pci_tbl[] __initdata = {
+static struct pci_device_id b1pci_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID },
{ } /* Terminating entry */
};
@@ -404,7 +404,8 @@ static struct capi_driver b1pciv4_driver = {
static int ncards = 0;
-static int add_card(struct pci_dev *dev)
+static int __devinit b1pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
{
struct capi_driver *driver = &b1pci_driver;
struct capicardparams param;
@@ -456,13 +457,18 @@ static int add_card(struct pci_dev *dev)
return retval;
}
+static struct pci_driver b1pci_pci_driver = {
+ name: "b1pci",
+ id_table: b1pci_pci_tbl,
+ probe: b1pci_probe,
+};
+
static int __init b1pci_init(void)
{
struct capi_driver *driver = &b1pci_driver;
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
struct capi_driver *driverv4 = &b1pciv4_driver;
#endif
- struct pci_dev *dev = NULL;
char *p;
MOD_INC_USE_COUNT;
@@ -505,10 +511,7 @@ static int __init b1pci_init(void)
}
#endif
- while ((dev = pci_find_device(PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, dev))) {
- if (add_card(dev) == 0)
- ncards++;
- }
+ ncards = pci_register_driver(&b1pci_pci_driver);
if (ncards) {
printk(KERN_INFO "%s: %d B1-PCI card(s) detected\n",
driver->name, ncards);
@@ -516,6 +519,7 @@ static int __init b1pci_init(void)
return 0;
}
printk(KERN_ERR "%s: NO B1-PCI card detected\n", driver->name);
+ pci_unregister_driver(&b1pci_pci_driver);
detach_capi_driver(driver);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
detach_capi_driver(driverv4);
@@ -526,9 +530,10 @@ static int __init b1pci_init(void)
static void __exit b1pci_exit(void)
{
- detach_capi_driver(&b1pci_driver);
+ pci_unregister_driver(&b1pci_pci_driver);
+ detach_capi_driver(&b1pci_driver);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- detach_capi_driver(&b1pciv4_driver);
+ detach_capi_driver(&b1pciv4_driver);
#endif
}
diff --git a/drivers/isdn/avmb1/c4.c b/drivers/isdn/avmb1/c4.c
index 6cbe16d18..970eec149 100644
--- a/drivers/isdn/avmb1/c4.c
+++ b/drivers/isdn/avmb1/c4.c
@@ -38,9 +38,9 @@ static char *revision = "$Revision: 1.1.4.1.2.1 $";
static int suppress_pollack;
-static struct pci_device_id c4_pci_tbl[] __initdata = {
- { PCI_VENDOR_ID_DEC,PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4 },
- { PCI_VENDOR_ID_DEC,PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2 },
+static struct pci_device_id c4_pci_tbl[] __devinitdata = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 4 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 2 },
{ } /* Terminating entry */
};
@@ -1284,8 +1284,6 @@ static struct capi_driver c4_driver = {
add_card: 0, /* no add_card function */
};
-static int ncards = 0;
-
static int c4_attach_driver (struct capi_driver * driver)
{
char *p;
@@ -1308,46 +1306,49 @@ static int c4_attach_driver (struct capi_driver * driver)
return 0;
}
-static int __init search_cards(struct capi_driver * driver,
- int pci_id, int nr)
+static int __devinit c4_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
{
- struct pci_dev * dev = NULL;
- int retval = 0;
-
- while ((dev = pci_find_subsys(
- PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285,
- PCI_VENDOR_ID_AVM, pci_id, dev))) {
- struct capicardparams param;
-
- if (pci_enable_device(dev) < 0) {
- printk(KERN_ERR "%s: failed to enable AVM-C%d\n",
- driver->name, nr);
- continue;
- }
- pci_set_master(dev);
-
- param.port = pci_resource_start(dev, 1);
- param.irq = dev->irq;
- param.membase = pci_resource_start(dev, 0);
-
- printk(KERN_INFO
- "%s: PCI BIOS reports AVM-C%d at i/o %#x, irq %d, mem %#x\n",
- driver->name, nr, param.port, param.irq, param.membase);
- retval = c4_add_card(driver, &param, dev, nr);
- if (retval != 0) {
- printk(KERN_ERR
- "%s: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
- driver->name, nr, param.port, param.irq, param.membase);
- continue;
- }
- ncards++;
+ int nr = ent->driver_data;
+ struct capi_driver *driver = (nr == 2) ? &c2_driver : &c4_driver;
+ int retval = 0;
+ struct capicardparams param;
+
+ if (pci_enable_device(dev) < 0) {
+ printk(KERN_ERR "%s: failed to enable AVM-C%d\n",
+ driver->name, nr);
+ return -ENODEV;
}
- return retval;
+ pci_set_master(dev);
+
+ param.port = pci_resource_start(dev, 1);
+ param.irq = dev->irq;
+ param.membase = pci_resource_start(dev, 0);
+
+ printk(KERN_INFO
+ "%s: PCI BIOS reports AVM-C%d at i/o %#x, irq %d, mem %#x\n",
+ driver->name, nr, param.port, param.irq, param.membase);
+
+ retval = c4_add_card(driver, &param, dev, nr);
+ if (retval != 0) {
+ printk(KERN_ERR
+ "%s: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
+ driver->name, nr, param.port, param.irq, param.membase);
+ return -ENODEV;
+ }
+ return 0;
}
+static struct pci_driver c4_pci_driver = {
+ name: "c4",
+ id_table: c4_pci_tbl,
+ probe: c4_probe,
+};
+
static int __init c4_init(void)
{
int retval;
+ int ncards;
MOD_INC_USE_COUNT;
@@ -1363,21 +1364,7 @@ static int __init c4_init(void)
return retval;
}
- retval = search_cards(&c4_driver, PCI_DEVICE_ID_AVM_C4, 4);
- if (retval && ncards == 0) {
- detach_capi_driver(&c2_driver);
- detach_capi_driver(&c4_driver);
- MOD_DEC_USE_COUNT;
- return retval;
- }
- retval = search_cards(&c2_driver, PCI_DEVICE_ID_AVM_C2, 2);
- if (retval && ncards == 0) {
- detach_capi_driver(&c2_driver);
- detach_capi_driver(&c4_driver);
- MOD_DEC_USE_COUNT;
- return retval;
- }
-
+ ncards = pci_register_driver(&c4_pci_driver);
if (ncards) {
printk(KERN_INFO "%s: %d C4/C2 card(s) detected\n",
c4_driver.name, ncards);
@@ -1385,6 +1372,7 @@ static int __init c4_init(void)
return 0;
}
printk(KERN_ERR "%s: NO C4/C2 card detected\n", c4_driver.name);
+ pci_unregister_driver(&c4_pci_driver);
detach_capi_driver(&c4_driver);
detach_capi_driver(&c2_driver);
MOD_DEC_USE_COUNT;
@@ -1393,8 +1381,9 @@ static int __init c4_init(void)
static void __exit c4_exit(void)
{
- detach_capi_driver(&c2_driver);
- detach_capi_driver(&c4_driver);
+ pci_unregister_driver(&c4_pci_driver);
+ detach_capi_driver(&c2_driver);
+ detach_capi_driver(&c4_driver);
}
module_init(c4_init);
diff --git a/drivers/isdn/avmb1/t1pci.c b/drivers/isdn/avmb1/t1pci.c
index 4aa72aa32..e07ffe0d3 100644
--- a/drivers/isdn/avmb1/t1pci.c
+++ b/drivers/isdn/avmb1/t1pci.c
@@ -33,7 +33,7 @@ static char *revision = "$Revision: 1.1.4.1.2.1 $";
/* ------------------------------------------------------------- */
-static struct pci_device_id t1pci_pci_tbl[] __initdata = {
+static struct pci_device_id t1pci_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, PCI_ANY_ID, PCI_ANY_ID },
{ } /* Terminating entry */
};
@@ -225,27 +225,63 @@ static struct capi_driver t1pci_driver = {
add_card: 0, /* no add_card function */
};
-static int ncards = 0;
+/* ------------------------------------------------------------- */
+
+static int __devinit t1pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ struct capi_driver *driver = &t1pci_driver;
+ struct capicardparams param;
+ int retval;
+
+ if (pci_enable_device(dev) < 0) {
+ printk(KERN_ERR "%s: failed to enable AVM-T1-PCI\n",
+ driver->name);
+ return -ENODEV;
+ }
+ pci_set_master(dev);
+
+ param.port = pci_resource_start(dev, 1);
+ param.irq = dev->irq;
+ param.membase = pci_resource_start(dev, 0);
+
+ printk(KERN_INFO
+ "%s: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
+ driver->name, param.port, param.irq, param.membase);
+
+ retval = t1pci_add_card(driver, &param, dev);
+ if (retval != 0) {
+ printk(KERN_ERR
+ "%s: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
+ driver->name, param.port, param.irq, param.membase);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static struct pci_driver t1pci_pci_driver = {
+ name: "t1pci",
+ id_table: t1pci_pci_tbl,
+ probe: t1pci_probe,
+};
static int __init t1pci_init(void)
{
struct capi_driver *driver = &t1pci_driver;
- struct pci_dev *dev = NULL;
char *p;
- int retval;
+ int ncards;
MOD_INC_USE_COUNT;
if ((p = strchr(revision, ':')) != 0 && p[1]) {
- strncpy(driver->revision, p + 2, sizeof(driver->revision));
- driver->revision[sizeof(driver->revision)-1] = 0;
+ strncpy(driver->revision, p + 2, sizeof(driver->revision) - 1);
if ((p = strchr(driver->revision, '$')) != 0 && p > driver->revision)
*(p-1) = 0;
}
printk(KERN_INFO "%s: revision %s\n", driver->name, driver->revision);
- di = attach_capi_driver(driver);
+ di = attach_capi_driver(&t1pci_driver);
if (!di) {
printk(KERN_ERR "%s: failed to attach capi_driver\n",
driver->name);
@@ -253,32 +289,7 @@ static int __init t1pci_init(void)
return -EIO;
}
- while ((dev = pci_find_device(PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, dev))) {
- struct capicardparams param;
-
- if (pci_enable_device(dev) < 0) {
- printk(KERN_ERR "%s: failed to enable AVM-T1-PCI\n",
- driver->name);
- continue;
- }
- pci_set_master(dev);
-
- param.port = pci_resource_start(dev, 1);
- param.irq = dev->irq;
- param.membase = pci_resource_start(dev, 0);
-
- printk(KERN_INFO
- "%s: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
- driver->name, param.port, param.irq, param.membase);
- retval = t1pci_add_card(driver, &param, dev);
- if (retval != 0) {
- printk(KERN_ERR
- "%s: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
- driver->name, param.port, param.irq, param.membase);
- continue;
- }
- ncards++;
- }
+ ncards = pci_register_driver(&t1pci_pci_driver);
if (ncards) {
printk(KERN_INFO "%s: %d T1-PCI card(s) detected\n",
driver->name, ncards);
@@ -286,6 +297,7 @@ static int __init t1pci_init(void)
return 0;
}
printk(KERN_ERR "%s: NO T1-PCI card detected\n", driver->name);
+ pci_unregister_driver(&t1pci_pci_driver);
detach_capi_driver(&t1pci_driver);
MOD_DEC_USE_COUNT;
return -ENODEV;
@@ -293,7 +305,8 @@ static int __init t1pci_init(void)
static void __exit t1pci_exit(void)
{
- detach_capi_driver(&t1pci_driver);
+ pci_unregister_driver(&t1pci_pci_driver);
+ detach_capi_driver(&t1pci_driver);
}
module_init(t1pci_init);
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
index 48498c6f8..2881487aa 100644
--- a/drivers/isdn/hisax/st5481.h
+++ b/drivers/isdn/hisax/st5481.h
@@ -309,7 +309,7 @@ static inline int fifo_remove(struct fifo *fifo)
typedef void (*ctrl_complete_t)(void *);
typedef struct ctrl_msg {
- devrequest dr;
+ struct usb_ctrlrequest dr;
ctrl_complete_t complete;
void *context;
} ctrl_msg;
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 902b6be6b..0aa5edf99 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -41,9 +41,9 @@ static void usb_next_ctrl_msg(struct urb *urb,
(unsigned char *)&ctrl->msg_fifo.data[r_index];
DBG(1,"request=0x%02x,value=0x%04x,index=%x",
- ((struct ctrl_msg *)urb->setup_packet)->dr.request,
- ((struct ctrl_msg *)urb->setup_packet)->dr.value,
- ((struct ctrl_msg *)urb->setup_packet)->dr.index);
+ ((struct ctrl_msg *)urb->setup_packet)->dr.bRequest,
+ ((struct ctrl_msg *)urb->setup_packet)->dr.wValue,
+ ((struct ctrl_msg *)urb->setup_packet)->dr.wIndex);
// Prepare the URB
urb->dev = adapter->usb_dev;
@@ -69,11 +69,11 @@ void usb_ctrl_msg(struct st5481_adapter *adapter,
}
ctrl_msg = &ctrl->msg_fifo.data[w_index];
- ctrl_msg->dr.requesttype = requesttype;
- ctrl_msg->dr.request = request;
- ctrl_msg->dr.value = cpu_to_le16p(&value);
- ctrl_msg->dr.index = cpu_to_le16p(&index);
- ctrl_msg->dr.length = 0;
+ ctrl_msg->dr.bRequestType = requesttype;
+ ctrl_msg->dr.bRequest = request;
+ ctrl_msg->dr.wValue = cpu_to_le16p(&value);
+ ctrl_msg->dr.wIndex = cpu_to_le16p(&index);
+ ctrl_msg->dr.wLength = 0;
ctrl_msg->complete = complete;
ctrl_msg->context = context;
@@ -140,17 +140,17 @@ static void usb_ctrl_complete(struct urb *urb)
ctrl_msg = (struct ctrl_msg *)urb->setup_packet;
- if (ctrl_msg->dr.request == USB_REQ_CLEAR_FEATURE) {
+ if (ctrl_msg->dr.bRequest == USB_REQ_CLEAR_FEATURE) {
/* Special case handling for pipe reset */
- le16_to_cpus(&ctrl_msg->dr.index);
+ le16_to_cpus(&ctrl_msg->dr.wIndex);
usb_endpoint_running(adapter->usb_dev,
- ctrl_msg->dr.index & ~USB_DIR_IN,
- (ctrl_msg->dr.index & USB_DIR_IN) == 0);
+ ctrl_msg->dr.wIndex & ~USB_DIR_IN,
+ (ctrl_msg->dr.wIndex & USB_DIR_IN) == 0);
/* toggle is reset on clear */
usb_settoggle(adapter->usb_dev,
- ctrl_msg->dr.index & ~USB_DIR_IN,
- (ctrl_msg->dr.index & USB_DIR_IN) == 0,
+ ctrl_msg->dr.wIndex & ~USB_DIR_IN,
+ (ctrl_msg->dr.wIndex & USB_DIR_IN) == 0,
0);
@@ -560,7 +560,7 @@ void st5481_release_in(struct st5481_in *in)
*/
int st5481_isoc_flatten(struct urb *urb)
{
- piso_packet_descriptor_t pipd,pend;
+ iso_packet_descriptor_t *pipd,*pend;
unsigned char *src,*dst;
unsigned int len;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 723000f28..7eeb06283 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2941,8 +2941,6 @@ int md_thread(void * arg)
* bdflush, otherwise bdflush will deadlock if there are too
* many dirty RAID5 blocks.
*/
- current->policy = SCHED_OTHER;
- current->nice = -20;
unlock_kernel();
complete(thread->event);
@@ -3390,11 +3388,6 @@ recheck:
"(but not more than %d KB/sec) for reconstruction.\n",
sysctl_speed_limit_max);
- /*
- * Resync has low priority.
- */
- current->nice = 19;
-
is_mddev_idle(mddev); /* this also initializes IO event counters */
for (m = 0; m < SYNC_MARKS; m++) {
mark[m] = jiffies;
@@ -3471,16 +3464,13 @@ recheck:
currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > sysctl_speed_limit_min) {
- current->nice = 19;
-
if ((currspeed > sysctl_speed_limit_max) ||
!is_mddev_idle(mddev)) {
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(HZ/4);
goto repeat;
}
- } else
- current->nice = -20;
+ }
}
printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev));
err = 0;
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index d59f79cb8..7b6186fc8 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -1393,10 +1393,8 @@ cleanup_module(void)
/* First of all: check for active disciplines and hangup them.
*/
do {
- if (busy) {
- current->time_slice = 0;
- schedule();
- }
+ if (busy)
+ yield();
busy = 0;
local_bh_disable();
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 367d9716a..e8f8c90bf 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -28,19 +28,33 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds
unsigned char *scsi_bios_ptable(kdev_t dev)
{
+ struct block_device *bdev;
unsigned char *res = kmalloc(66, GFP_KERNEL);
kdev_t rdev = mk_kdev(major(dev), minor(dev) & ~0x0f);
if (res) {
- struct buffer_head *bh = bread(rdev, 0, block_size(rdev));
- if (bh) {
- memcpy(res, bh->b_data + 0x1be, 66);
- } else {
- kfree(res);
- res = NULL;
- }
+ struct buffer_head *bh;
+ int err;
+
+ bdev = bdget(kdev_t_to_nr(rdev));
+ if (!bdev)
+ goto fail;
+ err = blkdev_get(bdev, FMODE_READ, 0, BDEV_FILE);
+ if (err)
+ goto fail;
+ bh = __bread(bdev, 0, block_size(rdev));
+ if (!bh)
+ goto fail2;
+ memcpy(res, bh->b_data + 0x1be, 66);
+ brelse(bh);
+ blkdev_put(bdev, BDEV_FILE);
}
return res;
+fail2:
+ blkdev_put(bdev, BDEV_FILE);
+fail:
+ kfree(res);
+ return NULL;
}
/*
diff --git a/drivers/usb/Config.in b/drivers/usb/Config.in
index 1be04d52c..950c0a3e2 100644
--- a/drivers/usb/Config.in
+++ b/drivers/usb/Config.in
@@ -33,6 +33,9 @@ dep_tristate ' OHCI (Compaq, iMacs, OPTi, SiS, ALi, ...) support' CONFIG_USB_OH
comment 'USB Device Class drivers'
dep_tristate ' USB Audio support' CONFIG_USB_AUDIO $CONFIG_USB $CONFIG_SOUND
dep_tristate ' USB Bluetooth support (EXPERIMENTAL)' CONFIG_USB_BLUETOOTH $CONFIG_USB $CONFIG_EXPERIMENTAL
+if [ "$CONFIG_SCSI" = "n" ]; then
+ comment ' SCSI support is needed for USB Storage'
+fi
dep_tristate ' USB Mass Storage support' CONFIG_USB_STORAGE $CONFIG_USB $CONFIG_SCSI
dep_mbool ' USB Mass Storage verbose debug' CONFIG_USB_STORAGE_DEBUG $CONFIG_USB_STORAGE
dep_mbool ' Datafab MDCFE-B Compact Flash Reader support' CONFIG_USB_STORAGE_DATAFAB $CONFIG_USB_STORAGE $CONFIG_EXPERIMENTAL
@@ -96,5 +99,6 @@ source drivers/usb/serial/Config.in
comment 'USB Miscellaneous drivers'
dep_tristate ' USB Diamond Rio500 support (EXPERIMENTAL)' CONFIG_USB_RIO500 $CONFIG_USB $CONFIG_EXPERIMENTAL
+dep_tristate ' USB Auerswald ISDN support (EXPERIMENTAL)' CONFIG_USB_AUERSWALD $CONFIG_USB $CONFIG_EXPERIMENTAL
endmenu
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index b09211ea1..54883b59b 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -40,6 +40,12 @@ obj- :=
# Each configuration option enables a list of files.
obj-$(CONFIG_USB) += usbcore.o
+
+# EHCI needs to be linked before the other HCD drivers
+ifeq ($(CONFIG_USB_EHCI_HCD),y)
+ obj-y += hcd/ehci-hcd.o
+endif
+
obj-$(CONFIG_USB_UHCI) += usb-uhci.o
obj-$(CONFIG_USB_UHCI_ALT) += uhci.o
obj-$(CONFIG_USB_OHCI) += usb-ohci.o
@@ -73,6 +79,7 @@ obj-$(CONFIG_USB_MICROTEK) += microtek.o
obj-$(CONFIG_USB_HPUSBSCSI) += hpusbscsi.o
obj-$(CONFIG_USB_BLUETOOTH) += bluetooth.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
+obj-$(CONFIG_USB_AUERSWALD) += auerswald.o
# Object files in subdirectories
mod-subdirs := serial hcd
@@ -81,10 +88,6 @@ subdir-$(CONFIG_USB_EHCI_HCD) += hcd
subdir-$(CONFIG_USB_SERIAL) += serial
subdir-$(CONFIG_USB_STORAGE) += storage
-ifeq ($(CONFIG_USB_EHCI_HCD),y)
- obj-y += hcd/ehci-hcd.o
-endif
-
ifeq ($(CONFIG_USB_SERIAL),y)
obj-y += serial/usb-serial.o
endif
diff --git a/drivers/usb/acm.c b/drivers/usb/acm.c
index 9eee6a5f9..e8834c190 100644
--- a/drivers/usb/acm.c
+++ b/drivers/usb/acm.c
@@ -184,7 +184,7 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value, void *buf, int
static void acm_ctrl_irq(struct urb *urb)
{
struct acm *acm = urb->context;
- devrequest *dr = urb->transfer_buffer;
+ struct usb_ctrlrequest *dr = urb->transfer_buffer;
unsigned char *data = (unsigned char *)(dr + 1);
int newctrl;
@@ -195,7 +195,7 @@ static void acm_ctrl_irq(struct urb *urb)
return;
}
- switch (dr->request) {
+ switch (dr->bRequest) {
case ACM_IRQ_NETWORK:
@@ -223,7 +223,7 @@ static void acm_ctrl_irq(struct urb *urb)
default:
dbg("unknown control event received: request %d index %d len %d data0 %d data1 %d",
- dr->request, dr->index, dr->length, data[0], data[1]);
+ dr->bRequest, dr->wIndex, dr->wLength, data[0], data[1]);
return;
}
}
diff --git a/drivers/usb/auerswald.c b/drivers/usb/auerswald.c
new file mode 100644
index 000000000..417d5f083
--- /dev/null
+++ b/drivers/usb/auerswald.c
@@ -0,0 +1,2156 @@
+/*****************************************************************************/
+/*
+ * auerswald.c -- Auerswald PBX/System Telephone usb driver.
+ *
+ * Copyright (C) 2001 Wolfgang Mües (wmues@nexgo.de)
+ *
+ * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl)
+ * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+ /*****************************************************************************/
+
+/* Standard Linux module include files */
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/devfs_fs_kernel.h>
+#undef DEBUG /* include debug macros until it's done */
+#include <linux/usb.h>
+
+/*-------------------------------------------------------------------*/
+/* Debug support */
+#ifdef DEBUG
+#define dump( adr, len) \
+do { \
+ unsigned int u; \
+ printk (KERN_DEBUG); \
+ for (u = 0; u < len; u++) \
+ printk (" %02X", adr[u] & 0xFF); \
+ printk ("\n"); \
+} while (0)
+#else
+#define dump( adr, len)
+#endif
+
+/*-------------------------------------------------------------------*/
+/* Version Information */
+#define DRIVER_VERSION "0.9.9"
+#define DRIVER_AUTHOR "Wolfgang Mües <wmues@nexgo.de>"
+#define DRIVER_DESC "Auerswald PBX/System Telephone usb driver"
+
+/*-------------------------------------------------------------------*/
+/* Private declarations for Auerswald USB driver */
+
+/* Auerswald Vendor ID */
+#define ID_AUERSWALD 0x09BF
+
+#ifndef AUER_MINOR_BASE /* allow external override */
+#define AUER_MINOR_BASE 80 /* auerswald driver minor number */
+#endif
+
+/* we can have up to this number of device plugged in at once */
+#define AUER_MAX_DEVICES 16
+
+/* prefix for the device descriptors in /dev/usb */
+#define AU_PREFIX "auer"
+
+/* Number of read buffers for each device */
+#define AU_RBUFFERS 10
+
+/* Number of chain elements for each control chain */
+#define AUCH_ELEMENTS 20
+
+/* Number of retries in communication */
+#define AU_RETRIES 10
+
+/*-------------------------------------------------------------------*/
+/* vendor specific protocol */
+/* Header Byte */
+#define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */
+#define AUH_DIRECT 0x00 /* data is for USB device */
+#define AUH_INDIRECT 0x80 /* USB device is relay */
+
+#define AUH_SPLITMASK 0x40 /* mask for split bit */
+#define AUH_UNSPLIT 0x00 /* data block is full-size */
+#define AUH_SPLIT 0x40 /* data block is part of a larger one,
+ split-byte follows */
+
+#define AUH_TYPEMASK 0x3F /* mask for type of data transfer */
+#define AUH_TYPESIZE 0x40 /* different types */
+#define AUH_DCHANNEL 0x00 /* D channel data */
+#define AUH_B1CHANNEL 0x01 /* B1 channel transparent */
+#define AUH_B2CHANNEL 0x02 /* B2 channel transparent */
+/* 0x03..0x0F reserved for driver internal use */
+#define AUH_COMMAND 0x10 /* Command channel */
+#define AUH_BPROT 0x11 /* Configuration block protocol */
+#define AUH_DPROTANA 0x12 /* D channel protocol analyzer */
+#define AUH_TAPI 0x13 /* telephone api data (ATD) */
+/* 0x14..0x3F reserved for other protocols */
+#define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */
+#define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */
+
+#define AUH_SIZE 1 /* Size of Header Byte */
+
+/* Split Byte. Only present if split bit in header byte set.*/
+#define AUS_STARTMASK 0x80 /* mask for first block of a split frame */
+#define AUS_FIRST 0x80 /* first block */
+#define AUS_FOLLOW 0x00 /* following block */
+
+#define AUS_ENDMASK 0x40 /* mask for last block of a split frame */
+#define AUS_END 0x40 /* last block */
+#define AUS_NOEND 0x00 /* not the last block */
+
+#define AUS_LENMASK 0x3F /* mask for block length information */
+
+/* Request types */
+#define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */
+#define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */
+
+/* Vendor Requests */
+#define AUV_GETINFO 0x00 /* GetDeviceInfo */
+#define AUV_WBLOCK 0x01 /* Write Block */
+#define AUV_RBLOCK 0x02 /* Read Block */
+#define AUV_CHANNELCTL 0x03 /* Channel Control */
+#define AUV_DUMMY 0x04 /* Dummy Out for retry */
+
+/* Device Info Types */
+#define AUDI_NUMBCH 0x0000 /* Number of supported B channels */
+#define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */
+#define AUDI_MBCTRANS 0x0002 /* max. Blocklength of control transfer */
+
+/* Interrupt endpoint definitions */
+#define AU_IRQENDP 1 /* Endpoint number */
+#define AU_IRQCMDID 16 /* Command-block ID */
+#define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */
+#define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */
+
+/* Device String Descriptors */
+#define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */
+#define AUSI_DEVICE 2 /* Name of the Device */
+#define AUSI_SERIALNR 3 /* Serial Number */
+#define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */
+
+#define AUSI_DLEN 100 /* Max. Length of Device Description */
+
+#define AUV_RETRY 0x101 /* First Firmware version which can do control retries */
+
+/*-------------------------------------------------------------------*/
+/* External data structures / Interface */
+typedef struct
+{
+ char *buf; /* return buffer for string contents */
+ unsigned int bsize; /* size of return buffer */
+} audevinfo_t,*paudevinfo_t;
+
+/* IO controls */
+#define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */
+#define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */
+#define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */
+#define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. buffer length for the device */
+#define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */
+#define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */
+#define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if Transmit channel ready to send */
+/* 'U' 0xF7..0xFF reserved */
+
+/*-------------------------------------------------------------------*/
+/* Internal data structures */
+
+/* ..................................................................*/
+/* urb chain element */
+struct auerchain; /* forward for circular reference */
+typedef struct
+{
+ struct auerchain *chain; /* pointer to the chain to which this element belongs */
+ urb_t * urbp; /* pointer to attached urb */
+ void *context; /* saved URB context */
+ usb_complete_t complete; /* saved URB completion function */
+ struct list_head list; /* to include element into a list */
+} auerchainelement_t,*pauerchainelement_t;
+
+/* urb chain */
+typedef struct auerchain
+{
+ pauerchainelement_t active; /* element which is submitted to urb */
+ spinlock_t lock; /* protection against interrupts */
+ struct list_head waiting_list; /* list of waiting elements */
+ struct list_head free_list; /* list of available elements */
+} auerchain_t,*pauerchain_t;
+
+/* ...................................................................*/
+/* buffer element */
+struct auerbufctl; /* forward */
+typedef struct
+{
+ char *bufp; /* reference to allocated data buffer */
+ unsigned int len; /* number of characters in data buffer */
+ unsigned int retries; /* for urb retries */
+ struct usb_ctrlrequest *dr; /* for setup data in control messages */
+ urb_t * urbp; /* USB urb */
+ struct auerbufctl *list; /* pointer to list */
+ struct list_head buff_list; /* reference to next buffer in list */
+} auerbuf_t,*pauerbuf_t;
+
+/* buffer list control block */
+typedef struct auerbufctl
+{
+ spinlock_t lock; /* protection in interrupt */
+ struct list_head free_buff_list;/* free buffers */
+ struct list_head rec_buff_list; /* buffers with receive data */
+} auerbufctl_t,*pauerbufctl_t;
+
+/* ...................................................................*/
+/* service context */
+struct auerscon; /* forward */
+typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t);
+typedef void (*auer_disconn_t) (struct auerscon*);
+typedef struct auerscon
+{
+ unsigned int id; /* protocol service id AUH_xxxx */
+ auer_dispatch_t dispatch; /* dispatch read buffer */
+ auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */
+} auerscon_t,*pauerscon_t;
+
+/* ...................................................................*/
+/* USB device context */
+typedef struct
+{
+ struct semaphore mutex; /* protection in user context */
+ char name[16]; /* name of the /dev/usb entry */
+ unsigned int dtindex; /* index in the device table */
+ devfs_handle_t devfs; /* devfs device node */
+ struct usb_device * usbdev; /* USB device handle */
+ int open_count; /* count the number of open character channels */
+ char dev_desc[AUSI_DLEN];/* for storing a textual description */
+ unsigned int maxControlLength; /* max. Length of control packet (without header) */
+ urb_t * inturbp; /* interrupt urb */
+ char * intbufp; /* data buffer for interrupt urb */
+ unsigned int irqsize; /* size of interrupt endpoint 1 */
+ struct auerchain controlchain; /* for chaining of control messages */
+ auerbufctl_t bufctl; /* Buffer control for control transfers */
+ pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */
+ unsigned int version; /* Version of the device */
+ wait_queue_head_t bufferwait; /* wait for a control buffer */
+} auerswald_t,*pauerswald_t;
+
+/* the global usb devfs handle */
+extern devfs_handle_t usb_devfs_handle;
+
+/* array of pointers to our devices that are currently connected */
+static pauerswald_t dev_table[AUER_MAX_DEVICES];
+
+/* lock to protect the dev_table structure */
+static struct semaphore dev_table_mutex;
+
/* ................................................................... */
/* character device context.
   One instance per open() of the character device; auerdev links back
   to the shared device context (checked against NULL after disconnect). */
typedef struct
{
	struct semaphore mutex;		/* protection in user context */
	pauerswald_t auerdev;		/* context pointer of assigned device */
	auerbufctl_t bufctl;		/* controls the buffer chain */
	auerscon_t scontext;		/* service context */
	wait_queue_head_t readwait;	/* for synchronous reading */
	struct semaphore readmutex;	/* protection against multiple reads */
	pauerbuf_t readbuf;		/* buffer held for partial reading */
	unsigned int readoffset;	/* current offset in readbuf */
	unsigned int removed;		/* is != 0 if device is removed */
} auerchar_t,*pauerchar_t;
+
+
/*-------------------------------------------------------------------*/
/* Forwards */
/* (needed because the read completion and the retry path call each other) */
static void auerswald_ctrlread_complete (urb_t * urb);
static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp);
+
+
/*-------------------------------------------------------------------*/
/* USB chain helper functions */
/* -------------------------- */

/* completion function for chained urbs
   Called from the USB completion path (may be interrupt context, hence
   spin_lock_irqsave). Restores the submitter's context/complete fields,
   runs the original completion handler, returns the chain element to
   the free list, and submits the next waiting urb, if any. A submit
   error is fed back through this function recursively. */
static void auerchain_complete (urb_t * urb)
{
	unsigned long flags;
	int result;

	/* get pointer to element and to chain */
	pauerchainelement_t acep = (pauerchainelement_t) urb->context;
	pauerchain_t acp = acep->chain;

	/* restore original entries in urb */
	urb->context = acep->context;
	urb->complete = acep->complete;

	dbg ("auerchain_complete called");

	/* call original completion function
	   NOTE: this function may lead to more urbs submitted into the chain.
	   (no chain lock at calling complete()!)
	   acp->active != NULL is protecting us against recursion.*/
	urb->complete (urb);

	/* detach element from chain data structure */
	spin_lock_irqsave (&acp->lock, flags);
	if (acp->active != acep) /* paranoia debug check */
		dbg ("auerchain_complete: completion on non-active element called!");
	else
		acp->active = NULL;

	/* add the used chain element to the list of free elements */
	list_add_tail (&acep->list, &acp->free_list);
	acep = NULL;

	/* is there a new element waiting in the chain? */
	if (!acp->active && !list_empty (&acp->waiting_list)) {
		/* yes: get the entry */
		struct list_head *tmp = acp->waiting_list.next;
		list_del (tmp);
		acep = list_entry (tmp, auerchainelement_t, list);
		acp->active = acep;
	}
	spin_unlock_irqrestore (&acp->lock, flags);

	/* submit the new urb */
	if (acep) {
		urb = acep->urbp;
		dbg ("auerchain_complete: submitting next urb from chain");
		urb->status = 0; /* needed! */
		result = usb_submit_urb( urb);

		/* check for submit errors */
		if (result) {
			urb->status = result;
			dbg("auerchain_complete: usb_submit_urb with error code %d", result);
			/* and do error handling via *this* completion function (recursive) */
			auerchain_complete( urb);
		}
	} else {
		/* simple return without submitting a new urb.
		   The empty chain is detected with acp->active == NULL. */
	};
}
+
+
/* submit function for chained urbs
   this function may be called from completion context or from user space!
   early = 1 -> submit in front of chain (used by the retry path)
   Takes a preallocated chain element, saves the urb's context/complete
   fields into it, and either queues the urb behind the active transfer
   or starts it at once when the chain is idle.
   Returns 0, or -ENOMEM if no free chain element is available.
*/
static int auerchain_submit_urb_list (pauerchain_t acp, urb_t * urb, int early)
{
	int result;
	unsigned long flags;
	pauerchainelement_t acep = NULL;

	dbg ("auerchain_submit_urb called");

	/* try to get a chain element */
	spin_lock_irqsave (&acp->lock, flags);
	if (!list_empty (&acp->free_list)) {
		/* yes: get the entry */
		struct list_head *tmp = acp->free_list.next;
		list_del (tmp);
		acep = list_entry (tmp, auerchainelement_t, list);
	}
	spin_unlock_irqrestore (&acp->lock, flags);

	/* if no chain element available: return with error */
	if (!acep) {
		return -ENOMEM;
	}

	/* fill in the new chain element values */
	acep->chain = acp;
	acep->context = urb->context;
	acep->complete = urb->complete;
	acep->urbp = urb;
	INIT_LIST_HEAD (&acep->list);

	/* modify urb: completion is redirected through auerchain_complete */
	urb->context = acep;
	urb->complete = auerchain_complete;
	urb->status = -EINPROGRESS; /* usb_submit_urb does this, too */

	/* add element to chain - or start it immediately */
	spin_lock_irqsave (&acp->lock, flags);
	if (acp->active) {
		/* there is traffic in the chain, simple add element to chain */
		if (early) {
			dbg ("adding new urb to head of chain");
			list_add (&acep->list, &acp->waiting_list);
		} else {
			dbg ("adding new urb to end of chain");
			list_add_tail (&acep->list, &acp->waiting_list);
		}
		acep = NULL;
	} else {
		/* the chain is empty. Prepare restart */
		acp->active = acep;
	}
	/* Spin has to be removed before usb_submit_urb! */
	spin_unlock_irqrestore (&acp->lock, flags);

	/* Submit urb if immediate restart */
	if (acep) {
		dbg("submitting urb immediate");
		urb->status = 0; /* needed! */
		result = usb_submit_urb( urb);
		/* check for submit errors */
		if (result) {
			urb->status = result;
			dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result);
			/* and do error handling via completion function */
			auerchain_complete( urb);
		}
	}

	return 0;
}
+
/* submit function for chained urbs
   this function may be called from completion context or from user space!
   Convenience wrapper: appends the urb at the end of the chain
   (early == 0). */
static int auerchain_submit_urb (pauerchain_t acp, urb_t * urb)
{
	return auerchain_submit_urb_list (acp, urb, 0);
}
+
/* cancel an urb which is submitted to the chain
   the result is 0 if the urb is cancelled, or -EINPROGRESS if
   USB_ASYNC_UNLINK is set and the function is successfully started.
   A still-waiting urb is taken off the chain and completed here with
   status -ENOENT; the currently active urb is handed to usb_unlink_urb().
*/
static int auerchain_unlink_urb (pauerchain_t acp, urb_t * urb)
{
	unsigned long flags;
	urb_t * urbp;
	pauerchainelement_t acep;
	struct list_head *tmp;

	dbg ("auerchain_unlink_urb called");

	/* search the chain of waiting elements */
	spin_lock_irqsave (&acp->lock, flags);
	list_for_each (tmp, &acp->waiting_list) {
		acep = list_entry (tmp, auerchainelement_t, list);
		if (acep->urbp == urb) {
			list_del (tmp);
			/* restore the caller's fields before completing */
			urb->context = acep->context;
			urb->complete = acep->complete;
			list_add_tail (&acep->list, &acp->free_list);
			spin_unlock_irqrestore (&acp->lock, flags);
			dbg ("unlink waiting urb");
			urb->status = -ENOENT;
			urb->complete (urb);
			return 0;
		}
	}
	/* not found. */
	spin_unlock_irqrestore (&acp->lock, flags);

	/* get the active urb */
	acep = acp->active;
	if (acep) {
		urbp = acep->urbp;

		/* check if we have to cancel the active urb */
		if (urbp == urb) {
			/* note that there is a race condition between the check above
			   and the unlink() call because of no lock. This race is harmless,
			   because the usb module will detect the unlink() after completion.
			   We can't use the acp->lock here because the completion function
			   wants to grab it.
			*/
			dbg ("unlink active urb");
			return usb_unlink_urb (urbp);
		}
	}

	/* not found anyway
	   ... is some kind of success
	*/
	dbg ("urb to unlink not found in chain");
	return 0;
}
+
/* cancel all urbs which are in the chain.
   this function must not be called from interrupt or completion handler.
   Each waiting urb is completed with -ENOENT; the chain lock is dropped
   around the completion call and the list re-read from its head
   afterwards, so completions may safely touch the chain. The active urb
   is unlinked with USB_ASYNC_UNLINK cleared, i.e. synchronously.
*/
static void auerchain_unlink_all (pauerchain_t acp)
{
	unsigned long flags;
	urb_t * urbp;
	pauerchainelement_t acep;

	dbg ("auerchain_unlink_all called");

	/* clear the chain of waiting elements */
	spin_lock_irqsave (&acp->lock, flags);
	while (!list_empty (&acp->waiting_list)) {
		/* get the next entry */
		struct list_head *tmp = acp->waiting_list.next;
		list_del (tmp);
		acep = list_entry (tmp, auerchainelement_t, list);
		urbp = acep->urbp;
		urbp->context = acep->context;
		urbp->complete = acep->complete;
		list_add_tail (&acep->list, &acp->free_list);
		spin_unlock_irqrestore (&acp->lock, flags);
		dbg ("unlink waiting urb");
		urbp->status = -ENOENT;
		urbp->complete (urbp);
		spin_lock_irqsave (&acp->lock, flags);
	}
	spin_unlock_irqrestore (&acp->lock, flags);

	/* clear the active urb */
	acep = acp->active;
	if (acep) {
		urbp = acep->urbp;
		urbp->transfer_flags &= ~USB_ASYNC_UNLINK;
		dbg ("unlink active urb");
		usb_unlink_urb (urbp);
	}
}
+
+
/* free the chain.
   this function must not be called from interrupt or completion handler.
   Cancels all pending urbs first, then releases every preallocated
   chain element; the lock is dropped around each kfree. */
static void auerchain_free (pauerchain_t acp)
{
	unsigned long flags;
	pauerchainelement_t acep;

	dbg ("auerchain_free called");

	/* first, cancel all pending urbs */
	auerchain_unlink_all (acp);

	/* free the elements */
	spin_lock_irqsave (&acp->lock, flags);
	while (!list_empty (&acp->free_list)) {
		/* get the next entry */
		struct list_head *tmp = acp->free_list.next;
		list_del (tmp);
		spin_unlock_irqrestore (&acp->lock, flags);
		acep = list_entry (tmp, auerchainelement_t, list);
		kfree (acep);
		spin_lock_irqsave (&acp->lock, flags);
	}
	spin_unlock_irqrestore (&acp->lock, flags);
}
+
+
+/* Init the chain control structure */
+static void auerchain_init (pauerchain_t acp)
+{
+ /* init the chain data structure */
+ acp->active = NULL;
+ spin_lock_init (&acp->lock);
+ INIT_LIST_HEAD (&acp->waiting_list);
+ INIT_LIST_HEAD (&acp->free_list);
+}
+
+/* setup a chain.
+ It is assumed that there is no concurrency while setting up the chain
+ requirement: auerchain_init()
+*/
+static int auerchain_setup (pauerchain_t acp, unsigned int numElements)
+{
+ pauerchainelement_t acep;
+
+ dbg ("auerchain_setup called with %d elements", numElements);
+
+ /* fill the list of free elements */
+ for (;numElements; numElements--) {
+ acep = (pauerchainelement_t) kmalloc (sizeof (auerchainelement_t), GFP_KERNEL);
+ if (!acep) goto ac_fail;
+ memset (acep, 0, sizeof (auerchainelement_t));
+ INIT_LIST_HEAD (&acep->list);
+ list_add_tail (&acep->list, &acp->free_list);
+ }
+ return 0;
+
+ac_fail:/* free the elements */
+ while (!list_empty (&acp->free_list)) {
+ /* get the next entry */
+ struct list_head *tmp = acp->free_list.next;
+ list_del (tmp);
+ acep = list_entry (tmp, auerchainelement_t, list);
+ kfree (acep);
+ }
+ return -ENOMEM;
+}
+
+
+/* completion handler for synchronous chained URBs */
+static void auerchain_blocking_completion (urb_t *urb)
+{
+ wait_queue_head_t *wakeup = (wait_queue_head_t *)urb->context;
+ wake_up (wakeup);
+}
+
+
+/* Starts chained urb and waits for completion or timeout */
+static int auerchain_start_wait_urb (pauerchain_t acp, urb_t *urb, int timeout, int* actual_length)
+{
+ DECLARE_WAITQUEUE (wait, current);
+ DECLARE_WAIT_QUEUE_HEAD (wqh);
+ int status;
+
+ dbg ("auerchain_start_wait_urb called");
+ init_waitqueue_head (&wqh);
+ current->state = TASK_INTERRUPTIBLE;
+ add_wait_queue (&wqh, &wait);
+ urb->context = &wqh;
+ status = auerchain_submit_urb ( acp, urb);
+ if (status) {
+ /* something went wrong */
+ current->state = TASK_RUNNING;
+ remove_wait_queue (&wqh, &wait);
+ return status;
+ }
+
+ if (urb->status == -EINPROGRESS) {
+ while (timeout && urb->status == -EINPROGRESS)
+ status = timeout = schedule_timeout (timeout);
+ } else
+ status = 1;
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue (&wqh, &wait);
+
+ if (!status) {
+ /* timeout */
+ dbg ("auerchain_start_wait_urb: timeout");
+ auerchain_unlink_urb (acp, urb); /* remove urb safely */
+ status = -ETIMEDOUT;
+ } else
+ status = urb->status;
+
+ if (actual_length)
+ *actual_length = urb->actual_length;
+
+ return status;
+}
+
+
/* auerchain_control_msg - Builds a control urb, sends it off and waits for completion
   acp: pointer to the auerchain
   dev: pointer to the usb device to send the message to
   pipe: endpoint "pipe" to send the message to
   request: USB message request value
   requesttype: USB message request type value
   value: USB message value
   index: USB message index value
   data: pointer to the data to send
   size: length in bytes of the data to send
   timeout: time to wait for the message to complete before timing out (if 0 the wait is forever)

   This function sends a simple control message to a specified endpoint
   and waits for the message to complete, or timeout.

   If successful, it returns the transferred length, otherwise a negative error number.

   Don't use this function from within an interrupt context, like a
   bottom half handler. If you need an asynchronous message, or need to send
   a message from within interrupt context, use auerchain_submit_urb()
*/
static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
				  __u16 value, __u16 index, void *data, __u16 size, int timeout)
{
	int ret;
	struct usb_ctrlrequest *dr;
	urb_t *urb;
	int length;

	dbg ("auerchain_control_msg");
	/* setup packet and urb are allocated per call and freed below */
	dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;
	urb = usb_alloc_urb (0);
	if (!urb) {
		kfree (dr);
		return -ENOMEM;
	}

	/* build the setup packet; multi-byte fields are little endian on the wire */
	dr->bRequestType = requesttype;
	dr->bRequest = request;
	dr->wValue = cpu_to_le16 (value);
	dr->wIndex = cpu_to_le16 (index);
	dr->wLength = cpu_to_le16 (size);

	FILL_CONTROL_URB (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */
			  (usb_complete_t)auerchain_blocking_completion,0);
	ret = auerchain_start_wait_urb (acp, urb, timeout, &length);

	usb_free_urb (urb);
	kfree (dr);

	if (ret < 0)
		return ret;
	else
		return length;
}
+
+
+/*-------------------------------------------------------------------*/
+/* Buffer List helper functions */
+
+/* free a single auerbuf */
+static void auerbuf_free (pauerbuf_t bp)
+{
+ if (bp->bufp) {
+ kfree (bp->bufp);
+ }
+ if (bp->dr) {
+ kfree (bp->dr);
+ }
+ if (bp->urbp) {
+ usb_free_urb (bp->urbp);
+ }
+ kfree (bp);
+}
+
+/* free the buffers from an auerbuf list */
+static void auerbuf_free_list (struct list_head *q)
+{
+ struct list_head *tmp;
+ struct list_head *p;
+ pauerbuf_t bp;
+
+ dbg ("auerbuf_free_list");
+ for (p = q->next; p != q;) {
+ bp = list_entry (p, auerbuf_t, buff_list);
+ tmp = p->next;
+ list_del (p);
+ p = tmp;
+ auerbuf_free (bp);
+ }
+}
+
+/* init the members of a list control block */
+static void auerbuf_init (pauerbufctl_t bcp)
+{
+ dbg ("auerbuf_init");
+ spin_lock_init (&bcp->lock);
+ INIT_LIST_HEAD (&bcp->free_buff_list);
+ INIT_LIST_HEAD (&bcp->rec_buff_list);
+}
+
+/* free all buffers from an auerbuf chain */
+static void auerbuf_free_buffers (pauerbufctl_t bcp)
+{
+ unsigned long flags;
+ dbg ("auerbuf_free_buffers");
+
+ spin_lock_irqsave (&bcp->lock, flags);
+
+ auerbuf_free_list (&bcp->free_buff_list);
+ auerbuf_free_list (&bcp->rec_buff_list);
+
+ spin_unlock_irqrestore (&bcp->lock, flags);
+}
+
+/* setup a list of buffers */
+/* requirement: auerbuf_init() */
+static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize)
+{
+ pauerbuf_t bep;
+
+ dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize);
+
+ /* fill the list of free elements */
+ for (;numElements; numElements--) {
+ bep = (pauerbuf_t) kmalloc (sizeof (auerbuf_t), GFP_KERNEL);
+ if (!bep) goto bl_fail;
+ memset (bep, 0, sizeof (auerbuf_t));
+ bep->list = bcp;
+ INIT_LIST_HEAD (&bep->buff_list);
+ bep->bufp = (char *) kmalloc (bufsize, GFP_KERNEL);
+ if (!bep->bufp) goto bl_fail;
+ bep->dr = (struct usb_ctrlrequest *) kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
+ if (!bep->dr) goto bl_fail;
+ bep->urbp = usb_alloc_urb (0);
+ if (!bep->urbp) goto bl_fail;
+ list_add_tail (&bep->buff_list, &bcp->free_buff_list);
+ }
+ return 0;
+
+bl_fail:/* not enought memory. Free allocated elements */
+ dbg ("auerbuf_setup: no more memory");
+ auerbuf_free_buffers (bcp);
+ return -ENOMEM;
+}
+
+/* insert a used buffer into the free list */
+static void auerbuf_releasebuf( pauerbuf_t bp)
+{
+ unsigned long flags;
+ pauerbufctl_t bcp = bp->list;
+ bp->retries = 0;
+
+ dbg ("auerbuf_releasebuf called");
+ spin_lock_irqsave (&bcp->lock, flags);
+ list_add_tail (&bp->buff_list, &bcp->free_buff_list);
+ spin_unlock_irqrestore (&bcp->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------*/
+/* Completion handlers */
+
+/* Values of urb->status or results of usb_submit_urb():
+0 Initial, OK
+-EINPROGRESS during submission until end
+-ENOENT if urb is unlinked
+-ETIMEDOUT Transfer timed out, NAK
+-ENOMEM Memory Overflow
+-ENODEV Specified USB-device or bus doesn't exist
+-ENXIO URB already queued
+-EINVAL a) Invalid transfer type specified (or not supported)
	b) Invalid interrupt interval (0 < n < 256)
+-EAGAIN a) Specified ISO start frame too early
+ b) (using ISO-ASAP) Too much scheduled for the future wait some time and try again.
-EFBIG		Too many ISO frames requested (currently limited to ~900 by uhci)
+-EPIPE Specified pipe-handle/Endpoint is already stalled
+-EMSGSIZE Endpoint message size is zero, do interface/alternate setting
+-EPROTO a) Bitstuff error
+ b) Unknown USB error
+-EILSEQ CRC mismatch
+-ENOSR Buffer error
+-EREMOTEIO Short packet detected
+-EXDEV ISO transfer only partially completed look at individual frame status for details
+-EINVAL ISO madness, if this happens: Log off and go home
+-EOVERFLOW babble
+*/
+
/* Decide whether a completed/failed urb status warrants another
   transfer attempt. Returns 1 for transient conditions worth a retry,
   0 for fatal ones (e.g. -ENOENT after unlink, -ENODEV). */
static int auerswald_status_retry (int status)
{
	return (status == 0
	     || status == -ETIMEDOUT
	     || status == -EOVERFLOW
	     || status == -EAGAIN
	     || status == -EPIPE
	     || status == -EPROTO
	     || status == -EILSEQ
	     || status == -ENOSR
	     || status == -EREMOTEIO) ? 1 : 0;
}
+
/* Completion of asynchronous write block:
   the buffer is recycled and processes waiting for a free control
   buffer are woken */
static void auerchar_ctrlwrite_complete (urb_t * urb)
{
	pauerbuf_t bp = (pauerbuf_t) urb->context;
	/* open-coded container_of: map the bufctl pointer stored in
	   bp->list back to the auerswald_t that embeds it */
	pauerswald_t cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
	dbg ("auerchar_ctrlwrite_complete called");

	/* reuse the buffer */
	auerbuf_releasebuf (bp);
	/* Wake up all processes waiting for a buffer */
	wake_up (&cp->bufferwait);
}
+
/* Completion handler for dummy retry packet
   The dummy control write gave the device firmware time to recover;
   now the original read is re-issued with the retry flag (wValue = 1)
   at the head of the chain. On a fatal status or a removed device the
   buffer is recycled and buffer waiters are woken instead. */
static void auerswald_ctrlread_wretcomplete (urb_t * urb)
{
	pauerbuf_t bp = (pauerbuf_t) urb->context;
	pauerswald_t cp;
	int ret;
	dbg ("auerswald_ctrlread_wretcomplete called");
	dbg ("complete with status: %d", urb->status);
	/* open-coded container_of: from bufctl back to the owning device */
	cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));

	/* check if it is possible to advance */
	if (!auerswald_status_retry (urb->status) || !cp->usbdev) {
		/* reuse the buffer */
		err ("control dummy: transmission error %d, can not retry", urb->status);
		auerbuf_releasebuf (bp);
		/* Wake up all processes waiting for a buffer */
		wake_up (&cp->bufferwait);
		return;
	}

	/* fill the control message */
	bp->dr->bRequestType = AUT_RREQ;
	bp->dr->bRequest = AUV_RBLOCK;
	bp->dr->wLength = bp->dr->wValue; /* temporary stored */
	bp->dr->wValue = cpu_to_le16 (1); /* Retry Flag */
	/* bp->dr->wIndex = channel id; remains */
	FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
			  (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength),
			  (usb_complete_t)auerswald_ctrlread_complete,bp);

	/* submit the control msg as next paket */
	ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
	if (ret) {
		dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
		bp->urbp->status = ret;
		auerswald_ctrlread_complete (bp->urbp);
	}
}
+
/* completion handler for receiving of control messages
   On success the paket is dispatched to the service registered under
   the service id found in its header byte, then the buffer is recycled.
   On a retryable error (firmware version >= AUV_RETRY, at most
   AU_RETRIES attempts) a long dummy control write is sent first; its
   completion handler re-issues the read. */
static void auerswald_ctrlread_complete (urb_t * urb)
{
	unsigned int  serviceid;
	pauerswald_t  cp;
	pauerscon_t   scp;
	pauerbuf_t bp = (pauerbuf_t) urb->context;
	int ret;
	dbg ("auerswald_ctrlread_complete called");

	/* open-coded container_of: from bufctl back to the owning device */
	cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));

	/* check if there is valid data in this urb */
	if (urb->status) {
		dbg ("complete with non-zero status: %d", urb->status);
		/* should we do a retry? */
		if (!auerswald_status_retry (urb->status)
		 || !cp->usbdev
		 || (cp->version < AUV_RETRY)
		 || (bp->retries >= AU_RETRIES)) {
			/* reuse the buffer */
			err ("control read: transmission error %d, can not retry", urb->status);
			auerbuf_releasebuf (bp);
			/* NOTE(review): unlike the other release paths, no
			   wake_up (&cp->bufferwait) follows this release —
			   confirm whether buffer waiters should be woken here. */
			return;
		}
		bp->retries++;
		dbg ("Retry count = %d", bp->retries);
		/* send a long dummy control-write-message to allow device firmware to react */
		bp->dr->bRequestType = AUT_WREQ;
		bp->dr->bRequest = AUV_DUMMY;
		bp->dr->wValue = bp->dr->wLength; /* temporary storage */
		// bp->dr->wIndex channel ID remains
		bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */
		FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
				  (unsigned char*)bp->dr, bp->bufp, 32,
				  (usb_complete_t)auerswald_ctrlread_wretcomplete,bp);

		/* submit the control msg as next paket */
		ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
		if (ret) {
			dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
			bp->urbp->status = ret;
			auerswald_ctrlread_wretcomplete (bp->urbp);
		}
		return;
	}

	/* get the actual bytecount (incl. headerbyte) */
	bp->len = urb->actual_length;
	serviceid = bp->bufp[0] & AUH_TYPEMASK;
	dbg ("Paket with serviceid %d and %d bytes received", serviceid, bp->len);

	/* dispatch the paket */
	scp = cp->services[serviceid];
	if (scp) {
		/* look, Ma, a listener! */
		scp->dispatch (scp, bp);
	}

	/* release the paket */
	auerbuf_releasebuf (bp);
	/* Wake up all processes waiting for a buffer */
	wake_up (&cp->bufferwait);
}
+
/*-------------------------------------------------------------------*/
/* Handling of Interrupt Endpoint */
/* This interrupt Endpoint is used to inform the host about waiting
   messages from the USB device.
*/
/* int completion handler.
   Validates the interrupt paket (status, length, command id, command
   type, channel id, byte count), then takes a free control buffer and
   issues the control read that fetches the announced data block. */
static void auerswald_int_complete (urb_t * urb)
{
	unsigned long flags;
	unsigned int channelid;
	unsigned int bytecount;
	int ret;
	pauerbuf_t bp = NULL;
	pauerswald_t cp = (pauerswald_t) urb->context;

	dbg ("auerswald_int_complete called");

	/* do not respond to an error condition */
	if (urb->status != 0) {
		dbg ("nonzero URB status = %d", urb->status);
		return;
	}

	/* check if all needed data was received */
	if (urb->actual_length < AU_IRQMINSIZE) {
		dbg ("invalid data length received: %d bytes", urb->actual_length);
		return;
	}

	/* check the command code */
	if (cp->intbufp[0] != AU_IRQCMDID) {
		dbg ("invalid command received: %d", cp->intbufp[0]);
		return;
	}

	/* check the command type */
	if (cp->intbufp[1] != AU_BLOCKRDY) {
		dbg ("invalid command type received: %d", cp->intbufp[1]);
		return;
	}

	/* now extract the information */
	channelid = cp->intbufp[2];
	bytecount = le16_to_cpup ((u16 *)&cp->intbufp[3]);

	/* check the channel id */
	if (channelid >= AUH_TYPESIZE) {
		dbg ("invalid channel id received: %d", channelid);
		return;
	}

	/* check the byte count */
	if (bytecount > (cp->maxControlLength+AUH_SIZE)) {
		dbg ("invalid byte count received: %d", bytecount);
		return;
	}
	dbg ("Service Channel = %d", channelid);
	dbg ("Byte Count = %d", bytecount);

	/* get a buffer for the next data paket */
	spin_lock_irqsave (&cp->bufctl.lock, flags);
	if (!list_empty (&cp->bufctl.free_buff_list)) {
		/* yes: get the entry */
		struct list_head *tmp = cp->bufctl.free_buff_list.next;
		list_del (tmp);
		bp = list_entry (tmp, auerbuf_t, buff_list);
	}
	spin_unlock_irqrestore (&cp->bufctl.lock, flags);

	/* if no buffer available: skip it */
	if (!bp) {
		dbg ("auerswald_int_complete: no data buffer available");
		/* can we do something more?
		   This is a big problem: if this int packet is ignored, the
		   device will wait forever and not signal any more data.
		   The only real solution is: having enough buffers!
		   Or perhaps temporarily disabling the int endpoint?
		*/
		return;
	}

	/* fill the control message */
	bp->dr->bRequestType = AUT_RREQ;
	bp->dr->bRequest = AUV_RBLOCK;
	bp->dr->wValue = cpu_to_le16 (0);
	bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
	bp->dr->wLength = cpu_to_le16 (bytecount);
	FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
			  (unsigned char*)bp->dr, bp->bufp, bytecount,
			  (usb_complete_t)auerswald_ctrlread_complete,bp);

	/* submit the control msg */
	ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
	if (ret) {
		dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret);
		bp->urbp->status = ret;
		auerswald_ctrlread_complete( bp->urbp);
		/* here applies the same problem as above: device locking! */
	}
}
+
/* int memory deallocation: release the interrupt urb and its data
   buffer. The pointers are cleared, so calling this twice is safe.
   NOTE: no mutex please!
*/
static void auerswald_int_free (pauerswald_t cp)
{
	if (cp->inturbp) {
		usb_free_urb (cp->inturbp);
		cp->inturbp = NULL;
	}
	if (cp->intbufp) {
		kfree (cp->intbufp);
		cp->intbufp = NULL;
	}
}
+
/* This function is called to activate the interrupt
   endpoint. This function returns 0 if successfull or an error code.
   NOTE: no mutex please!
   Urb and data buffer are allocated only once and reused on a later
   reactivation; on any failure both are released again.
*/
static int auerswald_int_open (pauerswald_t cp)
{
	int ret;
	struct usb_endpoint_descriptor *ep;
	int irqsize;
	dbg ("auerswald_int_open");

	/* look up the IN interrupt endpoint descriptor */
	ep = usb_epnum_to_ep_desc (cp->usbdev, USB_DIR_IN | AU_IRQENDP);
	if (!ep) {
		ret = -EFAULT;	/* NOTE(review): an endpoint-lookup failure
				   more usually maps to -ENXIO/-ENODEV; confirm */
		goto intoend;
	}
	irqsize = ep->wMaxPacketSize;
	cp->irqsize = irqsize;

	/* allocate the urb and data buffer (kept across reactivations) */
	if (!cp->inturbp) {
		cp->inturbp = usb_alloc_urb (0);
		if (!cp->inturbp) {
			ret = -ENOMEM;
			goto intoend;
		}
	}
	if (!cp->intbufp) {
		cp->intbufp = (char *) kmalloc (irqsize, GFP_KERNEL);
		if (!cp->intbufp) {
			ret = -ENOMEM;
			goto intoend;
		}
	}
	/* setup urb */
	FILL_INT_URB (cp->inturbp, cp->usbdev, usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp, irqsize, auerswald_int_complete, cp, ep->bInterval);
	/* start the urb */
	cp->inturbp->status = 0; /* needed! */
	ret = usb_submit_urb (cp->inturbp);

intoend:
	if (ret < 0) {
		/* activation of interrupt endpoint has failed. Now clean up. */
		dbg ("auerswald_int_open: activation of int endpoint failed");

		/* deallocate memory */
		auerswald_int_free (cp);
	}
	return ret;
}
+
/* This function is called to deactivate the interrupt
   endpoint. This function returns 0 if successfull or an error code.
   NOTE: no mutex please!
   Unlinks a pending interrupt urb, then frees urb and data buffer.
*/
static int auerswald_int_release (pauerswald_t cp)
{
	int ret = 0;
	dbg ("auerswald_int_release");

	/* stop the int endpoint */
	if (cp->inturbp) {
		ret = usb_unlink_urb (cp->inturbp);
		if (ret)
			dbg ("nonzero int unlink result received: %d", ret);
	}

	/* deallocate memory */
	auerswald_int_free (cp);

	return ret;
}
+
/* --------------------------------------------------------------------- */
/* Helper functions */

/* wake up waiting readers
   Installed as scp->disconnect (see auerchar_open): marks the character
   channel as removed and wakes readers blocked on readwait. */
static void auerchar_disconnect (pauerscon_t scp)
{
	/* open-coded container_of: from the service context back to the channel */
	pauerchar_t ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
	dbg ("auerchar_disconnect called");
	ccp->removed = 1;
	wake_up (&ccp->readwait);
}
+
+
/* dispatch a read paket to a waiting character device
   Instead of copying the payload, the filled data buffer is swapped
   against an empty one taken from the channel's free list; the filled
   buffer is then queued on rec_buff_list and readers are woken.
   If the channel has no free buffer, the paket is dropped. */
static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp)
{
	unsigned long flags;
	pauerchar_t ccp;
	pauerbuf_t newbp = NULL;
	char * charp;
	dbg ("auerchar_ctrlread_dispatch called");
	/* open-coded container_of: from the service context back to the channel */
	ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));

	/* get a read buffer from character device context */
	spin_lock_irqsave (&ccp->bufctl.lock, flags);
	if (!list_empty (&ccp->bufctl.free_buff_list)) {
		/* yes: get the entry */
		struct list_head *tmp = ccp->bufctl.free_buff_list.next;
		list_del (tmp);
		newbp = list_entry (tmp, auerbuf_t, buff_list);
	}
	spin_unlock_irqrestore (&ccp->bufctl.lock, flags);

	if (!newbp) {
		dbg ("No read buffer available, discard paket!");
		return;		/* no buffer, no dispatch */
	}

	/* copy information to new buffer element
	   (all buffers have the same length) */
	charp = newbp->bufp;
	newbp->bufp = bp->bufp;
	bp->bufp = charp;
	newbp->len = bp->len;

	/* insert new buffer in read list */
	spin_lock_irqsave (&ccp->bufctl.lock, flags);
	list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list);
	spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
	dbg ("read buffer appended to rec_list");

	/* wake up pending synchronous reads */
	wake_up (&ccp->readwait);
}
+
+
/* Delete an auerswald driver context
   Wakes buffer waiters, then tears down in order: interrupt endpoint
   (urb unlinked before its buffer is freed), control chain, buffer
   pool, and finally the context memory itself. NULL is tolerated. */
static void auerswald_delete( pauerswald_t cp)
{
	dbg( "auerswald_delete");
	if (cp == NULL) return;

	/* Wake up all processes waiting for a buffer */
	wake_up (&cp->bufferwait);

	/* Cleaning up */
	auerswald_int_release (cp);
	auerchain_free (&cp->controlchain);
	auerbuf_free_buffers (&cp->bufctl);

	/* release the memory */
	kfree( cp);
}
+
+
/* Delete an auerswald character context
   Marks the channel removed, wakes pending readers, returns a
   partially read buffer to its pool, frees the channel's buffers and
   finally the context itself. NULL is tolerated. */
static void auerchar_delete( pauerchar_t ccp)
{
	dbg ("auerchar_delete");
	if (ccp == NULL) return;

	/* wake up pending synchronous reads */
	ccp->removed = 1;
	wake_up (&ccp->readwait);

	/* remove the read buffer */
	if (ccp->readbuf) {
		auerbuf_releasebuf (ccp->readbuf);
		ccp->readbuf = NULL;
	}

	/* remove the character buffers */
	auerbuf_free_buffers (&ccp->bufctl);

	/* release the memory */
	kfree( ccp);
}
+
+
/* add a new service to the device
   scp->id must be set!
   return: 0 if OK, else error code
   Claims the services[] slot for scp, then opens the corresponding
   channel in the device firmware via a control message; the slot is
   released again if the device refuses. */
static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
{
	int ret;

	/* is the device available? */
	if (!cp->usbdev) {
		dbg ("usbdev == NULL");
		return -EIO;	/*no: can not add a service, sorry*/
	}

	/* is the service available? */
	if (cp->services[scp->id]) {
		dbg ("service is busy");
		return -EBUSY;
	}

	/* device is available, service is free */
	cp->services[scp->id] = scp;

	/* register service in device */
	ret = auerchain_control_msg(
		&cp->controlchain,			/* pointer to control chain */
		cp->usbdev,				/* pointer to device */
		usb_sndctrlpipe (cp->usbdev, 0),	/* pipe to control endpoint */
		AUV_CHANNELCTL,				/* USB message request value */
		AUT_WREQ,				/* USB message request type value */
		0x01,					/* open USB message value */
		scp->id,				/* USB message index value */
		NULL,					/* pointer to the data to send */
		0,					/* length in bytes of the data to send */
		HZ * 2);				/* time to wait for the message to complete before timing out */
	if (ret < 0) {
		dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret);
		/* undo above actions */
		cp->services[scp->id] = NULL;
		return ret;
	}

	dbg ("auerswald_addservice: channel open OK");
	return 0;
}
+
+
/* remove a service from the device
   scp->id must be set!
   If the device is still present, the channel is first closed in the
   firmware; then the services[] slot is cleared and the id marked
   unassigned. Calling this for an unassigned service is a no-op. */
static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
{
	dbg ("auerswald_removeservice called");

	/* check if we have a service allocated */
	if (scp->id == AUH_UNASSIGNED) return;

	/* If there is a device: close the channel */
	if (cp->usbdev) {
		/* Close the service channel inside the device */
		int ret = auerchain_control_msg(
			&cp->controlchain,			/* pointer to control chain */
			cp->usbdev,				/* pointer to device */
			usb_sndctrlpipe (cp->usbdev, 0),	/* pipe to control endpoint */
			AUV_CHANNELCTL,				/* USB message request value */
			AUT_WREQ,				/* USB message request type value */
			0x00,	// close			/* USB message value */
			scp->id,				/* USB message index value */
			NULL,					/* pointer to the data to send */
			0,					/* length in bytes of the data to send */
			HZ * 2);				/* time to wait for the message to complete before timing out */
		if (ret < 0) {
			dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret);
		}
		else {
			dbg ("auerswald_removeservice: channel close OK");
		}
	}

	/* remove the service from the device */
	cp->services[scp->id] = NULL;
	scp->id = AUH_UNASSIGNED;
}
+
+
/* --------------------------------------------------------------------- */
/* Char device functions */

/* Open a new character device
   Lock order: dev_table_mutex is taken first to look up the device by
   minor, then cp->mutex; the table mutex is dropped as soon as the
   device mutex is held. MOD_INC_USE_COUNT pins the module while the
   channel exists; the error path releases it again. */
static int auerchar_open (struct inode *inode, struct file *file)
{
	int dtindex = minor(inode->i_rdev) - AUER_MINOR_BASE;
	pauerswald_t cp = NULL;
	pauerchar_t ccp = NULL;
	int ret;

	/* minor number in range? */
	if ((dtindex < 0) || (dtindex >= AUER_MAX_DEVICES)) {
		return -ENODEV;
	}
	/* usb device available? */
	if (down_interruptible (&dev_table_mutex)) {
		return -ERESTARTSYS;
	}
	cp = dev_table[dtindex];
	if (cp == NULL) {
		up (&dev_table_mutex);
		return -ENODEV;
	}
	if (down_interruptible (&cp->mutex)) {
		up (&dev_table_mutex);
		return -ERESTARTSYS;
	}
	up (&dev_table_mutex);

	/* prevent module unloading */
	MOD_INC_USE_COUNT;

	/* we have access to the device. Now lets allocate memory */
	ccp = (pauerchar_t) kmalloc(sizeof(auerchar_t), GFP_KERNEL);
	if (ccp == NULL) {
		err ("out of memory");
		ret = -ENOMEM;
		goto ofail;
	}

	/* Initialize device descriptor */
	memset( ccp, 0, sizeof(auerchar_t));
	init_MUTEX( &ccp->mutex);
	init_MUTEX( &ccp->readmutex);
	auerbuf_init (&ccp->bufctl);
	ccp->scontext.id = AUH_UNASSIGNED;
	ccp->scontext.dispatch = auerchar_ctrlread_dispatch;
	ccp->scontext.disconnect = auerchar_disconnect;
	init_waitqueue_head (&ccp->readwait);

	/* per-channel receive buffers, sized to the device's max control length */
	ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE);
	if (ret) {
		goto ofail;
	}

	cp->open_count++;
	ccp->auerdev = cp;
	dbg("open %s as /dev/usb/%s", cp->dev_desc, cp->name);
	up (&cp->mutex);

	/* file IO stuff */
	file->f_pos = 0;
	file->private_data = ccp;
	return 0;

	/* Error exit: drop the device mutex, free the half-built channel
	   context (auerchar_delete tolerates NULL) and release the module
	   reference taken above */
ofail:	up (&cp->mutex);
	auerchar_delete (ccp);
	MOD_DEC_USE_COUNT;
	return ret;
}
+
+
+/* IOCTL functions.
+   Dispatches the driver-private IOCTL_AU_* commands. All commands run
+   with both the per-open mutex (ccp->mutex) and the device mutex
+   (cp->mutex) held, and fail with -ENODEV once the USB device is gone. */
+static int auerchar_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+	pauerchar_t ccp = (pauerchar_t) file->private_data;
+	int ret = 0;
+	audevinfo_t devinfo;
+	pauerswald_t cp = NULL;
+	unsigned int u;
+	dbg ("ioctl");
+
+	/* get the mutexes */
+	if (down_interruptible (&ccp->mutex)) {
+		return -ERESTARTSYS;
+	}
+	cp = ccp->auerdev;
+	if (!cp) {
+		up (&ccp->mutex);
+		return -ENODEV;
+	}
+	if (down_interruptible (&cp->mutex)) {
+		up(&ccp->mutex);
+		return -ERESTARTSYS;
+	}
+
+	/* Check for removal */
+	if (!cp->usbdev) {
+		up(&cp->mutex);
+		up(&ccp->mutex);
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+
+	/* return != 0 if Transmitt channel ready to send */
+	case IOCTL_AU_TXREADY:
+		dbg ("IOCTL_AU_TXREADY");
+		/* NOTE(review): ccp->auerdev was already checked non-NULL
+		   above while holding ccp->mutex, so the first term looks
+		   redundant here — confirm before removing it */
+		u = ccp->auerdev
+		&& (ccp->scontext.id != AUH_UNASSIGNED)
+		&& !list_empty (&cp->bufctl.free_buff_list);
+		ret = put_user (u, (unsigned int *) arg);
+		break;
+
+	/* return != 0 if connected to a service channel */
+	case IOCTL_AU_CONNECT:
+		dbg ("IOCTL_AU_CONNECT");
+		u = (ccp->scontext.id != AUH_UNASSIGNED);
+		ret = put_user (u, (unsigned int *) arg);
+		break;
+
+	/* return != 0 if Receive Data available */
+	case IOCTL_AU_RXAVAIL:
+		dbg ("IOCTL_AU_RXAVAIL");
+		if (ccp->scontext.id == AUH_UNASSIGNED) {
+			ret = -EIO;
+			break;
+		}
+		u = 0;	/* no data */
+		/* data pending in the partially consumed read buffer? */
+		if (ccp->readbuf) {
+			int restlen = ccp->readbuf->len - ccp->readoffset;
+			if (restlen > 0) u = 1;
+		}
+		/* or a complete buffer queued for reading? */
+		if (!u) {
+			if (!list_empty (&ccp->bufctl.rec_buff_list)) {
+				u = 1;
+			}
+		}
+		ret = put_user (u, (unsigned int *) arg);
+		break;
+
+	/* return the max. buffer length for the device */
+	case IOCTL_AU_BUFLEN:
+		dbg ("IOCTL_AU_BUFLEN");
+		u = cp->maxControlLength;
+		ret = put_user (u, (unsigned int *) arg);
+		break;
+
+	/* requesting a service channel */
+	case IOCTL_AU_SERVREQ:
+		dbg ("IOCTL_AU_SERVREQ");
+		/* requesting a service means: release the previous one first */
+		auerswald_removeservice (cp, &ccp->scontext);
+		/* get the channel number */
+		ret = get_user (u, (unsigned int *) arg);
+		if (ret) {
+			break;
+		}
+		/* only user channels may be requested from here */
+		if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) {
+			ret = -EIO;
+			break;
+		}
+		dbg ("auerchar service request parameters are ok");
+		ccp->scontext.id = u;
+
+		/* request the service now */
+		ret = auerswald_addservice (cp, &ccp->scontext);
+		if (ret) {
+			/* no: revert service entry */
+			ccp->scontext.id = AUH_UNASSIGNED;
+		}
+		break;
+
+	/* get a string descriptor for the device */
+	case IOCTL_AU_DEVINFO:
+		dbg ("IOCTL_AU_DEVINFO");
+		if (copy_from_user (&devinfo, (void *) arg, sizeof (audevinfo_t))) {
+			ret = -EFAULT;
+			break;
+		}
+		/* copy at most the user's buffer size, incl. terminator */
+		u = strlen(cp->dev_desc)+1;
+		if (u > devinfo.bsize) {
+			u = devinfo.bsize;
+		}
+		/* NOTE(review): copy_to_user returns the number of bytes NOT
+		   copied, which is returned here verbatim instead of -EFAULT
+		   — confirm this is the intended user-space contract */
+		ret = copy_to_user(devinfo.buf, cp->dev_desc, u);
+		break;
+
+	/* get the max. string descriptor length */
+	case IOCTL_AU_SLEN:
+		dbg ("IOCTL_AU_SLEN");
+		u = AUSI_DLEN;
+		ret = put_user (u, (unsigned int *) arg);
+		break;
+
+	default:
+		dbg ("IOCTL_AU_UNKNOWN");
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	/* release the mutexes */
+	up(&cp->mutex);
+	up(&ccp->mutex);
+	return ret;
+}
+
+
+/* Seek is not supported: the device is a stream, so every llseek()
+   fails with -ESPIPE, the conventional "illegal seek" errno. */
+static loff_t auerchar_llseek (struct file *file, loff_t offset, int origin)
+{
+	dbg ("auerchar_seek");
+	return -ESPIPE;
+}
+
+
+/* Read data from the device.
+   Drains the currently held read buffer first (doreadbuf); when it is
+   exhausted, pulls the next completed buffer off bufctl.rec_buff_list
+   (doreadlist); if nothing is queued, either returns -EAGAIN
+   (O_NONBLOCK) or sleeps on readwait until new data arrives.
+   ccp->readmutex serialises concurrent readers; ccp->mutex is dropped
+   while sleeping so other operations can proceed. */
+static ssize_t auerchar_read (struct file *file, char *buf, size_t count, loff_t * ppos)
+{
+	unsigned long flags;
+	pauerchar_t ccp = (pauerchar_t) file->private_data;
+	pauerbuf_t bp = NULL;
+	dbg ("auerchar_read");
+
+	/* Error checking */
+	if (!ccp)
+		return -EIO;
+	if (*ppos)
+		return -ESPIPE;	/* no seeking on this stream device */
+	if (count == 0)
+		return 0;
+
+	/* get the mutex */
+	if (down_interruptible (&ccp->mutex))
+		return -ERESTARTSYS;
+
+	/* Can we expect to read something? */
+	if (ccp->scontext.id == AUH_UNASSIGNED) {
+		up (&ccp->mutex);
+		return -EIO;
+	}
+
+	/* only one reader per device allowed */
+	if (down_interruptible (&ccp->readmutex)) {
+		up (&ccp->mutex);
+		return -ERESTARTSYS;
+	}
+
+	/* read data from readbuf, if available */
+doreadbuf:
+	bp = ccp->readbuf;
+	if (bp) {
+		/* read the maximum bytes */
+		int restlen = bp->len - ccp->readoffset;
+		if (restlen < 0)
+			restlen = 0;
+		if (count > restlen)
+			count = restlen;
+		if (count) {
+			if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) {
+				dbg ("auerswald_read: copy_to_user failed");
+				up (&ccp->readmutex);
+				up (&ccp->mutex);
+				return -EFAULT;
+			}
+		}
+		/* advance the read offset */
+		ccp->readoffset += count;
+		restlen -= count;
+		// reuse the read buffer
+		if (restlen <= 0) {
+			auerbuf_releasebuf (bp);
+			ccp->readbuf = NULL;
+		}
+		/* return with number of bytes read */
+		if (count) {
+			up (&ccp->readmutex);
+			up (&ccp->mutex);
+			return count;
+		}
+	}
+
+	/* a read buffer is not available. Try to get the next data block. */
+doreadlist:
+	bp = NULL;
+	/* the receive list is shared with completion context -> spinlock */
+	spin_lock_irqsave (&ccp->bufctl.lock, flags);
+	if (!list_empty (&ccp->bufctl.rec_buff_list)) {
+		/* yes: get the entry */
+		struct list_head *tmp = ccp->bufctl.rec_buff_list.next;
+		list_del (tmp);
+		bp = list_entry (tmp, auerbuf_t, buff_list);
+	}
+	spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
+
+	/* have we got data? */
+	if (bp) {
+		ccp->readbuf = bp;
+		ccp->readoffset = AUH_SIZE;	/* for headerbyte */
+		goto doreadbuf;		/* now we can read! */
+	}
+
+	/* no data available. Should we wait? */
+	if (file->f_flags & O_NONBLOCK) {
+		dbg ("No read buffer available, returning -EAGAIN");
+		up (&ccp->readmutex);
+		up (&ccp->mutex);
+		return -EAGAIN;	/* nonblocking, no data available */
+	}
+
+	/* yes, we should wait! */
+	up (&ccp->mutex);	/* allow other operations while we wait */
+	/* NOTE(review): interruptible_sleep_on() has the classic
+	   check-then-sleep race — a wakeup between the empty-list check
+	   above and going to sleep can be lost; verify against the
+	   completion path that queues rec_buff_list */
+	interruptible_sleep_on (&ccp->readwait);
+	if (signal_pending (current)) {
+		/* waked up by a signal */
+		up (&ccp->readmutex);
+		return -ERESTARTSYS;
+	}
+
+	/* Anything left to read? */
+	if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) {
+		up (&ccp->readmutex);
+		return -EIO;
+	}
+
+	/* re-take the mutex dropped before sleeping */
+	if (down_interruptible (&ccp->mutex)) {
+		up (&ccp->readmutex);
+		return -ERESTARTSYS;
+	}
+
+	/* try to read the incomming data again */
+	goto doreadlist;
+}
+
+
+/* Write a data block into the right service channel of the device.
+   Takes a buffer from the DEVICE pool (cp->bufctl), prepends the
+   service header byte, builds a vendor control request (AUV_WBLOCK)
+   and submits it through the control chain. Blocks on cp->bufferwait
+   when the pool is empty unless O_NONBLOCK is set. Returns the number
+   of bytes accepted (possibly truncated to maxControlLength) or a
+   negative errno. */
+static ssize_t auerchar_write (struct file *file, const char *buf, size_t len, loff_t *ppos)
+{
+	pauerchar_t ccp = (pauerchar_t) file->private_data;
+	pauerswald_t cp = NULL;
+	pauerbuf_t bp;
+	unsigned long flags;
+	int ret;
+
+	dbg ("auerchar_write %d bytes", len);
+
+	/* Error checking */
+	if (!ccp)
+		return -EIO;
+	if (*ppos)
+		return -ESPIPE;	/* no seeking on this stream device */
+	if (len == 0)
+		return 0;
+
+write_again:
+	/* get the mutex */
+	if (down_interruptible (&ccp->mutex))
+		return -ERESTARTSYS;
+
+	/* Can we expect to write something? */
+	if (ccp->scontext.id == AUH_UNASSIGNED) {
+		up (&ccp->mutex);
+		return -EIO;
+	}
+
+	cp = ccp->auerdev;
+	if (!cp) {
+		up (&ccp->mutex);
+		return -ERESTARTSYS;
+	}
+	if (down_interruptible (&cp->mutex)) {
+		up (&ccp->mutex);
+		return -ERESTARTSYS;
+	}
+	if (!cp->usbdev) {
+		up (&cp->mutex);
+		up (&ccp->mutex);
+		return -EIO;	/* device was disconnected */
+	}
+	/* Try to get a buffer from the device pool.
+	   We can't use a buffer from ccp->bufctl because the write
+	   command will last beond a release() */
+	bp = NULL;
+	spin_lock_irqsave (&cp->bufctl.lock, flags);
+	if (!list_empty (&cp->bufctl.free_buff_list)) {
+		/* yes: get the entry */
+		struct list_head *tmp = cp->bufctl.free_buff_list.next;
+		list_del (tmp);
+		bp = list_entry (tmp, auerbuf_t, buff_list);
+	}
+	spin_unlock_irqrestore (&cp->bufctl.lock, flags);
+
+	/* are there any buffers left? */
+	if (!bp) {
+		up (&cp->mutex);
+		up (&ccp->mutex);
+
+		/* NONBLOCK: don't wait */
+		if (file->f_flags & O_NONBLOCK) {
+			return -EAGAIN;
+		}
+
+		/* BLOCKING: wait */
+		/* NOTE(review): same interruptible_sleep_on()
+		   check-then-sleep race as in auerchar_read — a free-buffer
+		   wakeup issued here may be lost; confirm */
+		interruptible_sleep_on (&cp->bufferwait);
+		if (signal_pending (current)) {
+			/* waked up by a signal */
+			return -ERESTARTSYS;
+		}
+		goto write_again;	/* retry with all locks retaken */
+	}
+
+	/* protect against too big write requests */
+	if (len > cp->maxControlLength) len = cp->maxControlLength;
+
+	/* Fill the buffer */
+	if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) {
+		dbg ("copy_from_user failed");
+		auerbuf_releasebuf (bp);
+		up (&cp->mutex);
+		up (&ccp->mutex);
+		/* NOTE(review): -EFAULT would be the conventional errno for
+		   a copy_from_user failure; -EIO is returned here — verify */
+		return -EIO;
+	}
+
+	/* set the header byte */
+	*(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT;
+
+	/* Set the transfer Parameters */
+	bp->len = len+AUH_SIZE;
+	bp->dr->bRequestType = AUT_WREQ;
+	bp->dr->bRequest = AUV_WBLOCK;
+	bp->dr->wValue = cpu_to_le16 (0);
+	bp->dr->wIndex = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
+	bp->dr->wLength = cpu_to_le16 (len+AUH_SIZE);
+	FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
+			(unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE,
+			auerchar_ctrlwrite_complete, bp);
+	/* up we go */
+	ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
+	up (&cp->mutex);
+	if (ret) {
+		dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret);
+		auerbuf_releasebuf (bp);
+		up (&ccp->mutex);
+		return -EIO;
+	}
+	else {
+		dbg ("auerchar_write: Write OK");
+		up (&ccp->mutex);
+		return len;
+	}
+}
+
+
+/* Close a character device.
+   Releases the service channel, detaches the open context from the
+   device, and — if this was the last opener of an already-disconnected
+   device — frees the device structure as well. */
+static int auerchar_release (struct inode *inode, struct file *file)
+{
+	pauerchar_t ccp = (pauerchar_t) file->private_data;
+	pauerswald_t cp;
+	dbg("release");
+
+	/* get the mutexes */
+	/* NOTE(review): the VFS largely ignores release() return values,
+	   so bailing out with -ERESTARTSYS here may leak ccp and the
+	   open_count reference — confirm this path is acceptable */
+	if (down_interruptible (&ccp->mutex)) {
+		return -ERESTARTSYS;
+	}
+	cp = ccp->auerdev;
+	if (cp) {
+		if (down_interruptible (&cp->mutex)) {
+			up (&ccp->mutex);
+			return -ERESTARTSYS;
+		}
+		/* remove an open service */
+		auerswald_removeservice (cp, &ccp->scontext);
+		/* detach from device */
+		if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) {
+			/* usb device waits for removal */
+			up (&cp->mutex);
+			auerswald_delete (cp);
+		} else {
+			up (&cp->mutex);
+		}
+		cp = NULL;
+		ccp->auerdev = NULL;
+	}
+	up (&ccp->mutex);
+	auerchar_delete (ccp);
+
+	/* release the module */
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+
+/*----------------------------------------------------------------------*/
+/* File operation structure: wires the char-device entry points above
+   into the VFS (GNU designated-initializer syntax of this era). */
+static struct file_operations auerswald_fops =
+{
+	owner:		THIS_MODULE,
+	llseek:		auerchar_llseek,
+	read:		auerchar_read,
+	write:		auerchar_write,
+	ioctl:		auerchar_ioctl,
+	open:		auerchar_open,
+	release:	auerchar_release,
+};
+
+
+/* --------------------------------------------------------------------- */
+/* Special USB driver functions */
+
+/* Probe if this driver wants to serve an USB device
+
+ This entry point is called whenever a new device is attached to the bus.
+ Then the device driver has to create a new instance of its internal data
+ structures for the new device.
+
+ The dev argument specifies the device context, which contains pointers
+ to all USB descriptors. The interface argument specifies the interface
+ number. If a USB driver wants to bind itself to a particular device and
+ interface it has to return a pointer. This pointer normally references
+ the device driver's context structure.
+
+ Probing normally is done by checking the vendor and product identifications
+ or the class and subclass definitions. If they match the interface number
+ is compared with the ones supported by the driver. When probing is done
+ class based it might be necessary to parse some more USB descriptors because
+ the device properties can differ in a wide range.
+*/
+/* Probe callback: accept the device if it is an Auerswald (vendor id
+   match, interface 0 only), allocate and initialise the auerswald_t
+   context, claim a dev_table[] slot, register a devfs node, query the
+   device's textual description and maximum control transfer length,
+   and start the interrupt endpoint. Returns the context pointer or
+   NULL on failure. */
+static void *auerswald_probe (struct usb_device *usbdev, unsigned int ifnum,
+			      const struct usb_device_id *id)
+{
+	pauerswald_t cp = NULL;
+	DECLARE_WAIT_QUEUE_HEAD (wqh);
+	unsigned int dtindex;
+	unsigned int u = 0;
+	char *pbuf;
+	int ret;
+
+	dbg ("probe: vendor id 0x%x, device id 0x%x ifnum:%d",
+	     usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, ifnum);
+
+	/* See if the device offered us matches that we can accept */
+	if (usbdev->descriptor.idVendor != ID_AUERSWALD) return NULL;
+
+	/* we use only the first -and only- interface */
+	if (ifnum != 0) return NULL;
+
+	/* prevent module unloading while sleeping */
+	MOD_INC_USE_COUNT;
+
+	/* allocate memory for our device and intialize it */
+	cp = kmalloc (sizeof(auerswald_t), GFP_KERNEL);
+	if (cp == NULL) {
+		err ("out of memory");
+		goto pfail;
+	}
+
+	/* Initialize device descriptor */
+	memset (cp, 0, sizeof(auerswald_t));
+	init_MUTEX (&cp->mutex);
+	cp->usbdev = usbdev;
+	auerchain_init (&cp->controlchain);
+	auerbuf_init (&cp->bufctl);
+	init_waitqueue_head (&cp->bufferwait);
+
+	/* find a free slot in the device table */
+	down (&dev_table_mutex);
+	for (dtindex = 0; dtindex < AUER_MAX_DEVICES; ++dtindex) {
+		if (dev_table[dtindex] == NULL)
+			break;
+	}
+	if ( dtindex >= AUER_MAX_DEVICES) {
+		err ("more than %d devices plugged in, can not handle this device", AUER_MAX_DEVICES);
+		up (&dev_table_mutex);
+		goto pfail;
+	}
+
+	/* Give the device a name */
+	sprintf (cp->name, AU_PREFIX "%d", dtindex);
+
+	/* Store the index */
+	cp->dtindex = dtindex;
+	dev_table[dtindex] = cp;
+	up (&dev_table_mutex);
+	/* NOTE(review): from here on, error paths goto pfail without
+	   visibly clearing dev_table[dtindex] or unregistering devfs —
+	   verify auerswald_delete() takes care of both, otherwise a stale
+	   pointer remains in the table */
+
+	/* initialize the devfs node for this device and register it */
+	cp->devfs = devfs_register (usb_devfs_handle, cp->name,
+				    DEVFS_FL_DEFAULT, USB_MAJOR,
+				    AUER_MINOR_BASE + dtindex,
+				    S_IFCHR | S_IRUGO | S_IWUGO,
+				    &auerswald_fops, NULL);
+
+	/* Get the usb version of the device */
+	cp->version = cp->usbdev->descriptor.bcdDevice;
+	dbg ("Version is %X", cp->version);
+
+	/* allow some time to settle the device */
+	sleep_on_timeout (&wqh, HZ / 3 );
+
+	/* Try to get a suitable textual description of the device */
+	/* Device name:*/
+	ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1);
+	if (ret >= 0) {
+		u += ret;
+		/* Append Serial Number */
+		/* NOTE(review): the fixed-size memcpy appends are only
+		   implicitly bounded by the AUSI_DLEN-u-1 limits of the
+		   following usb_string calls — confirm dev_desc cannot
+		   overflow when the first string is near AUSI_DLEN */
+		memcpy(&cp->dev_desc[u], ",Ser# ", 6);
+		u += 6;
+		ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1);
+		if (ret >= 0) {
+			u += ret;
+			/* Append subscriber number */
+			memcpy(&cp->dev_desc[u], ", ", 2);
+			u += 2;
+			ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1);
+			if (ret >= 0) {
+				u += ret;
+			}
+		}
+	}
+	cp->dev_desc[u] = '\0';
+	info("device is a %s", cp->dev_desc);
+
+	/* get the maximum allowed control transfer length */
+	pbuf = (char *) kmalloc (2, GFP_KERNEL);	/* use an allocated buffer because of urb target */
+	if (!pbuf) {
+		err( "out of memory");
+		goto pfail;
+	}
+	ret = usb_control_msg(cp->usbdev,           /* pointer to device */
+		usb_rcvctrlpipe( cp->usbdev, 0 ),   /* pipe to control endpoint */
+		AUV_GETINFO,                        /* USB message request value */
+		AUT_RREQ,                           /* USB message request type value */
+		0,                                  /* USB message value */
+		AUDI_MBCTRANS,                      /* USB message index value */
+		pbuf,                               /* pointer to the receive buffer */
+		2,                                  /* length of the buffer */
+		HZ * 2);                            /* time to wait for the message to complete before timing out */
+	if (ret == 2) {
+		cp->maxControlLength = le16_to_cpup((u16 *)pbuf);
+		kfree(pbuf);
+		dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength);
+	} else {
+		kfree(pbuf);
+		err("setup: getting max. allowed control transfer length failed with error %d", ret);
+		goto pfail;
+	}
+
+	/* allocate a chain for the control messages */
+	if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) {
+		err ("out of memory");
+		goto pfail;
+	}
+
+	/* allocate buffers for control messages */
+	if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) {
+		err ("out of memory");
+		goto pfail;
+	}
+
+	/* start the interrupt endpoint */
+	if (auerswald_int_open (cp)) {
+		err ("int endpoint failed");
+		goto pfail;
+	}
+
+	/* all OK */
+	return cp;
+
+	/* Error exit: clean up the memory */
+pfail:	auerswald_delete (cp);
+	MOD_DEC_USE_COUNT;
+	return NULL;
+}
+
+
+/* Disconnect driver from a served device
+
+ This function is called whenever a device which was served by this driver
+ is disconnected.
+
+ The argument dev specifies the device context and the driver_context
+ returns a pointer to the previously registered driver_context of the
+ probe function. After returning from the disconnect function the USB
+ framework completly deallocates all data structures associated with
+ this device. So especially the usb_device structure must not be used
+ any longer by the usb driver.
+*/
+/* Disconnect callback: makes the device unreachable (dev_table slot
+   cleared, devfs node removed), stops the interrupt endpoint and the
+   control chain, then either frees the context immediately (no open
+   file handles) or defers the cleanup to the last release() by
+   clearing cp->usbdev and notifying waiters. */
+static void auerswald_disconnect (struct usb_device *usbdev, void *driver_context)
+{
+	pauerswald_t cp = (pauerswald_t) driver_context;
+	unsigned int u;
+
+	down (&cp->mutex);
+	info ("device /dev/usb/%s now disconnecting", cp->name);
+
+	/* remove from device table */
+	/* Nobody can open() this device any more */
+	down (&dev_table_mutex);
+	dev_table[cp->dtindex] = NULL;
+	up (&dev_table_mutex);
+
+	/* remove our devfs node */
+	/* Nobody can see this device any more */
+	devfs_unregister (cp->devfs);
+
+	/* Stop the interrupt endpoint */
+	auerswald_int_release (cp);
+
+	/* remove the control chain allocated in auerswald_probe
+	   This has the benefit of
+	   a) all pending (a)synchronous urbs are unlinked
+	   b) all buffers dealing with urbs are reclaimed
+	 */
+	auerchain_free (&cp->controlchain);
+
+	if (cp->open_count == 0) {
+		/* nobody is using this device. So we can clean up now */
+		up (&cp->mutex);/* up() is possible here because no other task
+				   can open the device (see above). I don't want
+				   to kfree() a locked mutex. */
+		auerswald_delete (cp);
+	} else {
+		/* device is used. Remove the pointer to the
+		   usb device (it's not valid any more). The last
+		   release() will do the clean up */
+		cp->usbdev = NULL;
+		up (&cp->mutex);
+		/* Terminate waiting writers */
+		wake_up (&cp->bufferwait);
+		/* Inform all waiting readers */
+		for ( u = 0; u < AUH_TYPESIZE; u++) {
+			pauerscon_t scp = cp->services[u];
+			if (scp) scp->disconnect( scp);
+		}
+	}
+
+	/* The device releases this module */
+	MOD_DEC_USE_COUNT;
+}
+
+/* Descriptor for the devices which are served by this driver.
+   NOTE: this struct is parsed by the usbmanager install scripts.
+   Don't change without caution!
+*/
+static struct usb_device_id auerswald_ids [] = {
+	{ USB_DEVICE (ID_AUERSWALD, 0x00C0) },          /* COMpact 2104 USB */
+	{ USB_DEVICE (ID_AUERSWALD, 0x00DB) },          /* COMpact 4410/2206 USB */
+	{ USB_DEVICE (ID_AUERSWALD, 0x00F1) },		/* Comfort 2000 System Telephone */
+	{ USB_DEVICE (ID_AUERSWALD, 0x00F2) },		/* Comfort 1200 System Telephone */
+	{ }			                        /* Terminating entry */
+};
+
+/* Standard module device table */
+MODULE_DEVICE_TABLE (usb, auerswald_ids);
+
+/* Standard usb driver struct: registers probe/disconnect with the USB
+   core and claims a range of minors starting at AUER_MINOR_BASE. */
+static struct usb_driver auerswald_driver = {
+	name:		"auerswald",
+	probe:		auerswald_probe,
+	disconnect:	auerswald_disconnect,
+	fops:		&auerswald_fops,
+	minor:		AUER_MINOR_BASE,
+	id_table:	auerswald_ids,
+};
+
+
+/* --------------------------------------------------------------------- */
+/* Module loading/unloading */
+
+/* Driver initialisation. Called after module loading.
+   NOTE: there is no concurrency at _init
+   Clears the device table, initialises its mutex, and registers the
+   driver with the USB subsystem. Returns 0 or -1 on failure. */
+static int __init auerswald_init (void)
+{
+	int result;
+	dbg ("init");
+
+	/* initialize the device table */
+	memset (&dev_table, 0, sizeof(dev_table));
+	init_MUTEX (&dev_table_mutex);
+
+	/* register driver at the USB subsystem */
+	result = usb_register (&auerswald_driver);
+	if (result < 0) {
+		err ("driver could not be registered");
+		/* NOTE(review): returning -1 (-EPERM) discards the errno
+		   from usb_register; propagating `result` would be more
+		   informative — confirm before changing */
+		return -1;
+	}
+	return 0;
+}
+
+/* Driver deinit. Called before module removal.
+   NOTE: there is no concurrency at _cleanup
+   Deregistering triggers disconnect() for any still-attached devices. */
+static void __exit auerswald_cleanup (void)
+{
+	dbg ("cleanup");
+	usb_deregister (&auerswald_driver);
+}
+
+/* --------------------------------------------------------------------- */
+/* Linux device driver module description */
+
+MODULE_AUTHOR (DRIVER_AUTHOR);
+MODULE_DESCRIPTION (DRIVER_DESC);
+
+module_init (auerswald_init);
+module_exit (auerswald_cleanup);
+
+/* --------------------------------------------------------------------- */
diff --git a/drivers/usb/bluetooth.c b/drivers/usb/bluetooth.c
index ba1db6f54..9429d75b9 100644
--- a/drivers/usb/bluetooth.c
+++ b/drivers/usb/bluetooth.c
@@ -179,7 +179,7 @@ struct usb_bluetooth {
__u8 control_out_bInterfaceNum;
struct urb * control_urb_pool[NUM_CONTROL_URBS];
- devrequest dr[NUM_CONTROL_URBS];
+ struct usb_ctrlrequest dr[NUM_CONTROL_URBS];
unsigned char * interrupt_in_buffer;
struct urb * interrupt_in_urb;
@@ -288,7 +288,7 @@ static inline struct usb_bluetooth *get_bluetooth_by_minor (int minor)
static int bluetooth_ctrl_msg (struct usb_bluetooth *bluetooth, int request, int value, const unsigned char *buf, int len)
{
struct urb *urb = NULL;
- devrequest *dr = NULL;
+ struct usb_ctrlrequest *dr = NULL;
int i;
int status;
@@ -325,11 +325,11 @@ static int bluetooth_ctrl_msg (struct usb_bluetooth *bluetooth, int request, int
}
memcpy (urb->transfer_buffer, buf, len);
- dr->requesttype = BLUETOOTH_CONTROL_REQUEST_TYPE;
- dr->request = request;
- dr->value = cpu_to_le16((u16) value);
- dr->index = cpu_to_le16((u16) bluetooth->control_out_bInterfaceNum);
- dr->length = cpu_to_le16((u16) len);
+ dr->bRequestType= BLUETOOTH_CONTROL_REQUEST_TYPE;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16((u16) value);
+ dr->wIndex = cpu_to_le16((u16) bluetooth->control_out_bInterfaceNum);
+ dr->wLength = cpu_to_le16((u16) len);
FILL_CONTROL_URB (urb, bluetooth->dev, usb_sndctrlpipe(bluetooth->dev, 0),
(unsigned char*)dr, urb->transfer_buffer, len, bluetooth_ctrl_callback, bluetooth);
diff --git a/drivers/usb/catc.c b/drivers/usb/catc.c
index 0c704dc07..856fb1c8f 100644
--- a/drivers/usb/catc.c
+++ b/drivers/usb/catc.c
@@ -159,7 +159,7 @@ struct catc {
u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)];
u8 irq_buf[2];
u8 ctrl_buf[64];
- devrequest ctrl_dr;
+ struct usb_ctrlrequest ctrl_dr;
struct timer_list timer;
u8 stats_buf[8];
@@ -383,14 +383,14 @@ static void catc_ctrl_run(struct catc *catc)
struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
struct usb_device *usbdev = catc->usbdev;
struct urb *urb = &catc->ctrl_urb;
- devrequest *dr = &catc->ctrl_dr;
+ struct usb_ctrlrequest *dr = &catc->ctrl_dr;
int status;
- dr->request = q->request;
- dr->requesttype = 0x40 | q->dir;
- dr->value = cpu_to_le16(q->value);
- dr->index = cpu_to_le16(q->index);
- dr->length = cpu_to_le16(q->len);
+ dr->bRequest = q->request;
+ dr->bRequestType = 0x40 | q->dir;
+ dr->wValue = cpu_to_le16(q->value);
+ dr->wIndex = cpu_to_le16(q->index);
+ dr->wLength = cpu_to_le16(q->len);
urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0);
urb->transfer_buffer_length = q->len;
diff --git a/drivers/usb/devio.c b/drivers/usb/devio.c
index 424fee8de..0ddbffdee 100644
--- a/drivers/usb/devio.c
+++ b/drivers/usb/devio.c
@@ -527,40 +527,40 @@ static int proc_control(struct dev_state *ps, void *arg)
if (copy_from_user(&ctrl, (void *)arg, sizeof(ctrl)))
return -EFAULT;
- if ((ret = check_ctrlrecip(ps, ctrl.requesttype, ctrl.index)))
+ if ((ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.wIndex)))
return ret;
- if (ctrl.length > PAGE_SIZE)
+ if (ctrl.wLength > PAGE_SIZE)
return -EINVAL;
if (!(tbuf = (unsigned char *)__get_free_page(GFP_KERNEL)))
return -ENOMEM;
tmo = (ctrl.timeout * HZ + 999) / 1000;
- if (ctrl.requesttype & 0x80) {
- if (ctrl.length && !access_ok(VERIFY_WRITE, ctrl.data, ctrl.length)) {
+ if (ctrl.bRequestType & 0x80) {
+ if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data, ctrl.wLength)) {
free_page((unsigned long)tbuf);
return -EINVAL;
}
- i = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ctrl.request, ctrl.requesttype,
- ctrl.value, ctrl.index, tbuf, ctrl.length, tmo);
- if ((i > 0) && ctrl.length) {
- if (copy_to_user(ctrl.data, tbuf, ctrl.length)) {
+ i = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ctrl.bRequest, ctrl.bRequestType,
+ ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo);
+ if ((i > 0) && ctrl.wLength) {
+ if (copy_to_user(ctrl.data, tbuf, ctrl.wLength)) {
free_page((unsigned long)tbuf);
return -EFAULT;
}
}
} else {
- if (ctrl.length) {
- if (copy_from_user(tbuf, ctrl.data, ctrl.length)) {
+ if (ctrl.wLength) {
+ if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) {
free_page((unsigned long)tbuf);
return -EFAULT;
}
}
- i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.request, ctrl.requesttype,
- ctrl.value, ctrl.index, tbuf, ctrl.length, tmo);
+ i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest, ctrl.bRequestType,
+ ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo);
}
free_page((unsigned long)tbuf);
if (i<0) {
printk(KERN_DEBUG "usbdevfs: USBDEVFS_CONTROL failed dev %d rqt %u rq %u len %u ret %d\n",
- dev->devnum, ctrl.requesttype, ctrl.request, ctrl.length, i);
+ dev->devnum, ctrl.bRequestType, ctrl.bRequest, ctrl.wLength, i);
}
return i;
}
@@ -757,7 +757,7 @@ static int proc_submiturb(struct dev_state *ps, void *arg)
struct usbdevfs_iso_packet_desc *isopkt = NULL;
struct usb_endpoint_descriptor *ep_desc;
struct async *as;
- devrequest *dr = NULL;
+ struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int ret;
@@ -787,23 +787,23 @@ static int proc_submiturb(struct dev_state *ps, void *arg)
/* min 8 byte setup packet, max arbitrary */
if (uurb.buffer_length < 8 || uurb.buffer_length > PAGE_SIZE)
return -EINVAL;
- if (!(dr = kmalloc(sizeof(devrequest), GFP_KERNEL)))
+ if (!(dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(dr, (unsigned char*)uurb.buffer, 8)) {
kfree(dr);
return -EFAULT;
}
- if (uurb.buffer_length < (le16_to_cpup(&dr->length) + 8)) {
+ if (uurb.buffer_length < (le16_to_cpup(&dr->wLength) + 8)) {
kfree(dr);
return -EINVAL;
}
- if ((ret = check_ctrlrecip(ps, dr->requesttype, le16_to_cpup(&dr->index)))) {
+ if ((ret = check_ctrlrecip(ps, dr->bRequestType, le16_to_cpup(&dr->wIndex)))) {
kfree(dr);
return ret;
}
- uurb.endpoint = (uurb.endpoint & ~USB_ENDPOINT_DIR_MASK) | (dr->requesttype & USB_ENDPOINT_DIR_MASK);
+ uurb.endpoint = (uurb.endpoint & ~USB_ENDPOINT_DIR_MASK) | (dr->bRequestType & USB_ENDPOINT_DIR_MASK);
uurb.number_of_packets = 0;
- uurb.buffer_length = le16_to_cpup(&dr->length);
+ uurb.buffer_length = le16_to_cpup(&dr->wLength);
uurb.buffer += 8;
if (!access_ok((uurb.endpoint & USB_DIR_IN) ? VERIFY_WRITE : VERIFY_READ, uurb.buffer, uurb.buffer_length)) {
kfree(dr);
diff --git a/drivers/usb/hcd.c b/drivers/usb/hcd.c
index d01a1bcf4..8d4799ae5 100644
--- a/drivers/usb/hcd.c
+++ b/drivers/usb/hcd.c
@@ -268,16 +268,16 @@ static int rh_string (
/* Root hub control transfers execute synchronously */
static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
{
- devrequest *cmd = (devrequest *) urb->setup_packet;
+ struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
u16 typeReq, wValue, wIndex, wLength;
const u8 *bufp = 0;
u8 *ubuf = urb->transfer_buffer;
int len = 0;
- typeReq = (cmd->requesttype << 8) | cmd->request;
- wValue = le16_to_cpu (cmd->value);
- wIndex = le16_to_cpu (cmd->index);
- wLength = le16_to_cpu (cmd->length);
+ typeReq = (cmd->bRequestType << 8) | cmd->bRequest;
+ wValue = le16_to_cpu (cmd->wValue);
+ wIndex = le16_to_cpu (cmd->wIndex);
+ wLength = le16_to_cpu (cmd->wLength);
if (wLength > urb->transfer_buffer_length)
goto error;
diff --git a/drivers/usb/hcd/ehci-q.c b/drivers/usb/hcd/ehci-q.c
index 25a305dc1..5ca53633a 100644
--- a/drivers/usb/hcd/ehci-q.c
+++ b/drivers/usb/hcd/ehci-q.c
@@ -362,7 +362,7 @@ scrub:
/* SETUP for control urb? */
if (unlikely (QTD_PID (token) == 2))
pci_unmap_single (ehci->hcd.pdev,
- qtd->buf_dma, sizeof (devrequest),
+ qtd->buf_dma, sizeof (struct usb_ctrlrequest),
PCI_DMA_TODEVICE);
/* another queued urb? */
@@ -443,13 +443,13 @@ qh_urb_transaction (
qtd->buf_dma = pci_map_single (
ehci->hcd.pdev,
urb->setup_packet,
- sizeof (devrequest),
+ sizeof (struct usb_ctrlrequest),
PCI_DMA_TODEVICE);
if (unlikely (!qtd->buf_dma))
goto cleanup;
/* SETUP pid */
- qtd_fill (qtd, qtd->buf_dma, sizeof (devrequest),
+ qtd_fill (qtd, qtd->buf_dma, sizeof (struct usb_ctrlrequest),
token | (2 /* "setup" */ << 8));
/* ... and always at least one more pid */
diff --git a/drivers/usb/hid-core.c b/drivers/usb/hid-core.c
index 7c40af27a..1129deb66 100644
--- a/drivers/usb/hid-core.c
+++ b/drivers/usb/hid-core.c
@@ -987,7 +987,7 @@ int hid_find_field(struct hid_device *hid, unsigned int type, unsigned int code,
static int hid_submit_out(struct hid_device *hid)
{
- hid->urbout.transfer_buffer_length = le16_to_cpup(&hid->out[hid->outtail].dr.length);
+ hid->urbout.transfer_buffer_length = le16_to_cpup(&hid->out[hid->outtail].dr.wLength);
hid->urbout.transfer_buffer = hid->out[hid->outtail].buffer;
hid->urbout.setup_packet = (void *) &(hid->out[hid->outtail].dr);
hid->urbout.dev = hid->dev;
@@ -1017,8 +1017,8 @@ void hid_write_report(struct hid_device *hid, struct hid_report *report)
{
hid_output_report(report, hid->out[hid->outhead].buffer);
- hid->out[hid->outhead].dr.value = cpu_to_le16(0x200 | report->id);
- hid->out[hid->outhead].dr.length = cpu_to_le16((report->size + 7) >> 3);
+ hid->out[hid->outhead].dr.wValue = cpu_to_le16(0x200 | report->id);
+ hid->out[hid->outhead].dr.wLength = cpu_to_le16((report->size + 7) >> 3);
hid->outhead = (hid->outhead + 1) & (HID_CONTROL_FIFO_SIZE - 1);
@@ -1179,9 +1179,9 @@ static struct hid_device *usb_hid_configure(struct usb_device *dev, int ifnum)
hid->ifnum = interface->bInterfaceNumber;
for (n = 0; n < HID_CONTROL_FIFO_SIZE; n++) {
- hid->out[n].dr.requesttype = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
- hid->out[n].dr.request = HID_REQ_SET_REPORT;
- hid->out[n].dr.index = cpu_to_le16(hid->ifnum);
+ hid->out[n].dr.bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ hid->out[n].dr.bRequest = HID_REQ_SET_REPORT;
+ hid->out[n].dr.wIndex = cpu_to_le16(hid->ifnum);
}
hid->name[0] = 0;
diff --git a/drivers/usb/hid.h b/drivers/usb/hid.h
index 7eb085fce..4a65e5295 100644
--- a/drivers/usb/hid.h
+++ b/drivers/usb/hid.h
@@ -367,7 +367,7 @@ struct hid_report_enum {
#define HID_CONTROL_FIFO_SIZE 8
struct hid_control_fifo {
- devrequest dr;
+ struct usb_ctrlrequest dr;
char buffer[HID_BUFFER_SIZE];
};
diff --git a/drivers/usb/kaweth.c b/drivers/usb/kaweth.c
index 7289dcd38..a26100655 100644
--- a/drivers/usb/kaweth.c
+++ b/drivers/usb/kaweth.c
@@ -110,8 +110,8 @@ static void *kaweth_probe(
);
static void kaweth_disconnect(struct usb_device *dev, void *ptr);
int kaweth_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe,
- devrequest *cmd, void *data, int len,
- int timeout);
+ struct usb_ctrlrequest *cmd, void *data,
+ int len, int timeout);
/****************************************************************
* usb_device_id
@@ -229,7 +229,7 @@ static int kaweth_control(struct kaweth_device *kaweth,
__u16 size,
int timeout)
{
- devrequest *dr;
+ struct usb_ctrlrequest *dr;
kaweth_dbg("kaweth_control()");
@@ -238,20 +238,19 @@ static int kaweth_control(struct kaweth_device *kaweth,
return -EBUSY;
}
- dr = kmalloc(sizeof(devrequest),
+ dr = kmalloc(sizeof(struct usb_ctrlrequest),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
- if(!dr)
- {
+ if (!dr) {
kaweth_dbg("kmalloc() failed");
return -ENOMEM;
}
- dr->requesttype = requesttype;
- dr->request = request;
- dr->value = cpu_to_le16p(&value);
- dr->index = cpu_to_le16p(&index);
- dr->length = cpu_to_le16p(&size);
+ dr->bRequestType= requesttype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16p(&value);
+ dr->wIndex = cpu_to_le16p(&index);
+ dr->wLength = cpu_to_le16p(&size);
return kaweth_internal_control_msg(kaweth->dev,
pipe,
@@ -1015,7 +1014,8 @@ static int usb_start_wait_urb(urb_t *urb, int timeout, int* actual_length)
/*-------------------------------------------------------------------*/
// returns status (negative) or length (positive)
int kaweth_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe,
- devrequest *cmd, void *data, int len, int timeout)
+ struct usb_ctrlrequest *cmd, void *data, int len,
+ int timeout)
{
urb_t *urb;
int retv;
diff --git a/drivers/usb/pegasus.c b/drivers/usb/pegasus.c
index 7ae85febb..5b1b9fb66 100644
--- a/drivers/usb/pegasus.c
+++ b/drivers/usb/pegasus.c
@@ -142,11 +142,11 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
remove_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_RUNNING);
- pegasus->dr.requesttype = PEGASUS_REQT_READ;
- pegasus->dr.request = PEGASUS_REQ_GET_REGS;
- pegasus->dr.value = cpu_to_le16 (0);
- pegasus->dr.index = cpu_to_le16p(&indx);
- pegasus->dr.length = cpu_to_le16p(&size);
+ pegasus->dr.bRequestType = PEGASUS_REQT_READ;
+ pegasus->dr.bRequest = PEGASUS_REQ_GET_REGS;
+ pegasus->dr.wValue = cpu_to_le16 (0);
+ pegasus->dr.wIndex = cpu_to_le16p(&indx);
+ pegasus->dr.wLength = cpu_to_le16p(&size);
pegasus->ctrl_urb.transfer_buffer_length = size;
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
@@ -192,11 +192,11 @@ static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
remove_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_RUNNING);
- pegasus->dr.requesttype = PEGASUS_REQT_WRITE;
- pegasus->dr.request = PEGASUS_REQ_SET_REGS;
- pegasus->dr.value = cpu_to_le16 (0);
- pegasus->dr.index = cpu_to_le16p( &indx );
- pegasus->dr.length = cpu_to_le16p( &size );
+ pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
+ pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
+ pegasus->dr.wValue = cpu_to_le16 (0);
+ pegasus->dr.wIndex = cpu_to_le16p( &indx );
+ pegasus->dr.wLength = cpu_to_le16p( &size );
pegasus->ctrl_urb.transfer_buffer_length = size;
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
@@ -242,11 +242,11 @@ static int set_register( pegasus_t *pegasus, __u16 indx, __u8 data )
remove_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_RUNNING);
- pegasus->dr.requesttype = PEGASUS_REQT_WRITE;
- pegasus->dr.request = PEGASUS_REQ_SET_REG;
- pegasus->dr.value = cpu_to_le16p( &dat);
- pegasus->dr.index = cpu_to_le16p( &indx );
- pegasus->dr.length = cpu_to_le16( 1 );
+ pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
+ pegasus->dr.bRequest = PEGASUS_REQ_SET_REG;
+ pegasus->dr.wValue = cpu_to_le16p( &dat);
+ pegasus->dr.wIndex = cpu_to_le16p( &indx );
+ pegasus->dr.wLength = cpu_to_le16( 1 );
pegasus->ctrl_urb.transfer_buffer_length = 1;
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
@@ -275,11 +275,11 @@ static int update_eth_regs_async( pegasus_t *pegasus )
{
int ret;
- pegasus->dr.requesttype = PEGASUS_REQT_WRITE;
- pegasus->dr.request = PEGASUS_REQ_SET_REGS;
- pegasus->dr.value = 0;
- pegasus->dr.index = cpu_to_le16(EthCtrl0);
- pegasus->dr.length = cpu_to_le16(3);
+ pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
+ pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
+ pegasus->dr.wValue = 0;
+ pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
+ pegasus->dr.wLength = cpu_to_le16(3);
pegasus->ctrl_urb.transfer_buffer_length = 3;
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
diff --git a/drivers/usb/pegasus.h b/drivers/usb/pegasus.h
index d76125cfa..ccb80c2d3 100644
--- a/drivers/usb/pegasus.h
+++ b/drivers/usb/pegasus.h
@@ -108,7 +108,7 @@ typedef struct pegasus {
int dev_index;
int intr_interval;
struct urb ctrl_urb, rx_urb, tx_urb, intr_urb;
- devrequest dr;
+ struct usb_ctrlrequest dr;
wait_queue_head_t ctrl_wait;
struct semaphore ctrl_sem;
unsigned char ALIGN(rx_buff[PEGASUS_MAX_MTU]);
diff --git a/drivers/usb/scanner.c b/drivers/usb/scanner.c
index bf1fdb968..4c147cf50 100644
--- a/drivers/usb/scanner.c
+++ b/drivers/usb/scanner.c
@@ -281,7 +281,7 @@
* 0.4.7 11/28/2001
* - Fixed typo in Documentation/scanner.txt. Thanks to
* Karel <karel.vervaeke@pandora.be> for pointing it out.
- * - Added ID's for a Memorex 6136u. Thanks to =C1lvaro Gaspar de
+ * - Added ID's for a Memorex 6136u. Thanks to Álvaro Gaspar de
* Valenzuela" <agaspard@utsi.edu>.
* - Added ID's for Agfa e25. Thanks to Heinrich
* Rust <Heinrich.Rust@gmx.de>. Also reported to work with
@@ -746,8 +746,8 @@ ioctl_scanner(struct inode *inode, struct file *file,
case SCANNER_IOCTL_CTRLMSG:
{
struct ctrlmsg_ioctl {
- devrequest req;
- void *data;
+ struct usb_ctrlrequest req;
+ void *data;
} cmsg;
int pipe, nb, ret;
unsigned char buf[64];
@@ -755,12 +755,12 @@ ioctl_scanner(struct inode *inode, struct file *file,
if (copy_from_user(&cmsg, (void *)arg, sizeof(cmsg)))
return -EFAULT;
- nb = le16_to_cpup(&cmsg.req.length);
+ nb = le16_to_cpup(&cmsg.req.wLength);
if (nb > sizeof(buf))
return -EINVAL;
- if ((cmsg.req.requesttype & 0x80) == 0) {
+ if ((cmsg.req.bRequestType & 0x80) == 0) {
pipe = usb_sndctrlpipe(dev, 0);
if (nb > 0 && copy_from_user(buf, cmsg.data, nb))
return -EFAULT;
@@ -768,10 +768,10 @@ ioctl_scanner(struct inode *inode, struct file *file,
pipe = usb_rcvctrlpipe(dev, 0);
}
- ret = usb_control_msg(dev, pipe, cmsg.req.request,
- cmsg.req.requesttype,
- le16_to_cpup(&cmsg.req.value),
- le16_to_cpup(&cmsg.req.index),
+ ret = usb_control_msg(dev, pipe, cmsg.req.bRequest,
+ cmsg.req.bRequestType,
+ le16_to_cpup(&cmsg.req.wValue),
+ le16_to_cpup(&cmsg.req.wIndex),
buf, nb, HZ);
if (ret < 0) {
@@ -779,7 +779,7 @@ ioctl_scanner(struct inode *inode, struct file *file,
return -EIO;
}
- if (nb > 0 && (cmsg.req.requesttype & 0x80) && copy_to_user(cmsg.data, buf, nb))
+ if (nb > 0 && (cmsg.req.bRequestType & 0x80) && copy_to_user(cmsg.data, buf, nb))
return -EFAULT;
return 0;
diff --git a/drivers/usb/scanner.h b/drivers/usb/scanner.h
index f7b9143bc..eb899bb4d 100644
--- a/drivers/usb/scanner.h
+++ b/drivers/usb/scanner.h
@@ -230,7 +230,7 @@ MODULE_DEVICE_TABLE (usb, scanner_device_ids);
#define SCANNER_IOCTL_VENDOR _IOR('U', 0x20, int)
#define SCANNER_IOCTL_PRODUCT _IOR('U', 0x21, int)
/* send/recv a control message to the scanner */
-#define SCANNER_IOCTL_CTRLMSG _IOWR('U', 0x22, devrequest )
+#define SCANNER_IOCTL_CTRLMSG _IOWR('U', 0x22, struct usb_ctrlrequest)
#define SCN_MAX_MNR 16 /* We're allocated 16 minors */
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 126846933..0f20382ba 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -119,6 +119,7 @@ MODULE_DEVICE_TABLE (usb, id_table_combined);
/* All of the device info needed for the serial converters */
static struct usb_serial_device_type belkin_device = {
+ owner: THIS_MODULE,
name: "Belkin / Peracom / GoHubs USB Serial Adapter",
id_table: id_table_combined,
num_interrupt_in: 1,
@@ -209,7 +210,6 @@ static int belkin_sa_open (struct usb_serial_port *port, struct file *filp)
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
/*Start reading from the device*/
@@ -264,7 +264,6 @@ static void belkin_sa_close (struct usb_serial_port *port, struct file *filp)
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
} /* belkin_sa_close */
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index f74b72bb6..fcdb59e74 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -77,6 +77,7 @@ static __devinitdata struct usb_device_id id_table [] = {
MODULE_DEVICE_TABLE (usb, id_table);
static struct usb_serial_device_type cyberjack_device = {
+ owner: THIS_MODULE,
name: "Reiner SCT Cyberjack USB card reader",
id_table: id_table,
num_interrupt_in: 1,
@@ -148,8 +149,6 @@ static int cyberjack_open (struct usb_serial_port *port, struct file *filp)
if (port_paranoia_check (port, __FUNCTION__))
return -ENODEV;
- MOD_INC_USE_COUNT;
-
dbg(__FUNCTION__ " - port %d", port->number);
down (&port->sem);
@@ -204,7 +203,6 @@ static void cyberjack_close (struct usb_serial_port *port, struct file *filp)
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
static int cyberjack_write (struct usb_serial_port *port, int from_user, const unsigned char *buf, int count)
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 74316472f..4edf3c7a3 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -501,6 +501,7 @@ MODULE_DEVICE_TABLE (usb, id_table_combined);
/* device info needed for the Digi serial converter */
static struct usb_serial_device_type digi_acceleport_2_device = {
+ owner: THIS_MODULE,
name: "Digi USB",
id_table: id_table_2,
num_interrupt_in: 0,
@@ -524,6 +525,7 @@ static struct usb_serial_device_type digi_acceleport_2_device = {
};
static struct usb_serial_device_type digi_acceleport_4_device = {
+ owner: THIS_MODULE,
name: "Digi USB",
id_table: id_table_4,
num_interrupt_in: 0,
@@ -603,7 +605,6 @@ static void digi_wakeup_write_lock( struct usb_serial_port *port )
spin_lock_irqsave( &priv->dp_port_lock, flags );
digi_wakeup_write( port );
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
- MOD_DEC_USE_COUNT;
}
static void digi_wakeup_write( struct usb_serial_port *port )
@@ -1410,9 +1411,7 @@ dbg( "digi_write_bulk_callback: TOP, urb->status=%d", urb->status );
/* also queue up a wakeup at scheduler time, in case we */
/* lost the race in write_chan(). */
- MOD_INC_USE_COUNT;
- if (schedule_task(&priv->dp_wakeup_task) == 0)
- MOD_DEC_USE_COUNT;
+ schedule_task(&priv->dp_wakeup_task);
spin_unlock( &priv->dp_port_lock );
@@ -1493,7 +1492,6 @@ dbg( "digi_open: TOP: port=%d, open_count=%d", priv->dp_port_num, port->open_cou
/* inc module use count before sleeping to wait for closes */
++port->open_count;
- MOD_INC_USE_COUNT;
/* wait for a close in progress to finish */
while( priv->dp_in_close ) {
@@ -1502,7 +1500,6 @@ dbg( "digi_open: TOP: port=%d, open_count=%d", priv->dp_port_num, port->open_cou
&priv->dp_port_lock, flags );
if( signal_pending(current) ) {
--port->open_count;
- MOD_DEC_USE_COUNT;
return( -EINTR );
}
spin_lock_irqsave( &priv->dp_port_lock, flags );
@@ -1562,7 +1559,6 @@ dbg( "digi_close: TOP: port=%d, open_count=%d", priv->dp_port_num, port->open_co
spin_lock_irqsave( &priv->dp_port_lock, flags );
if( port->open_count > 1 ) {
--port->open_count;
- MOD_DEC_USE_COUNT;
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
return;
} else if( port->open_count <= 0 ) {
@@ -1642,7 +1638,6 @@ dbg( "digi_close: TOP: port=%d, open_count=%d", priv->dp_port_num, port->open_co
priv->dp_write_urb_in_use = 0;
priv->dp_in_close = 0;
--port->open_count;
- MOD_DEC_USE_COUNT;
wake_up_interruptible( &priv->dp_close_wait );
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
@@ -1787,7 +1782,6 @@ dbg( "digi_shutdown: TOP, in_interrupt()=%d", in_interrupt() );
priv = serial->port[i].private;
spin_lock_irqsave( &priv->dp_port_lock, flags );
while( serial->port[i].open_count > 0 ) {
- MOD_DEC_USE_COUNT;
--serial->port[i].open_count;
}
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index e935a248d..49e00fc8b 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -114,6 +114,7 @@ static __devinitdata struct usb_device_id id_table [] = {
MODULE_DEVICE_TABLE (usb, id_table);
static struct usb_serial_device_type empeg_device = {
+ owner: THIS_MODULE,
name: "Empeg",
id_table: id_table,
num_interrupt_in: 0,
@@ -159,7 +160,6 @@ static int empeg_open (struct usb_serial_port *port, struct file *filp)
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
@@ -224,8 +224,6 @@ static void empeg_close (struct usb_serial_port *port, struct file * filp)
/* Uncomment the following line if you want to see some statistics in your syslog */
/* info ("Bytes In = %d Bytes Out = %d", bytes_in, bytes_out); */
-
- MOD_DEC_USE_COUNT;
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8f634f74f..ff4e8e101 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -174,6 +174,7 @@ static void ftdi_sio_break_ctl (struct usb_serial_port *port, int break_state )
which share common code */
static struct usb_serial_device_type ftdi_sio_device = {
+ owner: THIS_MODULE,
name: "FTDI SIO",
id_table: id_table_sio,
num_interrupt_in: 0,
@@ -318,7 +319,6 @@ static int ftdi_sio_open (struct usb_serial_port *port, struct file *filp)
down (&port->sem);
- MOD_INC_USE_COUNT;
++port->open_count;
if (port->open_count == 1){
@@ -411,7 +411,6 @@ static void ftdi_sio_close (struct usb_serial_port *port, struct file *filp)
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
} /* ftdi_sio_close */
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 395ca6262..d7968e619 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -985,7 +985,6 @@ static int edge_open (struct usb_serial_port *port, struct file * filp)
return -ENODEV;
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
/* force low_latency on so that our tty_push actually forces the data through,
@@ -999,7 +998,6 @@ static int edge_open (struct usb_serial_port *port, struct file * filp)
edge_serial = (struct edgeport_serial *)serial->private;
if (edge_serial == NULL) {
port->open_count = 0;
- MOD_DEC_USE_COUNT;
return -ENODEV;
}
if (edge_serial->interrupt_in_buffer == NULL) {
@@ -1062,7 +1060,6 @@ static int edge_open (struct usb_serial_port *port, struct file * filp)
err(__FUNCTION__" - error sending open port command");
edge_port->openPending = FALSE;
port->open_count = 0;
- MOD_DEC_USE_COUNT;
return -ENODEV;
}
@@ -1077,7 +1074,6 @@ static int edge_open (struct usb_serial_port *port, struct file * filp)
dbg(__FUNCTION__" - open timedout");
edge_port->openPending = FALSE;
port->open_count = 0;
- MOD_DEC_USE_COUNT;
return -ENODEV;
}
@@ -1283,7 +1279,6 @@ static void edge_close (struct usb_serial_port *port, struct file * filp)
port->open_count = 0;
}
- MOD_DEC_USE_COUNT;
dbg(__FUNCTION__" exited");
}
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 01f562a5e..c6a8bd026 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -72,6 +72,7 @@ static __devinitdata struct usb_device_id id_table_combined [] = {
MODULE_DEVICE_TABLE (usb, id_table_combined);
static struct usb_serial_device_type edgeport_1port_device = {
+ owner: THIS_MODULE,
name: "Edgeport 1 port adapter",
id_table: edgeport_1port_id_table,
num_interrupt_in: 1,
@@ -93,6 +94,7 @@ static struct usb_serial_device_type edgeport_1port_device = {
};
static struct usb_serial_device_type edgeport_2port_device = {
+ owner: THIS_MODULE,
name: "Edgeport 2 port adapter",
id_table: edgeport_2port_id_table,
num_interrupt_in: 1,
@@ -114,6 +116,7 @@ static struct usb_serial_device_type edgeport_2port_device = {
};
static struct usb_serial_device_type edgeport_4port_device = {
+ owner: THIS_MODULE,
name: "Edgeport 4 port adapter",
id_table: edgeport_4port_id_table,
num_interrupt_in: 1,
@@ -135,6 +138,7 @@ static struct usb_serial_device_type edgeport_4port_device = {
};
static struct usb_serial_device_type edgeport_8port_device = {
+ owner: THIS_MODULE,
name: "Edgeport 8 port adapter",
id_table: edgeport_8port_id_table,
num_interrupt_in: 1,
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index ecec9b983..d73a7c3a7 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -91,6 +91,7 @@ MODULE_DEVICE_TABLE (usb, id_table);
struct usb_serial_device_type ir_device = {
+ owner: THIS_MODULE,
name: "IR Dongle",
id_table: id_table,
num_interrupt_in: 1,
@@ -204,7 +205,6 @@ static int ir_open (struct usb_serial_port *port, struct file *filp)
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
if (buffer_size) {
@@ -270,7 +270,6 @@ static void ir_close (struct usb_serial_port *port, struct file * filp)
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
static int ir_write (struct usb_serial_port *port, int from_user, const unsigned char *buf, int count)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 80df4f451..e3062760f 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -877,8 +877,6 @@ static int keyspan_open (struct usb_serial_port *port, struct file *filp)
dbg("keyspan_open called for port%d.\n", port->number);
- MOD_INC_USE_COUNT;
-
down (&port->sem);
already_active = port->open_count;
++port->open_count;
@@ -960,8 +958,6 @@ static void keyspan_close(struct usb_serial_port *port, struct file *filp)
port->tty = 0;
}
up (&port->sem);
-
- MOD_DEC_USE_COUNT;
}
@@ -1789,7 +1785,6 @@ static void keyspan_shutdown (struct usb_serial *serial)
port = &serial->port[i];
while (port->open_count > 0) {
--port->open_count;
- MOD_DEC_USE_COUNT;
}
kfree(port->private);
}
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 5998e9edd..e4cf84a41 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -449,6 +449,7 @@ static __devinitdata struct usb_device_id keyspan_usa49w_ids[] = {
/* Structs for the devices, pre and post renumeration. */
static struct usb_serial_device_type keyspan_usa18x_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA18X - (without firmware)",
id_table: keyspan_usa18x_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -459,6 +460,7 @@ static struct usb_serial_device_type keyspan_usa18x_pre_device = {
};
static struct usb_serial_device_type keyspan_usa19_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA19 - (without firmware)",
id_table: keyspan_usa19_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -470,6 +472,7 @@ static struct usb_serial_device_type keyspan_usa19_pre_device = {
static struct usb_serial_device_type keyspan_usa19w_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA19W - (without firmware)",
id_table: keyspan_usa19w_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -481,6 +484,7 @@ static struct usb_serial_device_type keyspan_usa19w_pre_device = {
static struct usb_serial_device_type keyspan_usa28_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28 - (without firmware)",
id_table: keyspan_usa28_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -491,6 +495,7 @@ static struct usb_serial_device_type keyspan_usa28_pre_device = {
};
static struct usb_serial_device_type keyspan_usa28x_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28X - (without firmware)",
id_table: keyspan_usa28x_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -501,6 +506,7 @@ static struct usb_serial_device_type keyspan_usa28x_pre_device = {
};
static struct usb_serial_device_type keyspan_usa28xa_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28XA - (without firmware)",
id_table: keyspan_usa28xa_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -511,6 +517,7 @@ static struct usb_serial_device_type keyspan_usa28xa_pre_device = {
};
static struct usb_serial_device_type keyspan_usa28xb_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28XB - (without firmware)",
id_table: keyspan_usa28xb_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -521,6 +528,7 @@ static struct usb_serial_device_type keyspan_usa28xb_pre_device = {
};
static struct usb_serial_device_type keyspan_usa49w_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA49W - (without firmware)",
id_table: keyspan_usa49w_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -531,6 +539,7 @@ static struct usb_serial_device_type keyspan_usa49w_pre_device = {
};
static struct usb_serial_device_type keyspan_usa18x_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA18X",
id_table: keyspan_usa18x_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -554,6 +563,7 @@ static struct usb_serial_device_type keyspan_usa18x_device = {
};
static struct usb_serial_device_type keyspan_usa19_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA19",
id_table: keyspan_usa19_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -578,6 +588,7 @@ static struct usb_serial_device_type keyspan_usa19_device = {
static struct usb_serial_device_type keyspan_usa19w_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA19W",
id_table: keyspan_usa19w_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -602,6 +613,7 @@ static struct usb_serial_device_type keyspan_usa19w_device = {
static struct usb_serial_device_type keyspan_usa28_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28",
id_table: keyspan_usa28_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -617,6 +629,7 @@ static struct usb_serial_device_type keyspan_usa28_device = {
static struct usb_serial_device_type keyspan_usa28x_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28X/XB",
id_table: keyspan_usa28x_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -641,6 +654,7 @@ static struct usb_serial_device_type keyspan_usa28x_device = {
};
static struct usb_serial_device_type keyspan_usa28xa_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28XA",
id_table: keyspan_usa28xa_ids,
num_interrupt_in: NUM_DONT_CARE,
@@ -665,6 +679,7 @@ static struct usb_serial_device_type keyspan_usa28xa_device = {
};
static struct usb_serial_device_type keyspan_usa49w_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA49W",
id_table: keyspan_usa49w_ids,
num_interrupt_in: NUM_DONT_CARE,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index c1a9864c9..a45751e52 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -193,7 +193,6 @@ static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
/* wake up other tty processes */
wake_up_interruptible( &tty->write_wait );
/* For 2.2.16 backport -- wake_up_interruptible( &tty->poll_wait ); */
- MOD_DEC_USE_COUNT;
}
static void keyspan_pda_request_unthrottle( struct usb_serial *serial )
@@ -212,7 +211,6 @@ static void keyspan_pda_request_unthrottle( struct usb_serial *serial )
NULL,
0,
2*HZ);
- MOD_DEC_USE_COUNT;
}
@@ -261,9 +259,7 @@ static void keyspan_pda_rx_interrupt (struct urb *urb)
tty = serial->port[0].tty;
priv->tx_throttled = 0;
/* queue up a wakeup at scheduler time */
- MOD_INC_USE_COUNT;
- if (schedule_task(&priv->wakeup_task) == 0)
- MOD_DEC_USE_COUNT;
+ schedule_task(&priv->wakeup_task);
break;
default:
break;
@@ -602,9 +598,7 @@ static int keyspan_pda_write(struct usb_serial_port *port, int from_user,
if (request_unthrottle) {
priv->tx_throttled = 1; /* block writers */
- MOD_INC_USE_COUNT;
- if (schedule_task(&priv->unthrottle_task) == 0)
- MOD_DEC_USE_COUNT;
+ schedule_task(&priv->unthrottle_task);
}
rc = count;
@@ -631,9 +625,7 @@ static void keyspan_pda_write_bulk_callback (struct urb *urb)
}
/* queue up a wakeup at scheduler time */
- MOD_INC_USE_COUNT;
- if (schedule_task(&priv->wakeup_task) == 0)
- MOD_DEC_USE_COUNT;
+ schedule_task(&priv->wakeup_task);
}
@@ -672,7 +664,6 @@ static int keyspan_pda_open (struct usb_serial_port *port, struct file *filp)
down (&port->sem);
- MOD_INC_USE_COUNT;
++port->open_count;
if (port->open_count == 1) {
@@ -721,7 +712,6 @@ static int keyspan_pda_open (struct usb_serial_port *port, struct file *filp)
return rc;
error:
--port->open_count;
- MOD_DEC_USE_COUNT;
up (&port->sem);
return rc;
}
@@ -749,7 +739,6 @@ static void keyspan_pda_close(struct usb_serial_port *port, struct file *filp)
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index f8b7f39f9..90d296ee3 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -144,6 +144,7 @@ MODULE_DEVICE_TABLE (usb, id_table_combined);
static struct usb_serial_device_type mct_u232_device = {
+ owner: THIS_MODULE,
name: "Magic Control Technology USB-RS232",
id_table: id_table_combined,
num_interrupt_in: 2,
@@ -343,7 +344,6 @@ static int mct_u232_open (struct usb_serial_port *port, struct file *filp)
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
/* Compensate for a hardware bug: although the Sitecom U232-P25
@@ -423,7 +423,6 @@ static void mct_u232_close (struct usb_serial_port *port, struct file *filp)
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
} /* mct_u232_close */
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index ff8e75733..56cba64d9 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -88,6 +88,7 @@ MODULE_DEVICE_TABLE (usb, id_table);
static struct usb_serial_device_type zyxel_omninet_device = {
+ owner: THIS_MODULE,
name: "ZyXEL - omni.net lcd plus usb",
id_table: id_table,
num_interrupt_in: 1,
@@ -158,7 +159,6 @@ static int omninet_open (struct usb_serial_port *port, struct file *filp)
down (&port->sem);
- MOD_INC_USE_COUNT;
++port->open_count;
if (port->open_count == 1) {
@@ -167,7 +167,6 @@ static int omninet_open (struct usb_serial_port *port, struct file *filp)
err(__FUNCTION__"- kmalloc(%Zd) failed.", sizeof(struct omninet_data));
port->open_count = 0;
up (&port->sem);
- MOD_DEC_USE_COUNT;
return -ENOMEM;
}
@@ -223,7 +222,6 @@ static void omninet_close (struct usb_serial_port *port, struct file * filp)
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 07ff1da9c..4e6671995 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -116,6 +116,7 @@ static void pl2303_shutdown (struct usb_serial *serial);
/* All of the device info needed for the PL2303 SIO serial converter */
static struct usb_serial_device_type pl2303_device = {
+ owner: THIS_MODULE,
name: "PL-2303",
id_table: id_table,
num_interrupt_in: NUM_DONT_CARE,
@@ -369,7 +370,6 @@ static int pl2303_open (struct usb_serial_port *port, struct file *filp)
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
#define FISH(a,b,c,d) \
@@ -480,7 +480,6 @@ static void pl2303_close (struct usb_serial_port *port, struct file *filp)
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
static int set_modem_info (struct usb_serial_port *port, unsigned int cmd, unsigned int *value)
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index e07b797e8..dc10e752f 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -133,6 +133,7 @@ static int whiteheat_real_startup (struct usb_serial *serial);
static void whiteheat_real_shutdown (struct usb_serial *serial);
static struct usb_serial_device_type whiteheat_fake_device = {
+ owner: THIS_MODULE,
name: "Connect Tech - WhiteHEAT - (prerenumeration)",
id_table: id_table_prerenumeration,
num_interrupt_in: NUM_DONT_CARE,
@@ -143,6 +144,7 @@ static struct usb_serial_device_type whiteheat_fake_device = {
};
static struct usb_serial_device_type whiteheat_device = {
+ owner: THIS_MODULE,
name: "Connect Tech - WhiteHEAT",
id_table: id_table_std,
num_interrupt_in: NUM_DONT_CARE,
@@ -307,7 +309,6 @@ static int whiteheat_open (struct usb_serial_port *port, struct file *filp)
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
/* set up some stuff for our command port */
@@ -359,7 +360,6 @@ static int whiteheat_open (struct usb_serial_port *port, struct file *filp)
error_exit:
--port->open_count;
- MOD_DEC_USE_COUNT;
dbg(__FUNCTION__ " - error_exit");
up (&port->sem);
@@ -391,7 +391,6 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp)
usb_unlink_urb (port->read_urb);
port->open_count = 0;
}
- MOD_DEC_USE_COUNT;
up (&port->sem);
}
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index b2ecee899..1f247877e 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -385,19 +385,19 @@ int usb_stor_control_msg(struct us_data *us, unsigned int pipe,
{
struct completion urb_done;
int status;
- devrequest *dr;
+ struct usb_ctrlrequest *dr;
/* allocate the device request structure */
- dr = kmalloc(sizeof(devrequest), GFP_NOIO);
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
if (!dr)
return -ENOMEM;
/* fill in the structure */
- dr->requesttype = requesttype;
- dr->request = request;
- dr->value = cpu_to_le16(value);
- dr->index = cpu_to_le16(index);
- dr->length = cpu_to_le16(size);
+ dr->bRequestType = requesttype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16(value);
+ dr->wIndex = cpu_to_le16(index);
+ dr->wLength = cpu_to_le16(size);
/* set up data structures for the wakeup system */
init_completion(&urb_done);
diff --git a/drivers/usb/uhci.c b/drivers/usb/uhci.c
index 1f19cb11a..8889f668f 100644
--- a/drivers/usb/uhci.c
+++ b/drivers/usb/uhci.c
@@ -646,7 +646,7 @@ static struct urb_priv *uhci_alloc_urb_priv(struct uhci *uhci, struct urb *urb)
if (usb_pipetype(urb->pipe) == PIPE_CONTROL && urb->setup_packet) {
urbp->setup_packet_dma_handle = pci_map_single(uhci->dev,
- urb->setup_packet, sizeof(devrequest),
+ urb->setup_packet, sizeof(struct usb_ctrlrequest),
PCI_DMA_TODEVICE);
if (!urbp->setup_packet_dma_handle)
return NULL;
@@ -715,7 +715,7 @@ static void uhci_destroy_urb_priv(struct urb *urb)
if (urbp->setup_packet_dma_handle)
pci_unmap_single(uhci->dev, urbp->setup_packet_dma_handle,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
if (urbp->transfer_buffer_dma_handle)
pci_unmap_single(uhci->dev, urbp->transfer_buffer_dma_handle,
@@ -2013,7 +2013,7 @@ static int rh_submit_urb(struct urb *urb)
{
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
unsigned int pipe = urb->pipe;
- devrequest *cmd = (devrequest *)urb->setup_packet;
+ struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *)urb->setup_packet;
void *data = urb->transfer_buffer;
int leni = urb->transfer_buffer_length;
int len = 0;
@@ -2036,10 +2036,10 @@ static int rh_submit_urb(struct urb *urb)
return -EINPROGRESS;
}
- bmRType_bReq = cmd->requesttype | cmd->request << 8;
- wValue = le16_to_cpu(cmd->value);
- wIndex = le16_to_cpu(cmd->index);
- wLength = le16_to_cpu(cmd->length);
+ bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
+ wValue = le16_to_cpu(cmd->wValue);
+ wIndex = le16_to_cpu(cmd->wIndex);
+ wLength = le16_to_cpu(cmd->wLength);
for (i = 0; i < 8; i++)
uhci->rh.c_p_r[i] = 0;
@@ -2276,7 +2276,7 @@ static void uhci_call_completion(struct urb *urb)
if (urbp->setup_packet_dma_handle)
pci_dma_sync_single(uhci->dev, urbp->setup_packet_dma_handle,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
urb->dev = NULL;
if (urb->complete)
diff --git a/drivers/usb/usb-ohci.c b/drivers/usb/usb-ohci.c
index 57e4c68e1..2b2a484e7 100644
--- a/drivers/usb/usb-ohci.c
+++ b/drivers/usb/usb-ohci.c
@@ -1905,7 +1905,7 @@ static int rh_submit_urb (urb_t * urb)
struct usb_device * usb_dev = urb->dev;
ohci_t * ohci = usb_dev->bus->hcpriv;
unsigned int pipe = urb->pipe;
- devrequest * cmd = (devrequest *) urb->setup_packet;
+ struct usb_ctrlrequest * cmd = (struct usb_ctrlrequest *) urb->setup_packet;
void * data = urb->transfer_buffer;
int leni = urb->transfer_buffer_length;
int len = 0;
@@ -1929,10 +1929,10 @@ static int rh_submit_urb (urb_t * urb)
return 0;
}
- bmRType_bReq = cmd->requesttype | (cmd->request << 8);
- wValue = le16_to_cpu (cmd->value);
- wIndex = le16_to_cpu (cmd->index);
- wLength = le16_to_cpu (cmd->length);
+ bmRType_bReq = cmd->bRequestType | (cmd->bRequest << 8);
+ wValue = le16_to_cpu (cmd->wValue);
+ wIndex = le16_to_cpu (cmd->wIndex);
+ wLength = le16_to_cpu (cmd->wLength);
switch (bmRType_bReq) {
/* Request Destination:
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 1c570b549..4abfb6559 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -138,9 +138,37 @@ static struct usb_skel *minor_table[MAX_DEVICES];
/* lock to protect the minor_table structure */
static DECLARE_MUTEX (minor_table_mutex);
-/* file operations needed when we register this driver */
+/*
+ * File operations needed when we register this driver.
+ * This assumes that this driver NEEDS file operations,
+ * of course, which means that the driver is expected
+ * to have a node in the /dev directory. If the USB
+ * device were for a network interface then the driver
+ * would use "struct net_driver" instead, and a serial
+ * device would use "struct tty_driver".
+ */
static struct file_operations skel_fops = {
+ /*
+ * The owner field is part of the module-locking
+ * mechanism. The idea is that the kernel knows
+ * which module to increment the use-counter of
+ * BEFORE it calls the device's open() function.
+ * This also means that the kernel can decrement
+ * the use-counter again before calling release()
+ * or should the open() function fail.
+ *
+ * Not all device structures have an "owner" field
+ * yet. "struct file_operations" and "struct net_device"
+ * do, while "struct tty_driver" does not. If the struct
+ * has an "owner" field, then initialize it to the value
+ * THIS_MODULE and the kernel will handle all module
+ * locking for you automatically. Otherwise, you must
+ * increment the use-counter in the open() function
+ * and decrement it again in the release() function
+ * yourself.
+ */
owner: THIS_MODULE,
+
read: skel_read,
write: skel_write,
ioctl: skel_ioctl,
@@ -215,7 +243,11 @@ static int skel_open (struct inode *inode, struct file *file)
return -ENODEV;
}
- /* increment our usage count for the module */
+ /* Increment our usage count for the module.
+ * This is redundant here, because "struct file_operations"
+ * has an "owner" field. This line is included here soley as
+ * a reference for drivers using lesser structures... ;-)
+ */
MOD_INC_USE_COUNT;
/* lock our minor table and get our local data for this minor */
@@ -278,8 +310,8 @@ static int skel_release (struct inode *inode, struct file *file)
/* the device was unplugged before the file was released */
up (&dev->sem);
skel_delete (dev);
- MOD_DEC_USE_COUNT;
up (&minor_table_mutex);
+ MOD_DEC_USE_COUNT;
return 0;
}
diff --git a/drivers/usb/usb-uhci.c b/drivers/usb/usb-uhci.c
index 0dc842716..50660b98e 100644
--- a/drivers/usb/usb-uhci.c
+++ b/drivers/usb/usb-uhci.c
@@ -1082,7 +1082,7 @@ _static void uhci_urb_dma_sync(uhci_t *s, urb_t *urb, urb_priv_t *urb_priv)
{
if (urb_priv->setup_packet_dma)
pci_dma_sync_single(s->uhci_pci, urb_priv->setup_packet_dma,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
if (urb_priv->transfer_buffer_dma)
pci_dma_sync_single(s->uhci_pci, urb_priv->transfer_buffer_dma,
@@ -1096,7 +1096,7 @@ _static void uhci_urb_dma_unmap(uhci_t *s, urb_t *urb, urb_priv_t *urb_priv)
{
if (urb_priv->setup_packet_dma) {
pci_unmap_single(s->uhci_pci, urb_priv->setup_packet_dma,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
urb_priv->setup_packet_dma = 0;
}
if (urb_priv->transfer_buffer_dma) {
@@ -1675,7 +1675,7 @@ _static int uhci_submit_urb (urb_t *urb)
if (type == PIPE_CONTROL)
urb_priv->setup_packet_dma = pci_map_single(s->uhci_pci, urb->setup_packet,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
if (urb->transfer_buffer_length)
urb_priv->transfer_buffer_dma = pci_map_single(s->uhci_pci,
@@ -1960,7 +1960,7 @@ _static int rh_submit_urb (urb_t *urb)
struct usb_device *usb_dev = urb->dev;
uhci_t *uhci = usb_dev->bus->hcpriv;
unsigned int pipe = urb->pipe;
- devrequest *cmd = (devrequest *) urb->setup_packet;
+ struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
void *data = urb->transfer_buffer;
int leni = urb->transfer_buffer_length;
int len = 0;
@@ -1986,10 +1986,10 @@ _static int rh_submit_urb (urb_t *urb)
}
- bmRType_bReq = cmd->requesttype | cmd->request << 8;
- wValue = le16_to_cpu (cmd->value);
- wIndex = le16_to_cpu (cmd->index);
- wLength = le16_to_cpu (cmd->length);
+ bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
+ wValue = le16_to_cpu (cmd->wValue);
+ wIndex = le16_to_cpu (cmd->wIndex);
+ wLength = le16_to_cpu (cmd->wLength);
for (i = 0; i < 8; i++)
uhci->rh.c_p_r[i] = 0;
diff --git a/drivers/usb/usb.c b/drivers/usb/usb.c
index 624b30e1c..ce267032d 100644
--- a/drivers/usb/usb.c
+++ b/drivers/usb/usb.c
@@ -1287,7 +1287,7 @@ static int usb_start_wait_urb(urb_t *urb, int timeout, int* actual_length)
/*-------------------------------------------------------------------*/
// returns status (negative) or length (positive)
int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe,
- devrequest *cmd, void *data, int len, int timeout)
+ struct usb_ctrlrequest *cmd, void *data, int len, int timeout)
{
urb_t *urb;
int retv;
@@ -1331,17 +1331,17 @@ int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe,
int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
__u16 value, __u16 index, void *data, __u16 size, int timeout)
{
- devrequest *dr = kmalloc(sizeof(devrequest), GFP_KERNEL);
+ struct usb_ctrlrequest *dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
int ret;
if (!dr)
return -ENOMEM;
- dr->requesttype = requesttype;
- dr->request = request;
- dr->value = cpu_to_le16p(&value);
- dr->index = cpu_to_le16p(&index);
- dr->length = cpu_to_le16p(&size);
+ dr->bRequestType= requesttype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16p(&value);
+ dr->wIndex = cpu_to_le16p(&index);
+ dr->wLength = cpu_to_le16p(&size);
//dbg("usb_control_msg");
@@ -1535,6 +1535,9 @@ static int usb_parse_interface(struct usb_interface *interface, unsigned char *b
}
ifp = interface->altsetting + interface->num_altsetting;
+ ifp->endpoint = NULL;
+ ifp->extra = NULL;
+ ifp->extralen = 0;
interface->num_altsetting++;
memcpy(ifp, buffer, USB_DT_INTERFACE_SIZE);
@@ -1576,10 +1579,7 @@ static int usb_parse_interface(struct usb_interface *interface, unsigned char *b
/* Copy any unknown descriptors into a storage area for */
/* drivers to later parse */
len = (int)(buffer - begin);
- if (!len) {
- ifp->extra = NULL;
- ifp->extralen = 0;
- } else {
+ if (len) {
ifp->extra = kmalloc(len, GFP_KERNEL);
if (!ifp->extra) {
diff --git a/drivers/usb/usbkbd.c b/drivers/usb/usbkbd.c
index 365adb3bb..5085dab3b 100644
--- a/drivers/usb/usbkbd.c
+++ b/drivers/usb/usbkbd.c
@@ -74,7 +74,7 @@ struct usb_kbd {
unsigned char new[8];
unsigned char old[8];
struct urb irq, led;
- devrequest dr;
+ struct usb_ctrlrequest dr;
unsigned char leds, newleds;
char name[128];
int open;
@@ -218,11 +218,11 @@ static void *usb_kbd_probe(struct usb_device *dev, unsigned int ifnum,
FILL_INT_URB(&kbd->irq, dev, pipe, kbd->new, maxp > 8 ? 8 : maxp,
usb_kbd_irq, kbd, endpoint->bInterval);
- kbd->dr.requesttype = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
- kbd->dr.request = HID_REQ_SET_REPORT;
- kbd->dr.value = 0x200;
- kbd->dr.index = interface->bInterfaceNumber;
- kbd->dr.length = 1;
+ kbd->dr.bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kbd->dr.bRequest = HID_REQ_SET_REPORT;
+ kbd->dr.wValue = 0x200;
+ kbd->dr.wIndex = interface->bInterfaceNumber;
+ kbd->dr.wLength = 1;
kbd->dev.name = kbd->name;
kbd->dev.idbus = BUS_USB;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 3a9c713fd..b73559da9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1119,7 +1119,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
psinfo.pr_state = i;
psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
psinfo.pr_zomb = psinfo.pr_sname == 'Z';
- psinfo.pr_nice = current->nice;
+ psinfo.pr_nice = current->__nice;
psinfo.pr_flag = current->flags;
psinfo.pr_uid = NEW_TO_OLD_UID(current->uid);
psinfo.pr_gid = NEW_TO_OLD_GID(current->gid);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 7e04cec34..672bd95c7 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -627,7 +627,6 @@ int blkdev_open(struct inode * inode, struct file * filp)
int blkdev_put(struct block_device *bdev, int kind)
{
int ret = 0;
- kdev_t rdev = to_kdev_t(bdev->bd_dev); /* this should become bdev */
struct inode *bd_inode = bdev->bd_inode;
down(&bdev->bd_sem);
@@ -635,7 +634,7 @@ int blkdev_put(struct block_device *bdev, int kind)
if (kind == BDEV_FILE)
__block_fsync(bd_inode);
else if (kind == BDEV_FS)
- fsync_no_super(rdev);
+ fsync_no_super(bdev);
if (!--bdev->bd_openers)
kill_bdev(bdev);
if (bdev->bd_op->release)
diff --git a/fs/buffer.c b/fs/buffer.c
index 816686216..b55ac6aba 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -334,8 +334,9 @@ int fsync_super(struct super_block *sb)
return sync_buffers(dev, 1);
}
-int fsync_no_super(kdev_t dev)
+int fsync_no_super(struct block_device *bdev)
{
+ kdev_t dev = to_kdev_t(bdev->bd_dev);
sync_buffers(dev, 0);
return sync_buffers(dev, 1);
}
@@ -733,9 +734,7 @@ static void free_more_memory(void)
wakeup_bdflush();
try_to_free_pages(zone, GFP_NOFS, 0);
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- __set_current_state(TASK_RUNNING);
- schedule();
+ yield();
}
void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
@@ -1023,8 +1022,9 @@ void invalidate_inode_buffers(struct inode *inode)
* 14.02.92: changed it to sync dirty buffers a bit: better performance
* when the filesystem starts to get full of dirty blocks (I hope).
*/
-struct buffer_head * getblk(kdev_t dev, sector_t block, int size)
+struct buffer_head * __getblk(struct block_device *bdev, sector_t block, int size)
{
+ kdev_t dev = to_kdev_t(bdev->bd_dev);
for (;;) {
struct buffer_head * bh;
@@ -1176,11 +1176,10 @@ void __bforget(struct buffer_head * buf)
* Reads a specified block, and returns buffer head that
* contains it. It returns NULL if the block was unreadable.
*/
-struct buffer_head * bread(kdev_t dev, int block, int size)
+struct buffer_head * __bread(struct block_device *bdev, int block, int size)
{
- struct buffer_head * bh;
+ struct buffer_head * bh = __getblk(bdev, block, size);
- bh = getblk(dev, block, size);
touch_buffer(bh);
if (buffer_uptodate(bh))
return bh;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index e274aac53..b092d720c 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -54,12 +54,12 @@ int journal_no_write[2];
* amount of time. This is for crash/recovery testing.
*/
-static void make_rdonly(kdev_t dev, int *no_write)
+static void make_rdonly(struct block_device *bdev, int *no_write)
{
- if (kdev_val(dev)) {
+ if (bdev) {
printk(KERN_WARNING "Turning device %s read-only\n",
- bdevname(dev));
- *no_write = 0xdead0000 + kdev_val(dev);
+ bdevname(to_kdev_t(bdev->bd_dev)));
+ *no_write = 0xdead0000 + bdev->bd_dev;
}
}
@@ -67,7 +67,7 @@ static void turn_fs_readonly(unsigned long arg)
{
struct super_block *sb = (struct super_block *)arg;
- make_rdonly(sb->s_dev, &journal_no_write[0]);
+ make_rdonly(sb->s_bdev, &journal_no_write[0]);
make_rdonly(EXT3_SB(sb)->s_journal->j_dev, &journal_no_write[1]);
wake_up(&EXT3_SB(sb)->ro_wait_queue);
}
@@ -400,7 +400,6 @@ void ext3_put_super (struct super_block * sb)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
struct ext3_super_block *es = sbi->s_es;
- kdev_t j_dev = sbi->s_journal->j_dev;
int i;
journal_destroy(sbi->s_journal);
@@ -429,15 +428,15 @@ void ext3_put_super (struct super_block * sb)
dump_orphan_list(sb, sbi);
J_ASSERT(list_empty(&sbi->s_orphan));
- invalidate_buffers(sb->s_dev);
- if (!kdev_same(j_dev, sb->s_dev)) {
+ invalidate_bdev(sb->s_bdev, 0);
+ if (sbi->journal_bdev != sb->s_bdev) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
* hotswapped, and it breaks the `ro-after' testing code.
*/
- fsync_no_super(j_dev);
- invalidate_buffers(j_dev);
+ fsync_no_super(sbi->journal_bdev);
+ invalidate_bdev(sbi->journal_bdev, 0);
ext3_blkdev_remove(sbi);
}
clear_ro_after(sb);
@@ -715,7 +714,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
sb->s_id);
if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
printk("external journal on %s\n",
- bdevname(EXT3_SB(sb)->s_journal->j_dev));
+ bdevname(to_kdev_t(EXT3_SB(sb)->s_journal->j_dev->bd_dev)));
} else {
printk("internal journal\n");
}
@@ -1284,7 +1283,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
sb_block = EXT3_MIN_BLOCK_SIZE / blocksize;
offset = EXT3_MIN_BLOCK_SIZE % blocksize;
set_blocksize(j_dev, blocksize);
- if (!(bh = bread(j_dev, sb_block, blocksize))) {
+ if (!(bh = __bread(bdev, sb_block, blocksize))) {
printk(KERN_ERR "EXT3-fs: couldn't read superblock of "
"external journal\n");
goto out_bdev;
@@ -1310,7 +1309,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
start = sb_block + 1;
brelse(bh); /* we're done with the superblock */
- journal = journal_init_dev(j_dev, sb->s_dev,
+ journal = journal_init_dev(bdev, sb->s_bdev,
start, len, blocksize);
if (!journal) {
printk(KERN_ERR "EXT3-fs: failed to create device journal\n");
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 49b7b3592..a483de6c2 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -460,8 +460,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
printk (KERN_NOTICE __FUNCTION__
": ENOMEM at get_unused_buffer_head, "
"trying again.\n");
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
} while (!new_bh);
/* keep subsequent assertions sane */
@@ -476,7 +475,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
new_jh->b_transaction = NULL;
new_bh->b_size = jh2bh(jh_in)->b_size;
- new_bh->b_dev = transaction->t_journal->j_dev;
+ new_bh->b_dev = to_kdev_t(transaction->t_journal->j_dev->bd_dev);
new_bh->b_blocknr = blocknr;
new_bh->b_state |= (1 << BH_Mapped) | (1 << BH_Dirty);
@@ -640,7 +639,8 @@ int journal_bmap(journal_t *journal, unsigned long blocknr,
printk (KERN_ALERT __FUNCTION__
": journal block not found "
"at offset %lu on %s\n",
- blocknr, bdevname(journal->j_dev));
+ blocknr,
+ bdevname(to_kdev_t(journal->j_dev->bd_dev)));
err = -EIO;
__journal_abort_soft(journal, err);
}
@@ -667,7 +667,7 @@ struct journal_head * journal_get_descriptor_buffer(journal_t *journal)
if (err)
return NULL;
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
bh->b_state |= (1 << BH_Dirty);
BUFFER_TRACE(bh, "return this buffer");
return journal_add_journal_head(bh);
@@ -736,7 +736,8 @@ fail:
* must have all data blocks preallocated.
*/
-journal_t * journal_init_dev(kdev_t dev, kdev_t fs_dev,
+journal_t * journal_init_dev(struct block_device *bdev,
+ struct block_device *fs_dev,
int start, int len, int blocksize)
{
journal_t *journal = journal_init_common();
@@ -745,13 +746,13 @@ journal_t * journal_init_dev(kdev_t dev, kdev_t fs_dev,
if (!journal)
return NULL;
- journal->j_dev = dev;
+ journal->j_dev = bdev;
journal->j_fs_dev = fs_dev;
journal->j_blk_offset = start;
journal->j_maxlen = len;
journal->j_blocksize = blocksize;
- bh = getblk(journal->j_dev, start, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, start, journal->j_blocksize);
J_ASSERT(bh != NULL);
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
@@ -769,8 +770,7 @@ journal_t * journal_init_inode (struct inode *inode)
if (!journal)
return NULL;
- journal->j_dev = inode->i_dev;
- journal->j_fs_dev = inode->i_dev;
+ journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
journal->j_inode = inode;
jbd_debug(1,
"journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
@@ -790,7 +790,7 @@ journal_t * journal_init_inode (struct inode *inode)
return NULL;
}
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
J_ASSERT(bh != NULL);
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
@@ -883,17 +883,18 @@ int journal_create (journal_t *journal)
err = journal_bmap(journal, i, &blocknr);
if (err)
return err;
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
- wait_on_buffer(bh);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ lock_buffer(bh);
memset (bh->b_data, 0, journal->j_blocksize);
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
BUFFER_TRACE(bh, "marking uptodate");
mark_buffer_uptodate(bh, 1);
+ unlock_buffer(bh);
__brelse(bh);
}
- sync_dev(journal->j_dev);
+ fsync_dev(to_kdev_t(journal->j_dev->bd_dev));
jbd_debug(1, "JBD: journal cleared.\n");
/* OK, fill in the initial static fields in the new superblock */
@@ -1357,14 +1358,14 @@ int journal_wipe (journal_t *journal, int write)
const char * journal_dev_name(journal_t *journal)
{
- kdev_t dev;
+ struct block_device *bdev;
if (journal->j_inode)
- dev = journal->j_inode->i_dev;
+ bdev = journal->j_inode->i_sb->s_bdev;
else
- dev = journal->j_dev;
+ bdev = journal->j_dev;
- return bdevname(dev);
+ return bdevname(to_kdev_t(bdev->bd_dev));
}
/*
@@ -1539,8 +1540,7 @@ void * __jbd_kmalloc (char *where, size_t size, int flags, int retry)
last_warning = jiffies;
}
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
}
@@ -1598,8 +1598,7 @@ static struct journal_head *journal_alloc_journal_head(void)
last_warning = jiffies;
}
while (ret == 0) {
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
}
}
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index e8e416306..b4f2cd890 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -95,7 +95,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
goto failed;
}
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
if (!bh) {
err = -ENOMEM;
goto failed;
@@ -148,7 +148,7 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
return err;
}
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
if (!bh)
return -ENOMEM;
@@ -460,8 +460,9 @@ static int do_one_pass(journal_t *journal,
/* Find a buffer for the new
* data being restored */
- nbh = getblk(journal->j_fs_dev, blocknr,
- journal->j_blocksize);
+ nbh = __getblk(journal->j_fs_dev,
+ blocknr,
+ journal->j_blocksize);
if (nbh == NULL) {
printk(KERN_ERR
"JBD: Out of memory "
@@ -472,6 +473,7 @@ static int do_one_pass(journal_t *journal,
goto failed;
}
+ lock_buffer(nbh);
memcpy(nbh->b_data, obh->b_data,
journal->j_blocksize);
if (flags & JFS_FLAG_ESCAPE) {
@@ -485,6 +487,7 @@ static int do_one_pass(journal_t *journal,
mark_buffer_uptodate(nbh, 1);
++info->nr_replays;
/* ll_rw_block(WRITE, 1, &nbh); */
+ unlock_buffer(nbh);
brelse(obh);
brelse(nbh);
}
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index 01b7684e9..faf2b412b 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -137,8 +137,7 @@ oom:
if (!journal_oom_retry)
return -ENOMEM;
jbd_debug(1, "ENOMEM in " __FUNCTION__ ", retrying.\n");
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
goto repeat;
}
@@ -291,7 +290,7 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
return -EINVAL;
}
- dev = journal->j_fs_dev;
+ dev = to_kdev_t(journal->j_fs_dev->bd_dev);
bh = bh_in;
if (!bh) {
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 10b54892d..16d8e3ed2 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1376,9 +1376,7 @@ int journal_stop(handle_t *handle)
if (handle->h_sync) {
do {
old_handle_count = transaction->t_handle_count;
- set_current_state(TASK_RUNNING);
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
} while (old_handle_count != transaction->t_handle_count);
}
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index b0f01c283..6cd846e46 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -106,8 +106,10 @@ static int jffs2_garbage_collect_thread(void *_c)
sprintf(current->comm, "jffs2_gcd_mtd%d", c->mtd->index);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/* FIXME in the 2.2 backport */
current->nice = 10;
+#endif
for (;;) {
spin_lock_irq(&current->sigmask_lock);
diff --git a/fs/locks.c b/fs/locks.c
index e85e1f846..eeaa45b14 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -445,8 +445,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker, unsigned int wait)
/* Let the blocked process remove waiter from the
* block list when it gets scheduled.
*/
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
} else {
/* Remove waiter from the block list, because by the
* time it wakes up blocker won't exist any more.
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index a96bfbbb5..64f549723 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1107,9 +1107,6 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
inode->i_blocks = fattr->du.nfs2.blocks;
inode->i_blksize = fattr->du.nfs2.blocksize;
}
- inode->i_rdev = NODEV;
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- inode->i_rdev = to_kdev_t(fattr->rdev);
/* Update attrtimeo value */
if (!invalid && time_after(jiffies, NFS_ATTRTIMEO_UPDATE(inode)+NFS_ATTRTIMEO(inode))) {
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ec88c3ccb..f11966985 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -96,8 +96,7 @@ nfs_create_request(struct file *file, struct inode *inode,
continue;
if (signalled() && (server->flags & NFS_MOUNT_INTR))
return ERR_PTR(-ERESTARTSYS);
- current->policy = SCHED_YIELD;
- schedule();
+ yield();
}
/* Initialize the request struct. Initially, we assume a
diff --git a/fs/proc/array.c b/fs/proc/array.c
index c25745603..4ebf80ca1 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -335,8 +335,12 @@ int proc_pid_stat(struct task_struct *task, char * buffer)
/* scale priority and nice values from timeslices to -20..20 */
/* to make it look like a "normal" Unix priority/nice value */
- priority = task->dyn_prio;
- nice = task->nice;
+ priority = task->prio;
+ if (priority >= MAX_RT_PRIO)
+ priority -= MAX_RT_PRIO;
+ else
+ priority = priority-100;
+ nice = task->__nice;
read_lock(&tasklist_lock);
ppid = task->pid ? task->p_opptr->pid : 0;
@@ -386,7 +390,7 @@ int proc_pid_stat(struct task_struct *task, char * buffer)
task->nswap,
task->cnswap,
task->exit_signal,
- task->processor);
+ task->cpu);
if(mm)
mmput(mm);
return res;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index b0cda6de4..5dc1a8f7b 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -84,11 +84,11 @@ static int loadavg_read_proc(char *page, char **start, off_t off,
a = avenrun[0] + (FIXED_1/200);
b = avenrun[1] + (FIXED_1/200);
c = avenrun[2] + (FIXED_1/200);
- len = sprintf(page,"%d.%02d %d.%02d %d.%02d %d/%d %d\n",
+ len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
LOAD_INT(a), LOAD_FRAC(a),
LOAD_INT(b), LOAD_FRAC(b),
LOAD_INT(c), LOAD_FRAC(c),
- nr_running, nr_threads, last_pid);
+ nr_running(), nr_threads, last_pid);
return proc_calc_metrics(page, start, off, count, eof, len);
}
@@ -100,7 +100,7 @@ static int uptime_read_proc(char *page, char **start, off_t off,
int len;
uptime = jiffies;
- idle = init_tasks[0]->times.tms_utime + init_tasks[0]->times.tms_stime;
+ idle = init_task.times.tms_utime + init_task.times.tms_stime;
/* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but
that would overflow about every five days at HZ == 100.
@@ -291,10 +291,10 @@ static int kstat_read_proc(char *page, char **start, off_t off,
}
len += sprintf(page + len,
- "\nctxt %u\n"
+ "\nctxt %lu\n"
"btime %lu\n"
"processes %lu\n",
- kstat.context_swtch,
+ nr_context_switches(),
xtime.tv_sec - jif / HZ,
total_forks);
diff --git a/fs/reiserfs/buffer2.c b/fs/reiserfs/buffer2.c
index 4ef62b2c9..7e854531e 100644
--- a/fs/reiserfs/buffer2.c
+++ b/fs/reiserfs/buffer2.c
@@ -46,9 +46,7 @@ void wait_buffer_until_released (const struct buffer_head * bh)
buffer_journal_dirty(bh) ? ' ' : '!');
}
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- /* current->dyn_prio = 0; */
- schedule();
+ yield();
}
if (repeat_counter > 30000000) {
reiserfs_warning("vs-3051: done waiting, ignore vs-3050 messages for (%b)\n", bh) ;
@@ -67,11 +65,11 @@ void wait_buffer_until_released (const struct buffer_head * bh)
struct buffer_head * reiserfs_bread (struct super_block *super, int n_block)
{
struct buffer_head *result;
- PROC_EXP( unsigned int ctx_switches = kstat.context_swtch );
+ PROC_EXP( unsigned int ctx_switches = nr_context_switches() );
result = sb_bread(super, n_block);
PROC_INFO_INC( super, breads );
- PROC_EXP( if( kstat.context_swtch != ctx_switches )
+ PROC_EXP( if( nr_context_switches() != ctx_switches )
PROC_INFO_INC( super, bread_miss ) );
return result;
}
@@ -148,9 +146,7 @@ static int get_new_buffer_near_blocknr(
if ( ! (++repeat_counter % 10000) )
printk("get_new_buffer(%u): counter(%d) too big", current->pid, repeat_counter);
#endif
-
- current->time_slice = 0;
- schedule();
+ yield();
}
#ifdef CONFIG_REISERFS_CHECK
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 7bef738e2..634747f31 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -147,8 +147,7 @@ repeat:
}
bn = allocate_bitmap_node(p_s_sb) ;
if (!bn) {
- current->policy |= SCHED_YIELD ;
- schedule() ;
+ yield();
goto repeat ;
}
return bn ;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 487975385..bbafd4958 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -1167,9 +1167,7 @@ int reiserfs_rename (struct inode * old_dir, struct dentry *old_dentry,
#if 0
// FIXME: do we need this? shouldn't we simply continue?
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- /*current->time_slice = 0;*/
- schedule();
+ yield();
#endif
continue;
}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index cbdc1a349..482b03d94 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1200,8 +1200,7 @@ static char prepare_for_delete_or_cut(
#endif
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
/* This loop can be optimized. */
} while ( (*p_n_removed < n_unfm_number || need_research) &&
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index fc4cb9c38..6fd8df562 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -448,10 +448,7 @@ void ufs_truncate (struct inode * inode)
if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
ufs_sync_inode (inode);
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- schedule ();
-
-
+ yield();
}
offset = inode->i_size & uspi->s_fshift;
if (offset) {
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index a3063cacc..b3d1e3a3f 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -75,6 +75,14 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
:"=m" (ADDR)
:"Ir" (nr));
}
+
+static __inline__ void __clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ "btrl %1,%0"
+ :"=m" (ADDR)
+ :"Ir" (nr));
+}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 43cabfcf2..36fd740a4 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -7,6 +7,28 @@
#include <asm/pgalloc.h>
/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 168-bit bitmap where the first 128 bits are
+ * unlikely to be clear. It's guaranteed that at least one of the 168
+ * bits is cleared.
+ */
+#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
+# error update this function.
+#endif
+
+static inline int sched_find_first_zero_bit(unsigned long *b)
+{
+ unsigned int rt;
+
+ rt = b[0] & b[1] & b[2] & b[3];
+ if (unlikely(rt != 0xffffffff))
+ return find_first_zero_bit(b, MAX_RT_PRIO);
+
+ if (b[4] != ~0)
+ return ffz(b[4]) + MAX_RT_PRIO;
+ return ffz(b[5]) + 32 + MAX_RT_PRIO;
+}
+/*
* possibly do the LDT unload here?
*/
#define destroy_context(mm) do { } while(0)
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index 63a47164f..090573f59 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -224,6 +224,7 @@ struct tlb_state
{
struct mm_struct *active_mm;
int state;
+ char __cacheline_padding[24];
};
extern struct tlb_state cpu_tlbstate[NR_CPUS];
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index ce81f42c9..34820d600 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -63,6 +63,7 @@ extern int cpu_sibling_map[];
extern void smp_flush_tlb(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_send_reschedule(int cpu);
+extern void smp_send_reschedule_all(void);
extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
@@ -104,7 +105,7 @@ extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial
* so this is correct in the x86 case.
*/
-#define smp_processor_id() (current->processor)
+#define smp_processor_id() (current->cpu)
static __inline int hard_smp_processor_id(void)
{
@@ -122,17 +123,5 @@ static __inline int logical_smp_processor_id(void)
#define NO_PROC_ID 0xFF /* No processor magic marker */
-/*
- * This magic constant controls our willingness to transfer
- * a process across CPUs. Such a transfer incurs misses on the L1
- * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
- * gut feeling is this will vary by board in value. For a board
- * with separate L2 cache it probably depends also on the RSS, and
- * for a board with shared L2 cache it ought to decay fast as other
- * processes are run.
- */
-
-#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
-
#endif
#endif
diff --git a/include/asm-i386/smplock.h b/include/asm-i386/smplock.h
index 864351c54..10cfc1fd0 100644
--- a/include/asm-i386/smplock.h
+++ b/include/asm-i386/smplock.h
@@ -15,21 +15,22 @@ extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
-#define release_kernel_lock(task, cpu) \
-do { \
- if (task->lock_depth >= 0) \
- spin_unlock(&kernel_flag); \
- release_irqlock(cpu); \
- __sti(); \
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) { \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+ } \
} while (0)
/*
* Re-acquire the kernel lock
*/
-#define reacquire_kernel_lock(task) \
-do { \
- if (task->lock_depth >= 0) \
- spin_lock(&kernel_flag); \
+#define reacquire_kernel_lock(task) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) \
+ spin_lock(&kernel_flag); \
} while (0)
diff --git a/include/asm-sparc/mmu_context.h b/include/asm-sparc/mmu_context.h
index 274707e2d..8391aff3c 100644
--- a/include/asm-sparc/mmu_context.h
+++ b/include/asm-sparc/mmu_context.h
@@ -5,6 +5,29 @@
#ifndef __ASSEMBLY__
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 168-bit bitmap where the first 128 bits are
+ * unlikely to be clear. It's guaranteed that at least one of the 168
+ * bits is cleared.
+ */
+#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
+# error update this function.
+#endif
+
+static inline int sched_find_first_zero_bit(unsigned long *b)
+{
+ unsigned int rt;
+
+ rt = b[0] & b[1] & b[2] & b[3];
+ if (unlikely(rt != 0xffffffff))
+ return find_first_zero_bit(b, MAX_RT_PRIO);
+
+ if (b[4] != ~0)
+ return ffz(b[4]) + MAX_RT_PRIO;
+ return ffz(b[5]) + 32 + MAX_RT_PRIO;
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index dbdab2c1e..c283f5157 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -190,8 +190,6 @@ extern __inline__ void smp_send_stop(void) { }
#define MBOX_IDLECPU2 0xFD
#define MBOX_STOPCPU2 0xFE
-#define PROC_CHANGE_PENALTY 15
-
#endif /* !(CONFIG_SMP) */
#define NO_PROC_ID 0xFF
diff --git a/include/asm-sparc/smplock.h b/include/asm-sparc/smplock.h
index 96565069c..dd2cc2b54 100644
--- a/include/asm-sparc/smplock.h
+++ b/include/asm-sparc/smplock.h
@@ -3,31 +3,35 @@
*
* Default SMP lock implementation
*/
+#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
-#define kernel_locked() spin_is_locked(&kernel_flag)
+#define kernel_locked() \
+ (spin_is_locked(&kernel_flag) &&\
+ (current->lock_depth >= 0))
/*
* Release global kernel lock and global interrupt lock
*/
-#define release_kernel_lock(task, cpu) \
-do { \
- if (task->lock_depth >= 0) \
- spin_unlock(&kernel_flag); \
- release_irqlock(cpu); \
- __sti(); \
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) { \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+ } \
} while (0)
/*
* Re-acquire the kernel lock
*/
-#define reacquire_kernel_lock(task) \
-do { \
- if (task->lock_depth >= 0) \
- spin_lock(&kernel_flag); \
+#define reacquire_kernel_lock(task) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) \
+ spin_lock(&kernel_flag); \
} while (0)
@@ -38,14 +42,14 @@ do { \
* so we only need to worry about other
* CPU's.
*/
-extern __inline__ void lock_kernel(void)
-{
- if (!++current->lock_depth)
- spin_lock(&kernel_flag);
-}
+#define lock_kernel() \
+do { \
+ if (!++current->lock_depth) \
+ spin_lock(&kernel_flag); \
+} while(0)
-extern __inline__ void unlock_kernel(void)
-{
- if (--current->lock_depth < 0)
- spin_unlock(&kernel_flag);
-}
+#define unlock_kernel() \
+do { \
+ if (--current->lock_depth < 0) \
+ spin_unlock(&kernel_flag); \
+} while(0)
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 62fe0afbc..b65ab3291 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.51 2001-08-17 04:55:09 kanoj Exp $ */
+/* $Id: mmu_context.h,v 1.52 2002-01-11 08:45:38 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
@@ -27,6 +27,27 @@
#include <asm/system.h>
#include <asm/spitfire.h>
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 168-bit bitmap where the first 128 bits are
+ * unlikely to be clear. It's guaranteed that at least one of the 168
+ * bits is cleared.
+ */
+#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
+# error update this function.
+#endif
+
+static inline int sched_find_first_zero_bit(unsigned long *b)
+{
+ unsigned long rt;
+
+ rt = b[0] & b[1];
+ if (unlikely(rt != 0xffffffffffffffff))
+ return find_first_zero_bit(b, MAX_RT_PRIO);
+
+ return ffz(b[2]) + MAX_RT_PRIO;
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 5757decb7..d73d3e657 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -103,20 +103,22 @@ extern __inline__ int hard_smp_processor_id(void)
}
}
-#define smp_processor_id() (current->processor)
+#define smp_processor_id() (current->cpu)
/* This needn't do anything as we do not sleep the cpu
* inside of the idler task, so an interrupt is not needed
* to get a clean fast response.
*
+ * XXX Reverify this assumption... -DaveM
+ *
* Addendum: We do want it to do something for the signal
* delivery case, we detect that by just seeing
* if we are trying to send this to an idler or not.
*/
-extern __inline__ void smp_send_reschedule(int cpu)
+static __inline__ void smp_send_reschedule(int cpu)
{
extern void smp_receive_signal(int);
- if(cpu_data[cpu].idle_volume == 0)
+ if (cpu_data[cpu].idle_volume == 0)
smp_receive_signal(cpu);
}
@@ -127,8 +129,6 @@ extern __inline__ void smp_send_stop(void) { }
#endif /* !(__ASSEMBLY__) */
-#define PROC_CHANGE_PENALTY 20
-
#endif /* !(CONFIG_SMP) */
#define NO_PROC_ID 0xFF
diff --git a/include/asm-sparc64/smplock.h b/include/asm-sparc64/smplock.h
index 5263f0d51..dd2cc2b54 100644
--- a/include/asm-sparc64/smplock.h
+++ b/include/asm-sparc64/smplock.h
@@ -16,21 +16,22 @@ extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
-#define release_kernel_lock(task, cpu) \
-do { \
- if (task->lock_depth >= 0) \
- spin_unlock(&kernel_flag); \
- release_irqlock(cpu); \
- __sti(); \
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) { \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+ } \
} while (0)
/*
* Re-acquire the kernel lock
*/
-#define reacquire_kernel_lock(task) \
-do { \
- if (task->lock_depth >= 0) \
- spin_lock(&kernel_flag); \
+#define reacquire_kernel_lock(task) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) \
+ spin_lock(&kernel_flag); \
} while (0)
@@ -41,14 +42,14 @@ do { \
* so we only need to worry about other
* CPU's.
*/
-#define lock_kernel() \
-do { \
- if (!++current->lock_depth) \
- spin_lock(&kernel_flag); \
+#define lock_kernel() \
+do { \
+ if (!++current->lock_depth) \
+ spin_lock(&kernel_flag); \
} while(0)
-#define unlock_kernel() \
-do { \
- if (--current->lock_depth < 0) \
- spin_unlock(&kernel_flag); \
+#define unlock_kernel() \
+do { \
+ if (--current->lock_depth < 0) \
+ spin_unlock(&kernel_flag); \
} while(0)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a83af6000..b102c2d51 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1216,7 +1216,7 @@ extern int sync_buffers(kdev_t, int);
extern void sync_dev(kdev_t);
extern int fsync_dev(kdev_t);
extern int fsync_super(struct super_block *);
-extern int fsync_no_super(kdev_t);
+extern int fsync_no_super(struct block_device *);
extern void sync_inodes_sb(struct super_block *);
extern int osync_inode_buffers(struct inode *);
extern int osync_inode_data_buffers(struct inode *);
@@ -1358,7 +1358,20 @@ extern void remove_inode_hash(struct inode *);
extern struct file * get_empty_filp(void);
extern void file_move(struct file *f, struct list_head *list);
extern struct buffer_head * get_hash_table(kdev_t, sector_t, int);
-extern struct buffer_head * getblk(kdev_t, sector_t, int);
+extern struct buffer_head * __getblk(struct block_device *, sector_t, int);
+static inline struct buffer_head * getblk(kdev_t dev, sector_t block, int size)
+{
+ struct block_device *bdev;
+ struct buffer_head *bh;
+ bdev = bdget(kdev_t_to_nr(dev));
+ if (!bdev) {
+ printk("No block device for %s\n", bdevname(dev));
+ BUG();
+ }
+ bh = __getblk(bdev, block, size);
+ atomic_dec(&bdev->bd_count);
+ return bh;
+}
extern void ll_rw_block(int, int, struct buffer_head * bh[]);
extern int submit_bh(int, struct buffer_head *);
struct bio;
@@ -1379,14 +1392,27 @@ static inline void bforget(struct buffer_head *buf)
extern int set_blocksize(kdev_t, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
-extern struct buffer_head * bread(kdev_t, int, int);
+extern struct buffer_head * __bread(struct block_device *, int, int);
+static inline struct buffer_head * bread(kdev_t dev, int block, int size)
+{
+ struct block_device *bdev;
+ struct buffer_head *bh;
+ bdev = bdget(kdev_t_to_nr(dev));
+ if (!bdev) {
+ printk("No block device for %s\n", bdevname(dev));
+ BUG();
+ }
+ bh = __bread(bdev, block, size);
+ atomic_dec(&bdev->bd_count);
+ return bh;
+}
static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
{
- return bread(sb->s_dev, block, sb->s_blocksize);
+ return __bread(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
{
- return getblk(sb->s_dev, block, sb->s_blocksize);
+ return __getblk(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head * sb_get_hash_table(struct super_block *sb, int block)
{
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 2cb980fbb..cb0ba707e 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -482,13 +482,13 @@ struct journal_s
/* Device, blocksize and starting block offset for the location
* where we store the journal. */
- kdev_t j_dev;
+ struct block_device * j_dev;
int j_blocksize;
unsigned int j_blk_offset;
/* Device which holds the client fs. For internal journal this
* will be equal to j_dev. */
- kdev_t j_fs_dev;
+ struct block_device * j_fs_dev;
/* Total maximum capacity of the journal region on disk. */
unsigned int j_maxlen;
@@ -649,7 +649,8 @@ extern int journal_flush (journal_t *);
extern void journal_lock_updates (journal_t *);
extern void journal_unlock_updates (journal_t *);
-extern journal_t * journal_init_dev(kdev_t dev, kdev_t fs_dev,
+extern journal_t * journal_init_dev(struct block_device *bdev,
+ struct block_device *fs_dev,
int start, int len, int bsize);
extern journal_t * journal_init_inode (struct inode *);
extern int journal_update_format (journal_t *);
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index d685da827..a75048fbc 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -32,11 +32,12 @@ struct kernel_stat {
unsigned int ipackets, opackets;
unsigned int ierrors, oerrors;
unsigned int collisions;
- unsigned int context_swtch;
};
extern struct kernel_stat kstat;
+extern unsigned long nr_context_switches(void);
+
#if !defined(CONFIG_ARCH_S390)
/*
* Number of interrupts per specific IRQ source, since bootup
diff --git a/include/linux/list.h b/include/linux/list.h
index 61b1493f5..b6238cdd4 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -19,6 +19,8 @@ struct list_head {
struct list_head *next, *prev;
};
+typedef struct list_head list_t;
+
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 905f26736..e017015a9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -72,8 +72,9 @@ extern unsigned long avenrun[]; /* Load averages */
#define CT_TO_SECS(x) ((x) / HZ)
#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
-extern int nr_running, nr_threads;
+extern int nr_threads;
extern int last_pid;
+extern unsigned long nr_running(void);
#include <linux/fs.h>
#include <linux/time.h>
@@ -116,12 +117,6 @@ extern int last_pid;
#define SCHED_FIFO 1
#define SCHED_RR 2
-/*
- * This is an additional bit set when we want to
- * yield the CPU for one re-schedule..
- */
-#define SCHED_YIELD 0x10
-
struct sched_param {
int sched_priority;
};
@@ -139,7 +134,6 @@ struct completion;
* a separate lock).
*/
extern rwlock_t tasklist_lock;
-extern spinlock_t runqueue_lock;
extern spinlock_t mmlist_lock;
extern void sched_init(void);
@@ -151,6 +145,7 @@ extern void update_process_times(int user);
extern void update_one_process(struct task_struct *p, unsigned long user,
unsigned long system, int cpu);
extern void expire_task(struct task_struct *p);
+extern void idle_tick(void);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
@@ -280,6 +275,9 @@ struct user_struct {
extern struct user_struct root_user;
#define INIT_USER (&root_user)
+typedef struct task_struct task_t;
+typedef struct prio_array prio_array_t;
+
struct task_struct {
/*
* offsets of these are hardcoded elsewhere - touch with care
@@ -297,37 +295,51 @@ struct task_struct {
int lock_depth; /* Lock depth */
-/*
- * offset 32 begins here on 32-bit platforms. We keep
- * all fields in a single cacheline that are needed for
- * the goodness() loop in schedule().
- */
- unsigned long dyn_prio;
- long nice;
- unsigned long policy;
- struct mm_struct *mm;
- int processor;
/*
- * cpus_runnable is ~0 if the process is not running on any
- * CPU. It's (1 << cpu) if it's running on a CPU. This mask
- * is updated under the runqueue lock.
- *
- * To determine whether a process might run on a CPU, this
- * mask is AND-ed with cpus_allowed.
+ * offset 32 begins here on 32-bit platforms.
*/
- unsigned long cpus_runnable, cpus_allowed;
+ unsigned int cpu;
+ int prio;
+ long __nice;
+ list_t run_list;
+ prio_array_t *array;
+
+ unsigned int time_slice;
+ unsigned long sleep_timestamp, run_timestamp;
+
/*
- * (only the 'next' pointer fits into the cacheline, but
- * that's just fine.)
+ * A task's four 'sleep history' entries.
+ *
+ * We track the last 4 seconds of time. (including the current second).
+ *
+ * A value of '0' means it has spent no time sleeping in that
+ * particular past second. The maximum value of 'HZ' means that
+ * the task spent all its time running in that particular second.
+ *
+ * 'hist_idx' points to the current second, which, unlike the other
+ * 3 entries, is only partially complete. This means that a value of
+ * '25' does not mean the task slept 25% of the time in the current
+ * second, it means that it spent 25 timer ticks sleeping in the
+ * current second.
+ *
+ * All this might look a bit complex, but it can be maintained very
+ * small overhead and it gives very good statistics, based on which
+ * the scheduler can decide whether a task is 'interactive' or a
+ * 'CPU hog'. See sched.c for more details.
*/
- struct list_head run_list;
- long time_slice;
- /* recalculation loop checkpoint */
- unsigned long rcl_last;
+ #define SLEEP_HIST_SIZE 4
+
+ int hist_idx;
+ int hist[SLEEP_HIST_SIZE];
+
+ unsigned long policy;
+ unsigned long cpus_allowed;
struct task_struct *next_task, *prev_task;
- struct mm_struct *active_mm;
+
+ struct mm_struct *mm, *active_mm;
struct list_head local_pages;
+
unsigned int allocation_order, nr_local_pages;
/* task state */
@@ -452,11 +464,66 @@ struct task_struct {
*/
#define _STK_LIM (8*1024*1024)
-#define MAX_DYNPRIO 40
-#define DEF_TSLICE (6 * HZ / 100)
-#define MAX_TSLICE (20 * HZ / 100)
-#define DEF_NICE (0)
+/*
+ * RT priorites go from 0 to 99, but internally we max
+ * them out at 128 to make it easier to search the
+ * scheduler bitmap.
+ */
+#define MAX_RT_PRIO 128
+/*
+ * The lower the priority of a process, the more likely it is
+ * to run. Priority of a process goes from 0 to 167. The 0-99
+ * priority range is allocated to RT tasks, the 128-167 range
+ * is for SCHED_OTHER tasks.
+ */
+#define MAX_PRIO (MAX_RT_PRIO+40)
+#define DEF_USER_NICE 0
+
+/*
+ * Scales user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ 24 ... 63 (MAX_PRIO-1) ]
+ *
+ * User-nice value of -20 == static priority 24, and
+ * user-nice value 19 == static priority 63. The lower
+ * the priority value, the higher the task's priority.
+ *
+ * Note that while static priority cannot go below 24,
+ * the priority of a process can go as low as 0.
+ */
+#define NICE_TO_PRIO(n) (MAX_PRIO-1 + (n) - 19)
+
+#define DEF_PRIO NICE_TO_PRIO(DEF_USER_NICE)
+
+/*
+ * Default timeslice is 90 msecs, maximum is 150 msecs.
+ * Minimum timeslice is 20 msecs.
+ */
+#define MIN_TIMESLICE ( 20 * HZ / 1000)
+#define MAX_TIMESLICE (150 * HZ / 1000)
+
+#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
+#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
+
+/*
+ * PRIO_TO_TIMESLICE scales priority values [ 100 ... 139 ]
+ * to initial time slice values [ MAX_TIMESLICE (150 msec) ... 2 ]
+ *
+ * The higher a process's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority process gets MIN_TIMESLICE worth of execution time.
+ */
+#define PRIO_TO_TIMESLICE(p) \
+ ((( (MAX_USER_PRIO-1-USER_PRIO(p))*(MAX_TIMESLICE-MIN_TIMESLICE) + \
+ MAX_USER_PRIO-1) / MAX_USER_PRIO) + MIN_TIMESLICE)
+
+#define RT_PRIO_TO_TIMESLICE(p) \
+ ((( (MAX_RT_PRIO-(p)-1)*(MAX_TIMESLICE-MIN_TIMESLICE) + \
+ MAX_RT_PRIO-1) / MAX_RT_PRIO) + MIN_TIMESLICE)
+extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
+extern void set_user_nice(task_t *p, long nice);
+asmlinkage long sys_sched_yield(void);
+#define yield() sys_sched_yield()
/*
* The default (Linux) execution domain.
@@ -475,16 +542,13 @@ extern struct exec_domain default_exec_domain;
addr_limit: KERNEL_DS, \
exec_domain: &default_exec_domain, \
lock_depth: -1, \
- dyn_prio: 0, \
- nice: DEF_NICE, \
+ __nice: DEF_USER_NICE, \
policy: SCHED_OTHER, \
+ cpus_allowed: -1, \
mm: NULL, \
active_mm: &init_mm, \
- cpus_runnable: -1, \
- cpus_allowed: -1, \
- run_list: { NULL, NULL }, \
- rcl_last: 0, \
- time_slice: DEF_TSLICE, \
+ run_list: LIST_HEAD_INIT(tsk.run_list), \
+ time_slice: PRIO_TO_TIMESLICE(DEF_PRIO), \
next_task: &tsk, \
prev_task: &tsk, \
p_opptr: &tsk, \
@@ -560,19 +624,6 @@ static inline struct task_struct *find_task_by_pid(int pid)
return p;
}
-#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)
-
-static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
-{
- tsk->processor = cpu;
- tsk->cpus_runnable = 1UL << cpu;
-}
-
-static inline void task_release_cpu(struct task_struct *tsk)
-{
- tsk->cpus_runnable = ~0UL;
-}
-
/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
extern void free_uid(struct user_struct *);
@@ -600,6 +651,7 @@ extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
+extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
@@ -794,6 +846,7 @@ extern void exit_sighand(struct task_struct *);
extern void reparent_to_init(void);
extern void daemonize(void);
+extern task_t *child_reaper;
extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);
@@ -802,6 +855,9 @@ extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+extern void wait_task_inactive(task_t * p);
+extern void kick_if_running(task_t * p);
+
#define __wait_event(wq, condition) \
do { \
wait_queue_t __wait; \
@@ -882,21 +938,8 @@ do { \
#define next_thread(p) \
list_entry((p)->thread_group.next, struct task_struct, thread_group)
-static inline void del_from_runqueue(struct task_struct * p)
-{
- nr_running--;
- list_del(&p->run_list);
- p->run_list.next = NULL;
-}
-
-static inline int task_on_runqueue(struct task_struct *p)
-{
- return (p->run_list.next != NULL);
-}
-
static inline void unhash_process(struct task_struct *p)
{
- if (task_on_runqueue(p)) BUG();
write_lock_irq(&tasklist_lock);
nr_threads--;
unhash_pid(p);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cf20a8bd6..bb1ff5c5e 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -86,6 +86,8 @@ extern volatile int smp_msg_id;
#define cpu_number_map(cpu) 0
#define smp_call_function(func,info,retry,wait) ({ 0; })
#define cpu_online_map 1
+static inline void smp_send_reschedule(int cpu) { }
+static inline void smp_send_reschedule_all(void) { }
#endif
#endif
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 6194ad03b..d53844b91 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -117,13 +117,26 @@ static __inline__ void wait_ms(unsigned int ms)
mdelay(ms);
}
-typedef struct {
- __u8 requesttype;
- __u8 request;
- __u16 value;
- __u16 index;
- __u16 length;
-} devrequest __attribute__ ((packed));
+/**
+ * struct usb_ctrlrequest - structure used to make USB device control requests easier to create and decode
+ * @bRequestType: matches the USB bmRequestType field
+ * @bRequest: matches the USB bRequest field
+ * @wValue: matches the USB wValue field
+ * @wIndex: matches the USB wIndex field
+ * @wLength: matches the USB wLength field
+ *
+ * This structure is used to send control requests to a USB device. It matches
+ * the different fields of the USB 2.0 Spec section 9.3, table 9-2. See the
+ * USB spec for a fuller description of the different fields, and what they are
+ * used for.
+ */
+struct usb_ctrlrequest {
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
+} __attribute__ ((packed));
/*
* USB device number allocation bitmap. There's one bitmap
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index 69008e236..0bf98f1d5 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -40,11 +40,11 @@
/* usbdevfs ioctl codes */
struct usbdevfs_ctrltransfer {
- __u8 requesttype;
- __u8 request;
- __u16 value;
- __u16 index;
- __u16 length;
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
__u32 timeout; /* in milliseconds */
void *data;
};
diff --git a/include/net/bluetooth/hci_usb.h b/include/net/bluetooth/hci_usb.h
index 348b4d49e..e257b8eb0 100644
--- a/include/net/bluetooth/hci_usb.h
+++ b/include/net/bluetooth/hci_usb.h
@@ -38,7 +38,7 @@
struct hci_usb {
struct usb_device *udev;
- devrequest dev_req;
+ struct usb_ctrlrequest dev_req;
struct urb *ctrl_urb;
struct urb *intr_urb;
struct urb *read_urb;
diff --git a/init/do_mounts.c b/init/do_mounts.c
index d954d493e..bccbedeb0 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -760,10 +760,8 @@ static void __init handle_initrd(void)
pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
if (pid > 0) {
- while (pid != wait(&i)) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ while (pid != wait(&i))
+ yield();
}
sys_mount("..", ".", NULL, MS_MOVE, NULL);
diff --git a/init/main.c b/init/main.c
index c7e9d0faa..14b9a11aa 100644
--- a/init/main.c
+++ b/init/main.c
@@ -312,18 +312,9 @@ static void __init smp_init(void)
/* Get other processors into their bootup holding patterns. */
smp_boot_cpus();
wait_init_idle = cpu_online_map;
- clear_bit(current->processor, &wait_init_idle); /* Don't wait on me! */
smp_threads_ready=1;
smp_commence();
-
- /* Wait for the other cpus to set up their idle processes */
- printk("Waiting on wait_init_idle (map = 0x%lx)\n", wait_init_idle);
- while (wait_init_idle) {
- cpu_relax();
- barrier();
- }
- printk("All processors have done init_idle\n");
}
#endif
@@ -337,9 +328,9 @@ static void __init smp_init(void)
static void rest_init(void)
{
+ init_idle(); /* This will also wait for all other CPUs */
kernel_thread(init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
unlock_kernel();
- current->need_resched = 1;
cpu_idle();
}
@@ -438,12 +429,21 @@ static void __init do_initcalls(void)
call = &__initcall_start;
do {
+#if 0
+ printk("INITCALL: Invoking %p\n", *call);
+#endif
(*call)();
call++;
} while (call < &__initcall_end);
/* Make sure there is no pending stuff from the initcall sequence */
+#if 0
+ printk("INITCALL: Flushing scheduled tasks...");
+#endif
flush_scheduled_tasks();
+#if 0
+ printk("done.\n");
+#endif
}
/*
diff --git a/kernel/capability.c b/kernel/capability.c
index 7aaf1a423..8fd0d4510 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -8,6 +8,8 @@
#include <linux/mm.h>
#include <asm/uaccess.h>
+unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
+
kernel_cap_t cap_bset = CAP_INIT_EFF_SET;
/* Note: never hold tasklist_lock while spinning for this one */
diff --git a/kernel/exit.c b/kernel/exit.c
index a3490404f..04774ebbc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,49 +29,39 @@ int getrusage(struct task_struct *, int, struct rusage *);
static void release_task(struct task_struct * p)
{
- if (p != current) {
+ unsigned long flags;
+
+ if (p == current)
+ BUG();
#ifdef CONFIG_SMP
- /*
- * Wait to make sure the process isn't on the
- * runqueue (active on some other CPU still)
- */
- for (;;) {
- task_lock(p);
- if (!task_has_cpu(p))
- break;
- task_unlock(p);
- do {
- cpu_relax();
- barrier();
- } while (task_has_cpu(p));
- }
- task_unlock(p);
+ wait_task_inactive(p);
#endif
- atomic_dec(&p->user->processes);
- free_uid(p->user);
- unhash_process(p);
-
- release_thread(p);
- current->cmin_flt += p->min_flt + p->cmin_flt;
- current->cmaj_flt += p->maj_flt + p->cmaj_flt;
- current->cnswap += p->nswap + p->cnswap;
- /*
- * Potentially available timeslices are retrieved
- * here - this way the parent does not get penalized
- * for creating too many processes.
- *
- * (this cannot be used to artificially 'generate'
- * timeslices, because any timeslice recovered here
- * was given away by the parent in the first place.)
- */
- current->time_slice += p->time_slice;
- if (current->time_slice > MAX_TSLICE)
- current->time_slice = MAX_TSLICE;
- p->pid = 0;
- free_task_struct(p);
- } else {
- printk("task releasing itself\n");
- }
+ atomic_dec(&p->user->processes);
+ free_uid(p->user);
+ unhash_process(p);
+
+ release_thread(p);
+ current->cmin_flt += p->min_flt + p->cmin_flt;
+ current->cmaj_flt += p->maj_flt + p->cmaj_flt;
+ current->cnswap += p->nswap + p->cnswap;
+ /*
+ * Potentially available timeslices are retrieved
+ * here - this way the parent does not get penalized
+ * for creating too many processes.
+ *
+ * (this cannot be used to artificially 'generate'
+ * timeslices, because any timeslice recovered here
+ * was given away by the parent in the first place.)
+ */
+ __save_flags(flags);
+ __cli();
+ current->time_slice += p->time_slice;
+ if (current->time_slice > MAX_TIMESLICE)
+ current->time_slice = MAX_TIMESLICE;
+ __restore_flags(flags);
+
+ p->pid = 0;
+ free_task_struct(p);
}
/*
@@ -151,6 +141,80 @@ static inline int has_stopped_jobs(int pgrp)
return retval;
}
+/**
+ * reparent_to_init() - Reparent the calling kernel thread to the init task.
+ *
+ * If a kernel thread is launched as a result of a system call, or if
+ * it ever exits, it should generally reparent itself to init so that
+ * it is correctly cleaned up on exit.
+ *
+ * The various task state such as scheduling policy and priority may have
+ * been inherited from a user process, so we reset them to sane values here.
+ *
+ * NOTE that reparent_to_init() gives the caller full capabilities.
+ */
+void reparent_to_init(void)
+{
+ write_lock_irq(&tasklist_lock);
+
+ /* Reparent to init */
+ REMOVE_LINKS(current);
+ current->p_pptr = child_reaper;
+ current->p_opptr = child_reaper;
+ SET_LINKS(current);
+
+ /* Set the exit signal to SIGCHLD so we signal init on exit */
+ current->exit_signal = SIGCHLD;
+
+ current->ptrace = 0;
+ if ((current->policy == SCHED_OTHER) &&
+ (current->__nice < DEF_USER_NICE))
+ set_user_nice(current, DEF_USER_NICE);
+ /* cpus_allowed? */
+ /* rt_priority? */
+ /* signals? */
+ current->cap_effective = CAP_INIT_EFF_SET;
+ current->cap_inheritable = CAP_INIT_INH_SET;
+ current->cap_permitted = CAP_FULL_SET;
+ current->keep_capabilities = 0;
+ memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
+ current->user = INIT_USER;
+
+ write_unlock_irq(&tasklist_lock);
+}
+
+/*
+ * Put all the gunge required to become a kernel thread without
+ * attached user resources in one place where it belongs.
+ */
+
+void daemonize(void)
+{
+ struct fs_struct *fs;
+
+
+ /*
+ * If we were started as result of loading a module, close all of the
+ * user space pages. We don't need them, and if we didn't close them
+ * they would be locked into memory.
+ */
+ exit_mm(current);
+
+ current->session = 1;
+ current->pgrp = 1;
+ current->tty = NULL;
+
+ /* Become as one with the init task */
+
+ exit_fs(current); /* current->fs->count--; */
+ fs = init_task.fs;
+ current->fs = fs;
+ atomic_inc(&fs->count);
+ exit_files(current);
+ current->files = init_task.files;
+ atomic_inc(&current->files->count);
+}
+
/*
* When we die, we re-parent all our children.
* Try to give them to another thread in our process
diff --git a/kernel/fork.c b/kernel/fork.c
index 4c3114eef..4774f9cdb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -29,7 +29,6 @@
/* The idle threads do not count.. */
int nr_threads;
-int nr_running;
int max_threads;
unsigned long total_forks; /* Handle normal Linux uptimes. */
@@ -37,6 +36,8 @@ int last_pid;
struct task_struct *pidhash[PIDHASH_SZ];
+rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
+
void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
@@ -564,6 +565,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
struct pt_regs *regs, unsigned long stack_size)
{
int retval;
+ unsigned long flags;
struct task_struct *p;
struct completion vfork;
@@ -617,8 +619,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
copy_flags(clone_flags, p);
p->pid = get_pid(clone_flags);
- p->run_list.next = NULL;
- p->run_list.prev = NULL;
+ INIT_LIST_HEAD(&p->run_list);
p->p_cptr = NULL;
init_waitqueue_head(&p->wait_chldexit);
@@ -644,14 +645,16 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
#ifdef CONFIG_SMP
{
int i;
- p->cpus_runnable = ~0UL;
- p->processor = current->processor;
+
+ p->cpu = smp_processor_id();
+
/* ?? should we just memset this ?? */
for(i = 0; i < smp_num_cpus; i++)
p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0;
spin_lock_init(&p->sigmask_lock);
}
#endif
+ p->array = NULL;
p->lock_depth = -1; /* -1 = no lock */
p->start_time = jiffies;
@@ -685,15 +688,26 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
p->pdeath_signal = 0;
/*
- * "share" dynamic priority between parent and child, thus the
- * total amount of dynamic priorities in the system doesnt change,
- * more scheduling fairness. This is only important in the first
- * timeslice, on the long run the scheduling behaviour is unchanged.
+ * Share the timeslice between parent and child, thus the
+ * total amount of pending timeslices in the system doesnt change,
+ * resulting in more scheduling fairness.
*/
+ __save_flags(flags);
+ __cli();
p->time_slice = (current->time_slice + 1) >> 1;
current->time_slice >>= 1;
- if (!current->time_slice)
- current->need_resched = 1;
+ if (!current->time_slice) {
+ /*
+ * This case is rare, it happens when the parent has only
+ * a single jiffy left from its timeslice. Taking the
+ * runqueue lock is not a problem.
+ */
+ current->time_slice = 1;
+ expire_task(current);
+ }
+ p->sleep_timestamp = p->run_timestamp = jiffies;
+ p->hist[0] = p->hist[1] = p->hist[2] = p->hist[3] = 0;
+ __restore_flags(flags);
/*
* Ok, add it to the run-queues and make it
@@ -730,10 +744,23 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
if (p->ptrace & PT_PTRACED)
send_sig(SIGSTOP, p, 1);
- wake_up_process(p); /* do this last */
+#define RUN_CHILD_FIRST 1
+#if RUN_CHILD_FIRST
+ wake_up_forked_process(p); /* do this last */
+#else
+ wake_up_process(p); /* do this last */
+#endif
++total_forks;
if (clone_flags & CLONE_VFORK)
wait_for_completion(&vfork);
+#if RUN_CHILD_FIRST
+ else
+ /*
+ * Let the child process run first, to avoid most of the
+ * COW overhead when the child exec()s afterwards.
+ */
+ current->need_resched = 1;
+#endif
fork_out:
return retval;
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 56dd5a66a..eded2d7fb 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -191,12 +191,12 @@ EXPORT_SYMBOL(notify_change);
EXPORT_SYMBOL(set_blocksize);
EXPORT_SYMBOL(sb_set_blocksize);
EXPORT_SYMBOL(sb_min_blocksize);
-EXPORT_SYMBOL(getblk);
+EXPORT_SYMBOL(__getblk);
EXPORT_SYMBOL(cdget);
EXPORT_SYMBOL(cdput);
EXPORT_SYMBOL(bdget);
EXPORT_SYMBOL(bdput);
-EXPORT_SYMBOL(bread);
+EXPORT_SYMBOL(__bread);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(ll_rw_block);
@@ -441,6 +441,8 @@ EXPORT_SYMBOL(interruptible_sleep_on);
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
EXPORT_SYMBOL(schedule);
EXPORT_SYMBOL(schedule_timeout);
+EXPORT_SYMBOL(sys_sched_yield);
+EXPORT_SYMBOL(set_user_nice);
EXPORT_SYMBOL(jiffies);
EXPORT_SYMBOL(xtime);
EXPORT_SYMBOL(do_gettimeofday);
@@ -452,6 +454,7 @@ EXPORT_SYMBOL(loops_per_jiffy);
EXPORT_SYMBOL(kstat);
EXPORT_SYMBOL(nr_running);
+EXPORT_SYMBOL(nr_context_switches);
/* misc */
EXPORT_SYMBOL(panic);
diff --git a/kernel/printk.c b/kernel/printk.c
index 4505465f2..365aed1d3 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/interrupt.h> /* For in_interrupt() */
#include <linux/config.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b729b6f30..79af05d4f 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -31,20 +31,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
if (child->state != TASK_STOPPED)
return -ESRCH;
#ifdef CONFIG_SMP
- /* Make sure the child gets off its CPU.. */
- for (;;) {
- task_lock(child);
- if (!task_has_cpu(child))
- break;
- task_unlock(child);
- do {
- if (child->state != TASK_STOPPED)
- return -ESRCH;
- barrier();
- cpu_relax();
- } while (task_has_cpu(child));
- }
- task_unlock(child);
+ wait_task_inactive(child);
#endif
}
diff --git a/kernel/sched.c b/kernel/sched.c
index dc5eea2af..86c8b1e55 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -12,331 +12,351 @@
* 1998-12-28 Implemented better SMP scheduling by Ingo Molnar
*/
-/*
- * 'sched.c' is the main kernel file. It contains scheduling primitives
- * (sleep_on, wakeup, schedule etc) as well as a number of simple system
- * call functions (type getpid()), which just extract a field from
- * current-task
- */
-
-#include <linux/config.h>
#include <linux/mm.h>
+#include <linux/nmi.h>
#include <linux/init.h>
+#include <asm/uaccess.h>
#include <linux/smp_lock.h>
-#include <linux/nmi.h>
#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/completion.h>
-#include <linux/prefetch.h>
-#include <linux/compiler.h>
-
-#include <asm/uaccess.h>
#include <asm/mmu_context.h>
-extern void timer_bh(void);
-extern void tqueue_bh(void);
-extern void immediate_bh(void);
+#define BITMAP_SIZE ((((MAX_PRIO+7)/8)+sizeof(long)-1)/sizeof(long))
-/*
- * scheduler variables
- */
-
-unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
+typedef struct runqueue runqueue_t;
-extern void mem_use(void);
+struct prio_array {
+ int nr_active;
+ spinlock_t *lock;
+ runqueue_t *rq;
+ unsigned long bitmap[BITMAP_SIZE];
+ list_t queue[MAX_PRIO];
+};
/*
- * Scheduling quanta.
+ * This is the main, per-CPU runqueue data structure.
+ *
+ * Locking rule: those places that want to lock multiple runqueues
+ * (such as the load balancing or the process migration code), lock
+ * acquire operations must be ordered by rq->cpu.
*
- * NOTE! The unix "nice" value influences how long a process
- * gets. The nice value ranges from -20 to +19, where a -20
- * is a "high-priority" task, and a "+10" is a low-priority
- * task. The default time slice for zero-nice tasks will be 37ms.
+ * The RT event id is used to avoid calling into the RT scheduler
+ * if there is a RT task active in an SMP system but there is no
+ * RT scheduling activity otherwise.
*/
-#define NICE_RANGE 40
-#define MIN_NICE_TSLICE 10000
-#define MAX_NICE_TSLICE 90000
-#define TASK_TIMESLICE(p) ((int) ts_table[19 - (p)->nice])
-
-static unsigned char ts_table[NICE_RANGE];
-
-#define MM_AFFINITY_BONUS 1
+static struct runqueue {
+ int cpu;
+ spinlock_t lock;
+ unsigned long nr_running, nr_switches, last_rt_event;
+ task_t *curr, *idle;
+ prio_array_t *active, *expired, arrays[2];
+ char __pad [SMP_CACHE_BYTES];
+} runqueues [NR_CPUS] __cacheline_aligned;
+
+#define this_rq() (runqueues + smp_processor_id())
+#define task_rq(p) (runqueues + (p)->cpu)
+#define cpu_rq(cpu) (runqueues + (cpu))
+#define cpu_curr(cpu) (runqueues[(cpu)].curr)
+#define rt_task(p) ((p)->policy != SCHED_OTHER)
+
+#define lock_task_rq(rq,p,flags) \
+do { \
+repeat_lock_task: \
+ rq = task_rq(p); \
+ spin_lock_irqsave(&rq->lock, flags); \
+ if (unlikely((rq)->cpu != (p)->cpu)) { \
+ spin_unlock_irqrestore(&rq->lock, flags); \
+ goto repeat_lock_task; \
+ } \
+} while (0)
+
+#define unlock_task_rq(rq,p,flags) \
+ spin_unlock_irqrestore(&rq->lock, flags)
/*
- * Init task must be ok at boot for the ix86 as we will check its signals
- * via the SMP irq return path.
+ * Adding/removing a task to/from a priority array:
*/
-
-struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
+static inline void dequeue_task(struct task_struct *p, prio_array_t *array)
+{
+ array->nr_active--;
+ list_del_init(&p->run_list);
+ if (list_empty(array->queue + p->prio))
+ __set_bit(p->prio, array->bitmap);
+}
+
+static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
+{
+ list_add_tail(&p->run_list, array->queue + p->prio);
+ __clear_bit(p->prio, array->bitmap);
+ array->nr_active++;
+ p->array = array;
+}
/*
- * The tasklist_lock protects the linked list of processes.
+ * This is the per-process load estimator. Processes that generate
+ * more load than the system can handle get a priority penalty.
*
- * The runqueue_lock locks the parts that actually access
- * and change the run-queues, and have to be interrupt-safe.
+ * The estimator uses a 4-entry load-history ringbuffer which is
+ * updated whenever a task is moved to/from the runqueue. The load
+ * estimate is also updated from the timer tick to get an accurate
+ * estimation of currently executing tasks as well.
*
- * If both locks are to be concurrently held, the runqueue_lock
- * nests inside the tasklist_lock.
+ * The 4-entry p->hist[4] array holds the 'sleep history' of
+ * every task. Every entry holds the number of time ticks spent
+ * sleeping in the past 4 seconds. Three of the entries belong to
+ * one-one second in the past, the fourth entry belongs to the current
+ * second. (the p->hist_idx index is used in fact as a rotating index
+ * to reduce overhead.)
*
- * task->alloc_lock nests inside tasklist_lock.
+ * The array elements are integers in the range of 0-HZ. If HZ is 100,
+ * then '100' means a process has spent 100% of its time sleeping, in
+ * that particular second of time. '0' means the process has spent all
+ * its time on the runqueue - ie. it was a CPU hog in that second.
+ *
+ * For RAM usage and algorithmic overhead reasons we do not want a too
+ * big history buffer. It's also usually not interesting to the scheduler
+ * to know whether a task was idle or not 10 minutes ago. 'Recent behavior'
+ * is what matters, if a task was mostly sleeping recently then it's a
+ * 'good' interactive task. If it has spent most (or all) of its time
+ * running then it's a 'bad' CPU-hog that gets a priority penalty.
+ *
+ * The load estimator itself was written to be fast as well in every
+ * circumstance. Eg. if a task is context switching heavily then we do
+ * not call into the estimator, only about once per timer tick, on average.
*/
-spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */
-rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
-
-static LIST_HEAD(runqueue_head);
-
-static unsigned long rcl_curr;
/*
- * We align per-CPU scheduling data on cacheline boundaries,
- * to prevent cacheline ping-pong.
+ * The 'history index' goes forward in time, if one second passes then
+ * the index is increased by 1 via this function. We wrap around the
+ * index if it reaches 4. (The modulo is fast with the current
+ * SLEEP_HIST_SIZE of 4.)
*/
-static union {
- struct schedule_data {
- struct task_struct * curr;
- cycles_t last_schedule;
- } schedule_data;
- char __pad [SMP_CACHE_BYTES];
-} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
-
-#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
-#define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule
-
-struct kernel_stat kstat;
-extern struct task_struct *child_reaper;
-
-#ifdef CONFIG_SMP
-
-#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
-#define can_schedule(p,cpu) \
- ((p)->cpus_runnable & (p)->cpus_allowed & (1 << cpu))
-
-#else
-
-#define idle_task(cpu) (&init_task)
-#define can_schedule(p,cpu) (1)
-
-#endif
+static inline void new_second(task_t *p)
+{
+ p->hist_idx = (p->hist_idx + 1) % SLEEP_HIST_SIZE;
+}
-void scheduling_functions_start_here(void) { }
+/*
+ * This function clears the load-history entries when a task has spent
+ * more than 4 seconds running.
+ */
+static inline void clear_hist(task_t *p)
+{
+ p->hist[0] = p->hist[1] = p->hist[2] = p->hist[3] = 0;
+}
/*
- * This is the function that decides how desirable a process is..
- * You can weigh different processes against each other depending
- * on what CPU they've run on lately etc to try to handle cache
- * and TLB miss penalties.
- *
- * Return values:
- * -1000: never select this
- * 0: out of time, recalculate counters (but it might still be
- * selected)
- * +ve: "goodness" value (the larger, the better)
- * +1000: realtime process, select this.
+ * This function fills in the load-history entries with the maximum
+ * values when a task has spent more than 4 seconds sleeping.
*/
+static inline void fill_hist(task_t *p)
+{
+ p->hist[0] = p->hist[1] = p->hist[2] = p->hist[3] = HZ;
+}
-static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
+/*
+ * This function is called when a task goes sleeping, ie. when the task
+ * has potentially spent a lot of time on the runqueue. p->run_timestamp
+ * is the time the task has started running, 'now' is the time when the
+ * task goes to sleep.
+ */
+static inline void update_sleep_avg_deactivate(task_t *p)
{
- int weight;
+ int idx;
+ unsigned long now = jiffies,
+ seconds_passed = now/HZ - p->run_timestamp/HZ;
/*
- * select the current process after every other
- * runnable process, but before the idle thread.
- * Also, dont trigger a counter recalculation.
+ * Do we have to update the history entries because a
+ * 'new second' has been started? If a new second has
+ * been started then we have to clear all the 'full'
+ * seconds that have been passed during the time the
+ * task was running, and the new current entry has
+ * to be cleared as well.
+ *
+ * Otherwise we only have to update the sleep timestamp.
*/
- weight = -1;
- if (p->policy & SCHED_YIELD)
- goto out;
+ if (unlikely(seconds_passed)) {
+ if (seconds_passed < SLEEP_HIST_SIZE)
+ for (idx = 0; idx < seconds_passed; idx++) {
+ new_second(p);
+ p->hist[p->hist_idx] = 0;
+ }
+ else
+ clear_hist(p);
+ }
+ p->sleep_timestamp = now;
+}
+
+/*
+ * This is called when a task gets runnable and gets moved to the runqueue.
+ * ie. when the task has potentially spent a lot of time sleeping.
+ * p->sleep_timestamp is the time the task has started sleeping, 'now' is
+ * the time when we go to the runqueue.
+ */
+static inline void update_sleep_avg_activate(task_t *p, unsigned long now)
+{
+ int idx;
+ unsigned long sleep_ticks,
+ seconds_passed = now/HZ - p->sleep_timestamp/HZ;
/*
- * Non-RT process - normal case first.
+ * Do we have to update the history entries because a
+ * 'new second' has been started? This is slightly more
+ * complex than the deactivate path, because in the deactivate
+ * path history entries are simply cleared, but here we have
+ * to add any potential time spent sleeping in the current
+ * second. This value is 'sleep_ticks' - it can be anywhere
+ * between 0 and 99. (it cannot be 100 because that would mean
+ * that the current second is over and we'd have to go to the
+ * next history entry.) Another detail is that we might
+ * have gone sleeping in this second, or in any previous second.
+ *
+ * Otherwise we only have to update the run timestamp and the
+ * current history entry.
*/
- if (p->policy == SCHED_OTHER) {
- /*
- * Give the process a first-approximation goodness value
- * according to the number of clock-ticks it has left.
- *
- * Don't do any other calculations if the time slice is
- * over..
- */
- if (!p->time_slice)
- return 0;
-
- weight = p->dyn_prio + 1;
-
-#ifdef CONFIG_SMP
- /* Give a largish advantage to the same processor... */
- /* (this is equivalent to penalizing other processors) */
- if (p->processor == this_cpu)
- weight += PROC_CHANGE_PENALTY;
-#endif
+ if (unlikely(seconds_passed)) {
+ if (seconds_passed < SLEEP_HIST_SIZE) {
+ /*
+ * Update the "last partially-slept" second's entry:
+ */
+ p->hist[p->hist_idx] += HZ - (p->sleep_timestamp % HZ);
+ new_second(p);
- /* .. and a slight advantage to the current MM */
- if (p->mm == this_mm || !p->mm)
- weight += MM_AFFINITY_BONUS;
- weight += 20 - p->nice;
- goto out;
- }
+ /*
+ * Clear any (optional) interim seconds that were
+ * spent fully sleeping:
+ */
+ for (idx = 1; idx < seconds_passed; idx++) {
+ new_second(p);
+ p->hist[p->hist_idx] = HZ;
+ }
+ } else
+ /*
+ * We slept more than 4 seconds, fill in the
+ * history:
+ */
+ fill_hist(p);
+ /* Clear the new current entry: */
+ p->hist[p->hist_idx] = 0;
+ sleep_ticks = now % HZ;
+ } else
+ sleep_ticks = now - p->sleep_timestamp;
/*
- * Realtime process, select the first one on the
- * runqueue (taking priorities within processes
- * into account).
+ * Update the current entry with the amount of
+ * ticks the task spent sleeping:
*/
- weight = 1000 + p->rt_priority;
-out:
- return weight;
+ p->hist[p->hist_idx] += sleep_ticks;
+ p->run_timestamp = now;
}
/*
- * the 'goodness value' of replacing a process on a given CPU.
- * positive value means 'replace', zero or negative means 'dont'.
+ * Get the current 'load average' of the task.
+ *
+ * Naively one would divide the sum by 4. But in fact the current entry
+ * is just a partial history, so we have to divide by the actual portion
+ * we recorded, which is somewhere between 3.0 and 4.0 seconds.
*/
-static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
+static inline unsigned int get_run_avg(task_t *p, unsigned long new)
{
- return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm);
+ return HZ - (p->hist[0] + p->hist[1] + p->hist[2] +
+ p->hist[3]) * HZ / ((SLEEP_HIST_SIZE-1)*HZ + (new % HZ));
}
-/*
- * This is ugly, but reschedule_idle() is very timing-critical.
- * We are called with the runqueue spinlock held and we must
- * not claim the tasklist_lock.
- */
-static FASTCALL(void reschedule_idle(struct task_struct * p));
-
-static void reschedule_idle(struct task_struct * p)
+static inline void activate_task(task_t *p, runqueue_t *rq)
{
-#ifdef CONFIG_SMP
- int this_cpu = smp_processor_id();
- struct task_struct *tsk, *target_tsk;
- int cpu, best_cpu, i, max_prio;
- cycles_t oldest_idle;
+ prio_array_t *array = rq->active;
+ unsigned long now = jiffies;
+ unsigned int penalty;
- /*
- * shortcut if the woken up task's last CPU is
- * idle now.
- */
- best_cpu = p->processor;
- if (can_schedule(p, best_cpu)) {
- tsk = idle_task(best_cpu);
- if (cpu_curr(best_cpu) == tsk) {
- int need_resched;
-send_now_idle:
- /*
- * If need_resched == -1 then we can skip sending
- * the IPI altogether, tsk->need_resched is
- * actively watched by the idle thread.
- */
- need_resched = tsk->need_resched;
- tsk->need_resched = 1;
- if ((best_cpu != this_cpu) && !need_resched)
- smp_send_reschedule(best_cpu);
- return;
- }
- }
+ if (likely(p->run_timestamp == now))
+ goto enqueue;
+ update_sleep_avg_activate(p, now);
/*
- * We know that the preferred CPU has a cache-affine current
- * process, lets try to find a new idle CPU for the woken-up
- * process. Select the least recently active idle CPU. (that
- * one will have the least active cache context.) Also find
- * the executing process which has the least priority.
+ * Give the process a priority penalty if it has not slept often
+ * enough in the past. We scale the priority penalty according
+ * to the current load of the runqueue, and the 'load history'
+ * this process has. Eg. if the CPU has 3 processes running
+ * right now then a process that has slept more than two-thirds
+ * of the time is considered to be 'interactive'. The higher
+ * the load of the CPUs is, the easier it is for a process to
+ * get a non-interactivity penalty.
+ *
+ * the return value of get_run_avg() is an integer between 0 and HZ.
+ * We scale this 'load value' to between 0 and MAX_USER_PRIO/3.
+ * A task that generates 100% load gets the maximum penalty.
*/
- oldest_idle = (cycles_t) -1;
- target_tsk = NULL;
- max_prio = 0;
-
- for (i = 0; i < smp_num_cpus; i++) {
- cpu = cpu_logical_map(i);
- if (!can_schedule(p, cpu))
- continue;
- tsk = cpu_curr(cpu);
- /*
- * We use the first available idle CPU. This creates
- * a priority list between idle CPUs, but this is not
- * a problem.
- */
- if (tsk == idle_task(cpu)) {
-#if defined(__i386__) && defined(CONFIG_SMP)
- /*
- * Check if two siblings are idle in the same
- * physical package. Use them if found.
- */
- if (smp_num_siblings == 2) {
- if (cpu_curr(cpu_sibling_map[cpu]) ==
- idle_task(cpu_sibling_map[cpu])) {
- oldest_idle = last_schedule(cpu);
- target_tsk = tsk;
- break;
- }
-
- }
-#endif
- if (last_schedule(cpu) < oldest_idle) {
- oldest_idle = last_schedule(cpu);
- target_tsk = tsk;
- }
- } else {
- if (oldest_idle == -1ULL) {
- int prio = preemption_goodness(tsk, p, cpu);
-
- if (prio > max_prio) {
- max_prio = prio;
- target_tsk = tsk;
- }
- }
- }
+ penalty = MAX_USER_PRIO * get_run_avg(p, now) / (3 * HZ);
+ if (!rt_task(p)) {
+ p->prio = NICE_TO_PRIO(p->__nice) + penalty;
+ if (p->prio > MAX_PRIO-1)
+ p->prio = MAX_PRIO-1;
}
- tsk = target_tsk;
- if (tsk) {
- if (oldest_idle != -1ULL) {
- best_cpu = tsk->processor;
- goto send_now_idle;
- }
- tsk->need_resched = 1;
- if (tsk->processor != this_cpu)
- smp_send_reschedule(tsk->processor);
- }
- return;
-
+enqueue:
+ enqueue_task(p, array);
+ rq->nr_running++;
+}
-#else /* UP */
- int this_cpu = smp_processor_id();
- struct task_struct *tsk;
+static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+ rq->nr_running--;
+ dequeue_task(p, p->array);
+ p->array = NULL;
+ update_sleep_avg_deactivate(p);
+}
- tsk = cpu_curr(this_cpu);
- if (preemption_goodness(tsk, p, this_cpu) > 0)
- tsk->need_resched = 1;
-#endif
+static inline void resched_task(task_t *p)
+{
+ int need_resched;
+
+ need_resched = p->need_resched;
+ wmb();
+ p->need_resched = 1;
+ if (!need_resched)
+ smp_send_reschedule(p->cpu);
}
+#ifdef CONFIG_SMP
+
/*
- * Careful!
- *
- * This has to add the process to the _beginning_ of the
- * run-queue, not the end. See the comment about "This is
- * subtle" in the scheduler proper..
+ * Wait for a process to unschedule. This is used by the exit() and
+ * ptrace() code.
*/
-static inline void add_to_runqueue(struct task_struct * p)
+void wait_task_inactive(task_t * p)
{
- p->dyn_prio += rcl_curr - p->rcl_last;
- p->rcl_last = rcl_curr;
- if (p->dyn_prio > MAX_DYNPRIO)
- p->dyn_prio = MAX_DYNPRIO;
- list_add(&p->run_list, &runqueue_head);
- nr_running++;
-}
+ unsigned long flags;
+ runqueue_t *rq;
-static inline void move_last_runqueue(struct task_struct * p)
-{
- list_del(&p->run_list);
- list_add_tail(&p->run_list, &runqueue_head);
+repeat:
+ rq = task_rq(p);
+ while (unlikely(rq->curr == p)) {
+ cpu_relax();
+ barrier();
+ }
+ lock_task_rq(rq, p, flags);
+ if (unlikely(rq->curr == p)) {
+ unlock_task_rq(rq, p, flags);
+ goto repeat;
+ }
+ unlock_task_rq(rq, p, flags);
}
-static inline void move_first_runqueue(struct task_struct * p)
+/*
+ * Kick the remote CPU if the task is running currently,
+ * this code is used by the signal code to signal tasks
+ * which are in user-mode as quickly as possible.
+ *
+ * (Note that we do this lockless - if the task does anything
+ * while the message is in flight then it will notice the
+ * sigpending condition anyway.)
+ */
+void kick_if_running(task_t * p)
{
- list_del(&p->run_list);
- list_add(&p->run_list, &runqueue_head);
+ if (p == task_rq(p)->curr)
+ resched_task(p);
}
+#endif
/*
* Wake up a process. Put it on the run-queue if it's not
@@ -346,385 +366,394 @@ static inline void move_first_runqueue(struct task_struct * p)
* "current->state = TASK_RUNNING" to mark yourself runnable
* without the overhead of this.
*/
-static inline int try_to_wake_up(struct task_struct * p, int synchronous)
+static int try_to_wake_up(task_t * p, int synchronous)
{
unsigned long flags;
int success = 0;
+ runqueue_t *rq;
- /*
- * We want the common case fall through straight, thus the goto.
- */
- spin_lock_irqsave(&runqueue_lock, flags);
+ lock_task_rq(rq, p, flags);
p->state = TASK_RUNNING;
- if (task_on_runqueue(p))
- goto out;
- add_to_runqueue(p);
- if (!synchronous || !(p->cpus_allowed & (1 << smp_processor_id())))
- reschedule_idle(p);
- success = 1;
-out:
- spin_unlock_irqrestore(&runqueue_lock, flags);
+ if (!p->array) {
+ if (!rt_task(p) && synchronous && (smp_processor_id() < p->cpu)) {
+ spin_lock(&this_rq()->lock);
+ p->cpu = smp_processor_id();
+ activate_task(p, this_rq());
+ spin_unlock(&this_rq()->lock);
+ } else {
+ activate_task(p, rq);
+ if ((rq->curr == rq->idle) ||
+ (p->prio < rq->curr->prio))
+ resched_task(rq->curr);
+ }
+ success = 1;
+ }
+ unlock_task_rq(rq, p, flags);
return success;
}
-inline int wake_up_process(struct task_struct * p)
+inline int wake_up_process(task_t * p)
{
return try_to_wake_up(p, 0);
}
-static void process_timeout(unsigned long __data)
-{
- wake_up_process((struct task_struct *)__data);
-}
-
-/**
- * schedule_timeout - sleep until timeout
- * @timeout: timeout value in jiffies
- *
- * Make the current task sleep until @timeout jiffies have
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
- * pass before the routine returns. The routine will return 0
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task. In this case the remaining time
- * in jiffies will be returned, or 0 if the timer expired in time
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- *
- * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
- * the CPU away without a bound on the timeout. In this case the return
- * value will be %MAX_SCHEDULE_TIMEOUT.
- *
- * In all cases the return value is guaranteed to be non-negative.
- */
-signed long schedule_timeout(signed long timeout)
+void wake_up_forked_process(task_t * p)
{
- struct timer_list timer;
- unsigned long expire;
+ runqueue_t *rq = this_rq();
- switch (timeout)
- {
- case MAX_SCHEDULE_TIMEOUT:
- /*
- * These two special cases are useful to be comfortable
- * in the caller. Nothing more. We could take
- * MAX_SCHEDULE_TIMEOUT from one of the negative value
- * but I' d like to return a valid offset (>=0) to allow
- * the caller to do everything it want with the retval.
- */
- schedule();
- goto out;
- default:
- /*
- * Another bit of PARANOID. Note that the retval will be
- * 0 since no piece of kernel is supposed to do a check
- * for a negative retval of schedule_timeout() (since it
- * should never happens anyway). You just have the printk()
- * that will tell you if something is gone wrong and where.
- */
- if (timeout < 0)
- {
- printk(KERN_ERR "schedule_timeout: wrong timeout "
- "value %lx from %p\n", timeout,
- __builtin_return_address(0));
- current->state = TASK_RUNNING;
- goto out;
- }
+ spin_lock_irq(&rq->lock);
+ p->state = TASK_RUNNING;
+ if (!rt_task(p)) {
+ p->prio += MAX_USER_PRIO/10;
+ if (p->prio > MAX_PRIO-1)
+ p->prio = MAX_PRIO-1;
}
+ activate_task(p, rq);
+ spin_unlock_irq(&rq->lock);
+}
- expire = timeout + jiffies;
-
- init_timer(&timer);
- timer.expires = expire;
- timer.data = (unsigned long) current;
- timer.function = process_timeout;
-
- add_timer(&timer);
- schedule();
- del_timer_sync(&timer);
-
- timeout = expire - jiffies;
-
- out:
- return timeout < 0 ? 0 : timeout;
+asmlinkage void schedule_tail(task_t *prev)
+{
+ spin_unlock_irq(&this_rq()->lock);
}
-/*
- * schedule_tail() is getting called from the fork return path. This
- * cleans up all remaining scheduler things, without impacting the
- * common case.
- */
-static inline void __schedule_tail(struct task_struct *prev)
+static inline void context_switch(task_t *prev, task_t *next)
{
-#ifdef CONFIG_SMP
- int policy;
+ struct mm_struct *mm = next->mm;
+ struct mm_struct *oldmm = prev->active_mm;
- /*
- * prev->policy can be written from here only before `prev'
- * can be scheduled (before setting prev->cpus_runnable to ~0UL).
- * Of course it must also be read before allowing prev
- * to be rescheduled, but since the write depends on the read
- * to complete, wmb() is enough. (the spin_lock() acquired
- * before setting cpus_runnable is not enough because the spin_lock()
- * common code semantics allows code outside the critical section
- * to enter inside the critical section)
- */
- policy = prev->policy;
- prev->policy = policy & ~SCHED_YIELD;
- wmb();
+ prepare_to_switch();
- /*
- * fast path falls through. We have to clear cpus_runnable before
- * checking prev->state to avoid a wakeup race. Protect against
- * the task exiting early.
- */
- task_lock(prev);
- task_release_cpu(prev);
- mb();
- if (prev->state == TASK_RUNNING)
- goto needs_resched;
+ if (!mm) {
+ next->active_mm = oldmm;
+ atomic_inc(&oldmm->mm_count);
+ enter_lazy_tlb(oldmm, next, smp_processor_id());
+ } else
+ switch_mm(oldmm, mm, next, smp_processor_id());
-out_unlock:
- task_unlock(prev); /* Synchronise here with release_task() if prev is TASK_ZOMBIE */
- return;
+ if (!prev->mm) {
+ prev->active_mm = NULL;
+ mmdrop(oldmm);
+ }
/*
- * Slow path - we 'push' the previous process and
- * reschedule_idle() will attempt to find a new
- * processor for it. (but it might preempt the
- * current process as well.) We must take the runqueue
- * lock and re-check prev->state to be correct. It might
- * still happen that this process has a preemption
- * 'in progress' already - but this is not a problem and
- * might happen in other circumstances as well.
+ * Here we just switch the register state and the stack. There are
+ * 3 processes affected by a context switch:
+ *
+ * prev ==> .... ==> (last => next)
+ *
+ * It's the 'much more previous' 'prev' that is on next's stack,
+ * but prev is set to (the just run) 'last' process by switch_to().
+ * This might sound slightly confusing but makes tons of sense.
*/
-needs_resched:
- {
- unsigned long flags;
+ switch_to(prev, next, prev);
+}
- /*
- * Avoid taking the runqueue lock in cases where
- * no preemption-check is necessery:
- */
- if ((prev == idle_task(smp_processor_id())) ||
- (policy & SCHED_YIELD))
- goto out_unlock;
+unsigned long nr_running(void)
+{
+ unsigned long i, sum = 0;
- spin_lock_irqsave(&runqueue_lock, flags);
- if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
- reschedule_idle(prev);
- spin_unlock_irqrestore(&runqueue_lock, flags);
- goto out_unlock;
- }
-#else
- prev->policy &= ~SCHED_YIELD;
-#endif /* CONFIG_SMP */
+ for (i = 0; i < smp_num_cpus; i++)
+ sum += cpu_rq(i)->nr_running;
+
+ return sum;
}
-asmlinkage void schedule_tail(struct task_struct *prev)
+unsigned long nr_context_switches(void)
{
- __schedule_tail(prev);
+ unsigned long i, sum = 0;
+
+ for (i = 0; i < smp_num_cpus; i++)
+ sum += cpu_rq(i)->nr_switches;
+
+ return sum;
}
-void expire_task(struct task_struct *p)
+static inline unsigned long max_rq_len(void)
{
- if (unlikely(!p->time_slice))
- goto need_resched;
+ unsigned long i, curr, max = 0;
- if (!--p->time_slice) {
- if (p->dyn_prio)
- p->dyn_prio--;
-need_resched:
- p->need_resched = 1;
+ for (i = 0; i < smp_num_cpus; i++) {
+ curr = cpu_rq(i)->nr_running;
+ if (curr > max)
+ max = curr;
}
+ return max;
}
/*
- * 'schedule()' is the scheduler function. It's a very simple and nice
- * scheduler: it's not perfect, but certainly works for most things.
- *
- * The goto is "interesting".
+ * Current runqueue is empty, try to find work on
+ * other runqueues.
*
- * NOTE!! Task 0 is the 'idle' task, which gets called when no other
- * tasks can run. It can not be killed, and it cannot sleep. The 'state'
- * information in task[0] is never used.
+ * We call this with the current runqueue locked,
+ * irqs disabled.
*/
-asmlinkage void schedule(void)
+static void load_balance(runqueue_t *this_rq)
{
- struct schedule_data * sched_data;
- struct task_struct *prev, *next, *p;
- struct list_head *tmp;
- int this_cpu, c;
-
-
- spin_lock_prefetch(&runqueue_lock);
-
- if (!current->active_mm) BUG();
-need_resched_back:
- prev = current;
- this_cpu = prev->processor;
-
- if (unlikely(in_interrupt())) {
- printk("Scheduling in interrupt\n");
- BUG();
- }
-
- release_kernel_lock(prev, this_cpu);
-
+ int nr_tasks, load, prev_max_load, max_load, idx, i;
+ task_t *next = this_rq->idle, *tmp;
+ runqueue_t *busiest, *rq_tmp;
+ prio_array_t *array;
+ list_t *head, *curr;
+
+ prev_max_load = max_rq_len();
+ nr_tasks = prev_max_load - this_rq->nr_running;
/*
- * 'sched_data' is protected by the fact that we can run
- * only one process per CPU.
+ * It needs an at least ~10% imbalance to trigger balancing:
*/
- sched_data = & aligned_data[this_cpu].schedule_data;
-
- spin_lock_irq(&runqueue_lock);
-
- /* move an exhausted RR process to be last.. */
- if (unlikely(prev->policy == SCHED_RR))
- if (!prev->time_slice) {
- prev->time_slice = TASK_TIMESLICE(prev);
- move_last_runqueue(prev);
- }
+ if (nr_tasks <= 1 + prev_max_load/8)
+ return;
+ prev_max_load++;
- switch (prev->state) {
- case TASK_INTERRUPTIBLE:
- if (signal_pending(prev)) {
- prev->state = TASK_RUNNING;
- break;
+repeat_search:
+ /*
+ * We search all runqueues to find the most busy one.
+ * We do this lockless to reduce cache-bouncing overhead,
+ * we re-check the source CPU with the lock held.
+ */
+ busiest = NULL;
+ max_load = 0;
+ for (i = 0; i < smp_num_cpus; i++) {
+ rq_tmp = cpu_rq(i);
+ load = rq_tmp->nr_running;
+ if ((load > max_load) && (load < prev_max_load) &&
+ (rq_tmp != this_rq)) {
+ busiest = rq_tmp;
+ max_load = load;
}
- default:
- del_from_runqueue(prev);
- case TASK_RUNNING:;
}
- prev->need_resched = 0;
+
+ if (likely(!busiest))
+ return;
+ if (max_load <= this_rq->nr_running)
+ return;
+ prev_max_load = max_load;
+ if (busiest->cpu < this_rq->cpu) {
+ spin_unlock(&this_rq->lock);
+ spin_lock(&busiest->lock);
+ spin_lock(&this_rq->lock);
+ } else
+ spin_lock(&busiest->lock);
+ if (busiest->nr_running <= this_rq->nr_running + 1)
+ goto out_unlock;
/*
- * this is the scheduler proper:
+ * We first consider expired tasks. Those will likely not run
+ * in the near future, thus switching CPUs has the least effect
+ * on them.
*/
+ if (busiest->expired->nr_active)
+ array = busiest->expired;
+ else
+ array = busiest->active;
-repeat_schedule:
+new_array:
/*
- * Default process to select..
+ * Load-balancing does not affect RT tasks, so we start the
+ * searching at priority 128.
*/
- next = idle_task(this_cpu);
- c = -1000;
- list_for_each(tmp, &runqueue_head) {
- p = list_entry(tmp, struct task_struct, run_list);
- if (can_schedule(p, this_cpu)) {
- int weight = goodness(p, this_cpu, prev->active_mm);
- if (weight > c)
- c = weight, next = p;
+ idx = MAX_RT_PRIO;
+skip_bitmap:
+ idx = find_next_zero_bit(array->bitmap, MAX_PRIO, idx);
+ if (idx == MAX_PRIO) {
+ if (array == busiest->expired) {
+ array = busiest->active;
+ goto new_array;
}
+ spin_unlock(&busiest->lock);
+ goto repeat_search;
}
- /* Do we need to re-calculate counters? */
- if (unlikely(!c)) {
- rcl_curr++;
- list_for_each(tmp, &runqueue_head) {
- p = list_entry(tmp, struct task_struct, run_list);
- p->time_slice = TASK_TIMESLICE(p);
- p->rcl_last = rcl_curr;
- }
- goto repeat_schedule;
+ head = array->queue + idx;
+ curr = head->next;
+skip_queue:
+ tmp = list_entry(curr, task_t, run_list);
+ if ((tmp == busiest->curr) || !(tmp->cpus_allowed & (1 << smp_processor_id()))) {
+ curr = curr->next;
+ if (curr != head)
+ goto skip_queue;
+ idx++;
+ goto skip_bitmap;
}
-
+ next = tmp;
/*
- * from this point on nothing can prevent us from
- * switching to the next task, save this fact in
- * sched_data.
+ * take the task out of the other runqueue and
+ * put it into this one:
*/
- sched_data->curr = next;
- task_set_cpu(next, this_cpu);
- spin_unlock_irq(&runqueue_lock);
-
- if (unlikely(prev == next)) {
- /* We won't go through the normal tail, so do this by hand */
- prev->policy &= ~SCHED_YIELD;
- goto same_process;
+ dequeue_task(next, array);
+ busiest->nr_running--;
+ next->cpu = smp_processor_id();
+ this_rq->nr_running++;
+ enqueue_task(next, this_rq->active);
+ if (next->prio < current->prio)
+ current->need_resched = 1;
+ if (--nr_tasks) {
+ if (array == busiest->expired) {
+ array = busiest->active;
+ goto new_array;
+ }
+ spin_unlock(&busiest->lock);
+ goto repeat_search;
}
+out_unlock:
+ spin_unlock(&busiest->lock);
+}
-#ifdef CONFIG_SMP
- /*
- * maintain the per-process 'last schedule' value.
- * (this has to be recalculated even if we reschedule to
- * the same process) Currently this is only used on SMP,
- * and it's approximate, so we do not have to maintain
- * it while holding the runqueue spinlock.
- */
- sched_data->last_schedule = get_cycles();
+#define REBALANCE_TICK (HZ/100)
- /*
- * We drop the scheduler lock early (it's a global spinlock),
- * thus we have to lock the previous process from getting
- * rescheduled during switch_to().
- */
+void idle_tick(void)
+{
+ unsigned long flags;
-#endif /* CONFIG_SMP */
+ if (!(jiffies % REBALANCE_TICK) && likely(this_rq()->curr != NULL)) {
+ spin_lock_irqsave(&this_rq()->lock, flags);
+ load_balance(this_rq());
+ spin_unlock_irqrestore(&this_rq()->lock, flags);
+ }
+}
- kstat.context_swtch++;
+void expire_task(task_t *p)
+{
+ runqueue_t *rq = this_rq();
+ unsigned long flags;
+
+ if (p->array != rq->active) {
+ p->need_resched = 1;
+ return;
+ }
/*
- * there are 3 processes which are affected by a context switch:
- *
- * prev == .... ==> (last => next)
- *
- * It's the 'much more previous' 'prev' that is on next's stack,
- * but prev is set to (the just run) 'last' process by switch_to().
- * This might sound slightly confusing but makes tons of sense.
+ * The task cannot change CPUs because it's the current task.
*/
- prepare_to_switch();
- {
- struct mm_struct *mm = next->mm;
- struct mm_struct *oldmm = prev->active_mm;
- if (!mm) {
- if (next->active_mm) BUG();
- next->active_mm = oldmm;
- atomic_inc(&oldmm->mm_count);
- enter_lazy_tlb(oldmm, next, this_cpu);
- } else {
- if (next->active_mm != mm) BUG();
- switch_mm(oldmm, mm, next, this_cpu);
- }
+ spin_lock_irqsave(&rq->lock, flags);
+ if ((p->policy != SCHED_FIFO) && !--p->time_slice) {
+ p->need_resched = 1;
+ if (rt_task(p))
+ p->time_slice = RT_PRIO_TO_TIMESLICE(p->prio);
+ else
+ p->time_slice = PRIO_TO_TIMESLICE(p->prio);
- if (!prev->mm) {
- prev->active_mm = NULL;
- mmdrop(oldmm);
+ /*
+ * Timeslice used up - discard any possible
+ * priority penalty:
+ */
+ dequeue_task(p, rq->active);
+ /*
+ * Tasks that have nice values of -20 ... -15 are put
+ * back into the active array. If they use up too much
+ * CPU time then they'll get a priority penalty anyway
+ * so this can not starve other processes accidentally.
+ * Otherwise this is pretty handy for sysadmins ...
+ */
+ if (p->prio <= MAX_RT_PRIO + MAX_USER_PRIO/6)
+ enqueue_task(p, rq->active);
+ else
+ enqueue_task(p, rq->expired);
+ } else {
+ /*
+ * Deactivate + activate the task so that the
+ * load estimator gets updated properly:
+ */
+ if (!rt_task(p)) {
+ deactivate_task(p, rq);
+ activate_task(p, rq);
}
}
+ load_balance(rq);
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
- /*
- * This just switches the register state and the
- * stack.
- */
- switch_to(prev, next, prev);
- __schedule_tail(prev);
+void scheduling_functions_start_here(void) { }
+
+/*
+ * 'schedule()' is the main scheduler function.
+ */
+asmlinkage void schedule(void)
+{
+ task_t *prev, *next;
+ prio_array_t *array;
+ runqueue_t *rq;
+ list_t *queue;
+ int idx;
+
+ if (unlikely(in_interrupt()))
+ BUG();
+need_resched_back:
+ prev = current;
+ release_kernel_lock(prev, smp_processor_id());
+ rq = this_rq();
+ spin_lock_irq(&rq->lock);
+
+ switch (prev->state) {
+ case TASK_INTERRUPTIBLE:
+ if (unlikely(signal_pending(prev))) {
+ prev->state = TASK_RUNNING;
+ break;
+ }
+ default:
+ deactivate_task(prev, rq);
+ case TASK_RUNNING:
+ }
+pick_next_task:
+ if (unlikely(!rq->nr_running)) {
+ load_balance(rq);
+ if (rq->nr_running)
+ goto pick_next_task;
+ next = rq->idle;
+ goto switch_tasks;
+ }
+
+ array = rq->active;
+ if (unlikely(!array->nr_active)) {
+ /*
+ * Switch the active and expired arrays.
+ */
+ rq->active = rq->expired;
+ rq->expired = array;
+ array = rq->active;
+ }
+
+ idx = sched_find_first_zero_bit(array->bitmap);
+ queue = array->queue + idx;
+ next = list_entry(queue->next, task_t, run_list);
+
+switch_tasks:
+ prev->need_resched = 0;
+
+ if (likely(prev != next)) {
+ rq->nr_switches++;
+ rq->curr = next;
+ next->cpu = prev->cpu;
+ context_switch(prev, next);
+ /*
+ * The runqueue pointer might be from another CPU
+ * if the new task was last running on a different
+ * CPU - thus re-load it.
+ */
+ barrier();
+ rq = this_rq();
+ }
+ spin_unlock_irq(&rq->lock);
-same_process:
reacquire_kernel_lock(current);
- if (current->need_resched)
+ if (unlikely(current->need_resched))
goto need_resched_back;
return;
}
/*
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
- * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
- * non-exclusive tasks and one exclusive task.
+ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+ * number) then we wake all the non-exclusive tasks and one exclusive task.
*
* There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns zero
- * in this (rare) case, and we handle it by contonuing to scan the queue.
+ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+ * zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, const int sync)
{
struct list_head *tmp;
- struct task_struct *p;
+ task_t *p;
list_for_each(tmp,&q->task_list) {
unsigned int state;
@@ -853,8 +882,93 @@ long sleep_on_timeout(wait_queue_head_t *q, long timeout)
return timeout;
}
+/*
+ * Change the current task's CPU affinity. Migrate the process to a
+ * proper CPU and schedule away if the current CPU is removed from
+ * the allowed bitmask.
+ */
+void set_cpus_allowed(task_t *p, unsigned long new_mask)
+{
+ runqueue_t *this_rq = this_rq(), *target_rq;
+ unsigned long this_mask = 1UL << smp_processor_id();
+ int target_cpu;
+
+ new_mask &= cpu_online_map;
+ p->cpus_allowed = new_mask;
+ /*
+ * Can the task run on the current CPU? If not then
+ * migrate the process off to a proper CPU.
+ */
+ if (new_mask & this_mask)
+ return;
+ target_cpu = ffz(~new_mask);
+ target_rq = cpu_rq(target_cpu);
+ if (target_cpu < smp_processor_id()) {
+ spin_lock_irq(&target_rq->lock);
+ spin_lock(&this_rq->lock);
+ } else {
+ spin_lock_irq(&this_rq->lock);
+ spin_lock(&target_rq->lock);
+ }
+ dequeue_task(p, p->array);
+ this_rq->nr_running--;
+ target_rq->nr_running++;
+ enqueue_task(p, target_rq->active);
+ target_rq->curr->need_resched = 1;
+ spin_unlock(&target_rq->lock);
+
+ /*
+ * The easiest solution is to context switch into
+ * the idle thread - which will pick the best task
+ * afterwards:
+ */
+ this_rq->nr_switches++;
+ this_rq->curr = this_rq->idle;
+ this_rq->idle->need_resched = 1;
+ context_switch(current, this_rq->idle);
+ barrier();
+ spin_unlock_irq(&this_rq()->lock);
+}
+
void scheduling_functions_end_here(void) { }
+void set_user_nice(task_t *p, long nice)
+{
+ unsigned long flags;
+ prio_array_t *array;
+ runqueue_t *rq;
+
+ if (p->__nice == nice)
+ return;
+ /*
+ * We have to be careful, if called from sys_setpriority(),
+ * the task might be in the middle of scheduling on another CPU.
+ */
+ lock_task_rq(rq, p, flags);
+ if (rt_task(p)) {
+ p->__nice = nice;
+ goto out_unlock;
+ }
+ array = p->array;
+ if (array) {
+ dequeue_task(p, array);
+ }
+ p->__nice = nice;
+ p->prio = NICE_TO_PRIO(nice);
+ if (array) {
+ enqueue_task(p, array);
+ /*
+ * If the task is runnable and lowered its priority,
+ * or increased its priority then reschedule its CPU:
+ */
+ if ((nice < p->__nice) ||
+ ((p->__nice < nice) && (p == rq->curr)))
+ resched_task(rq->curr);
+ }
+out_unlock:
+ unlock_task_rq(rq, p, flags);
+}
+
#ifndef __alpha__
/*
@@ -865,7 +979,7 @@ void scheduling_functions_end_here(void) { }
asmlinkage long sys_nice(int increment)
{
- long newprio;
+ long nice;
/*
* Setpriority might change our priority at the same moment.
@@ -881,28 +995,30 @@ asmlinkage long sys_nice(int increment)
if (increment > 40)
increment = 40;
- newprio = current->nice + increment;
- if (newprio < -20)
- newprio = -20;
- if (newprio > 19)
- newprio = 19;
- current->nice = newprio;
+ nice = current->__nice + increment;
+ if (nice < -20)
+ nice = -20;
+ if (nice > 19)
+ nice = 19;
+ set_user_nice(current, nice);
return 0;
}
#endif
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static inline task_t *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_pid(pid) : current;
}
-static int setscheduler(pid_t pid, int policy,
- struct sched_param *param)
+static int setscheduler(pid_t pid, int policy, struct sched_param *param)
{
struct sched_param lp;
- struct task_struct *p;
+ prio_array_t *array;
+ unsigned long flags;
+ runqueue_t *rq;
int retval;
+ task_t *p;
retval = -EINVAL;
if (!param || pid < 0)
@@ -916,14 +1032,19 @@ static int setscheduler(pid_t pid, int policy,
* We play safe to avoid deadlocks.
*/
read_lock_irq(&tasklist_lock);
- spin_lock(&runqueue_lock);
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
- goto out_unlock;
-
+ goto out_unlock_tasklist;
+
+ /*
+	 * To be able to change p->policy safely, the appropriate
+ * runqueue lock must be held.
+ */
+ lock_task_rq(rq,p,flags);
+
if (policy < 0)
policy = p->policy;
else {
@@ -944,30 +1065,36 @@ static int setscheduler(pid_t pid, int policy,
goto out_unlock;
retval = -EPERM;
- if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
+ if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
!capable(CAP_SYS_NICE))
goto out_unlock;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!capable(CAP_SYS_NICE))
goto out_unlock;
+ array = p->array;
+ if (array)
+ deactivate_task(p, task_rq(p));
retval = 0;
p->policy = policy;
p->rt_priority = lp.sched_priority;
- if (task_on_runqueue(p))
- move_first_runqueue(p);
-
- current->need_resched = 1;
+ if (rt_task(p))
+ p->prio = 99-p->rt_priority;
+ else
+ p->prio = NICE_TO_PRIO(p->__nice);
+ if (array)
+ activate_task(p, task_rq(p));
out_unlock:
- spin_unlock(&runqueue_lock);
+ unlock_task_rq(rq,p,flags);
+out_unlock_tasklist:
read_unlock_irq(&tasklist_lock);
out_nounlock:
return retval;
}
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
+asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
struct sched_param *param)
{
return setscheduler(pid, policy, param);
@@ -980,7 +1107,7 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
- struct task_struct *p;
+ task_t *p;
int retval;
retval = -EINVAL;
@@ -991,7 +1118,7 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
if (p)
- retval = p->policy & ~SCHED_YIELD;
+ retval = p->policy;
read_unlock(&tasklist_lock);
out_nounlock:
@@ -1000,7 +1127,7 @@ out_nounlock:
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
{
- struct task_struct *p;
+ task_t *p;
struct sched_param lp;
int retval;
@@ -1031,42 +1158,28 @@ out_unlock:
asmlinkage long sys_sched_yield(void)
{
+ runqueue_t *rq = this_rq();
+ prio_array_t *array;
+
/*
- * Trick. sched_yield() first counts the number of truly
- * 'pending' runnable processes, then returns if it's
- * only the current processes. (This test does not have
- * to be atomic.) In threaded applications this optimization
- * gets triggered quite often.
+ * Decrease the yielding task's priority by one, to avoid
+ * livelocks. This priority loss is temporary, it's recovered
+ * once the current timeslice expires.
+ *
+ * If priority is already MAX_PRIO-1 then we still
+ * roundrobin the task within the runlist.
*/
+ spin_lock_irq(&rq->lock);
+ array = current->array;
+ dequeue_task(current, array);
+ if (likely(!rt_task(current)))
+ if (current->prio < MAX_PRIO-1)
+ current->prio++;
+ enqueue_task(current, array);
+ spin_unlock_irq(&rq->lock);
- int nr_pending = nr_running;
-
-#if CONFIG_SMP
- int i;
-
- // Subtract non-idle processes running on other CPUs.
- for (i = 0; i < smp_num_cpus; i++) {
- int cpu = cpu_logical_map(i);
- if (aligned_data[cpu].schedule_data.curr != idle_task(cpu))
- nr_pending--;
- }
-#else
- // on UP this process is on the runqueue as well
- nr_pending--;
-#endif
- if (nr_pending) {
- /*
- * This process can only be rescheduled by us,
- * so this is safe without any locking.
- */
- if (current->policy == SCHED_OTHER)
- current->policy |= SCHED_YIELD;
- current->need_resched = 1;
+ schedule();
- current->time_slice = 0;
- if (++current->dyn_prio > MAX_DYNPRIO)
- current->dyn_prio = MAX_DYNPRIO;
- }
return 0;
}
@@ -1104,7 +1217,7 @@ asmlinkage long sys_sched_get_priority_min(int policy)
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
struct timespec t;
- struct task_struct *p;
+ task_t *p;
int retval = -EINVAL;
if (pid < 0)
@@ -1114,8 +1227,8 @@ asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
if (p)
- jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : TASK_TIMESLICE(p),
- &t);
+ jiffies_to_timespec(p->policy & SCHED_FIFO ?
+ 0 : RT_PRIO_TO_TIMESLICE(p->prio), &t);
read_unlock(&tasklist_lock);
if (p)
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
@@ -1123,7 +1236,7 @@ out_nounlock:
return retval;
}
-static void show_task(struct task_struct * p)
+static void show_task(task_t * p)
{
unsigned long free = 0;
int state;
@@ -1171,7 +1284,7 @@ static void show_task(struct task_struct * p)
printk(" (NOTLB)\n");
{
- extern void show_trace_task(struct task_struct *tsk);
+ extern void show_trace_task(task_t *tsk);
show_trace_task(p);
}
}
@@ -1193,7 +1306,7 @@ char * render_sigset_t(sigset_t *set, char *buffer)
void show_state(void)
{
- struct task_struct *p;
+ task_t *p;
#if (BITS_PER_LONG == 32)
printk("\n"
@@ -1216,134 +1329,98 @@ void show_state(void)
read_unlock(&tasklist_lock);
}
-/**
- * reparent_to_init() - Reparent the calling kernel thread to the init task.
- *
- * If a kernel thread is launched as a result of a system call, or if
- * it ever exits, it should generally reparent itself to init so that
- * it is correctly cleaned up on exit.
- *
- * The various task state such as scheduling policy and priority may have
- * been inherited fro a user process, so we reset them to sane values here.
- *
- * NOTE that reparent_to_init() gives the caller full capabilities.
- */
-void reparent_to_init(void)
+extern unsigned long wait_init_idle;
+
+static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
{
- write_lock_irq(&tasklist_lock);
-
- /* Reparent to init */
- REMOVE_LINKS(current);
- current->p_pptr = child_reaper;
- current->p_opptr = child_reaper;
- SET_LINKS(current);
-
- /* Set the exit signal to SIGCHLD so we signal init on exit */
- current->exit_signal = SIGCHLD;
-
- /* We also take the runqueue_lock while altering task fields
- * which affect scheduling decisions */
- spin_lock(&runqueue_lock);
-
- current->ptrace = 0;
- current->nice = DEF_NICE;
- current->policy = SCHED_OTHER;
- /* cpus_allowed? */
- /* rt_priority? */
- /* signals? */
- current->cap_effective = CAP_INIT_EFF_SET;
- current->cap_inheritable = CAP_INIT_INH_SET;
- current->cap_permitted = CAP_FULL_SET;
- current->keep_capabilities = 0;
- memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
- current->user = INIT_USER;
-
- spin_unlock(&runqueue_lock);
- write_unlock_irq(&tasklist_lock);
+ if (rq1 == rq2)
+ spin_lock(&rq1->lock);
+ else {
+ if (rq1->cpu < rq2->cpu) {
+ spin_lock(&rq1->lock);
+ spin_lock(&rq2->lock);
+ } else {
+ spin_lock(&rq2->lock);
+ spin_lock(&rq1->lock);
+ }
+ }
}
-/*
- * Put all the gunge required to become a kernel thread without
- * attached user resources in one place where it belongs.
- */
-
-void daemonize(void)
+static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
{
- struct fs_struct *fs;
-
-
- /*
- * If we were started as result of loading a module, close all of the
- * user space pages. We don't need them, and if we didn't close them
- * they would be locked into memory.
- */
- exit_mm(current);
-
- current->session = 1;
- current->pgrp = 1;
- current->tty = NULL;
-
- /* Become as one with the init task */
-
- exit_fs(current); /* current->fs->count--; */
- fs = init_task.fs;
- current->fs = fs;
- atomic_inc(&fs->count);
- exit_files(current);
- current->files = init_task.files;
- atomic_inc(&current->files->count);
+ spin_unlock(&rq1->lock);
+ if (rq1 != rq2)
+ spin_unlock(&rq2->lock);
}
-extern unsigned long wait_init_idle;
-
void __init init_idle(void)
{
- struct schedule_data * sched_data;
- sched_data = &aligned_data[smp_processor_id()].schedule_data;
+ runqueue_t *this_rq = this_rq(), *rq = current->array->rq;
+ unsigned long flags;
- if (current != &init_task && task_on_runqueue(current)) {
- printk("UGH! (%d:%d) was on the runqueue, removing.\n",
- smp_processor_id(), current->pid);
- del_from_runqueue(current);
+ __save_flags(flags);
+ __cli();
+ double_rq_lock(this_rq, rq);
+
+ this_rq->curr = this_rq->idle = current;
+ deactivate_task(current, rq);
+ current->array = NULL;
+ current->prio = MAX_PRIO;
+ current->state = TASK_RUNNING;
+ clear_bit(smp_processor_id(), &wait_init_idle);
+ double_rq_unlock(this_rq, rq);
+ while (wait_init_idle) {
+ cpu_relax();
+ barrier();
}
- current->dyn_prio = 0;
- sched_data->curr = current;
- sched_data->last_schedule = get_cycles();
- clear_bit(current->processor, &wait_init_idle);
+ current->need_resched = 1;
+ __sti();
}
-extern void init_timervecs (void);
-
-static void fill_tslice_map(void)
-{
- int i;
-
- for (i = 0; i < NICE_RANGE; i++) {
- ts_table[i] = ((MIN_NICE_TSLICE +
- ((MAX_NICE_TSLICE -
- MIN_NICE_TSLICE) / (NICE_RANGE - 1)) * i) * HZ) / 1000000;
- if (!ts_table[i]) ts_table[i] = 1;
- }
-}
+extern void init_timervecs(void);
+extern void timer_bh(void);
+extern void tqueue_bh(void);
+extern void immediate_bh(void);
void __init sched_init(void)
{
+ runqueue_t *rq;
+ int i, j, k;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ runqueue_t *rq = cpu_rq(i);
+ prio_array_t *array;
+
+ rq->active = rq->arrays + 0;
+ rq->expired = rq->arrays + 1;
+ spin_lock_init(&rq->lock);
+ rq->cpu = i;
+
+ for (j = 0; j < 2; j++) {
+ array = rq->arrays + j;
+ array->rq = rq;
+ array->lock = &rq->lock;
+ for (k = 0; k < MAX_PRIO; k++) {
+ INIT_LIST_HEAD(array->queue + k);
+ __set_bit(k, array->bitmap);
+ }
+ // zero delimiter for bitsearch
+ __clear_bit(MAX_PRIO, array->bitmap);
+ }
+ }
/*
* We have to do a little magic to get the first
* process right in SMP mode.
*/
- int cpu = smp_processor_id();
- int nr;
-
- init_task.processor = cpu;
+ rq = this_rq();
+ rq->curr = current;
+ rq->idle = NULL;
+ wake_up_process(current);
- for(nr = 0; nr < PIDHASH_SZ; nr++)
- pidhash[nr] = NULL;
-
- fill_tslice_map();
+ for (i = 0; i < PIDHASH_SZ; i++)
+ pidhash[i] = NULL;
init_timervecs();
-
init_bh(TIMER_BH, timer_bh);
init_bh(TQUEUE_BH, tqueue_bh);
init_bh(IMMEDIATE_BH, immediate_bh);
@@ -1352,5 +1429,5 @@ void __init sched_init(void)
* The boot idle thread does lazy MMU switching as well:
*/
atomic_inc(&init_mm.mm_count);
- enter_lazy_tlb(&init_mm, current, cpu);
+ enter_lazy_tlb(&init_mm, current, smp_processor_id());
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 44acecd85..b5b866aa3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -478,12 +478,9 @@ static inline void signal_wake_up(struct task_struct *t)
* process of changing - but no harm is done by that
* other than doing an extra (lightweight) IPI interrupt.
*/
- spin_lock(&runqueue_lock);
- if (task_has_cpu(t) && t->processor != smp_processor_id())
- smp_send_reschedule(t->processor);
- spin_unlock(&runqueue_lock);
-#endif /* CONFIG_SMP */
-
+ if ((t->state == TASK_RUNNING) && (t->cpu != smp_processor_id()))
+ kick_if_running(t);
+#endif
if (t->state & TASK_INTERRUPTIBLE) {
wake_up_process(t);
return;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 97b8eb8cc..4d8bb7b97 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -259,10 +259,9 @@ void tasklet_kill(struct tasklet_struct *t)
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
current->state = TASK_RUNNING;
- do {
- current->policy |= SCHED_YIELD;
- schedule();
- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ do
+ yield();
+ while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
clear_bit(TASKLET_STATE_SCHED, &t->state);
@@ -365,13 +364,13 @@ static int ksoftirqd(void * __bind_cpu)
int cpu = cpu_logical_map(bind_cpu);
daemonize();
- current->nice = 19;
+ set_user_nice(current, 19);
sigfillset(&current->blocked);
/* Migrate to the right CPU */
- current->cpus_allowed = 1UL << cpu;
- while (smp_processor_id() != cpu)
- schedule();
+ set_cpus_allowed(current, 1UL << cpu);
+ if (smp_processor_id() != cpu)
+ BUG();
sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);
@@ -400,18 +399,13 @@ static __init int spawn_ksoftirqd(void)
{
int cpu;
- for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+ for (cpu = 0; cpu < smp_num_cpus; cpu++)
if (kernel_thread(ksoftirqd, (void *) (long) cpu,
CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
printk("spawn_ksoftirqd() failed for cpu %d\n", cpu);
- else {
- while (!ksoftirqd_task(cpu_logical_map(cpu))) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
- }
- }
-
+ else
+ while (!ksoftirqd_task(cpu_logical_map(cpu)))
+ yield();
return 0;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 765761a4e..21c21ca8b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -220,10 +220,10 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
}
if (error == -ESRCH)
error = 0;
- if (niceval < p->nice && !capable(CAP_SYS_NICE))
+ if (niceval < p->__nice && !capable(CAP_SYS_NICE))
error = -EACCES;
else
- p->nice = niceval;
+ set_user_nice(p, niceval);
}
read_unlock(&tasklist_lock);
@@ -249,7 +249,7 @@ asmlinkage long sys_getpriority(int which, int who)
long niceval;
if (!proc_sel(p, which, who))
continue;
- niceval = 20 - p->nice;
+ niceval = 20 - p->__nice;
if (niceval > retval)
retval = niceval;
}
diff --git a/kernel/timer.c b/kernel/timer.c
index c56ec37ea..ce3945cda 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -25,6 +25,8 @@
#include <asm/uaccess.h>
+struct kernel_stat kstat;
+
/*
* Timekeeping variables
*/
@@ -584,13 +586,16 @@ void update_process_times(int user_tick)
update_one_process(p, user_tick, system, cpu);
if (p->pid) {
expire_task(p);
- if (p->nice > 0)
+ if (p->__nice > 0)
kstat.per_cpu_nice[cpu] += user_tick;
else
kstat.per_cpu_user[cpu] += user_tick;
kstat.per_cpu_system[cpu] += system;
- } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
- kstat.per_cpu_system[cpu] += system;
+ } else {
+ idle_tick();
+ if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
+ kstat.per_cpu_system[cpu] += system;
+ }
}
/*
@@ -791,6 +796,89 @@ asmlinkage long sys_getegid(void)
#endif
+static void process_timeout(unsigned long __data)
+{
+ wake_up_process((task_t *)__data);
+}
+
+/**
+ * schedule_timeout - sleep until timeout
+ * @timeout: timeout value in jiffies
+ *
+ * Make the current task sleep until @timeout jiffies have
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
+ * pass before the routine returns. The routine will return 0
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * in jiffies will be returned, or 0 if the timer expired in time
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+ * the CPU away without a bound on the timeout. In this case the return
+ * value will be %MAX_SCHEDULE_TIMEOUT.
+ *
+ * In all cases the return value is guaranteed to be non-negative.
+ */
+signed long schedule_timeout(signed long timeout)
+{
+ struct timer_list timer;
+ unsigned long expire;
+
+ switch (timeout)
+ {
+ case MAX_SCHEDULE_TIMEOUT:
+ /*
+ * These two special cases are useful to be comfortable
+ * in the caller. Nothing more. We could take
+ * MAX_SCHEDULE_TIMEOUT from one of the negative value
+		 * but I'd like to return a valid offset (>=0) to allow
+ * the caller to do everything it want with the retval.
+ */
+ schedule();
+ goto out;
+ default:
+ /*
+ * Another bit of PARANOID. Note that the retval will be
+ * 0 since no piece of kernel is supposed to do a check
+ * for a negative retval of schedule_timeout() (since it
+		 * should never happen anyway). You just have the printk()
+		 * that will tell you if something has gone wrong and where.
+ */
+ if (timeout < 0)
+ {
+ printk(KERN_ERR "schedule_timeout: wrong timeout "
+ "value %lx from %p\n", timeout,
+ __builtin_return_address(0));
+ current->state = TASK_RUNNING;
+ goto out;
+ }
+ }
+
+ expire = timeout + jiffies;
+
+ init_timer(&timer);
+ timer.expires = expire;
+ timer.data = (unsigned long) current;
+ timer.function = process_timeout;
+
+ add_timer(&timer);
+ schedule();
+ del_timer_sync(&timer);
+
+ timeout = expire - jiffies;
+
+ out:
+ return timeout < 0 ? 0 : timeout;
+}
+
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 25a0a67e0..1996d935c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -82,7 +82,7 @@ static int badness(struct task_struct *p)
* Niced processes are most likely less important, so double
* their badness points.
*/
- if (p->nice > 0)
+ if (p->__nice > 0)
points *= 2;
/*
@@ -149,8 +149,7 @@ void oom_kill_task(struct task_struct *p)
* all the memory it needs. That way it should be able to
* exit() and clear out its resources quickly...
*/
- p->time_slice = 2 * MAX_TSLICE;
- p->dyn_prio = MAX_DYNPRIO + 1;
+ p->time_slice = 2 * MAX_TIMESLICE;
p->flags |= PF_MEMALLOC | PF_MEMDIE;
/* This process has hardware access, be more careful. */
@@ -189,8 +188,7 @@ static void oom_kill(void)
* killing itself before someone else gets the chance to ask
* for more memory.
*/
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
return;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5a1ed5ad..76ef2fe75 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -394,9 +394,7 @@ rebalance:
return NULL;
/* Yield for kswapd, and try again */
- current->policy |= SCHED_YIELD;
- __set_current_state(TASK_RUNNING);
- schedule();
+ yield();
goto rebalance;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8fc365bfe..3d6ec27c3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_output.c,v 1.144 2001-11-06 22:21:08 davem Exp $
+ * Version: $Id: tcp_output.c,v 1.145 2002-01-11 08:45:37 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -1009,8 +1009,7 @@ void tcp_send_fin(struct sock *sk)
skb = alloc_skb(MAX_TCP_HEADER, GFP_KERNEL);
if (skb)
break;
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
/* Reserve space for headers and prepare control bits. */
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c80e6626d..05cd5b558 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -475,10 +475,8 @@ void dev_deactivate(struct net_device *dev)
dev_watchdog_down(dev);
- while (test_bit(__LINK_STATE_SCHED, &dev->state)) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ while (test_bit(__LINK_STATE_SCHED, &dev->state))
+ yield();
spin_unlock_wait(&dev->xmit_lock);
}
diff --git a/net/socket.c b/net/socket.c
index c7cca2cbd..58ac533d0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -148,8 +148,7 @@ static void net_family_write_lock(void)
while (atomic_read(&net_family_lockct) != 0) {
spin_unlock(&net_family_lock);
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
spin_lock(&net_family_lock);
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 860fc19f3..7ca2e8d46 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -773,8 +773,7 @@ rpc_allocate(unsigned int flags, unsigned int size)
}
if (flags & RPC_TASK_ASYNC)
return NULL;
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
} while (!signalled());
return NULL;
@@ -1115,8 +1114,7 @@ rpciod_killall(void)
__rpc_schedule();
if (all_tasks) {
dprintk("rpciod_killall: waiting for tasks to exit\n");
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
}
@@ -1186,8 +1184,7 @@ rpciod_down(void)
* wait briefly before checking the process id.
*/
current->sigpending = 0;
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
/*
* Display a message if we're going to wait longer.
*/
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e20b1051c..d79a21f2a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: af_unix.c,v 1.126 2001-11-13 05:06:28 davem Exp $
+ * Version: $Id: af_unix.c,v 1.127 2002-01-11 08:45:37 davem Exp $
*
* Fixes:
* Linus Torvalds : Assorted bug cures.
@@ -564,10 +564,8 @@ retry:
addr->hash)) {
write_unlock(&unix_table_lock);
/* Sanity yield. It is unusual case, but yet... */
- if (!(ordernum&0xFF)) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ if (!(ordernum&0xFF))
+ yield();
goto retry;
}
addr->hash ^= sk->type;