author     davem <davem>  2002-01-12 03:27:02 +0000
committer  davem <davem>  2002-01-12 03:27:02 +0000
commit     88765ad4c8b1650cf7272b862d8385d63c0965a2 (patch)
tree       64e904fda7729451c3682235f04d24737a0cccec
parent     c2114c34204bcddd738bb002b011b29c30966d9a (diff)
download   netdev-vger-cvs-88765ad4c8b1650cf7272b862d8385d63c0965a2.tar.gz
Merge mainline to 2.5.2-pre11 + Ingo sched H6.
-rw-r--r--  Documentation/DocBook/Makefile | 1
-rw-r--r--  Documentation/DocBook/kernel-api.tmpl | 8
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/i386/kernel/nmi.c | 2
-rw-r--r--  drivers/net/eepro100.c | 2
-rw-r--r--  drivers/net/irda/irport.c | 7
-rw-r--r--  drivers/net/irda/nsc-ircc.c | 7
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 7
-rw-r--r--  drivers/net/pcmcia/ray_cs.c | 172
-rw-r--r--  drivers/net/wavelan.c | 3
-rw-r--r--  drivers/pci/pci.ids | 2
-rw-r--r--  drivers/usb/hcd/Makefile | 6
-rw-r--r--  drivers/usb/hcd/ehci-hcd.c | 2
-rw-r--r--  drivers/usb/hcd/ehci-q.c | 217
-rw-r--r--  drivers/usb/hcd/ehci-sched.c | 2
-rw-r--r--  fs/ext3/super.c | 2
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/sched.h | 18
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  kernel/sched.c | 227
-rw-r--r--  kernel/timer.c | 3
-rw-r--r--  net/irda/ircomm/ircomm_core.c | 22
-rw-r--r--  net/irda/ircomm/ircomm_lmp.c | 20
-rw-r--r--  net/irda/irda_device.c | 2
-rw-r--r--  net/irda/iriap.c | 2
-rw-r--r--  net/irda/irlan/irlan_common.c | 18
-rw-r--r--  net/irda/irlan/irlan_eth.c | 11
-rw-r--r--  net/irda/irlap.c | 2
-rw-r--r--  net/irda/irlap_event.c | 6
-rw-r--r--  net/irda/irlmp_event.c | 2
-rw-r--r--  net/irda/irsyms.c | 2
31 files changed, 560 insertions, 220 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 9c9db8684..693fa2125 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -98,6 +98,7 @@ APISOURCES := $(TOPDIR)/drivers/media/video/videodev.c \
$(TOPDIR)/drivers/sound/sound_firmware.c \
$(TOPDIR)/drivers/net/wan/syncppp.c \
$(TOPDIR)/drivers/net/wan/z85230.c \
+ $(TOPDIR)/drivers/usb/hcd.c \
$(TOPDIR)/drivers/usb/usb.c \
$(TOPDIR)/drivers/video/fbmem.c \
$(TOPDIR)/drivers/video/fbcmap.c \
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index a3de95e34..448f3234d 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -281,6 +281,14 @@
!Edrivers/usb/usb.c
</sect1>
+ <sect1><title>Host Controller APIs</title>
+ <para>These APIs are only for use by host controller drivers,
+ most of which implement standard register interfaces such as
+ EHCI, OHCI, or UHCI.
+ </para>
+!Edrivers/usb/hcd.c
+ </sect1>
+
</chapter>
<chapter id="uart16x50">
diff --git a/Makefile b/Makefile
index 322ec68e9..fffcc6e58 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 2
-EXTRAVERSION =-pre10
+EXTRAVERSION =-pre11
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index a73237983..d991ca7b3 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -276,7 +276,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
* wait a few IRQs (5 seconds) before doing the oops ...
*/
alert_counter[cpu]++;
- if (alert_counter[cpu] == 5*nmi_hz) {
+ if (alert_counter[cpu] == 10*nmi_hz) {
spin_lock(&nmi_print_lock);
/*
* We are in trouble anyway, lets at least try
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 98d939356..7576230ae 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -2267,6 +2267,8 @@ static struct pci_device_id eepro100_pci_tbl[] __devinitdata = {
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CAM,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID1029,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID1030,
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 53c1c27c4..ab9cfe311 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -140,7 +140,7 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
{
struct net_device *dev;
struct irport_cb *self;
- int ret;
+ void *ret;
int err;
IRDA_DEBUG(0, __FUNCTION__ "()\n");
@@ -169,13 +169,12 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
self->io.fifo_size = 16;
/* Lock the port that we need */
- ret = check_region(self->io.sir_base, self->io.sir_ext);
- if (ret < 0) {
+ ret = request_region(self->io.sir_base, self->io.sir_ext, driver_name);
+ if (!ret) {
IRDA_DEBUG(0, __FUNCTION__ "(), can't get iobase of 0x%03x\n",
self->io.sir_base);
return NULL;
}
- request_region(self->io.sir_base, self->io.sir_ext, driver_name);
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&self->qos);
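
The same conversion repeats in nsc-ircc.c and w83977af_ir.c below: the racy check_region()-then-request_region() sequence is replaced by a single request_region() call whose return value is tested, so the I/O ports are claimed atomically. A minimal sketch of the idiom, with iobase, extent and driver_name as placeholder values:

	void *ret;

	/* claim the I/O region atomically; NULL means it is already owned */
	ret = request_region(iobase, extent, driver_name);
	if (!ret)
		return -ENODEV;
	/* on any later probe failure, undo with release_region(iobase, extent) */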
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 29fe3d722..1cb4bb70a 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -246,7 +246,7 @@ static int nsc_ircc_open(int i, chipio_t *info)
struct net_device *dev;
struct nsc_ircc_cb *self;
struct pm_dev *pmdev;
- int ret;
+ void *ret;
int err;
IRDA_DEBUG(2, __FUNCTION__ "()\n");
@@ -282,15 +282,14 @@ static int nsc_ircc_open(int i, chipio_t *info)
self->io.fifo_size = 32;
/* Reserve the ioports that we need */
- ret = check_region(self->io.fir_base, self->io.fir_ext);
- if (ret < 0) {
+ ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
+ if (!ret) {
WARNING(__FUNCTION__ "(), can't get iobase of 0x%03x\n",
self->io.fir_base);
dev_self[i] = NULL;
kfree(self);
return -ENODEV;
}
- request_region(self->io.fir_base, self->io.fir_ext, driver_name);
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&self->qos);
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 52f19ed6b..416256bcb 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -160,7 +160,7 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
{
struct net_device *dev;
struct w83977af_ir *self;
- int ret;
+ void *ret;
int err;
IRDA_DEBUG(0, __FUNCTION__ "()\n");
@@ -190,14 +190,13 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
self->io.fifo_size = 32;
/* Lock the port that we need */
- ret = check_region(self->io.fir_base, self->io.fir_ext);
- if (ret < 0) {
+ ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
+ if (!ret) {
IRDA_DEBUG(0, __FUNCTION__ "(), can't get iobase of 0x%03x\n",
self->io.fir_base);
/* w83977af_cleanup( self); */
return -ENODEV;
}
- request_region(self->io.fir_base, self->io.fir_ext, driver_name);
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&self->qos);
diff --git a/drivers/net/pcmcia/ray_cs.c b/drivers/net/pcmcia/ray_cs.c
index 736aa52b3..4c09a53df 100644
--- a/drivers/net/pcmcia/ray_cs.c
+++ b/drivers/net/pcmcia/ray_cs.c
@@ -999,7 +999,9 @@ static int ray_event(event_t event, int priority,
/*===========================================================================*/
int ray_dev_init(struct net_device *dev)
{
+#ifdef RAY_IMMEDIATE_INIT
int i;
+#endif /* RAY_IMMEDIATE_INIT */
ray_dev_t *local = dev->priv;
dev_link_t *link = local->finder;
@@ -1008,6 +1010,7 @@ int ray_dev_init(struct net_device *dev)
DEBUG(2,"ray_dev_init - device not present\n");
return -1;
}
+#ifdef RAY_IMMEDIATE_INIT
/* Download startup parameters */
if ( (i = dl_startup_params(dev)) < 0)
{
@@ -1015,7 +1018,14 @@ int ray_dev_init(struct net_device *dev)
"returns 0x%x\n",i);
return -1;
}
-
+#else /* RAY_IMMEDIATE_INIT */
+ /* Postpone the card init so that we can still configure the card,
+ * for example using the Wireless Extensions. The init will happen
+ * in ray_open() - Jean II */
+ DEBUG(1,"ray_dev_init: postponing card init to ray_open() ; Status = %d\n",
+ local->card_status);
+#endif /* RAY_IMMEDIATE_INIT */
+
/* copy mac and broadcast addresses to linux device */
memcpy(&dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
memset(dev->broadcast, 0xff, ETH_ALEN);
@@ -1245,6 +1255,22 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
wrq->u.freq.e = 0;
break;
+ /* Set frequency/channel */
+ case SIOCSIWFREQ:
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ /* Setting by channel number */
+ if ((wrq->u.freq.m > USA_HOP_MOD) || (wrq->u.freq.e > 0))
+ err = -EOPNOTSUPP;
+ else
+ local->sparm.b5.a_hop_pattern = wrq->u.freq.m;
+ break;
+
/* Get current network name (ESSID) */
case SIOCGIWESSID:
if (wrq->u.data.pointer)
@@ -1262,6 +1288,46 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
break;
+ /* Set desired network name (ESSID) */
+ case SIOCSIWESSID:
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ if (wrq->u.data.pointer)
+ {
+ char card_essid[IW_ESSID_MAX_SIZE + 1];
+
+ /* Check if we asked for `any' */
+ if(wrq->u.data.flags == 0)
+ {
+ /* Corey : can you do that ? */
+ err = -EOPNOTSUPP;
+ }
+ else
+ {
+ /* Check the size of the string */
+ if(wrq->u.data.length >
+ IW_ESSID_MAX_SIZE + 1)
+ {
+ err = -E2BIG;
+ break;
+ }
+ copy_from_user(card_essid,
+ wrq->u.data.pointer,
+ wrq->u.data.length);
+ card_essid[IW_ESSID_MAX_SIZE] = '\0';
+
+ /* Set the ESSID in the card */
+ memcpy(local->sparm.b5.a_current_ess_id, card_essid,
+ IW_ESSID_MAX_SIZE);
+ }
+ }
+ break;
+
/* Get current Access Point (BSSID in our case) */
case SIOCGIWAP:
memcpy(wrq->u.ap_addr.sa_data, local->bss_id, ETH_ALEN);
@@ -1304,6 +1370,34 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
wrq->u.rts.fixed = 1;
break;
+ /* Set the desired RTS threshold */
+ case SIOCSIWRTS:
+ {
+ int rthr = wrq->u.rts.value;
+
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ /* if(wrq->u.rts.fixed == 0) we should complain */
+#if WIRELESS_EXT > 8
+ if(wrq->u.rts.disabled)
+ rthr = 32767;
+ else
+#endif /* WIRELESS_EXT > 8 */
+ if((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
+ {
+ err = -EINVAL;
+ break;
+ }
+ local->sparm.b5.a_rts_threshold[0] = (rthr >> 8) & 0xFF;
+ local->sparm.b5.a_rts_threshold[1] = rthr & 0xFF;
+ }
+ break;
+
/* Get the current fragmentation threshold */
case SIOCGIWFRAG:
wrq->u.frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
@@ -1313,6 +1407,35 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#endif /* WIRELESS_EXT > 8 */
wrq->u.frag.fixed = 1;
break;
+
+ /* Set the desired fragmentation threshold */
+ case SIOCSIWFRAG:
+ {
+ int fthr = wrq->u.frag.value;
+
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ /* if(wrq->u.frag.fixed == 0) we should complain */
+#if WIRELESS_EXT > 8
+ if(wrq->u.frag.disabled)
+ fthr = 32767;
+ else
+#endif /* WIRELESS_EXT > 8 */
+ if((fthr < 256) || (fthr > 2347)) /* To check out ! */
+ {
+ err = -EINVAL;
+ break;
+ }
+ local->sparm.b5.a_frag_threshold[0] = (fthr >> 8) & 0xFF;
+ local->sparm.b5.a_frag_threshold[1] = fthr & 0xFF;
+ }
+ break;
+
#endif /* WIRELESS_EXT > 7 */
#if WIRELESS_EXT > 8
@@ -1323,6 +1446,33 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
else
wrq->u.mode = IW_MODE_ADHOC;
break;
+
+ /* Set the current mode of operation */
+ case SIOCSIWMODE:
+ {
+ char card_mode = 1;
+
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ switch (wrq->u.mode)
+ {
+ case IW_MODE_ADHOC:
+ card_mode = 0;
+ // Fall through
+ case IW_MODE_INFRA:
+ local->sparm.b5.a_network_type = card_mode;
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+ break;
+
#endif /* WIRELESS_EXT > 8 */
#if WIRELESS_EXT > 7
/* ------------------ IWSPY SUPPORT ------------------ */
@@ -1549,6 +1699,21 @@ static int ray_open(struct net_device *dev)
if (link->open == 0) local->num_multi = 0;
link->open++;
+ /* If the card is not started, time to start it ! - Jean II */
+ if(local->card_status == CARD_AWAITING_PARAM) {
+ int i;
+
+ DEBUG(1,"ray_open: doing init now !\n");
+
+ /* Download startup parameters */
+ if ( (i = dl_startup_params(dev)) < 0)
+ {
+ printk(KERN_INFO "ray_dev_init dl_startup_params failed - "
+ "returns 0x%x\n",i);
+ return -1;
+ }
+ }
+
if (sniffer) netif_stop_queue(dev);
else netif_start_queue(dev);
@@ -1572,6 +1737,11 @@ static int ray_dev_close(struct net_device *dev)
if (link->state & DEV_STALE_CONFIG)
mod_timer(&link->release, jiffies + HZ/20);
+ /* In here, we should stop the hardware (stop card from being active)
+ * and set local->card_status to CARD_AWAITING_PARAM, so that while the
+ * card is closed we can change its configuration.
+ * Probably also need a COR reset to get sane state - Jean II */
+
MOD_DEC_USE_COUNT;
return 0;
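
The new SIOCSIWRTS and SIOCSIWFRAG handlers store each 16-bit threshold big-endian across two byte-wide card parameters, mirroring how the SIOCGIWRTS/SIOCGIWFRAG read paths reassemble them. A worked example with the illustrative value rthr = 1500:

	int rthr = 1500;	/* 0x05DC */

	local->sparm.b5.a_rts_threshold[0] = (rthr >> 8) & 0xFF;	/* 0x05 */
	local->sparm.b5.a_rts_threshold[1] = rthr & 0xFF;		/* 0xDC */

	/* read path: (0x05 << 8) + 0xDC == 1500 */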
diff --git a/drivers/net/wavelan.c b/drivers/net/wavelan.c
index 23ba06317..10805eff7 100644
--- a/drivers/net/wavelan.c
+++ b/drivers/net/wavelan.c
@@ -4019,7 +4019,8 @@ static int __init wavelan_config(device * dev)
dev->irq = irq;
- request_region(ioaddr, sizeof(ha_t), "wavelan");
+ if (!request_region(ioaddr, sizeof(ha_t), "wavelan"))
+ return -EBUSY;
dev->mem_start = 0x0000;
dev->mem_end = 0x0000;
diff --git a/drivers/pci/pci.ids b/drivers/pci/pci.ids
index c73f0bd65..fc76f335f 100644
--- a/drivers/pci/pci.ids
+++ b/drivers/pci/pci.ids
@@ -812,6 +812,7 @@
0074 56k Voice Modem
1033 8014 RCV56ACF 56k Voice Modem
009b Vrc5476
+ 00e0 USB 2.0
1034 Framatome Connectors USA Inc.
1035 Comp. & Comm. Research Lab
1036 Future Domain Corp.
@@ -5417,6 +5418,7 @@ C 0c Serial bus controller
03 USB Controller
00 UHCI
10 OHCI
+ 20 EHCI
80 Unspecified
fe USB Device
04 Fibre Channel
diff --git a/drivers/usb/hcd/Makefile b/drivers/usb/hcd/Makefile
index 432740891..453c9cc3c 100644
--- a/drivers/usb/hcd/Makefile
+++ b/drivers/usb/hcd/Makefile
@@ -5,9 +5,9 @@
O_TARGET :=
-obj-$(CONFIG_EHCI_HCD) += ehci-hcd.o
-# obj-$(CONFIG_OHCI_HCD) += ohci-hcd.o
-# obj-$(CONFIG_UHCI_HCD) += uhci-hcd.o
+obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
+# obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
+# obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
# Extract lists of the multi-part drivers.
# The 'int-*' lists are the intermediate files used to build the multi's.
diff --git a/drivers/usb/hcd/ehci-hcd.c b/drivers/usb/hcd/ehci-hcd.c
index 83a33212f..41b8f3fd2 100644
--- a/drivers/usb/hcd/ehci-hcd.c
+++ b/drivers/usb/hcd/ehci-hcd.c
@@ -743,7 +743,7 @@ MODULE_LICENSE ("GPL");
static int __init init (void)
{
dbg (DRIVER_INFO);
- dbg ("block sizes: qh %d qtd %d itd %d sitd %d",
+ dbg ("block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd",
sizeof (struct ehci_qh), sizeof (struct ehci_qtd),
sizeof (struct ehci_itd), sizeof (struct ehci_sitd));
diff --git a/drivers/usb/hcd/ehci-q.c b/drivers/usb/hcd/ehci-q.c
index 5ca53633a..d559e8945 100644
--- a/drivers/usb/hcd/ehci-q.c
+++ b/drivers/usb/hcd/ehci-q.c
@@ -39,12 +39,6 @@
* buffer low/full speed data so the host collects it at high speed.
*/
-#ifdef EHCI_SOFT_RETRIES
-static int soft_retries = EHCI_SOFT_RETRIES;
-MODULE_PARM (soft_retries, "i");
-MODULE_PARM_DESC (soft_retries, "Number of software retries for endpoint i/o");
-#endif
-
/*-------------------------------------------------------------------------*/
/* fill a qtd, returning how much of the buffer we were able to queue up */
@@ -134,8 +128,9 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
urb->status = -EPIPE;
else /* unknown */
urb->status = -EPROTO;
- dbg ("devpath %s ep %d-%s qtd token %x --> status %d",
- urb->dev->devpath, usb_pipeendpoint (urb->pipe),
+ dbg ("ep %d-%s qtd token %08x --> status %d",
+ /* devpath */
+ usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
token, urb->status);
@@ -148,8 +143,8 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
usb_pipeendpoint (pipe),
usb_pipeout (pipe));
if (urb->dev->tt && !usb_pipeint (pipe)) {
-err ("must CLEAR_TT_BUFFER, hub %s port %d%s addr %d ep %d",
- urb->dev->tt->hub->devpath, urb->dev->ttport,
+err ("must CLEAR_TT_BUFFER, hub port %d%s addr %d ep %d",
+ urb->dev->ttport, /* devpath */
urb->dev->tt->multi ? "" : " (all-ports TT)",
urb->dev->devnum, usb_pipeendpoint (urb->pipe));
// FIXME something (khubd?) should make the hub
@@ -228,12 +223,10 @@ qh_completions (
struct list_head *qtd_list,
int freeing
) {
- struct ehci_qtd *qtd = 0;
- struct list_head *next = 0;
- u32 token;
+ struct ehci_qtd *qtd, *last;
+ struct list_head *next;
struct ehci_qh *qh = 0;
- struct urb *urb = 0;
- int halted = 0;
+ int unlink = 0, halted = 0;
unsigned long flags;
int retval = 0;
@@ -243,89 +236,116 @@ qh_completions (
return retval;
}
- for (qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
+ /* scan QTDs till end of list, or we reach an active one */
+ for (qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list),
+ last = 0, next = 0;
next != qtd_list;
- qtd = list_entry (next, struct ehci_qtd, qtd_list)) {
- token = le32_to_cpu (qtd->hw_token);
- if (!qh) {
- urb = qtd->urb;
- qh = (struct ehci_qh *) urb->hcpriv;
+ last = qtd, qtd = list_entry (next,
+ struct ehci_qtd, qtd_list)) {
+ struct urb *urb = qtd->urb;
+ u32 token = 0;
+
+ /* qh is non-null iff these qtds were queued to the HC */
+ qh = (struct ehci_qh *) urb->hcpriv;
+
+ /* clean up any state from previous QTD ...*/
+ if (last) {
+ if (likely (last->urb != urb)) {
+ /* complete() can reenter this HCD */
+ spin_unlock_irqrestore (&ehci->lock, flags);
+ if (likely (freeing != 0))
+ ehci_urb_done (ehci, last->buf_dma,
+ last->urb);
+ else
+ ehci_urb_complete (ehci, last->buf_dma,
+ last->urb);
+ spin_lock_irqsave (&ehci->lock, flags);
+ retval++;
+ }
+
+ /* qh overlays can have HC's old cached copies of
+ * next qtd ptrs, if an URB was queued afterwards.
+ */
+ if (qh && cpu_to_le32 (last->qtd_dma) == qh->hw_current
+ && last->hw_next != qh->hw_qtd_next) {
+ qh->hw_alt_next = last->hw_alt_next;
+ qh->hw_qtd_next = last->hw_next;
+ }
+
+ if (likely (freeing != 0))
+ ehci_qtd_free (ehci, last);
+ last = 0;
}
+ next = qtd->qtd_list.next;
+
+ /* if these qtds were queued to the HC, some may be active.
+ * else we're cleaning up after a failed URB submission.
+ */
if (likely (qh != 0)) {
+ int qh_halted;
+
+ qh_halted = __constant_cpu_to_le32 (QTD_STS_HALT)
+ & qh->hw_token;
+ token = le32_to_cpu (qtd->hw_token);
halted = halted
+ || qh_halted
|| (ehci->hcd.state == USB_STATE_HALT)
|| (qh->qh_state == QH_STATE_IDLE);
- if (unlikely ((token & QTD_STS_HALT) != 0)) {
-#ifdef EHCI_SOFT_RETRIES
- /* extra soft retries for protocol errors */
- if (!halted
- && qh->retries < soft_retries
- && (QTD_STS_HALT|QTD_STS_XACT)
- == (token & 0xff)
- && QTD_CERR (token) == 0) {
- if (qh->retries == 0)
- dbg ("soft retry, qh %p qtd %p",
- qh, qtd);
- qh->retries++;
- token &= ~0x0ff;
- token |= QTD_STS_ACTIVE;
- token |= (EHCI_TUNE_CERR << 10);
- /* qtd update not needed */
- qh->hw_token = cpu_to_le32 (token);
- spin_unlock_irqrestore (&ehci->lock,
- flags);
- return;
-
- } else if (qh->retries >= soft_retries
- && soft_retries) {
- dbg ("retried %d times, qh %p qtd %p",
- qh->retries, qh, qtd);
- }
-#endif /* EHCI_SOFT_RETRIES */
- halted = 1;
- }
-
- if (unlikely ((token & QTD_STS_ACTIVE) != 0)) {
- /* stop scan if qtd is visible to the HC */
- if (!halted) {
- urb = 0;
- break;
- }
+ /* QH halts only because of fault or unlink; in both
+ * cases, queued URBs get unlinked. But for unlink,
+ * URBs at the head of the queue can stay linked.
+ */
+ if (unlikely (halted != 0)) {
- /* continue cleanup if HC is halted */
+ /* unlink everything because of HC shutdown? */
if (ehci->hcd.state == USB_STATE_HALT) {
+ freeing = unlink = 1;
urb->status = -ESHUTDOWN;
- goto scrub;
- }
- /* stall? some other urb was unlinked? */
- if (urb->status == -EINPROGRESS) {
-dbg ("?why? qh %p, qtd %p halted, urb %p, token %8x, len %d",
- qh, qtd, urb, token, urb->actual_length);
-spin_unlock_irqrestore (&ehci->lock, flags);
-return retval;
- /*
- * FIXME: write this code. When one queued urb is unlinked,
- * unlink every succeeding urb.
- */
+ /* explicit unlink, starting here? */
+ } else if (qh->qh_state == QH_STATE_IDLE
+ && (urb->status == -ECONNRESET
+ || urb->status == -ENOENT)) {
+ freeing = unlink = 1;
+
+ /* unlink everything because of error? */
+ } else if (qh_halted
+ && !(token & QTD_STS_HALT)) {
+ freeing = unlink = 1;
+ if (urb->status == -EINPROGRESS)
+ urb->status = -ECONNRESET;
+
+ /* unlink the rest? */
+ } else if (unlink) {
+ urb->status = -ECONNRESET;
+
+ /* QH halted to unlink urbs after this? */
+ } else if ((token & QTD_STS_ACTIVE) != 0) {
+ qtd = 0;
continue;
}
- /* else stopped for some other reason */
- }
-scrub:
+ /* Else QH is active, so we must not modify QTDs
+ * that HC may be working on. Break from loop.
+ */
+ } else if (unlikely ((token & QTD_STS_ACTIVE) != 0)) {
+ next = qtd_list;
+ qtd = 0;
+ continue;
+ }
+
spin_lock (&urb->lock);
qtd_copy_status (urb, qtd->length, token);
spin_unlock (&urb->lock);
}
- next = qtd->qtd_list.next;
/*
* NOTE: this won't work right with interrupt urbs that
* need multiple qtds ... only the first scan of qh->qtd_list
* starts at the right qtd, yet multiple scans could happen
* for transfers that are scheduled across multiple uframes.
+ * (Such schedules are not currently allowed!)
*/
if (likely (freeing != 0))
list_del (&qtd->qtd_list);
@@ -347,8 +367,6 @@ scrub:
qtd->hw_buf [0] |= cpu_to_le32 (0x0fff & qtd->buf_dma);
}
- spin_unlock_irqrestore (&ehci->lock, flags);
-
#if 0
if (urb->status == -EINPROGRESS)
vdbg (" qtd %p ok, urb %p, token %8x, len %d",
@@ -364,21 +382,6 @@ scrub:
pci_unmap_single (ehci->hcd.pdev,
qtd->buf_dma, sizeof (struct usb_ctrlrequest),
PCI_DMA_TODEVICE);
-
- /* another queued urb? */
- if (unlikely (qtd->urb != urb)) {
- if (likely (freeing != 0))
- ehci_urb_done (ehci, qtd->buf_dma, urb);
- else
- ehci_urb_complete (ehci, qtd->buf_dma, urb);
- retval++;
- urb = qtd->urb;
- }
-
- if (likely (freeing != 0))
- ehci_qtd_free (ehci, qtd);
- spin_lock_irqsave (&ehci->lock, flags);
- qtd = list_entry (next, struct ehci_qtd, qtd_list);
}
/* patch up list head? */
@@ -389,11 +392,12 @@ scrub:
spin_unlock_irqrestore (&ehci->lock, flags);
/* last urb's completion might still need calling */
- if (likely (qtd && urb)) {
- if (likely (freeing != 0))
- ehci_urb_done (ehci, qtd->buf_dma, urb);
- else
- ehci_urb_complete (ehci, qtd->buf_dma, urb);
+ if (likely (last != 0)) {
+ if (likely (freeing != 0)) {
+ ehci_urb_done (ehci, last->buf_dma, last->urb);
+ ehci_qtd_free (ehci, last);
+ } else
+ ehci_urb_complete (ehci, last->buf_dma, last->urb);
retval++;
}
return retval;
@@ -749,7 +753,9 @@ submit_async (
/* is an URB already queued to this qh? */
if (unlikely (!list_empty (&qh->qtd_list))) {
struct ehci_qtd *last_qtd;
+ int short_rx = 0;
+ /* update the last qtd's "next" pointer */
// dbg_qh ("non-empty qh", ehci, qh);
last_qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
@@ -760,6 +766,21 @@ submit_async (
&& (epnum & 0x10)) {
// only the last QTD for now
last_qtd->hw_alt_next = hw_next;
+ short_rx = 1;
+ }
+
+ /* Adjust any old copies in qh overlay too.
+ * Interrupt code must cope with the case of the HC having it
+ * cached, and clobbering these updates.
+ * ... complicates getting rid of extra interrupts!
+ */
+ if (qh->hw_current == cpu_to_le32 (last_qtd->qtd_dma)) {
+ wmb ();
+ qh->hw_qtd_next = hw_next;
+ if (short_rx)
+ qh->hw_alt_next = hw_next
+ | (qh->hw_alt_next & 0x1e);
+ vdbg ("queue to qh %p, patch", qh);
}
/* no URB queued */
@@ -822,8 +843,8 @@ static void end_unlink_async (struct ehci_hcd *ehci)
qh_completions (ehci, &qh->qtd_list, 1);
- // FIXME unlink any urb should unlink all following urbs,
- // so that this will never happen
+ // unlinking any urb should now unlink all following urbs, so that
+ // relinking only happens for urbs before the unlinked ones.
if (!list_empty (&qh->qtd_list)
&& HCD_IS_RUNNING (ehci->hcd.state))
qh_link_async (ehci, qh);
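
The rewritten qh_completions() walks the qtd list with a one-element delay: the previous qtd ("last") is completed only after the scan has moved past it, because the lock must be dropped around the completion callback (complete() can re-enter this HCD) and the list cursor has to stay valid across that window. A sketch of the pattern, with hypothetical names (item, complete_item):

	struct item *cur, *last = NULL;

	spin_lock_irqsave(&lock, flags);
	for (cur = first; cur; cur = cur->next) {
		if (last) {
			/* the callback may re-enter us: drop the lock */
			spin_unlock_irqrestore(&lock, flags);
			complete_item(last);
			spin_lock_irqsave(&lock, flags);
			last = NULL;
		}
		/* ... examine cur; may continue or break ... */
		last = cur;
	}
	spin_unlock_irqrestore(&lock, flags);
	if (last)
		complete_item(last);	/* final completion, lock released */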
diff --git a/drivers/usb/hcd/ehci-sched.c b/drivers/usb/hcd/ehci-sched.c
index 905e506f3..5a971fee2 100644
--- a/drivers/usb/hcd/ehci-sched.c
+++ b/drivers/usb/hcd/ehci-sched.c
@@ -381,7 +381,7 @@ static int intr_submit (
vdbg ("qh %p usecs %d period %d starting frame %d.%d",
qh, qh->usecs, period, frame, uframe);
do {
- if (unlikely ((int)ehci->pshadow [frame].ptr)) {
+ if (unlikely ((long)ehci->pshadow [frame].ptr)) {
// FIXME -- just link to the end, before any qh with a shorter period,
// AND handle it already being (implicitly) linked into this frame
BUG ();
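
The (int) to (long) cast change matters on 64-bit platforms: casting a pointer to int keeps only the low 32 bits, so a valid pointer whose low word happens to be zero tests false, while long is pointer-sized on the kernel's 64-bit ABIs. An illustrative value:

	void *ptr = (void *) 0x500000000UL;	/* low 32 bits are zero */

	/* (int) ptr  == 0            -> slot wrongly looks empty  */
	/* (long) ptr == 0x500000000  -> correctly tests non-zero  */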
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index b092d720c..46664b11a 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -429,7 +429,7 @@ void ext3_put_super (struct super_block * sb)
J_ASSERT(list_empty(&sbi->s_orphan));
invalidate_bdev(sb->s_bdev, 0);
- if (sbi->journal_bdev != sb->s_bdev) {
+ if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cbe61d619..904d4a406 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1570,6 +1570,7 @@
#define PCI_DEVICE_ID_INTEL_82434 0x04a3
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_82562ET 0x1031
+#define PCI_DEVICE_ID_INTEL_82801CAM 0x1038
#define PCI_DEVICE_ID_INTEL_82559ER 0x1209
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
#define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e017015a9..a61d733ee 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -144,8 +144,7 @@ extern void trap_init(void);
extern void update_process_times(int user);
extern void update_one_process(struct task_struct *p, unsigned long user,
unsigned long system, int cpu);
-extern void expire_task(struct task_struct *p);
-extern void idle_tick(void);
+extern void scheduler_tick(struct task_struct *p);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
@@ -481,14 +480,11 @@ struct task_struct {
/*
* Scales user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ 24 ... 63 (MAX_PRIO-1) ]
+ * to static priority [ 128 ... 167 (MAX_PRIO-1) ]
*
- * User-nice value of -20 == static priority 24, and
- * user-nice value 19 == static priority 63. The lower
+ * User-nice value of -20 == static priority 128, and
+ * user-nice value 19 == static priority 167. The lower
* the priority value, the higher the task's priority.
- *
- * Note that while static priority cannot go below 24,
- * the priority of a process can go as low as 0.
*/
#define NICE_TO_PRIO(n) (MAX_PRIO-1 + (n) - 19)
@@ -496,16 +492,16 @@ struct task_struct {
/*
* Default timeslice is 90 msecs, maximum is 150 msecs.
- * Minimum timeslice is 20 msecs.
+ * Minimum timeslice is 30 msecs.
*/
-#define MIN_TIMESLICE ( 20 * HZ / 1000)
+#define MIN_TIMESLICE ( 30 * HZ / 1000)
#define MAX_TIMESLICE (150 * HZ / 1000)
#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
/*
- * PRIO_TO_TIMESLICE scales priority values [ 100 ... 139 ]
+ * PRIO_TO_TIMESLICE scales priority values [ 128 ... 167 ]
* to initial time slice values [ MAX_TIMESLICE (150 msec) ... 2 ]
*
* The higher a process's priority, the bigger timeslices
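
With the comment's new range the unchanged macro now maps nice values onto [128, 167], assuming MAX_PRIO == 168 (the definition itself is not shown in this excerpt):

	#define NICE_TO_PRIO(n)	(MAX_PRIO-1 + (n) - 19)

	/* with MAX_PRIO-1 == 167:
	 *	NICE_TO_PRIO(-20) == 167 - 20 - 19 == 128	(highest)
	 *	NICE_TO_PRIO(  0) == 167 +  0 - 19 == 148	(default)
	 *	NICE_TO_PRIO( 19) == 167 + 19 - 19 == 167	(lowest)
	 */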
diff --git a/kernel/fork.c b/kernel/fork.c
index 4774f9cdb..3cf59fd03 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -703,7 +703,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
* runqueue lock is not a problem.
*/
current->time_slice = 1;
- expire_task(current);
+ scheduler_tick(current);
}
p->sleep_timestamp = p->run_timestamp = jiffies;
p->hist[0] = p->hist[1] = p->hist[2] = p->hist[3] = 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index 3fdd23488..88b5ba0f5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -37,33 +37,36 @@ struct prio_array {
*
* Locking rule: those places that want to lock multiple runqueues
* (such as the load balancing or the process migration code), lock
- * acquire operations must be ordered by rq->cpu.
+ * acquire operations must be ordered by the runqueue's cpu id.
*
* The RT event id is used to avoid calling into the RT scheduler
* if there is an RT task active in an SMP system but there is no
* RT scheduling activity otherwise.
*/
-static struct runqueue {
- int cpu;
+struct runqueue {
spinlock_t lock;
- unsigned long nr_running, nr_switches, last_rt_event;
+ unsigned long nr_running, nr_switches;
task_t *curr, *idle;
prio_array_t *active, *expired, arrays[2];
- char __pad [SMP_CACHE_BYTES];
-} runqueues [NR_CPUS] __cacheline_aligned;
+ int prev_nr_running[NR_CPUS];
+} ____cacheline_aligned;
+
+static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
-#define this_rq() (runqueues + smp_processor_id())
-#define task_rq(p) (runqueues + (p)->cpu)
#define cpu_rq(cpu) (runqueues + (cpu))
-#define cpu_curr(cpu) (runqueues[(cpu)].curr)
+#define this_rq() cpu_rq(smp_processor_id())
+#define task_rq(p) cpu_rq((p)->cpu)
+#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+#define rq_cpu(rq) ((rq) - runqueues)
#define rt_task(p) ((p)->policy != SCHED_OTHER)
+
#define lock_task_rq(rq,p,flags) \
do { \
repeat_lock_task: \
rq = task_rq(p); \
spin_lock_irqsave(&rq->lock, flags); \
- if (unlikely((rq)->cpu != (p)->cpu)) { \
+ if (unlikely(rq_cpu(rq) != (p)->cpu)) { \
spin_unlock_irqrestore(&rq->lock, flags); \
goto repeat_lock_task; \
} \
@@ -136,6 +139,11 @@ static inline void new_second(task_t *p)
}
/*
+ * process load-history tick length. Right now it's 1 second:
+ */
+#define HHZ (HZ)
+
+/*
* This function clears the load-history entries when a task has spent
* more than 4 seconds running.
*/
@@ -150,7 +158,7 @@ static inline void clear_hist(task_t *p)
*/
static inline void fill_hist(task_t *p)
{
- p->hist[0] = p->hist[1] = p->hist[2] = p->hist[3] = HZ;
+ p->hist[0] = p->hist[1] = p->hist[2] = p->hist[3] = HHZ;
}
/*
@@ -163,7 +171,7 @@ static inline void update_sleep_avg_deactivate(task_t *p)
{
int idx;
unsigned long now = jiffies,
- seconds_passed = now/HZ - p->run_timestamp/HZ;
+ seconds_passed = now/HHZ - p->run_timestamp/HHZ;
/*
* Do we have to update the history entries because a
@@ -197,7 +205,7 @@ static inline void update_sleep_avg_activate(task_t *p, unsigned long now)
{
int idx;
unsigned long sleep_ticks,
- seconds_passed = now/HZ - p->sleep_timestamp/HZ;
+ seconds_passed = now/HHZ - p->sleep_timestamp/HHZ;
/*
* Do we have to update the history entries because a
@@ -206,7 +214,7 @@ static inline void update_sleep_avg_activate(task_t *p, unsigned long now)
* path history entries are simply cleared, but here we have
* to add any potential time spent sleeping in the current
* second. This value is 'sleep_ticks' - it can be anywhere
- * between 0 and 99. (it cannot be 100 because that would mean
+ * between 0 and HZ-1. (it cannot be HZ because that would mean
* that the current second is over and we'd have to go to the
* next history entry.) Another detail is that we might
* have gone sleeping in this second, or in any previous second.
@@ -219,7 +227,7 @@ static inline void update_sleep_avg_activate(task_t *p, unsigned long now)
/*
* Update the "last partially-slept" second's entry:
*/
- p->hist[p->hist_idx] += HZ - (p->sleep_timestamp % HZ);
+ p->hist[p->hist_idx] += HHZ - (p->sleep_timestamp % HHZ);
new_second(p);
/*
@@ -228,7 +236,7 @@ static inline void update_sleep_avg_activate(task_t *p, unsigned long now)
*/
for (idx = 1; idx < seconds_passed; idx++) {
new_second(p);
- p->hist[p->hist_idx] = HZ;
+ p->hist[p->hist_idx] = HHZ;
}
} else
/*
@@ -239,7 +247,7 @@ static inline void update_sleep_avg_activate(task_t *p, unsigned long now)
/* Clear the new current entry: */
p->hist[p->hist_idx] = 0;
- sleep_ticks = now % HZ;
+ sleep_ticks = now % HHZ;
} else
sleep_ticks = now - p->sleep_timestamp;
/*
@@ -259,8 +267,8 @@ static inline void update_sleep_avg_activate(task_t *p, unsigned long now)
*/
static inline unsigned int get_run_avg(task_t *p, unsigned long new)
{
- return HZ - (p->hist[0] + p->hist[1] + p->hist[2] +
- p->hist[3]) * HZ / ((SLEEP_HIST_SIZE-1)*HZ + (new % HZ));
+ return HHZ - (p->hist[0] + p->hist[1] + p->hist[2] +
+ p->hist[3]) * HHZ / ((SLEEP_HIST_SIZE-1)*HHZ + (new % HHZ));
}
static inline void activate_task(task_t *p, runqueue_t *rq)
@@ -287,7 +295,7 @@ static inline void activate_task(task_t *p, runqueue_t *rq)
* We scale this 'load value' to between 0 and MAX_USER_PRIO/3.
* A task that generates 100% load gets the maximum penalty.
*/
- penalty = MAX_USER_PRIO * get_run_avg(p, now) / (3 * HZ);
+ penalty = MAX_USER_PRIO * get_run_avg(p, now) / (3 * HHZ);
if (!rt_task(p)) {
p->prio = NICE_TO_PRIO(p->__nice) + penalty;
if (p->prio > MAX_PRIO-1)
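
To see the scaling at work: get_run_avg() returns 0 for a task that slept through the whole history window and HHZ for one that never slept, so the penalty spans 0 to MAX_USER_PRIO/3 as the comment promises. Assuming HHZ == 100 and MAX_USER_PRIO == 40 (40 nice levels):

	/* pure sleeper: hist[] sums to (SLEEP_HIST_SIZE-1)*HHZ + (now % HHZ)
	 *	get_run_avg() == 0,   penalty == 0
	 * pure CPU hog:  hist[] == {0, 0, 0, 0}
	 *	get_run_avg() == 100, penalty == 40 * 100 / 300 == 13
	 */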
@@ -313,7 +321,7 @@ static inline void resched_task(task_t *p)
need_resched = p->need_resched;
wmb();
p->need_resched = 1;
- if (!need_resched && p->cpu != smp_processor_id())
+ if (!need_resched && (p->cpu != smp_processor_id()))
smp_send_reschedule(p->cpu);
}
@@ -375,17 +383,9 @@ static int try_to_wake_up(task_t * p, int synchronous)
lock_task_rq(rq, p, flags);
p->state = TASK_RUNNING;
if (!p->array) {
- if (!rt_task(p) && synchronous && (smp_processor_id() < p->cpu)) {
- spin_lock(&this_rq()->lock);
- p->cpu = smp_processor_id();
- activate_task(p, this_rq());
- spin_unlock(&this_rq()->lock);
- } else {
- activate_task(p, rq);
- if ((rq->curr == rq->idle) ||
- (p->prio < rq->curr->prio))
- resched_task(rq->curr);
- }
+ activate_task(p, rq);
+ if ((rq->curr == rq->idle) || (p->prio < rq->curr->prio))
+ resched_task(rq->curr);
success = 1;
}
unlock_task_rq(rq, p, flags);
@@ -482,59 +482,98 @@ static inline unsigned long max_rq_len(void)
}
/*
- * Current runqueue is empty, try to find work on
- * other runqueues.
+ * Current runqueue is empty, or rebalance tick: if there is an
+ * imbalance (the current runqueue is too short), then pull from the
+ * busiest runqueue(s).
*
* We call this with the current runqueue locked,
* irqs disabled.
*/
-static void load_balance(runqueue_t *this_rq)
+static void load_balance(runqueue_t *this_rq, int idle)
{
- int nr_tasks, load, prev_max_load, max_load, idx, i;
+ int imbalance, nr_running, load, prev_max_load,
+ max_load, idx, i, this_cpu = smp_processor_id();
task_t *next = this_rq->idle, *tmp;
- runqueue_t *busiest, *rq_tmp;
+ runqueue_t *busiest, *rq_src;
prio_array_t *array;
list_t *head, *curr;
- prev_max_load = max_rq_len();
- nr_tasks = prev_max_load - this_rq->nr_running;
- /*
- * It needs an at least ~10% imbalance to trigger balancing:
- */
- if (nr_tasks <= 1 + prev_max_load/8)
- return;
- prev_max_load++;
-
-repeat_search:
/*
* We search all runqueues to find the most busy one.
* We do this lockless to reduce cache-bouncing overhead,
- * we re-check the source CPU with the lock held.
+ * we re-check the 'best' source CPU later on again, with
+ * the lock held.
+ *
+ * We fend off statistical fluctuations in runqueue lengths by
+ * saving the runqueue length during the previous load-balancing
+ * operation and using the smaller of the current and saved lengths.
+ * If a runqueue stays long for a sustained period of time, then
+ * we recognize it and pull tasks from it.
+ *
+ * The 'current runqueue length' is a statistical maximum variable;
+ * for that one we take the longer value, to avoid fluctuations in
+ * the other direction. So for a load-balance to happen it needs a
+ * stable long runqueue on the target CPU and a stable short runqueue
+ * on the local CPU.
+ *
+ * We make an exception if this CPU is about to become idle - in
+ * that case we are less picky about moving a task across CPUs and
+ * take what can be taken.
*/
+ if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu]))
+ nr_running = this_rq->nr_running;
+ else
+ nr_running = this_rq->prev_nr_running[this_cpu];
+ prev_max_load = 1000000000;
+
busiest = NULL;
max_load = 0;
for (i = 0; i < smp_num_cpus; i++) {
- rq_tmp = cpu_rq(i);
- load = rq_tmp->nr_running;
+ rq_src = cpu_rq(i);
+ if (idle || (rq_src->nr_running < this_rq->prev_nr_running[i]))
+ load = rq_src->nr_running;
+ else
+ load = this_rq->prev_nr_running[i];
+ this_rq->prev_nr_running[i] = rq_src->nr_running;
+
if ((load > max_load) && (load < prev_max_load) &&
- (rq_tmp != this_rq)) {
- busiest = rq_tmp;
+ (rq_src != this_rq)) {
+ busiest = rq_src;
max_load = load;
}
}
if (likely(!busiest))
return;
- if (max_load <= this_rq->nr_running)
+
+ imbalance = (max_load - nr_running) / 2;
+
+ /*
+ * It takes at least a ~25% imbalance to trigger balancing.
+ *
+ * prev_max_load makes sure that we do not try to balance
+ * ad infinitum - certain tasks might be impossible to pull
+ * into this runqueue.
+ */
+ if (!idle && (imbalance < (max_load + 3)/4))
return;
prev_max_load = max_load;
- if (busiest->cpu < this_rq->cpu) {
+
+ /*
+ * Ok, lets do some actual balancing:
+ */
+
+ if (rq_cpu(busiest) < this_cpu) {
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock(&this_rq->lock);
} else
spin_lock(&busiest->lock);
- if (busiest->nr_running <= this_rq->nr_running + 1)
+ /*
+ * Make sure nothing changed since we checked the
+ * runqueue length.
+ */
+ if (busiest->nr_running <= nr_running + 1)
goto out_unlock;
/*
@@ -561,14 +600,14 @@ skip_bitmap:
goto new_array;
}
spin_unlock(&busiest->lock);
- goto repeat_search;
+ goto out_unlock;
}
head = array->queue + idx;
curr = head->next;
skip_queue:
tmp = list_entry(curr, task_t, run_list);
- if ((tmp == busiest->curr) || !(tmp->cpus_allowed & (1 << smp_processor_id()))) {
+ if ((tmp == busiest->curr) || !(tmp->cpus_allowed & (1 << this_cpu))) {
curr = curr->next;
if (curr != head)
goto skip_queue;
@@ -582,41 +621,73 @@ skip_queue:
*/
dequeue_task(next, array);
busiest->nr_running--;
- next->cpu = smp_processor_id();
+ next->cpu = this_cpu;
this_rq->nr_running++;
enqueue_task(next, this_rq->active);
if (next->prio < current->prio)
current->need_resched = 1;
- if (--nr_tasks) {
+ if (!idle && --imbalance) {
if (array == busiest->expired) {
array = busiest->active;
goto new_array;
}
spin_unlock(&busiest->lock);
- goto repeat_search;
}
out_unlock:
spin_unlock(&busiest->lock);
}
-#define REBALANCE_TICK (HZ/100)
+/*
+ * One of the idle_cpu_tick() or busy_cpu_tick() functions gets
+ * called every timer tick, on every CPU. Our balancing action
+ * frequency and aggressiveness depend on whether the CPU is
+ * idle or not.
+ *
+ * busy-rebalance every 250 msecs. idle-rebalance every 1 msec. (or on
+ * systems with HZ=100, every 10 msecs.)
+ */
+#define BUSY_REBALANCE_TICK (HZ/4 ?: 1)
+#define IDLE_REBALANCE_TICK (HZ/1000 ?: 1)
-void idle_tick(void)
+static inline void idle_tick(void)
{
- unsigned long flags;
+ if ((jiffies % IDLE_REBALANCE_TICK) ||
+ likely(this_rq()->curr == NULL))
+ return;
+ spin_lock(&this_rq()->lock);
+ load_balance(this_rq(), 1);
+ spin_unlock(&this_rq()->lock);
+}
- if (!(jiffies % REBALANCE_TICK) && likely(this_rq()->curr != NULL)) {
- spin_lock_irqsave(&this_rq()->lock, flags);
- load_balance(this_rq());
- spin_unlock_irqrestore(&this_rq()->lock, flags);
- }
+/*
+ * Should we treat the task as interactive or not.
+ * A task is interactive if it has not exceeded 50%
+ * of the max CPU-hog penalty yet.
+ */
+static int task_interactive(task_t *p, unsigned long now)
+{
+ int penalty;
+
+ if (rt_task(p))
+ return 1;
+ penalty = MAX_USER_PRIO * get_run_avg(p, jiffies) / (3 * HHZ);
+ if (penalty <= MAX_USER_PRIO/6)
+ return 1;
+ return 0;
}
-void expire_task(task_t *p)
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+void scheduler_tick(task_t *p)
{
+ unsigned long now = jiffies;
runqueue_t *rq = this_rq();
- unsigned long flags;
+ if (p == rq->idle || !rq->idle)
+ return idle_tick();
+ /* Task might have expired already, but not scheduled off yet */
if (p->array != rq->active) {
p->need_resched = 1;
return;
@@ -624,7 +695,7 @@ void expire_task(task_t *p)
/*
* The task cannot change CPUs because it's the current task.
*/
- spin_lock_irqsave(&rq->lock, flags);
+ spin_lock(&rq->lock);
if ((p->policy != SCHED_FIFO) && !--p->time_slice) {
p->need_resched = 1;
if (rt_task(p))
@@ -644,7 +715,7 @@ void expire_task(task_t *p)
* so this can not starve other processes accidentally.
* Otherwise this is pretty handy for sysadmins ...
*/
- if (p->prio <= MAX_RT_PRIO + MAX_USER_PRIO/6)
+ if (task_interactive(p, now))
enqueue_task(p, rq->active);
else
enqueue_task(p, rq->expired);
@@ -658,8 +729,9 @@ void expire_task(task_t *p)
activate_task(p, rq);
}
}
- load_balance(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ if (!(now % BUSY_REBALANCE_TICK))
+ load_balance(rq, 0);
+ spin_unlock(&rq->lock);
}
void scheduling_functions_start_here(void) { }
@@ -695,7 +767,7 @@ need_resched_back:
}
pick_next_task:
if (unlikely(!rq->nr_running)) {
- load_balance(rq);
+ load_balance(rq, 1);
if (rq->nr_running)
goto pick_next_task;
next = rq->idle;
@@ -894,6 +966,8 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
int target_cpu;
new_mask &= cpu_online_map;
+ if (!new_mask)
+ BUG();
p->cpus_allowed = new_mask;
/*
* Can the task run on the current CPU? If not then
@@ -1336,7 +1410,7 @@ static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
if (rq1 == rq2)
spin_lock(&rq1->lock);
else {
- if (rq1->cpu < rq2->cpu) {
+ if (rq_cpu(rq1) < rq_cpu(rq2)) {
spin_lock(&rq1->lock);
spin_lock(&rq2->lock);
} else {
@@ -1394,7 +1468,6 @@ void __init sched_init(void)
rq->active = rq->arrays + 0;
rq->expired = rq->arrays + 1;
spin_lock_init(&rq->lock);
- rq->cpu = i;
for (j = 0; j < 2; j++) {
array = rq->arrays + j;
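
A worked example of the new busy-rebalance trigger: with max_load == 8 on the busiest queue and a local nr_running of 4, imbalance == (8 - 4)/2 == 2 and the threshold (max_load + 3)/4 == 2, so 'imbalance < threshold' is false and tasks get pulled; at max_load == 6 the imbalance of 1 stays below the threshold of 2 and a busy CPU leaves things alone. An idle CPU skips the check entirely and takes what it can:

	imbalance = (max_load - nr_running) / 2;	/* 8,4 -> 2; 6,4 -> 1 */
	if (!idle && (imbalance < (max_load + 3)/4))	/* threshold == 2 */
		return;					/* no balancing */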
diff --git a/kernel/timer.c b/kernel/timer.c
index ce3945cda..da17ae4ac 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -585,17 +585,16 @@ void update_process_times(int user_tick)
update_one_process(p, user_tick, system, cpu);
if (p->pid) {
- expire_task(p);
if (p->__nice > 0)
kstat.per_cpu_nice[cpu] += user_tick;
else
kstat.per_cpu_user[cpu] += user_tick;
kstat.per_cpu_system[cpu] += system;
} else {
- idle_tick();
if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
kstat.per_cpu_system[cpu] += system;
}
+ scheduler_tick(p);
}
/*
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 6346d8248..7dccbc057 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -490,19 +490,35 @@ int ircomm_proc_read(char *buf, char **start, off_t offset, int len)
{
struct ircomm_cb *self;
unsigned long flags;
- int i=0;
save_flags(flags);
cli();
len = 0;
- len += sprintf(buf+len, "Instance %d:\n", i++);
-
self = (struct ircomm_cb *) hashbin_get_first(ircomm);
while (self != NULL) {
ASSERT(self->magic == IRCOMM_MAGIC, return len;);
+ if(self->line < 0x10)
+ len += sprintf(buf+len, "ircomm%d", self->line);
+ else
+ len += sprintf(buf+len, "irlpt%d", self->line - 0x10);
+ len += sprintf(buf+len, " state: %s, ",
+ ircomm_state[ self->state]);
+ len += sprintf(buf+len,
+ "slsap_sel: %#02x, dlsap_sel: %#02x, mode:",
+ self->slsap_sel, self->dlsap_sel);
+ if(self->service_type & IRCOMM_3_WIRE_RAW)
+ len += sprintf(buf+len, " 3-wire-raw");
+ if(self->service_type & IRCOMM_3_WIRE)
+ len += sprintf(buf+len, " 3-wire");
+ if(self->service_type & IRCOMM_9_WIRE)
+ len += sprintf(buf+len, " 9-wire");
+ if(self->service_type & IRCOMM_CENTRONICS)
+ len += sprintf(buf+len, " Centronics");
+ len += sprintf(buf+len, "\n");
+
self = (struct ircomm_cb *) hashbin_get_next(ircomm);
}
restore_flags(flags);
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 5ddd3e1de..bbd8ac46f 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -103,12 +103,30 @@ int ircomm_lmp_connect_request(struct ircomm_cb *self,
*
*
*/
-int ircomm_lmp_connect_response(struct ircomm_cb *self, struct sk_buff *skb)
+int ircomm_lmp_connect_response(struct ircomm_cb *self, struct sk_buff *userdata)
{
+ struct sk_buff *skb;
int ret;
IRDA_DEBUG(0, __FUNCTION__"()\n");
+ /* Any userdata supplied? */
+ if (userdata == NULL) {
+ skb = dev_alloc_skb(64);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Reserve space for MUX and LAP header */
+ skb_reserve(skb, LMP_MAX_HEADER);
+ } else {
+ skb = userdata;
+ /*
+ * Check that the client has reserved enough space for
+ * headers
+ */
+ ASSERT(skb_headroom(skb) >= LMP_MAX_HEADER, return -1;);
+ }
+
ret = irlmp_connect_response(self->lsap, skb);
return 0;
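
The new no-userdata path in ircomm_lmp_connect_response() allocates its own reply skb and reserves headroom, so the LMP and LAP layers can later skb_push() their headers in front without a re-allocation; callers that do pass userdata must have reserved that headroom themselves, which the ASSERT checks:

	struct sk_buff *skb = dev_alloc_skb(64);

	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LMP_MAX_HEADER);	/* room for headers to be pushed */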
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 133a5c30e..6c6c5c700 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -80,6 +80,7 @@ const char *infrared_mode[] = {
"TV_REMOTE",
};
+#ifdef CONFIG_IRDA_DEBUG
static const char *task_state[] = {
"IRDA_TASK_INIT",
"IRDA_TASK_DONE",
@@ -91,6 +92,7 @@ static const char *task_state[] = {
"IRDA_TASK_CHILD_WAIT",
"IRDA_TASK_CHILD_DONE",
};
+#endif /* CONFIG_IRDA_DEBUG */
static void irda_task_timer_expired(void *data);
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 7909b30a0..035f07dfd 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -41,6 +41,7 @@
#include <net/irda/iriap_event.h>
#include <net/irda/iriap.h>
+#ifdef CONFIG_IRDA_DEBUG
/* FIXME: This one should go in irlmp.c */
static const char *ias_charset_types[] = {
"CS_ASCII",
@@ -55,6 +56,7 @@ static const char *ias_charset_types[] = {
"CS_ISO_8859_9",
"CS_UNICODE"
};
+#endif /* CONFIG_IRDA_DEBUG */
static hashbin_t *iriap = NULL;
static __u32 service_handle;
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 18ca9dbda..7563dc8c7 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -317,8 +317,15 @@ void irlan_connect_indication(void *instance, void *sap, struct qos_info *qos,
del_timer(&self->watchdog_timer);
- irlan_do_provider_event(self, IRLAN_DATA_CONNECT_INDICATION, skb);
- irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, skb);
+ /* If you want to pass the skb to *both* state machines, you will
+ * need to skb_clone() it, so that you don't free it twice.
+ * As the state machines don't need it, get rid of it here...
+ * Jean II */
+ if (skb)
+ dev_kfree_skb(skb);
+
+ irlan_do_provider_event(self, IRLAN_DATA_CONNECT_INDICATION, NULL);
+ irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, NULL);
if (self->provider.access_type == ACCESS_PEER) {
/*
@@ -421,6 +428,13 @@ void irlan_disconnect_indication(void *instance, void *sap, LM_REASON reason,
break;
}
+ /* If you want to pass the skb to *both* state machines, you will
+ * need to skb_clone() it, so that you don't free it twice.
+ * As the state machines don't need it, get rid of it here...
+ * Jean II */
+ if (userdata)
+ dev_kfree_skb(userdata);
+
irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 0ca4612cc..49e3151da 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -61,7 +61,16 @@ int irlan_eth_init(struct net_device *dev)
dev->hard_start_xmit = irlan_eth_xmit;
dev->get_stats = irlan_eth_get_stats;
dev->set_multicast_list = irlan_eth_set_multicast_list;
- dev->features |= NETIF_F_DYNALLOC;
+
+ /* NETIF_F_DYNALLOC feature was set by irlan_eth_init() and would
+ * cause the unregister_netdev() to do asynch completion _and_
+ * kfree self->dev afterwards. Which is really bad because the
+ * netdevice was not allocated separately but is embedded in
+ * our control block and therefore gets freed with *self.
+ * The only reason why this would have been enabled is to hide
+ * some netdev refcount issues. If unregister_netdev() blocks
+ * forever, tell us about it... */
+ //dev->features |= NETIF_F_DYNALLOC;
ether_setup(dev);
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index aeb76abfc..14a30cb6c 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -59,6 +59,7 @@ int sysctl_warn_noreply_time = 3;
extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
static void __irlap_close(struct irlap_cb *self);
+#ifdef CONFIG_IRDA_DEBUG
static char *lap_reasons[] = {
"ERROR, NOT USED",
"LAP_DISC_INDICATION",
@@ -69,6 +70,7 @@ static char *lap_reasons[] = {
"LAP_PRIMARY_CONFLICT",
"ERROR, NOT USED",
};
+#endif /* CONFIG_IRDA_DEBUG */
#ifdef CONFIG_PROC_FS
int irlap_proc_read(char *, char **, off_t, int);
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 847eb1912..0ad31e381 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -77,6 +77,7 @@ static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event,
static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event,
struct sk_buff *, struct irlap_info *);
+#ifdef CONFIG_IRDA_DEBUG
static const char *irlap_event[] = {
"DISCOVERY_REQUEST",
"CONNECT_REQUEST",
@@ -117,6 +118,7 @@ static const char *irlap_event[] = {
"BACKOFF_TIMER_EXPIRED",
"MEDIA_BUSY_TIMER_EXPIRED",
};
+#endif /* CONFIG_IRDA_DEBUG */
const char *irlap_state[] = {
"LAP_NDM",
@@ -312,7 +314,6 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
{
discovery_t *discovery_rsp;
int ret = 0;
- int i;
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -478,6 +479,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
break;
#ifdef CONFIG_IRDA_ULTRA
case SEND_UI_FRAME:
+ {
+ int i;
/* Only allowed to repeat an operation twice */
for (i=0; ((i<2) && (self->media_busy == FALSE)); i++) {
skb = skb_dequeue(&self->txq_ultra);
@@ -492,6 +495,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
irda_device_set_media_busy(self->netdev, TRUE);
}
break;
+ }
case RECV_UI_FRAME:
/* Only accept broadcast frames in NDM mode */
if (info->caddr != CBROADCAST) {
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index 3692a913d..d3ff221de 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -49,6 +49,7 @@ const char *irlsap_state[] = {
"LSAP_SETUP_PEND",
};
+#ifdef CONFIG_IRDA_DEBUG
static const char *irlmp_event[] = {
"LM_CONNECT_REQUEST",
"LM_CONNECT_CONFIRM",
@@ -75,6 +76,7 @@ static const char *irlmp_event[] = {
"LM_LAP_DISCOVERY_CONFIRM",
"LM_LAP_IDLE_TIMEOUT",
};
+#endif /* CONFIG_IRDA_DEBUG */
/* LAP Connection control proto declarations */
static void irlmp_state_standby (struct lap_cb *, IRLMP_EVENT,
diff --git a/net/irda/irsyms.c b/net/irda/irsyms.c
index 04f4f996a..59385ccd5 100644
--- a/net/irda/irsyms.c
+++ b/net/irda/irsyms.c
@@ -197,7 +197,7 @@ int __init irda_init(void)
return 0;
}
-static void __exit irda_cleanup(void)
+void __exit irda_cleanup(void)
{
#ifdef CONFIG_SYSCTL
irda_sysctl_unregister();