summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul Gortmaker <paul.gortmaker@windriver.com>2013-05-07 19:07:28 -0400
committerPaul Gortmaker <paul.gortmaker@windriver.com>2013-05-07 19:18:49 -0400
commitc541e4702344a2245dda25ce7f709e7c14c6800f (patch)
tree635d67466da5fcf1d3ffb9b05fb795837327ed34
parent046cbd132f63aa77f1e4e6338798eb2014e43705 (diff)
download3.8-rt-patches-c541e4702344a2245dda25ce7f709e7c14c6800f.tar.gz
patches-3.8.9-rt4.tar.xz
md5sum: a1643611c54f993a2201d4a68d614f8e patches-3.8.9-rt4.tar.xz Announce: ----------------------- Dear RT Folks, I'm pleased to announce the 3.8.9-rt4 release. changes since v3.8.9-rt3: - the quilt queue can be imported into git without hiccup - a giant/ huge/ big pile of cpsw patches has been added. If you have an ARM AM33xx (beagle bone for instance) then your network should work. I merged driver related changes DaveM had in his net & net-next tree. - i915 with tracing should not try to grab a spinlock in a preempt-disabled region. Reported by Joakim Hernberg - PPC64 forgot to check the preempt counter in ret_from_except_lite(). Patch sent by Priyanka Jain - __schedule_bug() had a typo in an ifdef and as consequence additional debug output was not printed. Patch sent by Qiang Huang - builds now on ARM/imx. Reported by Arpit Goel. - mce wakeup deferred from a workqueue to a kthread. Steven Rostedt found this and sent a patch. - a networking fix for a warning in inet_sk_rx_dst_set(). Caused by a route flush in the right moment. Reported by Mike Galbraith, patch by Eric Dumazet. - the swap_lock has been renamed because it was not unique and caused trouble with weak-per-cpu defines. Reported by Mike Galbraith patch by Steven Rostedt. - preempt_disable_nort() and friends use now barrier() instead of do {} while 0 in the unused case. Clashed with stable commit ("spinlocks and preemption points need to be at least compiler barriers"). Known issues: - SLxB is broken on PowerPC. - suspend / resume seems to program the timer wrong and wait ages until it continues. 
The delta patch against v3.8.9-rt3 without drivers/net/../ti/ is appended below and the complete one can be found here: https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/incr/patch-3.8.9-rt3-rt4.patch.xz The RT patch against 3.8.9 can be found here: https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/patch-3.8.9-rt4.patch.xz The split quilt queue is available at: https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/patches-3.8.9-rt4.tar.xz Sebastian ----------------------- http://marc.info/?l=linux-kernel&m=136701481918921&w=2 Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--patches/arch-use-pagefault-disabled.patch2
-rw-r--r--patches/completion-use-simple-wait-queues.patch12
-rw-r--r--patches/cond-resched-lock-rt-tweak.patch2
-rw-r--r--patches/cond-resched-softirq-rt.patch6
-rw-r--r--patches/cpsw-collected_cpsw_patches.patch3291
-rw-r--r--patches/cpsw-fix-missplaced-init-chunk.patch29
-rw-r--r--patches/cpsw-net-cpsw-Use-fallback-for-active_slave.patch35
-rw-r--r--patches/cpsw-net-cpsw-use-a-lock-around-source-testing.patch76
-rw-r--r--patches/cpsw-revert-stable-patches.patch42
-rw-r--r--patches/cpu-rt-rework-cpu-down.patch10
-rw-r--r--patches/drm-i915-move-i915_trace_irq_get-out-of-the-tracing-.patch47
-rw-r--r--patches/fs-fscache-remove-spin_lock-from-the-condition-in-wh.patch49
-rw-r--r--patches/fscache_compile_fix.patch34
-rw-r--r--patches/ftrace-crap.patch92
-rw-r--r--patches/ftrace-migrate-disable-tracing.patch4
-rw-r--r--patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch38
-rw-r--r--patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch4
-rw-r--r--patches/hrtimers-prepare-full-preemption.patch8
-rw-r--r--patches/i915_compile_fix.patch7
-rw-r--r--patches/idle-state.patch2
-rw-r--r--patches/ipc-make-rt-aware.patch2
-rw-r--r--patches/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch2
-rw-r--r--patches/latency-hist.patch8
-rw-r--r--patches/localversion.patch2
-rw-r--r--patches/md-raid5-percpu-handling-rt-aware.patch8
-rw-r--r--patches/might-sleep-check-for-idle.patch2
-rw-r--r--patches/migrate-disable-rt-variant.patch14
-rw-r--r--patches/mm-prepare-pf-disable-discoupling.patch4
-rw-r--r--patches/mm-remove-preempt-count-from-pf.patch4
-rw-r--r--patches/mm-rt-kmap-atomic-scheduling.patch6
-rw-r--r--patches/mm-shrink-the-page-frame-to-rt-size.patch2
-rw-r--r--patches/net-make-devnet_rename_seq-a-mutex.patch4
-rw-r--r--patches/net-netif-rx-ni-use-local-bh-disable.patch2
-rw-r--r--patches/net-netif_rx_ni-migrate-disable.patch2
-rw-r--r--patches/net-tx-action-avoid-livelock-on-rt.patch4
-rw-r--r--patches/oleg-signal-rt-fix.patch2
-rw-r--r--patches/percpu-rwsem-compilefix.patch8
-rw-r--r--patches/peter_zijlstra-frob-migrate_disable-2.patch36
-rw-r--r--patches/peter_zijlstra-frob-migrate_disable.patch8
-rw-r--r--patches/peter_zijlstra-frob-pagefault_disable.patch6
-rw-r--r--patches/peterz-raw_pagefault_disable.patch4
-rw-r--r--patches/posix-timers-thread-posix-cpu-timers-on-rt.patch2
-rw-r--r--patches/powerpc-64bit-PREEMPT_RT-Check-preempt_count-before-.patch30
-rw-r--r--patches/preempt-lazy-support.patch24
-rw-r--r--patches/preempt-nort-rt-variants.patch10
-rw-r--r--patches/rcu-force-preempt-rcu-for-rt.patch24
-rw-r--r--patches/revert-80d5c3689b886308247da295a228a54df49a44f6.patch28
-rw-r--r--patches/revert-f646968f8f7c624587de729115d802372b9063dd.patch28
-rw-r--r--patches/revert-f9a8f83b04e0c362a2fc660dbad980d24af209fc.patch45
-rw-r--r--patches/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch2
-rw-r--r--patches/rt-sched-have-migrate_disable-ignore-bounded-threads.patch6
-rw-r--r--patches/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch8
-rw-r--r--patches/sched-adjust-reset-on-fork-always.patch2
-rw-r--r--patches/sched-better-debug-output-for-might-sleep.patch8
-rw-r--r--patches/sched-cond-resched.patch2
-rw-r--r--patches/sched-consider-pi-boosting-in-setscheduler.patch16
-rw-r--r--patches/sched-delay-put-task.patch6
-rw-r--r--patches/sched-enqueue-to-head.patch2
-rw-r--r--patches/sched-fix-the-wrong-macro-name-of-CONFIG_DEBUG_PREEM.patch33
-rw-r--r--patches/sched-might-sleep-do-not-account-rcu-depth.patch2
-rw-r--r--patches/sched-migrate-disable.patch18
-rw-r--r--patches/sched-mmdrop-delayed.patch10
-rw-r--r--patches/sched-rt-fix-migrate_enable-thinko.patch2
-rw-r--r--patches/sched-rt-mutex-wakeup.patch8
-rw-r--r--patches/sched-teach-migrate_disable-about-atomic-contexts.patch6
-rw-r--r--patches/series25
-rw-r--r--patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch2
-rw-r--r--patches/skbufhead-raw-lock.patch10
-rw-r--r--patches/slub_delay_ctor_on_rt.patch4
-rw-r--r--patches/softirq-fix-unplug-deadlock.patch68
-rw-r--r--patches/softirq-local-lock.patch2
-rw-r--r--patches/softirq-make-serving-softirqs-a-task-flag.patch2
-rw-r--r--patches/softirq-preempt-fix-3-re.patch4
-rw-r--r--patches/softirq-split-locks.patch2
-rw-r--r--patches/softirq-thread-do-softirq.patch2
-rw-r--r--patches/spinlock-include-cache.h.patch24
-rw-r--r--patches/stomp-machine-mark-stomper-thread.patch2
-rw-r--r--patches/swap-Use-unique-local-lock-name-for-swap_lock.patch101
-rw-r--r--patches/tcp-force-a-dst-refcount-when-prequeue-packet.patch25
-rw-r--r--patches/tracing-account-for-preempt-off-in-preempt_schedule.patch2
-rw-r--r--patches/treercu-use-simple-waitqueue.patch6
-rw-r--r--patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch4
-rw-r--r--patches/user-use-local-irq-nort.patch2
-rw-r--r--patches/x86-kvm-require-const-tsc-for-rt.patch2
-rw-r--r--patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch168
-rw-r--r--patches/x86-perf-uncore-deal-with-kfree.patch2
86 files changed, 4475 insertions, 226 deletions
diff --git a/patches/arch-use-pagefault-disabled.patch b/patches/arch-use-pagefault-disabled.patch
index 961cc0c..86efa87 100644
--- a/patches/arch-use-pagefault-disabled.patch
+++ b/patches/arch-use-pagefault-disabled.patch
@@ -264,7 +264,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
retry:
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
-@@ -1108,7 +1108,7 @@ __do_page_fault(struct pt_regs *regs, un
+@@ -1110,7 +1110,7 @@ __do_page_fault(struct pt_regs *regs, un
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index f7d9d01..a9d6c3a 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3487,10 +3487,10 @@ void complete(struct completion *x)
+@@ -3489,10 +3489,10 @@ void complete(struct completion *x)
{
unsigned long flags;
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(complete);
-@@ -3507,10 +3507,10 @@ void complete_all(struct completion *x)
+@@ -3509,10 +3509,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(complete_all);
-@@ -3518,20 +3518,20 @@ static inline long __sched
+@@ -3520,20 +3520,20 @@ static inline long __sched
do_wait_for_common(struct completion *x, long timeout, int state)
{
if (!x->done) {
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!x->done)
return timeout;
}
-@@ -3544,9 +3544,9 @@ wait_for_common(struct completion *x, lo
+@@ -3546,9 +3546,9 @@ wait_for_common(struct completion *x, lo
{
might_sleep();
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return timeout;
}
-@@ -3677,12 +3677,12 @@ bool try_wait_for_completion(struct comp
+@@ -3679,12 +3679,12 @@ bool try_wait_for_completion(struct comp
unsigned long flags;
int ret = 1;
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
-@@ -3700,10 +3700,10 @@ bool completion_done(struct completion *
+@@ -3702,10 +3702,10 @@ bool completion_done(struct completion *
unsigned long flags;
int ret = 1;
diff --git a/patches/cond-resched-lock-rt-tweak.patch b/patches/cond-resched-lock-rt-tweak.patch
index 2dff484..3e7164c 100644
--- a/patches/cond-resched-lock-rt-tweak.patch
+++ b/patches/cond-resched-lock-rt-tweak.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2719,7 +2719,7 @@ extern int _cond_resched(void);
+@@ -2720,7 +2720,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index dd146ee..b30f4d5 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -10,7 +10,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2730,12 +2730,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -2731,12 +2731,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Does a critical section need to be broken due to another
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4366,6 +4366,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -4368,6 +4368,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4379,6 +4380,7 @@ int __sched __cond_resched_softirq(void)
+@@ -4381,6 +4382,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/cpsw-collected_cpsw_patches.patch b/patches/cpsw-collected_cpsw_patches.patch
new file mode 100644
index 0000000..26c5d99
--- /dev/null
+++ b/patches/cpsw-collected_cpsw_patches.patch
@@ -0,0 +1,3291 @@
+From: Sebastian Siewior <bigeasy@linutronix.de>
+Subject: net/cpsw: giant cpsw patch
+
+This patch contains:
+ git diff v3.8..net-merge drivers/net/ethernet/ti arch/arm/boot/dts/am33* include/linux/platform_data/cpsw.h
+where the net-merge branch contains the following branches merged:
+net-next as of 37fe066 net: fix address check in rtnl_fdb_del
+net as of b9e48de isdn/sc: Fix incorrect module_param_array types
+linus as of 697dfd8 Merge tag 'efi-urgent' into x86/urgent
+
+including the following patches:
+
+|commit 15c6ff3bc0ff3464a8c7efcdea09c86454571622
+|Author: Jiri Pirko <jiri@resnulli.us>
+|Date: Tue Jan 1 03:30:17 2013 +0000
+|
+| net: remove unnecessary NET_ADDR_RANDOM "bitclean"
+|
+| NET_ADDR_SET is set in dev_set_mac_address() no need to alter
+| dev->addr_assign_type value in drivers.
+|
+| Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 7826d43f2db45c9305a6e0ba165650e1a203f517
+|Author: Jiri Pirko <jiri@resnulli.us>
+|Date: Sun Jan 6 00:44:26 2013 +0000
+|
+| ethtool: fix drvinfo strings set in drivers
+|
+| Use strlcpy where possible to ensure the string is \0 terminated.
+| Use always sizeof(string) instead of 32, ETHTOOL_BUSINFO_LEN
+| and custom defines.
+| Use snprintf instead of sprint.
+| Remove unnecessary inits of ->fw_version
+| Remove unnecessary inits of drvinfo struct.
+|
+| Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 8ff25eebb8161d08ddc48539e6b4afa0b18d778f
+|Author: Kees Cook <keescook@chromium.org>
+|Date: Tue Oct 2 11:18:24 2012 -0700
+|
+| drivers/net/ethernet/ti: remove depends on CONFIG_EXPERIMENTAL
+|
+| The CONFIG_EXPERIMENTAL config item has not carried much meaning for a
+| while now and is almost always enabled by default. As agreed during the
+| Linux kernel summit, remove it from any "depends on" lines in Kconfigs.
+|
+| CC: Tony Lindgren <tony@atomide.com>
+| CC: Mugunthan V N <mugunthanvnm@ti.com>
+| CC: Kevin Hilman <khilman@ti.com>
+| CC: "David S. Miller" <davem@davemloft.net>
+| CC: Cyril Chemparathy <cyril@ti.com>
+| Signed-off-by: Kees Cook <keescook@chromium.org>
+| Acked-by: David S. Miller <davem@davemloft.net>
+|
+|commit f9a8f83b04e0c362a2fc660dbad980d24af209fc
+|Author: Florian Fainelli <florian@openwrt.org>
+|Date: Mon Jan 14 00:52:52 2013 +0000
+|
+| net: phy: remove flags argument from phy_{attach, connect, connect_direct}
+|
+| The flags argument of the phy_{attach,connect,connect_direct} functions
+| is then used to assign a struct phy_device dev_flags with its value.
+| All callers but the tg3 driver pass the flag 0, which results in the
+| underlying PHY drivers in drivers/net/phy/ not being able to actually
+| use any of the flags they would set in dev_flags. This patch gets rid of
+| the flags argument, and passes phydev->dev_flags to the internal PHY
+| library call phy_attach_direct() such that drivers which actually modify
+| a phy device dev_flags get the value preserved for use by the underlying
+| phy driver.
+|
+| Acked-by: Kosta Zertsekel <konszert@marvell.com>
+| Signed-off-by: Florian Fainelli <florian@openwrt.org>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit fae50823d0ee579e006a7ba2b20880e354388b25
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Thu Jan 17 06:31:34 2013 +0000
+|
+| net: ethernet: davinci_cpdma: Add boundary for rx and tx descriptors
+|
+| When there is heavy transmission traffic in the CPDMA, then Rx descriptors
+| memory is also utilized as tx desc memory looses all rx descriptors and the
+| driver stops working then.
+|
+| This patch adds boundary for tx and rx descriptors in bd ram dividing the
+| descriptor memory to ensure that during heavy transmission tx doesn't use
+| rx descriptors.
+|
+| This patch is already applied to davinci_emac driver, since CPSW and
+| davici_dmac shares the same CPDMA, moving the boundry seperation from
+| Davinci EMAC driver to CPDMA driver which was done in the following
+| commit
+|
+| commit 86d8c07ff2448eb4e860e50f34ef6ee78e45c40c
+| Author: Sascha Hauer <s.hauer@pengutronix.de>
+| Date: Tue Jan 3 05:27:47 2012 +0000
+|
+| net/davinci: do not use all descriptors for tx packets
+|
+| The driver uses a shared pool for both rx and tx descriptors.
+| During open it queues fixed number of 128 descriptors for receive
+| packets. For each received packet it tries to queue another
+| descriptor. If this fails the descriptor is lost for rx.
+| The driver has no limitation on tx descriptors to use, so it
+| can happen during a nmap / ping -f attack that the driver
+| allocates all descriptors for tx and looses all rx descriptors.
+| The driver stops working then.
+| To fix this limit the number of tx descriptors used to half of
+| the descriptors available, the rx path uses the other half.
+|
+| Tested on a custom board using nmap / ping -f to the board from
+| two different hosts.
+|
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 7373470202a3c98e1a338bf1acf51247cd100868
+|Author: Thierry Reding <thierry.reding@avionic-design.de>
+|Date: Mon Jan 21 10:38:39 2013 +0100
+|
+| net: ethernet: davinci: Fix build breakage
+|
+| The correct name of the transmit DMA channel field in struct emac_priv
+| is txchan, not txch.
+|
+| Signed-off-by: Thierry Reding <thierry.reding@avionic-design.de>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit b2adaca92c63b9bb8beb021d554f656e387a7648
+|Author: Joe Perches <joe@perches.com>
+|Date: Sun Feb 3 17:43:58 2013 +0000
+|
+| ethernet: Remove unnecessary alloc/OOM messages, alloc cleanups
+|
+| alloc failures already get standardized OOM
+| messages and a dump_stack.
+|
+| Convert kzalloc's with multiplies to kcalloc.
+| Convert kmalloc's with multiplies to kmalloc_array.
+| Fix a few whitespace defects.
+| Convert a constant 6 to ETH_ALEN.
+| Use parentheses around sizeof.
+| Convert vmalloc/memset to vzalloc.
+| Remove now unused size variables.
+|
+| Signed-off-by: Joe Perches <joe@perches.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit e11b220f336c654db876027d40953acef90b0cae
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Tue Feb 5 08:26:47 2013 +0000
+|
+| drivers: net: cpsw: Add helper functions for VLAN ALE implementation
+|
+| Add helper functions for VLAN ALE implementations for Add, Delete
+| Dump VLAN related ALE entries
+|
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 3b72c2fe0c6bbec42ed7f899931daef227b80322
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Tue Feb 5 08:26:48 2013 +0000
+|
+| drivers: net:ethernet: cpsw: add support for VLAN
+|
+| adding support for VLAN interface for cpsw.
+|
+| CPSW VLAN Capability
+| * Can filter VLAN packets in Hardware
+|
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit f6575c90f6fc637697f130ea4a05892296c9a473
+|Author: Vaibhav Bedia <vaibhav.bedia@ti.com>
+|Date: Tue Jan 29 16:45:07 2013 +0530
+|
+| ARM: DTS: AM33XX: Add nodes for OCMC RAM and WKUP-M3
+|
+| Since AM33XX supports only DT-boot, this is needed
+| for the appropriate device nodes to be created.
+|
+| Note: OCMC RAM is part of the PER power domain and supports
+| retention. The assembly code for low power entry/exit will
+| run from OCMC RAM. To ensure that the OMAP PM code does not
+| attempt to disable the clock to OCMC RAM as part of the
+| suspend process add the no_idle_on_suspend flag.
+|
+| Signed-off-by: Vaibhav Bedia <vaibhav.bedia@ti.com>
+| Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+| Acked-by: Peter Korsgaard <jacmet@sunsite.dk>
+| Signed-off-by: Paul Walmsley <paul@pwsan.com>
+|
+|commit f6e135c81eeb648c6addc6aeff2ee80f28ea413b
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Mon Feb 11 09:52:18 2013 +0000
+|
+| driver: net: ethernet: davinci_cpdma: add support for directed packet and source port detection
+|
+| * Introduced parameter to add port number for directed packet in cpdma_chan_submit
+| * Source port detection macro with DMA descriptor status
+|
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 9232b16df2167c8afcb89de39ee85f5091ebacff
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Mon Feb 11 09:52:19 2013 +0000
+|
+| driver: net: ethernet: cpsw: make cpts as pointer
+|
+| As CPTS is common module for both EMAC in Dual EMAC mode so making cpts as
+| pointer.
+|
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit d9ba8f9e6298af71ec1c1fd3d88c3ef68abd0ec3
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Mon Feb 11 09:52:20 2013 +0000
+|
+| driver: net: ethernet: cpsw: dual emac interface implementation
+|
+| The CPSW switch can act as Dual EMAC by segregating the switch ports
+| using VLAN and port VLAN as per the TRM description in
+| 14.3.2.10.2 Dual Mac Mode
+|
+| Following CPSW components will be common for both the interfaces.
+| * Interrupt source is common for both eth interfaces
+| * Interrupt pacing is common for both interfaces
+| * Hardware statistics is common for all the ports
+| * CPDMA is common for both eth interface
+| * CPTS is common for both the interface and it should not be enabled on
+| both the interface as timestamping information doesn't contain port
+| information.
+|
+| Constrains
+| * Reserved VID of One port should not be used in other interface which will
+| enable switching functionality
+| * Same VID must not be used in both the interface which will enable switching
+| functionality
+|
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 79876e0394aa46e74267a5871c4f4469544dcacf
+|Author: Cyril Roelandt <tipecaml@gmail.com>
+|Date: Tue Feb 12 12:52:30 2013 +0000
+|
+| net: ethernet: ti: remove redundant NULL check.
+|
+| cpdma_chan_destroy() on a NULL pointer is a no-op, so the NULL check in
+| cpdma_ctlr_destroy() can safely be removed.
+|
+| Signed-off-by: Cyril Roelandt <tipecaml@gmail.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 6929e24e4cc46ce8d5b7dd8f8bdf4244c8d77f76
+|Author: Arnd Bergmann <arnd@arndb.de>
+|Date: Thu Feb 14 17:53:01 2013 +0100
+|
+| net: cwdavinci_cpdma: export symbols for cpsw
+|
+| With the support for ARM AM33xx in multiplatform kernels
+| in 3.9, an older bug appears in ARM allmodconfig:
+| When the cpsw driver is built as a module with cpdma
+| support enabled, it uses symbols that the cpdma driver
+| does not export.
+|
+| Without this patch, building allmodconfig results in:
+|
+| ERROR: "cpdma_ctlr_int_ctrl" [drivers/net/ethernet/ti/ti_cpsw.ko] undefined!
+| ERROR: "cpdma_control_set" [drivers/net/ethernet/ti/ti_cpsw.ko] undefined!
+| ERROR: "cpdma_ctlr_eoi" [drivers/net/ethernet/ti/ti_cpsw.ko] undefined!
+|
+| Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+| Acked-by: David S. Miller <davem@davemloft.net>
+| Cc: Mugunthan V N <mugunthanvnm@ti.com>
+| Cc: Vaibhav Hiremath <hvaibhav@ti.com>
+| Cc: Richard Cochran <richardcochran@gmail.com>
+| Cc: netdev@vger.kernel.org
+|
+|commit 510a1e7249298f6bbd049e1ec98041ddf5ef6452
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Sun Feb 17 22:19:20 2013 +0000
+|
+| drivers: net: davinci_cpdma: acknowledge interrupt properly
+|
+| CPDMA interrupts are not properly acknowledged which leads to interrupt
+| storm, only cpdma interrupt 0 is acknowledged in Davinci CPDMA driver.
+| Changed cpdma_ctlr_eoi api to acknowledge 1 and 2 interrupts which are
+| used for rx and tx respectively.
+|
+| Reported-by: Pantelis Antoniou <panto@antoniou-consulting.com>
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 06991c28f37ad68e5c03777f5c3b679b56e3dac1
+|Merge: 460dc1e 74fef7a
+|Author: Linus Torvalds <torvalds@linux-foundation.org>
+|Date: Thu Feb 21 12:05:51 2013 -0800
+|
+| Merge tag 'driver-core-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
+|
+| Pull driver core patches from Greg Kroah-Hartman:
+| "Here is the big driver core merge for 3.9-rc1
+|
+| There are two major series here, both of which touch lots of drivers
+| all over the kernel, and will cause you some merge conflicts:
+|
+| - add a new function called devm_ioremap_resource() to properly be
+| able to check return values.
+|
+| - remove CONFIG_EXPERIMENTAL
+|
+| Other than those patches, there's not much here, some minor fixes and
+| updates"
+|
+| Fix up trivial conflicts
+|
+| * tag 'driver-core-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (221 commits)
+| base: memory: fix soft/hard_offline_page permissions
+| drivercore: Fix ordering between deferred_probe and exiting initcalls
+| backlight: fix class_find_device() arguments
+| TTY: mark tty_get_device call with the proper const values
+| driver-core: constify data for class_find_device()
+| firmware: Ignore abort check when no user-helper is used
+| firmware: Reduce ifdef CONFIG_FW_LOADER_USER_HELPER
+| firmware: Make user-mode helper optional
+| firmware: Refactoring for splitting user-mode helper code
+| Driver core: treat unregistered bus_types as having no devices
+| watchdog: Convert to devm_ioremap_resource()
+| thermal: Convert to devm_ioremap_resource()
+| spi: Convert to devm_ioremap_resource()
+| power: Convert to devm_ioremap_resource()
+| mtd: Convert to devm_ioremap_resource()
+| mmc: Convert to devm_ioremap_resource()
+| mfd: Convert to devm_ioremap_resource()
+| media: Convert to devm_ioremap_resource()
+| iommu: Convert to devm_ioremap_resource()
+| drm: Convert to devm_ioremap_resource()
+| ...
+|
+|commit 3298a3511f1e73255a8dc023efd909e569eea037
+|Merge: 5ce7aba acb7452
+|Author: Linus Torvalds <torvalds@linux-foundation.org>
+|Date: Thu Feb 21 15:20:41 2013 -0800
+|
+| Merge tag 'multiplatform' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
+|
+| Pull ARM SoC multiplatform support from Arnd Bergmann:
+| "Converting more ARM platforms to multiplatform support. This time,
+| OMAP gets converted, which is a major step since this is by far the
+| largest platform in terms of code size. The same thing happens to the
+| vt8500 platform."
+|
+| * tag 'multiplatform' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc:
+| net: cwdavinci_cpdma: export symbols for cpsw
+| remoteproc: omap: depend on OMAP_MBOX_FWK
+| [media] davinci: do not include mach/hardware.h
+| ARM: OMAP2+: Make sure files with omap initcalls include soc.h
+| ARM: OMAP2+: Include soc.h to drm.c to fix compiling
+| ARM: OMAP2+: Fix warning for hwspinlock omap_postcore_initcall
+| ARM: multi_v7_defconfig: add ARCH_ZYNQ
+| ARM: multi_v7_defconfig: remove unnecessary CONFIG_GPIOLIB
+| arm: vt8500: Remove remaining mach includes
+| arm: vt8500: Convert debug-macro.S to be multiplatform friendly
+| arm: vt8500: Remove single platform Kconfig options
+| ARM: OMAP2+: Remove now obsolete uncompress.h and debug-macro.S
+| ARM: OMAP2+: Add minimal support for booting vexpress
+| ARM: OMAP2+: Enable ARCH_MULTIPLATFORM support
+| ARM: OMAP2+: Disable code that currently does not work with multiplaform
+| ARM: OMAP2+: Add multiplatform debug_ll support
+| ARM: OMAP: Fix dmaengine init for multiplatform
+| ARM: OMAP: Fix i2c cmdline initcall for multiplatform
+| ARM: OMAP2+: Use omap initcalls
+| ARM: OMAP2+: Limit omap initcalls to omap only on multiplatform kernels
+|
+|commit 0237c11044b3670adcbe80cd6dd721285347f497
+|Author: Daniel Mack <zonque@gmail.com>
+|Date: Tue Feb 26 04:06:20 2013 +0000
+|
+| drivers: net: ethernet: cpsw: consider number of slaves in interation
+|
+| Make cpsw_add_default_vlan() look at the actual number of slaves for its
+| iteration, so boards with less than 2 slaves don't ooops at boot.
+|
+| Signed-off-by: Daniel Mack <zonque@gmail.com>
+| Cc: Mugunthan V N <mugunthanvnm@ti.com>
+| Cc: David S. Miller <davem@davemloft.net>
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 7307c00f335a4e986586b12334696098d2fc2bcd
+|Merge: f8f466c 55ccb1a
+|Author: Linus Torvalds <torvalds@linux-foundation.org>
+|Date: Thu Feb 28 20:00:40 2013 -0800
+|
+| Merge tag 'late-omap' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
+|
+| Pull ARM SoC late OMAP changes from Olof Johansson:
+| "This branch contains changes for OMAP that came in late during the
+| release staging, close to when the merge window opened.
+|
+| It contains, among other things:
+|
+| - OMAP PM fixes and some patches for audio device integration
+| - OMAP clock fixes related to common clock conversion
+| - A set of patches cleaning up WFI entry and blocking.
+| - A set of fixes and IP block support for PM on TI AM33xx SoCs
+| (Beaglebone, etc)
+| - A set of smaller fixes and cleanups around AM33xx restart and
+| revision detection, as well as removal of some dead code
+| (CONFIG_32K_TIMER_HZ)"
+|
+| * tag 'late-omap' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (34 commits)
+| ARM: omap2: include linux/errno.h in hwmod_reset
+| ARM: OMAP2+: fix some omap_device_build() calls that aren't compiled by default
+| ARM: OMAP4: hwmod data: Enable AESS hwmod device
+| ARM: OMAP4: hwmod data: Update AESS data with memory bank area
+| ARM: OMAP4+: AESS: enable internal auto-gating during initial setup
+| ASoC: TI AESS: add autogating-enable function, callable from architecture code
+| ARM: OMAP2+: hwmod: add enable_preprogram hook
+| ARM: OMAP4: clock data: Add missing clkdm association for dpll_usb
+| ARM: OMAP2+: PM: Fix the dt return condition in pm_late_init()
+| ARM: OMAP2: am33xx-hwmod: Fix "register offset NULL check" bug
+| ARM: OMAP2+: AM33xx: hwmod: add missing HWMOD_NO_IDLEST flags
+| ARM: OMAP: AM33xx hwmod: Add parent-child relationship for PWM subsystem
+| ARM: OMAP: AM33xx hwmod: Corrects PWM subsystem HWMOD entries
+| ARM: DTS: AM33XX: Add nodes for OCMC RAM and WKUP-M3
+| ARM: OMAP2+: AM33XX: Update the hardreset API
+| ARM: OMAP2+: AM33XX: hwmod: Update the WKUP-M3 hwmod with reset status bit
+| ARM: OMAP2+: AM33XX: hwmod: Fixup cpgmac0 hwmod entry
+| ARM: OMAP2+: AM33XX: hwmod: Update TPTC0 hwmod with the right flags
+| ARM: OMAP2+: AM33XX: hwmod: Register OCMC RAM hwmod
+| ARM: OMAP2+: AM33XX: CM/PRM: Use __ASSEMBLER__ macros in header files
+| ...
+|
+|commit 9da060d0ed571bbff434c4a1ef3e48db99a37ee0
+|Merge: e3b5951 aab2b4b
+|Author: Linus Torvalds <torvalds@linux-foundation.org>
+|Date: Tue Mar 5 18:42:29 2013 -0800
+|
+| Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
+|
+| Pull networking fixes from David Miller:
+| "A moderately sized pile of fixes, some specifically for merge window
+| introduced regressions although others are for longer standing items
+| and have been queued up for -stable.
+|
+| I'm kind of tired of all the RDS protocol bugs over the years, to be
+| honest, it's way out of proportion to the number of people who
+| actually use it.
+|
+| 1) Fix missing range initialization in netfilter IPSET, from Jozsef
+| Kadlecsik.
+|
+| 2) ieee80211_local->tim_lock needs to use BH disabling, from Johannes
+| Berg.
+|
+| 3) Fix DMA syncing in SFC driver, from Ben Hutchings.
+|
+| 4) Fix regression in BOND device MAC address setting, from Jiri
+| Pirko.
+|
+| 5) Missing usb_free_urb in ISDN Hisax driver, from Marina Makienko.
+|
+| 6) Fix UDP checksumming in bnx2x driver for 57710 and 57711 chips,
+| fix from Dmitry Kravkov.
+|
+| 7) Missing cfgspace_lock initialization in BCMA driver.
+|
+| 8) Validate parameter size for SCTP assoc stats getsockopt(), from
+| Guenter Roeck.
+|
+| 9) Fix SCTP association hangs, from Lee A Roberts.
+|
+| 10) Fix jumbo frame handling in r8169, from Francois Romieu.
+|
+| 11) Fix phy_device memory leak, from Petr Malat.
+|
+| 12) Omit trailing FCS from frames received in BGMAC driver, from Hauke
+| Mehrtens.
+|
+| 13) Missing socket refcount release in L2TP, from Guillaume Nault.
+|
+| 14) sctp_endpoint_init should respect passed in gfp_t, rather than use
+| GFP_KERNEL unconditionally. From Dan Carpenter.
+|
+| 15) Add AISX AX88179 USB driver, from Freddy Xin.
+|
+| 16) Remove MAINTAINERS entries for drivers deleted during the merge
+| window, from Cesar Eduardo Barros.
+|
+| 17) RDS protocol can try to allocate huge amounts of memory, check
+| that the user's request length makes sense, from Cong Wang.
+|
+| 18) SCTP should use the provided KMALLOC_MAX_SIZE instead of it's own,
+| bogus, definition. From Cong Wang.
+|
+| 19) Fix deadlocks in FEC driver by moving TX reclaim into NAPI poll,
+| from Frank Li. Also, fix a build error introduced in the merge
+| window.
+|
+| 20) Fix bogus purging of default routes in ipv6, from Lorenzo Colitti.
+|
+| 21) Don't double count RTT measurements when we leave the TCP receive
+| fast path, from Neal Cardwell."
+|
+| * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
+| tcp: fix double-counted receiver RTT when leaving receiver fast path
+| CAIF: fix sparse warning for caif_usb
+| rds: simplify a warning message
+| net: fec: fix build error in no MXC platform
+| net: ipv6: Don't purge default router if accept_ra=2
+| net: fec: put tx to napi poll function to fix dead lock
+| sctp: use KMALLOC_MAX_SIZE instead of its own MAX_KMALLOC_SIZE
+| rds: limit the size allocated by rds_message_alloc()
+| MAINTAINERS: remove eexpress
+| MAINTAINERS: remove drivers/net/wan/cycx*
+| MAINTAINERS: remove 3c505
+| caif_dev: fix sparse warnings for caif_flow_cb
+| ax88179_178a: ASIX AX88179_178A USB 3.0/2.0 to gigabit ethernet adapter driver
+| sctp: use the passed in gfp flags instead GFP_KERNEL
+| ipv[4|6]: correct dropwatch false positive in local_deliver_finish
+| l2tp: Restore socket refcount when sendmsg succeeds
+| net/phy: micrel: Disable asymmetric pause for KSZ9021
+| bgmac: omit the fcs
+| phy: Fix phy_device_free memory leak
+| bnx2x: Fix KR2 work-around condition
+| ...
+|
+|commit 720a43efd30f04a0a492c85fb997361c44fbae05
+|Author: Joe Perches <joe@perches.com>
+|Date: Fri Mar 8 15:03:25 2013 +0000
+|
+| drivers:net: Remove unnecessary OOM messages after netdev_alloc_skb
+|
+| Emitting netdev_alloc_skb and netdev_alloc_skb_ip_align OOM
+| messages is unnecessary as there is already a dump_stack
+| after allocation failures.
+|
+| Other trivial changes around these removals:
+|
+| Convert a few comparisons of pointer to 0 to !pointer.
+| Change flow to remove unnecessary label.
+| Remove now unused variable.
+| Hoist assignment from if.
+|
+| Signed-off-by: Joe Perches <joe@perches.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit e86ac13b031cf71d8f40ff513e627aac80e6b765
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Mon Mar 11 23:16:35 2013 +0000
+|
+| drivers: net: ethernet: cpsw: change cpts_active_slave to active_slave
+|
+| Change cpts_active_slave to active_slave so that the same DT property
+| can be used to ethtool and SIOCGMIIPHY.
+|
+| CC: Richard Cochran <richardcochran@gmail.com>
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit d3bb9c58b567d240eaaa2dc8bd778696eaed5fbd
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Mon Mar 11 23:16:36 2013 +0000
+|
+| driver: net: ethernet: cpsw: implement ethtool get/set phy setting
+|
+| This patch implements get/set of the phy settings via ethtool apis
+|
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit ff5b8ef2ef3af0fd7e1cf6c8c1ed9ec5afbda422
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Mon Mar 11 23:16:37 2013 +0000
+|
+| driver: net: ethernet: cpsw: implement interrupt pacing via ethtool
+|
+| This patch implements support for interrupt pacing block of CPSW via ethtool
+| Interrupt pacing block is common to both the ethernet interfaces in
+| dual emac mode
+|
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 11f2c988382b880e602a005c26436043c5d2c274
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Mon Mar 11 23:16:38 2013 +0000
+|
+| drivers: net: ethernet: cpsw: implement get phy_id via ioctl
+|
+| Implement get phy_id via ioctl SIOCGMIIPHY. In switch mode active phy_id
+| is returned and in dual EMAC mode slave's specific phy_id is returned.
+|
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit d35162f89b8f00537d7b240b76d2d0e8b8d29aa0
+|Author: Daniel Mack <zonque@gmail.com>
+|Date: Tue Mar 12 06:31:19 2013 +0000
+|
+| net: ethernet: cpsw: fix usage of cpdma_check_free_tx_desc()
+|
+| Commit fae50823d0 ("net: ethernet: davinci_cpdma: Add boundary for rx
+| and tx descriptors") introduced a function to check the current
+| allocation state of tx packets. The return value is taken into account
+| to stop the network queue on the adapter in case there are no free
+| slots.
+|
+| However, cpdma_check_free_tx_desc() returns 'true' if there is room in
+| the bitmap, not 'false', so the usage of the function is wrong.
+|
+| Signed-off-by: Daniel Mack <zonque@gmail.com>
+| Cc: Mugunthan V N <mugunthanvnm@ti.com>
+| Reported-by: Sven Neumann <s.neumann@raumfeld.com>
+| Reported-by: Andreas Fenkart <andreas.fenkart@streamunlimited.com>
+| Tested-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Tested-by: Andreas Fenkart <andreas.fenkart@streamunlimited.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 75b9b61bb8a18e75afe7b10dd55681e748fa27df
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Fri Mar 15 04:10:16 2013 +0000
+|
+| drivers: net: ethernet: ti: davinci_emac: fix usage of cpdma_check_free_tx_desc()
+|
+| Fix which was done in the following commit in cpsw driver has
+| to be taken forward to davinci emac driver as well.
+|
+| commit d35162f89b8f00537d7b240b76d2d0e8b8d29aa0
+| Author: Daniel Mack <zonque@gmail.com>
+| Date: Tue Mar 12 06:31:19 2013 +0000
+|
+| net: ethernet: cpsw: fix usage of cpdma_check_free_tx_desc()
+|
+| Commit fae50823d0 ("net: ethernet: davinci_cpdma: Add boundary for rx
+| and tx descriptors") introduced a function to check the current
+| allocation state of tx packets. The return value is taken into account
+| to stop the network queue on the adapter in case there are no free
+| slots.
+|
+| However, cpdma_check_free_tx_desc() returns 'true' if there is room in
+| the bitmap, not 'false', so the usage of the function is wrong.
+|
+| Reported-by: Prabhakar Lad <prabhakar.csengg@gmail.com>
+| Tested-by: Prabhakar Lad <prabhakar.csengg@gmail.com>
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 61816596d1c9026d0ecb20c44f90452c41596ffe
+|Merge: 23a9072 da2191e
+|Author: David S. Miller <davem@davemloft.net>
+|Date: Wed Mar 20 12:46:26 2013 -0400
+|
+| Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
+|
+| Pull in the 'net' tree to get Daniel Borkmann's flow dissector
+| infrastructure change.
+|
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit e052a5893b78d43bd183c6cc33bc346efe6bc6e5
+|Author: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+|Date: Wed Mar 20 05:01:45 2013 +0000
+|
+| net: ethernet: davinci_emac: make local function emac_poll_controller() static
+|
+| emac_poll_controller() was not declared. It should be static.
+|
+| Signed-off-by: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit ce16294fda230c787ce5c35f61b2f80d14d70a72
+|Author: Lothar Waßmann <LW@KARO-electronics.de>
+|Date: Thu Mar 21 02:20:11 2013 +0000
+|
+| net: ethernet: cpsw: fix erroneous condition in error check
+|
+| The error check in cpsw_probe_dt() has an '&&' where an '||' is
+| meant to be. This causes a NULL pointer dereference when incomplete DT
+| data is passed to the driver ('phy_id' property for cpsw_emac1
+| missing).
+|
+| Signed-off-by: Lothar Waßmann <LW@KARO-electronics.de>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit ea3d1cc285bf1ae1fa81b47418cd7fd79990bb06
+|Merge: 2fa70df f4541d6
+|Author: David S. Miller <davem@davemloft.net>
+|Date: Fri Mar 22 12:53:09 2013 -0400
+|
+| Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
+|
+| Pull to get the thermal netlink multicast group name fix, otherwise
+| the assertion added in net-next to netlink to detect that kind of bug
+| makes systems unbootable for some folks.
+|
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit b8092861efd827deb8d84292674704ee8bf41b04
+|Author: Sekhar Nori <nsekhar@ti.com>
+|Date: Sun Mar 24 23:25:46 2013 +0000
+|
+| net/davinci_emac: use devres APIs
+|
+| Use devres APIs where possible to simplify error handling
+| in driver probe.
+|
+| While at it, also rename the goto targets in error path to
+| introduce some consistency in how they are named.
+|
+| Signed-off-by: Sekhar Nori <nsekhar@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit b56d6b3fca6d1214dbc9c5655f26e5d4ec04afc8
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Wed Mar 27 04:41:59 2013 +0000
+|
+| drivers: net: ethernet: cpsw: use netif_wake_queue() while restarting tx queue
+|
+| To restart tx queue use netif_wake_queue() instead of netif_start_queue()
+| so that net schedule will restart transmission immediately which will
+| increase network performance while doing huge data transfers.
+|
+| Reported-by: Dan Franke <dan.franke@schneider-electric.com>
+| Suggested-by: Sriramakrishnan A G <srk@ti.com>
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Acked-by: Eric Dumazet <edumazet@google.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 7e51cde276ca820d526c6c21cf8147df595a36bf
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Wed Mar 27 04:42:00 2013 +0000
+|
+| drivers: net: ethernet: davinci_emac: use netif_wake_queue() while restarting tx queue
+|
+| To restart tx queue use netif_wake_queue() instead of netif_start_queue()
+| so that net schedule will restart transmission immediately which will
+| increase network performance while doing huge data transfers.
+|
+| Reported-by: Dan Franke <dan.franke@schneider-electric.com>
+| Suggested-by: Sriramakrishnan A G <srk@ti.com>
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Acked-by: Eric Dumazet <edumazet@google.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit a210576cf891e9e6d2c238eabcf5c1286b1e7526
+|Merge: 7d4c04f 3658f36
+|Author: David S. Miller <davem@davemloft.net>
+|Date: Mon Apr 1 13:36:50 2013 -0400
+|
+| Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
+|
+| Conflicts:
+| net/mac80211/sta_info.c
+| net/wireless/core.h
+|
+| Two minor conflicts in wireless. Overlapping additions of extern
+| declarations in net/wireless/core.h and a bug fix overlapping with
+| the addition of a boolean parameter to __ieee80211_key_free().
+|
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 91c4166c1a01c00b8bed74f7a7defa620071de88
+|Author: Mugunthan V N <mugunthanvnm@ti.com>
+|Date: Mon Apr 15 07:31:28 2013 +0000
+|
+| drivers: net: ethernet: cpsw: get slave VLAN id from slave node instead of cpsw node
+|
+| Dual EMAC slave VLAN id must be got from slave node instead of cpsw node as
+| VLAN id for each slave will be different.
+|
+| Reported-by: Mark Jackson <mpfj-list@mimc.co.uk>
+| Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 1e0a8b13d35510e711fdf72e9a3e30bcb2bd49fa
+|Author: Devendra Naga <devendra.aaru@gmail.com>
+|Date: Tue Apr 16 01:30:38 2013 +0000
+|
+| tlan: cancel work at remove path
+|
+| the work has been scheduled from interrupt, and not been
+| cancelled when the driver is unloaded, which doesn't remove
+| the work item from the global workqueue. call the
+| cancel_work_sync when the driver is removed (rmmod'ed).
+|
+| Cc: Sriram <srk@ti.com>
+| Cc: Cyril Chemparathy <cyril@ti.com>
+| Cc: Vinay Hegde <vinay.hegde@ti.com>
+| Signed-off-by: Devendra Naga <devendra.aaru@gmail.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit f646968f8f7c624587de729115d802372b9063dd
+|Author: Patrick McHardy <kaber@trash.net>
+|Date: Fri Apr 19 02:04:27 2013 +0000
+|
+| net: vlan: rename NETIF_F_HW_VLAN_* feature flags to NETIF_F_HW_VLAN_CTAG_*
+|
+| Rename the hardware VLAN acceleration features to include "CTAG" to indicate
+| that they only support CTAGs. Follow up patches will introduce 802.1ad
+| server provider tagging (STAGs) and require the distinction for hardware not
+| supporting acclerating both.
+|
+| Signed-off-by: Patrick McHardy <kaber@trash.net>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 80d5c3689b886308247da295a228a54df49a44f6
+|Author: Patrick McHardy <kaber@trash.net>
+|Date: Fri Apr 19 02:04:28 2013 +0000
+|
+| net: vlan: prepare for 802.1ad VLAN filtering offload
+|
+| Change the rx_{add,kill}_vid callbacks to take a protocol argument in
+| preparation of 802.1ad support. The protocol argument used so far is
+| always htons(ETH_P_8021Q).
+|
+| Signed-off-by: Patrick McHardy <kaber@trash.net>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 6e0895c2ea326cc4bb11e8fa2f654628d5754c31
+|Merge: 55fbbe4 60d509f
+|Author: David S. Miller <davem@davemloft.net>
+|Date: Mon Apr 22 20:32:51 2013 -0400
+|
+| Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
+|
+| Conflicts:
+| drivers/net/ethernet/emulex/benet/be_main.c
+| drivers/net/ethernet/intel/igb/igb_main.c
+| drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+| include/net/scm.h
+| net/batman-adv/routing.c
+| net/ipv4/tcp_input.c
+|
+| The e{uid,gid} --> {uid,gid} credentials fix conflicted with the
+| cleanup in net-next to now pass cred structs around.
+|
+| The be2net driver had a bug fix in 'net' that overlapped with the VLAN
+| interface changes by Patrick McHardy in net-next.
+|
+| An IGB conflict existed because in 'net' the build_skb() support was
+| reverted, and in 'net-next' there was a comment style fix within that
+| code.
+|
+| Several batman-adv conflicts were resolved by making sure that all
+| calls to batadv_is_my_mac() are changed to have a new bat_priv first
+| argument.
+|
+| Eric Dumazet's TS ECR fix in TCP in 'net' conflicted with the F-RTO
+| rewrite in 'net-next', mostly overlapping changes.
+|
+| Thanks to Stephen Rothwell and Antonio Quartulli for help with several
+| of these merge resolutions.
+|
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 817f6d1a13754b043e1a6c1cb713763022860689
+|Author: Sebastian Siewior <bigeasy@linutronix.de>
+|Date: Tue Apr 23 07:31:35 2013 +0000
+|
+| net/davinci_cpdma: don't check for jiffies with interrupts
+|
+| __cpdma_chan_process() holds the lock with interrupts off (and its
+| caller as well), same goes for cpdma_ctlr_start(). With interrupts off,
+| jiffies will not make any progress and if the wait condition never gets
+| true we wait for ever.
+| This patch adds a simple udelay and counting down attempt.
+|
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit aacebbf8026ecdae1b55db3912e65c6b1308f5ed
+|Author: Sebastian Siewior <bigeasy@linutronix.de>
+|Date: Tue Apr 23 07:31:36 2013 +0000
+|
+| net/cpsw: don't continue if we miss to allocate rx skbs
+|
+| if during "ifconfig up" we run out of mem we continue regardless how
+| many skbs we got. In worst case we have zero RX skbs and can't ever
+| receive further packets since the RX skbs are never reallocated. If
+| cpdma_chan_submit() fails we even leak the skb.
+| This patch changes the behavior here:
+| If we fail to allocate an skb during bring up we don't continue and
+| report that error. Same goes for errors from cpdma_chan_submit().
+| While here I changed to __netdev_alloc_skb_ip_align() so GFP_KERNEL can
+| be used.
+|
+| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit fd51cf199421197d14099b4ba382301cc28e5544
+|Author: Sebastian Siewior <bigeasy@linutronix.de>
+|Date: Tue Apr 23 07:31:37 2013 +0000
+|
+| net/cpsw: don't rely only on netif_running() to check which device is active
+|
+| netif_running() reports false before the ->ndo_stop() callback is
+| called. That means if one executes "ifconfig down" and the system
+| receives an interrupt before the interrupt source has been disabled we
+| hang for always for two reasons:
+| - we never disable the interrupt source because devices claim to be
+| already inactive and don't feel responsible.
+| - since the ISR always reports IRQ_HANDLED the line is never deactivated
+| because it looks like the ISR feels responsible.
+|
+| This patch changes the logic in the ISR a little:
+| - If none of the status registers reports an active source (RX or TX,
+| misc is ignored because it is not actived) we leave with IRQ_NONE.
+| - the interrupt is deactivated
+| - The first active network device is taken and napi is scheduled. If
+| none are active (a small race window between ndo_down() and the
+| interrupt the) then we leave and should not come back because the
+| source is off.
+| There is no need to schedule the second NAPI because both share the
+| same dma queue.
+|
+| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit aef614e13dfbdd3b9ae44ad110159f75b9029bba
+|Author: Sebastian Siewior <bigeasy@linutronix.de>
+|Date: Tue Apr 23 07:31:38 2013 +0000
+|
+| net/davinci_cpdma: remove unused argument in cpdma_chan_submit()
+|
+| The gfp_mask argument is not used in cpdma_chan_submit() and always set
+| to GFP_KERNEL even in atomic sections. This patch drops it since it is
+| unused.
+|
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit b4727e69b81b71c6e9696185091e8256d863f9be
+|Author: Sebastian Siewior <bigeasy@linutronix.de>
+|Date: Tue Apr 23 07:31:39 2013 +0000
+|
+| net/cpsw: redo rx skb allocation in rx path
+|
+| In case that we run into OOM during the allocation of the new rx-skb we
+| don't get one and we have one skb less than we used to have. If this
+| continues to happen then we end up with no rx-skbs at all.
+| This patch changes the following:
+| - if we fail to allocate the new skb, then we treat the currently
+| completed skb as the new one and so drop the currently received data.
+| - instead of testing multiple times if the device is gone we rely on
+| the status field which is set to -ENOSYS in case the channel is going
+| down and incomplete requests are purged.
+| cpdma_chan_stop() removes most of the packages with -ENOSYS. The
+| currently active packet which is removed has the "tear down" bit set.
+| So if that bit is set, we send ENOSYS as well otherwise we pass the
+| status bits which are required to figure out which of the two possible
+| just finished.
+|
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 4bc21d4162366bb892dc1a4a92110c656e2622ca
+|Author: Sebastian Siewior <bigeasy@linutronix.de>
+|Date: Wed Apr 24 08:48:22 2013 +0000
+|
+| net/ti: add MODULE_DEVICE_TABLE + MODULE_LICENSE
+|
+| If compiled as modules each one of these modules is missing something.
+| With this patch the modules are loaded on demand and don't taint the
+| kernel due to license issues.
+|
+| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit d1bd9acfa3419dc9d5c32589b34a370ca6ae100e
+|Author: Sebastian Siewior <bigeasy@linutronix.de>
+|Date: Wed Apr 24 08:48:23 2013 +0000
+|
+| net/cpsw: make sure modules remove does not leak any ressources
+|
+| This driver does not clean up properly after leaving. Here is a list:
+| - Use unregister_netdev(). free_netdev() is good but not enough
+| - Use the above also on the other ndev in case of dual mac
+| - Free data.slave_data. The name of the structure makes it look like
+| it is platform_data but it is not. It is just a trick!
+| - Free all irqs. Again: freeing one irq is good start, but freeing all
+| of them is better.
+|
+| With this rmmod & modprobe of cpsw seems to work. The remaining issue
+| is:
+| |WARNING: at fs/sysfs/dir.c:536 sysfs_add_one+0x9c/0xd4()
+| |sysfs: cannot create duplicate filename '/devices/ocp.2/4a100000.ethernet/4a101000.mdio'
+| |WARNING: at lib/kobject.c:196 kobject_add_internal+0x1a4/0x1c8()
+|
+| coming from of_platform_populate() and I am not sure that this belongs
+| here.
+|
+| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit 6e6ceaedb5901c7ebd23e5222726dab5362938bd
+|Author: Sebastian Siewior <bigeasy@linutronix.de>
+|Date: Wed Apr 24 08:48:24 2013 +0000
+|
+| net/cpsw: optimize the for_each_slave_macro()
+|
+| text data bss dec hex filename
+| 15530 92 4 15626 3d0a cpsw.o.before
+| 15478 92 4 15574 3cd6 cpsw.o.after
+|
+| 52 bytes smaller, 13 for each invocation.
+|
+| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+|
+|commit a11fbba9a7d338c4a4e4be624af0334bbf2c9a5a
+|Author: Sebastian Siewior <bigeasy@linutronix.de>
+|Date: Wed Apr 24 08:48:25 2013 +0000
+|
+| net/cpsw: fix irq_disable() with threaded interrupts
+|
+| During high throughput it is likely that we receive both: an RX and TX
+| interrupt. The normal behaviour is that once we enter the ISR the
+| interrupts are disabled in the IRQ chip and so the ISR is invoked only
+| once and the interrupt line is disabled once. It will be re-enabled
+| after napi completes.
+| With threaded interrupts on the other hand the interrupt
+| is disabled immediately and the ISR is marked for "later". By having TX
+| and RX interrupt marked pending we invoke them both and disable the
+| interrupt line twice. The napi callback is still executed once and so
+| after it completes we remain with interrupts disabled.
+|
+| The initial patch simply removed the cpsw_{enable|disable}_irq() calls
+| and it worked well on my AM335X ES1.0 (beagle bone). On ES2.0 (beagle
+| bone black) it caused an never ending interrupt (even after the mask via
+| cpsw_intr_disable()) according to Mugunthan V N. Since I don't have the
+| ES2.0 and no idea what is going on this patch tracks the state of the
+| irq_disable() call and execute it only when not yet done.
+| The book keeping is done on the first struct since with dual_emac we can
+| have two of those and only one interrupt line.
+|
+| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+| Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
+| Signed-off-by: David S. Miller <davem@davemloft.net>
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index c2f14e8..91fe4f1 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -349,7 +349,7 @@
+ rx_descs = <64>;
+ mac_control = <0x20>;
+ slaves = <2>;
+- cpts_active_slave = <0>;
++ active_slave = <0>;
+ cpts_clock_mult = <0x80000000>;
+ cpts_clock_shift = <29>;
+ reg = <0x4a100000 0x800
+@@ -385,5 +385,19 @@
+ mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
++
++ ocmcram: ocmcram@40300000 {
++ compatible = "ti,am3352-ocmcram";
++ reg = <0x40300000 0x10000>;
++ ti,hwmods = "ocmcram";
++ ti,no_idle_on_suspend;
++ };
++
++ wkup_m3: wkup_m3@44d00000 {
++ compatible = "ti,am3353-wkup-m3";
++ reg = <0x44d00000 0x4000 /* M3 UMEM */
++ 0x44d80000 0x2000>; /* M3 DMEM */
++ ti,hwmods = "wkup_m3";
++ };
+ };
+ };
+diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
+index 4426151..de71b1e 100644
+--- a/drivers/net/ethernet/ti/Kconfig
++++ b/drivers/net/ethernet/ti/Kconfig
+@@ -88,8 +88,8 @@ config TLAN
+ Please email feedback to <torben.mathiasen@compaq.com>.
+
+ config CPMAC
+- tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
+- depends on EXPERIMENTAL && AR7
++ tristate "TI AR7 CPMAC Ethernet support"
++ depends on AR7
+ select PHYLIB
+ ---help---
+ TI AR7 CPMAC Ethernet support
+diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
+index d9625f6..31bbbca 100644
+--- a/drivers/net/ethernet/ti/cpmac.c
++++ b/drivers/net/ethernet/ti/cpmac.c
+@@ -904,10 +904,9 @@ static int cpmac_set_ringparam(struct net_device *dev,
+ static void cpmac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+ {
+- strcpy(info->driver, "cpmac");
+- strcpy(info->version, CPMAC_VERSION);
+- info->fw_version[0] = '\0';
+- sprintf(info->bus_info, "%s", "cpmac");
++ strlcpy(info->driver, "cpmac", sizeof(info->driver));
++ strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
++ snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
+ info->regdump_len = 0;
+ }
+
+@@ -1173,8 +1172,8 @@ static int cpmac_probe(struct platform_device *pdev)
+ snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
+ mdio_bus_id, phy_id);
+
+- priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
+- PHY_INTERFACE_MODE_MII);
++ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
++ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(priv->phy)) {
+ if (netif_msg_drv(priv))
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 40aff68..4e2d224 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -32,6 +32,7 @@
+ #include <linux/of.h>
+ #include <linux/of_net.h>
+ #include <linux/of_device.h>
++#include <linux/if_vlan.h>
+
+ #include <linux/platform_data/cpsw.h>
+
+@@ -118,6 +119,20 @@ do { \
+ #define TX_PRIORITY_MAPPING 0x33221100
+ #define CPDMA_TX_PRIORITY_MAP 0x76543210
+
++#define CPSW_VLAN_AWARE BIT(1)
++#define CPSW_ALE_VLAN_AWARE 1
++
++#define CPSW_FIFO_NORMAL_MODE (0 << 15)
++#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
++#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
++
++#define CPSW_INTPACEEN (0x3f << 16)
++#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
++#define CPSW_CMINTMAX_CNT 63
++#define CPSW_CMINTMIN_CNT 2
++#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
++#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
++
+ #define cpsw_enable_irq(priv) \
+ do { \
+ u32 i; \
+@@ -131,6 +146,10 @@ do { \
+ disable_irq_nosync(priv->irqs_table[i]); \
+ } while (0);
+
++#define cpsw_slave_index(priv) \
++ ((priv->data.dual_emac) ? priv->emac_port : \
++ priv->data.active_slave)
++
+ static int debug_level;
+ module_param(debug_level, int, 0);
+ MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
+@@ -152,6 +171,15 @@ struct cpsw_wr_regs {
+ u32 rx_en;
+ u32 tx_en;
+ u32 misc_en;
++ u32 mem_allign1[8];
++ u32 rx_thresh_stat;
++ u32 rx_stat;
++ u32 tx_stat;
++ u32 misc_stat;
++ u32 mem_allign2[8];
++ u32 rx_imax;
++ u32 tx_imax;
++
+ };
+
+ struct cpsw_ss_regs {
+@@ -250,7 +278,7 @@ struct cpsw_ss_regs {
+ struct cpsw_host_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+- u32 flow_thresh;
++ u32 tx_in_ctl;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 cpdma_tx_pri_map;
+@@ -277,6 +305,9 @@ struct cpsw_slave {
+ u32 mac_control;
+ struct cpsw_slave_data *data;
+ struct phy_device *phy;
++ struct net_device *ndev;
++ u32 port_vlan;
++ u32 open_stat;
+ };
+
+ static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
+@@ -303,6 +334,8 @@ struct cpsw_priv {
+ struct cpsw_host_regs __iomem *host_port_regs;
+ u32 msg_enable;
+ u32 version;
++ u32 coal_intvl;
++ u32 bus_freq_mhz;
+ struct net_device_stats stats;
+ int rx_packet_max;
+ int host_port;
+@@ -315,17 +348,69 @@ struct cpsw_priv {
+ /* snapshot of IRQ numbers */
+ u32 irqs_table[4];
+ u32 num_irqs;
+- struct cpts cpts;
++ bool irq_enabled;
++ struct cpts *cpts;
++ u32 emac_port;
+ };
+
+ #define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
+-#define for_each_slave(priv, func, arg...) \
+- do { \
+- int idx; \
+- for (idx = 0; idx < (priv)->data.slaves; idx++) \
+- (func)((priv)->slaves + idx, ##arg); \
++#define for_each_slave(priv, func, arg...) \
++ do { \
++ struct cpsw_slave *slave; \
++ int n; \
++ if (priv->data.dual_emac) \
++ (func)((priv)->slaves + priv->emac_port, ##arg);\
++ else \
++ for (n = (priv)->data.slaves, \
++ slave = (priv)->slaves; \
++ n; n--) \
++ (func)(slave++, ##arg); \
++ } while (0)
++#define cpsw_get_slave_ndev(priv, __slave_no__) \
++ (priv->slaves[__slave_no__].ndev)
++#define cpsw_get_slave_priv(priv, __slave_no__) \
++ ((priv->slaves[__slave_no__].ndev) ? \
++ netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
++
++#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
++ do { \
++ if (!priv->data.dual_emac) \
++ break; \
++ if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
++ ndev = cpsw_get_slave_ndev(priv, 0); \
++ priv = netdev_priv(ndev); \
++ skb->dev = ndev; \
++ } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
++ ndev = cpsw_get_slave_ndev(priv, 1); \
++ priv = netdev_priv(ndev); \
++ skb->dev = ndev; \
++ } \
++ } while (0)
++#define cpsw_add_mcast(priv, addr) \
++ do { \
++ if (priv->data.dual_emac) { \
++ struct cpsw_slave *slave = priv->slaves + \
++ priv->emac_port; \
++ int slave_port = cpsw_get_slave_port(priv, \
++ slave->slave_num); \
++ cpsw_ale_add_mcast(priv->ale, addr, \
++ 1 << slave_port | 1 << priv->host_port, \
++ ALE_VLAN, slave->port_vlan, 0); \
++ } else { \
++ cpsw_ale_add_mcast(priv->ale, addr, \
++ ALE_ALL_PORTS << priv->host_port, \
++ 0, 0, 0); \
++ } \
+ } while (0)
+
++static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
++{
++ if (priv->host_port == 0)
++ return slave_num + 1;
++ else
++ return slave_num;
++}
++
+ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+ {
+ struct cpsw_priv *priv = netdev_priv(ndev);
+@@ -344,8 +429,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+
+ /* program multicast address list into ALE register */
+ netdev_for_each_mc_addr(ha, ndev) {
+- cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
+- ALE_ALL_PORTS << priv->host_port, 0, 0);
++ cpsw_add_mcast(priv, (u8 *)ha->addr);
+ }
+ }
+ }
+@@ -374,9 +458,12 @@ void cpsw_tx_handler(void *token, int len, int status)
+ struct net_device *ndev = skb->dev;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
++ /* Check whether the queue is stopped due to stalled tx dma, if the
++ * queue is stopped then start the queue as we have free desc for tx
++ */
+ if (unlikely(netif_queue_stopped(ndev)))
+- netif_start_queue(ndev);
+- cpts_tx_timestamp(&priv->cpts, skb);
++ netif_wake_queue(ndev);
++ cpts_tx_timestamp(priv->cpts, skb);
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
+@@ -385,61 +472,69 @@ void cpsw_tx_handler(void *token, int len, int status)
+ void cpsw_rx_handler(void *token, int len, int status)
+ {
+ struct sk_buff *skb = token;
++ struct sk_buff *new_skb;
+ struct net_device *ndev = skb->dev;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret = 0;
+
+- /* free and bail if we are shutting down */
+- if (unlikely(!netif_running(ndev)) ||
+- unlikely(!netif_carrier_ok(ndev))) {
++ cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
++
++ if (unlikely(status < 0)) {
++ /* the interface is going down, skbs are purged */
+ dev_kfree_skb_any(skb);
+ return;
+ }
+- if (likely(status >= 0)) {
++
++ new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
++ if (new_skb) {
+ skb_put(skb, len);
+- cpts_rx_timestamp(&priv->cpts, skb);
++ cpts_rx_timestamp(priv->cpts, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ priv->stats.rx_bytes += len;
+ priv->stats.rx_packets++;
+- skb = NULL;
+- }
+-
+- if (unlikely(!netif_running(ndev))) {
+- if (skb)
+- dev_kfree_skb_any(skb);
+- return;
++ } else {
++ priv->stats.rx_dropped++;
++ new_skb = skb;
+ }
+
+- if (likely(!skb)) {
+- skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
+- if (WARN_ON(!skb))
+- return;
+-
+- ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+- skb_tailroom(skb), GFP_KERNEL);
+- }
+- WARN_ON(ret < 0);
++ ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
++ skb_tailroom(new_skb), 0);
++ if (WARN_ON(ret < 0))
++ dev_kfree_skb_any(new_skb);
+ }
+
+ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
+ {
+ struct cpsw_priv *priv = dev_id;
++ u32 rx, tx, rx_thresh;
+
+- if (likely(netif_running(priv->ndev))) {
+- cpsw_intr_disable(priv);
++ rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
++ rx = __raw_readl(&priv->wr_regs->rx_stat);
++ tx = __raw_readl(&priv->wr_regs->tx_stat);
++ if (!rx_thresh && !rx && !tx)
++ return IRQ_NONE;
++
++ cpsw_intr_disable(priv);
++ if (priv->irq_enabled == true) {
+ cpsw_disable_irq(priv);
++ priv->irq_enabled = false;
++ }
++
++ if (netif_running(priv->ndev)) {
+ napi_schedule(&priv->napi);
++ return IRQ_HANDLED;
+ }
+- return IRQ_HANDLED;
+-}
+
+-static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+-{
+- if (priv->host_port == 0)
+- return slave_num + 1;
+- else
+- return slave_num;
++ priv = cpsw_get_slave_priv(priv, 1);
++ if (!priv)
++ return IRQ_NONE;
++
++ if (netif_running(priv->ndev)) {
++ napi_schedule(&priv->napi);
++ return IRQ_HANDLED;
++ }
++ return IRQ_NONE;
+ }
+
+ static int cpsw_poll(struct napi_struct *napi, int budget)
+@@ -448,19 +543,27 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
+ int num_tx, num_rx;
+
+ num_tx = cpdma_chan_process(priv->txch, 128);
+- num_rx = cpdma_chan_process(priv->rxch, budget);
+-
+- if (num_rx || num_tx)
+- cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
+- num_rx, num_tx);
++ if (num_tx)
++ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
++ num_rx = cpdma_chan_process(priv->rxch, budget);
+ if (num_rx < budget) {
++ struct cpsw_priv *prim_cpsw;
++
+ napi_complete(napi);
+ cpsw_intr_enable(priv);
+- cpdma_ctlr_eoi(priv->dma);
+- cpsw_enable_irq(priv);
++ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
++ prim_cpsw = cpsw_get_slave_priv(priv, 0);
++ if (prim_cpsw->irq_enabled == false) {
++ cpsw_enable_irq(priv);
++ prim_cpsw->irq_enabled = true;
++ }
+ }
+
++ if (num_rx || num_tx)
++ cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
++ num_rx, num_tx);
++
+ return num_rx;
+ }
+
+@@ -548,6 +651,77 @@ static void cpsw_adjust_link(struct net_device *ndev)
+ }
+ }
+
++static int cpsw_get_coalesce(struct net_device *ndev,
++ struct ethtool_coalesce *coal)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++
++ coal->rx_coalesce_usecs = priv->coal_intvl;
++ return 0;
++}
++
++static int cpsw_set_coalesce(struct net_device *ndev,
++ struct ethtool_coalesce *coal)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ u32 int_ctrl;
++ u32 num_interrupts = 0;
++ u32 prescale = 0;
++ u32 addnl_dvdr = 1;
++ u32 coal_intvl = 0;
++
++ if (!coal->rx_coalesce_usecs)
++ return -EINVAL;
++
++ coal_intvl = coal->rx_coalesce_usecs;
++
++ int_ctrl = readl(&priv->wr_regs->int_control);
++ prescale = priv->bus_freq_mhz * 4;
++
++ if (coal_intvl < CPSW_CMINTMIN_INTVL)
++ coal_intvl = CPSW_CMINTMIN_INTVL;
++
++ if (coal_intvl > CPSW_CMINTMAX_INTVL) {
++ /* Interrupt pacer works with 4us Pulse, we can
++ * throttle further by dilating the 4us pulse.
++ */
++ addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
++
++ if (addnl_dvdr > 1) {
++ prescale *= addnl_dvdr;
++ if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
++ coal_intvl = (CPSW_CMINTMAX_INTVL
++ * addnl_dvdr);
++ } else {
++ addnl_dvdr = 1;
++ coal_intvl = CPSW_CMINTMAX_INTVL;
++ }
++ }
++
++ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
++ writel(num_interrupts, &priv->wr_regs->rx_imax);
++ writel(num_interrupts, &priv->wr_regs->tx_imax);
++
++ int_ctrl |= CPSW_INTPACEEN;
++ int_ctrl &= (~CPSW_INTPRESCALE_MASK);
++ int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
++ writel(int_ctrl, &priv->wr_regs->int_control);
++
++ cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
++ if (priv->data.dual_emac) {
++ int i;
++
++ for (i = 0; i < priv->data.slaves; i++) {
++ priv = netdev_priv(priv->slaves[i].ndev);
++ priv->coal_intvl = coal_intvl;
++ }
++ } else {
++ priv->coal_intvl = coal_intvl;
++ }
++
++ return 0;
++}
++
+ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
+ {
+ static char *leader = "........................................";
+@@ -559,6 +733,54 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
+ leader + strlen(name), val);
+ }
+
++static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
++{
++ u32 i;
++ u32 usage_count = 0;
++
++ if (!priv->data.dual_emac)
++ return 0;
++
++ for (i = 0; i < priv->data.slaves; i++)
++ if (priv->slaves[i].open_stat)
++ usage_count++;
++
++ return usage_count;
++}
++
++static inline int cpsw_tx_packet_submit(struct net_device *ndev,
++ struct cpsw_priv *priv, struct sk_buff *skb)
++{
++ if (!priv->data.dual_emac)
++ return cpdma_chan_submit(priv->txch, skb, skb->data,
++ skb->len, 0);
++
++ if (ndev == cpsw_get_slave_ndev(priv, 0))
++ return cpdma_chan_submit(priv->txch, skb, skb->data,
++ skb->len, 1);
++ else
++ return cpdma_chan_submit(priv->txch, skb, skb->data,
++ skb->len, 2);
++}
++
++static inline void cpsw_add_dual_emac_def_ale_entries(
++ struct cpsw_priv *priv, struct cpsw_slave *slave,
++ u32 slave_port)
++{
++ u32 port_mask = 1 << slave_port | 1 << priv->host_port;
++
++ if (priv->version == CPSW_VERSION_1)
++ slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
++ else
++ slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
++ cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
++ port_mask, port_mask, 0);
++ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
++ port_mask, ALE_VLAN, slave->port_vlan, 0);
++ cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
++ priv->host_port, ALE_VLAN, slave->port_vlan);
++}
++
+ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+ {
+ char name[32];
+@@ -588,11 +810,14 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+
+ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+
+- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+- 1 << slave_port, 0, ALE_MCAST_FWD_2);
++ if (priv->data.dual_emac)
++ cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
++ else
++ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
++ 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
+
+ slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+- &cpsw_adjust_link, 0, slave->data->phy_if);
++ &cpsw_adjust_link, slave->data->phy_if);
+ if (IS_ERR(slave->phy)) {
+ dev_err(priv->dev, "phy %s not found on slave %d\n",
+ slave->data->phy_id, slave->slave_num);
+@@ -604,14 +829,44 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+ }
+ }
+
++static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
++{
++ const int vlan = priv->data.default_vlan;
++ const int port = priv->host_port;
++ u32 reg;
++ int i;
++
++ reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
++ CPSW2_PORT_VLAN;
++
++ writel(vlan, &priv->host_port_regs->port_vlan);
++
++ for (i = 0; i < priv->data.slaves; i++)
++ slave_write(priv->slaves + i, vlan, reg);
++
++ cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
++ ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
++ (ALE_PORT_1 | ALE_PORT_2) << port);
++}
++
+ static void cpsw_init_host_port(struct cpsw_priv *priv)
+ {
++ u32 control_reg;
++ u32 fifo_mode;
++
+ /* soft reset the controller and initialize ale */
+ soft_reset("cpsw", &priv->regs->soft_reset);
+ cpsw_ale_start(priv->ale);
+
+ /* switch to vlan unaware mode */
+- cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
++ cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
++ CPSW_ALE_VLAN_AWARE);
++ control_reg = readl(&priv->regs->control);
++ control_reg |= CPSW_VLAN_AWARE;
++ writel(control_reg, &priv->regs->control);
++ fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
++ CPSW_FIFO_NORMAL_MODE;
++ writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
+
+ /* setup host port priority mapping */
+ __raw_writel(CPDMA_TX_PRIORITY_MAP,
+@@ -621,18 +876,32 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
+ cpsw_ale_control_set(priv->ale, priv->host_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
+- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+- 1 << priv->host_port, 0, ALE_MCAST_FWD_2);
++ if (!priv->data.dual_emac) {
++ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
++ 0, 0);
++ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
++ 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
++ }
++}
++
++static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
++{
++ if (!slave->phy)
++ return;
++ phy_stop(slave->phy);
++ phy_disconnect(slave->phy);
++ slave->phy = NULL;
+ }
+
+ static int cpsw_ndo_open(struct net_device *ndev)
+ {
+ struct cpsw_priv *priv = netdev_priv(ndev);
++ struct cpsw_priv *prim_cpsw;
+ int i, ret;
+ u32 reg;
+
+- cpsw_intr_disable(priv);
++ if (!cpsw_common_res_usage_state(priv))
++ cpsw_intr_disable(priv);
+ netif_carrier_off(ndev);
+
+ pm_runtime_get_sync(&priv->pdev->dev);
+@@ -644,53 +913,81 @@ static int cpsw_ndo_open(struct net_device *ndev)
+ CPSW_RTL_VERSION(reg));
+
+ /* initialize host and slave ports */
+- cpsw_init_host_port(priv);
++ if (!cpsw_common_res_usage_state(priv))
++ cpsw_init_host_port(priv);
+ for_each_slave(priv, cpsw_slave_open, priv);
+
+- /* setup tx dma to fixed prio and zero offset */
+- cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
+- cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+-
+- /* disable priority elevation and enable statistics on all ports */
+- __raw_writel(0, &priv->regs->ptype);
+-
+- /* enable statistics collection only on the host port */
+- __raw_writel(0x7, &priv->regs->stat_port_en);
++ /* Add default VLAN */
++ if (!priv->data.dual_emac)
++ cpsw_add_default_vlan(priv);
++
++ if (!cpsw_common_res_usage_state(priv)) {
++ /* setup tx dma to fixed prio and zero offset */
++ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
++ cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
++
++ /* disable priority elevation */
++ __raw_writel(0, &priv->regs->ptype);
++
++ /* enable statistics collection only on all ports */
++ __raw_writel(0x7, &priv->regs->stat_port_en);
++
++ if (WARN_ON(!priv->data.rx_descs))
++ priv->data.rx_descs = 128;
++
++ for (i = 0; i < priv->data.rx_descs; i++) {
++ struct sk_buff *skb;
++
++ ret = -ENOMEM;
++ skb = __netdev_alloc_skb_ip_align(priv->ndev,
++ priv->rx_packet_max, GFP_KERNEL);
++ if (!skb)
++ goto err_cleanup;
++ ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
++ skb_tailroom(skb), 0);
++ if (ret < 0) {
++ kfree_skb(skb);
++ goto err_cleanup;
++ }
++ }
++ /* continue even if we didn't manage to submit all
++ * receive descs
++ */
++ cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
++ }
+
+- if (WARN_ON(!priv->data.rx_descs))
+- priv->data.rx_descs = 128;
++ /* Enable Interrupt pacing if configured */
++ if (priv->coal_intvl != 0) {
++ struct ethtool_coalesce coal;
+
+- for (i = 0; i < priv->data.rx_descs; i++) {
+- struct sk_buff *skb;
++ coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
++ cpsw_set_coalesce(ndev, &coal);
++ }
+
+- ret = -ENOMEM;
+- skb = netdev_alloc_skb_ip_align(priv->ndev,
+- priv->rx_packet_max);
+- if (!skb)
+- break;
+- ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+- skb_tailroom(skb), GFP_KERNEL);
+- if (WARN_ON(ret < 0))
+- break;
++ prim_cpsw = cpsw_get_slave_priv(priv, 0);
++ if (prim_cpsw->irq_enabled == false) {
++ if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
++ prim_cpsw->irq_enabled = true;
++ cpsw_enable_irq(prim_cpsw);
++ }
+ }
+- /* continue even if we didn't manage to submit all receive descs */
+- cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
+
+ cpdma_ctlr_start(priv->dma);
+ cpsw_intr_enable(priv);
+ napi_enable(&priv->napi);
+- cpdma_ctlr_eoi(priv->dma);
++ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
++ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
++ if (priv->data.dual_emac)
++ priv->slaves[priv->emac_port].open_stat = true;
+ return 0;
+-}
+
+-static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
+-{
+- if (!slave->phy)
+- return;
+- phy_stop(slave->phy);
+- phy_disconnect(slave->phy);
+- slave->phy = NULL;
++err_cleanup:
++ cpdma_ctlr_stop(priv->dma);
++ for_each_slave(priv, cpsw_slave_stop, priv);
++ pm_runtime_put_sync(&priv->pdev->dev);
++ netif_carrier_off(priv->ndev);
++ return ret;
+ }
+
+ static int cpsw_ndo_stop(struct net_device *ndev)
+@@ -701,12 +998,17 @@ static int cpsw_ndo_stop(struct net_device *ndev)
+ netif_stop_queue(priv->ndev);
+ napi_disable(&priv->napi);
+ netif_carrier_off(priv->ndev);
+- cpsw_intr_disable(priv);
+- cpdma_ctlr_int_ctrl(priv->dma, false);
+- cpdma_ctlr_stop(priv->dma);
+- cpsw_ale_stop(priv->ale);
++
++ if (cpsw_common_res_usage_state(priv) <= 1) {
++ cpsw_intr_disable(priv);
++ cpdma_ctlr_int_ctrl(priv->dma, false);
++ cpdma_ctlr_stop(priv->dma);
++ cpsw_ale_stop(priv->ale);
++ }
+ for_each_slave(priv, cpsw_slave_stop, priv);
+ pm_runtime_put_sync(&priv->pdev->dev);
++ if (priv->data.dual_emac)
++ priv->slaves[priv->emac_port].open_stat = false;
+ return 0;
+ }
+
+@@ -724,18 +1026,24 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+ }
+
+- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
++ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
++ priv->cpts->tx_enable)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_tx_timestamp(skb);
+
+- ret = cpdma_chan_submit(priv->txch, skb, skb->data,
+- skb->len, GFP_KERNEL);
++ ret = cpsw_tx_packet_submit(ndev, priv, skb);
+ if (unlikely(ret != 0)) {
+ cpsw_err(priv, tx_err, "desc submit failed\n");
+ goto fail;
+ }
+
++ /* If there is no more tx desc left free then we need to
++ * tell the kernel to stop sending us tx frames.
++ */
++ if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
++ netif_stop_queue(ndev);
++
+ return NETDEV_TX_OK;
+ fail:
+ priv->stats.tx_dropped++;
+@@ -770,10 +1078,10 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
+
+ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+ {
+- struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
++ struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
+ u32 ts_en, seq_id;
+
+- if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
++ if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
+ slave_write(slave, 0, CPSW1_TS_CTL);
+ return;
+ }
+@@ -781,10 +1089,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+ seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+ ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+- if (priv->cpts.tx_enable)
++ if (priv->cpts->tx_enable)
+ ts_en |= CPSW_V1_TS_TX_EN;
+
+- if (priv->cpts.rx_enable)
++ if (priv->cpts->rx_enable)
+ ts_en |= CPSW_V1_TS_RX_EN;
+
+ slave_write(slave, ts_en, CPSW1_TS_CTL);
+@@ -793,16 +1101,21 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+
+ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+ {
+- struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
++ struct cpsw_slave *slave;
+ u32 ctrl, mtype;
+
++ if (priv->data.dual_emac)
++ slave = &priv->slaves[priv->emac_port];
++ else
++ slave = &priv->slaves[priv->data.active_slave];
++
+ ctrl = slave_read(slave, CPSW2_CONTROL);
+ ctrl &= ~CTRL_ALL_TS_MASK;
+
+- if (priv->cpts.tx_enable)
++ if (priv->cpts->tx_enable)
+ ctrl |= CTRL_TX_TS_BITS;
+
+- if (priv->cpts.rx_enable)
++ if (priv->cpts->rx_enable)
+ ctrl |= CTRL_RX_TS_BITS;
+
+ mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+@@ -815,7 +1128,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+ {
+ struct cpsw_priv *priv = netdev_priv(dev);
+- struct cpts *cpts = &priv->cpts;
++ struct cpts *cpts = priv->cpts;
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+@@ -879,14 +1192,26 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+
+ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+ {
++ struct cpsw_priv *priv = netdev_priv(dev);
++ struct mii_ioctl_data *data = if_mii(req);
++ int slave_no = cpsw_slave_index(priv);
++
+ if (!netif_running(dev))
+ return -EINVAL;
+
++ switch (cmd) {
+ #ifdef CONFIG_TI_CPTS
+- if (cmd == SIOCSHWTSTAMP)
++ case SIOCSHWTSTAMP:
+ return cpsw_hwtstamp_ioctl(dev, req);
+ #endif
+- return -ENOTSUPP;
++ case SIOCGMIIPHY:
++ data->phy_id = priv->slaves[slave_no].phy->addr;
++ break;
++ default:
++ return -ENOTSUPP;
++ }
++
++ return 0;
+ }
+
+ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
+@@ -901,7 +1226,9 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
+ cpdma_chan_start(priv->txch);
+ cpdma_ctlr_int_ctrl(priv->dma, true);
+ cpsw_intr_enable(priv);
+- cpdma_ctlr_eoi(priv->dma);
++ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
++ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
++
+ }
+
+ static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
+@@ -920,10 +1247,79 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
+ cpsw_interrupt(ndev->irq, priv);
+ cpdma_ctlr_int_ctrl(priv->dma, true);
+ cpsw_intr_enable(priv);
+- cpdma_ctlr_eoi(priv->dma);
++ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
++ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
++
+ }
+ #endif
+
++static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
++ unsigned short vid)
++{
++ int ret;
++
++ ret = cpsw_ale_add_vlan(priv->ale, vid,
++ ALE_ALL_PORTS << priv->host_port,
++ 0, ALE_ALL_PORTS << priv->host_port,
++ (ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
++ if (ret != 0)
++ return ret;
++
++ ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
++ priv->host_port, ALE_VLAN, vid);
++ if (ret != 0)
++ goto clean_vid;
++
++ ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
++ ALE_ALL_PORTS << priv->host_port,
++ ALE_VLAN, vid, 0);
++ if (ret != 0)
++ goto clean_vlan_ucast;
++ return 0;
++
++clean_vlan_ucast:
++ cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
++ priv->host_port, ALE_VLAN, vid);
++clean_vid:
++ cpsw_ale_del_vlan(priv->ale, vid, 0);
++ return ret;
++}
++
++static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
++ __be16 proto, u16 vid)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++
++ if (vid == priv->data.default_vlan)
++ return 0;
++
++ dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
++ return cpsw_add_vlan_ale_entry(priv, vid);
++}
++
++static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
++ __be16 proto, u16 vid)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ int ret;
++
++ if (vid == priv->data.default_vlan)
++ return 0;
++
++ dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
++ ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
++ if (ret != 0)
++ return ret;
++
++ ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
++ priv->host_port, ALE_VLAN, vid);
++ if (ret != 0)
++ return ret;
++
++ return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
++ 0, ALE_VLAN, vid);
++}
++
+ static const struct net_device_ops cpsw_netdev_ops = {
+ .ndo_open = cpsw_ndo_open,
+ .ndo_stop = cpsw_ndo_stop,
+@@ -938,15 +1334,18 @@ static const struct net_device_ops cpsw_netdev_ops = {
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cpsw_ndo_poll_controller,
+ #endif
++ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
++ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
+ };
+
+ static void cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+ {
+ struct cpsw_priv *priv = netdev_priv(ndev);
+- strcpy(info->driver, "TI CPSW Driver v1.0");
+- strcpy(info->version, "1.0");
+- strcpy(info->bus_info, priv->pdev->name);
++
++ strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver));
++ strlcpy(info->version, "1.0", sizeof(info->version));
++ strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
+ }
+
+ static u32 cpsw_get_msglevel(struct net_device *ndev)
+@@ -974,7 +1373,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+- info->phc_index = priv->cpts.phc_index;
++ info->phc_index = priv->cpts->phc_index;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+@@ -993,12 +1392,39 @@ static int cpsw_get_ts_info(struct net_device *ndev,
+ return 0;
+ }
+
++static int cpsw_get_settings(struct net_device *ndev,
++ struct ethtool_cmd *ecmd)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ int slave_no = cpsw_slave_index(priv);
++
++ if (priv->slaves[slave_no].phy)
++ return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
++ else
++ return -EOPNOTSUPP;
++}
++
++static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ int slave_no = cpsw_slave_index(priv);
++
++ if (priv->slaves[slave_no].phy)
++ return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
++ else
++ return -EOPNOTSUPP;
++}
++
+ static const struct ethtool_ops cpsw_ethtool_ops = {
+ .get_drvinfo = cpsw_get_drvinfo,
+ .get_msglevel = cpsw_get_msglevel,
+ .set_msglevel = cpsw_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = cpsw_get_ts_info,
++ .get_settings = cpsw_get_settings,
++ .set_settings = cpsw_set_settings,
++ .get_coalesce = cpsw_get_coalesce,
++ .set_coalesce = cpsw_set_coalesce,
+ };
+
+ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
+@@ -1011,6 +1437,7 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
+ slave->data = data;
+ slave->regs = regs + slave_reg_ofs;
+ slave->sliver = regs + sliver_reg_ofs;
++ slave->port_vlan = data->dual_emac_res_vlan;
+ }
+
+ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+@@ -1030,12 +1457,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ }
+ data->slaves = prop;
+
+- if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
+- pr_err("Missing cpts_active_slave property in the DT.\n");
++ if (of_property_read_u32(node, "active_slave", &prop)) {
++ pr_err("Missing active_slave property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+- data->cpts_active_slave = prop;
++ data->active_slave = prop;
+
+ if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
+ pr_err("Missing cpts_clock_mult property in the DT.\n");
+@@ -1051,12 +1478,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ }
+ data->cpts_clock_shift = prop;
+
+- data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
+- data->slaves, GFP_KERNEL);
+- if (!data->slave_data) {
+- pr_err("Could not allocate slave memory.\n");
++ data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
++ GFP_KERNEL);
++ if (!data->slave_data)
+ return -EINVAL;
+- }
+
+ if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+ pr_err("Missing cpdma_channels property in the DT.\n");
+@@ -1093,6 +1518,9 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ }
+ data->mac_control = prop;
+
++ if (!of_property_read_u32(node, "dual_emac", &prop))
++ data->dual_emac = prop;
++
+ /*
+ * Populate all the child nodes here...
+ */
+@@ -1111,7 +1539,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ struct platform_device *mdio;
+
+ parp = of_get_property(slave_node, "phy_id", &lenp);
+- if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
++ if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
+ pr_err("Missing slave[%d] phy_id property\n", i);
+ ret = -EINVAL;
+ goto error_ret;
+@@ -1126,6 +1554,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ if (mac_addr)
+ memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+
++ if (data->dual_emac) {
++ if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
++ &prop)) {
++ pr_err("Missing dual_emac_res_vlan in DT.\n");
++ slave_data->dual_emac_res_vlan = i+1;
++ pr_err("Using %d as Reserved VLAN for %d slave\n",
++ slave_data->dual_emac_res_vlan, i);
++ } else {
++ slave_data->dual_emac_res_vlan = prop;
++ }
++ }
++
+ i++;
+ }
+
+@@ -1136,9 +1576,85 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ return ret;
+ }
+
++static int cpsw_probe_dual_emac(struct platform_device *pdev,
++ struct cpsw_priv *priv)
++{
++ struct cpsw_platform_data *data = &priv->data;
++ struct net_device *ndev;
++ struct cpsw_priv *priv_sl2;
++ int ret = 0, i;
++
++ ndev = alloc_etherdev(sizeof(struct cpsw_priv));
++ if (!ndev) {
++ pr_err("cpsw: error allocating net_device\n");
++ return -ENOMEM;
++ }
++
++ priv_sl2 = netdev_priv(ndev);
++ spin_lock_init(&priv_sl2->lock);
++ priv_sl2->data = *data;
++ priv_sl2->pdev = pdev;
++ priv_sl2->ndev = ndev;
++ priv_sl2->dev = &ndev->dev;
++ priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
++ priv_sl2->rx_packet_max = max(rx_packet_max, 128);
++
++ if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
++ memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
++ ETH_ALEN);
++ pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
++ } else {
++ random_ether_addr(priv_sl2->mac_addr);
++ pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
++ }
++ memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
++
++ priv_sl2->slaves = priv->slaves;
++ priv_sl2->clk = priv->clk;
++
++ priv_sl2->coal_intvl = 0;
++ priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
++
++ priv_sl2->cpsw_res = priv->cpsw_res;
++ priv_sl2->regs = priv->regs;
++ priv_sl2->host_port = priv->host_port;
++ priv_sl2->host_port_regs = priv->host_port_regs;
++ priv_sl2->wr_regs = priv->wr_regs;
++ priv_sl2->dma = priv->dma;
++ priv_sl2->txch = priv->txch;
++ priv_sl2->rxch = priv->rxch;
++ priv_sl2->ale = priv->ale;
++ priv_sl2->emac_port = 1;
++ priv->slaves[1].ndev = ndev;
++ priv_sl2->cpts = priv->cpts;
++ priv_sl2->version = priv->version;
++
++ for (i = 0; i < priv->num_irqs; i++) {
++ priv_sl2->irqs_table[i] = priv->irqs_table[i];
++ priv_sl2->num_irqs = priv->num_irqs;
++ }
++ priv->irq_enabled = true;
++ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++
++ ndev->netdev_ops = &cpsw_netdev_ops;
++ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
++ netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
++
++ /* register the network device */
++ SET_NETDEV_DEV(ndev, &pdev->dev);
++ ret = register_netdev(ndev);
++ if (ret) {
++ pr_err("cpsw: error registering net device\n");
++ free_netdev(ndev);
++ ret = -ENODEV;
++ }
++
++ return ret;
++}
++
+ static int cpsw_probe(struct platform_device *pdev)
+ {
+- struct cpsw_platform_data *data = pdev->dev.platform_data;
++ struct cpsw_platform_data *data;
+ struct net_device *ndev;
+ struct cpsw_priv *priv;
+ struct cpdma_params dma_params;
+@@ -1162,6 +1678,11 @@ static int cpsw_probe(struct platform_device *pdev)
+ priv->dev = &ndev->dev;
+ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv->rx_packet_max = max(rx_packet_max, 128);
++ priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
++ if (!ndev) {
++ pr_err("error allocating cpts\n");
++ goto clean_ndev_ret;
++ }
+
+ /*
+ * This may be required here for child devices.
+@@ -1194,12 +1715,17 @@ static int cpsw_probe(struct platform_device *pdev)
+ for (i = 0; i < data->slaves; i++)
+ priv->slaves[i].slave_num = i;
+
++ priv->slaves[0].ndev = ndev;
++ priv->emac_port = 0;
++
+ priv->clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "fck is not found\n");
+ ret = -ENODEV;
+ goto clean_slave_ret;
+ }
++ priv->coal_intvl = 0;
++ priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
+
+ priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!priv->cpsw_res) {
+@@ -1248,7 +1774,7 @@ static int cpsw_probe(struct platform_device *pdev)
+ switch (priv->version) {
+ case CPSW_VERSION_1:
+ priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+- priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET;
++ priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
+@@ -1259,7 +1785,7 @@ static int cpsw_probe(struct platform_device *pdev)
+ break;
+ case CPSW_VERSION_2:
+ priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+- priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET;
++ priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
+@@ -1341,12 +1867,12 @@ static int cpsw_probe(struct platform_device *pdev)
+ goto clean_ale_ret;
+ }
+ priv->irqs_table[k] = i;
+- priv->num_irqs = k;
++ priv->num_irqs = k + 1;
+ }
+ k++;
+ }
+
+- ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */
++ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+@@ -1361,17 +1887,26 @@ static int cpsw_probe(struct platform_device *pdev)
+ goto clean_irq_ret;
+ }
+
+- if (cpts_register(&pdev->dev, &priv->cpts,
++ if (cpts_register(&pdev->dev, priv->cpts,
+ data->cpts_clock_mult, data->cpts_clock_shift))
+ dev_err(priv->dev, "error registering cpts device\n");
+
+ cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
+ priv->cpsw_res->start, ndev->irq);
+
++ if (priv->data.dual_emac) {
++ ret = cpsw_probe_dual_emac(pdev, priv);
++ if (ret) {
++ cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
++ goto clean_irq_ret;
++ }
++ }
++
+ return 0;
+
+ clean_irq_ret:
+- free_irq(ndev->irq, priv);
++ for (i = 0; i < priv->num_irqs; i++)
++ free_irq(priv->irqs_table[i], priv);
+ clean_ale_ret:
+ cpsw_ale_destroy(priv->ale);
+ clean_dma_ret:
+@@ -1394,7 +1929,8 @@ static int cpsw_probe(struct platform_device *pdev)
+ pm_runtime_disable(&pdev->dev);
+ kfree(priv->slaves);
+ clean_ndev_ret:
+- free_netdev(ndev);
++ kfree(priv->data.slave_data);
++ free_netdev(priv->ndev);
+ return ret;
+ }
+
+@@ -1402,12 +1938,17 @@ static int cpsw_remove(struct platform_device *pdev)
+ {
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
++ int i;
+
+- pr_info("removing device");
+ platform_set_drvdata(pdev, NULL);
++ if (priv->data.dual_emac)
++ unregister_netdev(cpsw_get_slave_ndev(priv, 1));
++ unregister_netdev(ndev);
++
++ cpts_unregister(priv->cpts);
++ for (i = 0; i < priv->num_irqs; i++)
++ free_irq(priv->irqs_table[i], priv);
+
+- cpts_unregister(&priv->cpts);
+- free_irq(ndev->irq, priv);
+ cpsw_ale_destroy(priv->ale);
+ cpdma_chan_destroy(priv->txch);
+ cpdma_chan_destroy(priv->rxch);
+@@ -1421,8 +1962,10 @@ static int cpsw_remove(struct platform_device *pdev)
+ pm_runtime_disable(&pdev->dev);
+ clk_put(priv->clk);
+ kfree(priv->slaves);
++ kfree(priv->data.slave_data);
++ if (priv->data.dual_emac)
++ free_netdev(cpsw_get_slave_ndev(priv, 1));
+ free_netdev(ndev);
+-
+ return 0;
+ }
+
+@@ -1458,6 +2001,7 @@ static const struct of_device_id cpsw_of_mtable[] = {
+ { .compatible = "ti,cpsw", },
+ { /* sentinel */ },
+ };
++MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
+ static struct platform_driver cpsw_driver = {
+ .driver = {
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 0e9ccc2..7fa60d6 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
+ return idx;
+ }
+
+-static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
++int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
+ {
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+@@ -160,6 +160,8 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+ continue;
++ if (cpsw_ale_get_vlan_id(ale_entry) != vid)
++ continue;
+ cpsw_ale_get_addr(ale_entry, entry_addr);
+ if (memcmp(entry_addr, addr, 6) == 0)
+ return idx;
+@@ -167,6 +169,22 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
+ return -ENOENT;
+ }
+
++int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS];
++ int type, idx;
++
++ for (idx = 0; idx < ale->params.ale_entries; idx++) {
++ cpsw_ale_read(ale, idx, ale_entry);
++ type = cpsw_ale_get_entry_type(ale_entry);
++ if (type != ALE_TYPE_VLAN)
++ continue;
++ if (cpsw_ale_get_vlan_id(ale_entry) == vid)
++ return idx;
++ }
++ return -ENOENT;
++}
++
+ static int cpsw_ale_match_free(struct cpsw_ale *ale)
+ {
+ u32 ale_entry[ALE_ENTRY_WORDS];
+@@ -274,19 +292,32 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
+ return 0;
+ }
+
+-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
++static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
++ int flags, u16 vid)
++{
++ if (flags & ALE_VLAN) {
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR);
++ cpsw_ale_set_vlan_id(ale_entry, vid);
++ } else {
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
++ }
++}
++
++int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
++ int flags, u16 vid)
+ {
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
++ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
++
+ cpsw_ale_set_addr(ale_entry, addr);
+ cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
+ cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
+ cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+ cpsw_ale_set_port_num(ale_entry, port);
+
+- idx = cpsw_ale_match_addr(ale, addr);
++ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+@@ -298,12 +329,13 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
+ return 0;
+ }
+
+-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
++int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
++ int flags, u16 vid)
+ {
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+- idx = cpsw_ale_match_addr(ale, addr);
++ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+ if (idx < 0)
+ return -ENOENT;
+
+@@ -313,18 +345,19 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
+ }
+
+ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+- int super, int mcast_state)
++ int flags, u16 vid, int mcast_state)
+ {
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx, mask;
+
+- idx = cpsw_ale_match_addr(ale, addr);
++ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
++ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
++
+ cpsw_ale_set_addr(ale_entry, addr);
+- cpsw_ale_set_super(ale_entry, super);
++ cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+ cpsw_ale_set_mcast_state(ale_entry, mcast_state);
+
+ mask = cpsw_ale_get_port_mask(ale_entry);
+@@ -342,12 +375,13 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ return 0;
+ }
+
+-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
++int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
++ int flags, u16 vid)
+ {
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+- idx = cpsw_ale_match_addr(ale, addr);
++ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+ if (idx < 0)
+ return -EINVAL;
+
+@@ -362,6 +396,55 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
+ return 0;
+ }
+
++int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
++ int reg_mcast, int unreg_mcast)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
++ int idx;
++
++ idx = cpsw_ale_match_vlan(ale, vid);
++ if (idx >= 0)
++ cpsw_ale_read(ale, idx, ale_entry);
++
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
++ cpsw_ale_set_vlan_id(ale_entry, vid);
++
++ cpsw_ale_set_vlan_untag_force(ale_entry, untag);
++ cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
++ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
++ cpsw_ale_set_vlan_member_list(ale_entry, port);
++
++ if (idx < 0)
++ idx = cpsw_ale_match_free(ale);
++ if (idx < 0)
++ idx = cpsw_ale_find_ageable(ale);
++ if (idx < 0)
++ return -ENOMEM;
++
++ cpsw_ale_write(ale, idx, ale_entry);
++ return 0;
++}
++
++int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
++ int idx;
++
++ idx = cpsw_ale_match_vlan(ale, vid);
++ if (idx < 0)
++ return -ENOENT;
++
++ cpsw_ale_read(ale, idx, ale_entry);
++
++ if (port_mask)
++ cpsw_ale_set_vlan_member_list(ale_entry, port_mask);
++ else
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++
++ cpsw_ale_write(ale, idx, ale_entry);
++ return 0;
++}
++
+ struct ale_control_info {
+ const char *name;
+ int offset, port_offset;
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
+index 2bd09cb..30daa12 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.h
++++ b/drivers/net/ethernet/ti/cpsw_ale.h
+@@ -64,8 +64,14 @@ enum cpsw_ale_port_state {
+ };
+
+ /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
+-#define ALE_SECURE 1
+-#define ALE_BLOCKED 2
++#define ALE_SECURE BIT(0)
++#define ALE_BLOCKED BIT(1)
++#define ALE_SUPER BIT(2)
++#define ALE_VLAN BIT(3)
++
++#define ALE_PORT_HOST BIT(0)
++#define ALE_PORT_1 BIT(1)
++#define ALE_PORT_2 BIT(2)
+
+ #define ALE_MCAST_FWD 0
+ #define ALE_MCAST_BLOCK_LEARN_FWD 1
+@@ -81,11 +87,17 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
+ int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
+ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
+ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
+-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
+-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
++int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
++ int flags, u16 vid);
++int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
++ int flags, u16 vid);
+ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+- int super, int mcast_state);
+-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask);
++ int flags, u16 vid, int mcast_state);
++int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
++ int flags, u16 vid);
++int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
++ int reg_mcast, int unreg_mcast);
++int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
+
+ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
+ int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
+diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
+index 4995673..49dfd59 100644
+--- a/drivers/net/ethernet/ti/davinci_cpdma.c
++++ b/drivers/net/ethernet/ti/davinci_cpdma.c
+@@ -20,6 +20,7 @@
+ #include <linux/err.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/io.h>
++#include <linux/delay.h>
+
+ #include "davinci_cpdma.h"
+
+@@ -60,6 +61,9 @@
+ #define CPDMA_DESC_EOQ BIT(28)
+ #define CPDMA_DESC_TD_COMPLETE BIT(27)
+ #define CPDMA_DESC_PASS_CRC BIT(26)
++#define CPDMA_DESC_TO_PORT_EN BIT(20)
++#define CPDMA_TO_PORT_SHIFT 16
++#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16))
+
+ #define CPDMA_TEARDOWN_VALUE 0xfffffffc
+
+@@ -105,13 +109,13 @@ struct cpdma_ctlr {
+ };
+
+ struct cpdma_chan {
++ struct cpdma_desc __iomem *head, *tail;
++ void __iomem *hdp, *cp, *rxfree;
+ enum cpdma_state state;
+ struct cpdma_ctlr *ctlr;
+ int chan_num;
+ spinlock_t lock;
+- struct cpdma_desc __iomem *head, *tail;
+ int count;
+- void __iomem *hdp, *cp, *rxfree;
+ u32 mask;
+ cpdma_handler_fn handler;
+ enum dma_data_direction dir;
+@@ -132,6 +136,14 @@ struct cpdma_chan {
+ #define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
+ #define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+
++#define cpdma_desc_to_port(chan, mode, directed) \
++ do { \
++ if (!is_rx_chan(chan) && ((directed == 1) || \
++ (directed == 2))) \
++ mode |= (CPDMA_DESC_TO_PORT_EN | \
++ (directed << CPDMA_TO_PORT_SHIFT)); \
++ } while (0)
++
+ /*
+ * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
+ * emac) have dedicated on-chip memory for these descriptors. Some other
+@@ -217,17 +229,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
+ }
+
+ static struct cpdma_desc __iomem *
+-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
++cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
+ {
+ unsigned long flags;
+ int index;
++ int desc_start;
++ int desc_end;
+ struct cpdma_desc __iomem *desc = NULL;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+- index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
+- num_desc, 0);
+- if (index < pool->num_desc) {
++ if (is_rx) {
++ desc_start = 0;
++ desc_end = pool->num_desc/2;
++ } else {
++ desc_start = pool->num_desc/2;
++ desc_end = pool->num_desc;
++ }
++
++ index = bitmap_find_next_zero_area(pool->bitmap,
++ desc_end, desc_start, num_desc, 0);
++ if (index < desc_end) {
+ bitmap_set(pool->bitmap, index, num_desc);
+ desc = pool->iomap + pool->desc_size * index;
+ pool->used_desc++;
+@@ -291,14 +313,16 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+ }
+
+ if (ctlr->params.has_soft_reset) {
+- unsigned long timeout = jiffies + HZ/10;
++ unsigned timeout = 10 * 100;
+
+ dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
+- while (time_before(jiffies, timeout)) {
++ while (timeout) {
+ if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
+ break;
++ udelay(10);
++ timeout--;
+ }
+- WARN_ON(!time_before(jiffies, timeout));
++ WARN_ON(!timeout);
+ }
+
+ for (i = 0; i < ctlr->num_chan; i++) {
+@@ -439,10 +463,8 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
+ if (ctlr->state != CPDMA_STATE_IDLE)
+ cpdma_ctlr_stop(ctlr);
+
+- for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+- if (ctlr->channels[i])
+- cpdma_chan_destroy(ctlr->channels[i]);
+- }
++ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
++ cpdma_chan_destroy(ctlr->channels[i]);
+
+ cpdma_desc_pool_destroy(ctlr->pool);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+@@ -473,11 +495,13 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
+
+-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
++void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
+ {
+- dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
++ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
+ }
++EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
+
+ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler)
+@@ -652,7 +676,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
+ }
+
+ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+- int len, gfp_t gfp_mask)
++ int len, int directed)
+ {
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+@@ -668,7 +692,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ goto unlock_ret;
+ }
+
+- desc = cpdma_desc_alloc(ctlr->pool, 1);
++ desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
+ if (!desc) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+@@ -682,6 +706,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+
+ buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+ mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
++ cpdma_desc_to_port(chan, mode, directed);
+
+ desc_write(desc, hw_next, 0);
+ desc_write(desc, hw_buffer, buffer);
+@@ -704,6 +729,29 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ }
+ EXPORT_SYMBOL_GPL(cpdma_chan_submit);
+
++bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
++{
++ unsigned long flags;
++ int index;
++ bool ret;
++ struct cpdma_ctlr *ctlr = chan->ctlr;
++ struct cpdma_desc_pool *pool = ctlr->pool;
++
++ spin_lock_irqsave(&pool->lock, flags);
++
++ index = bitmap_find_next_zero_area(pool->bitmap,
++ pool->num_desc, pool->num_desc/2, 1, 0);
++
++ if (index < pool->num_desc)
++ ret = true;
++ else
++ ret = false;
++
++ spin_unlock_irqrestore(&pool->lock, flags);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
++
+ static void __cpdma_chan_free(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc,
+ int outlen, int status)
+@@ -728,6 +776,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ int status, outlen;
++ int cb_status = 0;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ unsigned long flags;
+@@ -749,7 +798,8 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
+ status = -EBUSY;
+ goto unlock_ret;
+ }
+- status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
++ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
++ CPDMA_DESC_PORT_MASK);
+
+ chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
+ chan_write(chan, cp, desc_dma);
+@@ -762,8 +812,12 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
++ if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
++ cb_status = -ENOSYS;
++ else
++ cb_status = status;
+
+- __cpdma_chan_free(chan, desc, outlen, status);
++ __cpdma_chan_free(chan, desc, outlen, cb_status);
+ return status;
+
+ unlock_ret:
+@@ -822,7 +876,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+ int ret;
+- unsigned long timeout;
++ unsigned timeout;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+@@ -837,14 +891,15 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
+ dma_reg_write(ctlr, chan->td, chan_linear(chan));
+
+ /* wait for teardown complete */
+- timeout = jiffies + HZ/10; /* 100 msec */
+- while (time_before(jiffies, timeout)) {
++ timeout = 100 * 100; /* 100 ms */
++ while (timeout) {
+ u32 cp = chan_read(chan, cp);
+ if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
+ break;
+- cpu_relax();
++ udelay(10);
++ timeout--;
+ }
+- WARN_ON(!time_before(jiffies, timeout));
++ WARN_ON(!timeout);
+ chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
+
+ /* handle completed packets */
+@@ -984,3 +1039,6 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(cpdma_control_set);
++
++MODULE_LICENSE("GPL");
+diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
+index afa19a0..86dee48 100644
+--- a/drivers/net/ethernet/ti/davinci_cpdma.h
++++ b/drivers/net/ethernet/ti/davinci_cpdma.h
+@@ -24,6 +24,13 @@
+ #define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+ #define chan_linear(chan) __chan_linear((chan)->chan_num)
+
++#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7)
++
++#define CPDMA_EOI_RX_THRESH 0x0
++#define CPDMA_EOI_RX 0x1
++#define CPDMA_EOI_TX 0x2
++#define CPDMA_EOI_MISC 0x3
++
+ struct cpdma_params {
+ struct device *dev;
+ void __iomem *dmaregs;
+@@ -82,12 +89,13 @@ int cpdma_chan_dump(struct cpdma_chan *chan);
+ int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats);
+ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+- int len, gfp_t gfp_mask);
++ int len, int directed);
+ int cpdma_chan_process(struct cpdma_chan *chan, int quota);
+
+ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
+-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
++void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
+ int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
++bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
+
+ enum cpdma_control {
+ CPDMA_CMD_IDLE, /* write-only */
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index 2a3e2c5..860e15d 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -120,7 +120,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
+ #define EMAC_DEF_TX_CH (0) /* Default 0th channel */
+ #define EMAC_DEF_RX_CH (0) /* Default 0th channel */
+ #define EMAC_DEF_RX_NUM_DESC (128)
+-#define EMAC_DEF_TX_NUM_DESC (128)
+ #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
+ #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
+ #define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
+@@ -342,7 +341,6 @@ struct emac_priv {
+ u32 mac_hash2;
+ u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
+ u32 rx_addr_type;
+- atomic_t cur_tx;
+ const char *phy_id;
+ #ifdef CONFIG_OF
+ struct device_node *phy_node;
+@@ -480,8 +478,8 @@ static void emac_dump_regs(struct emac_priv *priv)
+ static void emac_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+ {
+- strcpy(info->driver, emac_version_string);
+- strcpy(info->version, EMAC_MODULE_VERSION);
++ strlcpy(info->driver, emac_version_string, sizeof(info->driver));
++ strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
+ }
+
+ /**
+@@ -1039,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status)
+
+ recycle:
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+- skb_tailroom(skb), GFP_KERNEL);
++ skb_tailroom(skb), 0);
+
+ WARN_ON(ret == -ENOMEM);
+ if (unlikely(ret < 0))
+@@ -1050,12 +1048,12 @@ static void emac_tx_handler(void *token, int len, int status)
+ {
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+- struct emac_priv *priv = netdev_priv(ndev);
+-
+- atomic_dec(&priv->cur_tx);
+
++ /* Check whether the queue is stopped due to stalled tx dma, if the
++ * queue is stopped then start the queue as we have free desc for tx
++ */
+ if (unlikely(netif_queue_stopped(ndev)))
+- netif_start_queue(ndev);
++ netif_wake_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
+@@ -1094,14 +1092,17 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+ skb_tx_timestamp(skb);
+
+ ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
+- GFP_KERNEL);
++ 0);
+ if (unlikely(ret_code != 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
+ goto fail_tx;
+ }
+
+- if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
++ /* If there is no more tx desc left free then we need to
++ * tell the kernel to stop sending us tx frames.
++ */
++ if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+@@ -1264,7 +1265,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
+ /* Store mac addr in priv and rx channel and set it in EMAC hw */
+ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+ memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
+- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
+
+ /* MAC address is configured only after the interface is enabled. */
+ if (netif_running(ndev)) {
+@@ -1438,7 +1438,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
+ * Polled functionality used by netconsole and others in non interrupt mode
+ *
+ */
+-void emac_poll_controller(struct net_device *ndev)
++static void emac_poll_controller(struct net_device *ndev)
+ {
+ struct emac_priv *priv = netdev_priv(ndev);
+
+@@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev)
+ break;
+
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+- skb_tailroom(skb), GFP_KERNEL);
++ skb_tailroom(skb), 0);
+ if (WARN_ON(ret < 0))
+ break;
+ }
+@@ -1600,7 +1600,7 @@ static int emac_dev_open(struct net_device *ndev)
+
+ if (priv->phy_id && *priv->phy_id) {
+ priv->phydev = phy_connect(ndev, priv->phy_id,
+- &emac_adjust_link, 0,
++ &emac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(priv->phydev)) {
+@@ -1865,21 +1865,18 @@ static int davinci_emac_probe(struct platform_device *pdev)
+
+
+ /* obtain emac clock from kernel */
+- emac_clk = clk_get(&pdev->dev, NULL);
++ emac_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(emac_clk)) {
+ dev_err(&pdev->dev, "failed to get EMAC clock\n");
+ return -EBUSY;
+ }
+ emac_bus_frequency = clk_get_rate(emac_clk);
+- clk_put(emac_clk);
+
+ /* TODO: Probe PHY here if possible */
+
+ ndev = alloc_etherdev(sizeof(struct emac_priv));
+- if (!ndev) {
+- rc = -ENOMEM;
+- goto no_ndev;
+- }
++ if (!ndev)
++ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+ priv = netdev_priv(ndev);
+@@ -1893,7 +1890,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ rc = -ENODEV;
+- goto probe_quit;
++ goto no_pdata;
+ }
+
+ /* MAC addr and PHY mask , RMII enable info from platform_data */
+@@ -1913,23 +1910,23 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ if (!res) {
+ dev_err(&pdev->dev,"error getting res\n");
+ rc = -ENOENT;
+- goto probe_quit;
++ goto no_pdata;
+ }
+
+ priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
+ size = resource_size(res);
+- if (!request_mem_region(res->start, size, ndev->name)) {
++ if (!devm_request_mem_region(&pdev->dev, res->start,
++ size, ndev->name)) {
+ dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
+ rc = -ENXIO;
+- goto probe_quit;
++ goto no_pdata;
+ }
+
+- priv->remap_addr = ioremap(res->start, size);
++ priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size);
+ if (!priv->remap_addr) {
+ dev_err(&pdev->dev, "unable to map IO\n");
+ rc = -ENOMEM;
+- release_mem_region(res->start, size);
+- goto probe_quit;
++ goto no_pdata;
+ }
+ priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
+ ndev->base_addr = (unsigned long)priv->remap_addr;
+@@ -1962,7 +1959,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ if (!priv->dma) {
+ dev_err(&pdev->dev, "error initializing DMA\n");
+ rc = -ENOMEM;
+- goto no_dma;
++ goto no_pdata;
+ }
+
+ priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
+@@ -1971,14 +1968,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ emac_rx_handler);
+ if (WARN_ON(!priv->txchan || !priv->rxchan)) {
+ rc = -ENOMEM;
+- goto no_irq_res;
++ goto no_cpdma_chan;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "error getting irq res\n");
+ rc = -ENOENT;
+- goto no_irq_res;
++ goto no_cpdma_chan;
+ }
+ ndev->irq = res->start;
+
+@@ -2000,7 +1997,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ if (rc) {
+ dev_err(&pdev->dev, "error in register_netdev\n");
+ rc = -ENODEV;
+- goto no_irq_res;
++ goto no_cpdma_chan;
+ }
+
+
+@@ -2015,20 +2012,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
+
+ return 0;
+
+-no_irq_res:
++no_cpdma_chan:
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
+-no_dma:
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- release_mem_region(res->start, resource_size(res));
+- iounmap(priv->remap_addr);
+-
+-probe_quit:
++no_pdata:
+ free_netdev(ndev);
+-no_ndev:
+ return rc;
+ }
+
+@@ -2041,14 +2032,12 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ */
+ static int davinci_emac_remove(struct platform_device *pdev)
+ {
+- struct resource *res;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
+
+ platform_set_drvdata(pdev, NULL);
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+@@ -2056,10 +2045,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
+
+- release_mem_region(res->start, resource_size(res));
+-
+ unregister_netdev(ndev);
+- iounmap(priv->remap_addr);
+ free_netdev(ndev);
+
+ return 0;
+diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
+index cca2550..12aec17 100644
+--- a/drivers/net/ethernet/ti/davinci_mdio.c
++++ b/drivers/net/ethernet/ti/davinci_mdio.c
+@@ -320,10 +320,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
+ int ret, addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+- if (!data) {
+- dev_err(dev, "failed to alloc device data\n");
++ if (!data)
+ return -ENOMEM;
+- }
+
+ data->bus = mdiobus_alloc();
+ if (!data->bus) {
+@@ -487,6 +485,7 @@ static const struct of_device_id davinci_mdio_of_mtable[] = {
+ { .compatible = "ti,davinci_mdio", },
+ { /* sentinel */ },
+ };
++MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
+
+ static struct platform_driver davinci_mdio_driver = {
+ .driver = {
+diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
+index 2272538..60c400f 100644
+--- a/drivers/net/ethernet/ti/tlan.c
++++ b/drivers/net/ethernet/ti/tlan.c
+@@ -320,6 +320,7 @@ static void tlan_remove_one(struct pci_dev *pdev)
+ free_netdev(dev);
+
+ pci_set_drvdata(pdev, NULL);
++ cancel_work_sync(&priv->tlan_tqueue);
+ }
+
+ static void tlan_start(struct net_device *dev)
+@@ -1911,10 +1912,8 @@ static void tlan_reset_lists(struct net_device *dev)
+ list->frame_size = TLAN_MAX_FRAME_SIZE;
+ list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
+- if (!skb) {
+- netdev_err(dev, "Out of memory for received data\n");
++ if (!skb)
+ break;
+- }
+
+ list->buffer[0].address = pci_map_single(priv->pci_dev,
+ skb->data,
+diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h
+index 24368a2..bb3cd58 100644
+--- a/include/linux/platform_data/cpsw.h
++++ b/include/linux/platform_data/cpsw.h
+@@ -21,6 +21,8 @@ struct cpsw_slave_data {
+ char phy_id[MII_BUS_ID_SIZE];
+ int phy_if;
+ u8 mac_addr[ETH_ALEN];
++ u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
++
+ };
+
+ struct cpsw_platform_data {
+@@ -28,13 +30,15 @@ struct cpsw_platform_data {
+ u32 channels; /* number of cpdma channels (symmetric) */
+ u32 slaves; /* number of slave cpgmac ports */
+ struct cpsw_slave_data *slave_data;
+- u32 cpts_active_slave; /* time stamping slave */
++ u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
+ u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
+ u32 ale_entries; /* ale table size */
+ u32 bd_ram_size; /*buffer descriptor ram size */
+ u32 rx_descs; /* Number of Rx Descriptios */
+ u32 mac_control; /* Mac control register */
++ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
++ bool dual_emac; /* Enable Dual EMAC mode */
+ };
+
+ #endif /* __CPSW_H__ */
diff --git a/patches/cpsw-fix-missplaced-init-chunk.patch b/patches/cpsw-fix-missplaced-init-chunk.patch
new file mode 100644
index 0000000..79af446
--- /dev/null
+++ b/patches/cpsw-fix-missplaced-init-chunk.patch
@@ -0,0 +1,29 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: [PATCH] net/cpsw: fix misplaced init chunk
+
+This is a fixup for "net/cpsw: fix irq_disable() with threaded interrupts"
+where the assignment was made into the slave and not into the main device.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/ti/cpsw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1633,7 +1633,6 @@ static int cpsw_probe_dual_emac(struct p
+ priv_sl2->irqs_table[i] = priv->irqs_table[i];
+ priv_sl2->num_irqs = priv->num_irqs;
+ }
+- priv->irq_enabled = true;
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+@@ -1872,6 +1871,7 @@ static int cpsw_probe(struct platform_de
+ k++;
+ }
+
++ priv->irq_enabled = true;
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
diff --git a/patches/cpsw-net-cpsw-Use-fallback-for-active_slave.patch b/patches/cpsw-net-cpsw-Use-fallback-for-active_slave.patch
new file mode 100644
index 0000000..5f607a9
--- /dev/null
+++ b/patches/cpsw-net-cpsw-Use-fallback-for-active_slave.patch
@@ -0,0 +1,35 @@
+From f06f021d625c05db6c0114828e87bfe816a3c2aa Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 16 Apr 2013 12:34:09 +0200
+Subject: [PATCH] net/cpsw: Use fallback for active_slave
+
+In case the .dts has not yet been updated, we also try to look for the
+old "cpts_active_slave" property.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/ti/cpsw.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 1b2126d..4701ab3 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1424,8 +1424,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+
+ if (of_property_read_u32(node, "active_slave", &prop)) {
+ pr_err("Missing active_slave property in the DT.\n");
+- ret = -EINVAL;
+- goto error_ret;
++ if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
++ ret = -EINVAL;
++ goto error_ret;
++ } else {
++ pr_err("Using old cpts_active_slave as fallback.\n");
++ }
+ }
+ data->active_slave = prop;
+
+--
+1.7.10.4
+
diff --git a/patches/cpsw-net-cpsw-use-a-lock-around-source-testing.patch b/patches/cpsw-net-cpsw-use-a-lock-around-source-testing.patch
new file mode 100644
index 0000000..cefdc4d
--- /dev/null
+++ b/patches/cpsw-net-cpsw-use-a-lock-around-source-testing.patch
@@ -0,0 +1,76 @@
+From a195f411daf93b76cacb11b0230e7f17cb167c12 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 Apr 2013 20:01:22 +0200
+Subject: [PATCH] net/cpsw: use a lock around source testing
+
+For some reason on RT it happens that the TX interrupt fires over and
+over again but according to tx_stat there is nothing going on. Same goes
+for RX but not that often.
+With this lock around it this is gone. However I still see from time to
+time interrupts where each source is set to 0.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/ti/cpsw.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 2633be6..ef942cd 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -490,7 +490,7 @@ void cpsw_rx_handler(void *token, int len, int status)
+ skb_put(skb, len);
+ cpts_rx_timestamp(priv->cpts, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+- netif_receive_skb(skb);
++ netif_rx(skb);
+ priv->stats.rx_bytes += len;
+ priv->stats.rx_packets++;
+ } else {
+@@ -507,19 +507,24 @@ void cpsw_rx_handler(void *token, int len, int status)
+ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
+ {
+ struct cpsw_priv *priv = dev_id;
++ unsigned long flags;
+ u32 rx, tx, rx_thresh;
+
++ spin_lock_irqsave(&priv->lock, flags);
+ rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
+ rx = __raw_readl(&priv->wr_regs->rx_stat);
+ tx = __raw_readl(&priv->wr_regs->tx_stat);
+- if (!rx_thresh && !rx && !tx)
++ if (!rx_thresh && !rx && !tx) {
++ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
++ }
+
+ cpsw_intr_disable(priv);
+ if (priv->irq_enabled == true) {
+ cpsw_disable_irq(priv);
+ priv->irq_enabled = false;
+ }
++ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (netif_running(priv->ndev)) {
+ napi_schedule(&priv->napi);
+@@ -541,7 +546,9 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
+ {
+ struct cpsw_priv *priv = napi_to_priv(napi);
+ int num_tx, num_rx;
++ unsigned long flags;
+
++ spin_lock_irqsave(&priv->lock, flags);
+ num_tx = cpdma_chan_process(priv->txch, 128);
+ if (num_tx)
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+@@ -559,6 +566,7 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
+ prim_cpsw->irq_enabled = true;
+ }
+ }
++ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (num_rx || num_tx)
+ cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
+--
+1.7.10.4
+
diff --git a/patches/cpsw-revert-stable-patches.patch b/patches/cpsw-revert-stable-patches.patch
new file mode 100644
index 0000000..f12db86
--- /dev/null
+++ b/patches/cpsw-revert-stable-patches.patch
@@ -0,0 +1,42 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>$
+Subject: [PATCH] net/cpsw: revert stable patches v3.8..v3.8.9
+
+and apply them from net & net-next branch.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+diff --git b/drivers/net/ethernet/ti/cpsw.c a/drivers/net/ethernet/ti/cpsw.c
+index 3b1be52..40aff68 100644
+--- b/drivers/net/ethernet/ti/cpsw.c
++++ a/drivers/net/ethernet/ti/cpsw.c
+@@ -375,7 +375,7 @@ void cpsw_tx_handler(void *token, int len, int status)
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ if (unlikely(netif_queue_stopped(ndev)))
+- netif_wake_queue(ndev);
++ netif_start_queue(ndev);
+ cpts_tx_timestamp(&priv->cpts, skb);
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += len;
+@@ -1111,7 +1111,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ struct platform_device *mdio;
+
+ parp = of_get_property(slave_node, "phy_id", &lenp);
+- if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
++ if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
+ pr_err("Missing slave[%d] phy_id property\n", i);
+ ret = -EINVAL;
+ goto error_ret;
+diff --git b/drivers/net/ethernet/ti/davinci_emac.c a/drivers/net/ethernet/ti/davinci_emac.c
+index 4ebcb24..2a3e2c5 100644
+--- b/drivers/net/ethernet/ti/davinci_emac.c
++++ a/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1055,7 +1055,7 @@ static void emac_tx_handler(void *token, int len, int status)
+ atomic_dec(&priv->cur_tx);
+
+ if (unlikely(netif_queue_stopped(ndev)))
+- netif_wake_queue(ndev);
++ netif_start_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 7a6496a..54850a6 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1977,6 +1977,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -1978,6 +1978,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -1989,6 +1993,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -1990,6 +1994,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
@@ -443,7 +443,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* CPU didn't die: tell everyone. Can't complain. */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2896,7 +2896,7 @@ void migrate_disable(void)
+@@ -2898,7 +2898,7 @@ void migrate_disable(void)
{
struct task_struct *p = current;
@@ -452,7 +452,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
#endif
-@@ -2927,7 +2927,7 @@ void migrate_enable(void)
+@@ -2929,7 +2929,7 @@ void migrate_enable(void)
unsigned long flags;
struct rq *rq;
@@ -461,7 +461,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic--;
#endif
-@@ -4872,6 +4872,84 @@ void do_set_cpus_allowed(struct task_str
+@@ -4874,6 +4874,84 @@ void do_set_cpus_allowed(struct task_str
cpumask_copy(&p->cpus_allowed, new_mask);
}
diff --git a/patches/drm-i915-move-i915_trace_irq_get-out-of-the-tracing-.patch b/patches/drm-i915-move-i915_trace_irq_get-out-of-the-tracing-.patch
new file mode 100644
index 0000000..ec04603
--- /dev/null
+++ b/patches/drm-i915-move-i915_trace_irq_get-out-of-the-tracing-.patch
@@ -0,0 +1,47 @@
+From d841118ac80c5bfb18f47984bc40687eed08b714 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 25 Apr 2013 18:12:52 +0200
+Subject: [PATCH 1/5] drm/i915: move i915_trace_irq_get() out of the tracing
+ macro
+
+Reported-by: Joakim Hernberg <jbh@alchemy.lu>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1 +
+ drivers/gpu/drm/i915/i915_trace.h | 1 -
+ drivers/gpu/drm/i915/intel_ringbuffer.h | 2 ++
+ 3 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1069,6 +1069,7 @@ i915_gem_do_execbuffer(struct drm_device
+ }
+
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
++ i915_trace_irq_get(ring, seqno);
+
+ i915_gem_execbuffer_move_to_active(&objects, ring);
+ i915_gem_execbuffer_retire_commands(dev, file, ring);
+--- a/drivers/gpu/drm/i915/i915_trace.h
++++ b/drivers/gpu/drm/i915/i915_trace.h
+@@ -244,7 +244,6 @@ TRACE_EVENT(i915_gem_ring_dispatch,
+ __entry->ring = ring->id;
+ __entry->seqno = seqno;
+ __entry->flags = flags;
+- i915_trace_irq_get(ring, seqno);
+ ),
+
+ TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
+@@ -232,8 +232,10 @@ static inline u32 intel_ring_get_seqno(s
+
+ static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+ {
++#ifdef CONFIG_TRACEPOINTS
+ if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+ ring->trace_irq_seqno = seqno;
++#endif
+ }
+
+ /* DRI warts */
diff --git a/patches/fs-fscache-remove-spin_lock-from-the-condition-in-wh.patch b/patches/fs-fscache-remove-spin_lock-from-the-condition-in-wh.patch
new file mode 100644
index 0000000..fb97bd5
--- /dev/null
+++ b/patches/fs-fscache-remove-spin_lock-from-the-condition-in-wh.patch
@@ -0,0 +1,49 @@
+From d32f5420ac164141683c51e8a8fce666423de492 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 3 Apr 2013 12:43:30 +0200
+Subject: [PATCH] fs/fscache: remove spin_lock() from the condition in while()
+
+The spinlock() within the condition in while() will cause a compile error
+if it is not a function. This is not a problem on mainline but it does not
+look pretty and there is no reason to do it that way.
+That patch writes it a little differently and avoids the double condition.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/fscache/page.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/fs/fscache/page.c b/fs/fscache/page.c
+index ff000e5..c84696c 100644
+--- a/fs/fscache/page.c
++++ b/fs/fscache/page.c
+@@ -796,11 +796,13 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
+
+ _enter("");
+
+- while (spin_lock(&cookie->stores_lock),
+- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
+- ARRAY_SIZE(results),
+- FSCACHE_COOKIE_PENDING_TAG),
+- n > 0) {
++ spin_lock(&cookie->stores_lock);
++ while (1) {
++ n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
++ ARRAY_SIZE(results),
++ FSCACHE_COOKIE_PENDING_TAG);
++ if (n == 0)
++ break;
+ for (i = n - 1; i >= 0; i--) {
+ page = results[i];
+ radix_tree_delete(&cookie->stores, page->index);
+@@ -810,6 +812,7 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
+
+ for (i = n - 1; i >= 0; i--)
+ page_cache_release(results[i]);
++ spin_lock(&cookie->stores_lock);
+ }
+
+ spin_unlock(&cookie->stores_lock);
+--
+1.7.10.4
+
diff --git a/patches/fscache_compile_fix.patch b/patches/fscache_compile_fix.patch
deleted file mode 100644
index 7cf833c..0000000
--- a/patches/fscache_compile_fix.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Subject: fs/fscache: done merge spin_lock() in while()
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---- a/fs/fscache/page.c
-+++ b/fs/fscache/page.c
-@@ -796,11 +796,13 @@ void fscache_invalidate_writes(struct fs
-
- _enter("");
-
-- while (spin_lock(&cookie->stores_lock),
-- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
-- ARRAY_SIZE(results),
-- FSCACHE_COOKIE_PENDING_TAG),
-- n > 0) {
-+ do {
-+ spin_lock(&cookie->stores_lock);
-+ n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
-+ ARRAY_SIZE(results),
-+ FSCACHE_COOKIE_PENDING_TAG);
-+ if (n == 0)
-+ break;
- for (i = n - 1; i >= 0; i--) {
- page = results[i];
- radix_tree_delete(&cookie->stores, page->index);
-@@ -810,7 +812,7 @@ void fscache_invalidate_writes(struct fs
-
- for (i = n - 1; i >= 0; i--)
- page_cache_release(results[i]);
-- }
-+ } while (1);
-
- spin_unlock(&cookie->stores_lock);
- _leave("");
diff --git a/patches/ftrace-crap.patch b/patches/ftrace-crap.patch
new file mode 100644
index 0000000..70a9227
--- /dev/null
+++ b/patches/ftrace-crap.patch
@@ -0,0 +1,92 @@
+Subject: ftrace-crap.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 09 Sep 2011 16:55:53 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/trace/trace.c | 26 ++++++++++++++++++++++++--
+ kernel/trace/trace.h | 1 -
+ 2 files changed, 24 insertions(+), 3 deletions(-)
+
+Index: linux-stable/kernel/trace/trace.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace.c
++++ linux-stable/kernel/trace/trace.c
+@@ -402,11 +402,13 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
+ */
+ void trace_wake_up(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ const unsigned long delay = msecs_to_jiffies(2);
+
+ if (trace_flags & TRACE_ITER_BLOCK)
+ return;
+ schedule_delayed_work(&wakeup_work, delay);
++#endif
+ }
+
+ static int __init set_buf_size(char *str)
+@@ -756,6 +758,12 @@ update_max_tr_single(struct trace_array
+ }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++static void default_wait_pipe(struct trace_iterator *iter);
++#else
++#define default_wait_pipe poll_wait_pipe
++#endif
++
+ /**
+ * register_tracer - register a tracer with the ftrace system.
+ * @type - the plugin for the tracer
+@@ -3365,6 +3373,7 @@ static int tracing_release_pipe(struct i
+ return 0;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static unsigned int
+ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+ {
+@@ -3386,8 +3395,7 @@ tracing_poll_pipe(struct file *filp, pol
+ }
+ }
+
+-
+-void default_wait_pipe(struct trace_iterator *iter)
++static void default_wait_pipe(struct trace_iterator *iter)
+ {
+ DEFINE_WAIT(wait);
+
+@@ -3398,6 +3406,20 @@ void default_wait_pipe(struct trace_iter
+
+ finish_wait(&trace_wait, &wait);
+ }
++#else
++static unsigned int
++tracing_poll_pipe(struct file *filp, poll_table *poll_table)
++{
++ struct trace_iterator *iter = filp->private_data;
++
++ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ poll_wait_pipe(iter);
++ if (!trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++#endif
+
+ /*
+ * This is a make-shift waitqueue.
+Index: linux-stable/kernel/trace/trace.h
+===================================================================
+--- linux-stable.orig/kernel/trace/trace.h
++++ linux-stable/kernel/trace/trace.h
+@@ -367,7 +367,6 @@ void trace_init_global_iter(struct trace
+
+ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+
+-void default_wait_pipe(struct trace_iterator *iter);
+ void poll_wait_pipe(struct trace_iterator *iter);
+
+ void ftrace(struct trace_array *tr,
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index a85247d..01860c7 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define FTRACE_MAX_EVENT \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1177,6 +1177,8 @@ tracing_generic_entry_update(struct trac
+@@ -1178,6 +1178,8 @@ tracing_generic_entry_update(struct trac
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -2034,9 +2036,10 @@ static void print_lat_help_header(struct
+@@ -2035,9 +2037,10 @@ static void print_lat_help_header(struct
seq_puts(m, "# | / _----=> need-resched \n");
seq_puts(m, "# || / _---=> hardirq/softirq \n");
seq_puts(m, "# ||| / _--=> preempt-depth \n");
diff --git a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 930553a..800191d 100644
--- a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -41,7 +41,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
ktime_t softirq_time;
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
-@@ -589,8 +589,7 @@ static int hrtimer_reprogram(struct hrti
+@@ -590,8 +590,7 @@ static int hrtimer_reprogram(struct hrti
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
* the callback is executed in the hrtimer_interrupt context. The
@@ -51,7 +51,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
*/
if (hrtimer_callback_running(timer))
return 0;
-@@ -625,6 +624,9 @@ static int hrtimer_reprogram(struct hrti
+@@ -626,6 +625,9 @@ static int hrtimer_reprogram(struct hrti
return res;
}
@@ -61,7 +61,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
* Initialize the high resolution related parts of cpu_base
*/
-@@ -641,9 +643,18 @@ static inline void hrtimer_init_hres(str
+@@ -642,9 +644,18 @@ static inline void hrtimer_init_hres(str
* and expiry check is done in the hrtimer_interrupt or in the softirq.
*/
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
@@ -82,7 +82,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
-@@ -724,12 +735,18 @@ static inline int hrtimer_switch_to_hres
+@@ -725,12 +736,18 @@ static inline int hrtimer_switch_to_hres
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
@@ -102,7 +102,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
#endif /* CONFIG_HIGH_RES_TIMERS */
-@@ -861,9 +878,9 @@ void hrtimer_wait_for_timer(const struct
+@@ -862,9 +879,9 @@ void hrtimer_wait_for_timer(const struct
{
struct hrtimer_clock_base *base = timer->base;
@@ -114,7 +114,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
#else
-@@ -913,6 +930,11 @@ static void __remove_hrtimer(struct hrti
+@@ -914,6 +931,11 @@ static void __remove_hrtimer(struct hrti
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
@@ -126,7 +126,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
-@@ -1020,9 +1042,19 @@ int __hrtimer_start_range_ns(struct hrti
+@@ -1021,9 +1043,19 @@ int __hrtimer_start_range_ns(struct hrti
*
* XXX send_remote_softirq() ?
*/
@@ -149,7 +149,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
* We need to drop cpu_base->lock to avoid a
* lock ordering issue vs. rq->lock.
-@@ -1030,9 +1062,7 @@ int __hrtimer_start_range_ns(struct hrti
+@@ -1031,9 +1063,7 @@ int __hrtimer_start_range_ns(struct hrti
raw_spin_unlock(&new_base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
local_irq_restore(flags);
@@ -160,7 +160,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
}
-@@ -1199,6 +1229,7 @@ static void __hrtimer_init(struct hrtime
+@@ -1200,6 +1230,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@@ -168,7 +168,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
-@@ -1282,10 +1313,128 @@ static void __run_hrtimer(struct hrtimer
+@@ -1283,10 +1314,128 @@ static void __run_hrtimer(struct hrtimer
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
@@ -299,7 +299,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
* High resolution timer interrupt
* Called with interrupts disabled
-@@ -1294,7 +1443,7 @@ void hrtimer_interrupt(struct clock_even
+@@ -1295,7 +1444,7 @@ void hrtimer_interrupt(struct clock_even
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
@@ -308,7 +308,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
-@@ -1361,7 +1510,10 @@ retry:
+@@ -1362,7 +1511,10 @@ retry:
break;
}
@@ -320,7 +320,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
}
-@@ -1376,6 +1528,10 @@ retry:
+@@ -1377,6 +1529,10 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
@@ -331,7 +331,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
return;
}
-@@ -1456,24 +1612,26 @@ void hrtimer_peek_ahead_timers(void)
+@@ -1457,24 +1613,26 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
@@ -365,7 +365,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
* Called from timer softirq every jiffy, expire hrtimers:
*
-@@ -1506,7 +1664,7 @@ void hrtimer_run_queues(void)
+@@ -1507,7 +1665,7 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
@@ -374,7 +374,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
if (hrtimer_hres_active())
return;
-@@ -1531,12 +1689,16 @@ void hrtimer_run_queues(void)
+@@ -1532,12 +1690,16 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
@@ -393,7 +393,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
/*
-@@ -1558,6 +1720,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1559,6 +1721,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@@ -401,7 +401,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1696,6 +1859,7 @@ static void __cpuinit init_hrtimers_cpu(
+@@ -1695,6 +1858,7 @@ static void __cpuinit init_hrtimers_cpu(
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -409,7 +409,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
hrtimer_init_hres(cpu_base);
-@@ -1814,9 +1978,7 @@ void __init hrtimers_init(void)
+@@ -1813,9 +1977,7 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
diff --git a/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch b/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
index 5c8bce5..f1acc64 100644
--- a/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
+++ b/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
@@ -12,7 +12,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
-@@ -1528,11 +1528,7 @@ retry:
+@@ -1529,11 +1529,7 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
@@ -25,7 +25,7 @@ Cc: stable-rt@vger.kernel.org
}
/*
-@@ -1576,6 +1572,9 @@ retry:
+@@ -1577,6 +1573,9 @@ retry:
tick_program_event(expires_next, 1);
printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
ktime_to_ns(delta));
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index be6ba1d..8208370 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
-@@ -844,6 +844,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -845,6 +845,32 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -1094,7 +1120,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1095,7 +1121,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1509,6 +1535,8 @@ void hrtimer_run_queues(void)
+@@ -1510,6 +1536,8 @@ void hrtimer_run_queues(void)
}
raw_spin_unlock(&cpu_base->lock);
}
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1671,6 +1699,9 @@ static void __cpuinit init_hrtimers_cpu(
+@@ -1670,6 +1698,9 @@ static void __cpuinit init_hrtimers_cpu(
}
hrtimer_init_hres(cpu_base);
diff --git a/patches/i915_compile_fix.patch b/patches/i915_compile_fix.patch
index 0b20079..7911d78 100644
--- a/patches/i915_compile_fix.patch
+++ b/patches/i915_compile_fix.patch
@@ -1,7 +1,14 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: gpu/i915: don't open code these things
+The opencode part is gone in 1f83fee0 ("drm/i915: clear up wedged transitions")
+the owner check is still there.
+
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_gem.c | 12 +++---------
+ 1 file changed, 3 insertions(+), 9 deletions(-)
+
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -91,7 +91,6 @@ i915_gem_wait_for_error(struct drm_devic
diff --git a/patches/idle-state.patch b/patches/idle-state.patch
index 4f1255c..1958a69 100644
--- a/patches/idle-state.patch
+++ b/patches/idle-state.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4941,6 +4941,7 @@ void __cpuinit init_idle(struct task_str
+@@ -4943,6 +4943,7 @@ void __cpuinit init_idle(struct task_str
rcu_read_unlock();
rq->curr = rq->idle = idle;
diff --git a/patches/ipc-make-rt-aware.patch b/patches/ipc-make-rt-aware.patch
index 8b081cf..e594c95 100644
--- a/patches/ipc-make-rt-aware.patch
+++ b/patches/ipc-make-rt-aware.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
-@@ -912,12 +912,17 @@ static inline void pipelined_send(struct
+@@ -921,12 +921,17 @@ static inline void pipelined_send(struct
struct msg_msg *message,
struct ext_wait_queue *receiver)
{
diff --git a/patches/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch b/patches/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch
index 4d65d5b..a4f8fd4 100644
--- a/patches/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch
+++ b/patches/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
-@@ -936,13 +936,18 @@ static inline void pipelined_receive(str
+@@ -945,13 +945,18 @@ static inline void pipelined_receive(str
wake_up_interruptible(&info->wait_q);
return;
}
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index 6e646b3..25cdcfe 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -228,7 +228,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *start_site;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1598,6 +1598,12 @@ struct task_struct {
+@@ -1599,6 +1599,12 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
@@ -355,7 +355,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The timer bases:
-@@ -970,6 +971,17 @@ int __hrtimer_start_range_ns(struct hrti
+@@ -971,6 +972,17 @@ int __hrtimer_start_range_ns(struct hrti
#endif
}
@@ -373,7 +373,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
timer_stats_hrtimer_set_start_info(timer);
-@@ -1246,6 +1258,8 @@ static void __run_hrtimer(struct hrtimer
+@@ -1247,6 +1259,8 @@ static void __run_hrtimer(struct hrtimer
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -382,7 +382,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* High resolution timer interrupt
* Called with interrupts disabled
-@@ -1289,6 +1303,15 @@ retry:
+@@ -1290,6 +1304,15 @@ retry:
timer = container_of(node, struct hrtimer, node);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 45dd8dd..0e1394d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -12,4 +12,4 @@ Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt3
++-rt4
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index c4e56e5..34c8b74 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -20,7 +20,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -1415,8 +1415,9 @@ static void __raid_run_ops(struct stripe
+@@ -1418,8 +1418,9 @@ static void __raid_run_ops(struct stripe
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -31,7 +31,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -1468,7 +1469,8 @@ static void __raid_run_ops(struct stripe
+@@ -1471,7 +1472,8 @@ static void __raid_run_ops(struct stripe
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -41,7 +41,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
}
#ifdef CONFIG_MULTICORE_RAID456
-@@ -5093,6 +5095,7 @@ static int raid5_alloc_percpu(struct r5c
+@@ -5139,6 +5141,7 @@ static int raid5_alloc_percpu(struct r5c
break;
}
per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
@@ -51,7 +51,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
conf->cpu_notify.notifier_call = raid456_cpu_notify;
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
-@@ -428,6 +428,7 @@ struct r5conf {
+@@ -429,6 +429,7 @@ struct r5conf {
int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
diff --git a/patches/might-sleep-check-for-idle.patch b/patches/might-sleep-check-for-idle.patch
index be17e41..ffa6ad0 100644
--- a/patches/might-sleep-check-for-idle.patch
+++ b/patches/might-sleep-check-for-idle.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7391,7 +7391,8 @@ void __might_sleep(const char *file, int
+@@ -7393,7 +7393,8 @@ void __might_sleep(const char *file, int
static unsigned long prev_jiffy; /* ratelimiting */
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
diff --git a/patches/migrate-disable-rt-variant.patch b/patches/migrate-disable-rt-variant.patch
index 68958de..cd46791 100644
--- a/patches/migrate-disable-rt-variant.patch
+++ b/patches/migrate-disable-rt-variant.patch
@@ -9,19 +9,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -121,11 +121,15 @@ extern void migrate_enable(void);
+@@ -127,11 +127,15 @@ extern void migrate_enable(void);
# define preempt_enable_rt() preempt_enable()
- # define preempt_disable_nort() do { } while (0)
- # define preempt_enable_nort() do { } while (0)
+ # define preempt_disable_nort() barrier()
+ # define preempt_enable_nort() barrier()
+# define migrate_disable_rt() migrate_disable()
+# define migrate_enable_rt() migrate_enable()
#else
- # define preempt_disable_rt() do { } while (0)
- # define preempt_enable_rt() do { } while (0)
+ # define preempt_disable_rt() barrier()
+ # define preempt_enable_rt() barrier()
# define preempt_disable_nort() preempt_disable()
# define preempt_enable_nort() preempt_enable()
-+# define migrate_disable_rt() do { } while (0)
-+# define migrate_enable_rt() do { } while (0)
++# define migrate_disable_rt() barrier()
++# define migrate_enable_rt() barrier()
#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/patches/mm-prepare-pf-disable-discoupling.patch b/patches/mm-prepare-pf-disable-discoupling.patch
index d3a5458..8e1f115 100644
--- a/patches/mm-prepare-pf-disable-discoupling.patch
+++ b/patches/mm-prepare-pf-disable-discoupling.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1452,6 +1452,7 @@ struct task_struct {
+@@ -1453,6 +1453,7 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
p->curr_chain_key = 0;
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -3669,6 +3669,35 @@ unlock:
+@@ -3717,6 +3717,35 @@ unlock:
return 0;
}
diff --git a/patches/mm-remove-preempt-count-from-pf.patch b/patches/mm-remove-preempt-count-from-pf.patch
index a7ca61d..11a3a8e 100644
--- a/patches/mm-remove-preempt-count-from-pf.patch
+++ b/patches/mm-remove-preempt-count-from-pf.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -3672,7 +3672,6 @@ unlock:
+@@ -3720,7 +3720,6 @@ unlock:
#ifdef CONFIG_PREEMPT_RT_FULL
void pagefault_disable(void)
{
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
current->pagefault_disabled++;
/*
* make sure to have issued the store before a pagefault
-@@ -3690,12 +3689,6 @@ void pagefault_enable(void)
+@@ -3738,12 +3737,6 @@ void pagefault_enable(void)
*/
barrier();
current->pagefault_disabled--;
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 6345236..8c46e0f 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -215,7 +215,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
-@@ -1635,6 +1636,12 @@ struct task_struct {
+@@ -1636,6 +1637,12 @@ struct task_struct {
struct rcu_head put_rcu;
int softirq_nestcnt;
#endif
@@ -256,7 +256,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
{
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -3672,6 +3672,7 @@ unlock:
+@@ -3720,6 +3720,7 @@ unlock:
#ifdef CONFIG_PREEMPT_RT_FULL
void pagefault_disable(void)
{
@@ -264,7 +264,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
current->pagefault_disabled++;
/*
* make sure to have issued the store before a pagefault
-@@ -3689,6 +3690,7 @@ void pagefault_enable(void)
+@@ -3737,6 +3738,7 @@ void pagefault_enable(void)
*/
barrier();
current->pagefault_disabled--;
diff --git a/patches/mm-shrink-the-page-frame-to-rt-size.patch b/patches/mm-shrink-the-page-frame-to-rt-size.patch
index 80f968b..414b406 100644
--- a/patches/mm-shrink-the-page-frame-to-rt-size.patch
+++ b/patches/mm-shrink-the-page-frame-to-rt-size.patch
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct page *first_page; /* Compound tail pages */
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -4264,3 +4264,35 @@ void copy_user_huge_page(struct page *ds
+@@ -4312,3 +4312,35 @@ void copy_user_huge_page(struct page *ds
}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
diff --git a/patches/net-make-devnet_rename_seq-a-mutex.patch b/patches/net-make-devnet_rename_seq-a-mutex.patch
index f4f675a..b8bb512 100644
--- a/patches/net-make-devnet_rename_seq-a-mutex.patch
+++ b/patches/net-make-devnet_rename_seq-a-mutex.patch
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
} else {
-@@ -4214,7 +4215,6 @@ static int dev_ifname(struct net *net, s
+@@ -4219,7 +4220,6 @@ static int dev_ifname(struct net *net, s
{
struct net_device *dev;
struct ifreq ifr;
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Fetch the caller's info block.
-@@ -4223,19 +4223,18 @@ static int dev_ifname(struct net *net, s
+@@ -4228,19 +4228,18 @@ static int dev_ifname(struct net *net, s
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
diff --git a/patches/net-netif-rx-ni-use-local-bh-disable.patch b/patches/net-netif-rx-ni-use-local-bh-disable.patch
index 5d96fc4..b83e64e 100644
--- a/patches/net-netif-rx-ni-use-local-bh-disable.patch
+++ b/patches/net-netif-rx-ni-use-local-bh-disable.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3118,11 +3118,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3117,11 +3117,9 @@ int netif_rx_ni(struct sk_buff *skb)
{
int err;
diff --git a/patches/net-netif_rx_ni-migrate-disable.patch b/patches/net-netif_rx_ni-migrate-disable.patch
index b5b5e75..0804b31 100644
--- a/patches/net-netif_rx_ni-migrate-disable.patch
+++ b/patches/net-netif_rx_ni-migrate-disable.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3115,11 +3115,11 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3114,11 +3114,11 @@ int netif_rx_ni(struct sk_buff *skb)
{
int err;
diff --git a/patches/net-tx-action-avoid-livelock-on-rt.patch b/patches/net-tx-action-avoid-livelock-on-rt.patch
index 92a988a..976d039 100644
--- a/patches/net-tx-action-avoid-livelock-on-rt.patch
+++ b/patches/net-tx-action-avoid-livelock-on-rt.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3125,6 +3125,36 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3124,6 +3124,36 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
-@@ -3163,7 +3193,7 @@ static void net_tx_action(struct softirq
+@@ -3162,7 +3192,7 @@ static void net_tx_action(struct softirq
head = head->next_sched;
root_lock = qdisc_lock(q);
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index b69ab2b..70026cc 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1412,6 +1412,10 @@ struct task_struct {
+@@ -1413,6 +1413,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
diff --git a/patches/percpu-rwsem-compilefix.patch b/patches/percpu-rwsem-compilefix.patch
index d6f1fd8..a044c8b 100644
--- a/patches/percpu-rwsem-compilefix.patch
+++ b/patches/percpu-rwsem-compilefix.patch
@@ -1,3 +1,11 @@
+From 49faecbc581de038b423d7abbebe0d7b50ed15ef Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 8 Apr 2013 16:08:46 +0200
+Subject: [PATCH] percpu-rwsem: compile fix
+
+The shortcut on mainline skip lockdep. No idea why this is a good thing.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
lib/percpu-rwsem.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/patches/peter_zijlstra-frob-migrate_disable-2.patch b/patches/peter_zijlstra-frob-migrate_disable-2.patch
index 3c05ad9..4409f5a 100644
--- a/patches/peter_zijlstra-frob-migrate_disable-2.patch
+++ b/patches/peter_zijlstra-frob-migrate_disable-2.patch
@@ -23,7 +23,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -108,28 +108,25 @@ do { \
+@@ -114,28 +114,25 @@ do { \
#endif /* CONFIG_PREEMPT_COUNT */
@@ -31,31 +31,31 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-#else
--# define migrate_disable() do { } while (0)
--# define migrate_enable() do { } while (0)
+-# define migrate_disable() barrier()
+-# define migrate_enable() barrier()
-#endif
-
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_rt() preempt_disable()
# define preempt_enable_rt() preempt_enable()
- # define preempt_disable_nort() do { } while (0)
- # define preempt_enable_nort() do { } while (0)
+ # define preempt_disable_nort() barrier()
+ # define preempt_enable_nort() barrier()
-# define migrate_disable_rt() migrate_disable()
-# define migrate_enable_rt() migrate_enable()
+# ifdef CONFIG_SMP
+ extern void migrate_disable(void);
+ extern void migrate_enable(void);
+# else /* CONFIG_SMP */
-+# define migrate_disable() do { } while (0)
-+# define migrate_enable() do { } while (0)
++# define migrate_disable() barrier()
++# define migrate_enable() barrier()
+# endif /* CONFIG_SMP */
#else
- # define preempt_disable_rt() do { } while (0)
- # define preempt_enable_rt() do { } while (0)
+ # define preempt_disable_rt() barrier()
+ # define preempt_enable_rt() barrier()
# define preempt_disable_nort() preempt_disable()
# define preempt_enable_nort() preempt_enable()
--# define migrate_disable_rt() do { } while (0)
--# define migrate_enable_rt() do { } while (0)
+-# define migrate_disable_rt() barrier()
+-# define migrate_enable_rt() barrier()
+# define migrate_disable() preempt_disable()
+# define migrate_enable() preempt_enable()
#endif
@@ -63,7 +63,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
#ifdef CONFIG_PREEMPT_NOTIFIERS
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1279,7 +1279,9 @@ struct task_struct {
+@@ -1280,7 +1280,9 @@ struct task_struct {
#endif
unsigned int policy;
@@ -73,7 +73,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -2810,11 +2812,22 @@ static inline void set_task_cpu(struct t
+@@ -2811,11 +2813,22 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
@@ -116,7 +116,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
* Callback to arch code if there's nosmp or maxcpus=0 on the
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4734,7 +4734,7 @@ void __cpuinit init_idle(struct task_str
+@@ -4736,7 +4736,7 @@ void __cpuinit init_idle(struct task_str
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -125,7 +125,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
if (p->sched_class && p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
-@@ -4790,7 +4790,7 @@ int set_cpus_allowed_ptr(struct task_str
+@@ -4792,7 +4792,7 @@ int set_cpus_allowed_ptr(struct task_str
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
@@ -134,7 +134,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -4809,6 +4809,7 @@ out:
+@@ -4811,6 +4811,7 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
@@ -142,7 +142,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -4901,6 +4902,7 @@ void migrate_enable(void)
+@@ -4903,6 +4904,7 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
@@ -152,7 +152,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
* Move (not current) task off this cpu, onto dest cpu. We're doing
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1178,7 +1178,7 @@ tracing_generic_entry_update(struct trac
+@@ -1179,7 +1179,7 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
diff --git a/patches/peter_zijlstra-frob-migrate_disable.patch b/patches/peter_zijlstra-frob-migrate_disable.patch
index a6bb210..247a5d3 100644
--- a/patches/peter_zijlstra-frob-migrate_disable.patch
+++ b/patches/peter_zijlstra-frob-migrate_disable.patch
@@ -13,7 +13,7 @@ Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4829,7 +4829,19 @@ void migrate_disable(void)
+@@ -4831,7 +4831,19 @@ void migrate_disable(void)
preempt_enable();
return;
}
@@ -34,7 +34,7 @@ Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
p->migrate_disable = 1;
mask = tsk_cpus_allowed(p);
-@@ -4840,7 +4852,7 @@ void migrate_disable(void)
+@@ -4842,7 +4854,7 @@ void migrate_disable(void)
p->sched_class->set_cpus_allowed(p, mask);
p->nr_cpus_allowed = cpumask_weight(mask);
}
@@ -43,7 +43,7 @@ Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
preempt_enable();
}
EXPORT_SYMBOL(migrate_disable);
-@@ -4868,7 +4880,11 @@ void migrate_enable(void)
+@@ -4870,7 +4882,11 @@ void migrate_enable(void)
return;
}
@@ -56,7 +56,7 @@ Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
p->migrate_disable = 0;
mask = tsk_cpus_allowed(p);
-@@ -4880,7 +4896,7 @@ void migrate_enable(void)
+@@ -4882,7 +4898,7 @@ void migrate_enable(void)
p->nr_cpus_allowed = cpumask_weight(mask);
}
diff --git a/patches/peter_zijlstra-frob-pagefault_disable.patch b/patches/peter_zijlstra-frob-pagefault_disable.patch
index cd63205..4d511e4 100644
--- a/patches/peter_zijlstra-frob-pagefault_disable.patch
+++ b/patches/peter_zijlstra-frob-pagefault_disable.patch
@@ -270,7 +270,7 @@ Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org
retry:
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
-@@ -1108,7 +1108,7 @@ __do_page_fault(struct pt_regs *regs, un
+@@ -1110,7 +1110,7 @@ __do_page_fault(struct pt_regs *regs, un
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
@@ -300,7 +300,7 @@ Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org
#include <asm/processor.h>
-@@ -1452,7 +1453,9 @@ struct task_struct {
+@@ -1453,7 +1454,9 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
@@ -310,7 +310,7 @@ Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
-@@ -1628,6 +1631,17 @@ static inline void set_numabalancing_sta
+@@ -1629,6 +1632,17 @@ static inline void set_numabalancing_sta
}
#endif
diff --git a/patches/peterz-raw_pagefault_disable.patch b/patches/peterz-raw_pagefault_disable.patch
index 0ca7733..2468f28 100644
--- a/patches/peterz-raw_pagefault_disable.patch
+++ b/patches/peterz-raw_pagefault_disable.patch
@@ -129,7 +129,7 @@ Link: http://lkml.kernel.org/n/tip-31keae8mkjiv8esq4rl76cib@git.kernel.org
})
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -3669,6 +3669,7 @@ unlock:
+@@ -3717,6 +3717,7 @@ unlock:
return 0;
}
@@ -137,7 +137,7 @@ Link: http://lkml.kernel.org/n/tip-31keae8mkjiv8esq4rl76cib@git.kernel.org
void pagefault_disable(void)
{
inc_preempt_count();
-@@ -3697,6 +3698,7 @@ void pagefault_enable(void)
+@@ -3745,6 +3746,7 @@ void pagefault_enable(void)
preempt_check_resched();
}
EXPORT_SYMBOL(pagefault_enable);
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 91fb81c..d2e9ff4 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1377,6 +1377,9 @@ struct task_struct {
+@@ -1378,6 +1378,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
diff --git a/patches/powerpc-64bit-PREEMPT_RT-Check-preempt_count-before-.patch b/patches/powerpc-64bit-PREEMPT_RT-Check-preempt_count-before-.patch
new file mode 100644
index 0000000..deb29cf
--- /dev/null
+++ b/patches/powerpc-64bit-PREEMPT_RT-Check-preempt_count-before-.patch
@@ -0,0 +1,30 @@
+From f3700d14f0c52a68373695301cb5fd89252749b2 Mon Sep 17 00:00:00 2001
+From: Priyanka Jain <Priyanka.Jain@freescale.com>
+Date: Mon, 15 Apr 2013 11:18:16 +0530
+Subject: [PATCH 5/5] powerpc/64bit,PREEMPT_RT: Check preempt_count before
+ preempting
+
+In ret_from_except_lite() with CONFIG_PREEMPT enabled,
+add the missing check to compare value of preempt_count
+with zero before continuing with preemption process of
+the current task.
+If preempt_count is non-zero, restore reg and return,
+else continue the preemption process.
+
+Signed-off-by: Priyanka Jain <Priyanka.Jain@freescale.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/kernel/entry_64.S | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -643,6 +643,8 @@ resume_kernel:
+ #ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
+ lwz r8,TI_PREEMPT(r9)
++ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
++ bne restore
+ andi. r0,r4,_TIF_NEED_RESCHED
+ bne+ check_count
+
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 8a3cfa1..463e487 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define add_preempt_count_notrace(val) \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2691,6 +2691,52 @@ static inline int test_tsk_need_resched(
+@@ -2692,6 +2692,52 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -199,7 +199,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
-@@ -2722,11 +2768,6 @@ static inline int signal_pending_state(l
+@@ -2723,11 +2769,6 @@ static inline int signal_pending_state(l
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
@@ -284,7 +284,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
-@@ -1718,6 +1760,9 @@ void sched_fork(struct task_struct *p)
+@@ -1720,6 +1762,9 @@ void sched_fork(struct task_struct *p)
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
#endif
@@ -294,7 +294,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif
-@@ -2926,6 +2971,7 @@ void migrate_disable(void)
+@@ -2928,6 +2973,7 @@ void migrate_disable(void)
return;
}
@@ -302,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
-@@ -2981,6 +3027,7 @@ void migrate_enable(void)
+@@ -2983,6 +3029,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
@@ -310,7 +310,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(migrate_enable);
#else
-@@ -3115,6 +3162,7 @@ need_resched:
+@@ -3117,6 +3164,7 @@ need_resched:
put_prev_task(rq, prev);
next = pick_next_task(rq);
clear_tsk_need_resched(prev);
@@ -318,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rq->skip_clock_update = 0;
if (likely(prev != next)) {
-@@ -3251,6 +3299,14 @@ asmlinkage void __sched notrace preempt_
+@@ -3253,6 +3301,14 @@ asmlinkage void __sched notrace preempt_
if (likely(ti->preempt_count || irqs_disabled()))
return;
@@ -333,7 +333,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
add_preempt_count_notrace(PREEMPT_ACTIVE);
/*
-@@ -4862,7 +4918,9 @@ void __cpuinit init_idle(struct task_str
+@@ -4864,7 +4920,9 @@ void __cpuinit init_idle(struct task_str
/* Set the preempt count _outside_ the spinlocks! */
task_thread_info(idle)->preempt_count = 0;
@@ -450,7 +450,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1166,6 +1166,7 @@ tracing_generic_entry_update(struct trac
+@@ -1167,6 +1167,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -458,7 +458,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->padding = 0;
entry->flags =
-@@ -1176,7 +1177,8 @@ tracing_generic_entry_update(struct trac
+@@ -1177,7 +1178,8 @@ tracing_generic_entry_update(struct trac
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -468,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
}
-@@ -2031,15 +2033,17 @@ get_total_entries(struct trace_array *tr
+@@ -2032,15 +2034,17 @@ get_total_entries(struct trace_array *tr
static void print_lat_help_header(struct seq_file *m)
{
@@ -495,7 +495,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_array *tr, struct seq_file *m)
-@@ -2063,13 +2067,16 @@ static void print_func_help_header(struc
+@@ -2064,13 +2068,16 @@ static void print_func_help_header(struc
static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
{
print_event_info(tr, m);
diff --git a/patches/preempt-nort-rt-variants.patch b/patches/preempt-nort-rt-variants.patch
index 7a045c5..31fb471 100644
--- a/patches/preempt-nort-rt-variants.patch
+++ b/patches/preempt-nort-rt-variants.patch
@@ -31,18 +31,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
barrier(); \
preempt_check_resched(); \
} while (0)
-@@ -104,6 +108,18 @@ do { \
+@@ -110,6 +114,18 @@ do { \
#endif /* CONFIG_PREEMPT_COUNT */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define preempt_disable_rt() preempt_disable()
+# define preempt_enable_rt() preempt_enable()
-+# define preempt_disable_nort() do { } while (0)
-+# define preempt_enable_nort() do { } while (0)
++# define preempt_disable_nort() barrier()
++# define preempt_enable_nort() barrier()
+#else
-+# define preempt_disable_rt() do { } while (0)
-+# define preempt_enable_rt() do { } while (0)
++# define preempt_disable_rt() barrier()
++# define preempt_enable_rt() barrier()
+# define preempt_disable_nort() preempt_disable()
+# define preempt_enable_nort() preempt_enable()
+#endif
diff --git a/patches/rcu-force-preempt-rcu-for-rt.patch b/patches/rcu-force-preempt-rcu-for-rt.patch
new file mode 100644
index 0000000..8297bc1
--- /dev/null
+++ b/patches/rcu-force-preempt-rcu-for-rt.patch
@@ -0,0 +1,24 @@
+Subject: RCU: Force PREEMPT_RCU for PREEMPT-RT
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:30 -0500
+
+PREEMPT_RT relies on PREEMPT_RCU - only allow RCU to be configured
+interactively in the !PREEMPT_RT case.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+---
+ init/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -482,6 +482,7 @@ endchoice
+
+ config PREEMPT_RCU
+ def_bool ( TREE_PREEMPT_RCU || TINY_PREEMPT_RCU )
++ depends on !PREEMPT_RT_FULL
+ help
+ This option enables preemptible-RCU code that is common between
+ the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
diff --git a/patches/revert-80d5c3689b886308247da295a228a54df49a44f6.patch b/patches/revert-80d5c3689b886308247da295a228a54df49a44f6.patch
new file mode 100644
index 0000000..4d0753b
--- /dev/null
+++ b/patches/revert-80d5c3689b886308247da295a228a54df49a44f6.patch
@@ -0,0 +1,28 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: [PATCH] net/cpsw: revert 80d5c3689b886308247da295a228a54df49a44f6
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/ti/cpsw.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1286,7 +1286,7 @@ clean_vid:
+ }
+
+ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+- __be16 proto, u16 vid)
++ unsigned short vid)
+ {
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+@@ -1298,7 +1298,7 @@ static int cpsw_ndo_vlan_rx_add_vid(stru
+ }
+
+ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+- __be16 proto, u16 vid)
++ unsigned short vid)
+ {
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
diff --git a/patches/revert-f646968f8f7c624587de729115d802372b9063dd.patch b/patches/revert-f646968f8f7c624587de729115d802372b9063dd.patch
new file mode 100644
index 0000000..d0c066b
--- /dev/null
+++ b/patches/revert-f646968f8f7c624587de729115d802372b9063dd.patch
@@ -0,0 +1,28 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: [PATCH] net/cpsw: revert f646968f8f7c624587de729115d802372b9063dd
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/ti/cpsw.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1634,7 +1634,7 @@ static int cpsw_probe_dual_emac(struct p
+ priv_sl2->num_irqs = priv->num_irqs;
+ }
+ priv->irq_enabled = true;
+- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++ ndev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+@@ -1872,7 +1872,7 @@ static int cpsw_probe(struct platform_de
+ k++;
+ }
+
+- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++ ndev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
diff --git a/patches/revert-f9a8f83b04e0c362a2fc660dbad980d24af209fc.patch b/patches/revert-f9a8f83b04e0c362a2fc660dbad980d24af209fc.patch
new file mode 100644
index 0000000..f12019b
--- /dev/null
+++ b/patches/revert-f9a8f83b04e0c362a2fc660dbad980d24af209fc.patch
@@ -0,0 +1,45 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: [PATCH] net/cpsw: revert f9a8f83b04e0c362a2fc660dbad980d24af209fc
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/ti/cpmac.c | 4 ++--
+ drivers/net/ethernet/ti/cpsw.c | 2 +-
+ drivers/net/ethernet/ti/davinci_emac.c | 2 +-
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/ti/cpmac.c
++++ b/drivers/net/ethernet/ti/cpmac.c
+@@ -1172,8 +1172,8 @@ static int cpmac_probe(struct platform_d
+ snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
+ mdio_bus_id, phy_id);
+
+- priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
+- PHY_INTERFACE_MODE_MII);
++ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
++ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(priv->phy)) {
+ if (netif_msg_drv(priv))
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -817,7 +817,7 @@ static void cpsw_slave_open(struct cpsw_
+ 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
+
+ slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+- &cpsw_adjust_link, slave->data->phy_if);
++ &cpsw_adjust_link, 0, slave->data->phy_if);
+ if (IS_ERR(slave->phy)) {
+ dev_err(priv->dev, "phy %s not found on slave %d\n",
+ slave->data->phy_id, slave->slave_num);
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1600,7 +1600,7 @@ static int emac_dev_open(struct net_devi
+
+ if (priv->phy_id && *priv->phy_id) {
+ priv->phydev = phy_connect(ndev, priv->phy_id,
+- &emac_adjust_link,
++ &emac_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(priv->phydev)) {
diff --git a/patches/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch b/patches/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch
index b7c97df..e25c6f7 100644
--- a/patches/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch
+++ b/patches/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2876,16 +2876,12 @@ static inline void update_migrate_disabl
+@@ -2878,16 +2878,12 @@ static inline void update_migrate_disabl
*/
mask = tsk_cpus_allowed(p);
diff --git a/patches/rt-sched-have-migrate_disable-ignore-bounded-threads.patch b/patches/rt-sched-have-migrate_disable-ignore-bounded-threads.patch
index 8a59012..ccdbd58 100644
--- a/patches/rt-sched-have-migrate_disable-ignore-bounded-threads.patch
+++ b/patches/rt-sched-have-migrate_disable-ignore-bounded-threads.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2888,7 +2888,7 @@ void migrate_disable(void)
+@@ -2890,7 +2890,7 @@ void migrate_disable(void)
{
struct task_struct *p = current;
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
#endif
-@@ -2919,7 +2919,7 @@ void migrate_enable(void)
+@@ -2921,7 +2921,7 @@ void migrate_enable(void)
unsigned long flags;
struct rq *rq;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic--;
#endif
-@@ -2940,26 +2940,21 @@ void migrate_enable(void)
+@@ -2942,26 +2942,21 @@ void migrate_enable(void)
if (unlikely(migrate_disabled_updated(p))) {
/*
diff --git a/patches/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch b/patches/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch
index 7ccbdea..0f9ae12 100644
--- a/patches/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch
+++ b/patches/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2848,6 +2848,135 @@ static inline void schedule_debug(struct
+@@ -2850,6 +2850,135 @@ static inline void schedule_debug(struct
schedstat_inc(this_rq(), sched_count);
}
@@ -160,7 +160,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->on_rq || rq->skip_clock_update < 0)
-@@ -2941,6 +3070,8 @@ need_resched:
+@@ -2943,6 +3072,8 @@ need_resched:
raw_spin_lock_irq(&rq->lock);
@@ -169,7 +169,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
-@@ -4734,7 +4865,7 @@ void __cpuinit init_idle(struct task_str
+@@ -4736,7 +4867,7 @@ void __cpuinit init_idle(struct task_str
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -178,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (p->sched_class && p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
-@@ -4809,124 +4940,6 @@ out:
+@@ -4811,124 +4942,6 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
diff --git a/patches/sched-adjust-reset-on-fork-always.patch b/patches/sched-adjust-reset-on-fork-always.patch
index 8b7d6d5..b9379be 100644
--- a/patches/sched-adjust-reset-on-fork-always.patch
+++ b/patches/sched-adjust-reset-on-fork-always.patch
@@ -14,7 +14,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4117,10 +4117,13 @@ recheck:
+@@ -4119,10 +4119,13 @@ recheck:
}
/*
diff --git a/patches/sched-better-debug-output-for-might-sleep.patch b/patches/sched-better-debug-output-for-might-sleep.patch
index d563e26..ffb507e 100644
--- a/patches/sched-better-debug-output-for-might-sleep.patch
+++ b/patches/sched-better-debug-output-for-might-sleep.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1642,6 +1642,9 @@ struct task_struct {
+@@ -1643,6 +1643,9 @@ struct task_struct {
pte_t kmap_pte[KM_TYPE_NR];
# endif
#endif
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_NUMA_BALANCING
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2789,8 +2789,13 @@ void __kprobes add_preempt_count(int val
+@@ -2791,8 +2791,13 @@ void __kprobes add_preempt_count(int val
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
PREEMPT_MASK - 10);
#endif
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(add_preempt_count);
-@@ -2833,6 +2838,13 @@ static noinline void __schedule_bug(stru
+@@ -2835,6 +2840,13 @@ static noinline void __schedule_bug(stru
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
dump_stack();
add_taint(TAINT_WARN);
}
-@@ -7308,6 +7320,13 @@ void __might_sleep(const char *file, int
+@@ -7310,6 +7322,13 @@ void __might_sleep(const char *file, int
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
diff --git a/patches/sched-cond-resched.patch b/patches/sched-cond-resched.patch
index fb436a6..5d8a477 100644
--- a/patches/sched-cond-resched.patch
+++ b/patches/sched-cond-resched.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4315,9 +4315,17 @@ static inline int should_resched(void)
+@@ -4317,9 +4317,17 @@ static inline int should_resched(void)
static void __cond_resched(void)
{
diff --git a/patches/sched-consider-pi-boosting-in-setscheduler.patch b/patches/sched-consider-pi-boosting-in-setscheduler.patch
index 064cc44..a482540 100644
--- a/patches/sched-consider-pi-boosting-in-setscheduler.patch
+++ b/patches/sched-consider-pi-boosting-in-setscheduler.patch
@@ -25,7 +25,7 @@ Cc: stable-rt@vger.kernel.org
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2175,6 +2175,7 @@ extern unsigned int sysctl_sched_cfs_ban
+@@ -2176,6 +2176,7 @@ extern unsigned int sysctl_sched_cfs_ban
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -33,7 +33,7 @@ Cc: stable-rt@vger.kernel.org
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
-@@ -2185,6 +2186,10 @@ static inline int rt_mutex_getprio(struc
+@@ -2186,6 +2187,10 @@ static inline int rt_mutex_getprio(struc
{
return p->normal_prio;
}
@@ -67,7 +67,7 @@ Cc: stable-rt@vger.kernel.org
* This can be both boosting and unboosting. task->pi_lock must be held.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3764,7 +3764,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
+@@ -3766,7 +3766,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
*
@@ -77,7 +77,7 @@ Cc: stable-rt@vger.kernel.org
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
-@@ -3987,20 +3988,25 @@ static struct task_struct *find_process_
+@@ -3989,20 +3990,25 @@ static struct task_struct *find_process_
return pid ? find_task_by_vpid(pid) : current;
}
@@ -107,7 +107,7 @@ Cc: stable-rt@vger.kernel.org
}
/*
-@@ -4022,6 +4028,7 @@ static bool check_same_owner(struct task
+@@ -4024,6 +4030,7 @@ static bool check_same_owner(struct task
static int __sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param, bool user)
{
@@ -115,7 +115,7 @@ Cc: stable-rt@vger.kernel.org
int retval, oldprio, oldpolicy = -1, on_rq, running;
unsigned long flags;
const struct sched_class *prev_class;
-@@ -4149,6 +4156,25 @@ recheck:
+@@ -4151,6 +4158,25 @@ recheck:
task_rq_unlock(rq, p, &flags);
goto recheck;
}
@@ -141,7 +141,7 @@ Cc: stable-rt@vger.kernel.org
on_rq = p->on_rq;
running = task_current(rq, p);
if (on_rq)
-@@ -4156,9 +4182,6 @@ recheck:
+@@ -4158,9 +4184,6 @@ recheck:
if (running)
p->sched_class->put_prev_task(rq, p);
@@ -151,7 +151,7 @@ Cc: stable-rt@vger.kernel.org
prev_class = p->sched_class;
__setscheduler(rq, p, policy, param->sched_priority);
-@@ -4171,7 +4194,6 @@ recheck:
+@@ -4173,7 +4196,6 @@ recheck:
*/
enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
}
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index 38ffa56..9d345bb 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -10,7 +10,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1623,6 +1623,9 @@ struct task_struct {
+@@ -1624,6 +1624,9 @@ struct task_struct {
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
#endif
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
-@@ -1813,6 +1816,15 @@ extern struct pid *cad_pid;
+@@ -1814,6 +1817,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -1820,6 +1832,7 @@ static inline void put_task_struct(struc
+@@ -1821,6 +1833,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
diff --git a/patches/sched-enqueue-to-head.patch b/patches/sched-enqueue-to-head.patch
index 8f19417..4ca9985 100644
--- a/patches/sched-enqueue-to-head.patch
+++ b/patches/sched-enqueue-to-head.patch
@@ -50,7 +50,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4164,8 +4164,13 @@ recheck:
+@@ -4166,8 +4166,13 @@ recheck:
if (running)
p->sched_class->set_curr_task(rq);
diff --git a/patches/sched-fix-the-wrong-macro-name-of-CONFIG_DEBUG_PREEM.patch b/patches/sched-fix-the-wrong-macro-name-of-CONFIG_DEBUG_PREEM.patch
new file mode 100644
index 0000000..5d99014
--- /dev/null
+++ b/patches/sched-fix-the-wrong-macro-name-of-CONFIG_DEBUG_PREEM.patch
@@ -0,0 +1,33 @@
+From 9c62439e3ede6a24f325a9cf88bddedc6d8e4ef5 Mon Sep 17 00:00:00 2001
+From: Qiang Huang <h.huangqiang@huawei.com>
+Date: Mon, 8 Apr 2013 19:47:29 +0800
+Subject: [PATCH 3/5] sched: fix the wrong macro name of CONFIG_DEBUG_PREEMPT
+
+Might be a typo, would cause some debug code never be run.
+
+Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2885,7 +2885,7 @@ static noinline void __schedule_bug(stru
+ print_modules();
+ if (irqs_disabled())
+ print_irqtrace_events(prev);
+-#ifdef DEBUG_PREEMPT
++#ifdef CONFIG_DEBUG_PREEMPT
+ if (in_atomic_preempt_off()) {
+ pr_err("Preemption disabled at:");
+ print_ip_sym(current->preempt_disable_ip);
+@@ -7412,7 +7412,7 @@ void __might_sleep(const char *file, int
+ debug_show_held_locks(current);
+ if (irqs_disabled())
+ print_irqtrace_events(current);
+-#ifdef DEBUG_PREEMPT
++#ifdef CONFIG_DEBUG_PREEMPT
+ if (!preempt_count_equals(preempt_offset)) {
+ pr_err("Preemption disabled at:");
+ print_ip_sym(current->preempt_disable_ip);
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index d746c66..3323d63 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7058,7 +7058,8 @@ void __init sched_init(void)
+@@ -7060,7 +7060,8 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-migrate-disable.patch b/patches/sched-migrate-disable.patch
index 1960a04..f701a3b 100644
--- a/patches/sched-migrate-disable.patch
+++ b/patches/sched-migrate-disable.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -108,6 +108,14 @@ do { \
+@@ -114,6 +114,14 @@ do { \
#endif /* CONFIG_PREEMPT_COUNT */
@@ -20,8 +20,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#else
-+# define migrate_disable() do { } while (0)
-+# define migrate_enable() do { } while (0)
++# define migrate_disable() barrier()
++# define migrate_enable() barrier()
+#endif
+
#ifdef CONFIG_PREEMPT_RT_FULL
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define preempt_enable_rt() preempt_enable()
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1279,6 +1279,7 @@ struct task_struct {
+@@ -1280,6 +1280,7 @@ struct task_struct {
#endif
unsigned int policy;
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1630,9 +1631,6 @@ struct task_struct {
+@@ -1631,9 +1632,6 @@ struct task_struct {
#endif
};
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int node, int pages, bool migrated);
extern void set_numabalancing_state(bool enabled);
-@@ -2812,6 +2810,15 @@ static inline void set_task_cpu(struct t
+@@ -2813,6 +2811,15 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4734,11 +4734,12 @@ void __cpuinit init_idle(struct task_str
+@@ -4736,11 +4736,12 @@ void __cpuinit init_idle(struct task_str
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -4789,7 +4790,7 @@ int set_cpus_allowed_ptr(struct task_str
+@@ -4791,7 +4792,7 @@ int set_cpus_allowed_ptr(struct task_str
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -4808,6 +4809,83 @@ out:
+@@ -4810,6 +4811,83 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index d7a4f03..0b3cb6f 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* first nid will either be a valid NID or one of these values */
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2354,12 +2354,24 @@ extern struct mm_struct * mm_alloc(void)
+@@ -2355,12 +2355,24 @@ extern struct mm_struct * mm_alloc(void)
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1845,8 +1845,12 @@ static void finish_task_switch(struct rq
+@@ -1847,8 +1847,12 @@ static void finish_task_switch(struct rq
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
/*
* Remove function-return probe instances associated with this
-@@ -4833,6 +4837,8 @@ static int migration_cpu_stop(void *data
+@@ -4835,6 +4839,8 @@ static int migration_cpu_stop(void *data
#ifdef CONFIG_HOTPLUG_CPU
@@ -107,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -4845,7 +4851,12 @@ void idle_task_exit(void)
+@@ -4847,7 +4853,12 @@ void idle_task_exit(void)
if (mm != &init_mm)
switch_mm(mm, &init_mm, current);
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5162,6 +5173,10 @@ migration_call(struct notifier_block *nf
+@@ -5164,6 +5175,10 @@ migration_call(struct notifier_block *nf
case CPU_DEAD:
calc_load_migrate(rq);
diff --git a/patches/sched-rt-fix-migrate_enable-thinko.patch b/patches/sched-rt-fix-migrate_enable-thinko.patch
index 7634a60..66982b4 100644
--- a/patches/sched-rt-fix-migrate_enable-thinko.patch
+++ b/patches/sched-rt-fix-migrate_enable-thinko.patch
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4886,12 +4886,14 @@ void migrate_enable(void)
+@@ -4888,12 +4888,14 @@ void migrate_enable(void)
*/
rq = this_rq();
raw_spin_lock_irqsave(&rq->lock, flags);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index c5ced9e..d42691a 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -10,7 +10,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1062,6 +1062,7 @@ struct sched_domain;
+@@ -1063,6 +1063,7 @@ struct sched_domain;
#define WF_SYNC 0x01 /* waker goes to sleep after wakup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x04 /* internal use, task got migrated */
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define ENQUEUE_WAKEUP 1
#define ENQUEUE_HEAD 2
-@@ -1238,6 +1239,7 @@ enum perf_event_task_context {
+@@ -1239,6 +1240,7 @@ enum perf_event_task_context {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -2250,6 +2252,7 @@ extern void xtime_update(unsigned long t
+@@ -2251,6 +2253,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
-@@ -1533,6 +1550,18 @@ int wake_up_process(struct task_struct *
+@@ -1535,6 +1552,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
diff --git a/patches/sched-teach-migrate_disable-about-atomic-contexts.patch b/patches/sched-teach-migrate_disable-about-atomic-contexts.patch
index 2871f82..3e57a8b 100644
--- a/patches/sched-teach-migrate_disable-about-atomic-contexts.patch
+++ b/patches/sched-teach-migrate_disable-about-atomic-contexts.patch
@@ -39,7 +39,7 @@ Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1281,6 +1281,9 @@ struct task_struct {
+@@ -1282,6 +1282,9 @@ struct task_struct {
unsigned int policy;
#ifdef CONFIG_PREEMPT_RT_FULL
int migrate_disable;
@@ -51,7 +51,7 @@ Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org
cpumask_t cpus_allowed;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4817,6 +4817,17 @@ void migrate_disable(void)
+@@ -4819,6 +4819,17 @@ void migrate_disable(void)
unsigned long flags;
struct rq *rq;
@@ -69,7 +69,7 @@ Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org
preempt_disable();
if (p->migrate_disable) {
p->migrate_disable++;
-@@ -4865,6 +4876,16 @@ void migrate_enable(void)
+@@ -4867,6 +4878,16 @@ void migrate_enable(void)
unsigned long flags;
struct rq *rq;
diff --git a/patches/series b/patches/series
index b72d275..b079da0 100644
--- a/patches/series
+++ b/patches/series
@@ -20,6 +20,7 @@ seqlock-use-seqcount.patch
generic-cmpxchg-use-raw-local-irq.patch
0001-of-fix-recursive-locking-in-of_get_next_available_ch.patch
+tcp-force-a-dst-refcount-when-prequeue-packet.patch
############################################################
# UPSTREAM FIXES, patches pending
@@ -80,6 +81,20 @@ timekeeping-shorten-seq-count-region.patch
# Submitted to net-dev
############################################################
+# cpsw nightmare
+cpsw-revert-stable-patches.patch
+cpsw-collected_cpsw_patches.patch
+# revert cpsw related patches which rely on changes in net we don't have
+revert-f9a8f83b04e0c362a2fc660dbad980d24af209fc.patch
+revert-f646968f8f7c624587de729115d802372b9063dd.patch
+revert-80d5c3689b886308247da295a228a54df49a44f6.patch
+cpsw-fix-missplaced-init-chunk.patch
+
+# this is strange
+cpsw-net-cpsw-use-a-lock-around-source-testing.patch
+# for lazy me
+cpsw-net-cpsw-Use-fallback-for-active_slave.patch
+
############################################################
# Pending in tip
############################################################
@@ -261,6 +276,7 @@ mm-page-alloc-fix.patch
# MM SWAP
mm-convert-swap-to-percpu-locked.patch
+swap-Use-unique-local-lock-name-for-swap_lock.patch
# MM vmstat
mm-make-vmstat-rt-aware.patch
@@ -395,6 +411,7 @@ rt-mutex-add-sleeping-spinlocks-support.patch
spinlock-types-separate-raw.patch
rtmutex-avoid-include-hell.patch
rt-add-rt-spinlock-to-headers.patch
+spinlock-include-cache.h.patch
rt-add-rt-to-mutex-headers.patch
rwsem-add-rt-variant.patch
rt-add-rt-locks.patch
@@ -430,6 +447,7 @@ fs-ntfs-disable-interrupt-non-rt.patch
# X86
x86-mce-timer-hrtimer.patch
+x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch
x86-stackprot-no-random-on-rt.patch
x86-use-gen-rwsem-spinlocks-rt.patch
x86-disable-debug-stack.patch
@@ -581,11 +599,15 @@ powerpc-fsl-msi-use-a-different-locklcass-for-the-ca.patch
i2c-omap-drop-the-lock-hard-irq-context.patch
spi-omap-mcspi-check-condition-also-after-timeout.patch
HACK-printk-drop-the-logbuf_lock-more-often.patch
-fscache_compile_fix.patch
+fs-fscache-remove-spin_lock-from-the-condition-in-wh.patch
i915_compile_fix.patch
+#
+# XXX need feedback
+drm-i915-move-i915_trace_irq_get-out-of-the-tracing-.patch
# Enable full RT
powerpc-preempt-lazy-support.patch
+powerpc-64bit-PREEMPT_RT-Check-preempt_count-before-.patch
wait-simple-implementation.patch
rcutiny-use-simple-waitqueue.patch
treercu-use-simple-waitqueue.patch
@@ -602,6 +624,7 @@ idle-state.patch
might-sleep-check-for-idle.patch
wait-simple-rework-for-completions.patch
completion-use-simple-wait-queues.patch
+sched-fix-the-wrong-macro-name-of-CONFIG_DEBUG_PREEM.patch
kconfig-disable-a-few-options-rt.patch
kconfig-preempt-rt-full.patch
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 809a02f..5e8eeb3 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1407,6 +1407,7 @@ struct task_struct {
+@@ -1408,6 +1408,7 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 3f27b9e..b30db3f 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -3528,7 +3528,7 @@ static void flush_backlog(void *arg)
+@@ -3533,7 +3533,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -3537,10 +3537,13 @@ static void flush_backlog(void *arg)
+@@ -3542,10 +3542,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int napi_gro_complete(struct sk_buff *skb)
-@@ -4045,10 +4048,17 @@ static void net_rx_action(struct softirq
+@@ -4050,10 +4053,17 @@ static void net_rx_action(struct softirq
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
-@@ -6530,6 +6540,9 @@ static int dev_cpu_callback(struct notif
+@@ -6535,6 +6545,9 @@ static int dev_cpu_callback(struct notif
netif_rx(skb);
input_queue_head_incr(oldsd);
}
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NOTIFY_OK;
}
-@@ -6802,8 +6815,9 @@ static int __init net_dev_init(void)
+@@ -6807,8 +6820,9 @@ static int __init net_dev_init(void)
struct softnet_data *sd = &per_cpu(softnet_data, i);
memset(sd, 0, sizeof(*sd));
diff --git a/patches/slub_delay_ctor_on_rt.patch b/patches/slub_delay_ctor_on_rt.patch
index 1b6a420..c5dbbeb 100644
--- a/patches/slub_delay_ctor_on_rt.patch
+++ b/patches/slub_delay_ctor_on_rt.patch
@@ -5,6 +5,10 @@ It seems that allocation of plenty objects causes latency on ARM since that
code can not be preempted
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/slub.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1346,8 +1346,10 @@ static void setup_object(struct kmem_cac
diff --git a/patches/softirq-fix-unplug-deadlock.patch b/patches/softirq-fix-unplug-deadlock.patch
new file mode 100644
index 0000000..302ee53
--- /dev/null
+++ b/patches/softirq-fix-unplug-deadlock.patch
@@ -0,0 +1,68 @@
+Subject: softirq: Fix unplug deadlock
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 30 Sep 2011 15:59:16 +0200
+
+Subject: [RT] softirq: Fix unplug deadlock
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Fri Sep 30 15:52:14 CEST 2011
+
+If ksoftirqd gets woken during hot-unplug, __thread_do_softirq() will
+call pin_current_cpu() which will block on the held cpu_hotplug.lock.
+Moving the offline check in __thread_do_softirq() before the
+pin_current_cpu() call doesn't work, since the wakeup can happen
+before we mark the cpu offline.
+
+So here we have the ksoftirq thread stuck until hotplug finishes, but
+then the ksoftirq CPU_DOWN notifier issues kthread_stop() which will
+wait for the ksoftirq thread to go away -- while holding the hotplug
+lock.
+
+Sort this by delaying the kthread_stop() until CPU_POST_DEAD, which is
+outside of the cpu_hotplug.lock, but still serialized by the
+cpu_add_remove_lock.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: rostedt <rostedt@goodmis.org>
+Cc: Clark Williams <williams@redhat.com>
+Link: http://lkml.kernel.org/r/1317391156.12973.3.camel@twins
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/softirq.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -1087,9 +1087,8 @@ static int __cpuinit cpu_callback(struct
+ int hotcpu = (unsigned long)hcpu;
+ struct task_struct *p;
+
+- switch (action) {
++ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+- case CPU_UP_PREPARE_FROZEN:
+ p = kthread_create_on_node(run_ksoftirqd,
+ hcpu,
+ cpu_to_node(hotcpu),
+@@ -1102,19 +1101,16 @@ static int __cpuinit cpu_callback(struct
+ per_cpu(ksoftirqd, hotcpu) = p;
+ break;
+ case CPU_ONLINE:
+- case CPU_ONLINE_FROZEN:
+ wake_up_process(per_cpu(ksoftirqd, hotcpu));
+ break;
+ #ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+- case CPU_UP_CANCELED_FROZEN:
+ if (!per_cpu(ksoftirqd, hotcpu))
+ break;
+ /* Unbind so it can run. Fall thru. */
+ kthread_bind(per_cpu(ksoftirqd, hotcpu),
+ cpumask_any(cpu_online_mask));
+- case CPU_DEAD:
+- case CPU_DEAD_FROZEN: {
++ case CPU_POST_DEAD: {
+ static const struct sched_param param = {
+ .sched_priority = MAX_RT_PRIO-1
+ };
diff --git a/patches/softirq-local-lock.patch b/patches/softirq-local-lock.patch
index 12d490e..13d1bb7 100644
--- a/patches/softirq-local-lock.patch
+++ b/patches/softirq-local-lock.patch
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1633,6 +1633,7 @@ struct task_struct {
+@@ -1634,6 +1634,7 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
diff --git a/patches/softirq-make-serving-softirqs-a-task-flag.patch b/patches/softirq-make-serving-softirqs-a-task-flag.patch
index fbd7bed..1753171 100644
--- a/patches/softirq-make-serving-softirqs-a-task-flag.patch
+++ b/patches/softirq-make-serving-softirqs-a-task-flag.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1856,6 +1856,7 @@ extern void thread_group_cputime_adjuste
+@@ -1857,6 +1857,7 @@ extern void thread_group_cputime_adjuste
/*
* Per process flags
*/
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index c5e4d2f..b365fba 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -78,7 +78,7 @@ Cc: stable-rt@vger.kernel.org
#ifndef CONFIG_PREEMPT_RT_BASE
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-+# define preempt_check_resched_rt() do { } while (0)
++# define preempt_check_resched_rt() barrier()
#else
# define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() preempt_check_resched()
@@ -89,7 +89,7 @@ Cc: stable-rt@vger.kernel.org
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
-+#define preempt_check_resched_rt() do { } while (0)
++#define preempt_check_resched_rt() barrier()
#endif /* CONFIG_PREEMPT_COUNT */
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 3947450..f210bb0 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1635,6 +1635,7 @@ struct task_struct {
+@@ -1636,6 +1636,7 @@ struct task_struct {
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
int softirq_nestcnt;
diff --git a/patches/softirq-thread-do-softirq.patch b/patches/softirq-thread-do-softirq.patch
index 8db1db6..cb9b485 100644
--- a/patches/softirq-thread-do-softirq.patch
+++ b/patches/softirq-thread-do-softirq.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __raise_softirq_irqoff(unsigned int nr);
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3118,7 +3118,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3117,7 +3117,7 @@ int netif_rx_ni(struct sk_buff *skb)
preempt_disable();
err = netif_rx(skb);
if (local_softirq_pending())
diff --git a/patches/spinlock-include-cache.h.patch b/patches/spinlock-include-cache.h.patch
new file mode 100644
index 0000000..c7560ac
--- /dev/null
+++ b/patches/spinlock-include-cache.h.patch
@@ -0,0 +1,24 @@
+From 30202d1996c96177fd9eeb5efc2d1869b7939a75 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 25 Apr 2013 18:31:39 +0200
+Subject: [PATCH 2/5] spinlock: include cache.h
+
+It is used by DEFINE_SPINLOCK
+
+Reported-by: <Arpit Goel <arpitgoel@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/spinlock_types_rt.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/linux/spinlock_types_rt.h
++++ b/include/linux/spinlock_types_rt.h
+@@ -5,6 +5,8 @@
+ #error "Do not include directly. Include spinlock_types.h instead"
+ #endif
+
++#include <linux/cache.h>
++
+ /*
+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
+ */
diff --git a/patches/stomp-machine-mark-stomper-thread.patch b/patches/stomp-machine-mark-stomper-thread.patch
index 5379d29..fa2121e 100644
--- a/patches/stomp-machine-mark-stomper-thread.patch
+++ b/patches/stomp-machine-mark-stomper-thread.patch
@@ -10,7 +10,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1859,6 +1859,7 @@ extern void thread_group_cputime_adjuste
+@@ -1860,6 +1860,7 @@ extern void thread_group_cputime_adjuste
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
#define PF_KSWAPD 0x00040000 /* I am kswapd */
diff --git a/patches/swap-Use-unique-local-lock-name-for-swap_lock.patch b/patches/swap-Use-unique-local-lock-name-for-swap_lock.patch
new file mode 100644
index 0000000..b3a2284
--- /dev/null
+++ b/patches/swap-Use-unique-local-lock-name-for-swap_lock.patch
@@ -0,0 +1,101 @@
+From 1288d422e020182955745ee09e26d4e6174923c0 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Tue, 23 Apr 2013 16:10:00 -0400
+Subject: [PATCH] swap: Use unique local lock name for swap_lock
+
+>From lib/Kconfig.debug on CONFIG_FORCE_WEAK_PER_CPU:
+
+---
+s390 and alpha require percpu variables in modules to be
+defined weak to work around addressing range issue which
+puts the following two restrictions on percpu variable
+definitions.
+
+1. percpu symbols must be unique whether static or not
+2. percpu variables can't be defined inside a function
+
+To ensure that generic code follows the above rules, this
+option forces all percpu variables to be defined as weak.
+---
+
+The addition of the local IRQ swap_lock in mm/swap.c broke this config
+as the name "swap_lock" is used through out the kernel. Just do a "git
+grep swap_lock" to see, and the new swap_lock is a local lock which
+defines the swap_lock for per_cpu.
+
+The fix was to rename swap_lock to swapvec_lock which keeps it unique.
+
+Reported-by: Mike Galbraith <bitbucket@online.de>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/swap.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(struct pagevec, lr
+ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+
+ static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+-static DEFINE_LOCAL_IRQ_LOCK(swap_lock);
++static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+
+ /*
+ * This path almost never happens for VM activity - pages are normally
+@@ -407,13 +407,13 @@ static void activate_page_drain(int cpu)
+ void activate_page(struct page *page)
+ {
+ if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+- struct pagevec *pvec = &get_locked_var(swap_lock,
++ struct pagevec *pvec = &get_locked_var(swapvec_lock,
+ activate_page_pvecs);
+
+ page_cache_get(page);
+ if (!pagevec_add(pvec, page))
+ pagevec_lru_move_fn(pvec, __activate_page, NULL);
+- put_locked_var(swap_lock, activate_page_pvecs);
++ put_locked_var(swapvec_lock, activate_page_pvecs);
+ }
+ }
+
+@@ -461,13 +461,13 @@ EXPORT_SYMBOL(mark_page_accessed);
+ */
+ void __lru_cache_add(struct page *page, enum lru_list lru)
+ {
+- struct pagevec *pvec = &get_locked_var(swap_lock, lru_add_pvecs)[lru];
++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvecs)[lru];
+
+ page_cache_get(page);
+ if (!pagevec_space(pvec))
+ __pagevec_lru_add(pvec, lru);
+ pagevec_add(pvec, page);
+- put_locked_var(swap_lock, lru_add_pvecs);
++ put_locked_var(swapvec_lock, lru_add_pvecs);
+ }
+ EXPORT_SYMBOL(__lru_cache_add);
+
+@@ -632,19 +632,19 @@ void deactivate_page(struct page *page)
+ return;
+
+ if (likely(get_page_unless_zero(page))) {
+- struct pagevec *pvec = &get_locked_var(swap_lock,
++ struct pagevec *pvec = &get_locked_var(swapvec_lock,
+ lru_deactivate_pvecs);
+
+ if (!pagevec_add(pvec, page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+- put_locked_var(swap_lock, lru_deactivate_pvecs);
++ put_locked_var(swapvec_lock, lru_deactivate_pvecs);
+ }
+ }
+
+ void lru_add_drain(void)
+ {
+- lru_add_drain_cpu(local_lock_cpu(swap_lock));
+- local_unlock_cpu(swap_lock);
++ lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
++ local_unlock_cpu(swapvec_lock);
+ }
+
+ static void lru_add_drain_per_cpu(struct work_struct *dummy)
diff --git a/patches/tcp-force-a-dst-refcount-when-prequeue-packet.patch b/patches/tcp-force-a-dst-refcount-when-prequeue-packet.patch
new file mode 100644
index 0000000..c1e177e
--- /dev/null
+++ b/patches/tcp-force-a-dst-refcount-when-prequeue-packet.patch
@@ -0,0 +1,25 @@
+From c58d04e22ee74888bbc824e44a6429f8161ccf0c Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 24 Apr 2013 18:34:55 -0700
+Subject: [PATCH] tcp: force a dst refcount when prequeue packet
+
+Before escaping RCU protected section and adding packet into
+prequeue, make sure the dst is refcounted.
+
+Reported-by: Mike Galbraith <bitbucket@online.de>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/tcp.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1045,6 +1045,7 @@ static inline bool tcp_prequeue(struct s
+ if (sysctl_tcp_low_latency || !tp->ucopy.task)
+ return false;
+
++ skb_dst_force(skb);
+ __skb_queue_tail(&tp->ucopy.prequeue, skb);
+ tp->ucopy.memory += skb->truesize;
+ if (tp->ucopy.memory > sk->sk_rcvbuf) {
diff --git a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
index df79a43..25cc931 100644
--- a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3071,7 +3071,16 @@ asmlinkage void __sched notrace preempt_
+@@ -3073,7 +3073,16 @@ asmlinkage void __sched notrace preempt_
do {
add_preempt_count_notrace(PREEMPT_ACTIVE);
diff --git a/patches/treercu-use-simple-waitqueue.patch b/patches/treercu-use-simple-waitqueue.patch
index e89eb39..1c45500 100644
--- a/patches/treercu-use-simple-waitqueue.patch
+++ b/patches/treercu-use-simple-waitqueue.patch
@@ -1,3 +1,9 @@
+From db7ae440c333156392bc56badc610469a4d522ae Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 8 Apr 2013 16:09:57 +0200
+Subject: [PATCH] kernel/treercu: use a simple waitqueue
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/rcutree.c | 13 +++++++------
kernel/rcutree.h | 2 +-
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 01e1f46..629164d 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3092,7 +3092,7 @@ int netif_rx(struct sk_buff *skb)
+@@ -3091,7 +3091,7 @@ int netif_rx(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3102,13 +3102,13 @@ int netif_rx(struct sk_buff *skb)
+@@ -3101,13 +3101,13 @@ int netif_rx(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/user-use-local-irq-nort.patch b/patches/user-use-local-irq-nort.patch
index 3674e06..5c8b2d7 100644
--- a/patches/user-use-local-irq-nort.patch
+++ b/patches/user-use-local-irq-nort.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/user.c
+++ b/kernel/user.c
-@@ -157,11 +157,11 @@ void free_uid(struct user_struct *up)
+@@ -159,11 +159,11 @@ void free_uid(struct user_struct *up)
if (!up)
return;
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index d74a1cc..5c19134 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5242,6 +5242,13 @@ int kvm_arch_init(void *opaque)
+@@ -5238,6 +5238,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch b/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch
new file mode 100644
index 0000000..691f937
--- /dev/null
+++ b/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch
@@ -0,0 +1,168 @@
+From df12896518bc6db6a717de580116a07cdd19fbd9 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Thu, 11 Apr 2013 14:33:34 -0400
+Subject: [PATCH 4/5] x86/mce: Defer mce wakeups to threads for PREEMPT_RT
+
+We had a customer report a lockup on a 3.0-rt kernel that had the
+following backtrace:
+
+[ffff88107fca3e80] rt_spin_lock_slowlock at ffffffff81499113
+[ffff88107fca3f40] rt_spin_lock at ffffffff81499a56
+[ffff88107fca3f50] __wake_up at ffffffff81043379
+[ffff88107fca3f80] mce_notify_irq at ffffffff81017328
+[ffff88107fca3f90] intel_threshold_interrupt at ffffffff81019508
+[ffff88107fca3fa0] smp_threshold_interrupt at ffffffff81019fc1
+[ffff88107fca3fb0] threshold_interrupt at ffffffff814a1853
+
+It actually bugged because the lock was taken by the same owner that
+already had that lock. What happened was the thread that was setting
+itself on a wait queue had the lock when an MCE triggered. The MCE
+interrupt does a wake up on its wait list and grabs the same lock.
+
+NOTE: THIS IS NOT A BUG ON MAINLINE
+
+Sorry for yelling, but as I Cc'd mainline maintainers I want them to
+know that this is an PREEMPT_RT bug only. I only Cc'd them for advice.
+
+On PREEMPT_RT the wait queue locks are converted from normal
+"spin_locks" into an rt_mutex (see the rt_spin_lock_slowlock above).
+These are not to be taken by hard interrupt context. This usually isn't
+a problem as most all interrupts in PREEMPT_RT are converted into
+schedulable threads. Unfortunately that's not the case with the MCE irq.
+
+As wait queue locks are notorious for long hold times, we can not
+convert them to raw_spin_locks without causing issues with -rt. But
+Thomas has created a "simple-wait" structure that uses raw spin locks
+which may have been a good fit.
+
+Unfortunately, wait queues are not the only issue, as the mce_notify_irq
+also does a schedule_work(), which grabs the workqueue spin locks that
+have the exact same issue.
+
+Thus, this patch I'm proposing is to move the actual work of the MCE
+interrupt into a helper thread that gets woken up on the MCE interrupt
+and does the work in a schedulable context.
+
+NOTE: THIS PATCH ONLY CHANGES THE BEHAVIOR WHEN PREEMPT_RT IS SET
+
+Oops, sorry for yelling again, but I want to stress that I keep the same
+behavior of mainline when PREEMPT_RT is not set. Thus, this only changes
+the MCE behavior when PREEMPT_RT is configured.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+[bigeasy@linutronix: make mce_notify_work() a proper prototype, use
+ kthread_run()]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/cpu/mcheck/mce.c | 78 ++++++++++++++++++++++++++++++---------
+ 1 file changed, 61 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -18,6 +18,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/kobject.h>
+ #include <linux/uaccess.h>
++#include <linux/kthread.h>
+ #include <linux/kdebug.h>
+ #include <linux/kernel.h>
+ #include <linux/percpu.h>
+@@ -1345,6 +1346,63 @@ static void mce_do_trigger(struct work_s
+
+ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+
++static void __mce_notify_work(void)
++{
++ /* Not more than two messages every minute */
++ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
++
++ /* wake processes polling /dev/mcelog */
++ wake_up_interruptible(&mce_chrdev_wait);
++
++ /*
++ * There is no risk of missing notifications because
++ * work_pending is always cleared before the function is
++ * executed.
++ */
++ if (mce_helper[0] && !work_pending(&mce_trigger_work))
++ schedule_work(&mce_trigger_work);
++
++ if (__ratelimit(&ratelimit))
++ pr_info(HW_ERR "Machine check events logged\n");
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++struct task_struct *mce_notify_helper;
++
++static int mce_notify_helper_thread(void *unused)
++{
++ while (1) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ if (kthread_should_stop())
++ break;
++ __mce_notify_work();
++ }
++ return 0;
++}
++
++static int mce_notify_work_init(void)
++{
++ mce_notify_helper = kthread_run(mce_notify_helper_thread, NULL,
++ "mce-notify");
++ if (!mce_notify_helper)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static void mce_notify_work(void)
++{
++ wake_up_process(mce_notify_helper);
++}
++#else
++static void mce_notify_work(void)
++{
++ __mce_notify_work();
++}
++static inline int mce_notify_work_init(void) { return 0; }
++#endif
++
+ /*
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+@@ -1352,24 +1410,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+ */
+ int mce_notify_irq(void)
+ {
+- /* Not more than two messages every minute */
+- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+-
+ if (test_and_clear_bit(0, &mce_need_notify)) {
+- /* wake processes polling /dev/mcelog */
+- wake_up_interruptible(&mce_chrdev_wait);
+-
+- /*
+- * There is no risk of missing notifications because
+- * work_pending is always cleared before the function is
+- * executed.
+- */
+- if (mce_helper[0] && !work_pending(&mce_trigger_work))
+- schedule_work(&mce_trigger_work);
+-
+- if (__ratelimit(&ratelimit))
+- pr_info(HW_ERR "Machine check events logged\n");
+-
++ mce_notify_work();
+ return 1;
+ }
+ return 0;
+@@ -2431,6 +2473,8 @@ static __init int mcheck_init_device(voi
+ /* register character device /dev/mcelog */
+ misc_register(&mce_chrdev_device);
+
++ err = mce_notify_work_init();
++
+ return err;
+ }
+ device_initcall_sync(mcheck_init_device);
diff --git a/patches/x86-perf-uncore-deal-with-kfree.patch b/patches/x86-perf-uncore-deal-with-kfree.patch
index 94e5b9b..7a2f32a 100644
--- a/patches/x86-perf-uncore-deal-with-kfree.patch
+++ b/patches/x86-perf-uncore-deal-with-kfree.patch
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define MAX_LBR_ENTRIES 16
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
-@@ -1715,7 +1715,7 @@ static void intel_pmu_cpu_dying(int cpu)
+@@ -1721,7 +1721,7 @@ static void intel_pmu_cpu_dying(int cpu)
pc = cpuc->shared_regs;
if (pc) {
if (pc->core_id == -1 || --pc->refcnt == 0)