author    Paul Gortmaker <paul.gortmaker@windriver.com>  2013-11-18 16:27:16 -0500
committer Paul Gortmaker <paul.gortmaker@windriver.com>  2013-11-18 16:27:16 -0500
commit    3e54bacfe1c92337be5256a19db8c05762d7afa3 (patch)
tree      5ae81cee3df6231cec05031a291da83b66f00633
parent    2fffea6a0eee52cf99a7fea4e856d41b940b4e79 (diff)
patches-3.10.18-rt14.tar.xz (tag: v3.10.18-rt14)

md5sum:
2e8fed5b41f222ccc30d9f1c357204f9  patches-3.10.18-rt14.tar.xz

Announce:
------------------
Dear RT folks!

I'm pleased to announce the v3.10.18-rt14 patch set.

Changes since v3.10.18-rt13:
- a SLUB fix. The delayed free might use the wrong kmem_cache structure.
- update to Yang Shi's memcontrol sleeping-while-atomic fix. Thank you,
  Yang Shi.
- dropping the wbinvd in i915. The do_wbinvd module parameter is gone;
  the fix from mainline has been backported. This has been compile
  tested, some feedback would be nice.

Known issues:

- SLAB support not working
- The cpsw network driver shows some issues.
- bcache is disabled.
- an ancient race (since we got sleeping spinlocks) where the
  TASK_TRACED state is temporarily replaced while waiting on a rw lock
  and the task can't be traced.

The delta patch against v3.10.18-rt13 is appended below and can be
found here:

  https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/incr/patch-3.10.18-rt13-rt14.patch.xz

The RT patch against 3.10.18 can be found here:

  https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patch-3.10.18-rt14.patch.xz

The split quilt queue is available at:

  https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.18-rt14.tar.xz

Sebastian
------------------

http://marc.info/?l=linux-rt-users&m=138394894205686&w=2

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
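As a rough illustration of the SLUB change carried in this series (a
plain userspace C sketch, not the actual mm/slub.c code; the struct and
function names below are simplified stand-ins for kmem_cache,
page->slab_cache and free_delayed()): the delayed-free list can hold
pages owned by several different caches, so the free path has to take
the owning cache from the page itself rather than from whichever cache
the draining allocation happens to run under.

/*
 * Userspace sketch only -- not the real mm/slub.c code.  It models why
 * free_delayed() must read the owning cache from the page instead of
 * trusting the kmem_cache passed in by the caller.
 */
#include <stdio.h>

struct cache {				/* stand-in for struct kmem_cache */
	const char *name;
	int debug;			/* stand-in for SLAB_DEBUG_FLAGS */
};

struct page {				/* stand-in for struct page */
	struct cache *slab_cache;	/* owning cache, as in page->slab_cache */
	struct page *next;		/* stand-in for the lru list linkage */
};

/* buggy variant: applies the caller's debug layout to every queued page */
static void free_delayed_buggy(struct cache *s, struct page *h)
{
	for (; h; h = h->next)
		printf("page of '%s' checked with debug=%d (from '%s')\n",
		       h->slab_cache->name, s->debug, s->name);
}

/* fixed variant: uses the debug layout of the cache that owns the page */
static void free_delayed_fixed(struct page *h)
{
	for (; h; h = h->next)
		printf("page of '%s' checked with debug=%d (from '%s')\n",
		       h->slab_cache->name, h->slab_cache->debug,
		       h->slab_cache->name);
}

int main(void)
{
	struct cache dentry = { "dentry", 1 };	/* debugging enabled  */
	struct cache bio    = { "bio",    0 };	/* debugging disabled */
	struct page p1 = { &dentry, NULL };
	struct page p2 = { &bio, &p1 };		/* mixed-cache delayed list */

	/* an allocation from 'bio' drains the list: the buggy path checks
	 * the dentry page against bio's layout and reports bogus errors */
	free_delayed_buggy(&bio, &p2);
	free_delayed_fixed(&p2);
	return 0;
}

In the real patch below the same idea amounts to passing
page->slab_cache to __free_slab() instead of the cache argument.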
 patches/Revert-drm-i915-Workaround-incoherence-between-fence.patch |  93
 patches/gpu-i915-allow-the-user-not-to-do-the-wbinvd.patch         |  59
 patches/localversion.patch                                         |   2
 patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch |  43
 patches/mm-slub-do-not-rely-on-slab_cached-passed-to-free_de.patch | 167
 patches/series                                                     |   3
 6 files changed, 279 insertions(+), 88 deletions(-)
diff --git a/patches/Revert-drm-i915-Workaround-incoherence-between-fence.patch b/patches/Revert-drm-i915-Workaround-incoherence-between-fence.patch
new file mode 100644
index 0000000..0dd92b4
--- /dev/null
+++ b/patches/Revert-drm-i915-Workaround-incoherence-between-fence.patch
@@ -0,0 +1,93 @@
+From 8064cf36ef9115163db65b3eb8eb95549fa1321e Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Wed, 10 Jul 2013 13:36:24 +0100
+Subject: [PATCH] Revert "drm/i915: Workaround incoherence between fences and
+ LLC across multiple CPUs"
+
+This reverts commit 25ff119 and the follow on for Valleyview commit 2dc8aae.
+
+commit 25ff1195f8a0b3724541ae7bbe331b4296de9c06
+Author: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu Apr 4 21:31:03 2013 +0100
+
+ drm/i915: Workaround incoherence between fences and LLC across multiple CPUs
+
+commit 2dc8aae06d53458dd3624dc0accd4f81100ee631
+Author: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Wed May 22 17:08:06 2013 +0100
+
+ drm/i915: Workaround incoherence with fence updates on Valleyview
+
+Jon Bloomfield came up with a plausible explanation and cheap fix
+(drm/i915: Fix incoherence with fence updates on Sandybridge+) for the
+race condition, so lets run with it.
+
+This is a candidate for stable as the old workaround incurs a
+significant cost (calling wbinvd on all CPUs before performing the
+register write) for some workloads as noted by Carsten Emde.
+
+Link: http://lists.freedesktop.org/archives/intel-gfx/2013-June/028819.html
+References: https://www.osadl.org/?id=1543#c7602
+References: https://bugs.freedesktop.org/show_bug.cgi?id=63825
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Carsten Emde <C.Emde@osadl.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+Conflicts:
+ drivers/gpu/drm/i915/i915_gem.c
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_gem.c | 28 +++++-----------------------
+ 1 file changed, 5 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index f6e9b6d..080b1b2 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2713,35 +2713,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
+ return fence - dev_priv->fence_regs;
+ }
+
+-static void i915_gem_write_fence__ipi(void *data)
+-{
+- wbinvd();
+-}
+-
+ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
+ {
+- struct drm_device *dev = obj->base.dev;
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- int fence_reg = fence_number(dev_priv, fence);
+-
+- /* In order to fully serialize access to the fenced region and
+- * the update to the fence register we need to take extreme
+- * measures on SNB+. In theory, the write to the fence register
+- * flushes all memory transactions before, and coupled with the
+- * mb() placed around the register write we serialise all memory
+- * operations with respect to the changes in the tiler. Yet, on
+- * SNB+ we need to take a step further and emit an explicit wbinvd()
+- * on each processor in order to manually flush all memory
+- * transactions before updating the fence register.
+- */
+- if (HAS_LLC(obj->base.dev))
+- on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+- i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
++ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
++ int reg = fence_number(dev_priv, fence);
++
++ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+ if (enable) {
+- obj->fence_reg = fence_reg;
++ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+ } else {
+--
+1.8.4.2
+
diff --git a/patches/gpu-i915-allow-the-user-not-to-do-the-wbinvd.patch b/patches/gpu-i915-allow-the-user-not-to-do-the-wbinvd.patch
deleted file mode 100644
index 7b83565..0000000
--- a/patches/gpu-i915-allow-the-user-not-to-do-the-wbinvd.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From b580b7eedc8ee3990b118003c4793291387c40ac Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 21 Jun 2013 11:38:28 +0200
-Subject: [PATCH] gpu: i915: allow the user not to do the wbinvd
-
-The wbinvd() renders the system with i915 unusable on RT. Using this
-expensive instruction avoids GPU trouble according to
- https://bugs.freedesktop.org/show_bug.cgi?id=62191
-
-As a workaround for RT it is recommended to pin each GPU related process
-to the same CPU and then disable this instruction via the module
-paramter.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/gpu/drm/i915/i915_gem.c | 17 +++++++++++++++--
- 1 file changed, 15 insertions(+), 2 deletions(-)
-
---- a/drivers/gpu/drm/i915/i915_gem.c
-+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -35,6 +35,7 @@
- #include <linux/swap.h>
- #include <linux/pci.h>
- #include <linux/dma-buf.h>
-+#include <linux/module.h>
-
- static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
- static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-@@ -2713,6 +2714,10 @@ static inline int fence_number(struct dr
- return fence - dev_priv->fence_regs;
- }
-
-+static bool do_wbinvd = true;
-+module_param(do_wbinvd, bool, 0644);
-+MODULE_PARM_DESC(do_wbinvd, "Do expensive synchronization. Say no after you pin each GPU process to the same CPU in order to lower the latency.");
-+
- static void i915_gem_write_fence__ipi(void *data)
- {
- wbinvd();
-@@ -2736,8 +2741,16 @@ static void i915_gem_object_update_fence
- * on each processor in order to manually flush all memory
- * transactions before updating the fence register.
- */
-- if (HAS_LLC(obj->base.dev))
-- on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
-+ if (HAS_LLC(obj->base.dev)) {
-+ if (do_wbinvd) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ pr_err_once("WARNING! The i915 invalidates all caches which increases the latency.");
-+ pr_err_once("As a workaround use 'i915.do_wbinvd=no' and PIN each process doing ");
-+ pr_err_once("any kind of GPU activity to the same CPU to avoid problems.");
-+#endif
-+ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
-+ }
-+ }
- i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
-
- if (enable) {
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 88925b7..113e534 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -12,4 +12,4 @@ Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt13
++-rt14
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 9c38dc0..8f9bee9 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -1,8 +1,7 @@
-From b786e160853afe3c20b765e848b6ebcce434f090 Mon Sep 17 00:00:00 2001
From: Yang Shi <yang.shi@windriver.com>
-Date: Fri, 4 Oct 2013 14:58:57 -0700
-Subject: [PATCH] mm/memcontrol: Don't call schedule_work_on in preemption
- disabled context
+Subject: [V3 PATCH] mm/memcontrol: Don't call schedule_work_on in preemption disabled context
+Date: Wed, 30 Oct 2013 11:48:33 -0700
+Message-ID: <1383158913-16325-1-git-send-email-yang.shi@windriver.com>
The following trace is triggered when running ltp oom test cases:
@@ -38,49 +37,39 @@ Call Trace:
[<ffffffff8169e4c2>] page_fault+0x22/0x30
So, to prevent schedule_work_on from being called in preempt disabled context,
-remove the pair of get_cpu/put_cpu and drain_local_stock shortcut.
+replace the pair of get/put_cpu() to get/put_cpu_light().
Cc: stable-rt@vger.kernel.org
Signed-off-by: Yang Shi <yang.shi@windriver.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- mm/memcontrol.c | 12 +++---------
- 1 file changed, 3 insertions(+), 9 deletions(-)
+
+ mm/memcontrol.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 905ce72..f113cb7 100644
+index 82a187a..9944356 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2436,11 +2436,10 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
- */
- static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
- {
-- int cpu, curcpu;
-+ int cpu;
+@@ -2440,7 +2440,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
- curcpu = get_cpu();
++ curcpu = get_cpu_light();
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -2450,14 +2449,9 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
- continue;
- if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
- continue;
-- if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
-- if (cpu == curcpu)
-- drain_local_stock(&stock->work);
-- else
-- schedule_work_on(cpu, &stock->work);
-- }
-+ if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
-+ schedule_work_on(cpu, &stock->work);
+@@ -2457,7 +2457,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
+ schedule_work_on(cpu, &stock->work);
+ }
}
- put_cpu();
++ put_cpu_light();
if (!sync)
goto out;
--
-1.8.4.rc3
+1.7.5.4
+
diff --git a/patches/mm-slub-do-not-rely-on-slab_cached-passed-to-free_de.patch b/patches/mm-slub-do-not-rely-on-slab_cached-passed-to-free_de.patch
new file mode 100644
index 0000000..d70562b
--- /dev/null
+++ b/patches/mm-slub-do-not-rely-on-slab_cached-passed-to-free_de.patch
@@ -0,0 +1,167 @@
+From b5da5582b114c222a5ec924e0cc6d9a418481a5f Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 8 Nov 2013 12:01:18 +0100
+Subject: [PATCH] mm/slub: do not rely on slab_cached passed to free_delayed()
+
+You can get this backtrace:
+| =============================================================================
+| BUG dentry (Not tainted): Padding overwritten. 0xf15e1ec0-0xf15e1f1f
+| -----------------------------------------------------------------------------
+|
+| Disabling lock debugging due to kernel taint
+| INFO: Slab 0xf6f10b00 objects=21 used=0 fp=0xf15e0480 flags=0x2804080
+| CPU: 6 PID: 1 Comm: systemd Tainted: G B 3.10.17-rt12+ #197
+| Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+| f6f10b00 f6f10b00 f20a3be8 c149da9e f20a3c74 c110b0d6 c15e010c f6f10b00
+| 00000015 00000000 f15e0480 02804080 64646150 20676e69 7265766f 74697277
+| 2e6e6574 66783020 31653531 2d306365 31667830 66316535 00006631 00000046
+| Call Trace:
+| [<c149da9e>] dump_stack+0x16/0x18
+| [<c110b0d6>] slab_err+0x76/0x80
+| [<c110c231>] ? deactivate_slab+0x3f1/0x4a0
+| [<c110c231>] ? deactivate_slab+0x3f1/0x4a0
+| [<c110b56f>] slab_pad_check.part.54+0xbf/0x150
+| [<c110ba04>] __free_slab+0x124/0x130
+| [<c149bb79>] ? __slab_alloc.constprop.69+0x27b/0x5d3
+| [<c110ba39>] free_delayed+0x29/0x40
+| [<c149bec5>] __slab_alloc.constprop.69+0x5c7/0x5d3
+| [<c1126062>] ? __d_alloc+0x22/0x150
+| [<c1126062>] ? __d_alloc+0x22/0x150
+| [<c11265b0>] ? __d_lookup_rcu+0x160/0x160
+| [<c110d912>] kmem_cache_alloc+0x162/0x190
+| [<c112668b>] ? __d_lookup+0xdb/0x1d0
+| [<c1126062>] ? __d_alloc+0x22/0x150
+| [<c1126062>] __d_alloc+0x22/0x150
+| [<c11261a5>] d_alloc+0x15/0x60
+| [<c111aec1>] lookup_dcache+0x71/0xa0
+| [<c111af0e>] __lookup_hash+0x1e/0x40
+| [<c111b374>] lookup_slow+0x34/0x90
+| [<c111c3c7>] link_path_walk+0x737/0x780
+| [<c111a3d4>] ? path_get+0x24/0x40
+| [<c111a3df>] ? path_get+0x2f/0x40
+| [<c111bfb2>] link_path_walk+0x322/0x780
+| [<c111e3ed>] path_openat.isra.54+0x7d/0x400
+| [<c111f32b>] do_filp_open+0x2b/0x70
+| [<c11110a2>] do_sys_open+0xe2/0x1b0
+| [<c14a319f>] ? restore_all+0xf/0xf
+| [<c102bb80>] ? vmalloc_sync_all+0x10/0x10
+| [<c1111192>] SyS_open+0x22/0x30
+| [<c14a393e>] sysenter_do_call+0x12/0x36
+| Padding f15e1de0: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZZZZZ
+| Padding f15e1df0: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZZZZZ
+| Padding f15e1e00: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+| Padding f15e1e10: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+| Padding f15e1e20: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+| Padding f15e1e30: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+| Padding f15e1e40: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+| Padding f15e1e50: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+| Padding f15e1e60: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+| Padding f15e1e70: 6b 6b 6b 6b 6b 6b 6b a5 bb bb bb bb 80 01 5e f1 kkkkkkk.......^.
+| Padding f15e1e80: 53 7e 0d c1 c3 bd 49 c1 12 d9 10 c1 53 7e 0d c1 S~....I.....S~..
+| Padding f15e1e90: 60 7f 0d c1 e0 05 14 c1 ce d1 13 c1 96 d4 13 c1 `...............
+| Padding f15e1ea0: e9 e0 13 c1 f7 48 17 c1 13 6a 17 c1 41 fb 17 c1 .....H...j..A...
+| Padding f15e1eb0: 07 a4 11 c1 22 af 11 c1 74 b3 11 c1 06 d2 11 c1 ...."...t.......
+| Padding f15e1ec0: c6 d2 11 c1 06 00 00 00 01 00 00 00 f3 dc fe ff ................
+| Padding f15e1ed0: 73 7e 0d c1 5d b4 49 c1 ec c4 10 c1 73 7e 0d c1 s~..].I.....s~..
+| Padding f15e1ee0: 50 83 0d c1 79 09 14 c1 fd b9 13 c1 5a f2 13 c1 P...y.......Z...
+| Padding f15e1ef0: 7b 1c 28 c1 03 20 28 c1 9e 25 28 c1 b3 26 28 c1 {.(.. (..%(..&(.
+| Padding f15e1f00: f4 ab 34 c1 bc 89 30 c1 e5 0d 0a c1 c1 0f 0a c1 ..4...0.........
+| Padding f15e1f10: ae 34 0a c1 00 00 00 00 00 00 00 00 f3 dc fe ff .4..............
+| FIX dentry: Restoring 0xf15e1de0-0xf15e1f1f=0x5a
+|
+| =============================================================================
+| BUG dentry (Tainted: G B ): Redzone overwritten
+| -----------------------------------------------------------------------------
+|
+| INFO: 0xf15e009c-0xf15e009f. First byte 0x96 instead of 0xbb
+| INFO: Allocated in __ext4_get_inode_loc+0x3b7/0x460 age=1054261382 cpu=3239295485 pid=-1055657382
+| ext4_iget+0x63/0x9c0
+| ext4_lookup+0x71/0x180
+| lookup_real+0x17/0x40
+| do_last.isra.53+0x72b/0xbc0
+| path_openat.isra.54+0x9d/0x400
+| do_filp_open+0x2b/0x70
+| do_sys_open+0xe2/0x1b0
+| 0x7
+| 0x1
+| 0xfffedcf2
+| mempool_free_slab+0x13/0x20
+| __slab_free+0x3d/0x3ae
+| kmem_cache_free+0x1bc/0x1d0
+| mempool_free_slab+0x13/0x20
+| mempool_free+0x40/0x90
+| bio_put+0x59/0x70
+| INFO: Freed in blk_update_bidi_request+0x13/0x70 age=2779021993 cpu=1515870810 pid=1515870810
+| __blk_end_bidi_request+0x1e/0x50
+| __blk_end_request_all+0x23/0x40
+| virtblk_done+0xf4/0x260
+| vring_interrupt+0x2c/0x50
+| handle_irq_event_percpu+0x45/0x1f0
+| handle_irq_event+0x31/0x50
+| handle_edge_irq+0x6e/0x130
+| 0x5
+| INFO: Slab 0xf6f10b00 objects=21 used=0 fp=0xf15e0480 flags=0x2804080
+| INFO: Object 0xf15e0000 @offset=0 fp=0xc113e0e9
+
+If memory is freed while irqs_disabled(), the page is added to the
+slub_free_list. The following allocation might then be from a
+different kmem_cache. If the two caches have different SLAB_DEBUG_FLAGS
+then SLUB might complain about bad markers which are actually not
+used.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/slub.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 1378cd1..31c6f9f 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1428,13 +1428,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
+ __free_memcg_kmem_pages(page, order);
+ }
+
+-static void free_delayed(struct kmem_cache *s, struct list_head *h)
++static void free_delayed(struct list_head *h)
+ {
+ while(!list_empty(h)) {
+ struct page *page = list_first_entry(h, struct page, lru);
+
+ list_del(&page->lru);
+- __free_slab(s, page);
++ __free_slab(page->slab_cache, page);
+ }
+ }
+
+@@ -2007,7 +2007,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+ list_splice_init(&f->list, &tofree);
+ raw_spin_unlock(&f->lock);
+ local_irq_restore(flags);
+- free_delayed(s, &tofree);
++ free_delayed(&tofree);
+ oldpage = NULL;
+ pobjects = 0;
+ pages = 0;
+@@ -2083,7 +2083,7 @@ static void flush_all(struct kmem_cache *s)
+ raw_spin_lock_irq(&f->lock);
+ list_splice_init(&f->list, &tofree);
+ raw_spin_unlock_irq(&f->lock);
+- free_delayed(s, &tofree);
++ free_delayed(&tofree);
+ }
+ }
+
+@@ -2331,7 +2331,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ list_splice_init(&f->list, &tofree);
+ raw_spin_unlock(&f->lock);
+ local_irq_restore(flags);
+- free_delayed(s, &tofree);
++ free_delayed(&tofree);
+ return freelist;
+
+ new_slab:
+--
+1.8.4.2
+
diff --git a/patches/series b/patches/series
index e00c7e4..62048b2 100644
--- a/patches/series
+++ b/patches/series
@@ -7,6 +7,7 @@
############################################################
hpsa-fix-warning-with-smp_processor_id-in-preemptibl.patch
genirq-Set-irq-thread-to-RT-priority-on-creation.patch
+Revert-drm-i915-Workaround-incoherence-between-fence.patch
############################################################
# UPSTREAM FIXES, patches pending
@@ -284,6 +285,7 @@ mm-allow-slab-rt.patch
# MM SLUB
mm-enable-slub.patch
+mm-slub-do-not-rely-on-slab_cached-passed-to-free_de.patch
slub-enable-irqs-for-no-wait.patch
slub_delay_ctor_on_rt.patch
@@ -618,7 +620,6 @@ mmci-remove-bogus-irq-save.patch
# I915
drm-remove-preempt_disable-from-drm_calc_vbltimestam.patch
i915_compile_fix.patch
-gpu-i915-allow-the-user-not-to-do-the-wbinvd.patch
drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
# SIMPLE WAITQUEUE