author     Greg Kroah-Hartman <gregkh@suse.de>    2011-08-22 12:53:58 -0700
committer  Greg Kroah-Hartman <gregkh@suse.de>    2011-08-22 12:53:58 -0700
commit     682fbade188b58ff3d6b8545819fb340aa8e9acc (patch)
tree       430998b8bf29be3c93c7bc4ea25ee7baef336a44
parent     b71e70e5a8c35775e424982e4da9b0df4cb722d7 (diff)
download   stable-queue-682fbade188b58ff3d6b8545819fb340aa8e9acc.tar.gz
3.0 patches
-rw-r--r--  queue-3.0/series                                                            2
-rw-r--r--  queue-3.0/xen-do-not-enable-pv-ipis-when-vector-callback-not-present.patch  54
-rw-r--r--  queue-3.0/xen-x86-replace-order-based-range-checking-of-m2p-table-by.patch  105
3 files changed, 161 insertions, 0 deletions
diff --git a/queue-3.0/series b/queue-3.0/series
index edd74fc08f..ebdd2a8037 100644
--- a/queue-3.0/series
+++ b/queue-3.0/series
@@ -21,3 +21,5 @@ ext4-resolve-the-hang-of-direct-i-o-read-in-handling-ext4_io_end_unwritten.patch
ext4-fix-nomblk_io_submit-option-so-it-correctly-converts-uninit-blocks.patch
xen-blkfront-drop-name-and-minor-adjustments-for-emulated.patch
xen-blkfront-fix-one-off-warning-about-name-clash.patch
+xen-x86-replace-order-based-range-checking-of-m2p-table-by.patch
+xen-do-not-enable-pv-ipis-when-vector-callback-not-present.patch
diff --git a/queue-3.0/xen-do-not-enable-pv-ipis-when-vector-callback-not-present.patch b/queue-3.0/xen-do-not-enable-pv-ipis-when-vector-callback-not-present.patch
new file mode 100644
index 0000000000..3b11022b90
--- /dev/null
+++ b/queue-3.0/xen-do-not-enable-pv-ipis-when-vector-callback-not-present.patch
@@ -0,0 +1,54 @@
+From 3c05c4bed4ccce3f22f6d7899b308faae24ad198 Mon Sep 17 00:00:00 2001
+From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+Date: Wed, 17 Aug 2011 15:15:00 +0200
+Subject: xen: Do not enable PV IPIs when vector callback not present
+
+From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+
+commit 3c05c4bed4ccce3f22f6d7899b308faae24ad198 upstream.
+
+Fix regression for HVM case on older (<4.1.1) hypervisors caused by
+
+ commit 99bbb3a84a99cd04ab16b998b20f01a72cfa9f4f
+ Author: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+ Date: Thu Dec 2 17:55:10 2010 +0000
+
+ xen: PV on HVM: support PV spinlocks and IPIs
+
+This change replaced the SMP operations with event based handlers without
+taking into account that this only works when the hypervisor supports
+callback vectors. This causes unexplainable hangs early on boot for
+HVM guests with more than one CPU.
+
+BugLink: http://bugs.launchpad.net/bugs/791850
+
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+Tested-and-Reported-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/xen/smp.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -521,8 +521,6 @@ static void __init xen_hvm_smp_prepare_c
+ native_smp_prepare_cpus(max_cpus);
+ WARN_ON(xen_smp_intr_init(0));
+
+- if (!xen_have_vector_callback)
+- return;
+ xen_init_lock_cpu(0);
+ xen_init_spinlocks();
+ }
+@@ -546,6 +544,8 @@ static void xen_hvm_cpu_die(unsigned int
+
+ void __init xen_hvm_smp_init(void)
+ {
++ if (!xen_have_vector_callback)
++ return;
+ smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+ smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+ smp_ops.cpu_up = xen_hvm_cpu_up;
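
The hunks above move the xen_have_vector_callback guard from xen_hvm_smp_prepare_cpus() to the top of xen_hvm_smp_init(), so that on hypervisors without vector callbacks none of the PV smp_ops are installed and the guest keeps the native (emulated APIC) SMP path. Below is a minimal userspace sketch of that guard logic only, with invented names (pv_ipi_ops_installed stands in for the smp_ops overrides); it is not the kernel code itself.

/*
 * Illustrative sketch (not kernel code).  Before the fix, the PV smp_ops
 * were always installed and only the spinlock/lock setup was skipped, so a
 * guest on a hypervisor without vector callbacks still tried to deliver
 * IPIs over event channels it could never receive.  After the fix, the
 * early return leaves the native SMP path completely untouched.
 */
#include <stdbool.h>
#include <stdio.h>

static bool xen_have_vector_callback;   /* set only on hypervisors that support it */
static bool pv_ipi_ops_installed;       /* hypothetical stand-in for overriding smp_ops */

static void xen_hvm_smp_init_fixed(void)
{
	if (!xen_have_vector_callback)
		return;                 /* keep native SMP ops: no PV IPIs, no hang */
	pv_ipi_ops_installed = true;    /* safe: event-channel IPIs can actually arrive */
}

int main(void)
{
	xen_have_vector_callback = false;       /* the older-hypervisor case */
	xen_hvm_smp_init_fixed();
	printf("PV IPIs installed: %s\n", pv_ipi_ops_installed ? "yes" : "no");
	return 0;
}
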
diff --git a/queue-3.0/xen-x86-replace-order-based-range-checking-of-m2p-table-by.patch b/queue-3.0/xen-x86-replace-order-based-range-checking-of-m2p-table-by.patch
new file mode 100644
index 0000000000..81d6fa4422
--- /dev/null
+++ b/queue-3.0/xen-x86-replace-order-based-range-checking-of-m2p-table-by.patch
@@ -0,0 +1,105 @@
+From ccbcdf7cf1b5f6c6db30d84095b9c6c53043af55 Mon Sep 17 00:00:00 2001
+From: Jan Beulich <JBeulich@novell.com>
+Date: Tue, 16 Aug 2011 15:07:41 +0100
+Subject: xen/x86: replace order-based range checking of M2P table by
+ linear one
+
+From: Jan Beulich <JBeulich@novell.com>
+
+commit ccbcdf7cf1b5f6c6db30d84095b9c6c53043af55 upstream.
+
+The order-based approach is not only less efficient (requiring a shift
+and a compare, typical generated code looking like this
+
+ mov eax, [machine_to_phys_order]
+ mov ecx, eax
+ shr ebx, cl
+ test ebx, ebx
+ jnz ...
+
+whereas a direct check requires just a compare, like in
+
+ cmp ebx, [machine_to_phys_nr]
+ jae ...
+
+), but also slightly dangerous in the 32-on-64 case - the element
+address calculation can wrap if the next power of two boundary is
+sufficiently far away from the actual upper limit of the table, and
+hence can result in user space addresses being accessed (with it being
+unknown what may actually be mapped there).
+
+Additionally, the elimination of the mistaken use of fls() here (should
+have been __fls()) fixes a latent issue on x86-64 that would trigger
+if the code was run on a system with memory extending beyond the 44-bit
+boundary.
+
+Signed-off-by: Jan Beulich <jbeulich@novell.com>
+[v1: Based on Jeremy's feedback]
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/xen/page.h | 4 ++--
+ arch/x86/xen/enlighten.c | 4 ++--
+ arch/x86/xen/mmu.c | 12 ++++++++----
+ 3 files changed, 12 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -39,7 +39,7 @@ typedef struct xpaddr {
+ ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
+
+ extern unsigned long *machine_to_phys_mapping;
+-extern unsigned int machine_to_phys_order;
++extern unsigned long machine_to_phys_nr;
+
+ extern unsigned long get_phys_to_machine(unsigned long pfn);
+ extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+@@ -87,7 +87,7 @@ static inline unsigned long mfn_to_pfn(u
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+- if (unlikely((mfn >> machine_to_phys_order) != 0)) {
++ if (unlikely(mfn >= machine_to_phys_nr)) {
+ pfn = ~0;
+ goto try_override;
+ }
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(xen_domain_type);
+
+ unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
+ EXPORT_SYMBOL(machine_to_phys_mapping);
+-unsigned int machine_to_phys_order;
+-EXPORT_SYMBOL(machine_to_phys_order);
++unsigned long machine_to_phys_nr;
++EXPORT_SYMBOL(machine_to_phys_nr);
+
+ struct start_info *xen_start_info;
+ EXPORT_SYMBOL_GPL(xen_start_info);
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1626,15 +1626,19 @@ static void __init xen_map_identity_earl
+ void __init xen_setup_machphys_mapping(void)
+ {
+ struct xen_machphys_mapping mapping;
+- unsigned long machine_to_phys_nr_ents;
+
+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
+- machine_to_phys_nr_ents = mapping.max_mfn + 1;
++ machine_to_phys_nr = mapping.max_mfn + 1;
+ } else {
+- machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++ machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
+ }
+- machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
++#ifdef CONFIG_X86_32
++ if ((machine_to_phys_mapping + machine_to_phys_nr)
++ < machine_to_phys_mapping)
++ machine_to_phys_nr = (unsigned long *)NULL
++ - machine_to_phys_mapping;
++#endif
+ }
+
+ #ifdef CONFIG_X86_64
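
For illustration only, a small standalone C sketch of the range-check change described in the commit message above: the old order-based test effectively rounds the table size up to a power of two, so MFNs between the real end of the M2P table and that boundary are wrongly accepted, while the new linear compare against the exact entry count rejects them. The table size and order below are made-up values, not anything read from a hypervisor.

/*
 * Illustrative sketch (not kernel code): contrasts the old order-based M2P
 * range check with the linear one this patch introduces.
 */
#include <stdio.h>

#define M2P_NR     3UL          /* hypothetical: table really has 3 entries */
#define M2P_ORDER  2U           /* fls(3 - 1) = 2, i.e. rounded up to 4 entries */

static int in_range_order(unsigned long mfn)
{
	return (mfn >> M2P_ORDER) == 0;         /* old check */
}

static int in_range_linear(unsigned long mfn)
{
	return mfn < M2P_NR;                    /* new check */
}

int main(void)
{
	/* just past the real table, but still below the power-of-two boundary */
	unsigned long mfn = 3;

	printf("order-based check accepts mfn %lu: %d\n", mfn, in_range_order(mfn));
	printf("linear check accepts mfn %lu:      %d\n", mfn, in_range_linear(mfn));
	return 0;
}

Built with any C compiler, the order-based check accepts mfn 3 (and would index past the table), while the linear check rejects it.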