path: root/arch
author    David Gibson <david@gibson.dropbear.id.au>  2004-08-22 22:33:27 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>   2004-08-22 22:33:27 -0700
commit    d983cfb9e563eb65b6772f75e700c7b9b4c984db
tree      90492e695c2c98fabc454834f8bafde9fbaaf651
parent    9024871d2d20b39e5f0b7ef6c28006dee566386f
[PATCH] ppc64: bolted SLB entry for iSeries
Tested, at least basically, on POWER4 iSeries with shared processors, on POWER4 pSeries, and on RS64 (non-SLB) iSeries machines.

On pSeries SLB machines we "bolt" an SLB entry for the first segment of the vmalloc() area into the SLB to reduce the SLB miss rate. On iSeries this was disabled because it caused problems: the bolted entry was not restored properly on a shared processor switch. This patch adds the bolted vmalloc segment to the lpar map, so that it should be restored on a shared processor switch.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
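For orientation, the ESID/VA/VPN constants in the lpar map below are related by fixed shifts: segments are 256 MB (28 bits) and pages are 4 KB (12 bits). The VSID values themselves come from the VSID generation algorithm in include/asm/mmu_context.h (referenced in the diff context) and are taken as given here. A minimal standalone C check of the values appearing in this diff; the macro and variable names are ours, not the kernel's:

#include <assert.h>
#include <stdint.h>

#define SID_SHIFT  28  /* 256 MB segments */
#define PAGE_SHIFT 12  /* 4 KB pages */

int main(void)
{
	uint64_t kernel_ea   = 0xC000000000000000ULL; /* kernel effective address */
	uint64_t vmalloc_ea  = 0xD000000000000000ULL; /* vmalloc effective address */
	uint64_t kernel_vsid = 0x6a99b4b14ULL;        /* VSID from the lpar map */

	/* An ESID is just the effective address shifted down by the segment size */
	assert((kernel_ea  >> SID_SHIFT) == 0xc00000000ULL);
	assert((vmalloc_ea >> SID_SHIFT) == 0xd00000000ULL);

	/* The kernel's virtual address is the VSID followed by the 28-bit
	 * segment offset (zero for the start of the load area)... */
	uint64_t kernel_va = kernel_vsid << SID_SHIFT;
	assert(kernel_va == 0x6a99b4b140000000ULL);

	/* ...and the VPN in the map is that VA expressed as a page number */
	assert((kernel_va >> PAGE_SHIFT) == 0x0006a99b4b140000ULL);

	/* 8192 pages of 4 KB = the 32 MB load area mapped by the hypervisor */
	assert((8192ULL << PAGE_SHIFT) == (32 << 20));
	return 0;
}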
Diffstat (limited to 'arch')
-rw-r--r--  arch/ppc64/kernel/head.S | 10 +++++++---
-rw-r--r--  arch/ppc64/mm/slb.c      |  2 --
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index a5d67f59951ddd..dc0885144293fd 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -580,7 +580,7 @@ __end_systemcfg:
  * VSID generation algorithm. See include/asm/mmu_context.h.
  */
-	.llong 1		/* # ESIDs to be mapped by hypervisor */
+	.llong 2		/* # ESIDs to be mapped by hypervisor */
 	.llong 1		/* # memory ranges to be mapped by hypervisor */
 	.llong STAB0_PAGE	/* Page # of segment table within load area */
 	.llong 0		/* Reserved */
@@ -588,8 +588,12 @@ __end_systemcfg:
 	.llong 0		/* Reserved */
 	.llong 0		/* Reserved */
 	.llong 0		/* Reserved */
-	.llong 0x0c00000000	/* ESID to map (Kernel at EA = 0xC000000000000000) */
-	.llong 0x06a99b4b14	/* VSID to map (Kernel at VA = 0x6a99b4b140000000) */
+	.llong 0xc00000000	/* KERNELBASE ESID */
+	.llong 0x6a99b4b14	/* KERNELBASE VSID */
+	/* We have to list the bolted VMALLOC segment here, too, so that it
+	 * will be restored on shared processor switch */
+	.llong 0xd00000000	/* VMALLOCBASE ESID */
+	.llong 0x08d12e6ab	/* VMALLOCBASE VSID */
 	.llong 8192		/* # pages to map (32 MB) */
 	.llong 0		/* Offset from start of loadarea to start of map */
 	.llong 0x0006a99b4b140000	/* VPN of first page to map */
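Read as a structure, the lpar map patched above has a layout like the following hypothetical C rendering. The field names are ours, inferred from the .llong comments (the hypervisor consumes the raw doublewords, not any C type), and the reserved-word count assumes one more reserved .llong between the two hunks:

#include <stdint.h>

/* Hypothetical layout of the lpar map the hypervisor reads from the
 * kernel load area; field names inferred from the .llong comments. */
struct lpar_map {
	uint64_t n_esids;	/* # ESID/VSID pairs: now 2 */
	uint64_t n_ranges;	/* # memory ranges: 1 */
	uint64_t stab_page;	/* page # of segment table within load area */
	uint64_t reserved[5];
	struct {
		uint64_t esid;	/* e.g. 0xc00000000 (KERNELBASE) */
		uint64_t vsid;	/* e.g. 0x6a99b4b14 */
	} esids[2];		/* KERNELBASE + the bolted VMALLOC segment */
	struct {
		uint64_t n_pages;	/* 8192 x 4 KB = 32 MB */
		uint64_t offset;	/* from start of load area */
		uint64_t vpn;		/* VPN of first page to map */
	} ranges[1];
};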
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
index bc61258040a20b..b010583375d92e 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/ppc64/mm/slb.c
@@ -36,7 +36,6 @@ static inline void create_slbe(unsigned long ea, unsigned long vsid,
 static void slb_add_bolted(void)
 {
-#ifndef CONFIG_PPC_ISERIES
 	WARN_ON(!irqs_disabled());
 	/* If you change this make sure you change SLB_NUM_BOLTED
@@ -49,7 +48,6 @@ static void slb_add_bolted(void)
 			 SLB_VSID_KERNEL, 1);
 	asm volatile("isync":::"memory");
-#endif
 }
 /* Flush all user entries from the segment table of the current processor. */
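With the #ifndef/#endif pair removed, slb_add_bolted() now runs on iSeries as well as pSeries. A sketch of the resulting function, under the assumption that the elided middle of the function calls create_slbe() (named in the first hunk header) with helpers from the surrounding 2.6-era ppc64 tree; get_kernel_vsid() and VMALLOCBASE are our assumptions, not shown in this diff:

/* Sketch of slb_add_bolted() after this patch: the CONFIG_PPC_ISERIES
 * guard is gone, because the lpar map now tells the hypervisor to
 * restore the bolted vmalloc SLB entry on a shared processor switch. */
static void slb_add_bolted(void)
{
	WARN_ON(!irqs_disabled());

	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too */

	/* Bolt in the first segment of the vmalloc() area */
	create_slbe(VMALLOCBASE, get_kernel_vsid(VMALLOCBASE),
		    SLB_VSID_KERNEL, 1);
	asm volatile("isync":::"memory");
}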