Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a606504678bd5a..149351a84b941c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -368,7 +368,7 @@ static unsigned long __init htab_get_table_size(void)
unsigned long mem_size, rnd_mem_size, pteg_count;
/* If hash size isn't already provided by the platform, we try to
- * retreive it from the device-tree. If it's not there neither, we
+ * retrieve it from the device-tree. If it's not there either, we
* calculate it now based on the total RAM size
*/
if (ppc64_pft_size == 0)
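The comment fixed by this hunk describes a three-step fall-back: use the size the platform already provided, else read it from the device tree, else compute it from total RAM. For the last step, the classic ppc64 sizing rounds memory up to a power of two and allows one 128-byte PTE group (PTEG) per two 4K pages. A minimal standalone sketch of that arithmetic; the shifts and the minimum are assumptions mirroring the era's ppc64 defaults, not a copy of the kernel code:

#include <stdio.h>

/* Fall-back sizing sketch: round RAM up to the next power of two,
 * then allow one PTE group (PTEG) per two 4K pages, at 128 bytes
 * per PTEG. Constants are illustrative assumptions. */
static unsigned long htab_size_for(unsigned long mem_size)
{
	unsigned long rnd_mem_size = 1, pteg_count;

	while (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	pteg_count = rnd_mem_size >> (12 + 1);	/* pages / 2 */
	if (pteg_count < (1UL << 11))		/* keep a sane minimum */
		pteg_count = 1UL << 11;

	return pteg_count << 7;			/* 128 bytes per PTEG */
}

int main(void)
{
	/* 1GB of RAM yields a 16MB hash table with these constants */
	printf("%lu\n", htab_size_for(1UL << 30));
	return 0;
}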
@@ -456,7 +456,7 @@ void __init htab_initialize(void)
/* create the bolted linear mapping in the hash table */
for (i=0; i < lmb.memory.cnt; i++) {
- base = lmb.memory.region[i].base + KERNELBASE;
+ base = (unsigned long)__va(lmb.memory.region[i].base);
size = lmb.memory.region[i].size;
DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -498,8 +498,8 @@ void __init htab_initialize(void)
* for either 4K or 16MB pages.
*/
if (tce_alloc_start) {
- tce_alloc_start += KERNELBASE;
- tce_alloc_end += KERNELBASE;
+ tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+ tce_alloc_end = (unsigned long)__va(tce_alloc_end);
if (base + size >= tce_alloc_start)
tce_alloc_start = base + size + 1;
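The check in the last two lines of this hunk pushes the TCE window past any linear-mapping region it collides with, now comparing the virtual addresses produced by __va(). A small standalone demonstration of that clamp, with made-up addresses:

#include <stdio.h>

int main(void)
{
	/* Made-up addresses: a freshly mapped region [base, base + size]
	 * and a TCE window that would start inside it. */
	unsigned long base = 0xc000000000000000UL;
	unsigned long size = 0x10000000UL;
	unsigned long tce_alloc_start = base + 0x08000000UL;

	if (base + size >= tce_alloc_start)	/* same test as the hunk */
		tce_alloc_start = base + size + 1;

	printf("tce_alloc_start -> %#lx\n", tce_alloc_start);
	return 0;
}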
@@ -644,6 +644,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
DBG_LOW(" -> rc=%d\n", rc);
return rc;
}
+EXPORT_SYMBOL_GPL(hash_page);
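The new EXPORT_SYMBOL_GPL() line makes hash_page() resolvable from GPL-licensed modules; without it, a module referencing the symbol would fail to load with an unresolved-symbol error. A hypothetical module showing what the export enables (the extern mirrors the definition in this file; everything else here is illustrative):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

/* The extern mirrors the definition in hash_utils_64.c; where the
 * real declaration lives is not shown in this diff. */
extern int hash_page(unsigned long ea, unsigned long access,
		     unsigned long trap);

static int __init hashdemo_init(void)
{
	/* Referencing the symbol is enough to exercise the export at
	 * module load time; calling it needs a real fault context. */
	printk(KERN_INFO "hash_page is at %p\n", hash_page);
	return 0;
}

static void __exit hashdemo_exit(void)
{
}

module_init(hashdemo_init);
module_exit(hashdemo_exit);
MODULE_LICENSE("GPL");	/* GPL-only export: non-GPL modules cannot bind */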
void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap)