author     Matthew Wilcox <willy@debian.org>            2004-08-12 20:49:08 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2004-08-12 20:49:08 -0700
commit     064c7c355937f7be5509b1eaf0d77618d61d703d (patch)
tree       168d0777f09ce58abe9426a31139a031362d342d /include
parent     401f0fbde0a3cb8a6a36f1e24ed78880faeee318 (diff)
download   history-064c7c355937f7be5509b1eaf0d77618d61d703d.tar.gz
[PATCH] PA-RISC update
- __PAGE_OFFSET is 0x10000000 (Randolph Chung)
- PA8800 support (Grant Grundler)
- debuglocks (Thibaut Varene)
- PDC chassis disabling (Thibaut Varene)
- Distinguish between Dinos in request_irq (Thibaut Varene)
- Document interrupt registers (Randolph Chung)
- Revamp CONFIG_DISCONTIGMEM support (Randolph Chung)
- Remove STI console warning and special casing (Randolph Chung)
- n4000 defconfig (Randolph Chung)
- iosapic fixes (Bjorn Helgaas)
- Fix a bug in entry.S where pa_dbit_lock was being trashed (Randolph Chung)
- SMP support (Randolph Chung, Grant Grundler, James Bottomley)
- Clear the pte in the fault handler (Joel Soete)
- Change _exit prototype (Carlos O'Donell)
- Better unwinding support (Randolph Chung)
- GCC 3.4 fixes (Carlos O'Donell, Randolph Chung)
Diffstat (limited to 'include')
-rw-r--r--  include/asm-parisc/assembly.h      18
-rw-r--r--  include/asm-parisc/bitops.h        54
-rw-r--r--  include/asm-parisc/cacheflush.h    28
-rw-r--r--  include/asm-parisc/dma-mapping.h    1
-rw-r--r--  include/asm-parisc/hardware.h       1
-rw-r--r--  include/asm-parisc/io.h             5
-rw-r--r--  include/asm-parisc/mmzone.h       105
-rw-r--r--  include/asm-parisc/numnodes.h       9
-rw-r--r--  include/asm-parisc/page.h          24
-rw-r--r--  include/asm-parisc/pci.h           22
-rw-r--r--  include/asm-parisc/pdc.h          259
-rw-r--r--  include/asm-parisc/pdcpat.h       162
-rw-r--r--  include/asm-parisc/pgalloc.h       38
-rw-r--r--  include/asm-parisc/pgtable.h       79
-rw-r--r--  include/asm-parisc/smp.h            9
-rw-r--r--  include/asm-parisc/spinlock.h      94
-rw-r--r--  include/asm-parisc/system.h        10
-rw-r--r--  include/asm-parisc/thread_info.h    6
-rw-r--r--  include/asm-parisc/unistd.h         4
-rw-r--r--  include/asm-parisc/unwind.h         8
20 files changed, 477 insertions, 459 deletions
diff --git a/include/asm-parisc/assembly.h b/include/asm-parisc/assembly.h
index b421d4b8d178f6..5ba8c5cd46bdfe 100644
--- a/include/asm-parisc/assembly.h
+++ b/include/asm-parisc/assembly.h
@@ -24,6 +24,7 @@
#ifdef __LP64__
#define LDREG ldd
#define STREG std
+#define LDREGX ldd,s
#define LDREGM ldd,mb
#define STREGM std,ma
#define RP_OFFSET 16
@@ -31,12 +32,19 @@
#else
#define LDREG ldw
#define STREG stw
+#define LDREGX ldwx,s
#define LDREGM ldwm
#define STREGM stwm
#define RP_OFFSET 20
#define FRAME_SIZE 64
#endif
+#ifdef CONFIG_PA20
+#define BL b,l
+#else
+#define BL bl
+#endif
+
#ifdef __ASSEMBLY__
#ifdef __LP64__
@@ -110,6 +118,16 @@
depd,z \r, 63-\sa, 64-\sa, \t
.endm
+ /* Shift Right - note the r and t can NOT be the same! */
+ .macro shr r, sa, t
+ extru \r, 31-\sa, 32-\sa, \t
+ .endm
+
+ /* pa20w version of shift right */
+ .macro shrd r, sa, t
+ extrd,u \r, 63-\sa, 64-\sa, \t
+ .endm
+
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index caa31732820bae..914f70ce0f752f 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -30,7 +30,7 @@
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
-static __inline__ void set_bit(int nr, void * address)
+static __inline__ void set_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -43,7 +43,7 @@ static __inline__ void set_bit(int nr, void * address)
atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
}
-static __inline__ void __set_bit(int nr, void * address)
+static __inline__ void __set_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -53,7 +53,7 @@ static __inline__ void __set_bit(int nr, void * address)
*addr |= mask;
}
-static __inline__ void clear_bit(int nr, void * address)
+static __inline__ void clear_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -66,7 +66,7 @@ static __inline__ void clear_bit(int nr, void * address)
atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
}
-static __inline__ void __clear_bit(unsigned long nr, volatile void * address)
+static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -76,7 +76,7 @@ static __inline__ void __clear_bit(unsigned long nr, volatile void * address)
*addr &= ~mask;
}
-static __inline__ void change_bit(int nr, void * address)
+static __inline__ void change_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -89,7 +89,7 @@ static __inline__ void change_bit(int nr, void * address)
atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
}
-static __inline__ void __change_bit(int nr, void * address)
+static __inline__ void __change_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -99,7 +99,7 @@ static __inline__ void __change_bit(int nr, void * address)
*addr ^= mask;
}
-static __inline__ int test_and_set_bit(int nr, void * address)
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -116,7 +116,7 @@ static __inline__ int test_and_set_bit(int nr, void * address)
return oldbit;
}
-static __inline__ int __test_and_set_bit(int nr, void * address)
+static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -130,7 +130,7 @@ static __inline__ int __test_and_set_bit(int nr, void * address)
return oldbit;
}
-static __inline__ int test_and_clear_bit(int nr, void * address)
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -147,7 +147,7 @@ static __inline__ int test_and_clear_bit(int nr, void * address)
return oldbit;
}
-static __inline__ int __test_and_clear_bit(int nr, void * address)
+static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -161,7 +161,7 @@ static __inline__ int __test_and_clear_bit(int nr, void * address)
return oldbit;
}
-static __inline__ int test_and_change_bit(int nr, void * address)
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -178,7 +178,7 @@ static __inline__ int test_and_change_bit(int nr, void * address)
return oldbit;
}
-static __inline__ int __test_and_change_bit(int nr, void * address)
+static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -192,10 +192,10 @@ static __inline__ int __test_and_change_bit(int nr, void * address)
return oldbit;
}
-static __inline__ int test_bit(int nr, const void *address)
+static __inline__ int test_bit(int nr, const volatile unsigned long *address)
{
unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
+ const unsigned long *addr = (const unsigned long *)address;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
@@ -322,7 +322,7 @@ static __inline__ int fls(int x)
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
-static inline int sched_find_first_bit(unsigned long *b)
+static inline int sched_find_first_bit(const unsigned long *b)
{
#ifndef __LP64__
if (unlikely(b[0]))
@@ -354,9 +354,9 @@ static inline int sched_find_first_bit(unsigned long *b)
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
-static __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
{
- unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
+ const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
@@ -389,9 +389,9 @@ found_middle:
return result + ffz(tmp);
}
-static __inline__ unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
- unsigned long *p = addr + (offset >> 6);
+ const unsigned long *p = addr + (offset >> 6);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
@@ -446,15 +446,15 @@ found_middle:
* disabling interrupts.
*/
#ifdef __LP64__
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x38, addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x38, addr)
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x38, addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, addr)
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
+#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
+#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
#else
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x18, addr)
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, addr)
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
+#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
+#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
#endif
#endif /* __KERNEL__ */
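
[Editor's note: a minimal usage sketch, not part of the patch. The bitops.h hunk above tightens the bit operations to take a volatile unsigned long * instead of a bare void *, so callers are expected to pass a real unsigned long bitmap. The flag names and bitmap size below are invented for the example.]

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define MY_FLAG_READY 0        /* hypothetical flag bits for the example */
    #define MY_FLAG_BUSY  1

    static DECLARE_BITMAP(my_flags, 8);   /* expands to: unsigned long my_flags[1] */

    static int example_flags(void)
    {
            set_bit(MY_FLAG_READY, my_flags);               /* matches the new prototype directly */
            if (test_and_clear_bit(MY_FLAG_BUSY, my_flags))
                    return 1;
            return test_bit(MY_FLAG_READY, my_flags);
    }
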
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index a5700c29f18a86..3086cdaf4edda3 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -65,18 +65,7 @@ flush_user_icache_range(unsigned long start, unsigned long end)
#endif
}
-extern void __flush_dcache_page(struct page *page);
-
-static inline void flush_dcache_page(struct page *page)
-{
- struct address_space *mapping = page_mapping(page);
-
- if (mapping && !mapping_mapped(mapping)) {
- set_bit(PG_dcache_dirty, &page->flags);
- } else {
- __flush_dcache_page(page);
- }
-}
+extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
@@ -115,28 +104,29 @@ static inline void flush_cache_range(struct vm_area_struct *vma,
/* Simple function to work out if we have an existing address translation
* for a user space vma. */
-static inline int translation_exists(struct vm_area_struct *vma,
- unsigned long addr)
+static inline pte_t *__translation_exists(struct mm_struct *mm,
+ unsigned long addr)
{
- pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
+ pgd_t *pgd = pgd_offset(mm, addr);
pmd_t *pmd;
pte_t *pte;
if(pgd_none(*pgd))
- return 0;
+ return NULL;
pmd = pmd_offset(pgd, addr);
if(pmd_none(*pmd) || pmd_bad(*pmd))
- return 0;
+ return NULL;
pte = pte_offset_map(pmd, addr);
/* The PA flush mappings show up as pte_none, but they're
* valid none the less */
if(pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
- return 0;
- return 1;
+ return NULL;
+ return pte;
}
+#define translation_exists(vma, addr) __translation_exists((vma)->vm_mm, addr)
/* Private function to flush a page from the cache of a non-current
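
[Editor's note: a minimal sketch, not part of the patch. The cacheflush.h hunk reworks translation_exists() so the page-table walk hands back the pte it found instead of a plain 0/1; the caller function name below is invented for illustration.]

    static inline void example_flush_if_mapped(struct vm_area_struct *vma,
                                               unsigned long addr)
    {
            pte_t *pte = translation_exists(vma, addr);     /* NULL when no translation */

            if (pte && pte_dirty(*pte)) {
                    /* a real caller would flush the user mapping here */
            }
    }
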
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index 89808f7786dc13..be4458181e3892 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -4,6 +4,7 @@
#include <linux/mm.h>
#include <linux/config.h>
#include <asm/cacheflush.h>
+#include <asm/scatterlist.h>
/* See Documentation/DMA-mapping.txt */
struct hppa_dma_ops {
diff --git a/include/asm-parisc/hardware.h b/include/asm-parisc/hardware.h
index 3fc29c68f6faa7..b383b3c75ad095 100644
--- a/include/asm-parisc/hardware.h
+++ b/include/asm-parisc/hardware.h
@@ -97,6 +97,7 @@ struct bc_module {
#define HPHW_IOA 12
#define HPHW_BRIDGE 13
#define HPHW_FABRIC 14
+#define HPHW_MC 15
#define HPHW_FAULTY 31
diff --git a/include/asm-parisc/io.h b/include/asm-parisc/io.h
index 714db8ebf31ceb..fe536cfa336684 100644
--- a/include/asm-parisc/io.h
+++ b/include/asm-parisc/io.h
@@ -24,11 +24,6 @@ extern unsigned long parisc_vmerge_max_size;
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
-
/* Memory mapped IO */
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h
index 221a7ef1f95ff5..928bf50c4693e2 100644
--- a/include/asm-parisc/mmzone.h
+++ b/include/asm-parisc/mmzone.h
@@ -1,31 +1,102 @@
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H
+#ifdef CONFIG_DISCONTIGMEM
+
+#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+extern int npmem_ranges;
+
struct node_map_data {
pg_data_t pg_data;
- struct page *adj_node_mem_map;
};
extern struct node_map_data node_data[];
-extern unsigned char *chunkmap;
-
-#define BADCHUNK ((unsigned char)0xff)
-#define CHUNKSZ (256*1024*1024)
-#define CHUNKSHIFT 28
-#define CHUNKMASK (~(CHUNKSZ - 1))
-#define CHUNKNUM(paddr) ((paddr) >> CHUNKSHIFT)
#define NODE_DATA(nid) (&node_data[nid].pg_data)
-#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
-#define ADJ_NODE_MEM_MAP(nid) (node_data[nid].adj_node_mem_map)
-#define phys_to_page(paddr) \
- (ADJ_NODE_MEM_MAP(chunkmap[CHUNKNUM((paddr))]) \
- + ((paddr) >> PAGE_SHIFT))
+/*
+ * Given a kernel address, find the home node of the underlying memory.
+ */
+#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map)
+#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
+#define node_end_pfn(nid) \
+({ \
+ pg_data_t *__pgdat = NODE_DATA(nid); \
+ __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
+})
+#define node_localnr(pfn, nid) ((pfn) - node_start_pfn(nid))
+
+#define local_mapnr(kvaddr) \
+({ \
+ unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \
+ (__pfn - node_start_pfn(pfn_to_nid(__pfn))); \
+})
+
+#define pfn_to_page(pfn) \
+({ \
+ unsigned long __pfn = (pfn); \
+ int __node = pfn_to_nid(__pfn); \
+ &node_mem_map(__node)[node_localnr(__pfn,__node)]; \
+})
+
+#define page_to_pfn(pg) \
+({ \
+ struct page *__page = pg; \
+ struct zone *__zone = page_zone(__page); \
+ BUG_ON(__zone == NULL); \
+ (unsigned long)(__page - __zone->zone_mem_map) \
+ + __zone->zone_start_pfn; \
+})
+
+/* We have these possible memory map layouts:
+ * Astro: 0-3.75, 67.75-68, 4-64
+ * zx1: 0-1, 257-260, 4-256
+ * Stretch (N-class): 0-2, 4-32, 34-xxx
+ */
+
+/* Since each 1GB can only belong to one region (node), we can create
+ * an index table for pfn to nid lookup; each entry in pfnnid_map
+ * represents 1GB, and contains the node that the memory belongs to. */
+
+#define PFNNID_SHIFT (30 - PAGE_SHIFT)
+#define PFNNID_MAP_MAX 512 /* support 512GB */
+extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
+
+#ifndef __LP64__
+#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
+#else
+/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
+#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
+#endif
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+ unsigned int i;
+ unsigned char r;
+
+ if (unlikely(pfn_is_io(pfn)))
+ return 0;
+
+ i = pfn >> PFNNID_SHIFT;
+ BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0]));
+ r = pfnnid_map[i];
+ BUG_ON(r == 0xff);
+
+ return (int)r;
+}
-#define virt_to_page(kvaddr) phys_to_page(__pa(kvaddr))
+static inline int pfn_valid(int pfn)
+{
+ int nid = pfn_to_nid(pfn);
-/* This is kind of bogus, need to investigate performance of doing it right */
-#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+ if (nid >= 0)
+ return (pfn < node_end_pfn(nid));
+ return 0;
+}
-#endif /* !_PARISC_MMZONE_H */
+#else /* !CONFIG_DISCONTIGMEM */
+#define MAX_PHYSMEM_RANGES 1
+#endif
+#endif /* _PARISC_MMZONE_H */
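
[Editor's note: a small worked example of the pfnnid_map lookup above, not part of the patch. It assumes the usual 4 KB pages (PAGE_SHIFT == 12), so PFNNID_SHIFT is 30 - 12 = 18 and each table entry covers 1 GB of physical address space; the address is made up.]

    unsigned long paddr = 0x40400000UL;            /* 1 GB + 4 MB */
    unsigned long pfn   = paddr >> PAGE_SHIFT;     /* 0x40400 */
    int nid             = pfn_to_nid(pfn);         /* reads pfnnid_map[0x40400 >> 18] == pfnnid_map[1] */

Each physical range reported by firmware simply marks the 1 GB slots it spans with its node number, so pfn_to_nid() stays a single table lookup at runtime.
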
diff --git a/include/asm-parisc/numnodes.h b/include/asm-parisc/numnodes.h
new file mode 100644
index 00000000000000..dcdd933eb60b1a
--- /dev/null
+++ b/include/asm-parisc/numnodes.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_MAX_NUMNODES_H
+#define _ASM_MAX_NUMNODES_H
+
+#include <linux/config.h>
+
+/* Max 8 Nodes */
+#define NODES_SHIFT 3
+
+#endif /* _ASM_MAX_NUMNODES_H */
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h
index 54cc342e4c51f7..4a12692f94b465 100644
--- a/include/asm-parisc/page.h
+++ b/include/asm-parisc/page.h
@@ -60,10 +60,15 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#else
#define pte_flags(x) ((x).flags)
#endif
-#define pmd_val(x) ((x).pmd)
-#define pgd_val(x) ((x).pgd)
+
+/* These do not work lvalues, so make sure we don't use them as such. */
+#define pmd_val(x) ((x).pmd + 0)
+#define pgd_val(x) ((x).pgd + 0)
#define pgprot_val(x) ((x).pgprot)
+#define __pmd_val_set(x,n) (x).pmd = (n)
+#define __pgd_val_set(x,n) (x).pgd = (n)
+
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
@@ -83,12 +88,6 @@ extern __inline__ int get_order(unsigned long size)
return order;
}
-#ifdef __LP64__
-#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
-#else
-#define MAX_PHYSMEM_RANGES 1 /* First range is only range that fits in 32 bits */
-#endif
-
typedef struct __physmem_range {
unsigned long start_pfn;
unsigned long pages; /* PAGE_SIZE pages */
@@ -144,15 +143,16 @@ extern int npmem_ranges;
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#endif /* CONFIG_DISCONTIGMEM */
+
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#ifndef CONFIG_DISCONTIGMEM
-#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
-#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-#endif /* !CONFIG_DISCONTIGMEM */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
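
[Editor's note: a before/after sketch, not part of the patch. The page.h hunk deliberately makes pmd_val()/pgd_val() yield rvalues ("(x).pgd + 0"), so code that used to assign through them must switch to the new setters.]

    pgd_t entry;

    /* old style, now rejected by the compiler: pgd_val(entry) = 0; */
    __pgd_val_set(entry, 0);                       /* new way to write the raw value */

    if (pgd_val(entry) == 0)                       /* reading is unchanged */
            ;
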
diff --git a/include/asm-parisc/pci.h b/include/asm-parisc/pci.h
index b091f084566225..ba34a4e49d83c1 100644
--- a/include/asm-parisc/pci.h
+++ b/include/asm-parisc/pci.h
@@ -16,28 +16,6 @@
*/
#define PCI_MAX_BUSSES 256
-/* [soapbox on]
-** Who the hell can develop stuff without ASSERT or VASSERT?
-** No one understands all the modules across all platforms.
-** For linux add another dimension - processor architectures.
-**
-** This should be a standard/global macro used liberally
-** in all code. Every respectable engineer I know in HP
-** would support this argument. - grant
-** [soapbox off]
-*/
-#ifdef PCI_DEBUG
-#define ASSERT(expr) \
- if(!(expr)) { \
- printk("\n%s:%d: Assertion " #expr " failed!\n", \
- __FILE__, __LINE__); \
- panic(#expr); \
- }
-#else
-#define ASSERT(expr)
-#endif
-
-
/*
** pci_hba_data (aka H2P_OBJECT in HP/UX)
**
diff --git a/include/asm-parisc/pdc.h b/include/asm-parisc/pdc.h
index 990ddd36deeafc..ce9a9e1b33ff1e 100644
--- a/include/asm-parisc/pdc.h
+++ b/include/asm-parisc/pdc.h
@@ -297,175 +297,6 @@ typedef struct {
#define OSTAT_RUN 6
#define OSTAT_ON 7
-#ifdef __LP64__
-/* PDC PAT CELL */
-#define PDC_PAT_CELL 64L /* Interface for gaining and
- * manipulating cell state within PD */
-#define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */
-#define PDC_PAT_CELL_GET_INFO 1L /* Returns info about Cell */
-#define PDC_PAT_CELL_MODULE 2L /* Returns info about Module */
-#define PDC_PAT_CELL_SET_ATTENTION 9L /* Set Cell Attention indicator */
-#define PDC_PAT_CELL_NUMBER_TO_LOC 10L /* Cell Number -> Location */
-#define PDC_PAT_CELL_WALK_FABRIC 11L /* Walk the Fabric */
-#define PDC_PAT_CELL_GET_RDT_SIZE 12L /* Return Route Distance Table Sizes */
-#define PDC_PAT_CELL_GET_RDT 13L /* Return Route Distance Tables */
-#define PDC_PAT_CELL_GET_LOCAL_PDH_SZ 14L /* Read Local PDH Buffer Size*/
-#define PDC_PAT_CELL_SET_LOCAL_PDH 15L /* Write Local PDH Buffer */
-#define PDC_PAT_CELL_GET_REMOTE_PDH_SZ 16L /* Return Remote PDH Buffer Size */
-#define PDC_PAT_CELL_GET_REMOTE_PDH 17L /* Read Remote PDH Buffer */
-#define PDC_PAT_CELL_GET_DBG_INFO 128L /* Return DBG Buffer Info */
-#define PDC_PAT_CELL_CHANGE_ALIAS 129L /* Change Non-Equivalent Alias Checking */
-
-/*
-** Arg to PDC_PAT_CELL_MODULE memaddr[4]
-**
-** Addresses on the Merced Bus != all Runway Bus addresses.
-** This is intended for programming SBA/LBA chips range registers.
-*/
-#define IO_VIEW 0UL
-#define PA_VIEW 1UL
-
-/* PDC_PAT_CELL_MODULE entity type values */
-#define PAT_ENTITY_CA 0 /* central agent */
-#define PAT_ENTITY_PROC 1 /* processor */
-#define PAT_ENTITY_MEM 2 /* memory controller */
-#define PAT_ENTITY_SBA 3 /* system bus adapter */
-#define PAT_ENTITY_LBA 4 /* local bus adapter */
-#define PAT_ENTITY_PBC 5 /* processor bus converter */
-#define PAT_ENTITY_XBC 6 /* crossbar fabric connect */
-#define PAT_ENTITY_RC 7 /* fabric interconnect */
-
-/* PDC_PAT_CELL_MODULE address range type values */
-#define PAT_PBNUM 0 /* PCI Bus Number */
-#define PAT_LMMIO 1 /* < 4G MMIO Space */
-#define PAT_GMMIO 2 /* > 4G MMIO Space */
-#define PAT_NPIOP 3 /* Non Postable I/O Port Space */
-#define PAT_PIOP 4 /* Postable I/O Port Space */
-#define PAT_AHPA 5 /* Additional HPA Space */
-#define PAT_UFO 6 /* HPA Space (UFO for Mariposa) */
-#define PAT_GNIP 7 /* GNI Reserved Space */
-
-
-/* PDC PAT CHASSIS LOG */
-#define PDC_PAT_CHASSIS_LOG 65L /* Platform logging & forward
- ** progress functions */
-#define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */
-#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
-
-
-/* PDC PAT CPU */
-#define PDC_PAT_CPU 67L /* Interface to CPU configuration
- * within the protection domain */
-#define PDC_PAT_CPU_INFO 0L /* Return CPU config info */
-#define PDC_PAT_CPU_DELETE 1L /* Delete CPU */
-#define PDC_PAT_CPU_ADD 2L /* Add CPU */
-#define PDC_PAT_CPU_GET_NUMBER 3L /* Return CPU Number */
-#define PDC_PAT_CPU_GET_HPA 4L /* Return CPU HPA */
-#define PDC_PAT_CPU_STOP 5L /* Stop CPU */
-#define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */
-#define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */
-#define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */
-#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
-#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
- * Cleansing Mode */
-
-/* PDC PAT EVENT */
-#define PDC_PAT_EVENT 68L /* Interface to Platform Events */
-#define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */
-#define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */
-#define PDC_PAT_EVENT_SCAN 2L /* Scan Event */
-#define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */
-#define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args*/
-
-/* PDC PAT HPMC */
-#define PDC_PAT_HPMC 70L /* Cause processor to go into spin
- ** loop, and wait for wake up from
- ** Monarch Processor */
-#define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */
-#define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC
- * will use to interrupt OS during machine
- * check rendezvous */
-
-/* parameters for PDC_PAT_HPMC_SET_PARAMS */
-#define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */
-#define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */
-
-/* PDC PAT IO */
-#define PDC_PAT_IO 71L /* On-line services for I/O modules */
-#define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info */
-#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
- /* Hardware Path */
-#define PDC_PAT_IO_GET_HARDWARE_FROM_LOC 7L /* Get Hardware Path from
- * Physical Location */
-#define PDC_PAT_IO_GET_PCI_CONFIG_FROM_HW 11L /* Get PCI Configuration
- * Address from Hardware Path */
-#define PDC_PAT_IO_GET_HW_FROM_PCI_CONFIG 12L /* Get Hardware Path
- * from PCI Configuration Address */
-#define PDC_PAT_IO_READ_HOST_BRIDGE_INFO 13L /* Read Host Bridge State Info */
-#define PDC_PAT_IO_CLEAR_HOST_BRIDGE_INFO 14L /* Clear Host Bridge State Info*/
-#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE 15L /* Get PCI INT Routing Table
- * Size */
-#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE 16L /* Get PCI INT Routing Table */
-#define PDC_PAT_IO_GET_HINT_TABLE_SIZE 17L /* Get Hint Table Size */
-#define PDC_PAT_IO_GET_HINT_TABLE 18L /* Get Hint Table */
-#define PDC_PAT_IO_PCI_CONFIG_READ 19L /* PCI Config Read */
-#define PDC_PAT_IO_PCI_CONFIG_WRITE 20L /* PCI Config Write */
-#define PDC_PAT_IO_GET_NUM_IO_SLOTS 21L /* Get Number of I/O Bay Slots in
- * Cabinet */
-#define PDC_PAT_IO_GET_LOC_IO_SLOTS 22L /* Get Physical Location of I/O */
- /* Bay Slots in Cabinet */
-#define PDC_PAT_IO_BAY_STATUS_INFO 28L /* Get I/O Bay Slot Status Info */
-#define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */
-#define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */
-
-/* PDC PAT MEM */
-#define PDC_PAT_MEM 72L /* Manage memory page deallocation */
-#define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */
-#define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */
-#define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */
-#define PDC_PAT_MEM_PD_RESET 3L /* Reset clear bit for PD */
-#define PDC_PAT_MEM_CELL_INFO 5L /* Return PDT info For Cell */
-#define PDC_PAT_MEM_CELL_CLEAR 6L /* Clear PDT For Cell */
-#define PDC_PAT_MEM_CELL_READ 7L /* Read PDT entries For Cell */
-#define PDC_PAT_MEM_CELL_RESET 8L /* Reset clear bit For Cell */
-#define PDC_PAT_MEM_SETGM 9L /* Set Golden Memory value */
-#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */
-#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From*/
- /* Memory Address */
-#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */
-#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */
-#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */
-#define PDC_PAT_MEM_RD_STATE_INFO 15L /* Read Mem Module State Info*/
-#define PDC_PAT_MEM_CLR_STATE_INFO 16L /*Clear Mem Module State Info*/
-#define PDC_PAT_MEM_CLEAN_RANGE 128L /*Clean Mem in specific range*/
-#define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */
-#define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */
-
-/* PDC PAT NVOLATILE */
-#define PDC_PAT_NVOLATILE 73L /* Access Non-Volatile Memory*/
-#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
-#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
-#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
-#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
-#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
-
-/* PDC PAT PD */
-#define PDC_PAT_PD 74L /* Protection Domain Info */
-#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
-
-/* PDC_PAT_PD_GET_ADDR_MAP entry types */
-#define PAT_MEMORY_DESCRIPTOR 1
-
-/* PDC_PAT_PD_GET_ADDR_MAP memory types */
-#define PAT_MEMTYPE_MEMORY 0
-#define PAT_MEMTYPE_FIRMWARE 4
-
-/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
-#define PAT_MEMUSE_GENERAL 0
-#define PAT_MEMUSE_GI 128
-#define PAT_MEMUSE_GNI 129
-#endif /* __LP64__ */
-
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -478,12 +309,6 @@ extern int pdc_type;
#define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */
#define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */
-#ifdef CONFIG_PARISC64
-#define is_pdc_pat() (PDC_TYPE_PAT == pdc_type)
-#else
-#define is_pdc_pat() (0)
-#endif
-
struct pdc_chassis_info { /* for PDC_CHASSIS_INFO */
unsigned long actcnt; /* actual number of bytes returned */
unsigned long maxcnt; /* maximum number of bytes that could be returned */
@@ -521,10 +346,10 @@ struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
#ifdef __LP64__
cc_padW:32,
#endif
- cc_alias:4, /* alias boundaries for virtual addresses */
+ cc_alias: 4, /* alias boundaries for virtual addresses */
cc_block: 4, /* to determine most efficient stride */
cc_line : 3, /* maximum amount written back as a result of store (multiple of 16 bytes) */
- cc_pad0 : 2, /* reserved */
+ cc_shift: 2, /* how much to shift cc_block left */
cc_wt : 1, /* 0 = WT-Dcache, 1 = WB-Dcache */
cc_sh : 2, /* 0 = separate I/D-cache, else shared I/D-cache */
cc_cst : 3, /* 0 = incoherent D-cache, 1=coherent D-cache */
@@ -674,40 +499,6 @@ struct pdc_tod {
unsigned long tod_usec;
};
-#ifdef __LP64__
-struct pdc_pat_cell_num {
- unsigned long cell_num;
- unsigned long cell_loc;
-};
-
-struct pdc_pat_cpu_num {
- unsigned long cpu_num;
- unsigned long cpu_loc;
-};
-
-struct pdc_pat_pd_addr_map_entry {
- unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
- unsigned char reserve1[5];
- unsigned char memory_type;
- unsigned char memory_usage;
- unsigned long paddr;
- unsigned int pages; /* Length in 4K pages */
- unsigned int reserve2;
- unsigned long cell_map;
-};
-
-/* FIXME: mod[508] should really be a union of the various mod components */
-struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
- unsigned long cba; /* function 0 configuration space address */
- unsigned long mod_info; /* module information */
- unsigned long mod_location; /* physical location of the module */
- struct hardware_path mod_path; /* hardware path */
- unsigned long mod[508]; /* PAT cell module components */
-};
-
-typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
-#endif /* __LP64__ */
-
/* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */
struct pdc_hpmc_pim_11 { /* PDC_PIM */
@@ -969,52 +760,6 @@ int pdc_sti_call(unsigned long func, unsigned long flags,
unsigned long inptr, unsigned long outputr,
unsigned long glob_cfg);
-#ifdef __LP64__
-int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
-int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
-int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod,
- unsigned long view_type, void *mem_addr);
-int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
-int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
-int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
-int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
- unsigned long count, unsigned long offset);
-
-/********************************************************************
-* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
-* ----------------------------------------------------------
-* Bit 0 to 51 - conf_base_addr
-* Bit 52 to 62 - reserved
-* Bit 63 - endianess bit
-********************************************************************/
-#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
-
-/********************************************************************
-* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
-* ----------------------------------------------------
-* Bit 0 to 7 - entity type
-* 0 = central agent, 1 = processor,
-* 2 = memory controller, 3 = system bus adapter,
-* 4 = local bus adapter, 5 = processor bus converter,
-* 6 = crossbar fabric connect, 7 = fabric interconnect,
-* 8 to 254 reserved, 255 = unknown.
-* Bit 8 to 15 - DVI
-* Bit 16 to 23 - IOC functions
-* Bit 24 to 39 - reserved
-* Bit 40 to 63 - mod_pages
-* number of 4K pages a module occupies starting at conf_base_addr
-********************************************************************/
-#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
-#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
-#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
-#define PAT_GET_MOD_PAGES(value)(((value) & 0xffffffUL)
-
-#else /* !__LP64__ */
-/* No PAT support for 32-bit kernels...sorry */
-#define pdc_pat_get_irt_size(num_entries, cell_numn) PDC_BAD_PROC
-#define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC
-#endif /* !__LP64__ */
-
extern void pdc_init(void);
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-parisc/pdcpat.h b/include/asm-parisc/pdcpat.h
index 2f74d900812a10..715d94da61c79d 100644
--- a/include/asm-parisc/pdcpat.h
+++ b/include/asm-parisc/pdcpat.h
@@ -6,12 +6,11 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) Hewlett Packard (Paul Bame <bame@puffin.external.hp.com>)
- * Copyright 2000 (c) Grant Grundler <grundler@puffin.external.hp.com>
+ * Copyright 2000 (c) Hewlett Packard (Paul Bame <bame()spam.parisc-linux.org>)
+ * Copyright 2000,2004 (c) Grant Grundler <grundler()nahspam.parisc-linux.org>
*/
-/* PDC PAT CELL */
#define PDC_PAT_CELL 64L /* Interface for gaining and
* manipulatin g cell state within PD */
#define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */
@@ -60,17 +59,17 @@
#define PAT_GNIP 7 /* GNI Reserved Space */
-/* PDC PAT CHASSIS LOG */
-#define PDC_PAT_CHASSIS_LOG 65L /* Platform logging & forward
- ** progress functions */
+/* PDC PAT CHASSIS LOG -- Platform logging & forward progress functions */
+
+#define PDC_PAT_CHASSIS_LOG 65L
#define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */
#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
-/* PDC PAT CPU */
-#define PDC_PAT_CPU 67L /* Interface to CPU configuration
- * within the protection domain */
+/* PDC PAT CPU -- CPU configuration within the protection domain */
+
+#define PDC_PAT_CPU 67L
#define PDC_PAT_CPU_INFO 0L /* Return CPU config info */
#define PDC_PAT_CPU_DELETE 1L /* Delete CPU */
#define PDC_PAT_CPU_ADD 2L /* Add CPU */
@@ -83,32 +82,33 @@
#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
* Cleansing Mode */
-/* PDC PAT EVENT */
+/* PDC PAT EVENT -- Platform Events */
-#define PDC_PAT_EVENT 68L /* Interface to Platform Events */
+#define PDC_PAT_EVENT 68L
#define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */
#define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */
#define PDC_PAT_EVENT_SCAN 2L /* Scan Event */
#define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */
#define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args */
-/* PDC PAT HPMC */
+/* PDC PAT HPMC -- Cause processor to go into spin loop, and wait
+ * for wake up from Monarch Processor.
+ */
-#define PDC_PAT_HPMC 70L /* Cause processor to go into spin
- ** loop, and wait for wake up from
- ** Monarch Processor */
+#define PDC_PAT_HPMC 70L
#define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */
#define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC
- * will use to interrupt OS during machine
- * check rendezvous */
+ * will use to interrupt OS during
+ * machine check rendezvous */
/* parameters for PDC_PAT_HPMC_SET_PARAMS: */
#define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */
#define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */
-/* PDC PAT IO */
-#define PDC_PAT_IO 71L /* On-line services for I/O modules */
+/* PDC PAT IO -- On-line services for I/O modules */
+
+#define PDC_PAT_IO 71L
#define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info*/
#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
/* Hardware Path */
@@ -135,9 +135,10 @@
#define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */
#define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */
-/* PDC PAT MEM */
-#define PDC_PAT_MEM 72L /* Manage memory page deallocation */
+/* PDC PAT MEM -- Manage memory page deallocation */
+
+#define PDC_PAT_MEM 72L
#define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */
#define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */
#define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */
@@ -159,18 +160,99 @@
#define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */
#define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */
-/* PDC PAT NVOLATILE */
-#define PDC_PAT_NVOLATILE 73L /* Access Non-Volatile Memory */
-#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
-#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
-#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
-#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
-#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
+/* PDC PAT NVOLATILE -- Access Non-Volatile Memory */
+
+#define PDC_PAT_NVOLATILE 73L
+#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
+#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
+#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
+
+/* PDC PAT PD */
+#define PDC_PAT_PD 74L /* Protection Domain Info */
+#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
+
+/* PDC_PAT_PD_GET_ADDR_MAP entry types */
+#define PAT_MEMORY_DESCRIPTOR 1
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory types */
+#define PAT_MEMTYPE_MEMORY 0
+#define PAT_MEMTYPE_FIRMWARE 4
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
+#define PAT_MEMUSE_GENERAL 0
+#define PAT_MEMUSE_GI 128
+#define PAT_MEMUSE_GNI 129
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#ifdef CONFIG_PARISC64
+#define is_pdc_pat() (PDC_TYPE_PAT == pdc_type)
+extern int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
+extern int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
+#else /* ! CONFIG_PARISC64 */
+/* No PAT support for 32-bit kernels...sorry */
+#define is_pdc_pat() (0)
+#define pdc_pat_get_irt_size(num_entries, cell_numn) PDC_BAD_PROC
+#define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC
+#endif /* ! CONFIG_PARISC64 */
+
+
+struct pdc_pat_cell_num {
+ unsigned long cell_num;
+ unsigned long cell_loc;
+};
+
+struct pdc_pat_cpu_num {
+ unsigned long cpu_num;
+ unsigned long cpu_loc;
+};
+
+struct pdc_pat_pd_addr_map_entry {
+ unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
+ unsigned char reserve1[5];
+ unsigned char memory_type;
+ unsigned char memory_usage;
+ unsigned long paddr;
+ unsigned int pages; /* Length in 4K pages */
+ unsigned int reserve2;
+ unsigned long cell_map;
+};
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
+* ----------------------------------------------------------
+* Bit 0 to 51 - conf_base_addr
+* Bit 52 to 62 - reserved
+* Bit 63 - endianess bit
+********************************************************************/
+#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
+* ----------------------------------------------------
+* Bit 0 to 7 - entity type
+* 0 = central agent, 1 = processor,
+* 2 = memory controller, 3 = system bus adapter,
+* 4 = local bus adapter, 5 = processor bus converter,
+* 6 = crossbar fabric connect, 7 = fabric interconnect,
+* 8 to 254 reserved, 255 = unknown.
+* Bit 8 to 15 - DVI
+* Bit 16 to 23 - IOC functions
+* Bit 24 to 39 - reserved
+* Bit 40 to 63 - mod_pages
+* number of 4K pages a module occupies starting at conf_base_addr
+********************************************************************/
+#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
+#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
+#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
+#define PAT_GET_MOD_PAGES(value)(((value) & 0xffffffUL)
+
+
/*
** PDC_PAT_CELL_GET_INFO return block
*/
@@ -192,26 +274,34 @@ typedef struct pdc_pat_cell_info_rtn_block {
/* FIXME: mod[508] should really be a union of the various mod components */
struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
- unsigned long cba; /* function 0 configuration space address */
- unsigned long mod_info; /* module information */
- unsigned long mod_location; /* physical location of the module */
- unsigned long mod_path; /* module path (device path - layers) */
+ unsigned long cba; /* func 0 cfg space address */
+ unsigned long mod_info; /* module information */
+ unsigned long mod_location; /* physical location of the module */
+ struct hardware_path mod_path; /* module path (device path - layers) */
unsigned long mod[508]; /* PAT cell module components */
} __attribute__((aligned(8))) ;
typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
-extern int pdc_pat_cell_get_number(void *);
-extern int pdc_pat_cell_module(void *, unsigned long, unsigned long, unsigned long, void *);
+extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
+extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
+extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
+extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
+
+extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
+
+
+extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val);
+extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val);
+
+
/* Flag to indicate this is a PAT box...don't use this unless you
** really have to...it might go away some day.
*/
-#ifdef __LP64__
extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
-#endif
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
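
[Editor's note: a small decoding sketch, not part of the patch. The bit-layout comments moved into pdcpat.h above (from pdc.h) pair with the PAT_GET_* accessors; the mod_info value here is invented.]

    unsigned long mod_info = 0x0102030000000010UL;

    unsigned long entity = PAT_GET_ENTITY(mod_info);   /* top byte (PA bits 0..7)  -> 0x01, a processor */
    unsigned long dvi    = PAT_GET_DVI(mod_info);      /* PA bits 8..15            -> 0x02 */
    unsigned long ioc    = PAT_GET_IOC(mod_info);      /* PA bits 16..23           -> 0x03 */
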
diff --git a/include/asm-parisc/pgalloc.h b/include/asm-parisc/pgalloc.h
index 7b2a36c9d7a07f..53a6fec7df9595 100644
--- a/include/asm-parisc/pgalloc.h
+++ b/include/asm-parisc/pgalloc.h
@@ -21,7 +21,7 @@
* kernel for machines with under 4GB of memory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|GFP_DMA,
+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
PGD_ALLOC_ORDER);
pgd_t *actual_pgd = pgd;
@@ -30,13 +30,15 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#ifdef __LP64__
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
- * with _PAGE_GATEWAY as a signal to the system that this
+ * with PxD_FLAG_ATTACHED as a signal to the system that this
* pmd entry may not be cleared. */
- pgd_val(*actual_pgd) = (_PAGE_TABLE | _PAGE_GATEWAY) +
- (__u32)__pa((unsigned long)pgd);
+ __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
+ PxD_FLAG_VALID |
+ PxD_FLAG_ATTACHED)
+ + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
/* The first pmd entry also is marked with _PAGE_GATEWAY as
* a signal that this pmd may not be freed */
- pgd_val(*pgd) = _PAGE_GATEWAY;
+ __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
}
return actual_pgd;
@@ -56,14 +58,13 @@ static inline void pgd_free(pgd_t *pgd)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
- pgd_val(*pgd) = _PAGE_TABLE + (__u32)__pa((unsigned long)pmd);
+ __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
+ (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
-/* NOTE: pmd must be in ZONE_DMA (<4GB) so the pgd pointer can be
- * housed in 32 bits */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT|GFP_DMA,
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
PMD_ORDER);
if (pmd)
memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
@@ -73,7 +74,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
static inline void pmd_free(pmd_t *pmd)
{
#ifdef __LP64__
- if(pmd_val(*pmd) & _PAGE_GATEWAY)
+ if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd;
* cannot free it */
return;
@@ -102,23 +103,24 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
#ifdef __LP64__
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
- if(pmd_val(*pmd) & _PAGE_GATEWAY)
- pmd_val(*pmd) = (_PAGE_TABLE | _PAGE_GATEWAY)
- + (__u32)__pa((unsigned long)pte);
+ if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+ __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
+ PxD_FLAG_VALID |
+ PxD_FLAG_ATTACHED)
+ + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
else
#endif
- pmd_val(*pmd) = _PAGE_TABLE + (__u32)__pa((unsigned long)pte);
+ __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+ + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
-/* NOTE: pte must be in ZONE_DMA (<4GB) so that the pmd pointer
- * can be housed in 32 bits */
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|GFP_DMA);
+ struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (likely(page != NULL))
clear_page(page_address(page));
return page;
@@ -127,7 +129,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|GFP_DMA);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (likely(pte != NULL))
clear_page(pte);
return pte;
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 37da21882a2654..1593a8a7a5fbc7 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -57,10 +57,14 @@
/* This is the size of the initially mapped kernel memory (i.e. currently
* 0 to 1<<23 == 8MB */
+#ifdef CONFIG_64BIT
+#define KERNEL_INITIAL_ORDER 24
+#else
#define KERNEL_INITIAL_ORDER 23
+#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
#define PT_NLEVELS 3
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PMD_ORDER 1 /* Number of pages per pmd */
@@ -177,6 +181,21 @@ extern void *vmalloc_start;
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
+/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
+ * are page-aligned, we don't care about the PAGE_OFFSET bits, except
+ * for a few meta-information bits, so we shift the address to be
+ * able to effectively address 40-bits of physical address space. */
+#define _PxD_PRESENT_BIT 31
+#define _PxD_ATTACHED_BIT 30
+#define _PxD_VALID_BIT 29
+
+#define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT))
+#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
+#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
+#define PxD_FLAG_MASK (0xf)
+#define PxD_FLAG_SHIFT (4)
+#define PxD_VALUE_SHIFT (8)
+
#ifndef __ASSEMBLY__
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
@@ -244,49 +263,49 @@ extern unsigned long *empty_zero_page;
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
-#ifdef __LP64__
+#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
+#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
+#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+
+#ifdef CONFIG_64BIT
/* The first entry of the permanent pmd is not there if it contains
* the gateway marker */
-#define pmd_none(x) (!pmd_val(x) || pmd_val(x) == _PAGE_GATEWAY)
-#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _PAGE_TABLE && (pmd_val(x) & ~PAGE_MASK) != (_PAGE_TABLE | _PAGE_GATEWAY))
+#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x) (!pmd_val(x))
-#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _PAGE_TABLE)
#endif
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))
+#define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
-#ifdef __LP64__
- if(pmd_val(*pmd) & _PAGE_GATEWAY)
+#ifdef CONFIG_64BIT
+ if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the entry pointing to the permanent pmd
* attached to the pgd; cannot clear it */
- pmd_val(*pmd) = _PAGE_GATEWAY;
+ __pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
else
#endif
- pmd_val(*pmd) = 0;
+ __pmd_val_set(*pmd, 0);
}
#if PT_NLEVELS == 3
-#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+#define pgd_page(pgd) ((unsigned long) __va(pgd_address(pgd)))
/* For 64 bit we have three level tables */
#define pgd_none(x) (!pgd_val(x))
-#ifdef __LP64__
-#define pgd_bad(x) ((pgd_val(x) & ~PAGE_MASK) != _PAGE_TABLE && (pgd_val(x) & ~PAGE_MASK) != (_PAGE_TABLE | _PAGE_GATEWAY))
-#else
-#define pgd_bad(x) ((pgd_val(x) & ~PAGE_MASK) != _PAGE_TABLE)
-#endif
-#define pgd_present(x) (pgd_val(x) & _PAGE_PRESENT)
+#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))
+#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
-#ifdef __LP64__
- if(pgd_val(*pgd) & _PAGE_GATEWAY)
+#ifdef CONFIG_64BIT
+ if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd; cannot
* free it */
return;
#endif
- pgd_val(*pgd) = 0;
+ __pgd_val_set(*pgd, 0);
}
#else
/*
@@ -353,15 +372,11 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
-#ifdef CONFIG_DISCONTIGMEM
-#define pte_page(x) (phys_to_page(pte_val(x)))
-#else
-#define pte_page(x) (mem_map+(pte_val(x) >> PAGE_SHIFT))
-#endif
+#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_address(pmd)))
-#define __pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
@@ -419,7 +434,7 @@ static inline int ptep_test_and_clear_young(pte_t *ptep)
#ifdef CONFIG_SMP
if (!pte_young(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), ptep);
+ return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
#else
pte_t pte = *ptep;
if (!pte_young(pte))
@@ -434,7 +449,7 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep)
#ifdef CONFIG_SMP
if (!pte_dirty(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), ptep);
+ return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
#else
pte_t pte = *ptep;
if (!pte_dirty(pte))
@@ -444,11 +459,7 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep)
#endif
}
-#ifdef CONFIG_SMP
extern spinlock_t pa_dbit_lock;
-#else
-static int pa_dbit_lock; /* dummy to keep the compilers happy */
-#endif
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
@@ -483,7 +494,7 @@ static inline void ptep_set_wrprotect(pte_t *ptep)
static inline void ptep_mkdirty(pte_t *ptep)
{
#ifdef CONFIG_SMP
- set_bit(xlate_pabit(_PAGE_DIRTY_BIT), ptep);
+ set_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
#else
pte_t old_pte = *ptep;
set_pte(ptep, pte_mkdirty(old_pte));
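
[Editor's note: a round-trip sketch, not part of the patch. The pgtable.h hunk introduces the PxD_* encoding: a pmd/pgd entry now stores a page-aligned physical address shifted right by PxD_VALUE_SHIFT (8), with the flag bits in the freed-up low bits, which is what gives the 40-bit physical reach mentioned in the comment. The address below is invented.]

    unsigned long table_pa = 0x12345000UL;         /* page-aligned physical address */
    pmd_t pmd;

    __pmd_val_set(pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
                       + (table_pa >> PxD_VALUE_SHIFT));

    /* pmd_address(pmd) masks off PxD_FLAG_MASK and shifts back:
     * (pmd_val(pmd) & ~0xf) << 8 == 0x12345000 again. */
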
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 1efee58869bb74..fde77ac35463b1 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -52,11 +52,18 @@ extern void smp_send_reschedule(int cpu);
extern unsigned long cpu_present_mask;
#define smp_processor_id() (current_thread_info()->cpu)
-#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
#endif /* CONFIG_SMP */
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
+static inline int __cpu_disable (void) {
+ return 0;
+}
+static inline void __cpu_die (unsigned int cpu) {
+ while(1)
+ ;
+}
+extern int __cpu_up (unsigned int cpu);
#endif /* __ASM_SMP_H */
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 1ad3aaa6da315d..7a2c25d477b5b9 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -8,8 +8,11 @@
* the semaphore address has to be 16-byte aligned.
*/
+#ifndef CONFIG_DEBUG_SPINLOCK
+
+#define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { { 1, 1, 1, 1 } }
+#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
@@ -41,6 +44,83 @@ static inline int _raw_spin_trylock(spinlock_t *x)
return __ldcw(a) != 0;
}
+#define spin_lock_own(LOCK, LOCATION) ((void)0)
+
+#else /* !(CONFIG_DEBUG_SPINLOCK) */
+
+#define SPINLOCK_MAGIC 0x1D244B3C
+
+#define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
+#undef SPIN_LOCK_UNLOCKED
+#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
+
+#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
+#define CHECK_LOCK(x) \
+ do { \
+ if (unlikely((x)->magic != SPINLOCK_MAGIC)) { \
+ printk(KERN_ERR "%s:%d: spin_is_locked" \
+ " on uninitialized spinlock %p.\n", \
+ __FILE__, __LINE__, (x)); \
+ } \
+ } while(0)
+
+#define spin_is_locked(x) \
+ ({ \
+ CHECK_LOCK(x); \
+ volatile unsigned int *a = __ldcw_align(x); \
+ if (unlikely((*a == 0) && (x)->babble)) { \
+ (x)->babble--; \
+ printk("KERN_WARNING \
+ %s:%d: spin_is_locked(%s/%p) already" \
+ " locked by %s:%d in %s at %p(%d)\n", \
+ __FILE__,__LINE__, (x)->module, (x), \
+ (x)->bfile, (x)->bline, (x)->task->comm,\
+ (x)->previous, (x)->oncpu); \
+ } \
+ *a == 0; \
+ })
+
+#define spin_unlock_wait(x) \
+ do { \
+ CHECK_LOCK(x); \
+ volatile unsigned int *a = __ldcw_align(x); \
+ if (unlikely((*a == 0) && (x)->babble)) { \
+ (x)->babble--; \
+ printk("KERN_WARNING \
+ %s:%d: spin_unlock_wait(%s/%p)" \
+ " owned by %s:%d in %s at %p(%d)\n", \
+ __FILE__,__LINE__, (x)->module, (x), \
+ (x)->bfile, (x)->bline, (x)->task->comm,\
+ (x)->previous, (x)->oncpu); \
+ } \
+ barrier(); \
+ } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
+
+extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
+extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
+extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
+
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+
+#define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__)
+#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
+#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
+
+/* just in case we need it */
+#define spin_lock_own(LOCK, LOCATION) \
+do { \
+ volatile unsigned int *a = __ldcw_align(LOCK); \
+ if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \
+ printk("KERN_WARNING \
+ %s: called on %d from %p but lock %s on %d\n", \
+ LOCATION, smp_processor_id(), \
+ __builtin_return_address(0), \
+ (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \
+} while (0)
+
+#endif /* !(CONFIG_DEBUG_SPINLOCK) */
+
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
@@ -50,7 +130,7 @@ typedef struct {
volatile int counter;
} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { { { 1, 1, 1, 1 } }, 0 }
+#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
@@ -59,6 +139,10 @@ typedef struct {
/* read_lock, read_unlock are pretty straightforward. Of course it somehow
* sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
+#ifdef CONFIG_DEBUG_RWLOCK
+extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
+#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
+#else
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
unsigned long flags;
@@ -70,6 +154,7 @@ static __inline__ void _raw_read_lock(rwlock_t *rw)
_raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
+#endif /* CONFIG_DEBUG_RWLOCK */
static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
@@ -92,6 +177,10 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
* writers) in interrupt handlers someone fucked up and we'd dead-lock
* sooner or later anyway. prumpf */
+#ifdef CONFIG_DEBUG_RWLOCK
+extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
+#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
+#else
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
@@ -109,6 +198,7 @@ retry:
/* got it. now leave without unlocking */
rw->counter = -1; /* remember we are locked */
}
+#endif /* CONFIG_DEBUG_RWLOCK */
/* write_unlock is absolutely trivial - we don't have to wait for anything */
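
[Editor's note: a minimal call-site sketch, not part of the patch. The debuglocks half of the update (CONFIG_DEBUG_SPINLOCK above) swaps the raw lock primitives for wrappers that record where a lock was last taken.]

    static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;   /* picks up the extra debug fields */

    static void demo(void)
    {
            _raw_spin_lock(&demo_lock);    /* expands to _dbg_spin_lock(&demo_lock, __FILE__, __LINE__) */
            /* ...critical section... */
            _raw_spin_unlock(&demo_lock);  /* expands to _dbg_spin_unlock(&demo_lock, __FILE__, __LINE__) */
    }

A later spin_is_locked()/spin_unlock_wait() complaint can then name the file, line, task and CPU that last owned the lock.
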
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index d73c20d42af96a..e2b84285da9386 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -166,6 +166,16 @@ static inline void set_eiem(unsigned long val)
typedef struct {
volatile unsigned int lock[4];
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned long magic;
+ volatile unsigned int babble;
+ const char *module;
+ char *bfile;
+ int bline;
+ int oncpu;
+ void *previous;
+ struct task_struct * task;
+#endif
} spinlock_t;
#define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
diff --git a/include/asm-parisc/thread_info.h b/include/asm-parisc/thread_info.h
index c709d004952814..07c1a9f408276a 100644
--- a/include/asm-parisc/thread_info.h
+++ b/include/asm-parisc/thread_info.h
@@ -9,11 +9,11 @@
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain;/* execution domain */
- __u32 flags; /* thread_info flags (see TIF_*) */
- __u32 cpu; /* current CPU */
+ unsigned long flags; /* thread_info flags (see TIF_*) */
mm_segment_t addr_limit; /* user-level address space limit */
- struct restart_block restart_block;
+ __u32 cpu; /* current CPU */
__s32 preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
+ struct restart_block restart_block;
};
#define INIT_THREAD_INFO(tsk) \
diff --git a/include/asm-parisc/unistd.h b/include/asm-parisc/unistd.h
index 72b457db142270..c862d24d554740 100644
--- a/include/asm-parisc/unistd.h
+++ b/include/asm-parisc/unistd.h
@@ -958,9 +958,9 @@ static inline int close(int fd)
return sys_close(fd);
}
-static inline int _exit(int exitcode)
+static inline void _exit(int exitcode)
{
- return sys_exit(exitcode);
+ sys_exit(exitcode);
}
static inline pid_t waitpid(pid_t pid, int *wait_stat, int options)
diff --git a/include/asm-parisc/unwind.h b/include/asm-parisc/unwind.h
index 5a52cb1f11749f..ff9396b0172952 100644
--- a/include/asm-parisc/unwind.h
+++ b/include/asm-parisc/unwind.h
@@ -50,22 +50,22 @@ struct unwind_table {
};
struct unwind_frame_info {
- unsigned long sp;
- unsigned long ip;
struct task_struct *t;
/* Eventually we would like to be able to get at any of the registers
available; but for now we only try to get the sp and ip for each
frame */
/* struct pt_regs regs; */
+ unsigned long sp, ip, rp;
unsigned long prev_sp, prev_ip;
};
void * unwind_table_add(const char *name, unsigned long base_addr,
unsigned long gp,
- const void *start, const void *end);
+ void *start, void *end);
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
- struct pt_regs *regs);
+ unsigned long sp, unsigned long ip, unsigned long rp);
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t);
+void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs);
int unwind_once(struct unwind_frame_info *info);
int unwind_to_user(struct unwind_frame_info *info);
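
[Editor's note: a minimal backtrace sketch, not part of the patch. The unwind.h change takes explicit sp/ip/rp values in unwind_frame_init(), with unwind_frame_init_running() covering the pt_regs case. This assumes unwind_once() returns 0 while frames remain, and that sp/ip/rp were captured by the caller.]

    struct unwind_frame_info info;

    unwind_frame_init(&info, current, sp, ip, rp);
    while (!unwind_once(&info))                    /* assumed: 0 while another frame exists */
            printk("ip %lx sp %lx\n", info.ip, info.sp);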