Diffstat (limited to 'arch/ia64/sn/io')
-rw-r--r--  arch/ia64/sn/io/Makefile | 19
-rw-r--r--  arch/ia64/sn/io/alenlist.c | 899
-rw-r--r--  arch/ia64/sn/io/ate_utils.c | 3
-rw-r--r--  arch/ia64/sn/io/cdl.c | 140
-rw-r--r--  arch/ia64/sn/io/drivers/Makefile | 14
-rw-r--r--  arch/ia64/sn/io/drivers/hubdev.c (renamed from arch/ia64/sn/io/hubdev.c) | 46
-rw-r--r--  arch/ia64/sn/io/drivers/ifconfig_net.c (renamed from arch/ia64/sn/io/ifconfig_net.c) | 6
-rw-r--r--  arch/ia64/sn/io/drivers/ioconfig_bus.c (renamed from arch/ia64/sn/io/ioconfig_bus.c) | 33
-rw-r--r--  arch/ia64/sn/io/drivers/pciba.c (renamed from arch/ia64/sn/io/pciba.c) | 129
-rw-r--r--  arch/ia64/sn/io/eeprom.c | 1454
-rw-r--r--  arch/ia64/sn/io/efi-rtc.c | 185
-rw-r--r--  arch/ia64/sn/io/hubspc.c | 251
-rw-r--r--  arch/ia64/sn/io/hwgdfs/Makefile | 12
-rw-r--r--  arch/ia64/sn/io/hwgdfs/hcl.c (renamed from arch/ia64/sn/io/hcl.c) | 716
-rw-r--r--  arch/ia64/sn/io/hwgdfs/hcl_util.c (renamed from arch/ia64/sn/io/hcl_util.c) | 36
-rw-r--r--  arch/ia64/sn/io/hwgdfs/invent_stub.c | 146
-rw-r--r--  arch/ia64/sn/io/hwgdfs/labelcl.c (renamed from arch/ia64/sn/io/labelcl.c) | 17
-rw-r--r--  arch/ia64/sn/io/hwgfs/Makefile | 13
-rw-r--r--  arch/ia64/sn/io/hwgfs/hcl.c | 938
-rw-r--r--  arch/ia64/sn/io/hwgfs/hcl_util.c | 200
-rw-r--r--  arch/ia64/sn/io/hwgfs/hwgfs.h | 23
-rw-r--r--  arch/ia64/sn/io/hwgfs/interface.c | 353
-rw-r--r--  arch/ia64/sn/io/hwgfs/invent_stub.c | 148
-rw-r--r--  arch/ia64/sn/io/hwgfs/labelcl.c | 657
-rw-r--r--  arch/ia64/sn/io/hwgfs/ramfs.c | 233
-rw-r--r--  arch/ia64/sn/io/invent.c | 224
-rw-r--r--  arch/ia64/sn/io/io.c | 198
-rw-r--r--  arch/ia64/sn/io/klconflib.c | 1042
-rw-r--r--  arch/ia64/sn/io/klgraph.c | 804
-rw-r--r--  arch/ia64/sn/io/klgraph_hack.c | 341
-rw-r--r--  arch/ia64/sn/io/l1.c | 3056
-rw-r--r--  arch/ia64/sn/io/l1_command.c | 1378
-rw-r--r--  arch/ia64/sn/io/machvec/Makefile | 12
-rw-r--r--  arch/ia64/sn/io/machvec/iomv.c | 71
-rw-r--r--  arch/ia64/sn/io/machvec/pci.c (renamed from arch/ia64/sn/io/pci.c) | 21
-rw-r--r--  arch/ia64/sn/io/machvec/pci_bus_cvlink.c (renamed from arch/ia64/sn/io/sn2/pci_bus_cvlink.c) | 321
-rw-r--r--  arch/ia64/sn/io/machvec/pci_dma.c (renamed from arch/ia64/sn/io/pci_dma.c) | 225
-rw-r--r--  arch/ia64/sn/io/ml_SN_init.c | 235
-rw-r--r--  arch/ia64/sn/io/ml_iograph.c | 1570
-rw-r--r--  arch/ia64/sn/io/module.c | 312
-rw-r--r--  arch/ia64/sn/io/pci_bus_cvlink.c | 737
-rw-r--r--  arch/ia64/sn/io/pciio.c | 1507
-rw-r--r--  arch/ia64/sn/io/platform_init/Makefile | 12
-rw-r--r--  arch/ia64/sn/io/platform_init/irix_io_init.c | 89
-rw-r--r--  arch/ia64/sn/io/platform_init/sgi_io_init.c | 109
-rw-r--r--  arch/ia64/sn/io/sgi_if.c | 39
-rw-r--r--  arch/ia64/sn/io/sgi_io_init.c | 308
-rw-r--r--  arch/ia64/sn/io/sgi_io_sim.c | 77
-rw-r--r--  arch/ia64/sn/io/sn1/hub_intr.c | 307
-rw-r--r--  arch/ia64/sn/io/sn1/hubcounters.c | 283
-rw-r--r--  arch/ia64/sn/io/sn1/huberror.c | 228
-rw-r--r--  arch/ia64/sn/io/sn1/ip37.c | 47
-rw-r--r--  arch/ia64/sn/io/sn1/mem_refcnt.c | 220
-rw-r--r--  arch/ia64/sn/io/sn1/ml_SN_intr.c | 1154
-rw-r--r--  arch/ia64/sn/io/sn1/pcibr.c | 7704
-rw-r--r--  arch/ia64/sn/io/sn2/Makefile | 7
-rw-r--r--  arch/ia64/sn/io/sn2/bte_error.c | 320
-rw-r--r--  arch/ia64/sn/io/sn2/kdba_io.c | 76
-rw-r--r--  arch/ia64/sn/io/sn2/klconflib.c | 273
-rw-r--r--  arch/ia64/sn/io/sn2/klgraph.c | 72
-rw-r--r--  arch/ia64/sn/io/sn2/l1.c | 86
-rw-r--r--  arch/ia64/sn/io/sn2/l1_command.c | 104
-rw-r--r--  arch/ia64/sn/io/sn2/ml_SN_init.c | 23
-rw-r--r--  arch/ia64/sn/io/sn2/ml_SN_intr.c | 95
-rw-r--r--  arch/ia64/sn/io/sn2/ml_iograph.c | 440
-rw-r--r--  arch/ia64/sn/io/sn2/module.c | 1
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/Makefile | 8
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c | 95
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/pcibr_config.c | 196
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c | 1125
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/pcibr_error.c | 445
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/pcibr_hints.c | 35
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/pcibr_idbg.c | 147
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c | 187
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c | 190
-rw-r--r--  arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c | 457
-rw-r--r--  arch/ia64/sn/io/sn2/pciio.c | 442
-rw-r--r--  arch/ia64/sn/io/sn2/pic.c | 55
-rw-r--r--  arch/ia64/sn/io/sn2/sgi_io_init.c | 226
-rw-r--r--  arch/ia64/sn/io/sn2/shub.c | 269
-rw-r--r--  arch/ia64/sn/io/sn2/shub_intr.c | 33
-rw-r--r--  arch/ia64/sn/io/sn2/shuberror.c | 300
-rw-r--r--  arch/ia64/sn/io/sn2/shubio.c | 34
-rw-r--r--  arch/ia64/sn/io/sn2/xbow.c | 252
-rw-r--r--  arch/ia64/sn/io/sn2/xtalk.c | 281
-rw-r--r--  arch/ia64/sn/io/stubs.c | 140
-rw-r--r--  arch/ia64/sn/io/xbow.c | 1325
-rw-r--r--  arch/ia64/sn/io/xswitch.c | 69
-rw-r--r--  arch/ia64/sn/io/xtalk.c | 1024
89 files changed, 5688 insertions, 32446 deletions
diff --git a/arch/ia64/sn/io/Makefile b/arch/ia64/sn/io/Makefile
index 8622f00bc7d572..bd72eda6a8b5d3 100644
--- a/arch/ia64/sn/io/Makefile
+++ b/arch/ia64/sn/io/Makefile
@@ -9,18 +9,15 @@
# Makefile for the sn io routines.
#
-EXTRA_CFLAGS := -DLITTLE_ENDIAN
+EXTRA_CFLAGS := -DLITTLE_ENDIAN -DSHUB_SWAP_WAR
-ifdef CONFIG_IA64_SGI_SN2
-EXTRA_CFLAGS += -DSHUB_SWAP_WAR
-endif
-
-obj-$(CONFIG_IA64_SGI_SN) += stubs.o sgi_if.o xswitch.o klgraph_hack.o \
- hcl.o labelcl.o invent.o sgi_io_sim.o \
- klgraph_hack.o hcl_util.o cdl.o hubdev.o hubspc.o \
- alenlist.o pci.o pci_dma.o ate_utils.o \
- ifconfig_net.o io.o ioconfig_bus.o
+obj-$(CONFIG_IA64_SGI_SN) += sgi_if.o xswitch.o sgi_io_sim.o cdl.o ate_utils.o \
+ io.o machvec/ drivers/ platform_init/
obj-$(CONFIG_IA64_SGI_SN2) += sn2/
-obj-$(CONFIG_PCIBA) += pciba.o
+ifdef CONFIG_HWGFS_FS
+obj-$(CONFIG_HWGFS_FS) += hwgfs/
+else
+obj-$(CONFIG_DEVFS_FS) += hwgdfs/
+endif
diff --git a/arch/ia64/sn/io/alenlist.c b/arch/ia64/sn/io/alenlist.c
deleted file mode 100644
index 329407f6ccea82..00000000000000
--- a/arch/ia64/sn/io/alenlist.c
+++ /dev/null
@@ -1,899 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/* Implementation of Address/Length Lists. */
-
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mmzone.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/alenlist.h>
-
-/*
- * Logically, an Address/Length List is a list of Pairs, where each pair
- * holds an Address and a Length, all in some Address Space. In this
- * context, "Address Space" is a particular Crosstalk Widget address
- * space, a PCI device address space, a VME bus address space, a
- * physical memory address space, etc.
- *
- * The main use for these Lists is to provide a single mechanism that
- * describes where in an address space a DMA occurs. This allows the
- * various I/O Bus support layers to provide a single interface for
- * DMA mapping and DMA translation without regard to how the DMA target
- * was specified by upper layers. The upper layers commonly specify a
- * DMA target via a buf structure page list, a kernel virtual address,
- * a user virtual address, a vector of addresses (a la uio and iov),
- * or possibly a pfn list.
- *
- * Address/Length Lists also enable drivers to take advantage of their
- * inate scatter/gather capabilities in systems where some address
- * translation may be required between bus adapters. The driver forms
- * a List that represents physical memory targets. This list is passed
- * to the various adapters, which apply various translations. The final
- * list that's returned to the driver is in terms of its local address
- * address space -- addresses which can be passed off to a scatter/gather
- * capable DMA controller.
- *
- * The current implementation is intended to be useful both in kernels
- * that support interrupt threads (INTR_KTHREAD) and in systems that do
- * not support interrupt threads. Of course, in the latter case, some
- * interfaces can be called only within a suspendable context.
- *
- * Basic operations on Address/Length Lists include:
- * alenlist_create Create a list
- * alenlist_clear Clear a list
- * alenlist_destroy Destroy a list
- * alenlist_append Append a Pair to the end of a list
- * alenlist_replace Replace a Pair in the middle of a list
- * alenlist_get Get an Address/Length Pair from a list
- * alenlist_size Return the number of Pairs in a list
- * alenlist_concat Append one list to the end of another
- * alenlist_clone Create a new copy of a list
- *
- * Operations that convert from upper-level specifications to Address/
- * Length Lists currently include:
- * kvaddr_to_alenlist Convert from a kernel virtual address
- * uvaddr_to_alenlist Convert from a user virtual address
- * buf_to_alenlist Convert from a buf structure
- * alenlist_done Tell system that we're done with an alenlist
- * obtained from a conversion.
- * Additional convenience operations:
- * alenpair_init Create a list and initialize it with a Pair
- * alenpair_get Peek at the first pair on a List
- *
- * A supporting type for Address/Length Lists is an alenlist_cursor_t. A
- * cursor marks a position in a List, and determines which Pair is fetched
- * by alenlist_get.
- * alenlist_cursor_create Allocate and initialize a cursor
- * alenlist_cursor_destroy Free space consumed by a cursor
- * alenlist_cursor_init (Re-)Initialize a cursor to point
- * to the start of a list
- * alenlist_cursor_clone Clone a cursor (at the current offset)
- * alenlist_cursor_offset Return the number of bytes into
- * a list that this cursor marks
- * Multiple cursors can point at various points into a List. Also, each
- * list maintains one "internal cursor" which may be updated by alenlist_clear
- * and alenlist_get. If calling code simply wishes to scan sequentially
- * through a list starting at the beginning, and if it is the only user of
- * a list, it can rely on this internal cursor rather than managing a
- * separate explicit cursor.
- *
- * The current implementation allows callers to allocate both cursors and
- * the lists as local stack (structure) variables. This allows for some
- * extra efficiency at the expense of forward binary compatibility. It
- * is recommended that customer drivers refrain from local allocation.
- * In fact, we likely will choose to move the structures out of the public
- * header file into a private place in order to discourage this usage.
- *
- * Currently, no locking is provided by the alenlist implementation.
- *
- * Implementation notes:
- * For efficiency, Pairs are grouped into "chunks" of, say, 32 Pairs
- * and a List consists of some number of these chunks. Chunks are completely
- * invisible to calling code. Chunks should be large enough to hold most
- * standard-sized DMA's, but not so large that they consume excessive space.
- *
- * It is generally expected that Lists will be constructed at one time and
- * scanned at a later time. It is NOT expected that drivers will scan
- * a List while the List is simultaneously extended, although this is
- * theoretically possible with sufficient upper-level locking.
- *
- * In order to support demands of Real-Time drivers and in order to support
- * swapping under low-memory conditions, we support the concept of a
- * "pre-allocated fixed-sized List". After creating a List with
- * alenlist_create, a driver may explicitly grow the list (via "alenlist_grow")
- * to a specific number of Address/Length pairs. It is guaranteed that future
- * operations involving this list will never automatically grow the list
- * (i.e. if growth is ever required, the operation will fail). Additionally,
- * operations that use alenlist's (e.g. DMA operations) accept a flag which
- * causes processing to take place "in-situ"; that is, the input alenlist
- * entries are replaced with output alenlist entries. The combination of
- * pre-allocated Lists and in-situ processing allows us to avoid the
- * potential deadlock scenario where we sleep (waiting for memory) in the
- * swap out path.
- *
- * For debugging, we track the number of allocated Lists in alenlist_count
- * the number of allocated chunks in alenlist_chunk_count, and the number
- * of allocate cursors in alenlist_cursor_count. We also provide a debug
- * routine, alenlist_show, which dumps the contents of an Address/Length List.
- *
- * Currently, Lists are formed by drivers on-demand. Eventually, we may
- * associate an alenlist with a buf structure and keep it up to date as
- * we go along. In that case, buf_to_alenlist simply returns a pointer
- * to the existing List, and increments the Lists's reference count.
- * alenlist_done would decrement the reference count and destroys the List
- * if it was the last reference.
- *
- * Eventually alenlist's may allow better support for user-level scatter/
- * gather operations (e.g. via readv/writev): With proper support, we
- * could potentially handle a vector of reads with a single scatter/gather
- * DMA operation. This could be especially useful on NUMA systems where
- * there's more of a reason for users to use vector I/O operations.
- *
- * Eventually, alenlist's may replace kaio lists, vhand page lists,
- * buffer cache pfdat lists, DMA page lists, etc.
- */
-
-/* Opaque data types */
-
-/* An Address/Length pair. */
-typedef struct alen_s {
- alenaddr_t al_addr;
- size_t al_length;
-} alen_t;
-
-/*
- * Number of elements in one chunk of an Address/Length List.
- *
- * This size should be sufficient to hold at least an "average" size
- * DMA request. Must be at least 1, and should be a power of 2,
- * for efficiency.
- */
-#define ALEN_CHUNK_SZ ((512*1024)/NBPP)
-
-/*
- * A fixed-size set of Address/Length Pairs. Chunks of Pairs are strung together
- * to form a complete Address/Length List. Chunking is entirely hidden within the
- * alenlist implementation, and it simply makes allocation and growth of lists more
- * efficient.
- */
-typedef struct alenlist_chunk_s {
- alen_t alc_pair[ALEN_CHUNK_SZ];/* list of addr/len pairs */
- struct alenlist_chunk_s *alc_next; /* point to next chunk of pairs */
-} *alenlist_chunk_t;
-
-/*
- * An Address/Length List. An Address/Length List is allocated with alenlist_create.
- * Alternatively, a list can be allocated on the stack (local variable of type
- * alenlist_t) and initialized with alenpair_init or with a combination of
- * alenlist_clear and alenlist_append, etc. Code which statically allocates these
- * structures loses forward binary compatibility!
- *
- * A statically allocated List is sufficiently large to hold ALEN_CHUNK_SZ pairs.
- */
-struct alenlist_s {
- unsigned short al_flags;
- unsigned short al_logical_size; /* logical size of list, in pairs */
- unsigned short al_actual_size; /* actual size of list, in pairs */
- struct alenlist_chunk_s *al_last_chunk; /* pointer to last logical chunk */
- struct alenlist_cursor_s al_cursor; /* internal cursor */
- struct alenlist_chunk_s al_chunk; /* initial set of pairs */
- alenaddr_t al_compaction_address; /* used to compact pairs */
-};
-
-/* al_flags field */
-#define AL_FIXED_SIZE 0x1 /* List is pre-allocated, and of fixed size */
-
-
-struct zone *alenlist_zone = NULL;
-struct zone *alenlist_chunk_zone = NULL;
-struct zone *alenlist_cursor_zone = NULL;
-
-#if DEBUG
-int alenlist_count=0; /* Currently allocated Lists */
-int alenlist_chunk_count = 0; /* Currently allocated chunks */
-int alenlist_cursor_count = 0; /* Currently allocate cursors */
-#define INCR_COUNT(ptr) atomic_inc((ptr));
-#define DECR_COUNT(ptr) atomic_dec((ptr));
-#else
-#define INCR_COUNT(ptr)
-#define DECR_COUNT(ptr)
-#endif /* DEBUG */
-
-#if DEBUG
-static void alenlist_show(alenlist_t);
-#endif /* DEBUG */
-
-/*
- * Initialize Address/Length List management. One time initialization.
- */
-void
-alenlist_init(void)
-{
- alenlist_zone = snia_kmem_zone_init(sizeof(struct alenlist_s), "alenlist");
- alenlist_chunk_zone = snia_kmem_zone_init(sizeof(struct alenlist_chunk_s), "alchunk");
- alenlist_cursor_zone = snia_kmem_zone_init(sizeof(struct alenlist_cursor_s), "alcursor");
-#if DEBUG
- idbg_addfunc("alenshow", alenlist_show);
-#endif /* DEBUG */
-}
-
-
-/*
- * Initialize an Address/Length List cursor.
- */
-static void
-do_cursor_init(alenlist_t alenlist, alenlist_cursor_t cursorp)
-{
- cursorp->al_alenlist = alenlist;
- cursorp->al_offset = 0;
- cursorp->al_chunk = &alenlist->al_chunk;
- cursorp->al_index = 0;
- cursorp->al_bcount = 0;
-}
-
-
-/*
- * Create an Address/Length List, and clear it.
- * Set the cursor to the beginning.
- */
-alenlist_t
-alenlist_create(unsigned flags)
-{
- alenlist_t alenlist;
-
- alenlist = snia_kmem_zone_alloc(alenlist_zone, flags & AL_NOSLEEP ? VM_NOSLEEP : 0);
- if (alenlist) {
- INCR_COUNT(&alenlist_count);
-
- alenlist->al_flags = 0;
- alenlist->al_logical_size = 0;
- alenlist->al_actual_size = ALEN_CHUNK_SZ;
- alenlist->al_last_chunk = &alenlist->al_chunk;
- alenlist->al_chunk.alc_next = NULL;
- do_cursor_init(alenlist, &alenlist->al_cursor);
- }
-
- return(alenlist);
-}
-
-
-/*
- * Grow an Address/Length List so that all resources needed to contain
- * the specified number of Pairs are pre-allocated. An Address/Length
- * List that has been explicitly "grown" will never *automatically*
- * grow, shrink, or be destroyed.
- *
- * Pre-allocation is useful for Real-Time drivers and for drivers that
- * may be used along the swap-out path and therefore cannot afford to
- * sleep until memory is freed.
- *
- * The cursor is set to the beginning of the list.
- */
-int
-alenlist_grow(alenlist_t alenlist, size_t npairs)
-{
- /*
- * This interface should be used relatively rarely, so
- * the implementation is kept simple: We clear the List,
- * then append npairs bogus entries. Finally, we mark
- * the list as FIXED_SIZE and re-initialize the internal
- * cursor.
- */
-
- /*
- * Temporarily mark as non-fixed size, since we're about
- * to shrink and expand it.
- */
- alenlist->al_flags &= ~AL_FIXED_SIZE;
-
- /* Free whatever was in the alenlist. */
- alenlist_clear(alenlist);
-
- /* Allocate everything that we need via automatic expansion. */
- while (npairs--)
- if (alenlist_append(alenlist, 0, 0, AL_NOCOMPACT) == ALENLIST_FAILURE)
- return(ALENLIST_FAILURE);
-
- /* Now, mark as FIXED_SIZE */
- alenlist->al_flags |= AL_FIXED_SIZE;
-
- /* Clear out bogus entries */
- alenlist_clear(alenlist);
-
- /* Initialize internal cursor to the beginning */
- do_cursor_init(alenlist, &alenlist->al_cursor);
-
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Clear an Address/Length List so that it holds no pairs.
- */
-void
-alenlist_clear(alenlist_t alenlist)
-{
- alenlist_chunk_t chunk, freechunk;
-
- /*
- * If this List is not FIXED_SIZE, free all the
- * extra chunks.
- */
- if (!(alenlist->al_flags & AL_FIXED_SIZE)) {
- /* First, free any extension alenlist chunks */
- chunk = alenlist->al_chunk.alc_next;
- while (chunk) {
- freechunk = chunk;
- chunk = chunk->alc_next;
- snia_kmem_zone_free(alenlist_chunk_zone, freechunk);
- DECR_COUNT(&alenlist_chunk_count);
- }
- alenlist->al_actual_size = ALEN_CHUNK_SZ;
- alenlist->al_chunk.alc_next = NULL;
- }
-
- alenlist->al_logical_size = 0;
- alenlist->al_last_chunk = &alenlist->al_chunk;
- do_cursor_init(alenlist, &alenlist->al_cursor);
-}
-
-
-/*
- * Create and initialize an Address/Length Pair.
- * This is intended for degenerate lists, consisting of a single
- * address/length pair.
- */
-alenlist_t
-alenpair_init( alenaddr_t address,
- size_t length)
-{
- alenlist_t alenlist;
-
- alenlist = alenlist_create(0);
-
- alenlist->al_logical_size = 1;
- ASSERT(alenlist->al_last_chunk == &alenlist->al_chunk);
- alenlist->al_chunk.alc_pair[0].al_length = length;
- alenlist->al_chunk.alc_pair[0].al_addr = address;
-
- return(alenlist);
-}
-
-/*
- * Return address/length from a degenerate (1-pair) List, or
- * first pair from a larger list. Does NOT update the internal cursor,
- * so this is an easy way to peek at a start address.
- */
-int
-alenpair_get( alenlist_t alenlist,
- alenaddr_t *address,
- size_t *length)
-{
- if (alenlist->al_logical_size == 0)
- return(ALENLIST_FAILURE);
-
- *length = alenlist->al_chunk.alc_pair[0].al_length;
- *address = alenlist->al_chunk.alc_pair[0].al_addr;
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Destroy an Address/Length List.
- */
-void
-alenlist_destroy(alenlist_t alenlist)
-{
- if (alenlist == NULL)
- return;
-
- /*
- * Turn off FIXED_SIZE so this List can be
- * automatically shrunk.
- */
- alenlist->al_flags &= ~AL_FIXED_SIZE;
-
- /* Free extension chunks first */
- if (alenlist->al_chunk.alc_next)
- alenlist_clear(alenlist);
-
- /* Now, free the alenlist itself */
- snia_kmem_zone_free(alenlist_zone, alenlist);
- DECR_COUNT(&alenlist_count);
-}
-
-/*
- * Release an Address/Length List.
- * This is in preparation for a day when alenlist's may be longer-lived, and
- * perhaps associated with a buf structure. We'd add a reference count, and
- * this routine would decrement the count. For now, we create alenlist's on
- * on demand and free them when done. If the driver is not explicitly managing
- * a List for its own use, it should call alenlist_done rather than alenlist_destroy.
- */
-void
-alenlist_done(alenlist_t alenlist)
-{
- alenlist_destroy(alenlist);
-}
-
-
-/*
- * Append another address/length to the end of an Address/Length List,
- * growing the list if permitted and necessary.
- *
- * Returns: SUCCESS/FAILURE
- */
-int
-alenlist_append( alenlist_t alenlist, /* append to this list */
- alenaddr_t address, /* address to append */
- size_t length, /* length to append */
- unsigned flags)
-{
- alen_t *alenp;
- int index, last_index;
-
- index = alenlist->al_logical_size % ALEN_CHUNK_SZ;
-
- if ((alenlist->al_logical_size > 0)) {
- /*
- * See if we can compact this new pair in with the previous entry.
- * al_compaction_address holds that value that we'd need to see
- * in order to compact.
- */
- if (!(flags & AL_NOCOMPACT) &&
- (alenlist->al_compaction_address == address)) {
- last_index = (alenlist->al_logical_size-1) % ALEN_CHUNK_SZ;
- alenp = &(alenlist->al_last_chunk->alc_pair[last_index]);
- alenp->al_length += length;
- alenlist->al_compaction_address += length;
- return(ALENLIST_SUCCESS);
- }
-
- /*
- * If we're out of room in this chunk, move to a new chunk.
- */
- if (index == 0) {
- if (alenlist->al_flags & AL_FIXED_SIZE) {
- alenlist->al_last_chunk = alenlist->al_last_chunk->alc_next;
-
- /* If we're out of space in a FIXED_SIZE List, quit. */
- if (alenlist->al_last_chunk == NULL) {
- ASSERT(alenlist->al_logical_size == alenlist->al_actual_size);
- return(ALENLIST_FAILURE);
- }
- } else {
- alenlist_chunk_t new_chunk;
-
- new_chunk = snia_kmem_zone_alloc(alenlist_chunk_zone,
- flags & AL_NOSLEEP ? VM_NOSLEEP : 0);
-
- if (new_chunk == NULL)
- return(ALENLIST_FAILURE);
-
- alenlist->al_last_chunk->alc_next = new_chunk;
- new_chunk->alc_next = NULL;
- alenlist->al_last_chunk = new_chunk;
- alenlist->al_actual_size += ALEN_CHUNK_SZ;
- INCR_COUNT(&alenlist_chunk_count);
- }
- }
- }
-
- alenp = &(alenlist->al_last_chunk->alc_pair[index]);
- alenp->al_addr = address;
- alenp->al_length = length;
-
- alenlist->al_logical_size++;
- alenlist->al_compaction_address = address + length;
-
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Replace an item in an Address/Length List. Cursor is updated so
- * that alenlist_get will get the next item in the list. This interface
- * is not very useful for drivers; but it is useful to bus providers
- * that need to translate between address spaced in situ. The old Address
- * and Length are returned.
- */
-/* ARGSUSED */
-int
-alenlist_replace( alenlist_t alenlist, /* in: replace in this list */
- alenlist_cursor_t cursorp, /* inout: which item to replace */
- alenaddr_t *addrp, /* inout: address */
- size_t *lengthp, /* inout: length */
- unsigned flags)
-{
- alen_t *alenp;
- alenlist_chunk_t chunk;
- unsigned int index;
- size_t length;
- alenaddr_t addr;
-
- if ((addrp == NULL) || (lengthp == NULL))
- return(ALENLIST_FAILURE);
-
- if (alenlist->al_logical_size == 0)
- return(ALENLIST_FAILURE);
-
- addr = *addrp;
- length = *lengthp;
-
- /*
- * If no cursor explicitly specified, use the Address/Length List's
- * internal cursor.
- */
- if (cursorp == NULL)
- cursorp = &alenlist->al_cursor;
-
- chunk = cursorp->al_chunk;
- index = cursorp->al_index;
-
- ASSERT(cursorp->al_alenlist == alenlist);
- if (cursorp->al_alenlist != alenlist)
- return(ALENLIST_FAILURE);
-
- alenp = &chunk->alc_pair[index];
-
- /* Return old values */
- *addrp = alenp->al_length;
- *lengthp = alenp->al_addr;
-
- /* Set up new values */
- alenp->al_length = length;
- alenp->al_addr = addr;
-
- /* Update cursor to point to next item */
- cursorp->al_bcount = length;
-
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Initialize a cursor in order to walk an alenlist.
- * An alenlist_cursor always points to the last thing that was obtained
- * from the list. If al_chunk is NULL, then nothing has yet been obtained.
- *
- * Note: There is an "internal cursor" associated with every Address/Length List.
- * For users that scan sequentially through a List, it is more efficient to
- * simply use the internal cursor. The caller must insure that no other users
- * will simultaneously scan the List. The caller can reposition the internal
- * cursor by calling alenlist_cursor_init with a NULL cursorp.
- */
-int
-alenlist_cursor_init(alenlist_t alenlist, size_t offset, alenlist_cursor_t cursorp)
-{
- size_t byte_count;
-
- if (cursorp == NULL)
- cursorp = &alenlist->al_cursor;
-
- /* Get internal cursor's byte count for use as a hint.
- *
- * If the internal cursor points passed the point that we're interested in,
- * we need to seek forward from the beginning. Otherwise, we can seek forward
- * from the internal cursor.
- */
- if ((offset > 0) &&
- ((byte_count = alenlist_cursor_offset(alenlist, (alenlist_cursor_t)NULL)) <= offset)) {
- offset -= byte_count;
- alenlist_cursor_clone(alenlist, NULL, cursorp);
- } else
- do_cursor_init(alenlist, cursorp);
-
- /* We could easily speed this up, but it shouldn't be used very often. */
- while (offset != 0) {
- alenaddr_t addr;
- size_t length;
-
- if (alenlist_get(alenlist, cursorp, offset, &addr, &length, 0) != ALENLIST_SUCCESS)
- return(ALENLIST_FAILURE);
- offset -= length;
- }
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Copy a cursor. The source cursor is either an internal alenlist cursor
- * or an explicit cursor.
- */
-int
-alenlist_cursor_clone( alenlist_t alenlist,
- alenlist_cursor_t cursorp_in,
- alenlist_cursor_t cursorp_out)
-{
- ASSERT(cursorp_out);
-
- if (alenlist && cursorp_in)
- if (alenlist != cursorp_in->al_alenlist)
- return(ALENLIST_FAILURE);
-
- if (alenlist)
- *cursorp_out = alenlist->al_cursor; /* small structure copy */
- else if (cursorp_in)
- *cursorp_out = *cursorp_in; /* small structure copy */
- else
- return(ALENLIST_FAILURE); /* no source */
-
- return(ALENLIST_SUCCESS);
-}
-
-/*
- * Return the number of bytes passed so far according to the specified cursor.
- * If cursorp is NULL, use the alenlist's internal cursor.
- */
-size_t
-alenlist_cursor_offset(alenlist_t alenlist, alenlist_cursor_t cursorp)
-{
- ASSERT(!alenlist || !cursorp || (alenlist == cursorp->al_alenlist));
-
- if (cursorp == NULL) {
- ASSERT(alenlist);
- cursorp = &alenlist->al_cursor;
- }
-
- return(cursorp->al_offset);
-}
-
-/*
- * Allocate and initialize an Address/Length List cursor.
- */
-alenlist_cursor_t
-alenlist_cursor_create(alenlist_t alenlist, unsigned flags)
-{
- alenlist_cursor_t cursorp;
-
- ASSERT(alenlist != NULL);
- cursorp = snia_kmem_zone_alloc(alenlist_cursor_zone, flags & AL_NOSLEEP ? VM_NOSLEEP : 0);
- if (cursorp) {
- INCR_COUNT(&alenlist_cursor_count);
- alenlist_cursor_init(alenlist, 0, cursorp);
- }
- return(cursorp);
-}
-
-/*
- * Free an Address/Length List cursor.
- */
-void
-alenlist_cursor_destroy(alenlist_cursor_t cursorp)
-{
- DECR_COUNT(&alenlist_cursor_count);
- snia_kmem_zone_free(alenlist_cursor_zone, cursorp);
-}
-
-
-/*
- * Fetch an address/length pair from an Address/Length List. Update
- * the "cursor" so that next time this routine is called, we'll get
- * the next address range. Never return a length that exceeds maxlength
- * (if non-zero). If maxlength is a power of 2, never return a length
- * that crosses a maxlength boundary. [This may seem strange at first,
- * but it's what many drivers want.]
- *
- * Returns: SUCCESS/FAILURE
- */
-int
-alenlist_get( alenlist_t alenlist, /* in: get from this list */
- alenlist_cursor_t cursorp, /* inout: which item to get */
- size_t maxlength, /* in: at most this length */
- alenaddr_t *addrp, /* out: address */
- size_t *lengthp, /* out: length */
- unsigned flags)
-{
- alen_t *alenp;
- alenlist_chunk_t chunk;
- unsigned int index;
- size_t bcount;
- size_t length;
-
- /*
- * If no cursor explicitly specified, use the Address/Length List's
- * internal cursor.
- */
- if (cursorp == NULL) {
- if (alenlist->al_logical_size == 0)
- return(ALENLIST_FAILURE);
- cursorp = &alenlist->al_cursor;
- }
-
- chunk = cursorp->al_chunk;
- index = cursorp->al_index;
- bcount = cursorp->al_bcount;
-
- ASSERT(cursorp->al_alenlist == alenlist);
- if (cursorp->al_alenlist != alenlist)
- return(ALENLIST_FAILURE);
-
- alenp = &chunk->alc_pair[index];
- length = alenp->al_length - bcount;
-
- /* Bump up to next pair, if we're done with this pair. */
- if (length == 0) {
- cursorp->al_bcount = bcount = 0;
- cursorp->al_index = index = (index + 1) % ALEN_CHUNK_SZ;
-
- /* Bump up to next chunk, if we're done with this chunk. */
- if (index == 0) {
- if (cursorp->al_chunk == alenlist->al_last_chunk)
- return(ALENLIST_FAILURE);
- chunk = chunk->alc_next;
- ASSERT(chunk != NULL);
- } else {
- /* If in last chunk, don't go beyond end. */
- if (cursorp->al_chunk == alenlist->al_last_chunk) {
- int last_size = alenlist->al_logical_size % ALEN_CHUNK_SZ;
- if (last_size && (index >= last_size))
- return(ALENLIST_FAILURE);
- }
- }
-
- alenp = &chunk->alc_pair[index];
- length = alenp->al_length;
- }
-
- /* Constrain what we return according to maxlength */
- if (maxlength) {
- size_t maxlen1 = maxlength - 1;
-
- if ((maxlength & maxlen1) == 0) /* power of 2 */
- maxlength -=
- ((alenp->al_addr + cursorp->al_bcount) & maxlen1);
-
- length = min(maxlength, length);
- }
-
- /* Update the cursor, if desired. */
- if (!(flags & AL_LEAVE_CURSOR)) {
- cursorp->al_bcount += length;
- cursorp->al_chunk = chunk;
- }
-
- *lengthp = length;
- *addrp = alenp->al_addr + bcount;
-
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Return the number of pairs in the specified Address/Length List.
- * (For FIXED_SIZE Lists, this returns the logical size of the List,
- * not the actual capacity of the List.)
- */
-int
-alenlist_size(alenlist_t alenlist)
-{
- return(alenlist->al_logical_size);
-}
-
-
-/*
- * Concatenate two Address/Length Lists.
- */
-void
-alenlist_concat(alenlist_t from,
- alenlist_t to)
-{
- struct alenlist_cursor_s cursor;
- alenaddr_t addr;
- size_t length;
-
- alenlist_cursor_init(from, 0, &cursor);
-
- while(alenlist_get(from, &cursor, (size_t)0, &addr, &length, 0) == ALENLIST_SUCCESS)
- alenlist_append(to, addr, length, 0);
-}
-
-/*
- * Create a copy of a list.
- * (Not all attributes of the old list are cloned. For instance, if
- * a FIXED_SIZE list is cloned, the resulting list is NOT FIXED_SIZE.)
- */
-alenlist_t
-alenlist_clone(alenlist_t old_list, unsigned flags)
-{
- alenlist_t new_list;
-
- new_list = alenlist_create(flags);
- if (new_list != NULL)
- alenlist_concat(old_list, new_list);
-
- return(new_list);
-}
-
-
-/*
- * Convert a kernel virtual address to a Physical Address/Length List.
- */
-alenlist_t
-kvaddr_to_alenlist(alenlist_t alenlist, caddr_t kvaddr, size_t length, unsigned flags)
-{
- alenaddr_t paddr;
- long offset;
- size_t piece_length;
- int created_alenlist;
-
- if (length <=0)
- return(NULL);
-
- /* If caller supplied a List, use it. Otherwise, allocate one. */
- if (alenlist == NULL) {
- alenlist = alenlist_create(0);
- created_alenlist = 1;
- } else {
- alenlist_clear(alenlist);
- created_alenlist = 0;
- }
-
- paddr = kvtophys(kvaddr);
- offset = poff(kvaddr);
-
- /* Handle first page */
- piece_length = min((size_t)(NBPP - offset), length);
- if (alenlist_append(alenlist, paddr, piece_length, flags) == ALENLIST_FAILURE)
- goto failure;
- length -= piece_length;
- kvaddr += piece_length;
-
- /* Handle middle pages */
- while (length >= NBPP) {
- paddr = kvtophys(kvaddr);
- if (alenlist_append(alenlist, paddr, NBPP, flags) == ALENLIST_FAILURE)
- goto failure;
- length -= NBPP;
- kvaddr += NBPP;
- }
-
- /* Handle last page */
- if (length) {
- ASSERT(length < NBPP);
- paddr = kvtophys(kvaddr);
- if (alenlist_append(alenlist, paddr, length, flags) == ALENLIST_FAILURE)
- goto failure;
- }
-
- alenlist_cursor_init(alenlist, 0, NULL);
- return(alenlist);
-
-failure:
- if (created_alenlist)
- alenlist_destroy(alenlist);
- return(NULL);
-}
-
-
-#if DEBUG
-static void
-alenlist_show(alenlist_t alenlist)
-{
- struct alenlist_cursor_s cursor;
- alenaddr_t addr;
- size_t length;
- int i = 0;
-
- alenlist_cursor_init(alenlist, 0, &cursor);
-
- qprintf("Address/Length List@0x%x:\n", alenlist);
- qprintf("logical size=0x%x actual size=0x%x last_chunk at 0x%x\n",
- alenlist->al_logical_size, alenlist->al_actual_size,
- alenlist->al_last_chunk);
- qprintf("cursor: chunk=0x%x index=%d offset=0x%x\n",
- alenlist->al_cursor.al_chunk,
- alenlist->al_cursor.al_index,
- alenlist->al_cursor.al_bcount);
- while(alenlist_get(alenlist, &cursor, (size_t)0, &addr, &length, 0) == ALENLIST_SUCCESS)
- qprintf("%d:\t0x%lx 0x%lx\n", ++i, addr, length);
-}
-#endif /* DEBUG */
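The header comment in the deleted alenlist.c above describes the Address/Length List API in prose. The following is an illustrative usage sketch only, pieced together from the signatures visible in the removed file; it is not part of the patch, and demo_build_sg() and its arguments are hypothetical.

/*
 * Sketch: build a two-range list and walk it with an explicit cursor.
 * Signatures taken from the removed alenlist.c; not part of this patch.
 */
#include <asm/sn/sgi.h>
#include <asm/sn/alenlist.h>

static int demo_build_sg(alenaddr_t paddr0, alenaddr_t paddr1, size_t len)
{
	struct alenlist_cursor_s cursor;
	alenlist_t list;
	alenaddr_t addr;
	size_t length;

	list = alenlist_create(0);	/* may sleep; pass AL_NOSLEEP in atomic context */
	if (list == NULL)
		return ALENLIST_FAILURE;

	/* Append two physical ranges; consecutive ranges are merged
	 * unless AL_NOCOMPACT is passed. */
	if (alenlist_append(list, paddr0, len, 0) == ALENLIST_FAILURE ||
	    alenlist_append(list, paddr1, len, 0) == ALENLIST_FAILURE) {
		alenlist_destroy(list);
		return ALENLIST_FAILURE;
	}

	/* Walk the list; a maxlength of 0 means "no constraint" on each
	 * returned length. */
	alenlist_cursor_init(list, 0, &cursor);
	while (alenlist_get(list, &cursor, (size_t)0, &addr, &length, 0)
	       == ALENLIST_SUCCESS)
		;	/* hand (addr, length) to a scatter/gather engine */

	alenlist_destroy(list);
	return ALENLIST_SUCCESS;
}

Passing a non-zero maxlength to alenlist_get() would additionally clamp each returned length and, for powers of two, split at maxlength boundaries, as the removed comment above alenlist_get() explains.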
diff --git a/arch/ia64/sn/io/ate_utils.c b/arch/ia64/sn/io/ate_utils.c
index a789a1648614db..a5b11ae21b5725 100644
--- a/arch/ia64/sn/io/ate_utils.c
+++ b/arch/ia64/sn/io/ate_utils.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -27,7 +27,6 @@
#include <asm/sn/ioerror_handling.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/ate_utils.h>
diff --git a/arch/ia64/sn/io/cdl.c b/arch/ia64/sn/io/cdl.c
index 8ffba2495eeb49..f5a0e5f2cae725 100644
--- a/arch/ia64/sn/io/cdl.c
+++ b/arch/ia64/sn/io/cdl.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
@@ -18,9 +18,9 @@
#include <asm/sn/xtalk/xbow.h>
/* these get called directly in cdl_add_connpt in fops bypass hack */
-extern int pcibr_attach(devfs_handle_t);
-extern int xbow_attach(devfs_handle_t);
-extern int pic_attach(devfs_handle_t);
+extern int pcibr_attach(vertex_hdl_t);
+extern int xbow_attach(vertex_hdl_t);
+extern int pic_attach(vertex_hdl_t);
/*
@@ -32,75 +32,20 @@ extern int pic_attach(devfs_handle_t);
* IO Infrastructure Drivers e.g. pcibr.
*/
-struct cdl {
- int part_num;
- int mfg_num;
- int (*attach) (devfs_handle_t);
-} dummy_reg;
-
-#ifdef CONFIG_IA64_SGI_SN1
-#define MAX_SGI_IO_INFRA_DRVR 4
-#else
#define MAX_SGI_IO_INFRA_DRVR 7
-#endif
+
static struct cdl sgi_infrastructure_drivers[MAX_SGI_IO_INFRA_DRVR] =
{
{ XBRIDGE_WIDGET_PART_NUM, XBRIDGE_WIDGET_MFGR_NUM, pcibr_attach /* &pcibr_fops */},
{ BRIDGE_WIDGET_PART_NUM, BRIDGE_WIDGET_MFGR_NUM, pcibr_attach /* &pcibr_fops */},
-#ifndef CONFIG_IA64_SGI_SN1
{ PIC_WIDGET_PART_NUM_BUS0, PIC_WIDGET_MFGR_NUM, pic_attach /* &pic_fops */},
{ PIC_WIDGET_PART_NUM_BUS1, PIC_WIDGET_MFGR_NUM, pic_attach /* &pic_fops */},
-#endif
{ XXBOW_WIDGET_PART_NUM, XXBOW_WIDGET_MFGR_NUM, xbow_attach /* &xbow_fops */},
{ XBOW_WIDGET_PART_NUM, XBOW_WIDGET_MFGR_NUM, xbow_attach /* &xbow_fops */},
-#ifndef CONFIG_IA64_SGI_SN1
{ PXBOW_WIDGET_PART_NUM, XXBOW_WIDGET_MFGR_NUM, xbow_attach /* &xbow_fops */},
-#endif
};
/*
- * cdl_new: Called by pciio and xtalk.
- */
-cdl_p
-cdl_new(char *name, char *k1str, char *k2str)
-{
- /*
- * Just return a dummy pointer.
- */
- return((cdl_p)&dummy_reg);
-}
-
-/*
- * cdl_del: Do nothing.
- */
-void
-cdl_del(cdl_p reg)
-{
- return;
-}
-
-/*
- * cdl_add_driver: The driver part number and manufacturers number
- * are statically initialized above.
- *
- Do nothing.
- */
-int
-cdl_add_driver(cdl_p reg, int key1, int key2, char *prefix, int flags, cdl_drv_f *func)
-{
- return 0;
-}
-
-/*
- * cdl_del_driver: Not supported.
- */
-void
-cdl_del_driver(cdl_p reg, char *prefix, cdl_drv_f *func)
-{
- return;
-}
-
-/*
* cdl_add_connpt: We found a device and it's connect point. Call the
* attach routine of that driver.
*
@@ -112,8 +57,8 @@ cdl_del_driver(cdl_p reg, char *prefix, cdl_drv_f *func)
* vertices.
*/
int
-cdl_add_connpt(cdl_p reg, int part_num, int mfg_num,
- devfs_handle_t connpt, int drv_flags)
+cdl_add_connpt(int part_num, int mfg_num,
+ vertex_hdl_t connpt, int drv_flags)
{
int i;
@@ -121,7 +66,6 @@ cdl_add_connpt(cdl_p reg, int part_num, int mfg_num,
* Find the driver entry point and call the attach routine.
*/
for (i = 0; i < MAX_SGI_IO_INFRA_DRVR; i++) {
-
if ( (part_num == sgi_infrastructure_drivers[i].part_num) &&
( mfg_num == sgi_infrastructure_drivers[i].mfg_num) ) {
/*
@@ -139,73 +83,3 @@ cdl_add_connpt(cdl_p reg, int part_num, int mfg_num,
return (0);
}
-
-/*
- * cdl_del_connpt: Not implemented.
- */
-int
-cdl_del_connpt(cdl_p reg, int key1, int key2, devfs_handle_t connpt, int drv_flags)
-{
-
- return(0);
-}
-
-/*
- * cdl_iterate: Not Implemented.
- */
-void
-cdl_iterate(cdl_p reg,
- char *prefix,
- cdl_iter_f * func)
-{
- return;
-}
-
-async_attach_t
-async_attach_new(void)
-{
-
- return(0);
-}
-
-void
-async_attach_free(async_attach_t aa)
-{
- return;
-}
-
-async_attach_t
-async_attach_get_info(devfs_handle_t vhdl)
-{
-
- return(0);
-}
-
-void
-async_attach_add_info(devfs_handle_t vhdl, async_attach_t aa)
-{
- return;
-
-}
-
-void
-async_attach_del_info(devfs_handle_t vhdl)
-{
- return;
-}
-
-void async_attach_signal_start(async_attach_t aa)
-{
- return;
-}
-
-void async_attach_signal_done(async_attach_t aa)
-{
- return;
-}
-
-void async_attach_waitall(async_attach_t aa)
-{
- return;
-}
-
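With cdl_new()/cdl_add_driver() gone, attach dispatch is purely table-driven through sgi_infrastructure_drivers[]. Below is a hedged sketch of a caller using the new four-argument cdl_add_connpt(); demo_attach_widget() is hypothetical, and the part/mfgr constants are simply taken from the table above.

/* Illustrative only; not part of this patch. */
#include <asm/sn/sgi.h>		/* assumed to provide vertex_hdl_t, as in the files above */
#include <asm/sn/xtalk/xbow.h>	/* included by cdl.c; assumed to supply the widget constants */

extern int cdl_add_connpt(int part_num, int mfg_num,
			  vertex_hdl_t connpt, int drv_flags);

static void demo_attach_widget(vertex_hdl_t conn)
{
	/* In real discovery these come from the widget ID register. */
	int part_num = XBRIDGE_WIDGET_PART_NUM;
	int mfg_num = XBRIDGE_WIDGET_MFGR_NUM;

	/* Scans sgi_infrastructure_drivers[] for a (part, mfgr) match and
	 * calls the matching attach routine -- pcibr_attach(conn) here. */
	cdl_add_connpt(part_num, mfg_num, conn, 0);
}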
diff --git a/arch/ia64/sn/io/drivers/Makefile b/arch/ia64/sn/io/drivers/Makefile
new file mode 100644
index 00000000000000..c77f100623adec
--- /dev/null
+++ b/arch/ia64/sn/io/drivers/Makefile
@@ -0,0 +1,14 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+EXTRA_CFLAGS := -DLITTLE_ENDIAN
+
+obj-y += hubdev.o ioconfig_bus.o ifconfig_net.o
+
+obj-$(CONFIG_PCIBA) += pciba.o
diff --git a/arch/ia64/sn/io/hubdev.c b/arch/ia64/sn/io/drivers/hubdev.c
index 9b346dcd328105..ad312a98c34d97 100644
--- a/arch/ia64/sn/io/hubdev.c
+++ b/arch/ia64/sn/io/drivers/hubdev.c
@@ -1,10 +1,9 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
@@ -13,21 +12,20 @@
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/iograph.h>
-#include <asm/sn/sn1/hubdev.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
struct hubdev_callout {
- int (*attach_method)(devfs_handle_t);
+ int (*attach_method)(vertex_hdl_t);
struct hubdev_callout *fp;
};
typedef struct hubdev_callout hubdev_callout_t;
mutex_t hubdev_callout_mutex;
-hubdev_callout_t *hubdev_callout_list = NULL;
+static hubdev_callout_t *hubdev_callout_list;
void
hubdev_init(void)
@@ -37,7 +35,7 @@ hubdev_init(void)
}
void
-hubdev_register(int (*attach_method)(devfs_handle_t))
+hubdev_register(int (*attach_method)(vertex_hdl_t))
{
hubdev_callout_t *callout;
@@ -57,7 +55,7 @@ hubdev_register(int (*attach_method)(devfs_handle_t))
}
int
-hubdev_unregister(int (*attach_method)(devfs_handle_t))
+hubdev_unregister(int (*attach_method)(vertex_hdl_t))
{
hubdev_callout_t **p;
@@ -82,7 +80,7 @@ hubdev_unregister(int (*attach_method)(devfs_handle_t))
int
-hubdev_docallouts(devfs_handle_t hub)
+hubdev_docallouts(vertex_hdl_t hub)
{
hubdev_callout_t *p;
int errcode;
@@ -100,33 +98,3 @@ hubdev_docallouts(devfs_handle_t hub)
mutex_unlock(&hubdev_callout_mutex);
return (0);
}
-
-/*
- * Given a hub vertex, return the base address of the Hspec space
- * for that hub.
- */
-
-#if defined(CONFIG_IA64_SGI_SN1)
-
-caddr_t
-hubdev_prombase_get(devfs_handle_t hub)
-{
- hubinfo_t hinfo = NULL;
-
- hubinfo_get(hub, &hinfo);
- ASSERT(hinfo);
-
- return ((caddr_t)NODE_RBOOT_BASE(hinfo->h_nasid));
-}
-
-cnodeid_t
-hubdev_cnodeid_get(devfs_handle_t hub)
-{
- hubinfo_t hinfo = NULL;
- hubinfo_get(hub, &hinfo);
- ASSERT(hinfo);
-
- return hinfo->h_cnodeid;
-}
-
-#endif /* CONFIG_IA64_SGI_SN1 */
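hubdev above keeps a single callout list that platform code replays against each hub vertex. A sketch of the registration pattern follows, with my_hub_attach() and my_driver_init() as hypothetical clients; the prototypes are copied from the definitions shown above.

/* Illustrative only; not part of this patch. */
#include <asm/sn/sgi.h>		/* assumed to provide vertex_hdl_t, as in hubdev.c above */

extern void hubdev_register(int (*attach_method)(vertex_hdl_t));
extern int hubdev_docallouts(vertex_hdl_t hub);

static int my_hub_attach(vertex_hdl_t hub)
{
	/* per-hub driver setup would go here */
	return 0;
}

static void my_driver_init(void)
{
	/* Queues my_hub_attach on hubdev_callout_list under the mutex. */
	hubdev_register(my_hub_attach);
}

/*
 * Later, for each hub vertex the platform code creates, a call to
 * hubdev_docallouts(hub) walks the list and invokes every registered
 * attach_method with that vertex.
 */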
diff --git a/arch/ia64/sn/io/ifconfig_net.c b/arch/ia64/sn/io/drivers/ifconfig_net.c
index c7b2b27edcccdd..5c06058cc4ca79 100644
--- a/arch/ia64/sn/io/ifconfig_net.c
+++ b/arch/ia64/sn/io/drivers/ifconfig_net.c
@@ -6,7 +6,7 @@
*
* ifconfig_net - SGI's Persistent Network Device names.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -37,8 +37,8 @@
/*
* Some Global definitions.
*/
-devfs_handle_t ifconfig_net_handle = NULL;
-unsigned long ifconfig_net_debug = 0;
+static devfs_handle_t ifconfig_net_handle;
+static unsigned long ifconfig_net_debug;
/*
* ifconfig_net_open - Opens the special device node "/devhw/.ifconfig_net".
diff --git a/arch/ia64/sn/io/ioconfig_bus.c b/arch/ia64/sn/io/drivers/ioconfig_bus.c
index 91dea65c3fceb5..0d043d3f25eb68 100644
--- a/arch/ia64/sn/io/ioconfig_bus.c
+++ b/arch/ia64/sn/io/drivers/ioconfig_bus.c
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -35,8 +34,8 @@
/*
* Some Global definitions.
*/
-devfs_handle_t ioconfig_bus_handle = NULL;
-unsigned long ioconfig_bus_debug = 0;
+static vertex_hdl_t ioconfig_bus_handle;
+static unsigned long ioconfig_bus_debug;
#ifdef IOCONFIG_BUS_DEBUG
#define DBG(x...) printk(x)
@@ -44,22 +43,22 @@ unsigned long ioconfig_bus_debug = 0;
#define DBG(x...)
#endif
-u64 ioconfig_file = 0;
-u64 ioconfig_file_size = 0;
-u64 ioconfig_activated = 0;
-char ioconfig_kernopts[128];
+static u64 ioconfig_file;
+static u64 ioconfig_file_size;
+static u64 ioconfig_activated;
+static char ioconfig_kernopts[128];
/*
* For debugging purpose .. hardcode a table ..
*/
struct ascii_moduleid *ioconfig_bus_table;
-u64 ioconfig_bus_table_size = 0;
+u64 ioconfig_bus_table_size;
-int free_entry = 0;
-int new_entry = 0;
+static int free_entry;
+static int new_entry;
-int next_basebus_number = 0;
+int next_basebus_number;
void
ioconfig_get_busnum(char *io_moduleid, int *bus_num)
@@ -96,8 +95,8 @@ ioconfig_get_busnum(char *io_moduleid, int *bus_num)
free_entry++;
}
-void
-dump_ioconfig_table()
+static void
+dump_ioconfig_table(void)
{
int index = 0;
@@ -264,14 +263,14 @@ ioconfig_bus_init(void)
*/
DBG("ioconfig_bus_init: Kernel Options given.\n");
(void) build_moduleid_table((char *)ioconfig_kernopts, ioconfig_bus_table);
- (void) dump_ioconfig_table(ioconfig_bus_table);
+ (void) dump_ioconfig_table();
return;
}
if (ioconfig_activated) {
DBG("ioconfig_bus_init: ioconfig file given.\n");
(void) build_moduleid_table((char *)ioconfig_file, ioconfig_bus_table);
- (void) dump_ioconfig_table(ioconfig_bus_table);
+ (void) dump_ioconfig_table();
} else {
DBG("ioconfig_bus_init: ioconfig command not executed in prom\n");
}
@@ -295,7 +294,7 @@ ioconfig_bus_new_entries(void)
index = new_entry;
temp = &ioconfig_bus_table[index];
while (index < free_entry) {
- printk("%s\n", temp);
+ printk("%s\n", (char *)temp);
temp++;
index++;
}
diff --git a/arch/ia64/sn/io/pciba.c b/arch/ia64/sn/io/drivers/pciba.c
index a5090b0f2a075b..c3f01e28896f14 100644
--- a/arch/ia64/sn/io/pciba.c
+++ b/arch/ia64/sn/io/drivers/pciba.c
@@ -17,7 +17,7 @@
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*
* 03262001 - Initial version by Chad Talbott
*/
@@ -40,12 +40,12 @@
#include <linux/config.h>
-#ifndef CONFIG_DEVFS_FS
-# error PCIBA requires devfs
-#endif
-
#include <linux/module.h>
-#include <linux/devfs_fs_kernel.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
#include <linux/pci.h>
#include <linux/list.h>
@@ -58,6 +58,7 @@
#include <linux/capability.h>
#include <asm/uaccess.h>
+#include <asm/sn/sgi.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -139,7 +140,7 @@ struct node_data {
/* flat list of all the device nodes. makes it easy to free
them all when we're unregistered */
struct list_head global_node_list;
- devfs_handle_t devfs_handle;
+ vertex_hdl_t devfs_handle;
void (* cleanup)(struct node_data *);
@@ -183,7 +184,7 @@ void __exit pciba_exit(void);
static status __init register_with_devfs(void);
static void __exit unregister_with_devfs(void);
-static status __init register_pci_device(devfs_handle_t device_dir_handle,
+static status __init register_pci_device(vertex_hdl_t device_dir_handle,
struct pci_dev * dev);
/* file operations */
@@ -281,52 +282,18 @@ free_nodes(void)
}
#endif
-#if !defined(CONFIG_IA64_SGI_SN1)
-
-static status __init
-register_with_devfs(void)
-{
- struct pci_dev * dev;
- devfs_handle_t device_dir_handle;
- char devfs_path[40];
-
- TRACE();
-
- if (!devfs_mk_dir(NULL, "pci", NULL))
- return failure;
-
- /* FIXME: don't forget /dev/pci/mem & /dev/pci/io */
-
- pci_for_each_dev(dev) {
- sprintf(devfs_path, "pci/%02x/%02x.%x",
- dev->bus->number,
- PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
-
- device_dir_handle =
- devfs_mk_dir(NULL, devfs_path, NULL);
- if (device_dir_handle == NULL)
- return failure;
-
- if (register_pci_device(device_dir_handle, dev) == failure) {
- devfs_remove("pci");
- return failure;
- }
- }
- return success;
-}
+static vertex_hdl_t pciba_devfs_handle;
-#else
-extern devfs_handle_t
+extern vertex_hdl_t
devfn_to_vertex(unsigned char busnum, unsigned int devfn);
static status __init
register_with_devfs(void)
{
struct pci_dev * dev;
- devfs_handle_t device_dir_handle;
+ vertex_hdl_t device_dir_handle;
TRACE();
@@ -339,7 +306,7 @@ register_with_devfs(void)
return failure;
if (register_pci_device(device_dir_handle, dev) == failure) {
- devfs_remove("pci");
+ hwgraph_vertex_destroy(pciba_devfs_handle);
return failure;
}
}
@@ -357,7 +324,7 @@ unregister_with_devfs(void)
list_for_each(lhp, &global_node_list) {
nd = list_entry(lhp, struct node_data, global_node_list);
- devfs_unregister(nd->devfs_handle);
+ hwgraph_vertex_destroy(nd->devfs_handle);
}
}
@@ -385,12 +352,12 @@ void dma_cleanup(struct node_data * dma_node)
#ifdef DEBUG_PCIBA
dump_allocations(&dma_node->u.dma.dma_allocs);
#endif
- devfs_unregister(dma_node->devfs_handle);
+ hwgraph_vertex_destroy(dma_node->devfs_handle);
}
void init_dma_node(struct node_data * node,
- struct pci_dev * dev, devfs_handle_t dh)
+ struct pci_dev * dev, vertex_hdl_t dh)
{
TRACE();
@@ -409,12 +376,12 @@ void rom_cleanup(struct node_data * rom_node)
pci_write_config_dword(rom_node->u.rom.dev,
PCI_ROM_ADDRESS,
rom_node->u.rom.saved_rom_base_reg);
- devfs_unregister(rom_node->devfs_handle);
+ hwgraph_vertex_destroy(rom_node->devfs_handle);
}
void init_rom_node(struct node_data * node,
- struct pci_dev * dev, devfs_handle_t dh)
+ struct pci_dev * dev, vertex_hdl_t dh)
{
TRACE();
@@ -426,11 +393,11 @@ void init_rom_node(struct node_data * node,
static status __init
-register_pci_device(devfs_handle_t device_dir_handle, struct pci_dev * dev)
+register_pci_device(vertex_hdl_t device_dir_handle, struct pci_dev * dev)
{
struct node_data * nd;
char devfs_path[20];
- devfs_handle_t node_devfs_handle;
+ vertex_hdl_t node_devfs_handle;
int ri;
TRACE();
@@ -440,10 +407,10 @@ register_pci_device(devfs_handle_t device_dir_handle, struct pci_dev * dev)
for (ri = 0; ri < PCI_ROM_RESOURCE; ri++) {
if (pci_resource_len(dev, ri) != 0) {
sprintf(devfs_path, "base/%d", ri);
- if (devfs_register(device_dir_handle, devfs_path,
- DEVFS_FL_NONE,
+ if (hwgraph_register(device_dir_handle, devfs_path,
+ 0, DEVFS_FL_NONE,
0, 0,
- S_IFREG | S_IRUSR | S_IWUSR,
+ S_IFCHR | S_IRUSR | S_IWUSR, 0, 0,
&base_fops,
&dev->resource[ri]) == NULL)
return failure;
@@ -455,9 +422,9 @@ register_pci_device(devfs_handle_t device_dir_handle, struct pci_dev * dev)
for (ri = 0; ri < PCI_ROM_RESOURCE; ri++) {
if (dev->resource[ri].flags & IORESOURCE_MEM &&
pci_resource_len(dev, ri) != 0) {
- if (devfs_register(device_dir_handle, "mem",
- DEVFS_FL_NONE, 0, 0,
- S_IFREG | S_IRUSR | S_IWUSR,
+ if (hwgraph_register(device_dir_handle, "mem",
+ 0, DEVFS_FL_NONE, 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR, 0, 0,
&base_fops,
&dev->resource[ri]) == NULL)
return failure;
@@ -470,9 +437,9 @@ register_pci_device(devfs_handle_t device_dir_handle, struct pci_dev * dev)
for (ri = 0; ri < PCI_ROM_RESOURCE; ri++) {
if (dev->resource[ri].flags & IORESOURCE_IO &&
pci_resource_len(dev, ri) != 0) {
- if (devfs_register(device_dir_handle, "io",
- DEVFS_FL_NONE, 0, 0,
- S_IFREG | S_IRUSR | S_IWUSR,
+ if (hwgraph_register(device_dir_handle, "io",
+ 0, DEVFS_FL_NONE, 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR, 0, 0,
&base_fops,
&dev->resource[ri]) == NULL)
return failure;
@@ -486,9 +453,9 @@ register_pci_device(devfs_handle_t device_dir_handle, struct pci_dev * dev)
nd = new_node();
if (nd == NULL)
return failure;
- node_devfs_handle = devfs_register(device_dir_handle, "rom",
- DEVFS_FL_NONE, 0, 0,
- S_IFREG | S_IRUSR,
+ node_devfs_handle = hwgraph_register(device_dir_handle, "rom",
+ 0, DEVFS_FL_NONE, 0, 0,
+ S_IFCHR | S_IRUSR, 0, 0,
&rom_fops, nd);
if (node_devfs_handle == NULL)
return failure;
@@ -497,8 +464,8 @@ register_pci_device(devfs_handle_t device_dir_handle, struct pci_dev * dev)
/* register a node that allows ioctl's to read and write to
the device's config space */
- if (devfs_register(device_dir_handle, "config", DEVFS_FL_NONE,
- 0, 0, S_IFREG | S_IRUSR | S_IWUSR,
+ if (hwgraph_register(device_dir_handle, "config", 0, DEVFS_FL_NONE,
+ 0, 0, S_IFCHR | S_IRUSR | S_IWUSR, 0, 0,
&config_fops, dev) == NULL)
return failure;
@@ -510,8 +477,8 @@ register_pci_device(devfs_handle_t device_dir_handle, struct pci_dev * dev)
if (nd == NULL)
return failure;
node_devfs_handle =
- devfs_register(device_dir_handle, "dma", DEVFS_FL_NONE,
- 0, 0, S_IFREG | S_IRUSR | S_IWUSR,
+ hwgraph_register(device_dir_handle, "dma", 0, DEVFS_FL_NONE,
+ 0, 0, S_IFCHR | S_IRUSR | S_IWUSR, 0, 0,
&dma_fops, nd);
if (node_devfs_handle == NULL)
return failure;
@@ -545,7 +512,11 @@ rom_mmap(struct file * file, struct vm_area_struct * vma)
TRACE();
+#ifdef CONFIG_HWGFS_FS
+ nd = (struct node_data * )file->f_dentry->d_fsdata;
+#else
nd = (struct node_data * )file->private_data;
+#endif
pci_pa = pci_resource_start(nd->u.rom.dev, PCI_ROM_RESOURCE);
@@ -575,7 +546,11 @@ rom_release(struct inode * inode, struct file * file)
TRACE();
+#ifdef CONFIG_HWGFS_FS
+ nd = (struct node_data * )file->f_dentry->d_fsdata;
+#else
nd = (struct node_data * )file->private_data;
+#endif
if (nd->u.rom.mmapped) {
nd->u.rom.mmapped = false;
@@ -594,7 +569,11 @@ base_mmap(struct file * file, struct vm_area_struct * vma)
TRACE();
+#ifdef CONFIG_HWGFS_FS
+ resource = (struct resource *)file->f_dentry->d_fsdata;
+#else
resource = (struct resource *)file->private_data;
+#endif
return mmap_pci_address(vma, resource->start);
}
@@ -622,7 +601,11 @@ config_ioctl(struct inode * inode, struct file * file,
_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
DPRINTF("arg = %lx\n", arg);
+#ifdef CONFIG_HWGFS_FS
+ dev = (struct pci_dev *)file->f_dentry->d_fsdata;
+#else
dev = (struct pci_dev *)file->private_data;
+#endif
/* PCIIOCCFG{RD,WR}: read and/or write PCI configuration
space. If both, the read happens first (this becomes a swap
@@ -752,7 +735,11 @@ dma_ioctl(struct inode * inode, struct file * file,
DPRINTF("cmd = %x\n", cmd);
DPRINTF("arg = %lx\n", arg);
+#ifdef CONFIG_HWGFS_FS
+ nd = (struct node_data *)file->f_dentry->d_fsdata;
+#else
nd = (struct node_data *)file->private_data;
+#endif
#ifdef DEBUG_PCIBA
DPRINTF("at dma_ioctl entry\n");
@@ -849,7 +836,11 @@ dma_mmap(struct file * file, struct vm_area_struct * vma)
TRACE();
+#ifdef CONFIG_HWGFS_FS
+ nd = (struct node_data *)file->f_dentry->d_fsdata;
+#else
nd = (struct node_data *)file->private_data;
+#endif
DPRINTF("vma->vm_start is %lx\n", vma->vm_start);
DPRINTF("vma->vm_end is %lx\n", vma->vm_end);
diff --git a/arch/ia64/sn/io/eeprom.c b/arch/ia64/sn/io/eeprom.c
deleted file mode 100644
index 5b190517de6f9f..00000000000000
--- a/arch/ia64/sn/io/eeprom.c
+++ /dev/null
@@ -1,1454 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * WARNING: There is more than one copy of this file in different isms.
- * All copies must be kept exactly in sync.
- * Do not modify this file without also updating the following:
- *
- * irix/kern/io/eeprom.c
- * stand/arcs/lib/libsk/ml/eeprom.c
- * stand/arcs/lib/libkl/io/eeprom.c
- *
- * (from time to time they might not be in sync but that's due to bringup
- * activity - this comment is to remind us that they eventually have to
- * get back together)
- *
- * eeprom.c
- *
- * access to board-mounted EEPROMs via the L1 system controllers
- *
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/router.h>
-#include <asm/sn/module.h>
-#include <asm/sn/ksys/l1.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/simulator.h>
-
-#if defined(EEPROM_DEBUG)
-#define db_printf(x) printk x
-#else
-#define db_printf(x) printk x
-#endif
-
-#define BCOPY(x,y,z) memcpy(y,x,z)
-
-#define UNDERSCORE 0 /* don't convert underscores to hyphens */
-#define HYPHEN 1 /* convert underscores to hyphens */
-
-void copy_ascii_field( char *to, char *from, int length,
- int change_underscore );
-uint64_t generate_unique_id( char *sn, int sn_len );
-uchar_t char_to_base36( char c );
-int nicify( char *dst, eeprom_brd_record_t *src );
-static void int64_to_hex_string( char *out, uint64_t val );
-
-// extern int router_lock( net_vec_t, int, int );
-// extern int router_unlock( net_vec_t );
-#define ROUTER_LOCK(p) // router_lock(p, 10000, 3000000)
-#define ROUTER_UNLOCK(p) // router_unlock(p)
-
-#define IP27LOG_OVNIC "OverrideNIC"
-
-
-/* the following function converts an EEPROM record to a close facsimile
- * of the string returned by reading a Dallas Semiconductor NIC (see
- * one of the many incarnations of nic.c for details on that driver)
- */
-int nicify( char *dst, eeprom_brd_record_t *src )
-{
- int field_len;
- uint64_t unique_id;
- char *cur_dst = dst;
- eeprom_board_ia_t *board;
-
- board = src->board_ia;
- ASSERT( board ); /* there should always be a board info area */
-
- /* copy part number */
- strcpy( cur_dst, "Part:" );
- cur_dst += strlen( cur_dst );
- ASSERT( (board->part_num_tl & FIELD_FORMAT_MASK)
- == FIELD_FORMAT_ASCII );
- field_len = board->part_num_tl & FIELD_LENGTH_MASK;
- copy_ascii_field( cur_dst, board->part_num, field_len, HYPHEN );
- cur_dst += field_len;
-
- /* copy product name */
- strcpy( cur_dst, ";Name:" );
- cur_dst += strlen( cur_dst );
- ASSERT( (board->product_tl & FIELD_FORMAT_MASK) == FIELD_FORMAT_ASCII );
- field_len = board->product_tl & FIELD_LENGTH_MASK;
- copy_ascii_field( cur_dst, board->product, field_len, UNDERSCORE );
- cur_dst += field_len;
-
- /* copy serial number */
- strcpy( cur_dst, ";Serial:" );
- cur_dst += strlen( cur_dst );
- ASSERT( (board->serial_num_tl & FIELD_FORMAT_MASK)
- == FIELD_FORMAT_ASCII );
- field_len = board->serial_num_tl & FIELD_LENGTH_MASK;
- copy_ascii_field( cur_dst, board->serial_num, field_len,
- HYPHEN);
-
- cur_dst += field_len;
-
- /* copy revision */
- strcpy( cur_dst, ";Revision:");
- cur_dst += strlen( cur_dst );
- ASSERT( (board->board_rev_tl & FIELD_FORMAT_MASK)
- == FIELD_FORMAT_ASCII );
- field_len = board->board_rev_tl & FIELD_LENGTH_MASK;
- copy_ascii_field( cur_dst, board->board_rev, field_len, HYPHEN );
- cur_dst += field_len;
-
- /* EEPROMs don't have equivalents for the Group, Capability and
-	 * Variety fields, so we pad these with ff's
- */
- strcpy( cur_dst, ";Group:ff;Capability:ffffffff;Variety:ff" );
- cur_dst += strlen( cur_dst );
-
- /* use the board serial number to "fake" a laser id */
- strcpy( cur_dst, ";Laser:" );
- cur_dst += strlen( cur_dst );
- unique_id = generate_unique_id( board->serial_num,
- board->serial_num_tl & FIELD_LENGTH_MASK );
- int64_to_hex_string( cur_dst, unique_id );
- strcat( dst, ";" );
-
- return 1;
-}
-
-
-/* These functions borrow heavily from chars2* in nic.c
- */
-void copy_ascii_field( char *to, char *from, int length,
- int change_underscore )
-{
- int i;
- for( i = 0; i < length; i++ ) {
-
- /* change underscores to hyphens if requested */
- if( from[i] == '_' && change_underscore == HYPHEN )
- to[i] = '-';
-
-		/* : and ; are separators, so mustn't appear within
- * a field */
- else if( from[i] == ':' || from[i] == ';' )
- to[i] = '?';
-
- /* I'm not sure why or if ASCII character 0xff would
- * show up in an EEPROM field, but the NIC parsing
- * routines wouldn't like it if it did... so we
- * get rid of it, just in case. */
- else if( (unsigned char)from[i] == (unsigned char)0xff )
- to[i] = ' ';
-
- /* unprintable characters are replaced with . */
- else if( from[i] < ' ' || from[i] >= 0x7f )
- to[i] = '.';
-
- /* otherwise, just copy the character */
- else
- to[i] = from[i];
- }
-
- if( i == 0 ) {
- to[i] = ' '; /* return at least a space... */
- i++;
- }
- to[i] = 0; /* terminating null */
-}
-
-/* Note that int64_to_hex_string currently only has a big-endian
- * implementation.
- */
-#ifdef _MIPSEB
-static void int64_to_hex_string( char *out, uint64_t val )
-{
- int i;
- uchar_t table[] = "0123456789abcdef";
- uchar_t *byte_ptr = (uchar_t *)&val;
- for( i = 0; i < sizeof(uint64_t); i++ ) {
- out[i*2] = table[ ((*byte_ptr) >> 4) & 0x0f ];
- out[i*2+1] = table[ (*byte_ptr) & 0x0f ];
- byte_ptr++;
- }
- out[i*2] = '\0';
-}
-
-#else /* little endian */
-
-static void int64_to_hex_string( char *out, uint64_t val )
-{
-
-
- printk("int64_to_hex_string needs a little-endian implementation.\n");
-}
-#endif /* _MIPSEB */
-
-/* Convert a standard ASCII serial number to a unique integer
- * id number by treating the serial number string as though
- * it were a base 36 number
- */
-uint64_t generate_unique_id( char *sn, int sn_len )
-{
- int uid = 0;
- int i;
-
- #define VALID_BASE36(c) ((c >= '0' && c <='9') \
- || (c >= 'A' && c <='Z') \
- || (c >= 'a' && c <='z'))
-
- for( i = 0; i < sn_len; i++ ) {
- if( !VALID_BASE36(sn[i]) )
- continue;
- uid *= 36;
- uid += char_to_base36( sn[i] );
- }
-
- if( uid == 0 )
- return rtc_time();
-
- return uid;
-}
-
-uchar_t char_to_base36( char c )
-{
- uchar_t val;
-
- if( c >= '0' && c <= '9' )
- val = (c - '0');
-
- else if( c >= 'A' && c <= 'Z' )
- val = (c - 'A' + 10);
-
- else if( c >= 'a' && c <= 'z' )
- val = (c - 'a' + 10);
-
- else val = 0;
-
- return val;
-}
-
-
-/* given a pointer to the three-byte little-endian EEPROM representation
- * of date-of-manufacture, this function translates to a big-endian
- * integer format
- */
-int eeprom_xlate_board_mfr_date( uchar_t *src )
-{
- int rval = 0;
- rval += *src; src++;
- rval += ((int)(*src) << 8); src ++;
- rval += ((int)(*src) << 16);
- return rval;
-}
-
-
-int eeprom_str( char *nic_str, nasid_t nasid, int component )
-{
- eeprom_brd_record_t eep;
- eeprom_board_ia_t board;
- eeprom_chassis_ia_t chassis;
- int r;
-
- if( (component & C_DIMM) == C_DIMM ) {
- /* this function isn't applicable to DIMMs */
- return EEP_PARAM;
- }
- else {
- eep.board_ia = &board;
- eep.spd = NULL;
- if( !(component & SUBORD_MASK) )
- eep.chassis_ia = &chassis; /* only main boards have a chassis
- * info area */
- else
- eep.chassis_ia = NULL;
- }
-
- switch( component & BRICK_MASK ) {
- case C_BRICK:
- r = cbrick_eeprom_read( &eep, nasid, component );
- break;
- case IO_BRICK:
- r = iobrick_eeprom_read( &eep, nasid, component );
- break;
- default:
- return EEP_PARAM; /* must be an invalid component */
- }
- if( r )
- return r;
- if( !nicify( nic_str, &eep ) )
- return EEP_NICIFY;
-
- return EEP_OK;
-}
-
-int vector_eeprom_str( char *nic_str, nasid_t nasid,
- int component, net_vec_t path )
-{
- eeprom_brd_record_t eep;
- eeprom_board_ia_t board;
- eeprom_chassis_ia_t chassis;
- int r;
-
- eep.board_ia = &board;
- if( !(component & SUBORD_MASK) )
- eep.chassis_ia = &chassis; /* only main boards have a chassis
- * info area */
- else
- eep.chassis_ia = NULL;
-
- if( !(component & VECTOR) )
- return EEP_PARAM;
-
- if( (r = vector_eeprom_read( &eep, nasid, path, component )) )
- return r;
-
- if( !nicify( nic_str, &eep ) )
- return EEP_NICIFY;
-
- return EEP_OK;
-}
-
-
-int is_iobrick( int nasid, int widget_num )
-{
- uint32_t wid_reg;
- int part_num, mfg_num;
-
- /* Read the widget's WIDGET_ID register to get
- * its part number and mfg number
- */
- wid_reg = *(volatile int32_t *)
- (NODE_SWIN_BASE( nasid, widget_num ) + WIDGET_ID);
-
- part_num = (wid_reg & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
- mfg_num = (wid_reg & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT;
-
- /* Is this the "xbow part" of an XBridge? If so, this
- * widget is definitely part of an I/O brick.
- */
- if( part_num == XXBOW_WIDGET_PART_NUM &&
- mfg_num == XXBOW_WIDGET_MFGR_NUM )
-
- return 1;
-
- /* Is this a "bridge part" of an XBridge? If so, once
- * again, we know this widget is part of an I/O brick.
- */
- if( part_num == XBRIDGE_WIDGET_PART_NUM &&
- mfg_num == XBRIDGE_WIDGET_MFGR_NUM )
-
- return 1;
-
- return 0;
-}
-
-
-int cbrick_uid_get( nasid_t nasid, uint64_t *uid )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- char uid_str[32];
- char msg[BRL1_QSIZE];
- int subch, len;
- l1sc_t sc;
- l1sc_t *scp;
- int local = (nasid == get_nasid());
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* If the promlog variable pointed to by IP27LOG_OVNIC is set,
- * use that value for the cbrick UID rather than the EEPROM
- * serial number.
- */
-#ifdef LOG_GETENV
- if( ip27log_getenv( nasid, IP27LOG_OVNIC, uid_str, NULL, 0 ) >= 0 )
- {
- /* We successfully read IP27LOG_OVNIC, so return it as the UID. */
- db_printf(( "cbrick_uid_get:"
- "Overriding UID with environment variable %s\n",
- IP27LOG_OVNIC ));
- *uid = strtoull( uid_str, NULL, 0 );
- return EEP_OK;
- }
-#endif
-
- /* If this brick is retrieving its own uid, use the local l1sc_t to
- * arbitrate access to the l1; otherwise, set up a new one.
- */
- if( local ) {
- scp = get_l1sc();
- }
- else {
- scp = &sc;
- sc_init( &sc, nasid, BRL1_LOCALHUB_UART );
- }
-
- /* fill in msg with the opcode & params */
- BZERO( msg, BRL1_QSIZE );
- if( (subch = sc_open( scp, L1_ADDR_LOCAL )) < 0 )
- return EEP_L1;
-
- if( (len = sc_construct_msg( scp, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_SER_NUM, 0 )) < 0 )
- {
- sc_close( scp, subch );
- return( EEP_L1 );
- }
-
- /* send the request to the L1 */
- if( sc_command( scp, subch, msg, msg, &len ) ) {
- sc_close( scp, subch );
- return( EEP_L1 );
- }
-
- /* free up subchannel */
- sc_close(scp, subch);
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_ASCII, uid_str ) < 0 )
- {
- return( EEP_L1 );
- }
-
- *uid = generate_unique_id( uid_str, strlen( uid_str ) );
-
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int rbrick_uid_get( nasid_t nasid, net_vec_t path, uint64_t *uid )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- char uid_str[32];
- char msg[BRL1_QSIZE];
- int subch, len;
- l1sc_t sc;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
-#define FAIL \
- { \
- *uid = rtc_time(); \
- printk( "rbrick_uid_get failed; using current time as uid\n" ); \
- return EEP_OK; \
- }
-
- ROUTER_LOCK(path);
- sc_init( &sc, nasid, path );
-
- /* fill in msg with the opcode & params */
- BZERO( msg, BRL1_QSIZE );
- if( (subch = sc_open( &sc, L1_ADDR_LOCAL )) < 0 ) {
- ROUTER_UNLOCK(path);
- FAIL;
- }
-
- if( (len = sc_construct_msg( &sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_SER_NUM, 0 )) < 0 )
- {
- ROUTER_UNLOCK(path);
- sc_close( &sc, subch );
- FAIL;
- }
-
- /* send the request to the L1 */
- if( sc_command( &sc, subch, msg, msg, &len ) ) {
- ROUTER_UNLOCK(path);
- sc_close( &sc, subch );
- FAIL;
- }
-
- /* free up subchannel */
- ROUTER_UNLOCK(path);
- sc_close(&sc, subch);
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_ASCII, uid_str ) < 0 )
- {
- FAIL;
- }
-
- *uid = generate_unique_id( uid_str, strlen( uid_str ) );
-
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-int iobrick_uid_get( nasid_t nasid, uint64_t *uid )
-{
- eeprom_brd_record_t eep;
- eeprom_board_ia_t board;
- eeprom_chassis_ia_t chassis;
- int r;
-
- eep.board_ia = &board;
- eep.chassis_ia = &chassis;
- eep.spd = NULL;
-
- r = iobrick_eeprom_read( &eep, nasid, IO_BRICK );
- if( r != EEP_OK ) {
- *uid = rtc_time();
- return r;
- }
-
- *uid = generate_unique_id( board.serial_num,
- board.serial_num_tl & FIELD_LENGTH_MASK );
-
- return EEP_OK;
-}
-
-
-int ibrick_mac_addr_get( nasid_t nasid, char *eaddr )
-{
- eeprom_brd_record_t eep;
- eeprom_board_ia_t board;
- eeprom_chassis_ia_t chassis;
- int r;
- char *tmp;
-
- eep.board_ia = &board;
- eep.chassis_ia = &chassis;
- eep.spd = NULL;
-
- r = iobrick_eeprom_read( &eep, nasid, IO_BRICK );
- if( (r != EEP_OK) || (board.mac_addr[0] == '\0') ) {
- db_printf(( "ibrick_mac_addr_get: "
- "Couldn't read MAC address from EEPROM\n" ));
- return EEP_L1;
- }
- else {
- /* successfully read info area */
- int ix;
- tmp = board.mac_addr;
- for( ix = 0; ix < (board.mac_addr_tl & FIELD_LENGTH_MASK); ix++ )
- {
- *eaddr++ = *tmp++;
- }
- *eaddr = '\0';
- }
-
- return EEP_OK;
-}
-
-
-/*
- * eeprom_vertex_info_set
- *
- * Given a vertex handle, a component designation, a starting nasid
- * and (in the case of a router) a vector path to the component, this
- * function will read the EEPROM and attach the resulting information
- * to the vertex in the same string format as that provided by the
- * Dallas Semiconductor NIC drivers. If the vertex already has the
- * string, this function just returns the string.
- */
-
-extern char *nic_vertex_info_get( devfs_handle_t );
-extern void nic_vmc_check( devfs_handle_t, char * );
-/* the following were lifted from nic.c - change later? */
-#define MAX_INFO 2048
-#define NEWSZ(ptr,sz) ((ptr) = kern_malloc((sz)))
-#define DEL(ptr) (kern_free((ptr)))
-
-char *eeprom_vertex_info_set( int component, int nasid, devfs_handle_t v,
- net_vec_t path )
-{
- char *info_tmp;
- int info_len;
- char *info;
-
- /* see if this vertex is already marked */
- info_tmp = nic_vertex_info_get(v);
- if (info_tmp) return info_tmp;
-
- /* get a temporary place for the data */
- NEWSZ(info_tmp, MAX_INFO);
- if (!info_tmp) return NULL;
-
- /* read the EEPROM */
- if( component & R_BRICK ) {
- if( RBRICK_EEPROM_STR( info_tmp, nasid, path ) != EEP_OK )
- return NULL;
- }
- else {
- if( eeprom_str( info_tmp, nasid, component ) != EEP_OK )
- return NULL;
- }
-
- /* allocate a smaller final place */
- info_len = strlen(info_tmp)+1;
- NEWSZ(info, info_len);
- if (info) {
- strcpy(info, info_tmp);
- DEL(info_tmp);
- } else {
- info = info_tmp;
- }
-
- /* add info to the vertex */
- hwgraph_info_add_LBL(v, INFO_LBL_NIC,
- (arbitrary_info_t) info);
-
- /* see if someone else got there first */
- info_tmp = nic_vertex_info_get(v);
- if (info != info_tmp) {
- DEL(info);
- return info_tmp;
- }
-
- /* export the data */
- hwgraph_info_export_LBL(v, INFO_LBL_NIC, info_len);
-
- /* trigger all matching callbacks */
- nic_vmc_check(v, info);
-
- return info;
-}
-
-
-/*********************************************************************
- *
- * stubs for use until the Bedrock/L1 link is available
- *
- */
-
-#include <asm/sn/nic.h>
-
-/* #define EEPROM_TEST */
-
-/* fake eeprom reading functions (replace when the BR/L1 communication
- * channel is in working order)
- */
-
-
-/* generate a character in [0-9A-Z]; if an "extra" character is
- * specified (such as '_'), include it as one of the possibilities.
- */
-char random_eeprom_ch( char extra )
-{
- char ch;
- int modval = 36;
- if( extra )
- modval++;
-
- ch = rtc_time() % modval;
-
- if( ch < 10 )
- ch += '0';
- else if( ch >= 10 && ch < 36 )
- ch += ('A' - 10);
- else
- ch = extra;
-
- return ch;
-}
-
-/* create a part number of the form xxx-xxxx-xxx.
- * It may be important later to generate different
- * part numbers depending on the component we're
- * supposed to be "reading" from, so the component
- * parameter is provided.
- */
-void fake_a_part_number( char *buf, int component )
-{
- int i;
- switch( component ) {
-
- /* insert component-specific routines here */
-
- case C_BRICK:
- strcpy( buf, "030-1266-001" );
- break;
- default:
- for( i = 0; i < 12; i++ ) {
- if( i == 3 || i == 8 )
- buf[i] = '-';
- else
- buf[i] = random_eeprom_ch(0);
- }
- }
-}
-
-
-/* create a six-character serial number */
-void fake_a_serial_number( char *buf, uint64_t ser )
-{
- int i;
- static const char hexchars[] = "0123456789ABCDEF";
-
- if (ser) {
- for( i = 5; i >=0; i-- ) {
- buf[i] = hexchars[ser & 0xf];
- ser >>= 4;
- }
- }
- else {
- for( i = 0; i < 6; i++ )
- buf[i] = random_eeprom_ch(0);
- }
-}
-
-
-void fake_a_product_name( uchar_t *format, char* buf, int component )
-{
- switch( component & BRICK_MASK ) {
-
- case C_BRICK:
- if( component & SUBORD_MASK ) {
- strcpy( buf, "C_BRICK_SUB" );
- *format = 0xCB;
- }
- else {
- strcpy( buf, "IP35" );
- *format = 0xC4;
- }
- break;
-
- case R_BRICK:
- if( component & SUBORD_MASK ) {
- strcpy( buf, "R_BRICK_SUB" );
- *format = 0xCB;
- }
- else {
- strcpy( buf, "R_BRICK" );
- *format = 0xC7;
- }
- break;
-
- case IO_BRICK:
- if( component & SUBORD_MASK ) {
- strcpy( buf, "IO_BRICK_SUB" );
- *format = 0xCC;
- }
- else {
- strcpy( buf, "IO_BRICK" );
- *format = 0xC8;
- }
- break;
-
- default:
- strcpy( buf, "UNK_DEVICE" );
- *format = 0xCA;
- }
-}
-
-
-
-int fake_an_eeprom_record( eeprom_brd_record_t *buf, int component,
- uint64_t ser )
-{
- eeprom_board_ia_t *board;
- eeprom_chassis_ia_t *chassis;
- int i, cs;
-
- board = buf->board_ia;
- chassis = buf->chassis_ia;
-
- if( !(component & SUBORD_MASK) ) {
- if( !chassis )
- return EEP_PARAM;
- chassis->format = 0;
- chassis->length = 5;
- chassis->type = 0x17;
-
- chassis->part_num_tl = 0xCC;
- fake_a_part_number( chassis->part_num, component );
- chassis->serial_num_tl = 0xC6;
- fake_a_serial_number( chassis->serial_num, ser );
-
- cs = chassis->format + chassis->length + chassis->type
- + chassis->part_num_tl + chassis->serial_num_tl;
- for( i = 0; i < (chassis->part_num_tl & FIELD_LENGTH_MASK); i++ )
- cs += chassis->part_num[i];
- for( i = 0; i < (chassis->serial_num_tl & FIELD_LENGTH_MASK); i++ )
- cs += chassis->serial_num[i];
- chassis->checksum = 256 - (cs % 256);
- }
-
- if( !board )
- return EEP_PARAM;
- board->format = 0;
- board->length = 10;
- board->language = 0;
- board->mfg_date = 1789200; /* noon, 5/26/99 */
- board->manuf_tl = 0xC3;
- strcpy( board->manuf, "SGI" );
-
- fake_a_product_name( &(board->product_tl), board->product, component );
-
- board->serial_num_tl = 0xC6;
- fake_a_serial_number( board->serial_num, ser );
-
- board->part_num_tl = 0xCC;
- fake_a_part_number( board->part_num, component );
-
- board->board_rev_tl = 0xC2;
- board->board_rev[0] = '0';
- board->board_rev[1] = '1';
-
- board->eeprom_size_tl = 0x01;
- board->eeprom_size = 1;
-
- board->temp_waiver_tl = 0xC2;
- board->temp_waiver[0] = '0';
- board->temp_waiver[1] = '1';
-
- cs = board->format + board->length + board->language
- + (board->mfg_date & 0xFF)
- + (board->mfg_date & 0xFF00)
- + (board->mfg_date & 0xFF0000)
- + board->manuf_tl + board->product_tl + board->serial_num_tl
- + board->part_num_tl + board->board_rev_tl
- + board->board_rev[0] + board->board_rev[1]
- + board->eeprom_size_tl + board->eeprom_size + board->temp_waiver_tl
- + board->temp_waiver[0] + board->temp_waiver[1];
- for( i = 0; i < (board->manuf_tl & FIELD_LENGTH_MASK); i++ )
- cs += board->manuf[i];
- for( i = 0; i < (board->product_tl & FIELD_LENGTH_MASK); i++ )
- cs += board->product[i];
- for( i = 0; i < (board->serial_num_tl & FIELD_LENGTH_MASK); i++ )
- cs += board->serial_num[i];
- for( i = 0; i < (board->part_num_tl & FIELD_LENGTH_MASK); i++ )
- cs += board->part_num[i];
-
- board->checksum = 256 - (cs % 256);
-
- return EEP_OK;
-}
-
-#define EEPROM_CHUNKSIZE 64
-
-#if defined(EEPROM_DEBUG)
-#define RETURN_ERROR \
-{ \
- printk( "read_ia error return, component 0x%x, line %d" \
- ", address 0x%x, ia code 0x%x\n", \
- l1_compt, __LINE__, sc->subch[subch].target, ia_code ); \
- return EEP_L1; \
-}
-
-#else
-#define RETURN_ERROR return(EEP_L1)
-#endif
-
-int read_ia( l1sc_t *sc, int subch, int l1_compt,
- int ia_code, char *eep_record )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- char msg[BRL1_QSIZE]; /* message buffer */
- int len; /* number of bytes used in message buffer */
- int ia_len = EEPROM_CHUNKSIZE; /* remaining bytes in info area */
- int offset = 0; /* current offset into info area */
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- BZERO( msg, BRL1_QSIZE );
-
- /* retrieve EEPROM data in 64-byte chunks
- */
-
- while( ia_len )
- {
- /* fill in msg with opcode & params */
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_EEPROM, 8,
- L1_ARG_INT, l1_compt,
- L1_ARG_INT, ia_code,
- L1_ARG_INT, offset,
- L1_ARG_INT, ia_len )) < 0 )
- {
- RETURN_ERROR;
- }
-
- /* send the request to the L1 */
-
- if( sc_command( sc, subch, msg, msg, &len ) ) {
- RETURN_ERROR;
- }
-
- /* check response */
- if( sc_interpret_resp( msg, 5,
- L1_ARG_INT, &ia_len,
- L1_ARG_UNKNOWN, &len, eep_record ) < 0 )
- {
- RETURN_ERROR;
- }
-
- if( ia_len > EEPROM_CHUNKSIZE )
- ia_len = EEPROM_CHUNKSIZE;
-
- eep_record += EEPROM_CHUNKSIZE;
- offset += EEPROM_CHUNKSIZE;
- }
-
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int read_spd( l1sc_t *sc, int subch, int l1_compt,
- eeprom_spd_u *spd )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- char msg[BRL1_QSIZE]; /* message buffer */
- int len; /* number of bytes used in message buffer */
- int resp; /* l1 response code */
- int spd_len = EEPROM_CHUNKSIZE; /* remaining bytes in spd record */
- int offset = 0; /* current offset into spd record */
- char *spd_p = spd->bytes; /* "thumb" for writing to spd */
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- BZERO( msg, BRL1_QSIZE );
-
- /* retrieve EEPROM data in 64-byte chunks
- */
-
- while( spd_len )
- {
- /* fill in msg with opcode & params */
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_EEPROM, 8,
- L1_ARG_INT, l1_compt,
- L1_ARG_INT, L1_EEP_SPD,
- L1_ARG_INT, offset,
- L1_ARG_INT, spd_len )) < 0 )
- {
- return( EEP_L1 );
- }
-
- /* send the request to the L1 */
- if( sc_command( sc, subch, msg, msg, &len ) ) {
- return( EEP_L1 );
- }
-
- /* check response */
- if( (resp = sc_interpret_resp( msg, 5,
- L1_ARG_INT, &spd_len,
- L1_ARG_UNKNOWN, &len, spd_p )) < 0 )
- {
- /*
- * translate l1 response code to eeprom.c error codes:
- * The L1 response will be L1_RESP_NAVAIL if the spd
- * can't be read (i.e. the spd isn't physically there). It will
- * return L1_RESP_INVAL if the spd exists, but fails the checksum
- * test because the eeprom wasn't programmed, programmed incorrectly,
- * or corrupted. L1_RESP_NAVAIL indicates the eeprom is likely not present,
- * whereas L1_RESP_INVAL indicates the eeprom is present, but the data is
- * invalid.
- */
- if(resp == L1_RESP_INVAL) {
- resp = EEP_BAD_CHECKSUM;
- } else {
- resp = EEP_L1;
- }
- return( resp );
- }
-
- if( spd_len > EEPROM_CHUNKSIZE )
- spd_len = EEPROM_CHUNKSIZE;
-
- spd_p += EEPROM_CHUNKSIZE;
- offset += EEPROM_CHUNKSIZE;
- }
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int read_chassis_ia( l1sc_t *sc, int subch, int l1_compt,
- eeprom_chassis_ia_t *ia )
-{
- char eep_record[512]; /* scratch area for building up info area */
- char *eep_rec_p = eep_record; /* thumb for moving through eep_record */
- int checksum = 0; /* use to verify eeprom record checksum */
- int i;
-
- /* Read in info area record from the L1.
- */
- if( read_ia( sc, subch, l1_compt, L1_EEP_CHASSIS, eep_record )
- != EEP_OK )
- {
- return EEP_L1;
- }
-
- /* Now we've got the whole info area. Transfer it to the data structure.
- */
-
- eep_rec_p = eep_record;
- ia->format = *eep_rec_p++;
- ia->length = *eep_rec_p++;
- if( ia->length == 0 ) {
- /* since we're using 8*ia->length-1 as an array index later, make
- * sure it's sane.
- */
- db_printf(( "read_chassis_ia: eeprom length byte of ZERO\n" ));
- return EEP_L1;
- }
- ia->type = *eep_rec_p++;
-
- ia->part_num_tl = *eep_rec_p++;
-
- (void)BCOPY( eep_rec_p, ia->part_num, (ia->part_num_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->part_num_tl & FIELD_LENGTH_MASK);
-
- ia->serial_num_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->serial_num,
- (ia->serial_num_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->serial_num_tl & FIELD_LENGTH_MASK);
-
- ia->checksum = eep_record[(8 * ia->length) - 1];
-
- /* verify checksum */
- eep_rec_p = eep_record;
- checksum = 0;
- for( i = 0; i < (8 * ia->length); i++ ) {
- checksum += *eep_rec_p++;
- }
-
- if( (checksum & 0xff) != 0 )
- {
- db_printf(( "read_chassis_ia: bad checksum\n" ));
- db_printf(( "read_chassis_ia: target 0x%x uart 0x%lx\n",
- sc->subch[subch].target, sc->uart ));
- return EEP_BAD_CHECKSUM;
- }
-
- return EEP_OK;
-}
-
-
-int read_board_ia( l1sc_t *sc, int subch, int l1_compt,
- eeprom_board_ia_t *ia )
-{
- char eep_record[512]; /* scratch area for building up info area */
- char *eep_rec_p = eep_record; /* thumb for moving through eep_record */
- int checksum = 0; /* running checksum total */
- int i;
-
- BZERO( ia, sizeof( eeprom_board_ia_t ) );
-
- /* Read in info area record from the L1.
- */
- if( read_ia( sc, subch, l1_compt, L1_EEP_BOARD, eep_record )
- != EEP_OK )
- {
- db_printf(( "read_board_ia: error reading info area from L1\n" ));
- return EEP_L1;
- }
-
- /* Now we've got the whole info area. Transfer it to the data structure.
- */
-
- eep_rec_p = eep_record;
- ia->format = *eep_rec_p++;
- ia->length = *eep_rec_p++;
- if( ia->length == 0 ) {
- /* since we're using 8*ia->length-1 as an array index later, make
- * sure it's sane.
- */
- db_printf(( "read_board_ia: eeprom length byte of ZERO\n" ));
- return EEP_L1;
- }
- ia->language = *eep_rec_p++;
-
- ia->mfg_date = eeprom_xlate_board_mfr_date( (uchar_t *)eep_rec_p );
- eep_rec_p += 3;
-
- ia->manuf_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->manuf, (ia->manuf_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->manuf_tl & FIELD_LENGTH_MASK);
-
- ia->product_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->product, (ia->product_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->product_tl & FIELD_LENGTH_MASK);
-
- ia->serial_num_tl = *eep_rec_p++;
-
- BCOPY(eep_rec_p, ia->serial_num, (ia->serial_num_tl & FIELD_LENGTH_MASK));
- eep_rec_p += (ia->serial_num_tl & FIELD_LENGTH_MASK);
-
- ia->part_num_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->part_num, (ia->part_num_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->part_num_tl & FIELD_LENGTH_MASK);
-
- eep_rec_p++; /* we do not use the FRU file id */
-
- ia->board_rev_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->board_rev, (ia->board_rev_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->board_rev_tl & FIELD_LENGTH_MASK);
-
- ia->eeprom_size_tl = *eep_rec_p++;
- ia->eeprom_size = *eep_rec_p++;
-
- ia->temp_waiver_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->temp_waiver,
- (ia->temp_waiver_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->temp_waiver_tl & FIELD_LENGTH_MASK);
-
- /* if there's more, we must be reading a main board; get
- * additional fields
- */
- if( ((unsigned char)*eep_rec_p != (unsigned char)EEPROM_EOF) ) {
-
- ia->ekey_G_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, (char *)&ia->ekey_G,
- ia->ekey_G_tl & FIELD_LENGTH_MASK );
- eep_rec_p += (ia->ekey_G_tl & FIELD_LENGTH_MASK);
-
- ia->ekey_P_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, (char *)&ia->ekey_P,
- ia->ekey_P_tl & FIELD_LENGTH_MASK );
- eep_rec_p += (ia->ekey_P_tl & FIELD_LENGTH_MASK);
-
- ia->ekey_Y_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, (char *)&ia->ekey_Y,
- ia->ekey_Y_tl & FIELD_LENGTH_MASK );
- eep_rec_p += (ia->ekey_Y_tl & FIELD_LENGTH_MASK);
-
- /*
- * need to get a couple more fields if this is an I brick
- */
- if( ((unsigned char)*eep_rec_p != (unsigned char)EEPROM_EOF) ) {
-
- ia->mac_addr_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, ia->mac_addr,
- ia->mac_addr_tl & FIELD_LENGTH_MASK );
- eep_rec_p += (ia->mac_addr_tl & FIELD_LENGTH_MASK);
-
- ia->ieee1394_cfg_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, ia->ieee1394_cfg,
- ia->ieee1394_cfg_tl & FIELD_LENGTH_MASK );
-
- }
- }
-
- ia->checksum = eep_record[(ia->length * 8) - 1];
-
- /* verify checksum */
- eep_rec_p = eep_record;
- checksum = 0;
- for( i = 0; i < (8 * ia->length); i++ ) {
- checksum += *eep_rec_p++;
- }
-
- if( (checksum & 0xff) != 0 )
- {
- db_printf(( "read_board_ia: bad checksum\n" ));
- db_printf(( "read_board_ia: target 0x%x uart 0x%lx\n",
- sc->subch[subch].target, sc->uart ));
- return EEP_BAD_CHECKSUM;
- }
-
- return EEP_OK;
-}
-
-
-int _cbrick_eeprom_read( eeprom_brd_record_t *buf, l1sc_t *scp,
- int component )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- int r;
- uint64_t uid = 0;
-#ifdef LOG_GETENV
- char uid_str[32];
-#endif
- int l1_compt, subch;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* make sure we're targeting a cbrick */
- if( !(component & C_BRICK) )
- return EEP_PARAM;
-
- /* If the promlog variable pointed to by IP27LOG_OVNIC is set,
- * use that value for the cbrick UID rather than the EEPROM
- * serial number.
- */
-#ifdef LOG_GETENV
- if( ip27log_getenv( scp->nasid, IP27LOG_OVNIC, uid_str, "0", 0 ) >= 0 )
- {
- db_printf(( "_cbrick_eeprom_read: "
- "Overriding UID with environment variable %s\n",
- IP27LOG_OVNIC ));
- uid = strtoull( uid_str, NULL, 0 );
- }
-#endif
-
- if( (subch = sc_open( scp, L1_ADDR_LOCAL )) < 0 )
- return EEP_L1;
-
- if((component & C_DIMM) == C_DIMM) {
- l1_compt = L1_EEP_DIMM(component & COMPT_MASK);
- r = read_spd(scp,subch,l1_compt, buf->spd);
- sc_close(scp,subch);
- return(r);
- }
-
- switch( component )
- {
- case C_BRICK:
- /* c-brick motherboard */
- l1_compt = L1_EEP_NODE;
- r = read_chassis_ia( scp, subch, l1_compt, buf->chassis_ia );
- if( r != EEP_OK ) {
- sc_close( scp, subch );
- db_printf(( "_cbrick_eeprom_read: using a fake eeprom record\n" ));
- return fake_an_eeprom_record( buf, component, uid );
- }
- if( uid ) {
- /* If IP27LOG_OVNIC is set, we want to put that value
- * in as our UID. */
- fake_a_serial_number( buf->chassis_ia->serial_num, uid );
- buf->chassis_ia->serial_num_tl = 6;
- }
- break;
-
- case C_PIMM:
- /* one of the PIMM boards */
- l1_compt = L1_EEP_PIMM( component & COMPT_MASK );
- break;
-
- default:
- /* unsupported board type */
- sc_close( scp, subch );
- return EEP_PARAM;
- }
-
- r = read_board_ia( scp, subch, l1_compt, buf->board_ia );
- sc_close( scp, subch );
- if( r != EEP_OK )
- {
- db_printf(( "_cbrick_eeprom_read: using a fake eeprom record\n" ));
- return fake_an_eeprom_record( buf, component, uid );
- }
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int cbrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
- int component )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- l1sc_t *scp;
- int local = (nasid == get_nasid());
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* If this brick is retrieving its own uid, use the local l1sc_t to
- * arbitrate access to the l1; otherwise, set up a new one (prom) or
- * use an existing remote l1sc_t (kernel)
- */
- if( local ) {
- scp = get_l1sc();
- }
- else {
- scp = &NODEPDA( NASID_TO_COMPACT_NODEID(nasid) )->module->elsc;
- }
-
- return _cbrick_eeprom_read( buf, scp, component );
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int iobrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
- int component )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- int r;
- int l1_compt, subch;
- l1sc_t *scp;
- int local = (nasid == get_nasid());
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* make sure we're talking to an applicable brick */
- if( !(component & IO_BRICK) ) {
- return EEP_PARAM;
- }
-
- /* If we're talking to this c-brick's attached io brick, use
- * the local l1sc_t; otherwise, set up a new one (prom) or
- * use an existing remote l1sc_t (kernel)
- */
- if( local ) {
- scp = get_l1sc();
- }
- else {
- scp = &NODEPDA( NASID_TO_COMPACT_NODEID(nasid) )->module->elsc;
- }
-
- if( (subch = sc_open( scp, L1_ADDR_LOCALIO )) < 0 )
- return EEP_L1;
-
-
- switch( component )
- {
- case IO_BRICK:
- /* IO brick motherboard */
- l1_compt = L1_EEP_LOGIC;
- r = read_chassis_ia( scp, subch, l1_compt, buf->chassis_ia );
-
- if( r != EEP_OK ) {
- sc_close( scp, subch );
- /*
- * Whenever we no longer need to test on hardware
- * that does not have EEPROMS, then this can be removed.
- */
- r = fake_an_eeprom_record( buf, component, rtc_time() );
- return r;
- }
- break;
-
- case IO_POWER:
- /* IO brick power board */
- l1_compt = L1_EEP_POWER;
- break;
-
- default:
- /* unsupported board type */
- sc_close( scp, subch );
- return EEP_PARAM;
- }
-
- r = read_board_ia( scp, subch, l1_compt, buf->board_ia );
- sc_close( scp, subch );
- if( r != EEP_OK ) {
- return r;
- }
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int vector_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
- net_vec_t path, int component )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- int r;
- uint64_t uid = 0;
- int l1_compt, subch;
- l1sc_t sc;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* make sure we're targeting an applicable brick */
- if( !(component & VECTOR) )
- return EEP_PARAM;
-
- switch( component & BRICK_MASK )
- {
- case R_BRICK:
- ROUTER_LOCK( path );
- sc_init( &sc, nasid, path );
-
- if( (subch = sc_open( &sc, L1_ADDR_LOCAL )) < 0 )
- {
- db_printf(( "vector_eeprom_read: couldn't open subch\n" ));
- ROUTER_UNLOCK(path);
- return EEP_L1;
- }
- switch( component )
- {
- case R_BRICK:
- /* r-brick motherboard */
- l1_compt = L1_EEP_LOGIC;
- r = read_chassis_ia( &sc, subch, l1_compt, buf->chassis_ia );
- if( r != EEP_OK ) {
- sc_close( &sc, subch );
- ROUTER_UNLOCK( path );
- printk( "vector_eeprom_read: couldn't get rbrick eeprom info;"
- " using current time as uid\n" );
- uid = rtc_time();
- db_printf(("vector_eeprom_read: using a fake eeprom record\n"));
- return fake_an_eeprom_record( buf, component, uid );
- }
- break;
-
- case R_POWER:
- /* r-brick power board */
- l1_compt = L1_EEP_POWER;
- break;
-
- default:
- /* unsupported board type */
- sc_close( &sc, subch );
- ROUTER_UNLOCK( path );
- return EEP_PARAM;
- }
- r = read_board_ia( &sc, subch, l1_compt, buf->board_ia );
- sc_close( &sc, subch );
- ROUTER_UNLOCK( path );
- if( r != EEP_OK ) {
- db_printf(( "vector_eeprom_read: using a fake eeprom record\n" ));
- return fake_an_eeprom_record( buf, component, uid );
- }
- return EEP_OK;
-
- case C_BRICK:
- sc_init( &sc, nasid, path );
- return _cbrick_eeprom_read( buf, &sc, component );
-
- default:
- /* unsupported brick type */
- return EEP_PARAM;
- }
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
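
The UID scheme in the deleted generate_unique_id()/char_to_base36() above treats a board serial number as a base-36 numeral: characters outside [0-9A-Za-z] are skipped, everything else becomes a digit, and a zero result falls back to rtc_time(). A minimal user-space sketch of that conversion follows (hypothetical test harness; the function names and the sample serial number are illustrative, not taken from the tree):

#include <stdio.h>
#include <stdint.h>

static unsigned int base36_digit(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'A' && c <= 'Z')
		return c - 'A' + 10;
	if (c >= 'a' && c <= 'z')
		return c - 'a' + 10;
	return 0;		/* callers are expected to filter first */
}

static uint64_t serial_to_uid(const char *sn)
{
	uint64_t uid = 0;

	for (; *sn; sn++) {
		/* skip anything that is not a base-36 digit, as the
		 * deleted kernel code does with VALID_BASE36() */
		if (!((*sn >= '0' && *sn <= '9') ||
		      (*sn >= 'A' && *sn <= 'Z') ||
		      (*sn >= 'a' && *sn <= 'z')))
			continue;
		uid = uid * 36 + base36_digit(*sn);
	}
	return uid;		/* 0 means "no usable characters were found" */
}

int main(void)
{
	/* hypothetical serial number; real ones come from the board EEPROM */
	printf("uid = %llu\n", (unsigned long long)serial_to_uid("MLA-0042"));
	return 0;
}
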
diff --git a/arch/ia64/sn/io/efi-rtc.c b/arch/ia64/sn/io/efi-rtc.c
deleted file mode 100644
index b0e12fe47dd4be..00000000000000
--- a/arch/ia64/sn/io/efi-rtc.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 Silicon Graphics, Inc.
- * Copyright (C) 2001 by Ralf Baechle
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/efi.h>
-#include <asm/sn/klclock.h>
-
-/*
- * No locking necessary when this is called from efirtc which protects us
- * from racing by efi_rtc_lock.
- */
-#define __swizzle(addr) ((u8 *)((unsigned long)(addr) ^ 3))
-#define read_io_port(addr) (*(volatile u8 *) __swizzle(addr))
-#define write_io_port(addr, data) (*(volatile u8 *) __swizzle(addr) = (data))
-
-#define TOD_SGS_M48T35 1
-#define TOD_DALLAS_DS1386 2
-
-static unsigned long nvram_base = 0;
-static int tod_chip_type;
-
-static int
-get_tod_chip_type(void)
-{
- unsigned char testval;
-
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_DISABLE);
- write_io_port(RTC_DAL_DAY_ADDR, 0xff);
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_ENABLE);
-
- testval = read_io_port(RTC_DAL_DAY_ADDR);
- if (testval == 0xff)
- return TOD_SGS_M48T35;
-
- return TOD_DALLAS_DS1386;
-}
-
-efi_status_t
-ioc3_get_time(efi_time_t *time, efi_time_cap_t *caps)
-{
- if (!nvram_base) {
- printk(KERN_CRIT "nvram_base is zero\n");
- return EFI_UNSUPPORTED;
- }
-
- memset(time, 0, sizeof(*time));
-
- switch (tod_chip_type) {
- case TOD_SGS_M48T35:
- write_io_port(RTC_SGS_CONTROL_ADDR, RTC_SGS_READ_PROTECT);
-
- time->year = BCD_TO_INT(read_io_port(RTC_SGS_YEAR_ADDR)) + YRREF;
- time->month = BCD_TO_INT(read_io_port(RTC_SGS_MONTH_ADDR));
- time->day = BCD_TO_INT(read_io_port(RTC_SGS_DATE_ADDR));
- time->hour = BCD_TO_INT(read_io_port(RTC_SGS_HOUR_ADDR));
- time->minute = BCD_TO_INT(read_io_port(RTC_SGS_MIN_ADDR));
- time->second = BCD_TO_INT(read_io_port(RTC_SGS_SEC_ADDR));
- time->nanosecond = 0;
-
- write_io_port(RTC_SGS_CONTROL_ADDR, 0);
- break;
-
- case TOD_DALLAS_DS1386:
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_DISABLE);
-
- time->nanosecond = 0;
- time->second = BCD_TO_INT(read_io_port(RTC_DAL_SEC_ADDR));
- time->minute = BCD_TO_INT(read_io_port(RTC_DAL_MIN_ADDR));
- time->hour = BCD_TO_INT(read_io_port(RTC_DAL_HOUR_ADDR));
- time->day = BCD_TO_INT(read_io_port(RTC_DAL_DATE_ADDR));
- time->month = BCD_TO_INT(read_io_port(RTC_DAL_MONTH_ADDR));
- time->year = BCD_TO_INT(read_io_port(RTC_DAL_YEAR_ADDR)) + YRREF;
-
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_ENABLE);
- break;
-
- default:
- break;
- }
-
- if (caps) {
- caps->resolution = 50000000; /* 50PPM */
- caps->accuracy = 1000; /* 1ms */
- caps->sets_to_zero = 0;
- }
-
- return EFI_SUCCESS;
-}
-
-static efi_status_t ioc3_set_time (efi_time_t *t)
-{
- if (!nvram_base) {
- printk(KERN_CRIT "nvram_base is zero\n");
- return EFI_UNSUPPORTED;
- }
-
- switch (tod_chip_type) {
- case TOD_SGS_M48T35:
- write_io_port(RTC_SGS_CONTROL_ADDR, RTC_SGS_WRITE_ENABLE);
- write_io_port(RTC_SGS_YEAR_ADDR, INT_TO_BCD((t->year - YRREF)));
- write_io_port(RTC_SGS_MONTH_ADDR,INT_TO_BCD(t->month));
- write_io_port(RTC_SGS_DATE_ADDR, INT_TO_BCD(t->day));
- write_io_port(RTC_SGS_HOUR_ADDR, INT_TO_BCD(t->hour));
- write_io_port(RTC_SGS_MIN_ADDR, INT_TO_BCD(t->minute));
- write_io_port(RTC_SGS_SEC_ADDR, INT_TO_BCD(t->second));
- write_io_port(RTC_SGS_CONTROL_ADDR, 0);
- break;
-
- case TOD_DALLAS_DS1386:
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_DISABLE);
- write_io_port(RTC_DAL_SEC_ADDR, INT_TO_BCD(t->second));
- write_io_port(RTC_DAL_MIN_ADDR, INT_TO_BCD(t->minute));
- write_io_port(RTC_DAL_HOUR_ADDR, INT_TO_BCD(t->hour));
- write_io_port(RTC_DAL_DATE_ADDR, INT_TO_BCD(t->day));
- write_io_port(RTC_DAL_MONTH_ADDR,INT_TO_BCD(t->month));
- write_io_port(RTC_DAL_YEAR_ADDR, INT_TO_BCD((t->year - YRREF)));
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_ENABLE);
- break;
-
- default:
- break;
- }
-
- return EFI_SUCCESS;
-}
-
-/* The following two are not supported atm. */
-static efi_status_t
-ioc3_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm)
-{
- return EFI_UNSUPPORTED;
-}
-
-static efi_status_t
-ioc3_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)
-{
- return EFI_UNSUPPORTED;
-}
-
-/*
- * It looks like the master IOC3 is usually on bus 0, device 4. Hope
- * that's right
- */
-static __init int efi_ioc3_time_init(void)
-{
- struct pci_dev *dev;
- static struct ioc3 *ioc3;
-
- dev = pci_find_slot(0, PCI_DEVFN(4, 0));
- if (!dev) {
- printk(KERN_CRIT "Couldn't find master IOC3\n");
-
- return -ENODEV;
- }
-
- ioc3 = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0));
- nvram_base = (unsigned long) ioc3 + IOC3_BYTEBUS_DEV0;
-
- tod_chip_type = get_tod_chip_type();
- if (tod_chip_type == 1)
- printk(KERN_NOTICE "TOD type is SGS M48T35\n");
- else if (tod_chip_type == 2)
- printk(KERN_NOTICE "TOD type is Dallas DS1386\n");
- else
- printk(KERN_CRIT "No or unknown TOD\n");
-
- efi.get_time = ioc3_get_time;
- efi.set_time = ioc3_set_time;
- efi.get_wakeup_time = ioc3_get_wakeup_time;
- efi.set_wakeup_time = ioc3_set_wakeup_time;
-
- return 0;
-}
-
-module_init(efi_ioc3_time_init);
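
The deleted efi-rtc.c reads and writes each time field as packed BCD through a byte-swizzled window onto the IOC3 byte bus (the __swizzle(), BCD_TO_INT() and INT_TO_BCD() helpers above). A hedged user-space sketch of those conversions, with the macros defined locally rather than pulled from <asm/sn/klclock.h>:

#include <stdio.h>

/* Packed BCD: high nibble is the tens digit, low nibble the ones digit. */
#define BCD_TO_INT(x)	(((((x) >> 4) & 0xf) * 10) + ((x) & 0xf))
#define INT_TO_BCD(x)	(((((x) / 10) & 0xf) << 4) | ((x) % 10))

/*
 * The deleted driver reaches single-byte RTC registers by XORing the low
 * address bits with 3, the usual trick for byte-lane reordering between
 * endiannesses; shown here as plain address arithmetic only.
 */
#define SWIZZLE(addr)	((unsigned long)(addr) ^ 3)

int main(void)
{
	unsigned char raw_min = 0x47;		/* BCD encoding of 47 */

	printf("BCD 0x%02x -> %d\n", raw_min, BCD_TO_INT(raw_min));
	printf("59 -> BCD 0x%02x\n", INT_TO_BCD(59));
	printf("swizzle(0x100) = 0x%lx\n", SWIZZLE(0x100));
	return 0;
}
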
diff --git a/arch/ia64/sn/io/hubspc.c b/arch/ia64/sn/io/hubspc.c
deleted file mode 100644
index 803226ec16f991..00000000000000
--- a/arch/ia64/sn/io/hubspc.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * hubspc.c - Hub Memory Space Management Driver
- * This driver implements the managers for the following
- * memory resources:
- * 1) reference counters
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <linux/devfs_fs.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/sn1/mem_refcnt.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/snconfig.h>
-#include <asm/sn/sn1/hubspc.h>
-#include <asm/sn/ksys/elsc.h>
-#include <asm/sn/simulator.h>
-
-
-/* Uncomment the following line for tracing */
-/* #define HUBSPC_DEBUG 1 */
-
-int hubspc_devflag = D_MP;
-
-
-/***********************************************************************/
-/* CPU Prom Space */
-/***********************************************************************/
-
-typedef struct cpuprom_info {
- devfs_handle_t prom_dev;
- devfs_handle_t nodevrtx;
- struct cpuprom_info *next;
-}cpuprom_info_t;
-
-static cpuprom_info_t *cpuprom_head;
-static spinlock_t cpuprom_spinlock;
-#define PROM_LOCK() mutex_spinlock(&cpuprom_spinlock)
-#define PROM_UNLOCK(s) mutex_spinunlock(&cpuprom_spinlock, (s))
-
-/*
- * Add prominfo to the linked list maintained.
- */
-void
-prominfo_add(devfs_handle_t hub, devfs_handle_t prom)
-{
- cpuprom_info_t *info;
- unsigned long s;
-
- info = kmalloc(sizeof(cpuprom_info_t), GFP_KERNEL);
- ASSERT(info);
- info->prom_dev = prom;
- info->nodevrtx = hub;
-
-
- s = PROM_LOCK();
- info->next = cpuprom_head;
- cpuprom_head = info;
- PROM_UNLOCK(s);
-}
-
-void
-prominfo_del(devfs_handle_t prom)
-{
- unsigned long s;
- cpuprom_info_t *info;
- cpuprom_info_t **prev;
-
- s = PROM_LOCK();
- prev = &cpuprom_head;
- while ( (info = *prev) ) {
- if (info->prom_dev == prom) {
- *prev = info->next;
- PROM_UNLOCK(s);
- return;
- }
-
- prev = &info->next;
- }
- PROM_UNLOCK(s);
- ASSERT(0);
-}
-
-devfs_handle_t
-prominfo_nodeget(devfs_handle_t prom)
-{
- unsigned long s;
- cpuprom_info_t *info;
-
- s = PROM_LOCK();
- info = cpuprom_head;
- while (info) {
- if(info->prom_dev == prom) {
- PROM_UNLOCK(s);
- return info->nodevrtx;
- }
- info = info->next;
- }
- PROM_UNLOCK(s);
- return 0;
-}
-
-#if defined(CONFIG_IA64_SGI_SN1)
-#define SN_PROMVERSION INV_IP35PROM
-
-/* Add "detailed" labelled inventory information to the
- * prom vertex
- */
-void
-cpuprom_detailed_inventory_info_add(devfs_handle_t prom_dev,devfs_handle_t node)
-{
- invent_miscinfo_t *cpuprom_inventory_info;
- extern invent_generic_t *klhwg_invent_alloc(cnodeid_t cnode,
- int class, int size);
- cnodeid_t cnode = hubdev_cnodeid_get(node);
-
- /* Allocate memory for the extra inventory information
- * for the prom
- */
- cpuprom_inventory_info = (invent_miscinfo_t *)
- klhwg_invent_alloc(cnode, INV_PROM, sizeof(invent_miscinfo_t));
-
- ASSERT(cpuprom_inventory_info);
-
- /* Set the enabled flag so that the hinv interprets this
- * information
- */
- cpuprom_inventory_info->im_gen.ig_flag = INVENT_ENABLED;
- cpuprom_inventory_info->im_type = SN_PROMVERSION;
- /* Store prom revision into inventory information */
- cpuprom_inventory_info->im_rev = IP27CONFIG.pvers_rev;
- cpuprom_inventory_info->im_version = IP27CONFIG.pvers_vers;
-
- /* Store this info as labelled information hanging off the
- * prom device vertex
- */
- hwgraph_info_add_LBL(prom_dev, INFO_LBL_DETAIL_INVENT,
- (arbitrary_info_t) cpuprom_inventory_info);
- /* Export this information so that user programs can get to
- * this by using attr_get()
- */
- hwgraph_info_export_LBL(prom_dev, INFO_LBL_DETAIL_INVENT,
- sizeof(invent_miscinfo_t));
-}
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
-/***********************************************************************/
-/* Base Hub Space Driver */
-/***********************************************************************/
-
-/*
- * hubspc_init
- * Registration of the hubspc devices with the hub manager
- */
-void
-hubspc_init(void)
-{
- /*
- * Register with the hub manager
- */
-
- /* The reference counters */
-#if defined(CONFIG_IA64_SGI_SN1)
- hubdev_register(mem_refcnt_attach);
-#endif
-
-#ifdef CONFIG_IA64_SGI_SN1
- /* L1 system controller link */
- if ( !IS_RUNNING_ON_SIMULATOR() ) {
- /* initialize the L1 link */
- extern void l1_init(void);
- l1_init();
- }
-#endif /* CONFIG_IA64_SGI_SN1 */
-#ifdef HUBSPC_DEBUG
- printk("hubspc_init: Completed\n");
-#endif /* HUBSPC_DEBUG */
- /* Initialize spinlocks */
- mutex_spinlock_init(&cpuprom_spinlock);
-}
-
-/* ARGSUSED */
-int
-hubspc_open(devfs_handle_t *devp, mode_t oflag, int otyp, cred_t *crp)
-{
- return (0);
-}
-
-
-/* ARGSUSED */
-int
-hubspc_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return (0);
-}
-
-/* ARGSUSED */
-int
-hubspc_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- /*REFERENCED*/
- int errcode = 0;
-
- /* check validity of request */
- if( len == 0 ) {
- return -ENXIO;
- }
-
- return errcode;
-}
-
-/* ARGSUSED */
-int
-hubspc_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- return (0);
-
-}
-
-/* ARGSUSED */
-int
-hubspc_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int mode,
- cred_t *cred_p,
- int *rvalp)
-{
- return (0);
-
-}
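
The prominfo_add()/prominfo_del() pair in the deleted hubspc.c is a spinlock-protected singly linked list, with removal done through a pointer-to-pointer walk so the head entry needs no special case. A minimal user-space sketch of the same pattern, with a pthread mutex standing in for the kernel spinlock and integer keys standing in for the devfs handles (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct prominfo {
	int key;			/* stands in for the prom vertex handle */
	int value;			/* stands in for the hub vertex handle  */
	struct prominfo *next;
};

static struct prominfo *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void prominfo_add(int key, int value)
{
	struct prominfo *info = malloc(sizeof(*info));

	if (!info)
		return;
	info->key = key;
	info->value = value;

	pthread_mutex_lock(&list_lock);
	info->next = head;		/* push onto the front, like the original */
	head = info;
	pthread_mutex_unlock(&list_lock);
}

static void prominfo_del(int key)
{
	struct prominfo **prev, *info;

	pthread_mutex_lock(&list_lock);
	for (prev = &head; (info = *prev) != NULL; prev = &info->next) {
		if (info->key == key) {
			*prev = info->next;	/* unlink head or middle alike */
			pthread_mutex_unlock(&list_lock);
			free(info);
			return;
		}
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	prominfo_add(1, 100);
	prominfo_add(2, 200);
	prominfo_del(1);
	printf("remaining head key: %d\n", head ? head->key : -1);
	return 0;
}
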
diff --git a/arch/ia64/sn/io/hwgdfs/Makefile b/arch/ia64/sn/io/hwgdfs/Makefile
new file mode 100644
index 00000000000000..530337ad96aec2
--- /dev/null
+++ b/arch/ia64/sn/io/hwgdfs/Makefile
@@ -0,0 +1,12 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+EXTRA_CFLAGS := -DLITTLE_ENDIAN
+
+obj-y += hcl.o labelcl.o hcl_util.o invent_stub.o
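
The -DLITTLE_ENDIAN flag above is a reminder that this port runs little-endian, which is exactly the case the deleted int64_to_hex_string() in eeprom.c left as a printk() stub. A sketch of an endianness-independent version that produces the same 16-digit, most-significant-nibble-first output by shifting instead of walking raw bytes (illustrative only, not code from the tree):

#include <stdio.h>
#include <stdint.h>

static void u64_to_hex_string(char *out, uint64_t val)
{
	static const char table[] = "0123456789abcdef";
	int i;

	for (i = 0; i < 16; i++) {
		/* take nibbles from the top down: bits 63..60 first */
		out[i] = table[(val >> (60 - 4 * i)) & 0xf];
	}
	out[16] = '\0';		/* caller must provide at least 17 bytes */
}

int main(void)
{
	char buf[17];

	u64_to_hex_string(buf, 0x123456789abcdef0ULL);
	printf("%s\n", buf);	/* prints 123456789abcdef0 */
	return 0;
}
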
diff --git a/arch/ia64/sn/io/hcl.c b/arch/ia64/sn/io/hwgdfs/hcl.c
index a8b8b98a6f81df..66c165691a52ac 100644
--- a/arch/ia64/sn/io/hcl.c
+++ b/arch/ia64/sn/io/hwgdfs/hcl.c
@@ -1,12 +1,11 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* hcl - SGI's Hardware Graph compatibility layer.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -23,13 +22,16 @@
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
+#include <asm/sn/simulator.h>
#define HCL_NAME "SGI-HWGRAPH COMPATIBILITY DRIVER"
#define HCL_TEMP_NAME "HCL_TEMP_NAME_USED_FOR_HWGRAPH_VERTEX_CREATE"
#define HCL_TEMP_NAME_LEN 44
#define HCL_VERSION "1.0"
-devfs_handle_t hwgraph_root = NULL;
-devfs_handle_t linux_busnum = NULL;
+vertex_hdl_t hwgraph_root;
+vertex_hdl_t linux_busnum;
+
+extern void pci_bus_cvlink_init(void);
/*
* Debug flag definition.
@@ -48,7 +50,7 @@ static unsigned int boot_options = OPTION_NONE;
/*
* Some Global definitions.
*/
-devfs_handle_t hcl_handle = NULL;
+static vertex_hdl_t hcl_handle;
invplace_t invplace_none = {
GRAPH_VERTEX_NONE,
@@ -132,25 +134,19 @@ struct file_operations hcl_fops = {
* Not a bad place to be ..
*
*/
-#ifdef MODULE
-int init_module (void)
-#else
int __init init_hcl(void)
-#endif
{
extern void string_table_init(struct string_table *);
extern struct string_table label_string_table;
extern int init_ifconfig_net(void);
+ extern int init_ioconfig_bus(void);
+ int status = 0;
int rv = 0;
-#if defined(CONFIG_HCL_DEBUG) && !defined(MODULE)
- printk ("\n%s: v%s Colin Ngam (cngam@sgi.com)\n",
- HCL_NAME, HCL_VERSION);
-
- hcl_debug = hcl_debug_init;
- printk ("%s: hcl_debug: 0x%0x\n", HCL_NAME, hcl_debug);
- printk ("\n%s: boot_options: 0x%0x\n", HCL_NAME, boot_options);
-#endif
+ if (IS_RUNNING_ON_SIMULATOR()) {
+ extern u64 klgraph_addr[];
+ klgraph_addr[0] = 0xe000003000030000;
+ }
/*
* Create the hwgraph_root on devfs.
@@ -159,6 +155,8 @@ int __init init_hcl(void)
if (rv)
printk ("WARNING: init_hcl: Failed to create hwgraph_root. Error = %d.\n", rv);
+ status = devfs_set_flags (hwgraph_root, DEVFS_FL_HIDE);
+
/*
* Create the hcl driver to support inventory entry manipulations.
* By default, it is expected that devfs is mounted on /dev.
@@ -189,11 +187,14 @@ int __init init_hcl(void)
return(0);
}
+ pci_bus_cvlink_init();
+
/*
 	 * Initialize the ifconfig_net driver that handles network device
 	 * persistent naming.
*/
init_ifconfig_net();
+ init_ioconfig_bus();
return(0);
@@ -237,15 +238,9 @@ __setup("hcl=", hcl_setup);
*
*/
void
-hwgraph_fastinfo_set(devfs_handle_t de, arbitrary_info_t fastinfo)
+hwgraph_fastinfo_set(vertex_hdl_t de, arbitrary_info_t fastinfo)
{
-
- if (hcl_debug) {
- printk("HCL: hwgraph_fastinfo_set handle 0x%p fastinfo %ld\n", (void *)de, fastinfo);
- }
-
labelcl_info_replace_IDX(de, HWGRAPH_FASTINFO, fastinfo, NULL);
-
}
@@ -254,7 +249,7 @@ hwgraph_fastinfo_set(devfs_handle_t de, arbitrary_info_t fastinfo)
*
*/
arbitrary_info_t
-hwgraph_fastinfo_get(devfs_handle_t de)
+hwgraph_fastinfo_get(vertex_hdl_t de)
{
arbitrary_info_t fastinfo;
int rv;
@@ -278,7 +273,7 @@ hwgraph_fastinfo_get(devfs_handle_t de)
* devfs node is the parent. This effectively changes this assumption.
*/
int
-hwgraph_connectpt_set(devfs_handle_t de, devfs_handle_t connect_de)
+hwgraph_connectpt_set(vertex_hdl_t de, vertex_hdl_t connect_de)
{
int rv;
@@ -295,19 +290,19 @@ hwgraph_connectpt_set(devfs_handle_t de, devfs_handle_t connect_de)
* hwgraph_connectpt_get: Returns the entry's connect point in the devfs
* tree.
*/
-devfs_handle_t
-hwgraph_connectpt_get(devfs_handle_t de)
+vertex_hdl_t
+hwgraph_connectpt_get(vertex_hdl_t de)
{
int rv;
arbitrary_info_t info;
- devfs_handle_t connect;
+ vertex_hdl_t connect;
rv = labelcl_info_get_IDX(de, HWGRAPH_CONNECTPT, &info);
if (rv != 0) {
return(NULL);
}
- connect = (devfs_handle_t)info;
+ connect = (vertex_hdl_t)info;
return(connect);
}
@@ -318,15 +313,15 @@ hwgraph_connectpt_get(devfs_handle_t de)
* Note that a directory entry in devfs can have children
* but it cannot be a char|block special file.
*/
-devfs_handle_t
-hwgraph_mk_dir(devfs_handle_t de, const char *name,
+vertex_hdl_t
+hwgraph_mk_dir(vertex_hdl_t de, const char *name,
unsigned int namelen, void *info)
{
int rv;
labelcl_info_t *labelcl_info = NULL;
- devfs_handle_t new_devfs_handle = NULL;
- devfs_handle_t parent = NULL;
+ vertex_hdl_t new_devfs_handle = NULL;
+ vertex_hdl_t parent = NULL;
/*
 	 * Create the device info structure for hwgraph compatibility support.
@@ -372,18 +367,13 @@ hwgraph_mk_dir(devfs_handle_t de, const char *name,
}
/*
- * hwgraph_vertex_create - Create a vertex by giving it a temp name.
- */
-
-/*
* hwgraph_path_add - Create a directory node with the given path starting
- * from the given devfs_handle_t.
+ * from the given vertex_hdl_t.
*/
-extern char * dev_to_name(devfs_handle_t, char *, uint);
int
-hwgraph_path_add(devfs_handle_t fromv,
+hwgraph_path_add(vertex_hdl_t fromv,
char *path,
- devfs_handle_t *new_de)
+ vertex_hdl_t *new_de)
{
unsigned int namelen = strlen(path);
@@ -420,8 +410,8 @@ hwgraph_path_add(devfs_handle_t fromv,
* Note that a file entry cannot have children .. it is like a
* char|block special vertex in hwgraph.
*/
-devfs_handle_t
-hwgraph_register(devfs_handle_t de, const char *name,
+vertex_hdl_t
+hwgraph_register(vertex_hdl_t de, const char *name,
unsigned int namelen, unsigned int flags,
unsigned int major, unsigned int minor,
umode_t mode, uid_t uid, gid_t gid,
@@ -431,8 +421,8 @@ hwgraph_register(devfs_handle_t de, const char *name,
int rv;
void *labelcl_info = NULL;
- devfs_handle_t new_devfs_handle = NULL;
- devfs_handle_t parent = NULL;
+ vertex_hdl_t new_devfs_handle = NULL;
+ vertex_hdl_t parent = NULL;
/*
 	 * Create the labelcl info structure for hwgraph compatibility support.
@@ -467,7 +457,7 @@ hwgraph_register(devfs_handle_t de, const char *name,
/*
* We need to clean up!
*/
- printk(KERN_WARNING "HCL: Unable to set the connect point to its parent 0x%p\n",
+ printk(KERN_WARNING "HCL: Unable to set the connect point to it's parent 0x%p\n",
(void *)new_devfs_handle);
}
@@ -488,14 +478,14 @@ hwgraph_register(devfs_handle_t de, const char *name,
* hwgraph_mk_symlink - Create a symbolic link.
*/
int
-hwgraph_mk_symlink(devfs_handle_t de, const char *name, unsigned int namelen,
+hwgraph_mk_symlink(vertex_hdl_t de, const char *name, unsigned int namelen,
unsigned int flags, const char *link, unsigned int linklen,
- devfs_handle_t *handle, void *info)
+ vertex_hdl_t *handle, void *info)
{
void *labelcl_info = NULL;
int status = 0;
- devfs_handle_t new_devfs_handle = NULL;
+ vertex_hdl_t new_devfs_handle = NULL;
/*
 	 * Create the labelcl info structure for hwgraph compatibility support.
@@ -528,29 +518,10 @@ hwgraph_mk_symlink(devfs_handle_t de, const char *name, unsigned int namelen,
}
/*
- * hwgraph_vertex_get_next - this routine returns the next sibbling for the
- * device entry given in de. If there are no more sibbling, NULL
- * is returned in next_sibbling.
- *
- * Currently we do not have any protection against de being deleted
- * while it's handle is being held.
- */
-int
-hwgraph_vertex_get_next(devfs_handle_t *next_sibbling, devfs_handle_t *de)
-{
- *next_sibbling = devfs_get_next_sibling (*de);
-
- if (*next_sibbling != NULL)
- *de = *next_sibbling;
- return (0);
-}
-
-
-/*
* hwgraph_vertex_destroy - Destroy the devfs entry
*/
int
-hwgraph_vertex_destroy(devfs_handle_t de)
+hwgraph_vertex_destroy(vertex_hdl_t de)
{
void *labelcl_info = NULL;
@@ -565,28 +536,19 @@ hwgraph_vertex_destroy(devfs_handle_t de)
}
/*
-** See if a vertex has an outgoing edge with a specified name.
-** Vertices in the hwgraph *implicitly* contain these edges:
-** "." refers to "current vertex"
-** ".." refers to "connect point vertex"
-** "char" refers to current vertex (character device access)
-** "block" refers to current vertex (block device access)
-*/
-
-/*
  * hwgraph_edge_add - This routine has changed from the original context.
* All it does now is to create a symbolic link from "from" to "to".
*/
/* ARGSUSED */
int
-hwgraph_edge_add(devfs_handle_t from, devfs_handle_t to, char *name)
+hwgraph_edge_add(vertex_hdl_t from, vertex_hdl_t to, char *name)
{
char *path;
char *s1;
char *index;
int name_start;
- devfs_handle_t handle = NULL;
+ vertex_hdl_t handle = NULL;
int rv;
int i, count;
@@ -630,11 +592,11 @@ hwgraph_edge_add(devfs_handle_t from, devfs_handle_t to, char *name)
}
/* ARGSUSED */
int
-hwgraph_edge_get(devfs_handle_t from, char *name, devfs_handle_t *toptr)
+hwgraph_edge_get(vertex_hdl_t from, char *name, vertex_hdl_t *toptr)
{
int namelen = 0;
- devfs_handle_t target_handle = NULL;
+ vertex_hdl_t target_handle = NULL;
if (name == NULL)
return(-1);
@@ -673,7 +635,8 @@ hwgraph_edge_get(devfs_handle_t from, char *name, devfs_handle_t *toptr)
* Call devfs to get the devfs entry.
*/
namelen = (int) strlen(name);
- target_handle = devfs_get_handle(from, name, 1); /* Yes traverse symbolic links */
+ target_handle = devfs_find_handle (from, name, 0, 0,
+ 0, 1); /* Yes traverse symbolic links */
if (target_handle == NULL)
return(-1);
else
@@ -683,130 +646,13 @@ hwgraph_edge_get(devfs_handle_t from, char *name, devfs_handle_t *toptr)
return(0);
}
-
-/*
- * hwgraph_edge_get_next - Retrieves the next sibbling given the current
- * entry number "placeptr".
- *
- * Allow the caller to retrieve walk through the sibblings of "source"
- * devfs_handle_t. The implicit edges "." and ".." is returned first
- * followed by each of the real children.
- *
- * We may end up returning garbage if another thread perform any deletion
- * in this directory before "placeptr".
- *
- */
-/* ARGSUSED */
-int
-hwgraph_edge_get_next(devfs_handle_t source, char *name, devfs_handle_t *target,
- uint *placeptr)
-
-{
-
- uint which_place;
- unsigned int namelen = 0;
- const char *tempname = NULL;
-
- if (placeptr == NULL)
- return(-1);
-
- which_place = *placeptr;
-
-again:
- if (which_place <= HWGRAPH_RESERVED_PLACES) {
- if (which_place == EDGE_PLACE_WANT_CURRENT) {
- /*
- * Looking for "."
- * Return the current devfs handle.
- */
- if (name != NULL)
- strcpy(name, HWGRAPH_EDGELBL_DOT);
-
- if (target != NULL) {
- *target = source;
- /* XXX should incr "source" ref count here if we
- * ever implement ref counts */
- }
-
- } else if (which_place == EDGE_PLACE_WANT_CONNECTPT) {
- /*
- * Looking for the connect point or parent.
- * If the connect point is set .. it returns the connect point.
- * Otherwise, it returns the parent .. will we support
- * connect point?
- */
- devfs_handle_t connect_point = hwgraph_connectpt_get(source);
-
- if (connect_point == NULL) {
- /*
- * No connectpoint set .. either the User
- * explicitly NULL it or this node was not
- * created via hcl.
- */
- which_place++;
- goto again;
- }
-
- if (name != NULL)
- strcpy(name, HWGRAPH_EDGELBL_DOTDOT);
-
- if (target != NULL)
- *target = connect_point;
-
- } else if (which_place == EDGE_PLACE_WANT_REAL_EDGES) {
- /*
- * return first "real" entry in directory, and increment
- * placeptr. Next time around we should have
- * which_place > HWGRAPH_RESERVED_EDGES so we'll fall through
- * this nested if block.
- */
- *target = devfs_get_first_child(source);
- if (*target && name) {
- tempname = devfs_get_name(*target, &namelen);
- if (tempname && namelen)
- strcpy(name, tempname);
- }
-
- *placeptr = which_place + 1;
- return (0);
- }
-
- *placeptr = which_place+1;
- return(0);
- }
-
- /*
- * walk linked list, (which_place - HWGRAPH_RESERVED_PLACES) times
- */
- {
- devfs_handle_t curr;
- int i = 0;
-
- for (curr=devfs_get_first_child(source), i= i+HWGRAPH_RESERVED_PLACES;
- curr!=NULL && i<which_place;
- curr=devfs_get_next_sibling(curr), i++)
- ;
- *target = curr;
- *placeptr = which_place + 1;
- if (curr && name) {
- tempname = devfs_get_name(*target, &namelen);
- if (tempname && namelen)
- strcpy(name, tempname);
- }
- }
- if (target == NULL)
- return(-1);
- else
- return(0);
-}
-
/*
* hwgraph_info_add_LBL - Adds a new label for the device. Mark the info_desc
* of the label as INFO_DESC_PRIVATE and store the info in the label.
*/
/* ARGSUSED */
int
-hwgraph_info_add_LBL( devfs_handle_t de,
+hwgraph_info_add_LBL( vertex_hdl_t de,
char *name,
arbitrary_info_t info)
{
@@ -818,7 +664,7 @@ hwgraph_info_add_LBL( devfs_handle_t de,
*/
/* ARGSUSED */
int
-hwgraph_info_remove_LBL( devfs_handle_t de,
+hwgraph_info_remove_LBL( vertex_hdl_t de,
char *name,
arbitrary_info_t *old_info)
{
@@ -831,7 +677,7 @@ hwgraph_info_remove_LBL( devfs_handle_t de,
*/
/* ARGSUSED */
int
-hwgraph_info_replace_LBL( devfs_handle_t de,
+hwgraph_info_replace_LBL( vertex_hdl_t de,
char *name,
arbitrary_info_t info,
arbitrary_info_t *old_info)
@@ -846,7 +692,7 @@ hwgraph_info_replace_LBL( devfs_handle_t de,
*/
/* ARGSUSED */
int
-hwgraph_info_get_LBL( devfs_handle_t de,
+hwgraph_info_get_LBL( vertex_hdl_t de,
char *name,
arbitrary_info_t *infop)
{
@@ -861,7 +707,7 @@ hwgraph_info_get_LBL( devfs_handle_t de,
*/
/* ARGSUSED */
int
-hwgraph_info_get_exported_LBL( devfs_handle_t de,
+hwgraph_info_get_exported_LBL( vertex_hdl_t de,
char *name,
int *export_info,
arbitrary_info_t *infop)
@@ -885,7 +731,7 @@ hwgraph_info_get_exported_LBL( devfs_handle_t de,
*/
/* ARGSUSED */
int
-hwgraph_info_get_next_LBL( devfs_handle_t de,
+hwgraph_info_get_next_LBL( vertex_hdl_t de,
char *buf,
arbitrary_info_t *infop,
labelcl_info_place_t *place)
@@ -899,7 +745,7 @@ hwgraph_info_get_next_LBL( devfs_handle_t de,
*/
/* ARGSUSED */
int
-hwgraph_info_export_LBL(devfs_handle_t de, char *name, int nbytes)
+hwgraph_info_export_LBL(vertex_hdl_t de, char *name, int nbytes)
{
arbitrary_info_t info;
int rc;
@@ -926,7 +772,7 @@ hwgraph_info_export_LBL(devfs_handle_t de, char *name, int nbytes)
*/
/* ARGSUSED */
int
-hwgraph_info_unexport_LBL(devfs_handle_t de, char *name)
+hwgraph_info_unexport_LBL(vertex_hdl_t de, char *name)
{
arbitrary_info_t info;
int rc;
@@ -946,13 +792,16 @@ hwgraph_info_unexport_LBL(devfs_handle_t de, char *name)
*
*/
int
-hwgraph_path_lookup( devfs_handle_t start_vertex_handle,
+hwgraph_path_lookup( vertex_hdl_t start_vertex_handle,
char *lookup_path,
- devfs_handle_t *vertex_handle_ptr,
+ vertex_hdl_t *vertex_handle_ptr,
char **remainder)
{
- *vertex_handle_ptr = devfs_get_handle(start_vertex_handle, /* start dir */
+ *vertex_handle_ptr = devfs_find_handle(start_vertex_handle, /* start dir */
lookup_path, /* path */
+ 0, /* major */
+ 0, /* minor */
+ 0, /* char | block */
1); /* traverse symlinks */
if (*vertex_handle_ptr == NULL)
return(-1);
@@ -965,14 +814,17 @@ hwgraph_path_lookup( devfs_handle_t start_vertex_handle,
*
*/
graph_error_t
-hwgraph_traverse(devfs_handle_t de, char *path, devfs_handle_t *found)
+hwgraph_traverse(vertex_hdl_t de, char *path, vertex_hdl_t *found)
{
/*
* get the directory entry (path should end in a directory)
*/
- *found = devfs_get_handle(de, /* start dir */
+ *found = devfs_find_handle(de, /* start dir */
path, /* path */
+ 0, /* major */
+ 0, /* minor */
+ 0, /* char | block */
1); /* traverse symlinks */
if (*found == NULL)
return(GRAPH_NOT_FOUND);
@@ -984,142 +836,18 @@ hwgraph_traverse(devfs_handle_t de, char *path, devfs_handle_t *found)
* hwgraph_path_to_vertex - Return the devfs entry handle for the given
* pathname .. assume traverse symlinks too!.
*/
-devfs_handle_t
+vertex_hdl_t
hwgraph_path_to_vertex(char *path)
{
- return(devfs_get_handle(NULL, /* start dir */
+ return(devfs_find_handle(NULL, /* start dir */
path, /* path */
+ 0, /* major */
+ 0, /* minor */
+ 0, /* char | block */
1)); /* traverse symlinks */
}
/*
- * hwgraph_path_to_dev - Returns the devfs_handle_t of the given path ..
- * We only deal with devfs handle and not devfs_handle_t.
-*/
-devfs_handle_t
-hwgraph_path_to_dev(char *path)
-{
- devfs_handle_t de;
-
- de = hwgraph_path_to_vertex(path);
- return(de);
-}
-
-/*
- * hwgraph_block_device_get - return the handle of the block device file.
- * The assumption here is that de is a directory.
-*/
-devfs_handle_t
-hwgraph_block_device_get(devfs_handle_t de)
-{
- return(devfs_get_handle(de, /* start dir */
- "block", /* path */
- 1)); /* traverse symlinks */
-}
-
-/*
- * hwgraph_char_device_get - return the handle of the char device file.
- * The assumption here is that de is a directory.
-*/
-devfs_handle_t
-hwgraph_char_device_get(devfs_handle_t de)
-{
- return(devfs_get_handle(de, /* start dir */
- "char", /* path */
- 1)); /* traverse symlinks */
-}
-
-/*
-** Inventory is now associated with a vertex in the graph. For items that
-** belong in the inventory but have no vertex
-** (e.g. old non-graph-aware drivers), we create a bogus vertex under the
-** INFO_LBL_INVENT name.
-**
-** For historical reasons, we prevent exact duplicate entries from being added
-** to a single vertex.
-*/
-
-/*
- * hwgraph_inventory_add - Adds an inventory entry into de.
- */
-int
-hwgraph_inventory_add( devfs_handle_t de,
- int class,
- int type,
- major_t controller,
- minor_t unit,
- int state)
-{
- inventory_t *pinv = NULL, *old_pinv = NULL, *last_pinv = NULL;
- int rv;
-
- /*
- * Add our inventory data to the list of inventory data
- * associated with this vertex.
- */
-again:
- /* GRAPH_LOCK_UPDATE(&invent_lock); */
- rv = labelcl_info_get_LBL(de,
- INFO_LBL_INVENT,
- NULL, (arbitrary_info_t *)&old_pinv);
- if ((rv != LABELCL_SUCCESS) && (rv != LABELCL_NOT_FOUND))
- goto failure;
-
- /*
- * Seek to end of inventory items associated with this
- * vertex. Along the way, make sure we're not duplicating
- * an inventory item (for compatibility with old add_to_inventory)
- */
- for (;old_pinv; last_pinv = old_pinv, old_pinv = old_pinv->inv_next) {
- if ((int)class != -1 && old_pinv->inv_class != class)
- continue;
- if ((int)type != -1 && old_pinv->inv_type != type)
- continue;
- if ((int)state != -1 && old_pinv->inv_state != state)
- continue;
- if ((int)controller != -1
- && old_pinv->inv_controller != controller)
- continue;
- if ((int)unit != -1 && old_pinv->inv_unit != unit)
- continue;
-
- /* exact duplicate of previously-added inventory item */
- rv = LABELCL_DUP;
- goto failure;
- }
-
- /* Not a duplicate, so we know that we need to add something. */
- if (pinv == NULL) {
- /* Release lock while we wait for memory. */
- /* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
- pinv = (inventory_t *)kmalloc(sizeof(inventory_t), GFP_KERNEL);
- replace_in_inventory(pinv, class, type, controller, unit, state);
- goto again;
- }
-
- pinv->inv_next = NULL;
- if (last_pinv) {
- last_pinv->inv_next = pinv;
- } else {
- rv = labelcl_info_add_LBL(de, INFO_LBL_INVENT,
- sizeof(inventory_t), (arbitrary_info_t)pinv);
-
- if (!rv)
- goto failure;
- }
-
- /* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
- return(0);
-
-failure:
- /* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
- if (pinv)
- kfree(pinv);
- return(rv);
-}
-
-
-/*
* hwgraph_inventory_remove - Removes an inventory entry.
*
* Remove an inventory item associated with a vertex. It is the caller's
@@ -1127,169 +855,14 @@ failure:
* inventory from a vertex and simultaneously removing that vertex.
*/
int
-hwgraph_inventory_remove( devfs_handle_t de,
+hwgraph_inventory_remove( vertex_hdl_t de,
int class,
int type,
major_t controller,
minor_t unit,
int state)
{
- inventory_t *pinv = NULL, *last_pinv = NULL, *next_pinv = NULL;
- labelcl_error_t rv;
-
- /*
- * We never remove stuff from ".invent" ..
- */
- if (!de)
- return (-1);
-
- /*
- * Remove our inventory data to the list of inventory data
- * associated with this vertex.
- */
- /* GRAPH_LOCK_UPDATE(&invent_lock); */
- rv = labelcl_info_get_LBL(de,
- INFO_LBL_INVENT,
- NULL, (arbitrary_info_t *)&pinv);
- if (rv != LABELCL_SUCCESS)
- goto failure;
-
- /*
- * Search through inventory items associated with this
- * vertex, looking for a match.
- */
- for (;pinv; pinv = next_pinv) {
- next_pinv = pinv->inv_next;
-
- if(((int)class == -1 || pinv->inv_class == class) &&
- ((int)type == -1 || pinv->inv_type == type) &&
- ((int)state == -1 || pinv->inv_state == state) &&
- ((int)controller == -1 || pinv->inv_controller == controller) &&
- ((int)unit == -1 || pinv->inv_unit == unit)) {
-
- /* Found a matching inventory item. Remove it. */
- if (last_pinv) {
- last_pinv->inv_next = pinv->inv_next;
- } else {
- rv = hwgraph_info_replace_LBL(de, INFO_LBL_INVENT, (arbitrary_info_t)pinv->inv_next, NULL);
- if (rv != LABELCL_SUCCESS)
- goto failure;
- }
-
- pinv->inv_next = NULL; /* sanity */
- kfree(pinv);
- } else
- last_pinv = pinv;
- }
-
- if (last_pinv == NULL) {
- rv = hwgraph_info_remove_LBL(de, INFO_LBL_INVENT, NULL);
- if (rv != LABELCL_SUCCESS)
- goto failure;
- }
-
- rv = LABELCL_SUCCESS;
-
-failure:
- /* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
- return(rv);
-}
-
-/*
- * hwgraph_inventory_get_next - Get next inventory item associated with the
- * specified vertex.
- *
- * No locking is really needed. We don't yet have the ability
- * to remove inventory items, and new items are always added to
- * the end of a vertex' inventory list.
- *
- * However, a devfs entry can be removed!
-*/
-int
-hwgraph_inventory_get_next(devfs_handle_t de, invplace_t *place, inventory_t **ppinv)
-{
- inventory_t *pinv;
- labelcl_error_t rv;
-
- if (de == NULL)
- return(LABELCL_BAD_PARAM);
-
- if (place->invplace_vhdl == NULL) {
- place->invplace_vhdl = de;
- place->invplace_inv = NULL;
- }
-
- if (de != place->invplace_vhdl)
- return(LABELCL_BAD_PARAM);
-
- if (place->invplace_inv == NULL) {
- /* Just starting on this vertex */
- rv = labelcl_info_get_LBL(de, INFO_LBL_INVENT,
- NULL, (arbitrary_info_t *)&pinv);
- if (rv != LABELCL_SUCCESS)
- return(LABELCL_NOT_FOUND);
-
- } else {
- /* Advance to next item on this vertex */
- pinv = place->invplace_inv->inv_next;
- }
- place->invplace_inv = pinv;
- *ppinv = pinv;
-
- return(LABELCL_SUCCESS);
-}
-
-/*
- * hwgraph_controller_num_get - Returns the controller number in the inventory
- * entry.
- */
-int
-hwgraph_controller_num_get(devfs_handle_t device)
-{
- inventory_t *pinv;
- invplace_t invplace = { NULL, NULL, NULL };
- int val = -1;
- if ((pinv = device_inventory_get_next(device, &invplace)) != NULL) {
- val = (pinv->inv_class == INV_NETWORK)? pinv->inv_unit: pinv->inv_controller;
- }
-#ifdef DEBUG
- /*
- * It does not make any sense to call this on vertexes with multiple
- * inventory structs chained together
- */
- if ( device_inventory_get_next(device, &invplace) != NULL ) {
- printk("Should panic here ... !\n");
-#endif
- return (val);
-}
-
-/*
- * hwgraph_controller_num_set - Sets the controller number in the inventory
- * entry.
- */
-void
-hwgraph_controller_num_set(devfs_handle_t device, int contr_num)
-{
- inventory_t *pinv;
- invplace_t invplace = { NULL, NULL, NULL };
- if ((pinv = device_inventory_get_next(device, &invplace)) != NULL) {
- if (pinv->inv_class == INV_NETWORK)
- pinv->inv_unit = contr_num;
- else {
- if (pinv->inv_class == INV_FCNODE)
- pinv = device_inventory_get_next(device, &invplace);
- if (pinv != NULL)
- pinv->inv_controller = contr_num;
- }
- }
-#ifdef DEBUG
- /*
- * It does not make any sense to call this on vertexes with multiple
- * inventory structs chained together
- */
- if(pinv != NULL)
- ASSERT(device_inventory_get_next(device, &invplace) == NULL);
-#endif
+ return(0); /* Just a Stub for IRIX code. */
}
/*
@@ -1304,7 +877,7 @@ hwgraph_controller_num_set(devfs_handle_t device, int contr_num)
* "/" but rather it just stops right before /dev ..
*/
int
-hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen)
+hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen)
{
char *locbuf;
int pos;
@@ -1339,7 +912,7 @@ hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen)
#define DEVNAME_UNKNOWN "UnknownDevice"
char *
-vertex_to_name(devfs_handle_t vhdl, char *buf, uint buflen)
+vertex_to_name(vertex_hdl_t vhdl, char *buf, uint buflen)
{
if (hwgraph_vertex_name_get(vhdl, buf, buflen) == GRAPH_SUCCESS)
return(buf);
@@ -1347,139 +920,15 @@ vertex_to_name(devfs_handle_t vhdl, char *buf, uint buflen)
return(DEVNAME_UNKNOWN);
}
-#ifdef LATER
-/*
-** Return the compact node id of the node that ultimately "owns" the specified
-** vertex. In order to do this, we walk back through masters and connect points
-** until we reach a vertex that represents a node.
-*/
-cnodeid_t
-master_node_get(devfs_handle_t vhdl)
-{
- cnodeid_t cnodeid;
- devfs_handle_t master;
-
- for (;;) {
- cnodeid = nodevertex_to_cnodeid(vhdl);
- if (cnodeid != CNODEID_NONE)
- return(cnodeid);
-
- master = device_master_get(vhdl);
-
- /* Check for exceptional cases */
- if (master == vhdl) {
- /* Since we got a reference to the "master" thru
- * device_master_get() we should decrement
- * its reference count by 1
- */
- hwgraph_vertex_unref(master);
- return(CNODEID_NONE);
- }
-
- if (master == GRAPH_VERTEX_NONE) {
- master = hwgraph_connectpt_get(vhdl);
- if ((master == GRAPH_VERTEX_NONE) ||
- (master == vhdl)) {
- if (master == vhdl)
- /* Since we got a reference to the
- * "master" thru
- * hwgraph_connectpt_get() we should
- * decrement its reference count by 1
- */
- hwgraph_vertex_unref(master);
- return(CNODEID_NONE);
- }
- }
-
- vhdl = master;
- /* Decrement the reference to "master" which was got
- * either thru device_master_get() or hwgraph_connectpt_get()
- * above.
- */
- hwgraph_vertex_unref(master);
- }
-}
-
-/*
- * Using the canonical path name to get hold of the desired vertex handle will
- * not work on multi-hub sn0 nodes. Hence, we use the following (slightly
- * convoluted) algorithm.
- *
- * - Start at the vertex corresponding to the driver (provided as input parameter)
- * - Loop till you reach a vertex which has EDGE_LBL_MEMORY
- * - If EDGE_LBL_CONN exists, follow that up.
- * else if EDGE_LBL_MASTER exists, follow that up.
- * else follow EDGE_LBL_DOTDOT up.
- *
- * * We should be at desired hub/heart vertex now *
- * - Follow EDGE_LBL_CONN to the widget vertex.
- *
- * - return vertex handle of this widget.
- */
-devfs_handle_t
-mem_vhdl_get(devfs_handle_t drv_vhdl)
-{
-devfs_handle_t cur_vhdl, cur_upper_vhdl;
-devfs_handle_t tmp_mem_vhdl, mem_vhdl;
-graph_error_t loop_rv;
-
- /* Initializations */
- cur_vhdl = drv_vhdl;
- loop_rv = ~GRAPH_SUCCESS;
-
- /* Loop till current vertex has EDGE_LBL_MEMORY */
- while (loop_rv != GRAPH_SUCCESS) {
-
- if ((hwgraph_edge_get(cur_vhdl, EDGE_LBL_CONN, &cur_upper_vhdl)) == GRAPH_SUCCESS) {
-
- } else if ((hwgraph_edge_get(cur_vhdl, EDGE_LBL_MASTER, &cur_upper_vhdl)) == GRAPH_SUCCESS) {
- } else { /* Follow HWGRAPH_EDGELBL_DOTDOT up */
- (void) hwgraph_edge_get(cur_vhdl, HWGRAPH_EDGELBL_DOTDOT, &cur_upper_vhdl);
- }
-
- cur_vhdl = cur_upper_vhdl;
-
-#if DEBUG && HWG_DEBUG
- printf("Current vhdl %d \n", cur_vhdl);
-#endif /* DEBUG */
-
- loop_rv = hwgraph_edge_get(cur_vhdl, EDGE_LBL_MEMORY, &tmp_mem_vhdl);
- }
-
- /* We should be at desired hub/heart vertex now */
- if ((hwgraph_edge_get(cur_vhdl, EDGE_LBL_CONN, &mem_vhdl)) != GRAPH_SUCCESS)
- return (GRAPH_VERTEX_NONE);
-
- return (mem_vhdl);
-}
-#endif /* LATER */
-
-
-/*
-** Add a char device -- if the driver supports it -- at a specified vertex.
-*/
graph_error_t
-hwgraph_char_device_add( devfs_handle_t from,
- char *path,
- char *prefix,
- devfs_handle_t *devhdl)
-{
- devfs_handle_t xx = NULL;
-
- printk("WARNING: hwgraph_char_device_add() not supported .. use hwgraph_register.\n");
- *devhdl = xx; // Must set devhdl
- return(GRAPH_SUCCESS);
-}
-
-graph_error_t
-hwgraph_edge_remove(devfs_handle_t from, char *name, devfs_handle_t *toptr)
+hwgraph_edge_remove(vertex_hdl_t from, char *name, vertex_hdl_t *toptr)
{
printk("WARNING: hwgraph_edge_remove NOT supported.\n");
return(GRAPH_ILLEGAL_REQUEST);
}
graph_error_t
-hwgraph_vertex_unref(devfs_handle_t vhdl)
+hwgraph_vertex_unref(vertex_hdl_t vhdl)
{
return(GRAPH_ILLEGAL_REQUEST);
}
@@ -1487,17 +936,12 @@ hwgraph_vertex_unref(devfs_handle_t vhdl)
EXPORT_SYMBOL(hwgraph_mk_dir);
EXPORT_SYMBOL(hwgraph_path_add);
-EXPORT_SYMBOL(hwgraph_char_device_add);
EXPORT_SYMBOL(hwgraph_register);
EXPORT_SYMBOL(hwgraph_vertex_destroy);
-
EXPORT_SYMBOL(hwgraph_fastinfo_get);
-EXPORT_SYMBOL(hwgraph_edge_get);
-
EXPORT_SYMBOL(hwgraph_fastinfo_set);
EXPORT_SYMBOL(hwgraph_connectpt_set);
EXPORT_SYMBOL(hwgraph_connectpt_get);
-EXPORT_SYMBOL(hwgraph_edge_get_next);
EXPORT_SYMBOL(hwgraph_info_add_LBL);
EXPORT_SYMBOL(hwgraph_info_remove_LBL);
EXPORT_SYMBOL(hwgraph_info_replace_LBL);
@@ -1508,8 +952,4 @@ EXPORT_SYMBOL(hwgraph_info_export_LBL);
EXPORT_SYMBOL(hwgraph_info_unexport_LBL);
EXPORT_SYMBOL(hwgraph_path_lookup);
EXPORT_SYMBOL(hwgraph_traverse);
-EXPORT_SYMBOL(hwgraph_path_to_vertex);
-EXPORT_SYMBOL(hwgraph_path_to_dev);
-EXPORT_SYMBOL(hwgraph_block_device_get);
-EXPORT_SYMBOL(hwgraph_char_device_get);
EXPORT_SYMBOL(hwgraph_vertex_name_get);
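
The remaining hunks of this file are a mechanical API conversion: devfs_handle_t becomes vertex_hdl_t, and each devfs_get_handle() call becomes devfs_find_handle() with zeroed major/minor/type arguments. A minimal sketch of the resulting call pattern, using a hypothetical wrapper name purely for illustration, would be:

/*
 * Illustrative sketch only, not part of the patch.  It shows the
 * devfs_find_handle() call shape used by the converted lookups above;
 * "hwgraph_lookup_by_path" is an invented name.
 */
static vertex_hdl_t
hwgraph_lookup_by_path(vertex_hdl_t start, char *path)
{
	return devfs_find_handle(start,	/* start dir */
				 path,	/* path */
				 0,	/* major - unused here */
				 0,	/* minor - unused here */
				 0,	/* char | block - unused here */
				 1);	/* traverse symlinks */
}
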
diff --git a/arch/ia64/sn/io/hcl_util.c b/arch/ia64/sn/io/hwgdfs/hcl_util.c
index 961abe3a3eec4d..7e21ae99fcba95 100644
--- a/arch/ia64/sn/io/hcl_util.c
+++ b/arch/ia64/sn/io/hwgdfs/hcl_util.c
@@ -1,10 +1,10 @@
-/* $Id$
+/* $Id: hcl_util.c,v 1.3 2003/04/24 13:59:39 pfg Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -21,8 +21,8 @@
#include <asm/sn/hcl_util.h>
#include <asm/sn/nodepda.h>
-static devfs_handle_t hwgraph_all_cnodes = GRAPH_VERTEX_NONE;
-extern devfs_handle_t hwgraph_root;
+static vertex_hdl_t hwgraph_all_cnodes = GRAPH_VERTEX_NONE;
+extern vertex_hdl_t hwgraph_root;
/*
@@ -30,11 +30,11 @@ extern devfs_handle_t hwgraph_root;
** controller or adapter or other piece of hardware that the given
** vertex passes through on the way to the rest of the system.
*/
-devfs_handle_t
-device_master_get(devfs_handle_t vhdl)
+vertex_hdl_t
+device_master_get(vertex_hdl_t vhdl)
{
graph_error_t rc;
- devfs_handle_t master;
+ vertex_hdl_t master;
rc = hwgraph_edge_get(vhdl, EDGE_LBL_MASTER, &master);
if (rc == GRAPH_SUCCESS)
@@ -48,7 +48,7 @@ device_master_get(devfs_handle_t vhdl)
** Returns 0 on success, non-0 indicates failure
*/
int
-device_master_set(devfs_handle_t vhdl, devfs_handle_t master)
+device_master_set(vertex_hdl_t vhdl, vertex_hdl_t master)
{
graph_error_t rc;
@@ -63,10 +63,10 @@ device_master_set(devfs_handle_t vhdl, devfs_handle_t master)
** until we reach a vertex that represents a node.
*/
cnodeid_t
-master_node_get(devfs_handle_t vhdl)
+master_node_get(vertex_hdl_t vhdl)
{
cnodeid_t cnodeid;
- devfs_handle_t master;
+ vertex_hdl_t master;
for (;;) {
cnodeid = nodevertex_to_cnodeid(vhdl);
@@ -96,11 +96,11 @@ master_node_get(devfs_handle_t vhdl)
}
}
-static devfs_handle_t hwgraph_all_cpuids = GRAPH_VERTEX_NONE;
+static vertex_hdl_t hwgraph_all_cpuids = GRAPH_VERTEX_NONE;
extern int maxcpus;
void
-mark_cpuvertex_as_cpu(devfs_handle_t vhdl, cpuid_t cpuid)
+mark_cpuvertex_as_cpu(vertex_hdl_t vhdl, cpuid_t cpuid)
{
if (cpuid == CPU_NONE)
return;
@@ -128,7 +128,7 @@ mark_cpuvertex_as_cpu(devfs_handle_t vhdl, cpuid_t cpuid)
** compact node ID; otherwise, return CNODEID_NONE.
*/
cnodeid_t
-nodevertex_to_cnodeid(devfs_handle_t vhdl)
+nodevertex_to_cnodeid(vertex_hdl_t vhdl)
{
int rv = 0;
arbitrary_info_t cnodeid = CNODEID_NONE;
@@ -139,7 +139,7 @@ nodevertex_to_cnodeid(devfs_handle_t vhdl)
}
void
-mark_nodevertex_as_node(devfs_handle_t vhdl, cnodeid_t cnodeid)
+mark_nodevertex_as_node(vertex_hdl_t vhdl, cnodeid_t cnodeid)
{
if (cnodeid == CNODEID_NONE)
return;
@@ -169,7 +169,7 @@ mark_nodevertex_as_node(devfs_handle_t vhdl, cnodeid_t cnodeid)
** otherwise, return CPU_NONE.
*/
cpuid_t
-cpuvertex_to_cpuid(devfs_handle_t vhdl)
+cpuvertex_to_cpuid(vertex_hdl_t vhdl)
{
arbitrary_info_t cpuid = CPU_NONE;
@@ -180,9 +180,9 @@ cpuvertex_to_cpuid(devfs_handle_t vhdl)
/*
-** dev_to_name converts a devfs_handle_t into a canonical name. If the devfs_handle_t
+** dev_to_name converts a vertex_hdl_t into a canonical name. If the vertex_hdl_t
** represents a vertex in the hardware graph, it is converted in the
-** normal way for vertices. If the devfs_handle_t is an old devfs_handle_t (one which
+** normal way for vertices. If the vertex_hdl_t is an old vertex_hdl_t (one which
** does not represent a hwgraph vertex), we synthesize a name based
** on major/minor number.
**
@@ -192,7 +192,7 @@ cpuvertex_to_cpuid(devfs_handle_t vhdl)
** returns "UnknownDevice".
*/
char *
-dev_to_name(devfs_handle_t dev, char *buf, uint buflen)
+dev_to_name(vertex_hdl_t dev, char *buf, uint buflen)
{
return(vertex_to_name(dev, buf, buflen));
}
diff --git a/arch/ia64/sn/io/hwgdfs/invent_stub.c b/arch/ia64/sn/io/hwgdfs/invent_stub.c
new file mode 100644
index 00000000000000..de92938c3156dd
--- /dev/null
+++ b/arch/ia64/sn/io/hwgdfs/invent_stub.c
@@ -0,0 +1,146 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * Hardware Inventory
+ *
+ * See sys/sn/invent.h for an explanation of the hardware inventory contents.
+ *
+ */
+#include <linux/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+void
+inventinit(void)
+{
+}
+
+/*
+ * For initializing/updating an inventory entry.
+ */
+void
+replace_in_inventory(
+ inventory_t *pinv, int class, int type,
+ int controller, int unit, int state)
+{
+}
+
+/*
+ * Inventory addition
+ *
+ * XXX NOTE: Currently must be called after dynamic memory allocator is
+ * initialized.
+ *
+ */
+void
+add_to_inventory(int class, int type, int controller, int unit, int state)
+{
+}
+
+
+/*
+ * Inventory retrieval
+ *
+ * These two routines are intended to prevent the caller from having to know
+ * the internal structure of the inventory table.
+ *
+ * The caller of get_next_inventory is supposed to call start_scan_inventory
+ * before the first call to get_next_inventory, and the caller is required
+ * to call end_scan_inventory after the last call to get_next_inventory.
+ */
+inventory_t *
+get_next_inventory(invplace_t *place)
+{
+ return((inventory_t *) NULL);
+}
+
+/* ARGSUSED */
+int
+get_sizeof_inventory(int abi)
+{
+ return sizeof(inventory_t);
+}
+
+/* Must be called prior to first call to get_next_inventory */
+void
+start_scan_inventory(invplace_t *iplace)
+{
+}
+
+/* Must be called after last call to get_next_inventory */
+void
+end_scan_inventory(invplace_t *iplace)
+{
+}
+
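The comment above documents a three-call protocol; a hypothetical caller (not part of the patch) would follow it as below.  With these stubs the loop body never executes, since get_next_inventory() always returns NULL.

/* Hypothetical caller sketch following the documented protocol. */
static int example_count_inventory(void)
{
	invplace_t place;
	inventory_t *pinv;
	int count = 0;

	start_scan_inventory(&place);
	while ((pinv = get_next_inventory(&place)) != NULL)
		count++;
	end_scan_inventory(&place);

	return count;
}
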
+/*
+ * Hardware inventory scanner.
+ *
+ * Calls fun() for every entry in inventory list unless fun() returns something
+ * other than 0.
+ */
+int
+scaninvent(int (*fun)(inventory_t *, void *), void *arg)
+{
+ return 0;
+}
+
+/*
+ * Find a particular inventory object
+ *
+ * pinv can be a pointer to an inventory entry and the search will begin from
+ * there, or it can be 0 in which case the search starts at the beginning.
+ * A -1 for any of the other arguments is a wildcard (i.e. it always matches).
+ */
+inventory_t *
+find_inventory(inventory_t *pinv, int class, int type, int controller,
+ int unit, int state)
+{
+ return((inventory_t *) NULL);
+}
+
+
+/*
+** Retrieve inventory data associated with a device.
+*/
+inventory_t *
+device_inventory_get_next( vertex_hdl_t device,
+ invplace_t *invplace)
+{
+ return((inventory_t *) NULL);
+}
+
+
+/*
+** Associate canonical inventory information with a device (and
+** add it to the general inventory).
+*/
+void
+device_inventory_add( vertex_hdl_t device,
+ int class,
+ int type,
+ major_t controller,
+ minor_t unit,
+ int state)
+{
+}
+
+int
+device_controller_num_get(vertex_hdl_t device)
+{
+ return (0);
+}
+
+void
+device_controller_num_set(vertex_hdl_t device, int contr_num)
+{
+}
diff --git a/arch/ia64/sn/io/labelcl.c b/arch/ia64/sn/io/hwgdfs/labelcl.c
index 33c2bd8c4da74b..f734fbf2f50fcc 100644
--- a/arch/ia64/sn/io/labelcl.c
+++ b/arch/ia64/sn/io/hwgdfs/labelcl.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -219,7 +219,7 @@ labelcl_info_destroy(labelcl_info_t *labelcl_info)
* Error is returned if we find another label with the same name.
*/
int
-labelcl_info_add_LBL(devfs_handle_t de,
+labelcl_info_add_LBL(vertex_hdl_t de,
char *info_name,
arb_info_desc_t info_desc,
arbitrary_info_t info)
@@ -275,7 +275,6 @@ labelcl_info_add_LBL(devfs_handle_t de,
if (!strcmp(info_name, old_label_list[i].name)) {
/* Not allowed to add duplicate labelled info names. */
kfree(new_label_list);
- printk(KERN_WARNING "labelcl_info_add_LBL: Duplicate label name %s for vertex 0x%p\n", info_name, (void *)de);
return(-1);
}
new_label_list[i] = old_label_list[i]; /* structure copy */
@@ -298,7 +297,7 @@ labelcl_info_add_LBL(devfs_handle_t de,
* labelcl_info_remove_LBL - Remove a label entry.
*/
int
-labelcl_info_remove_LBL(devfs_handle_t de,
+labelcl_info_remove_LBL(vertex_hdl_t de,
char *info_name,
arb_info_desc_t *info_desc,
arbitrary_info_t *info)
@@ -388,7 +387,7 @@ found:
* Label entry must exist.
*/
int
-labelcl_info_replace_LBL(devfs_handle_t de,
+labelcl_info_replace_LBL(vertex_hdl_t de,
char *info_name,
arb_info_desc_t info_desc,
arbitrary_info_t info,
@@ -446,7 +445,7 @@ labelcl_info_replace_LBL(devfs_handle_t de,
* given label entry.
*/
int
-labelcl_info_get_LBL(devfs_handle_t de,
+labelcl_info_get_LBL(vertex_hdl_t de,
char *info_name,
arb_info_desc_t *info_desc,
arbitrary_info_t *info)
@@ -493,7 +492,7 @@ labelcl_info_get_LBL(devfs_handle_t de,
* labelcl_info_get_next_LBL - returns the next label entry on the list.
*/
int
-labelcl_info_get_next_LBL(devfs_handle_t de,
+labelcl_info_get_next_LBL(vertex_hdl_t de,
char *buffer,
arb_info_desc_t *info_descp,
arbitrary_info_t *infop,
@@ -543,7 +542,7 @@ labelcl_info_get_next_LBL(devfs_handle_t de,
int
-labelcl_info_replace_IDX(devfs_handle_t de,
+labelcl_info_replace_IDX(vertex_hdl_t de,
int index,
arbitrary_info_t info,
arbitrary_info_t *old_info)
@@ -607,7 +606,7 @@ labelcl_info_connectpt_set(struct devfs_entry *de,
*
*/
int
-labelcl_info_get_IDX(devfs_handle_t de,
+labelcl_info_get_IDX(vertex_hdl_t de,
int index,
arbitrary_info_t *info)
{
diff --git a/arch/ia64/sn/io/hwgfs/Makefile b/arch/ia64/sn/io/hwgfs/Makefile
new file mode 100644
index 00000000000000..5107e0c9ae964a
--- /dev/null
+++ b/arch/ia64/sn/io/hwgfs/Makefile
@@ -0,0 +1,13 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+EXTRA_CFLAGS := -DLITTLE_ENDIAN
+
+obj-y += hcl.o labelcl.o hcl_util.o invent_stub.o \
+ ramfs.o interface.o
diff --git a/arch/ia64/sn/io/hwgfs/hcl.c b/arch/ia64/sn/io/hwgfs/hcl.c
new file mode 100644
index 00000000000000..bdb27ccaec6ac5
--- /dev/null
+++ b/arch/ia64/sn/io/hwgfs/hcl.c
@@ -0,0 +1,938 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * hcl - SGI's Hardware Graph compatibility layer.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/sched.h> /* needed for smp_lock.h :( */
+#include <linux/smp_lock.h>
+#include <asm/sn/sgi.h>
+#include <asm/io.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/hwgfs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/simulator.h>
+
+#define HCL_NAME "SGI-HWGRAPH COMPATIBILITY DRIVER"
+#define HCL_TEMP_NAME "HCL_TEMP_NAME_USED_FOR_HWGRAPH_VERTEX_CREATE"
+#define HCL_TEMP_NAME_LEN 44
+#define HCL_VERSION "1.0"
+
+#define vertex_hdl_t hwgfs_handle_t
+vertex_hdl_t hwgraph_root;
+vertex_hdl_t linux_busnum;
+
+extern void pci_bus_cvlink_init(void);
+
+/*
+ * Debug flag definition.
+ */
+#define OPTION_NONE 0x00
+#define HCL_DEBUG_NONE 0x00000
+#define HCL_DEBUG_ALL 0x0ffff
+#if defined(CONFIG_HCL_DEBUG)
+static unsigned int hcl_debug_init __initdata = HCL_DEBUG_NONE;
+#endif
+static unsigned int hcl_debug = HCL_DEBUG_NONE;
+#if defined(CONFIG_HCL_DEBUG) && !defined(MODULE)
+static unsigned int boot_options = OPTION_NONE;
+#endif
+
+/*
+ * Some Global definitions.
+ */
+vertex_hdl_t hcl_handle;
+
+invplace_t invplace_none = {
+ GRAPH_VERTEX_NONE,
+ GRAPH_VERTEX_PLACE_NONE,
+ NULL
+};
+
+/*
+ * HCL device driver.
+ * The purpose of this device driver is to provide user-level
+ * applications (e.g. hinv, ioconfig) with an ioctl path for
+ * manipulating label entries without having to implement
+ * system call interfaces.  This also allows the feature to be
+ * built as a loadable module.
+ */
+static int hcl_open(struct inode * inode, struct file * filp)
+{
+ if (hcl_debug) {
+ printk("HCL: hcl_open called.\n");
+ }
+
+ return(0);
+
+}
+
+static int hcl_close(struct inode * inode, struct file * filp)
+{
+
+ if (hcl_debug) {
+ printk("HCL: hcl_close called.\n");
+ }
+
+ return(0);
+
+}
+
+static int hcl_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+
+ if (hcl_debug) {
+ printk("HCL: hcl_ioctl called.\n");
+ }
+
+ switch (cmd) {
+ default:
+ if (hcl_debug) {
+ printk("HCL: hcl_ioctl cmd = 0x%x\n", cmd);
+ }
+ }
+
+ return(0);
+
+}
+
+struct file_operations hcl_fops = {
+ (struct module *)0,
+ NULL, /* lseek - default */
+ NULL, /* read - general block-dev read */
+ NULL, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* poll */
+ hcl_ioctl, /* ioctl */
+ NULL, /* mmap */
+ hcl_open, /* open */
+ NULL, /* flush */
+ hcl_close, /* release */
+ NULL, /* fsync */
+ NULL, /* fasync */
+ NULL, /* lock */
+ NULL, /* readv */
+ NULL, /* writev */
+};
+
+
+/*
+ * init_hcl() - Boot time initialization.
+ *
+ */
+int __init init_hcl(void)
+{
+ extern void string_table_init(struct string_table *);
+ extern struct string_table label_string_table;
+ extern int init_ifconfig_net(void);
+ extern int init_ioconfig_bus(void);
+ extern int init_hwgfs_fs(void);
+ int rv = 0;
+
+ if (IS_RUNNING_ON_SIMULATOR()) {
+ extern u64 klgraph_addr[];
+ klgraph_addr[0] = 0xe000003000030000;
+ }
+
+ init_hwgfs_fs();
+
+ /*
+ * Create the hwgraph_root.
+ */
+ rv = hwgraph_path_add(NULL, EDGE_LBL_HW, &hwgraph_root);
+ if (rv)
+ printk ("WARNING: init_hcl: Failed to create hwgraph_root. Error = %d.\n", rv);
+
+ /*
+ * Create the hcl driver to support inventory entry manipulations.
+ *
+ */
+ hcl_handle = hwgraph_register(hwgraph_root, ".hcl",
+ 0, DEVFS_FL_AUTO_DEVNUM,
+ 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+ &hcl_fops, NULL);
+
+ if (hcl_handle == NULL) {
+ panic("HCL: Unable to create HCL Driver in init_hcl().\n");
+ return(0);
+ }
+
+ /*
+ * Initialize the HCL string table.
+ */
+
+ string_table_init(&label_string_table);
+
+ /*
+ * Create the directory that links Linux bus numbers to our Xwidget.
+ */
+ rv = hwgraph_path_add(hwgraph_root, EDGE_LBL_LINUX_BUS, &linux_busnum);
+ if (linux_busnum == NULL) {
+ panic("HCL: Unable to create %s\n", EDGE_LBL_LINUX_BUS);
+ return(0);
+ }
+
+ pci_bus_cvlink_init();
+
+ /*
+	 * Initialize the ifconfig_net driver that provides persistent
+	 * naming for network devices.
+ */
+ init_ifconfig_net();
+ init_ioconfig_bus();
+
+ return(0);
+
+}
+
+
+/*
+ * hcl_setup() - Process boot time parameters if given.
+ * "hcl="
+ * This routine gets called only if "hcl=" is given in the
+ * boot line and before init_hcl().
+ *
+ *	We currently do not have any boot options; when we do,
+ *	functionality can be added here.
+ *
+ */
+static int __init hcl_setup(char *str)
+{
+ while ( (*str != '\0') && !isspace (*str) )
+ {
+#ifdef CONFIG_HCL_DEBUG
+ if (strncmp (str, "all", 3) == 0) {
+ hcl_debug_init |= HCL_DEBUG_ALL;
+ str += 3;
+ } else
+ return 0;
+#endif
+ if (*str != ',') return 0;
+ ++str;
+ }
+
+ return 1;
+
+}
+
+__setup("hcl=", hcl_setup);
+
+
+/*
+ * Set device specific "fast information".
+ *
+ */
+void
+hwgraph_fastinfo_set(vertex_hdl_t de, arbitrary_info_t fastinfo)
+{
+ labelcl_info_replace_IDX(de, HWGRAPH_FASTINFO, fastinfo, NULL);
+}
+
+
+/*
+ * Get device specific "fast information".
+ *
+ */
+arbitrary_info_t
+hwgraph_fastinfo_get(vertex_hdl_t de)
+{
+ arbitrary_info_t fastinfo;
+ int rv;
+
+ if (!de) {
+ printk(KERN_WARNING "HCL: hwgraph_fastinfo_get handle given is NULL.\n");
+ return(-1);
+ }
+
+ rv = labelcl_info_get_IDX(de, HWGRAPH_FASTINFO, &fastinfo);
+ if (rv == 0)
+ return(fastinfo);
+
+ return(0);
+}
+
+
+/*
+ * hwgraph_connectpt_set - Sets the connect point handle in de to the
+ * given connect_de handle. By default, the connect point of the
+ * node is the parent. This effectively changes this assumption.
+ */
+int
+hwgraph_connectpt_set(vertex_hdl_t de, vertex_hdl_t connect_de)
+{
+ int rv;
+
+ if (!de)
+ return(-1);
+
+ rv = labelcl_info_connectpt_set(de, connect_de);
+
+ return(rv);
+}
+
+
+/*
+ * hwgraph_connectpt_get: Returns the entry's connect point.
+ *
+ */
+vertex_hdl_t
+hwgraph_connectpt_get(vertex_hdl_t de)
+{
+ int rv;
+ arbitrary_info_t info;
+ vertex_hdl_t connect;
+
+ rv = labelcl_info_get_IDX(de, HWGRAPH_CONNECTPT, &info);
+ if (rv != 0) {
+ return(NULL);
+ }
+
+ connect = (vertex_hdl_t)info;
+ return(connect);
+
+}
+
+
+/*
+ * hwgraph_mk_dir - Creates a directory entry.
+ */
+vertex_hdl_t
+hwgraph_mk_dir(vertex_hdl_t de, const char *name,
+ unsigned int namelen, void *info)
+{
+
+ int rv;
+ labelcl_info_t *labelcl_info = NULL;
+ vertex_hdl_t new_handle = NULL;
+ vertex_hdl_t parent = NULL;
+
+ /*
+	 * Create the device info structure for hwgraph compatibility support.
+ */
+ labelcl_info = labelcl_info_create();
+ if (!labelcl_info)
+ return(NULL);
+
+ /*
+ * Create an entry.
+ */
+ new_handle = hwgfs_mk_dir(de, name, (void *)labelcl_info);
+ if (!new_handle) {
+ labelcl_info_destroy(labelcl_info);
+ return(NULL);
+ }
+
+ /*
+ * Get the parent handle.
+ */
+ parent = hwgfs_get_parent (new_handle);
+
+ /*
+ * To provide the same semantics as the hwgraph, set the connect point.
+ */
+ rv = hwgraph_connectpt_set(new_handle, parent);
+ if (!rv) {
+ /*
+ * We need to clean up!
+ */
+ }
+
+ /*
+ * If the caller provides a private data pointer, save it in the
+	 * labelcl info structure (fastinfo).  This can be retrieved via
+	 * hwgraph_fastinfo_get().
+ */
+ if (info)
+ hwgraph_fastinfo_set(new_handle, (arbitrary_info_t)info);
+
+ return(new_handle);
+
+}
+
+/*
+ * hwgraph_path_add - Create a directory node with the given path starting
+ * from the given fromv.
+ */
+int
+hwgraph_path_add(vertex_hdl_t fromv,
+ char *path,
+ vertex_hdl_t *new_de)
+{
+
+ unsigned int namelen = strlen(path);
+ int rv;
+
+ /*
+ * We need to handle the case when fromv is NULL ..
+ * in this case we need to create the path from the
+ * hwgraph root!
+ */
+ if (fromv == NULL)
+ fromv = hwgraph_root;
+
+ /*
+ * check the entry doesn't already exist, if it does
+ * then we simply want new_de to point to it (otherwise
+ * we'll overwrite the existing labelcl_info struct)
+ */
+ rv = hwgraph_edge_get(fromv, path, new_de);
+ if (rv) { /* couldn't find entry so we create it */
+ *new_de = hwgraph_mk_dir(fromv, path, namelen, NULL);
+		if (*new_de == NULL)
+ return(-1);
+ else
+ return(0);
+ }
+ else
+ return(0);
+
+}
+
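hwgraph_path_add() is effectively idempotent: when the edge already exists it returns the existing vertex rather than overwriting its labelcl info.  A hypothetical caller, with an invented path name, might look like this:

/* Hypothetical usage sketch, not part of the patch. */
static vertex_hdl_t example_widget_dir(void)
{
	static char path[] = "example/widget";	/* invented path */
	vertex_hdl_t vhdl = NULL;

	/* Creates /hw/example/widget on first use; later calls simply
	 * return the vertex that already exists. */
	if (hwgraph_path_add(hwgraph_root, path, &vhdl))
		return NULL;

	return vhdl;
}
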
+/*
+ * hwgraph_register - Creates a special device file.
+ *
+ */
+vertex_hdl_t
+hwgraph_register(vertex_hdl_t de, const char *name,
+ unsigned int namelen, unsigned int flags,
+ unsigned int major, unsigned int minor,
+ umode_t mode, uid_t uid, gid_t gid,
+ struct file_operations *fops,
+ void *info)
+{
+
+ vertex_hdl_t new_handle = NULL;
+
+ /*
+ * Create an entry.
+ */
+ new_handle = hwgfs_register(de, name, flags, major,
+ minor, mode, fops, info);
+
+ return(new_handle);
+
+}
+
+
+/*
+ * hwgraph_mk_symlink - Create a symbolic link.
+ */
+int
+hwgraph_mk_symlink(vertex_hdl_t de, const char *name, unsigned int namelen,
+ unsigned int flags, const char *link, unsigned int linklen,
+ vertex_hdl_t *handle, void *info)
+{
+
+ void *labelcl_info = NULL;
+ int status = 0;
+ vertex_hdl_t new_handle = NULL;
+
+ /*
+	 * Create the labelcl info structure for hwgraph compatibility support.
+ */
+ labelcl_info = labelcl_info_create();
+ if (!labelcl_info)
+ return(-1);
+
+ /*
+ * Create a symbolic link.
+ */
+ status = hwgfs_mk_symlink(de, name, flags, link,
+ &new_handle, labelcl_info);
+ if ( (!new_handle) || (!status) ){
+ labelcl_info_destroy((labelcl_info_t *)labelcl_info);
+ return(-1);
+ }
+
+ /*
+ * If the caller provides a private data pointer, save it in the
+	 * labelcl info structure (fastinfo).  This can be retrieved via
+	 * hwgraph_fastinfo_get().
+ */
+ if (info)
+ hwgraph_fastinfo_set(new_handle, (arbitrary_info_t)info);
+
+ *handle = new_handle;
+ return(0);
+
+}
+
+/*
+ * hwgraph_vertex_destroy - Destroy the entry
+ */
+int
+hwgraph_vertex_destroy(vertex_hdl_t de)
+{
+
+ void *labelcl_info = NULL;
+
+ labelcl_info = hwgfs_get_info(de);
+ hwgfs_unregister(de);
+
+ if (labelcl_info)
+ labelcl_info_destroy((labelcl_info_t *)labelcl_info);
+
+ return(0);
+}
+
+#if 0
+/*
+ * hwgraph_edge_add - This routine has changed from its original context.
+ * All it does now is to create a symbolic link from "from" to "to".
+ */
+/* ARGSUSED */
+int
+hwgraph_edge_add(vertex_hdl_t from, vertex_hdl_t to, char *name)
+{
+
+ char *path, *link;
+ vertex_hdl_t handle = NULL;
+ int rv, i;
+
+ handle = hwgfs_find_handle(from, name, 0, 0, 0, 1);
+ if (handle) {
+ return(0);
+ }
+
+ path = kmalloc(1024, GFP_KERNEL);
+ memset(path, 0x0, 1024);
+ link = kmalloc(1024, GFP_KERNEL);
+ memset(path, 0x0, 1024);
+ i = hwgfs_generate_path (to, link, 1024);
+ rv = hwgfs_mk_symlink (from, (const char *)name,
+ DEVFS_FL_DEFAULT, link,
+ &handle, NULL);
+ return(0);
+
+
+}
+#endif
+
+int
+hwgraph_edge_add(vertex_hdl_t from, vertex_hdl_t to, char *name)
+{
+
+ char *path, *link;
+ char *s1;
+ char *index;
+ vertex_hdl_t handle = NULL;
+ int rv;
+ int i, count;
+
+ path = kmalloc(1024, GFP_KERNEL);
+ memset((char *)path, 0x0, 1024);
+ link = kmalloc(1024, GFP_KERNEL);
+ memset((char *)link, 0x0, 1024);
+
+ i = hwgfs_generate_path (from, path, 1024);
+ s1 = (char *)path;
+ count = 0;
+ while (1) {
+ index = strstr (s1, "/");
+ if (index) {
+ count++;
+ s1 = ++index;
+ } else {
+ count++;
+ break;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ strcat((char *)link,"../");
+ }
+
+ memset(path, 0x0, 1024);
+ i = hwgfs_generate_path (to, path, 1024);
+ strcat((char *)link, (char *)path);
+
+ /*
+ * Otherwise, just create a symlink to the vertex.
+	 * In this case the vertex was previously created with a REAL pathname.
+ */
+ rv = hwgfs_mk_symlink (from, (const char *)name,
+ DEVFS_FL_DEFAULT, link,
+ &handle, NULL);
+ kfree(path);
+ kfree(link);
+
+ return(rv);
+
+
+}
+
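hwgraph_edge_add() implements an edge as a relative symlink: it counts the path components of "from", prefixes that many "../" segments, then appends the path of "to".  The same string computation in a small, self-contained user-space form (the example paths are invented) is:

/* User-space illustration of the "../" prefix computation above. */
#include <stdio.h>
#include <string.h>

/* Build in 'link' a path that reaches 'to' from a symlink created
 * inside the directory 'from'. */
static void make_relative_link(const char *from, const char *to,
			       char *link, size_t len)
{
	const char *p;
	int count = 1;		/* "from" itself is one component */

	for (p = from; (p = strchr(p, '/')) != NULL; p++)
		count++;

	link[0] = '\0';
	while (count--)
		strncat(link, "../", len - strlen(link) - 1);
	strncat(link, to, len - strlen(link) - 1);
}

int main(void)
{
	char link[256];

	make_relative_link("module/001c01/node", "devices/pci/0",
			   link, sizeof(link));
	printf("%s\n", link);	/* prints ../../../devices/pci/0 */
	return 0;
}
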
+/* ARGSUSED */
+int
+hwgraph_edge_get(vertex_hdl_t from, char *name, vertex_hdl_t *toptr)
+{
+
+ vertex_hdl_t target_handle = NULL;
+
+ if (name == NULL)
+ return(-1);
+
+ if (toptr == NULL)
+ return(-1);
+
+ /*
+ * If the name is "." just return the current entry handle.
+ */
+ if (!strcmp(name, HWGRAPH_EDGELBL_DOT)) {
+ if (toptr) {
+ *toptr = from;
+ }
+ } else if (!strcmp(name, HWGRAPH_EDGELBL_DOTDOT)) {
+ /*
+		 * Looking for "..": in the hwgraph the parent is the
+		 * connect point, so return the connect point if one
+		 * is set.
+		 *
+		 * Otherwise fall back to the parent.
+ */
+ target_handle = hwgraph_connectpt_get(from);
+ if (target_handle) {
+ /*
+ * Just return the connect point.
+ */
+ *toptr = target_handle;
+ return(0);
+ }
+ target_handle = hwgfs_get_parent(from);
+ *toptr = target_handle;
+
+ } else {
+ target_handle = hwgfs_find_handle (from, name, 0, 0,
+ 0, 1); /* Yes traverse symbolic links */
+ }
+
+ if (target_handle == NULL)
+ return(-1);
+ else
+ *toptr = target_handle;
+
+ return(0);
+}
+
+/*
+ * hwgraph_info_add_LBL - Adds a new label for the device. Mark the info_desc
+ * of the label as INFO_DESC_PRIVATE and store the info in the label.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_add_LBL( vertex_hdl_t de,
+ char *name,
+ arbitrary_info_t info)
+{
+ return(labelcl_info_add_LBL(de, name, INFO_DESC_PRIVATE, info));
+}
+
+/*
+ * hwgraph_info_remove_LBL - Remove the label entry for the device.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_remove_LBL( vertex_hdl_t de,
+ char *name,
+ arbitrary_info_t *old_info)
+{
+ return(labelcl_info_remove_LBL(de, name, NULL, old_info));
+}
+
+/*
+ * hwgraph_info_replace_LBL - replaces an existing label with
+ * a new label info value.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_replace_LBL( vertex_hdl_t de,
+ char *name,
+ arbitrary_info_t info,
+ arbitrary_info_t *old_info)
+{
+ return(labelcl_info_replace_LBL(de, name,
+ INFO_DESC_PRIVATE, info,
+ NULL, old_info));
+}
+/*
+ * hwgraph_info_get_LBL - Get and return the info value in the label of the
+ * device.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_get_LBL(vertex_hdl_t de,
+ char *name,
+ arbitrary_info_t *infop)
+{
+ return(labelcl_info_get_LBL(de, name, NULL, infop));
+}
+
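The *_LBL wrappers above and below are thin veneers over the labelcl calls; a typical round trip through them, with an invented label name and value, would be (hypothetical sketch):

/* Hypothetical usage sketch, not part of the patch. */
static void example_label_usage(vertex_hdl_t vhdl)
{
	static char label[] = "example-info";	/* invented name */
	arbitrary_info_t stored;

	if (hwgraph_info_add_LBL(vhdl, label, (arbitrary_info_t)0x1234) != 0)
		return;

	if (hwgraph_info_get_LBL(vhdl, label, &stored) == 0)
		printk(KERN_DEBUG "%s = 0x%lx\n", label,
		       (unsigned long)stored);

	hwgraph_info_remove_LBL(vhdl, label, NULL);
}
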
+/*
+ * hwgraph_info_get_exported_LBL - Retrieve the info_desc and info pointer
+ *	of the given label for the device.  Note that the label matching
+ *	the name is returned irrespective of the info_desc value; it is
+ *	not clear why the word "exported" is used.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_get_exported_LBL(vertex_hdl_t de,
+ char *name,
+ int *export_info,
+ arbitrary_info_t *infop)
+{
+ int rc;
+ arb_info_desc_t info_desc;
+
+ rc = labelcl_info_get_LBL(de, name, &info_desc, infop);
+ if (rc == 0)
+ *export_info = (int)info_desc;
+
+ return(rc);
+}
+
+/*
+ * hwgraph_info_get_next_LBL - Returns the next label info given the
+ * current label entry in place.
+ *
+ * Once again this has no locking or reference count for protection.
+ *
+ */
+/* ARGSUSED */
+int
+hwgraph_info_get_next_LBL(vertex_hdl_t de,
+ char *buf,
+ arbitrary_info_t *infop,
+ labelcl_info_place_t *place)
+{
+ return(labelcl_info_get_next_LBL(de, buf, NULL, infop, place));
+}
+
+/*
+ * hwgraph_info_export_LBL - Retrieve the specified label entry and modify
+ * the info_desc field with the given value in nbytes.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_export_LBL(vertex_hdl_t de, char *name, int nbytes)
+{
+ arbitrary_info_t info;
+ int rc;
+
+ if (nbytes == 0)
+ nbytes = INFO_DESC_EXPORT;
+
+ if (nbytes < 0)
+ return(-1);
+
+ rc = labelcl_info_get_LBL(de, name, NULL, &info);
+ if (rc != 0)
+ return(rc);
+
+ rc = labelcl_info_replace_LBL(de, name,
+ nbytes, info, NULL, NULL);
+
+ return(rc);
+}
+
+/*
+ * hwgraph_info_unexport_LBL - Retrieve the given label entry and change the
+ * label info_desc field to INFO_DESC_PRIVATE.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_unexport_LBL(vertex_hdl_t de, char *name)
+{
+ arbitrary_info_t info;
+ int rc;
+
+ rc = labelcl_info_get_LBL(de, name, NULL, &info);
+ if (rc != 0)
+ return(rc);
+
+ rc = labelcl_info_replace_LBL(de, name,
+ INFO_DESC_PRIVATE, info, NULL, NULL);
+
+ return(rc);
+}
+
+/*
+ * hwgraph_path_lookup - return the handle for the given path.
+ *
+ */
+int
+hwgraph_path_lookup(vertex_hdl_t start_vertex_handle,
+ char *lookup_path,
+ vertex_hdl_t *vertex_handle_ptr,
+ char **remainder)
+{
+ *vertex_handle_ptr = hwgfs_find_handle(start_vertex_handle, /* start dir */
+ lookup_path, /* path */
+ 0, /* major */
+ 0, /* minor */
+ 0, /* char | block */
+ 1); /* traverse symlinks */
+ if (*vertex_handle_ptr == NULL)
+ return(-1);
+ else
+ return(0);
+}
+
+/*
+ * hwgraph_traverse - Find and return the handle starting from de.
+ *
+ */
+graph_error_t
+hwgraph_traverse(vertex_hdl_t de, char *path, vertex_hdl_t *found)
+{
+ /*
+ * get the directory entry (path should end in a directory)
+ */
+
+ *found = hwgfs_find_handle(de, /* start dir */
+ path, /* path */
+ 0, /* major */
+ 0, /* minor */
+ 0, /* char | block */
+ 1); /* traverse symlinks */
+ if (*found == NULL)
+ return(GRAPH_NOT_FOUND);
+ else
+ return(GRAPH_SUCCESS);
+}
+
+/*
+ * hwgraph_path_to_vertex - Return the entry handle for the given
+ * pathname .. assume traverse symlinks too!.
+ */
+vertex_hdl_t
+hwgraph_path_to_vertex(char *path)
+{
+ return(hwgfs_find_handle(NULL, /* start dir */
+ path, /* path */
+ 0, /* major */
+ 0, /* minor */
+ 0, /* char | block */
+ 1)); /* traverse symlinks */
+}
+
+/*
+ * hwgraph_inventory_remove - Removes an inventory entry.
+ *
+ * Remove an inventory item associated with a vertex. It is the caller's
+ * responsibility to make sure that there are no races between removing
+ * inventory from a vertex and simultaneously removing that vertex.
+*/
+int
+hwgraph_inventory_remove( vertex_hdl_t de,
+ int class,
+ int type,
+ major_t controller,
+ minor_t unit,
+ int state)
+{
+ return(0); /* Just a Stub for IRIX code. */
+}
+
+/*
+ * Find the canonical name for a given vertex by walking back through
+ * connectpt's until we hit the hwgraph root vertex (or until we run
+ * out of buffer space or until something goes wrong).
+ *
+ * COMPATIBILITY FUNCTIONALITY
+ * Walks back through 'parents', not necessarily the same as connectpts.
+ *
+ * Need to resolve the fact that this does not return the path from
+ * "/" but rather stops right before /dev.
+ */
+int
+hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen)
+{
+ char *locbuf;
+ int pos;
+
+ if (buflen < 1)
+ return(-1); /* XXX should be GRAPH_BAD_PARAM ? */
+
+ locbuf = kmalloc(buflen, GFP_KERNEL);
+
+ pos = hwgfs_generate_path(vhdl, locbuf, buflen);
+ if (pos < 0) {
+ kfree(locbuf);
+ return pos;
+ }
+
+ strcpy(buf, &locbuf[pos]);
+ kfree(locbuf);
+ return 0;
+}
+
+/*
+** vertex_to_name converts a vertex into a canonical name by walking
+** back through connect points until we hit the hwgraph root (or until
+** we run out of buffer space).
+**
+** Usually returns a pointer to the original buffer, filled in as
+** appropriate. If the buffer is too small to hold the entire name,
+** or if anything goes wrong while determining the name, vertex_to_name
+** returns "UnknownDevice".
+*/
+
+#define DEVNAME_UNKNOWN "UnknownDevice"
+
+char *
+vertex_to_name(vertex_hdl_t vhdl, char *buf, uint buflen)
+{
+ if (hwgraph_vertex_name_get(vhdl, buf, buflen) == GRAPH_SUCCESS)
+ return(buf);
+ else
+ return(DEVNAME_UNKNOWN);
+}
+
+graph_error_t
+hwgraph_edge_remove(vertex_hdl_t from, char *name, vertex_hdl_t *toptr)
+{
+ return(GRAPH_ILLEGAL_REQUEST);
+}
+
+graph_error_t
+hwgraph_vertex_unref(vertex_hdl_t vhdl)
+{
+ return(GRAPH_ILLEGAL_REQUEST);
+}
+
+
+EXPORT_SYMBOL(hwgraph_mk_dir);
+EXPORT_SYMBOL(hwgraph_path_add);
+EXPORT_SYMBOL(hwgraph_register);
+EXPORT_SYMBOL(hwgraph_vertex_destroy);
+EXPORT_SYMBOL(hwgraph_fastinfo_get);
+EXPORT_SYMBOL(hwgraph_fastinfo_set);
+EXPORT_SYMBOL(hwgraph_connectpt_set);
+EXPORT_SYMBOL(hwgraph_connectpt_get);
+EXPORT_SYMBOL(hwgraph_info_add_LBL);
+EXPORT_SYMBOL(hwgraph_info_remove_LBL);
+EXPORT_SYMBOL(hwgraph_info_replace_LBL);
+EXPORT_SYMBOL(hwgraph_info_get_LBL);
+EXPORT_SYMBOL(hwgraph_info_get_exported_LBL);
+EXPORT_SYMBOL(hwgraph_info_get_next_LBL);
+EXPORT_SYMBOL(hwgraph_info_export_LBL);
+EXPORT_SYMBOL(hwgraph_info_unexport_LBL);
+EXPORT_SYMBOL(hwgraph_path_lookup);
+EXPORT_SYMBOL(hwgraph_traverse);
+EXPORT_SYMBOL(hwgraph_vertex_name_get);
diff --git a/arch/ia64/sn/io/hwgfs/hcl_util.c b/arch/ia64/sn/io/hwgfs/hcl_util.c
new file mode 100644
index 00000000000000..6b6bb228bd6716
--- /dev/null
+++ b/arch/ia64/sn/io/hwgfs/hcl_util.c
@@ -0,0 +1,200 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/sn/sgi.h>
+#include <asm/io.h>
+#include <asm/sn/io.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/hwgfs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/nodepda.h>
+
+static vertex_hdl_t hwgraph_all_cnodes = GRAPH_VERTEX_NONE;
+extern vertex_hdl_t hwgraph_root;
+
+
+/*
+** Return the "master" for a given vertex. A master vertex is a
+** controller or adapter or other piece of hardware that the given
+** vertex passes through on the way to the rest of the system.
+*/
+vertex_hdl_t
+device_master_get(vertex_hdl_t vhdl)
+{
+ graph_error_t rc;
+ vertex_hdl_t master;
+
+ rc = hwgraph_edge_get(vhdl, EDGE_LBL_MASTER, &master);
+ if (rc == GRAPH_SUCCESS)
+ return(master);
+ else
+ return(GRAPH_VERTEX_NONE);
+}
+
+/*
+** Set the master for a given vertex.
+** Returns 0 on success, non-0 indicates failure
+*/
+int
+device_master_set(vertex_hdl_t vhdl, vertex_hdl_t master)
+{
+ graph_error_t rc;
+
+ rc = hwgraph_edge_add(vhdl, master, EDGE_LBL_MASTER);
+ return(rc != GRAPH_SUCCESS);
+}
+
+
+/*
+** Return the compact node id of the node that ultimately "owns" the specified
+** vertex. In order to do this, we walk back through masters and connect points
+** until we reach a vertex that represents a node.
+*/
+cnodeid_t
+master_node_get(vertex_hdl_t vhdl)
+{
+ cnodeid_t cnodeid;
+ vertex_hdl_t master;
+
+ for (;;) {
+ cnodeid = nodevertex_to_cnodeid(vhdl);
+ if (cnodeid != CNODEID_NONE)
+ return(cnodeid);
+
+ master = device_master_get(vhdl);
+
+ /* Check for exceptional cases */
+ if (master == vhdl) {
+ /* Since we got a reference to the "master" thru
+ * device_master_get() we should decrement
+ * its reference count by 1
+ */
+ return(CNODEID_NONE);
+ }
+
+ if (master == GRAPH_VERTEX_NONE) {
+ master = hwgraph_connectpt_get(vhdl);
+ if ((master == GRAPH_VERTEX_NONE) ||
+ (master == vhdl)) {
+ return(CNODEID_NONE);
+ }
+ }
+
+ vhdl = master;
+ }
+}
+
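master_node_get() keeps hopping to the master (or, failing that, the connect point) until it lands on a vertex carrying a compact node id.  A hypothetical caller pairing it with device_master_set() could look like:

/* Hypothetical sketch, not part of the patch. */
static cnodeid_t example_owning_node(vertex_hdl_t dev, vertex_hdl_t controller)
{
	/* device_master_set() returns non-zero on failure. */
	if (device_master_set(dev, controller))
		return CNODEID_NONE;

	return master_node_get(dev);
}
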
+static vertex_hdl_t hwgraph_all_cpuids = GRAPH_VERTEX_NONE;
+extern int maxcpus;
+
+void
+mark_cpuvertex_as_cpu(vertex_hdl_t vhdl, cpuid_t cpuid)
+{
+ if (cpuid == CPU_NONE)
+ return;
+
+ (void)labelcl_info_add_LBL(vhdl, INFO_LBL_CPUID, INFO_DESC_EXPORT,
+ (arbitrary_info_t)cpuid);
+ {
+ char cpuid_buffer[10];
+
+ if (hwgraph_all_cpuids == GRAPH_VERTEX_NONE) {
+ (void)hwgraph_path_add( hwgraph_root,
+ EDGE_LBL_CPUNUM,
+ &hwgraph_all_cpuids);
+ }
+
+ sprintf(cpuid_buffer, "%ld", cpuid);
+ (void)hwgraph_edge_add( hwgraph_all_cpuids,
+ vhdl,
+ cpuid_buffer);
+ }
+}
+
+/*
+** If the specified device represents a node, return its
+** compact node ID; otherwise, return CNODEID_NONE.
+*/
+cnodeid_t
+nodevertex_to_cnodeid(vertex_hdl_t vhdl)
+{
+ int rv = 0;
+ arbitrary_info_t cnodeid = CNODEID_NONE;
+
+ rv = labelcl_info_get_LBL(vhdl, INFO_LBL_CNODEID, NULL, &cnodeid);
+
+ return((cnodeid_t)cnodeid);
+}
+
+void
+mark_nodevertex_as_node(vertex_hdl_t vhdl, cnodeid_t cnodeid)
+{
+ if (cnodeid == CNODEID_NONE)
+ return;
+
+ cnodeid_to_vertex(cnodeid) = vhdl;
+ labelcl_info_add_LBL(vhdl, INFO_LBL_CNODEID, INFO_DESC_EXPORT,
+ (arbitrary_info_t)cnodeid);
+
+ {
+ char cnodeid_buffer[10];
+
+ if (hwgraph_all_cnodes == GRAPH_VERTEX_NONE) {
+ (void)hwgraph_path_add( hwgraph_root,
+ EDGE_LBL_NODENUM,
+ &hwgraph_all_cnodes);
+ }
+
+ sprintf(cnodeid_buffer, "%d", cnodeid);
+ (void)hwgraph_edge_add( hwgraph_all_cnodes,
+ vhdl,
+ cnodeid_buffer);
+ }
+}
+
+/*
+** If the specified device represents a CPU, return its cpuid;
+** otherwise, return CPU_NONE.
+*/
+cpuid_t
+cpuvertex_to_cpuid(vertex_hdl_t vhdl)
+{
+ arbitrary_info_t cpuid = CPU_NONE;
+
+ (void)labelcl_info_get_LBL(vhdl, INFO_LBL_CPUID, NULL, &cpuid);
+
+ return((cpuid_t)cpuid);
+}
+
+
+/*
+** dev_to_name converts a vertex_hdl_t into a canonical name. If the vertex_hdl_t
+** represents a vertex in the hardware graph, it is converted in the
+** normal way for vertices. If the vertex_hdl_t is an old vertex_hdl_t (one which
+** does not represent a hwgraph vertex), we synthesize a name based
+** on major/minor number.
+**
+** Usually returns a pointer to the original buffer, filled in as
+** appropriate. If the buffer is too small to hold the entire name,
+** or if anything goes wrong while determining the name, dev_to_name
+** returns "UnknownDevice".
+*/
+char *
+dev_to_name(vertex_hdl_t dev, char *buf, uint buflen)
+{
+ return(vertex_to_name(dev, buf, buflen));
+}
+
+
diff --git a/arch/ia64/sn/io/hwgfs/hwgfs.h b/arch/ia64/sn/io/hwgfs/hwgfs.h
new file mode 100644
index 00000000000000..3f260c227f68a1
--- /dev/null
+++ b/arch/ia64/sn/io/hwgfs/hwgfs.h
@@ -0,0 +1,23 @@
+
+typedef struct dentry *hwgfs_handle_t;
+
+extern hwgfs_handle_t hwgfs_register(hwgfs_handle_t dir, const char *name,
+ unsigned int flags,
+ unsigned int major, unsigned int minor,
+ umode_t mode, void *ops, void *info);
+extern int hwgfs_mk_symlink(hwgfs_handle_t dir, const char *name,
+ unsigned int flags, const char *link,
+ hwgfs_handle_t *handle, void *info);
+extern hwgfs_handle_t hwgfs_mk_dir(hwgfs_handle_t dir, const char *name,
+ void *info);
+extern void hwgfs_unregister(hwgfs_handle_t de);
+
+extern hwgfs_handle_t hwgfs_find_handle(hwgfs_handle_t dir, const char *name,
+ unsigned int major,unsigned int minor,
+ char type, int traverse_symlinks);
+extern hwgfs_handle_t hwgfs_get_parent(hwgfs_handle_t de);
+extern int hwgfs_generate_path(hwgfs_handle_t de, char *path, int buflen);
+
+extern void *hwgfs_get_info(hwgfs_handle_t de);
+extern int hwgfs_set_info(hwgfs_handle_t de, void *info);
+
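Taken together, these declarations are the whole surface the hcl layer builds on.  A hypothetical sketch of their intended use (names, mode bits and the file_operations pointer are invented; the usual VFS headers are assumed) is:

/* Hypothetical usage sketch, not part of the patch. */
static struct dentry *example_mk_node(struct file_operations *fops)
{
	hwgfs_handle_t dir, node;
	char path[64];

	dir = hwgfs_mk_dir(NULL, "example", NULL);	/* under the root */
	if (!dir)
		return NULL;

	node = hwgfs_register(dir, "ctl", 0, 0, 0,
			      S_IFCHR | S_IRUSR | S_IWUSR, fops, NULL);
	if (node && hwgfs_generate_path(node, path, sizeof(path)) == 0)
		printk(KERN_DEBUG "created %s\n", path);

	return node;
}
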
diff --git a/arch/ia64/sn/io/hwgfs/interface.c b/arch/ia64/sn/io/hwgfs/interface.c
new file mode 100644
index 00000000000000..b1d5d702877032
--- /dev/null
+++ b/arch/ia64/sn/io/hwgfs/interface.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * Portions based on Adam Richter's smalldevfs and thus
+ * Copyright 2002-2003 Yggdrasil Computing, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/sn/hwgfs.h>
+
+
+extern struct vfsmount *hwgfs_vfsmount;
+
+/* TODO: Move this to some .h file or, more likely, use a slightly
+ different interface from lookup_create. */
+extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
+
+static int
+walk_parents_mkdir(
+ const char **path,
+ struct nameidata *nd,
+ int is_dir)
+{
+ char *slash;
+ char buf[strlen(*path)+1];
+ int error;
+
+ while ((slash = strchr(*path, '/')) != NULL) {
+ int len = slash - *path;
+ memcpy(buf, *path, len);
+ buf[len] = '\0';
+
+ error = link_path_walk(buf, nd);
+ if (unlikely(error))
+ return error;
+
+ nd->dentry = lookup_create(nd, is_dir);
+ if (unlikely(IS_ERR(nd->dentry)))
+ return PTR_ERR(nd->dentry);
+
+ if (!nd->dentry->d_inode)
+ error = vfs_mkdir(nd->dentry->d_parent->d_inode,
+ nd->dentry, 0755);
+
+ up(&nd->dentry->d_parent->d_inode->i_sem);
+ if (unlikely(error))
+ return error;
+
+ *path += len + 1;
+ }
+
+ return 0;
+}
+
+/* On success, returns with parent_inode->i_sem taken. */
+static int
+hwgfs_decode(
+ hwgfs_handle_t dir,
+ const char *name,
+ int is_dir,
+ struct inode **parent_inode,
+ struct dentry **dentry)
+{
+ struct nameidata nd;
+ int error;
+
+ if (!dir)
+ dir = hwgfs_vfsmount->mnt_sb->s_root;
+
+ memset(&nd, 0, sizeof(nd));
+ nd.flags = LOOKUP_PARENT;
+ nd.mnt = mntget(hwgfs_vfsmount);
+ nd.dentry = dget(dir);
+
+ error = walk_parents_mkdir(&name, &nd, is_dir);
+ if (unlikely(error))
+ return error;
+
+ error = link_path_walk(name, &nd);
+ if (unlikely(error))
+ return error;
+
+ *dentry = lookup_create(&nd, is_dir);
+
+ if (unlikely(IS_ERR(*dentry)))
+ return PTR_ERR(*dentry);
+ *parent_inode = (*dentry)->d_parent->d_inode;
+ return 0;
+}
+
+static int
+path_len(
+ struct dentry *de,
+ struct dentry *root)
+{
+ int len = 0;
+
+ while (de != root) {
+ len += de->d_name.len + 1; /* count the '/' */
+ de = de->d_parent;
+ }
+ return len; /* -1 because we omit the leading '/',
+ +1 because we include trailing '\0' */
+}
+
+int
+hwgfs_generate_path(
+ hwgfs_handle_t de,
+ char *path,
+ int buflen)
+{
+ struct dentry *hwgfs_root;
+ int len;
+ char *path_orig = path;
+
+ if (unlikely(de == NULL))
+ return -EINVAL;
+
+ hwgfs_root = hwgfs_vfsmount->mnt_sb->s_root;
+ if (unlikely(de == hwgfs_root))
+ return -EINVAL;
+
+ spin_lock(&dcache_lock);
+ len = path_len(de, hwgfs_root);
+ if (len > buflen) {
+ spin_unlock(&dcache_lock);
+ return -ENAMETOOLONG;
+ }
+
+ path += len - 1;
+ *path = '\0';
+
+ for (;;) {
+ path -= de->d_name.len;
+ memcpy(path, de->d_name.name, de->d_name.len);
+ de = de->d_parent;
+ if (de == hwgfs_root)
+ break;
+ *(--path) = '/';
+ }
+
+ spin_unlock(&dcache_lock);
+ BUG_ON(path != path_orig);
+ return 0;
+}
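
As a usage note (not part of the patch): a minimal sketch of how a caller might turn a handle back into a hwgfs-relative path with hwgfs_generate_path(). The handle, buffer size and log message are illustrative assumptions only.

	#include <linux/kernel.h>
	#include <asm/sn/hwgfs.h>

	/* Sketch: log the hwgfs-relative path of a caller-supplied handle. */
	static void example_log_hwgfs_path(hwgfs_handle_t de)
	{
		char buf[256];	/* arbitrary size chosen for the example */

		/* 0 on success; -EINVAL for a bad handle, -ENAMETOOLONG if it won't fit */
		if (hwgfs_generate_path(de, buf, sizeof(buf)) == 0)
			printk(KERN_DEBUG "hwgfs path: %s\n", buf);
	}
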
+
+hwgfs_handle_t
+hwgfs_register(
+ hwgfs_handle_t dir,
+ const char *name,
+ unsigned int flags,
+ unsigned int major,
+ unsigned int minor,
+ umode_t mode,
+ void *ops,
+ void *info)
+{
+ dev_t devnum = MKDEV(major, minor);
+ struct inode *parent_inode;
+	struct dentry *dentry = NULL;	/* stays NULL if hwgfs_decode() fails */
+ int error;
+
+ error = hwgfs_decode(dir, name, 0, &parent_inode, &dentry);
+ if (likely(!error)) {
+ error = vfs_mknod(parent_inode, dentry, mode, devnum);
+ if (likely(!error)) {
+			/*
+			 * Do this inside the parent's i_sem to avoid racing
+			 * with lookups.
+			 */
+ if (S_ISCHR(mode))
+ dentry->d_inode->i_fop = ops;
+ dentry->d_fsdata = info;
+ up(&parent_inode->i_sem);
+ } else {
+ up(&parent_inode->i_sem);
+ dput(dentry);
+ dentry = NULL;
+ }
+ }
+
+ return dentry;
+}
+
+int
+hwgfs_mk_symlink(
+ hwgfs_handle_t dir,
+ const char *name,
+ unsigned int flags,
+ const char *link,
+ hwgfs_handle_t *handle,
+ void *info)
+{
+ struct inode *parent_inode;
+ struct dentry *dentry;
+ int error;
+
+ error = hwgfs_decode(dir, name, 0, &parent_inode, &dentry);
+ if (likely(!error)) {
+ error = vfs_symlink(parent_inode, dentry, link);
+ dentry->d_fsdata = info;
+ if (handle)
+ *handle = dentry;
+ up(&parent_inode->i_sem);
+ /* dput(dentry); */
+ }
+ return error;
+}
+
+hwgfs_handle_t
+hwgfs_mk_dir(
+ hwgfs_handle_t dir,
+ const char *name,
+ void *info)
+{
+ struct inode *parent_inode;
+	struct dentry *dentry = NULL;	/* stays NULL if hwgfs_decode() fails */
+ int error;
+
+ error = hwgfs_decode(dir, name, 1, &parent_inode, &dentry);
+ if (likely(!error)) {
+ error = vfs_mkdir(parent_inode, dentry, 0755);
+ up(&parent_inode->i_sem);
+
+ if (unlikely(error)) {
+ dput(dentry);
+ dentry = NULL;
+ } else {
+ dentry->d_fsdata = info;
+ }
+ }
+ return dentry;
+}
+
+void
+hwgfs_unregister(
+ hwgfs_handle_t de)
+{
+ struct inode *parent_inode = de->d_parent->d_inode;
+
+ if (S_ISDIR(de->d_inode->i_mode))
+ vfs_rmdir(parent_inode, de);
+ else
+ vfs_unlink(parent_inode, de);
+}
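
A hedged sketch of the intended calling pattern for the registration helpers above; the directory name, device numbers and file_operations are made up for illustration, and error handling is kept minimal.

	#include <linux/fs.h>
	#include <asm/sn/hwgfs.h>

	static struct file_operations example_fops;	/* hypothetical driver fops */

	/* Sketch: create <hwgfs root>/example/widget0 as a char node, then tear it down. */
	static void example_register_node(void)
	{
		hwgfs_handle_t dir, node;

		dir = hwgfs_mk_dir(NULL, "example", NULL);	/* NULL parent == hwgfs root */
		if (!dir)
			return;

		node = hwgfs_register(dir, "widget0", 0, /* major */ 0, /* minor */ 0,
				      S_IFCHR | 0600, &example_fops, NULL);
		if (node)
			hwgfs_unregister(node);
		hwgfs_unregister(dir);
	}
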
+
+/* XXX: this function is utterly bogus. Every use of it is racy and the
+ prototype is stupid. You have been warned. --hch. */
+hwgfs_handle_t
+hwgfs_find_handle(
+ hwgfs_handle_t base,
+ const char *name,
+ unsigned int major, /* IGNORED */
+ unsigned int minor, /* IGNORED */
+ char type, /* IGNORED */
+ int traverse_symlinks)
+{
+ struct dentry *dentry = NULL;
+ struct nameidata nd;
+ int error;
+
+ BUG_ON(*name=='/');
+
+ memset(&nd, 0, sizeof(nd));
+
+ nd.mnt = mntget(hwgfs_vfsmount);
+ nd.dentry = dget(base ? base : hwgfs_vfsmount->mnt_sb->s_root);
+ if (traverse_symlinks)
+ nd.flags = LOOKUP_FOLLOW;
+
+ error = link_path_walk(name, &nd);
+ if (likely(!error)) {
+ dentry = nd.dentry;
+ path_release(&nd); /* stale data from here! */
+ }
+
+ return dentry;
+}
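
For completeness, one way this lookup might be used during early bring-up; as the warning above stresses, the returned dentry carries no proper reference, so this is only a sketch for single-threaded init code, and the path name is an assumption.

	#include <asm/sn/hwgfs.h>

	/* Sketch: look up "module" relative to the hwgfs root, following symlinks. */
	static hwgfs_handle_t example_find_module_dir(void)
	{
		return hwgfs_find_handle(NULL, "module", 0, 0, 0, /* traverse_symlinks */ 1);
	}
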
+
+hwgfs_handle_t
+hwgfs_get_parent(
+ hwgfs_handle_t de)
+{
+ struct dentry *parent;
+
+ spin_lock(&de->d_lock);
+ parent = de->d_parent;
+ spin_unlock(&de->d_lock);
+
+ return parent;
+}
+
+int
+hwgfs_set_info(
+ hwgfs_handle_t de,
+ void *info)
+{
+ if (unlikely(de == NULL))
+ return -EINVAL;
+ de->d_fsdata = info;
+ return 0;
+}
+
+void *
+hwgfs_get_info(
+ hwgfs_handle_t de)
+{
+ return de->d_fsdata;
+}
+
+EXPORT_SYMBOL(hwgfs_generate_path);
+EXPORT_SYMBOL(hwgfs_register);
+EXPORT_SYMBOL(hwgfs_unregister);
+EXPORT_SYMBOL(hwgfs_mk_symlink);
+EXPORT_SYMBOL(hwgfs_mk_dir);
+EXPORT_SYMBOL(hwgfs_find_handle);
+EXPORT_SYMBOL(hwgfs_get_parent);
+EXPORT_SYMBOL(hwgfs_set_info);
+EXPORT_SYMBOL(hwgfs_get_info);
diff --git a/arch/ia64/sn/io/hwgfs/invent_stub.c b/arch/ia64/sn/io/hwgfs/invent_stub.c
new file mode 100644
index 00000000000000..0087bf21164bf3
--- /dev/null
+++ b/arch/ia64/sn/io/hwgfs/invent_stub.c
@@ -0,0 +1,148 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * Hardware Inventory
+ *
+ * See sys/sn/invent.h for an explanation of the hardware inventory contents.
+ *
+ */
+#include <linux/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/hwgfs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+void
+inventinit(void)
+{
+}
+
+/*
+ * For initializing/updating an inventory entry.
+ */
+void
+replace_in_inventory(
+ inventory_t *pinv, int class, int type,
+ int controller, int unit, int state)
+{
+}
+
+/*
+ * Inventory addition
+ *
+ * XXX NOTE: Currently must be called after dynamic memory allocator is
+ * initialized.
+ *
+ */
+void
+add_to_inventory(int class, int type, int controller, int unit, int state)
+{
+}
+
+
+/*
+ * Inventory retrieval
+ *
+ * These two routines are intended to prevent the caller from having to know
+ * the internal structure of the inventory table.
+ *
+ * The caller of get_next_inventory is supposed to call start_scan_inventory
+ * before the first call to get_next_inventory, and the caller is required
+ * to call end_scan_inventory after the last call to get_next_inventory.
+ */
+inventory_t *
+get_next_inventory(invplace_t *place)
+{
+ return((inventory_t *) NULL);
+}
+
+/* ARGSUSED */
+int
+get_sizeof_inventory(int abi)
+{
+ return sizeof(inventory_t);
+}
+
+/* Must be called prior to first call to get_next_inventory */
+void
+start_scan_inventory(invplace_t *iplace)
+{
+}
+
+/* Must be called after last call to get_next_inventory */
+void
+end_scan_inventory(invplace_t *iplace)
+{
+}
+
+/*
+ * Hardware inventory scanner.
+ *
+ * Calls fun() for every entry in inventory list unless fun() returns something
+ * other than 0.
+ */
+int
+scaninvent(int (*fun)(inventory_t *, void *), void *arg)
+{
+ return 0;
+}
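
The calling convention for scaninvent() would look roughly like the sketch below; note that this stubbed implementation returns 0 without ever invoking the callback, and the callback and counter here are invented for illustration.

	#include <asm/sn/invent.h>

	/* Example callback: count every inventory entry handed to us. */
	static int example_count_entry(inventory_t *inv, void *arg)
	{
		(*(int *)arg)++;
		return 0;		/* a non-zero return would stop the scan early */
	}

	static int example_count_inventory(void)
	{
		int count = 0;

		scaninvent(example_count_entry, &count);
		return count;		/* always 0 with this stubbed scaninvent() */
	}
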
+
+/*
+ * Find a particular inventory object
+ *
+ * pinv can be a pointer to an inventory entry and the search will begin from
+ * there, or it can be 0 in which case the search starts at the beginning.
+ * A -1 for any of the other arguments is a wildcard (i.e. it always matches).
+ */
+inventory_t *
+find_inventory(inventory_t *pinv, int class, int type, int controller,
+ int unit, int state)
+{
+ return((inventory_t *) NULL);
+}
+
+
+/*
+** Retrieve inventory data associated with a device.
+*/
+inventory_t *
+device_inventory_get_next( vertex_hdl_t device,
+ invplace_t *invplace)
+{
+ return((inventory_t *) NULL);
+}
+
+
+/*
+** Associate canonical inventory information with a device (and
+** add it to the general inventory).
+*/
+void
+device_inventory_add( vertex_hdl_t device,
+ int class,
+ int type,
+ major_t controller,
+ minor_t unit,
+ int state)
+{
+}
+
+int
+device_controller_num_get(vertex_hdl_t device)
+{
+ return (0);
+}
+
+void
+device_controller_num_set(vertex_hdl_t device, int contr_num)
+{
+}
diff --git a/arch/ia64/sn/io/hwgfs/labelcl.c b/arch/ia64/sn/io/hwgfs/labelcl.c
new file mode 100644
index 00000000000000..0521b4c32955c5
--- /dev/null
+++ b/arch/ia64/sn/io/hwgfs/labelcl.c
@@ -0,0 +1,657 @@
+/* labelcl - SGI's Hwgraph Compatibility Layer.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+*/
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/sched.h> /* needed for smp_lock.h :( */
+#include <linux/smp_lock.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/hwgfs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+/*
+** Very simple and dumb string table that supports only find/insert.
+** In practice, if this table gets too large, we may need a more
+** efficient data structure. Also note that currently there is no
+** way to delete an item once it's added; inserting a name that already
+** exists simply returns the existing entry.
+*/
+
+struct string_table label_string_table;
+
+
+
+/*
+ * string_table_init - Initialize the given string table.
+ */
+void
+string_table_init(struct string_table *string_table)
+{
+ string_table->string_table_head = NULL;
+ string_table->string_table_generation = 0;
+
+ /*
+	 * We need to initialize locks here!
+ */
+
+ return;
+}
+
+
+/*
+ * string_table_destroy - Destroy the given string table.
+ */
+void
+string_table_destroy(struct string_table *string_table)
+{
+ struct string_table_item *item, *next_item;
+
+ item = string_table->string_table_head;
+ while (item) {
+ next_item = item->next;
+
+ STRTBL_FREE(item);
+ item = next_item;
+ }
+
+ /*
+ * We need to destroy whatever lock we have here
+ */
+
+ return;
+}
+
+
+
+/*
+ * string_table_insert - Insert an entry in the string table .. duplicate
+ * names are not allowed.
+ */
+char *
+string_table_insert(struct string_table *string_table, char *name)
+{
+ struct string_table_item *item, *new_item = NULL, *last_item = NULL;
+
+again:
+ /*
+ * Need to lock the table ..
+ */
+ item = string_table->string_table_head;
+ last_item = NULL;
+
+ while (item) {
+ if (!strcmp(item->string, name)) {
+ /*
+			 * If we allocated space for the string and then found that
+ * someone else already entered it into the string table,
+ * free the space we just allocated.
+ */
+ if (new_item)
+ STRTBL_FREE(new_item);
+
+
+ /*
+ * Search optimization: move the found item to the head
+ * of the list.
+ */
+ if (last_item != NULL) {
+ last_item->next = item->next;
+ item->next = string_table->string_table_head;
+ string_table->string_table_head = item;
+ }
+ goto out;
+ }
+ last_item = item;
+ item=item->next;
+ }
+
+ /*
+ * name was not found, so add it to the string table.
+ */
+ if (new_item == NULL) {
+ long old_generation = string_table->string_table_generation;
+
+ new_item = STRTBL_ALLOC(strlen(name));
+
+ strcpy(new_item->string, name);
+
+ /*
+ * While we allocated memory for the new string, someone else
+ * changed the string table.
+ */
+ if (old_generation != string_table->string_table_generation) {
+ goto again;
+ }
+ } else {
+		/* At this point we only have the string table lock in access mode.
+ * Promote the access lock to an update lock for the string
+ * table insertion below.
+ */
+ long old_generation =
+ string_table->string_table_generation;
+
+ /*
+		 * After dropping the lock and then waiting for the update
+		 * lock, someone could have updated the string table.
+		 * Check the generation number for that case; if it
+		 * changed, try all over again.
+ */
+ if (old_generation !=
+ string_table->string_table_generation) {
+ goto again;
+ }
+ }
+
+ /*
+ * At this point, we're committed to adding new_item to the string table.
+ */
+ new_item->next = string_table->string_table_head;
+ item = string_table->string_table_head = new_item;
+ string_table->string_table_generation++;
+
+out:
+ /*
+ * Need to unlock here.
+ */
+ return(item->string);
+}
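
A small sketch of the find/insert behaviour described above, assuming the string_table declarations come from <asm/sn/labelcl.h> as in this file: inserting the same name twice hands back the same interned pointer rather than failing.

	#include <asm/sn/labelcl.h>

	static void example_string_table(void)
	{
		struct string_table tbl;
		char *a, *b;

		string_table_init(&tbl);
		a = string_table_insert(&tbl, "pci");
		b = string_table_insert(&tbl, "pci");	/* duplicate insert */
		/* a == b: the second call found the entry created by the first */
		string_table_destroy(&tbl);
	}
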
+
+/*
+ * labelcl_info_create - Creates the data structure that will hold the
+ *	device private information associated with an entry.
+ * The pointer to this structure is what gets stored in the
+ * (void * info).
+ */
+labelcl_info_t *
+labelcl_info_create()
+{
+
+ labelcl_info_t *new = NULL;
+
+ /* Initial allocation does not include any area for labels */
+ if ( ( new = (labelcl_info_t *)kmalloc (sizeof(labelcl_info_t), GFP_KERNEL) ) == NULL )
+ return NULL;
+
+ memset (new, 0, sizeof(labelcl_info_t));
+ new->hwcl_magic = LABELCL_MAGIC;
+ return( new);
+
+}
+
+/*
+ * labelcl_info_destroy - Frees the data structure that holds the
+ *	device private information associated with an entry. This
+ *	data structure was created by labelcl_info_create().
+ *
+ * The caller is responsible for nulling the (void *info) in the
+ * corresponding entry.
+ */
+int
+labelcl_info_destroy(labelcl_info_t *labelcl_info)
+{
+
+ if (labelcl_info == NULL)
+ return(0);
+
+ /* Free the label list */
+ if (labelcl_info->label_list)
+ kfree(labelcl_info->label_list);
+
+ /* Now free the label info area */
+ labelcl_info->hwcl_magic = 0;
+ kfree(labelcl_info);
+
+ return(0);
+}
+
+/*
+ * labelcl_info_add_LBL - Adds a new label entry in the labelcl info
+ * structure.
+ *
+ * Error is returned if we find another label with the same name.
+ */
+int
+labelcl_info_add_LBL(vertex_hdl_t de,
+ char *info_name,
+ arb_info_desc_t info_desc,
+ arbitrary_info_t info)
+{
+ labelcl_info_t *labelcl_info = NULL;
+ int num_labels;
+ int new_label_list_size;
+ label_info_t *old_label_list, *new_label_list = NULL;
+ char *name;
+ int i;
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ if (info_name == NULL)
+ return(-1);
+
+ if (strlen(info_name) >= LABEL_LENGTH_MAX)
+ return(-1);
+
+ name = string_table_insert(&label_string_table, info_name);
+
+ num_labels = labelcl_info->num_labels;
+ new_label_list_size = sizeof(label_info_t) * (num_labels+1);
+
+ /*
+ * Create a new label info area.
+ */
+ if (new_label_list_size != 0) {
+ new_label_list = (label_info_t *) kmalloc(new_label_list_size, GFP_KERNEL);
+
+ if (new_label_list == NULL)
+ return(-1);
+ }
+
+ /*
+ * At this point, we are committed to adding the labelled info,
+ * if there isn't already information there with the same name.
+ */
+ old_label_list = labelcl_info->label_list;
+
+ /*
+ * Look for matching info name.
+ */
+ for (i=0; i<num_labels; i++) {
+ if (!strcmp(info_name, old_label_list[i].name)) {
+ /* Not allowed to add duplicate labelled info names. */
+ kfree(new_label_list);
+ return(-1);
+ }
+ new_label_list[i] = old_label_list[i]; /* structure copy */
+ }
+
+ new_label_list[num_labels].name = name;
+ new_label_list[num_labels].desc = info_desc;
+ new_label_list[num_labels].info = info;
+
+ labelcl_info->num_labels = num_labels+1;
+ labelcl_info->label_list = new_label_list;
+
+ if (old_label_list != NULL)
+ kfree(old_label_list);
+
+ return(0);
+}
+
+/*
+ * labelcl_info_remove_LBL - Remove a label entry.
+ */
+int
+labelcl_info_remove_LBL(vertex_hdl_t de,
+ char *info_name,
+ arb_info_desc_t *info_desc,
+ arbitrary_info_t *info)
+{
+ labelcl_info_t *labelcl_info = NULL;
+ int num_labels;
+ int new_label_list_size;
+ label_info_t *old_label_list, *new_label_list = NULL;
+ arb_info_desc_t label_desc_found;
+ arbitrary_info_t label_info_found;
+ int i;
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ num_labels = labelcl_info->num_labels;
+ if (num_labels == 0) {
+ return(-1);
+ }
+
+ /*
+ * Create a new info area.
+ */
+ new_label_list_size = sizeof(label_info_t) * (num_labels-1);
+ if (new_label_list_size) {
+ new_label_list = (label_info_t *) kmalloc(new_label_list_size, GFP_KERNEL);
+ if (new_label_list == NULL)
+ return(-1);
+ }
+
+ /*
+ * At this point, we are committed to removing the labelled info,
+ * if it still exists.
+ */
+ old_label_list = labelcl_info->label_list;
+
+ /*
+ * Find matching info name.
+ */
+ for (i=0; i<num_labels; i++) {
+ if (!strcmp(info_name, old_label_list[i].name)) {
+ label_desc_found = old_label_list[i].desc;
+ label_info_found = old_label_list[i].info;
+ goto found;
+ }
+		if (i < num_labels-1)	/* avoid walking off the end of the new label list */
+ new_label_list[i] = old_label_list[i]; /* structure copy */
+ }
+
+ /* The named info doesn't exist. */
+ if (new_label_list)
+ kfree(new_label_list);
+
+ return(-1);
+
+found:
+ /* Finish up rest of labelled info */
+ for (i=i+1; i<num_labels; i++)
+ new_label_list[i-1] = old_label_list[i]; /* structure copy */
+
+	labelcl_info->num_labels = num_labels-1;	/* one label was removed */
+ labelcl_info->label_list = new_label_list;
+
+ kfree(old_label_list);
+
+ if (info != NULL)
+ *info = label_info_found;
+
+ if (info_desc != NULL)
+ *info_desc = label_desc_found;
+
+ return(0);
+}
+
+
+/*
+ * labelcl_info_replace_LBL - Replace an existing label entry with the
+ * given new information.
+ *
+ * Label entry must exist.
+ */
+int
+labelcl_info_replace_LBL(vertex_hdl_t de,
+ char *info_name,
+ arb_info_desc_t info_desc,
+ arbitrary_info_t info,
+ arb_info_desc_t *old_info_desc,
+ arbitrary_info_t *old_info)
+{
+ labelcl_info_t *labelcl_info = NULL;
+ int num_labels;
+ label_info_t *label_list;
+ int i;
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ num_labels = labelcl_info->num_labels;
+ if (num_labels == 0) {
+ return(-1);
+ }
+
+ if (info_name == NULL)
+ return(-1);
+
+ label_list = labelcl_info->label_list;
+
+ /*
+ * Verify that information under info_name already exists.
+ */
+ for (i=0; i<num_labels; i++)
+ if (!strcmp(info_name, label_list[i].name)) {
+ if (old_info != NULL)
+ *old_info = label_list[i].info;
+
+ if (old_info_desc != NULL)
+ *old_info_desc = label_list[i].desc;
+
+ label_list[i].info = info;
+ label_list[i].desc = info_desc;
+
+ return(0);
+ }
+
+
+ return(-1);
+}
+
+/*
+ * labelcl_info_get_LBL - Retrieve and return the information for the
+ * given label entry.
+ */
+int
+labelcl_info_get_LBL(vertex_hdl_t de,
+ char *info_name,
+ arb_info_desc_t *info_desc,
+ arbitrary_info_t *info)
+{
+ labelcl_info_t *labelcl_info = NULL;
+ int num_labels;
+ label_info_t *label_list;
+ int i;
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ num_labels = labelcl_info->num_labels;
+ if (num_labels == 0) {
+ return(-1);
+ }
+
+ label_list = labelcl_info->label_list;
+
+ /*
+ * Find information under info_name.
+ */
+ for (i=0; i<num_labels; i++)
+ if (!strcmp(info_name, label_list[i].name)) {
+ if (info != NULL)
+ *info = label_list[i].info;
+ if (info_desc != NULL)
+ *info_desc = label_list[i].desc;
+
+ return(0);
+ }
+
+ return(-1);
+}
+
+/*
+ * labelcl_info_get_next_LBL - returns the next label entry on the list.
+ */
+int
+labelcl_info_get_next_LBL(vertex_hdl_t de,
+ char *buffer,
+ arb_info_desc_t *info_descp,
+ arbitrary_info_t *infop,
+ labelcl_info_place_t *placeptr)
+{
+ labelcl_info_t *labelcl_info = NULL;
+ uint which_info;
+ label_info_t *label_list;
+
+ if ((buffer == NULL) && (infop == NULL))
+ return(-1);
+
+ if (placeptr == NULL)
+ return(-1);
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ which_info = *placeptr;
+
+ if (which_info >= labelcl_info->num_labels) {
+ return(-1);
+ }
+
+ label_list = (label_info_t *) labelcl_info->label_list;
+
+ if (buffer != NULL)
+ strcpy(buffer, label_list[which_info].name);
+
+ if (infop)
+ *infop = label_list[which_info].info;
+
+ if (info_descp)
+ *info_descp = label_list[which_info].desc;
+
+ *placeptr = which_info + 1;
+
+ return(0);
+}
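
To show how the label calls above fit together, a hedged sketch that attaches one label to a vertex and then iterates over all of its labels. It assumes the vertex already carries labelcl info (labelcl_info_create() stored via hwgfs_set_info()), and the label name, descriptor and value are arbitrary example choices.

	#include <linux/kernel.h>
	#include <asm/sn/sgi.h>
	#include <asm/sn/labelcl.h>

	static void example_walk_labels(vertex_hdl_t de)
	{
		char name[LABEL_LENGTH_MAX];
		arbitrary_info_t info;
		labelcl_info_place_t place = 0;		/* start at the first label */

		labelcl_info_add_LBL(de, "example_label", 0, (arbitrary_info_t)42);

		while (labelcl_info_get_next_LBL(de, name, NULL, &info, &place) == 0)
			printk(KERN_DEBUG "label %s -> 0x%lx\n", name, (unsigned long)info);
	}
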
+
+
+int
+labelcl_info_replace_IDX(vertex_hdl_t de,
+ int index,
+ arbitrary_info_t info,
+ arbitrary_info_t *old_info)
+{
+ arbitrary_info_t *info_list_IDX;
+ labelcl_info_t *labelcl_info = NULL;
+
+ if (de == NULL) {
+ printk(KERN_ALERT "labelcl: NULL handle given.\n");
+ return(-1);
+ }
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL) {
+ printk(KERN_ALERT "labelcl: Entry %p does not have info pointer.\n", (void *)de);
+ return(-1);
+ }
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ if ( (index < 0) || (index >= HWGRAPH_NUM_INDEX_INFO) )
+ return(-1);
+
+ /*
+ * Replace information at the appropriate index in this vertex with
+ * the new info.
+ */
+ info_list_IDX = labelcl_info->IDX_list;
+ if (old_info != NULL)
+ *old_info = info_list_IDX[index];
+ info_list_IDX[index] = info;
+
+ return(0);
+
+}
+
+/*
+ * labelcl_info_connectpt_set - Sets the connectpt.
+ */
+int
+labelcl_info_connectpt_set(hwgfs_handle_t de,
+ hwgfs_handle_t connect_de)
+{
+ arbitrary_info_t old_info;
+ int rv;
+
+ rv = labelcl_info_replace_IDX(de, HWGRAPH_CONNECTPT,
+ (arbitrary_info_t) connect_de, &old_info);
+
+ if (rv) {
+ return(rv);
+ }
+
+ return(0);
+}
+
+
+/*
+ * labelcl_info_get_IDX - Returns the information pointed at by index.
+ *
+ */
+int
+labelcl_info_get_IDX(vertex_hdl_t de,
+ int index,
+ arbitrary_info_t *info)
+{
+ arbitrary_info_t *info_list_IDX;
+ labelcl_info_t *labelcl_info = NULL;
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ if ( (index < 0) || (index >= HWGRAPH_NUM_INDEX_INFO) )
+ return(-1);
+
+ /*
+ * Return information at the appropriate index in this vertex.
+ */
+ info_list_IDX = labelcl_info->IDX_list;
+ if (info != NULL)
+ *info = info_list_IDX[index];
+
+ return(0);
+}
+
+/*
+ * labelcl_info_connectpt_get - Retrieve the connect point for a device entry.
+ */
+hwgfs_handle_t
+labelcl_info_connectpt_get(hwgfs_handle_t de)
+{
+ int rv;
+ arbitrary_info_t info;
+
+ rv = labelcl_info_get_IDX(de, HWGRAPH_CONNECTPT, &info);
+ if (rv)
+ return(NULL);
+
+ return((hwgfs_handle_t) info);
+}
diff --git a/arch/ia64/sn/io/hwgfs/ramfs.c b/arch/ia64/sn/io/hwgfs/ramfs.c
new file mode 100644
index 00000000000000..0bad4c76da9feb
--- /dev/null
+++ b/arch/ia64/sn/io/hwgfs/ramfs.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * Mostly shamelessly copied from Linus Torvalds' ramfs and thus
+ * Copyright (C) 2000 Linus Torvalds.
+ * 2000 Transmeta Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+
+/* some random number */
+#define HWGFS_MAGIC 0x12061983
+
+static struct super_operations hwgfs_ops;
+static struct address_space_operations hwgfs_aops;
+static struct file_operations hwgfs_file_operations;
+static struct inode_operations hwgfs_file_inode_operations;
+static struct inode_operations hwgfs_dir_inode_operations;
+
+static struct backing_dev_info hwgfs_backing_dev_info = {
+ .ra_pages = 0, /* No readahead */
+ .memory_backed = 1, /* Does not contribute to dirty memory */
+};
+
+struct inode *hwgfs_get_inode(struct super_block *sb, int mode, dev_t dev)
+{
+ struct inode * inode = new_inode(sb);
+
+ if (inode) {
+ inode->i_mode = mode;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_rdev = NODEV;
+ inode->i_mapping->a_ops = &hwgfs_aops;
+ inode->i_mapping->backing_dev_info = &hwgfs_backing_dev_info;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ switch (mode & S_IFMT) {
+ default:
+ init_special_inode(inode, mode, dev);
+ break;
+ case S_IFREG:
+ inode->i_op = &hwgfs_file_inode_operations;
+ inode->i_fop = &hwgfs_file_operations;
+ break;
+ case S_IFDIR:
+ inode->i_op = &hwgfs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_nlink++;
+ break;
+ case S_IFLNK:
+ inode->i_op = &page_symlink_inode_operations;
+ break;
+ }
+ }
+ return inode;
+}
+
+static int hwgfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+{
+ struct inode * inode = hwgfs_get_inode(dir->i_sb, mode, dev);
+ int error = -ENOSPC;
+
+ if (inode) {
+ d_instantiate(dentry, inode);
+ dget(dentry); /* Extra count - pin the dentry in core */
+ error = 0;
+ }
+ return error;
+}
+
+static int hwgfs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+{
+ return hwgfs_mknod(dir, dentry, mode | S_IFDIR, 0);
+}
+
+static int hwgfs_create(struct inode *dir, struct dentry *dentry, int mode)
+{
+ return hwgfs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+static int hwgfs_symlink(struct inode * dir, struct dentry *dentry, const char * symname)
+{
+ struct inode *inode;
+ int error = -ENOSPC;
+
+ inode = hwgfs_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+ if (inode) {
+ int l = strlen(symname)+1;
+ error = page_symlink(inode, symname, l);
+ if (!error) {
+ d_instantiate(dentry, inode);
+ dget(dentry);
+ } else
+ iput(inode);
+ }
+ return error;
+}
+
+static struct address_space_operations hwgfs_aops = {
+ .readpage = simple_readpage,
+ .prepare_write = simple_prepare_write,
+ .commit_write = simple_commit_write
+};
+
+static struct file_operations hwgfs_file_operations = {
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .fsync = simple_sync_file,
+ .sendfile = generic_file_sendfile,
+};
+
+static struct inode_operations hwgfs_file_inode_operations = {
+ .getattr = simple_getattr,
+};
+
+static struct inode_operations hwgfs_dir_inode_operations = {
+ .create = hwgfs_create,
+ .lookup = simple_lookup,
+ .link = simple_link,
+ .unlink = simple_unlink,
+ .symlink = hwgfs_symlink,
+ .mkdir = hwgfs_mkdir,
+ .rmdir = simple_rmdir,
+ .mknod = hwgfs_mknod,
+ .rename = simple_rename,
+};
+
+static struct super_operations hwgfs_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+static int hwgfs_fill_super(struct super_block * sb, void * data, int silent)
+{
+ struct inode * inode;
+ struct dentry * root;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = HWGFS_MAGIC;
+ sb->s_op = &hwgfs_ops;
+ inode = hwgfs_get_inode(sb, S_IFDIR | 0755, 0);
+ if (!inode)
+ return -ENOMEM;
+
+ root = d_alloc_root(inode);
+ if (!root) {
+ iput(inode);
+ return -ENOMEM;
+ }
+ sb->s_root = root;
+ return 0;
+}
+
+static struct super_block *hwgfs_get_sb(struct file_system_type *fs_type,
+ int flags, char *dev_name, void *data)
+{
+ return get_sb_single(fs_type, flags, data, hwgfs_fill_super);
+}
+
+static struct file_system_type hwgfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "hwgfs",
+ .get_sb = hwgfs_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
+struct vfsmount *hwgfs_vfsmount;
+
+int __init init_hwgfs_fs(void)
+{
+ int error;
+
+ error = register_filesystem(&hwgfs_fs_type);
+ if (error)
+ return error;
+
+ hwgfs_vfsmount = kern_mount(&hwgfs_fs_type);
+ if (IS_ERR(hwgfs_vfsmount))
+ goto fail;
+ return 0;
+
+fail:
+ unregister_filesystem(&hwgfs_fs_type);
+ return PTR_ERR(hwgfs_vfsmount);
+}
+
+static void __exit exit_hwgfs_fs(void)
+{
+ unregister_filesystem(&hwgfs_fs_type);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_hwgfs_fs)
+module_exit(exit_hwgfs_fs)
diff --git a/arch/ia64/sn/io/invent.c b/arch/ia64/sn/io/invent.c
deleted file mode 100644
index 9fce77d24cc132..00000000000000
--- a/arch/ia64/sn/io/invent.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * Hardware Inventory
- *
- * See sys/sn/invent.h for an explanation of the hardware inventory contents.
- *
- */
-#include <linux/types.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-
-void
-inventinit(void)
-{
-}
-
-/*
- * For initializing/updating an inventory entry.
- */
-void
-replace_in_inventory(
- inventory_t *pinv, int class, int type,
- int controller, int unit, int state)
-{
- pinv->inv_class = class;
- pinv->inv_type = type;
- pinv->inv_controller = controller;
- pinv->inv_unit = unit;
- pinv->inv_state = state;
-}
-
-/*
- * Inventory addition
- *
- * XXX NOTE: Currently must be called after dynamic memory allocator is
- * initialized.
- *
- */
-void
-add_to_inventory(int class, int type, int controller, int unit, int state)
-{
- (void)device_inventory_add((devfs_handle_t)GRAPH_VERTEX_NONE, class, type,
- controller, unit, state);
-}
-
-
-/*
- * Inventory retrieval
- *
- * These two routines are intended to prevent the caller from having to know
- * the internal structure of the inventory table.
- *
- * The caller of get_next_inventory is supposed to call start_scan_invent
- * before the irst call to get_next_inventory, and the caller is required
- * to call end_scan_invent after the last call to get_next_inventory.
- */
-inventory_t *
-get_next_inventory(invplace_t *place)
-{
- inventory_t *pinv;
- devfs_handle_t device = place->invplace_vhdl;
- int rv;
-
- while ((pinv = device_inventory_get_next(device, place)) == NULL) {
- /*
- * We've exhausted inventory items on the last device.
- * Advance to next device.
- */
- place->invplace_inv = NULL; /* Start from beginning invent on this device */
- rv = hwgraph_vertex_get_next(&device, &place->invplace_vplace);
- if (rv == LABELCL_SUCCESS) {
- place->invplace_vhdl = device;
- }
- else {
- place->invplace_vhdl = GRAPH_VERTEX_NONE;
- return(NULL);
- }
- }
-
- return(pinv);
-}
-
-/* ARGSUSED */
-int
-get_sizeof_inventory(int abi)
-{
- return sizeof(inventory_t);
-}
-
-/* Must be called prior to first call to get_next_inventory */
-void
-start_scan_inventory(invplace_t *iplace)
-{
- *iplace = INVPLACE_NONE;
-}
-
-/* Must be called after last call to get_next_inventory */
-void
-end_scan_inventory(invplace_t *iplace)
-{
- devfs_handle_t vhdl = iplace->invplace_vhdl;
- if (vhdl != GRAPH_VERTEX_NONE)
- hwgraph_vertex_unref(vhdl);
- *iplace = INVPLACE_NONE; /* paranoia */
-}
-
-/*
- * Hardware inventory scanner.
- *
- * Calls fun() for every entry in inventory list unless fun() returns something
- * other than 0.
- */
-int
-scaninvent(int (*fun)(inventory_t *, void *), void *arg)
-{
- inventory_t *ie;
- invplace_t iplace = { NULL,NULL, NULL };
- int rc;
-
- ie = 0;
- rc = 0;
- start_scan_inventory(&iplace);
- while ((ie = (inventory_t *)get_next_inventory(&iplace))) {
- rc = (*fun)(ie, arg);
- if (rc)
- break;
- }
- end_scan_inventory(&iplace);
- return rc;
-}
-
-/*
- * Find a particular inventory object
- *
- * pinv can be a pointer to an inventory entry and the search will begin from
- * there, or it can be 0 in which case the search starts at the beginning.
- * A -1 for any of the other arguments is a wildcard (i.e. it always matches).
- */
-inventory_t *
-find_inventory(inventory_t *pinv, int class, int type, int controller,
- int unit, int state)
-{
- invplace_t iplace = { NULL,NULL, NULL };
-
- start_scan_inventory(&iplace);
- while ((pinv = (inventory_t *)get_next_inventory(&iplace)) != NULL) {
- if (class != -1 && pinv->inv_class != class)
- continue;
- if (type != -1 && pinv->inv_type != type)
- continue;
-
- /* XXXX - perhaps the "state" entry should be ignored so an
- * an existing entry can be updated. See vino_init() and
- * ml/IP22.c:add_ioboard() for an example.
- */
- if (state != -1 && pinv->inv_state != state)
- continue;
- if (controller != -1
- && pinv->inv_controller != controller)
- continue;
- if (unit != -1 && pinv->inv_unit != unit)
- continue;
- break;
- }
- end_scan_inventory(&iplace);
-
- return(pinv);
-}
-
-
-/*
-** Retrieve inventory data associated with a device.
-*/
-inventory_t *
-device_inventory_get_next( devfs_handle_t device,
- invplace_t *invplace)
-{
- inventory_t *pinv;
- int rv;
-
- rv = hwgraph_inventory_get_next(device, invplace, &pinv);
- if (rv == LABELCL_SUCCESS)
- return(pinv);
- else
- return(NULL);
-}
-
-
-/*
-** Associate canonical inventory information with a device (and
-** add it to the general inventory).
-*/
-void
-device_inventory_add( devfs_handle_t device,
- int class,
- int type,
- major_t controller,
- minor_t unit,
- int state)
-{
- hwgraph_inventory_add(device, class, type, controller, unit, state);
-}
-
-int
-device_controller_num_get(devfs_handle_t device)
-{
- return (hwgraph_controller_num_get(device));
-}
-
-void
-device_controller_num_set(devfs_handle_t device, int contr_num)
-{
- hwgraph_controller_num_set(device, contr_num);
-}
diff --git a/arch/ia64/sn/io/io.c b/arch/ia64/sn/io/io.c
index c37cd8232649a8..6059bbc37e5b57 100644
--- a/arch/ia64/sn/io/io.c
+++ b/arch/ia64/sn/io/io.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/config.h>
@@ -29,17 +29,11 @@
#include <asm/sn/sn_cpuid.h>
extern xtalk_provider_t hub_provider;
-extern void hub_intr_init(devfs_handle_t hubv);
+extern void hub_intr_init(vertex_hdl_t hubv);
+static int force_fire_and_forget = 1;
+static int ignore_conveyor_override;
-/*
- * Perform any initializations needed to support hub-based I/O.
- * Called once during startup.
- */
-void
-hubio_init(void)
-{
-}
/*
* Implementation of hub iobus operations.
@@ -58,8 +52,8 @@ hubio_init(void)
/*
* Setup pio structures needed for a particular hub.
*/
-void
-hub_pio_init(devfs_handle_t hubv)
+static void
+hub_pio_init(vertex_hdl_t hubv)
{
xwidgetnum_t widget;
hubinfo_t hubinfo;
@@ -114,7 +108,7 @@ hub_pio_init(devfs_handle_t hubv)
*/
/* ARGSUSED */
hub_piomap_t
-hub_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
+hub_piomap_alloc(vertex_hdl_t dev, /* set up mapping for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* map for this xtalk_addr range */
size_t byte_count,
@@ -123,7 +117,7 @@ hub_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
{
xwidget_info_t widget_info = xwidget_info_get(dev);
xwidgetnum_t widget = xwidget_info_id_get(widget_info);
- devfs_handle_t hubv = xwidget_info_master_get(widget_info);
+ vertex_hdl_t hubv = xwidget_info_master_get(widget_info);
hubinfo_t hubinfo;
hub_piomap_t bw_piomap;
int bigwin, free_bw_index;
@@ -288,7 +282,7 @@ done:
void
hub_piomap_free(hub_piomap_t hub_piomap)
{
- devfs_handle_t hubv;
+ vertex_hdl_t hubv;
hubinfo_t hubinfo;
nasid_t nasid;
unsigned long s;
@@ -371,7 +365,7 @@ hub_piomap_done(hub_piomap_t hub_piomap) /* done with these mapping resources */
*/
/* ARGSUSED */
caddr_t
-hub_piotrans_addr( devfs_handle_t dev, /* translate to this device */
+hub_piotrans_addr( vertex_hdl_t dev, /* translate to this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* Crosstalk address */
size_t byte_count, /* map this many bytes */
@@ -379,7 +373,7 @@ hub_piotrans_addr( devfs_handle_t dev, /* translate to this device */
{
xwidget_info_t widget_info = xwidget_info_get(dev);
xwidgetnum_t widget = xwidget_info_id_get(widget_info);
- devfs_handle_t hubv = xwidget_info_master_get(widget_info);
+ vertex_hdl_t hubv = xwidget_info_master_get(widget_info);
hub_piomap_t hub_piomap;
hubinfo_t hubinfo;
caddr_t addr;
@@ -416,7 +410,7 @@ hub_piotrans_addr( devfs_handle_t dev, /* translate to this device */
*/
/* ARGSUSED */
hub_dmamap_t
-hub_dmamap_alloc( devfs_handle_t dev, /* set up mappings for this device */
+hub_dmamap_alloc( vertex_hdl_t dev, /* set up mappings for this device */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags) /* defined in dma.h */
@@ -424,7 +418,7 @@ hub_dmamap_alloc( devfs_handle_t dev, /* set up mappings for this device */
hub_dmamap_t dmamap;
xwidget_info_t widget_info = xwidget_info_get(dev);
xwidgetnum_t widget = xwidget_info_id_get(widget_info);
- devfs_handle_t hubv = xwidget_info_master_get(widget_info);
+ vertex_hdl_t hubv = xwidget_info_master_get(widget_info);
dmamap = kmalloc(sizeof(struct hub_dmamap_s), GFP_ATOMIC);
dmamap->hdma_xtalk_info.xd_dev = dev;
@@ -460,7 +454,7 @@ hub_dmamap_addr( hub_dmamap_t dmamap, /* use these mapping resources */
paddr_t paddr, /* map for this address */
size_t byte_count) /* map this many bytes */
{
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
ASSERT(dmamap->hdma_flags & HUB_DMAMAP_IS_VALID);
@@ -479,12 +473,7 @@ hub_dmamap_addr( hub_dmamap_t dmamap, /* use these mapping resources */
}
/* There isn't actually any DMA mapping hardware on the hub. */
-#ifdef CONFIG_IA64_SGI_SN2
return( (PHYS_TO_DMA(paddr)) );
-#else
- /* no translation needed */
- return(paddr);
-#endif
}
/*
@@ -498,7 +487,7 @@ hub_dmamap_list(hub_dmamap_t hub_dmamap, /* use these mapping resources */
alenlist_t palenlist, /* map this area of memory */
unsigned flags)
{
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
ASSERT(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_VALID);
@@ -527,7 +516,7 @@ hub_dmamap_list(hub_dmamap_t hub_dmamap, /* use these mapping resources */
void
hub_dmamap_done(hub_dmamap_t hub_dmamap) /* done with these mapping resources */
{
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
if (hub_dmamap->hdma_flags & HUB_DMAMAP_USED) {
hub_dmamap->hdma_flags &= ~HUB_DMAMAP_USED;
@@ -549,18 +538,13 @@ hub_dmamap_done(hub_dmamap_t hub_dmamap) /* done with these mapping resources */
*/
/* ARGSUSED */
iopaddr_t
-hub_dmatrans_addr( devfs_handle_t dev, /* translate for this device */
+hub_dmatrans_addr( vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
unsigned flags) /* defined in dma.h */
{
-#ifdef CONFIG_IA64_SGI_SN2
return( (PHYS_TO_DMA(paddr)) );
-#else
- /* no translation needed */
- return(paddr);
-#endif
}
/*
@@ -570,7 +554,7 @@ hub_dmatrans_addr( devfs_handle_t dev, /* translate for this device */
*/
/* ARGSUSED */
alenlist_t
-hub_dmatrans_list( devfs_handle_t dev, /* translate for this device */
+hub_dmatrans_list( vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
alenlist_t palenlist, /* system address/length list */
unsigned flags) /* defined in dma.h */
@@ -589,7 +573,7 @@ hub_dmamap_drain( hub_dmamap_t map)
/*ARGSUSED*/
void
-hub_dmaaddr_drain( devfs_handle_t vhdl,
+hub_dmaaddr_drain( vertex_hdl_t vhdl,
paddr_t addr,
size_t bytes)
{
@@ -598,7 +582,7 @@ hub_dmaaddr_drain( devfs_handle_t vhdl,
/*ARGSUSED*/
void
-hub_dmalist_drain( devfs_handle_t vhdl,
+hub_dmalist_drain( vertex_hdl_t vhdl,
alenlist_t list)
{
/* XXX- flush caches, if cache coherency WAR is needed */
@@ -612,10 +596,8 @@ hub_dmalist_drain( devfs_handle_t vhdl,
* Perform initializations that allow this hub to start crosstalk support.
*/
void
-hub_provider_startup(devfs_handle_t hubv)
+hub_provider_startup(vertex_hdl_t hubv)
{
- extern void hub_pio_init(devfs_handle_t hubv);
-
hub_pio_init(hubv);
hub_intr_init(hubv);
}
@@ -624,7 +606,7 @@ hub_provider_startup(devfs_handle_t hubv)
* Shutdown crosstalk support from a hub.
*/
void
-hub_provider_shutdown(devfs_handle_t hub)
+hub_provider_shutdown(vertex_hdl_t hub)
{
/* TBD */
xtalk_provider_unregister(hub);
@@ -666,46 +648,6 @@ hub_check_window_equiv(void *addra, void *addrb)
/*
- * Determine whether two PCI addresses actually refer to the same device.
- * This only works if both addresses are in small windows. It's used to
- * determine whether prom addresses refer to particular PCI devices.
- */
-/*
- * XXX - This won't work as written if we ever have more than two nodes
- * on a crossbow. In that case, we'll need an array or partners.
- */
-int
-hub_check_pci_equiv(void *addra, void *addrb)
-{
- nasid_t nasida, nasidb;
-
- /*
- * This is for a permanent workaround that causes us to use a
- * big window in place of small window 0.
- */
- if (!hub_check_window_equiv(addra, addrb))
- return 0;
-
- /* If the offsets aren't the same, forget it. */
- if (SWIN_WIDGETADDR((__psunsigned_t)addra) !=
- (SWIN_WIDGETADDR((__psunsigned_t)addrb)))
- return 0;
-
- /* Now, check the nasids */
- nasida = NASID_GET(addra);
- nasidb = NASID_GET(addrb);
-
- ASSERT(NASID_TO_COMPACT_NODEID(nasida) != INVALID_NASID);
- ASSERT(NASID_TO_COMPACT_NODEID(nasidb) != INVALID_NASID);
-
- /*
- * Either the NASIDs must be the same or they must be crossbow
- * partners (on the same crossbow).
- */
- return (check_nasid_equiv(nasida, nasidb));
-}
-
-/*
* hub_setup_prb(nasid, prbnum, credits, conveyor)
*
* Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
@@ -716,8 +658,6 @@ hub_setup_prb(nasid_t nasid, int prbnum, int credits, int conveyor)
{
iprb_t prb;
int prb_offset;
- extern int force_fire_and_forget;
- extern volatile int ignore_conveyor_override;
if (force_fire_and_forget && !ignore_conveyor_override)
if (conveyor == HUB_PIO_CONVEYOR)
@@ -776,13 +716,8 @@ hub_set_piomode(nasid_t nasid, int conveyor)
int direct_connect;
hubii_wcr_t ii_wcr;
int prbnum;
- int cons_lock = 0;
ASSERT(NASID_TO_COMPACT_NODEID(nasid) != INVALID_CNODEID);
- if (nasid == get_console_nasid()) {
- PUTBUF_LOCK(s);
- cons_lock = 1;
- }
ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
@@ -812,9 +747,6 @@ hub_set_piomode(nasid_t nasid, int conveyor)
}
REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
-
- if (cons_lock)
- PUTBUF_UNLOCK(s);
}
/* Interface to allow special drivers to set hub specific
* device flags.
@@ -842,90 +774,6 @@ hub_widget_flags_set(nasid_t nasid,
return 1;
}
-/* Interface to allow special drivers to set hub specific
- * device flags.
- * Return 0 on failure , 1 on success
- */
-int
-hub_device_flags_set(devfs_handle_t widget_vhdl,
- hub_widget_flags_t flags)
-{
- xwidget_info_t widget_info = xwidget_info_get(widget_vhdl);
- xwidgetnum_t widget_num = xwidget_info_id_get(widget_info);
- devfs_handle_t hub_vhdl = xwidget_info_master_get(widget_info);
- hubinfo_t hub_info = 0;
- nasid_t nasid;
- unsigned long s;
- int rv;
-
- /* Use the nasid from the hub info hanging off the hub vertex
- * and widget number from the widget vertex
- */
- hubinfo_get(hub_vhdl, &hub_info);
- /* Being over cautious by grabbing a lock */
- s = mutex_spinlock(&hub_info->h_bwlock);
- nasid = hub_info->h_nasid;
- rv = hub_widget_flags_set(nasid,widget_num,flags);
- mutex_spinunlock(&hub_info->h_bwlock, s);
-
- return rv;
-}
-
-/*
- * hub_device_inquiry
- * Find out the xtalk widget related information stored in this
- * hub's II.
- */
-void
-hub_device_inquiry(devfs_handle_t xbus_vhdl, xwidgetnum_t widget)
-{
- devfs_handle_t xconn, hub_vhdl;
- char widget_name[8];
- hubreg_t ii_iidem,ii_iiwa, ii_iowa;
- hubinfo_t hubinfo;
- nasid_t nasid;
- int d;
-
- sprintf(widget_name, "%d", widget);
- if (hwgraph_traverse(xbus_vhdl, widget_name, &xconn)
- != GRAPH_SUCCESS)
- return;
-
- hub_vhdl = device_master_get(xconn);
- if (hub_vhdl == GRAPH_VERTEX_NONE)
- return;
-
- hubinfo_get(hub_vhdl, &hubinfo);
- if (!hubinfo)
- return;
-
- nasid = hubinfo->h_nasid;
-
- ii_iidem = REMOTE_HUB_L(nasid, IIO_IIDEM);
- ii_iiwa = REMOTE_HUB_L(nasid, IIO_IIWA);
- ii_iowa = REMOTE_HUB_L(nasid, IIO_IOWA);
-
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk("Inquiry Info for %v\n", xconn);
-#else
- printk("Inquiry Info for %p\n", (void *)xconn);
-#endif
-
- printk("\tDevices shutdown [ ");
-
- for (d = 0 ; d <= 7 ; d++)
- if (!(ii_iidem & (IIO_IIDEM_WIDGETDEV_MASK(widget,d))))
- printk(" %d", d);
-
- printk("]\n");
-
- printk("\tInbound access ? %s\n",
- ii_iiwa & IIO_IIWA_WIDGET(widget) ? "yes" : "no");
-
- printk("\tOutbound access ? %s\n",
- ii_iowa & IIO_IOWA_WIDGET(widget) ? "yes" : "no");
-
-}
/*
* A pointer to this structure hangs off of every hub hwgraph vertex.
@@ -955,8 +803,6 @@ xtalk_provider_t hub_provider = {
(xtalk_intr_free_f *) hub_intr_free,
(xtalk_intr_connect_f *) hub_intr_connect,
(xtalk_intr_disconnect_f *) hub_intr_disconnect,
- (xtalk_intr_cpu_get_f *) hub_intr_cpu_get,
-
(xtalk_provider_startup_f *) hub_provider_startup,
(xtalk_provider_shutdown_f *) hub_provider_shutdown,
};
diff --git a/arch/ia64/sn/io/klconflib.c b/arch/ia64/sn/io/klconflib.c
deleted file mode 100644
index 8b9feb29a2b16c..00000000000000
--- a/arch/ia64/sn/io/klconflib.c
+++ /dev/null
@@ -1,1042 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/ctype.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/module.h>
-#include <asm/sn/router.h>
-#include <asm/sn/xtalk/xbow.h>
-
-#define printf printk
-int hasmetarouter;
-
-#define LDEBUG 0
-#define NIC_UNKNOWN ((nic_t) -1)
-
-#undef DEBUG_KLGRAPH
-#ifdef DEBUG_KLGRAPH
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif /* DEBUG_KLGRAPH */
-
-static void sort_nic_names(lboard_t *) ;
-
-u64 klgraph_addr[MAX_COMPACT_NODES];
-
-lboard_t *
-find_lboard(lboard_t *start, unsigned char brd_type)
-{
- /* Search all boards stored on this node. */
- while (start) {
- if (start->brd_type == brd_type)
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-lboard_t *
-find_lboard_class(lboard_t *start, unsigned char brd_type)
-{
- /* Search all boards stored on this node. */
- while (start) {
- if (KLCLASS(start->brd_type) == KLCLASS(brd_type))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-klinfo_t *
-find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
-{
- int index, j;
-
- if (kli == (klinfo_t *)NULL) {
- index = 0;
- } else {
- for (j = 0; j < KLCF_NUM_COMPS(brd); j++) {
- if (kli == KLCF_COMP(brd, j))
- break;
- }
- index = j;
- if (index == KLCF_NUM_COMPS(brd)) {
- DBG("find_component: Bad pointer: 0x%p\n", kli);
- return (klinfo_t *)NULL;
- }
- index++; /* next component */
- }
-
- for (; index < KLCF_NUM_COMPS(brd); index++) {
- kli = KLCF_COMP(brd, index);
- DBG("find_component: brd %p kli %p request type = 0x%x kli type 0x%x\n", brd, kli, kli->struct_type, KLCF_COMP_TYPE(kli));
- if (KLCF_COMP_TYPE(kli) == struct_type)
- return kli;
- }
-
- /* Didn't find it. */
- return (klinfo_t *)NULL;
-}
-
-klinfo_t *
-find_first_component(lboard_t *brd, unsigned char struct_type)
-{
- return find_component(brd, (klinfo_t *)NULL, struct_type);
-}
-
-lboard_t *
-find_lboard_modslot(lboard_t *start, moduleid_t mod, slotid_t slot)
-{
- /* Search all boards stored on this node. */
- while (start) {
- if (MODULE_MATCH(start->brd_module, mod) &&
- (start->brd_slot == slot))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-lboard_t *
-find_lboard_module(lboard_t *start, moduleid_t mod)
-{
- /* Search all boards stored on this node. */
- while (start) {
- if (MODULE_MATCH(start->brd_module, mod))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-lboard_t *
-find_lboard_module_class(lboard_t *start, moduleid_t mod,
- unsigned char brd_type)
-{
- while (start) {
-
- DBG("find_lboard_module_class: lboard 0x%p, start->brd_module 0x%x, mod 0x%x, start->brd_type 0x%x, brd_type 0x%x\n", start, start->brd_module, mod, start->brd_type, brd_type);
-
- if (MODULE_MATCH(start->brd_module, mod) &&
- (KLCLASS(start->brd_type) == KLCLASS(brd_type)))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-
-/*
- * Convert a NIC name to a name for use in the hardware graph.
- */
-void
-nic_name_convert(char *old_name, char *new_name)
-{
- int i;
- char c;
- char *compare_ptr;
-
- if ((old_name[0] == '\0') || (old_name[1] == '\0')) {
- strcpy(new_name, EDGE_LBL_XWIDGET);
- } else {
- for (i = 0; i < strlen(old_name); i++) {
- c = old_name[i];
-
- if (isalpha(c))
- new_name[i] = tolower(c);
- else if (isdigit(c))
- new_name[i] = c;
- else
- new_name[i] = '_';
- }
- new_name[i] = '\0';
- }
-
- /* XXX -
- * Since a bunch of boards made it out with weird names like
- * IO6-fibbbed and IO6P2, we need to look for IO6 in a name and
- * replace it with "baseio" to avoid confusion in the field.
- * We also have to make sure we don't report media_io instead of
- * baseio.
- */
-
- /* Skip underscores at the beginning of the name */
- for (compare_ptr = new_name; (*compare_ptr) == '_'; compare_ptr++)
- ;
-
- /*
- * Check for some names we need to replace. Early boards
- * had junk following the name so check only the first
- * characters.
- */
- if (!strncmp(new_name, "io6", 3) ||
- !strncmp(new_name, "mio", 3) ||
- !strncmp(new_name, "media_io", 8))
- strcpy(new_name, "baseio");
- else if (!strncmp(new_name, "divo", 4))
- strcpy(new_name, "divo") ;
-
-}
-
-/* Check if the given board corresponds to the global
- * master io6
- */
-int
-is_master_baseio(nasid_t nasid,moduleid_t module,slotid_t slot)
-{
- lboard_t *board;
-
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
-/* If this works then look for callers of is_master_baseio()
- * (e.g. iograph.c) and let them pass in a slot if they want
- */
- board = find_lboard_module((lboard_t *)KL_CONFIG_INFO(nasid), module);
-#else
- board = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid), module, slot);
-#endif
-
-#ifndef _STANDALONE
- {
- cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
-
- if (!board && (NODEPDA(cnode)->xbow_peer != INVALID_NASID))
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
- board = find_lboard_module((lboard_t *)
- KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
- module);
-#else
- board = find_lboard_modslot((lboard_t *)
- KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
- module, slot);
-#endif
- }
-#endif
- if (!board)
- return(0);
- return(board->brd_flags & GLOBAL_MASTER_IO6);
-}
-/*
- * Find the lboard structure and get the board name.
- * If we can't find the structure or it's too low a revision,
- * use default name.
- */
-lboard_t *
-get_board_name(nasid_t nasid, moduleid_t mod, slotid_t slot, char *name)
-{
- lboard_t *brd;
-
- brd = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid),
- mod, slot);
-
-#ifndef _STANDALONE
- {
- cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
-
- if (!brd && (NODEPDA(cnode)->xbow_peer != INVALID_NASID))
- brd = find_lboard_modslot((lboard_t *)
- KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
- mod, slot);
- }
-#endif
-
- if (!brd || (brd->brd_sversion < 2)) {
- strcpy(name, EDGE_LBL_XWIDGET);
- } else {
- nic_name_convert(brd->brd_name, name);
- }
-
- /*
- * PV # 540860
- * If the name is not 'baseio'
- * get the lowest of all the names in the nic string.
- * This is needed for boards like divo, which can have
- * a bunch of daughter cards, but would like to be called
- * divo. We could do this for baseio
- * but it has some special case names that we would not
- * like to disturb at this point.
- */
-
- /* gfx boards don't need any of this name scrambling */
- if (brd && (KLCLASS(brd->brd_type) == KLCLASS_GFX)) {
- return(brd);
- }
-
- if (!(!strcmp(name, "baseio") )) {
- if (brd) {
- sort_nic_names(brd) ;
- /* Convert to small case, '-' to '_' etc */
- nic_name_convert(brd->brd_name, name) ;
- }
- }
-
- return(brd);
-}
-
-/*
- * get_actual_nasid
- *
- * Completely disabled brds have their klconfig on
- * some other nasid as they have no memory. But their
- * actual nasid is hidden in the klconfig. Use this
- * routine to get it. Works for normal boards too.
- */
-nasid_t
-get_actual_nasid(lboard_t *brd)
-{
- klhub_t *hub ;
-
- if (!brd)
- return INVALID_NASID ;
-
- /* find out if we are a completely disabled brd. */
-
- hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
- if (!hub)
- return INVALID_NASID ;
- if (!(hub->hub_info.flags & KLINFO_ENABLE)) /* disabled node brd */
- return hub->hub_info.physid ;
- else
- return brd->brd_nasid ;
-}
-
-int
-xbow_port_io_enabled(nasid_t nasid, int link)
-{
- lboard_t *brd;
- klxbow_t *xbow_p;
-
- /*
- * look for boards that might contain an xbow or xbridge
- */
- brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IOBRICK_XBOW);
- if (brd == NULL) return 0;
-
- if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
- == NULL)
- return 0;
-
- if (!XBOW_PORT_TYPE_IO(xbow_p, link) || !XBOW_PORT_IS_ENABLED(xbow_p, link))
- return 0;
-
- DBG("xbow_port_io_enabled: brd 0x%p xbow_p 0x%p \n", brd, xbow_p);
-
- return 1;
-}
-
-void
-board_to_path(lboard_t *brd, char *path)
-{
- moduleid_t modnum;
- char *board_name;
-
- ASSERT(brd);
-
- switch (KLCLASS(brd->brd_type)) {
-
- case KLCLASS_NODE:
- board_name = EDGE_LBL_NODE;
- break;
- case KLCLASS_ROUTER:
- if (brd->brd_type == KLTYPE_META_ROUTER) {
- board_name = EDGE_LBL_META_ROUTER;
- hasmetarouter++;
- } else if (brd->brd_type == KLTYPE_REPEATER_ROUTER) {
- board_name = EDGE_LBL_REPEATER_ROUTER;
- hasmetarouter++;
- } else
- board_name = EDGE_LBL_ROUTER;
- break;
- case KLCLASS_MIDPLANE:
- board_name = EDGE_LBL_MIDPLANE;
- break;
- case KLCLASS_IO:
- board_name = EDGE_LBL_IO;
- break;
- case KLCLASS_IOBRICK:
- if (brd->brd_type == KLTYPE_PBRICK)
- board_name = EDGE_LBL_PBRICK;
- else if (brd->brd_type == KLTYPE_IBRICK)
- board_name = EDGE_LBL_IBRICK;
- else if (brd->brd_type == KLTYPE_XBRICK)
- board_name = EDGE_LBL_XBRICK;
- else
- board_name = EDGE_LBL_IOBRICK;
- break;
- default:
- board_name = EDGE_LBL_UNKNOWN;
- }
-
- modnum = brd->brd_module;
-
- ASSERT(modnum != MODULE_UNKNOWN && modnum != INVALID_MODULE);
-#ifdef __ia64
- {
- char buffer[16];
- memset(buffer, 0, 16);
- format_module_id(buffer, modnum, MODULE_FORMAT_BRIEF);
- sprintf(path, EDGE_LBL_MODULE "/%s/%s", buffer, board_name);
- }
-#else
- sprintf(path, "%H/%s", modnum, board_name);
-#endif
-}
-
-/*
- * Get the module number for a NASID.
- */
-moduleid_t
-get_module_id(nasid_t nasid)
-{
- lboard_t *brd;
-
- brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
-
- if (!brd)
- return INVALID_MODULE;
- else
- return brd->brd_module;
-}
-
-
-#define MHZ 1000000
-
-
-/* Get the canonical hardware graph name for the given pci component
- * on the given io board.
- */
-void
-device_component_canonical_name_get(lboard_t *brd,
- klinfo_t *component,
- char *name)
-{
- moduleid_t modnum;
- slotid_t slot;
- char board_name[20];
-
- ASSERT(brd);
-
- /* Get the module number of this board */
- modnum = brd->brd_module;
-
- /* Convert the [ CLASS | TYPE ] kind of slotid
- * into a string
- */
- slot = brd->brd_slot;
- ASSERT(modnum != MODULE_UNKNOWN && modnum != INVALID_MODULE);
-
- /* Get the io board name */
- if (!brd || (brd->brd_sversion < 2)) {
- strcpy(name, EDGE_LBL_XWIDGET);
- } else {
- nic_name_convert(brd->brd_name, board_name);
- }
-
- /* Give out the canonical name of the pci device*/
- sprintf(name,
- "/dev/hw/"EDGE_LBL_MODULE "/%x/"EDGE_LBL_SLOT"/%s/"
- EDGE_LBL_PCI"/%d",
- modnum, board_name,KLCF_BRIDGE_W_ID(component));
-}
-
-/*
- * Get the serial number of the main component of a board
- * Returns 0 if a valid serial number is found
- * 1 otherwise.
- * Assumptions: Nic manufacturing string has the following format
- * *Serial:<serial_number>;*
- */
-static int
-component_serial_number_get(lboard_t *board,
- klconf_off_t mfg_nic_offset,
- char *serial_number,
- char *key_pattern)
-{
-
- char *mfg_nic_string;
- char *serial_string,*str;
- int i;
- char *serial_pattern = "Serial:";
-
- /* We have an error on a null mfg nic offset */
- if (!mfg_nic_offset)
- return(1);
- /* Get the hub's manufacturing nic information
- * which is in the form of a pre-formatted string
- */
- mfg_nic_string =
- (char *)NODE_OFFSET_TO_K0(NASID_GET(board),
- mfg_nic_offset);
- /* There is no manufacturing nic info */
- if (!mfg_nic_string)
- return(1);
-
- str = mfg_nic_string;
- /* Look for the key pattern first (if it is specified)
- * and then print the serial number corresponding to that.
- */
- if (strcmp(key_pattern,"") &&
- !(str = strstr(mfg_nic_string,key_pattern)))
- return(1);
-
- /* There is no serial number info in the manufacturing
- * nic info
- */
- if (!(serial_string = strstr(str,serial_pattern)))
- return(1);
-
- serial_string = serial_string + strlen(serial_pattern);
- /* Copy the serial number information from the klconfig */
- i = 0;
- while (serial_string[i] != ';') {
- serial_number[i] = serial_string[i];
- i++;
- }
- serial_number[i] = 0;
-
- return(0);
-}
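
For illustration only, a minimal standalone sketch of the "Serial:<serial_number>;" scan that component_serial_number_get() performs; the manufacturing NIC string below is made up, the real one lives in klconfig:

#include <stdio.h>
#include <string.h>

static int serial_from_mfg_string(const char *mfg, char *out, size_t outlen)
{
	const char *s = strstr(mfg, "Serial:");
	size_t i;

	if (!s)
		return 1;		/* no serial number present */
	s += strlen("Serial:");
	for (i = 0; s[i] && s[i] != ';' && i < outlen - 1; i++)
		out[i] = s[i];		/* copy up to the terminating ';' */
	out[i] = '\0';
	return 0;
}

int main(void)
{
	char serial[32];

	/* hypothetical mfg NIC string, shaped like "*Serial:<serial>;*" */
	if (!serial_from_mfg_string("Part:030-1234-001;Serial:ABC123;Name:IP35;",
				    serial, sizeof(serial)))
		printf("serial = %s\n", serial);	/* prints ABC123 */
	return 0;
}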
-/*
- * Get the serial number of a board
- * Returns 0 if a valid serial number is found
- * 1 otherwise.
- */
-
-int
-board_serial_number_get(lboard_t *board,char *serial_number)
-{
- ASSERT(board && serial_number);
- if (!board || !serial_number)
- return(1);
-
- strcpy(serial_number,"");
- switch(KLCLASS(board->brd_type)) {
- case KLCLASS_CPU: { /* Node board */
- klhub_t *hub;
-
- /* Get the hub component information */
- hub = (klhub_t *)find_first_component(board,
- KLSTRUCT_HUB);
- /* If we don't have a hub component on an IP27
- * then we have a weird klconfig.
- */
- if (!hub)
- return(1);
- /* Get the serial number information from
- * the hub's manufacturing nic info
- */
- if (component_serial_number_get(board,
- hub->hub_mfg_nic,
- serial_number,
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
- "IP37"))
-#else
- "IP27"))
- /* Try with IP31 key if IP27 key fails */
- if (component_serial_number_get(board,
- hub->hub_mfg_nic,
- serial_number,
- "IP31"))
-#endif /* CONFIG_IA64_SGI_SN1 */
- return(1);
- break;
- }
- case KLCLASS_IO: { /* IO board */
- if (KLTYPE(board->brd_type) == KLTYPE_TPU) {
- /* Special case for TPU boards */
- kltpu_t *tpu;
-
- /* Get the tpu component information */
- tpu = (kltpu_t *)find_first_component(board,
- KLSTRUCT_TPU);
- /* If we don't have a tpu component on a tpu board
- * then we have a weird klconfig.
- */
- if (!tpu)
- return(1);
- /* Get the serial number information from
- * the tpu's manufacturing nic info
- */
- if (component_serial_number_get(board,
- tpu->tpu_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- } else if ((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ||
- (KLTYPE(board->brd_type) == KLTYPE_GSN_B)) {
- /* Special case for GSN boards */
- klgsn_t *gsn;
-
- /* Get the gsn component information */
- gsn = (klgsn_t *)find_first_component(board,
- ((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ?
- KLSTRUCT_GSN_A : KLSTRUCT_GSN_B));
- /* If we don't have a gsn component on a gsn board
- * then we have a weird klconfig.
- */
- if (!gsn)
- return(1);
- /* Get the serial number information from
- * the gsn's manufacturing nic info
- */
- if (component_serial_number_get(board,
- gsn->gsn_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- } else {
- klbri_t *bridge;
-
- /* Get the bridge component information */
- bridge = (klbri_t *)find_first_component(board,
- KLSTRUCT_BRI);
- /* If we don't have a bridge component on an IO board
- * then we have a weird klconfig.
- */
- if (!bridge)
- return(1);
- /* Get the serial number information from
- * the bridge's manufacturing nic info
- */
- if (component_serial_number_get(board,
- bridge->bri_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- }
- }
- case KLCLASS_ROUTER: { /* Router board */
- klrou_t *router;
-
- /* Get the router component information */
- router = (klrou_t *)find_first_component(board,
- KLSTRUCT_ROU);
- /* If we don't have a router component on a router board
- * then we have a weird klconfig.
- */
- if (!router)
- return(1);
- /* Get the serial number information from
- * the router's manufacturing nic info
- */
- if (component_serial_number_get(board,
- router->rou_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- }
- case KLCLASS_GFX: { /* Gfx board */
- klgfx_t *graphics;
-
- /* Get the graphics component information */
- graphics = (klgfx_t *)find_first_component(board, KLSTRUCT_GFX);
- /* If we don't have a gfx component on a gfx board
- * then we have a weird klconfig.
- */
- if (!graphics)
- return(1);
- /* Get the serial number information from
- * the graphics's manufacturing nic info
- */
- if (component_serial_number_get(board,
- graphics->gfx_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- }
- default:
- strcpy(serial_number,"");
- break;
- }
- return(0);
-}
-
-#include "asm/sn/sn_private.h"
-
-xwidgetnum_t
-nodevertex_widgetnum_get(devfs_handle_t node_vtx)
-{
- hubinfo_t hubinfo_p;
-
- hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
- (arbitrary_info_t *) &hubinfo_p);
- return(hubinfo_p->h_widgetid);
-}
-
-devfs_handle_t
-nodevertex_xbow_peer_get(devfs_handle_t node_vtx)
-{
- hubinfo_t hubinfo_p;
- nasid_t xbow_peer_nasid;
- cnodeid_t xbow_peer;
-
- hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
- (arbitrary_info_t *) &hubinfo_p);
- xbow_peer_nasid = hubinfo_p->h_nodepda->xbow_peer;
- if(xbow_peer_nasid == INVALID_NASID)
- return ( (devfs_handle_t)-1);
- xbow_peer = NASID_TO_COMPACT_NODEID(xbow_peer_nasid);
- return(NODEPDA(xbow_peer)->node_vertex);
-}
-
-/* NIC Sorting Support */
-
-#define MAX_NICS_PER_STRING 32
-#define MAX_NIC_NAME_LEN 32
-
-static char *
-get_nic_string(lboard_t *lb)
-{
- int i;
- klinfo_t *k = NULL ;
- klconf_off_t mfg_off = 0 ;
- char *mfg_nic = NULL ;
-
- for (i = 0; i < KLCF_NUM_COMPS(lb); i++) {
- k = KLCF_COMP(lb, i) ;
- switch(k->struct_type) {
- case KLSTRUCT_BRI:
- mfg_off = ((klbri_t *)k)->bri_mfg_nic ;
- break ;
-
- case KLSTRUCT_HUB:
- mfg_off = ((klhub_t *)k)->hub_mfg_nic ;
- break ;
-
- case KLSTRUCT_ROU:
- mfg_off = ((klrou_t *)k)->rou_mfg_nic ;
- break ;
-
- case KLSTRUCT_GFX:
- mfg_off = ((klgfx_t *)k)->gfx_mfg_nic ;
- break ;
-
- case KLSTRUCT_TPU:
- mfg_off = ((kltpu_t *)k)->tpu_mfg_nic ;
- break ;
-
- case KLSTRUCT_GSN_A:
- case KLSTRUCT_GSN_B:
- mfg_off = ((klgsn_t *)k)->gsn_mfg_nic ;
- break ;
-
- case KLSTRUCT_XTHD:
- mfg_off = ((klxthd_t *)k)->xthd_mfg_nic ;
- break;
-
- default:
- mfg_off = 0 ;
- break ;
- }
- if (mfg_off)
- break ;
- }
-
- if ((mfg_off) && (k))
- mfg_nic = (char *)NODE_OFFSET_TO_K0(k->nasid, mfg_off) ;
-
- return mfg_nic ;
-}
-
-char *
-get_first_string(char **ptrs, int n)
-{
- int i ;
- char *tmpptr ;
-
- if ((ptrs == NULL) || (n == 0))
- return NULL ;
-
- tmpptr = ptrs[0] ;
-
- if (n == 1)
- return tmpptr ;
-
- for (i = 0 ; i < n ; i++) {
- if (strcmp(tmpptr, ptrs[i]) > 0)
- tmpptr = ptrs[i] ;
- }
-
- return tmpptr ;
-}
-
-int
-get_ptrs(char *idata, char **ptrs, int n, char *label)
-{
- int i = 0 ;
- char *tmp = idata ;
-
- if ((ptrs == NULL) || (idata == NULL) || (label == NULL) || (n == 0))
- return 0 ;
-
- while ( (tmp = strstr(tmp, label)) ){
- tmp += strlen(label) ;
- /* check for empty name field, and last NULL ptr */
- if ((i < (n-1)) && (*tmp != ';')) {
- ptrs[i++] = tmp ;
- }
- }
-
- ptrs[i] = NULL ;
-
- return i ;
-}
-
-/*
- * sort_nic_names
- *
- * Does not really do sorting. Find the alphabetically lowest
- * name among all the nic names found in a nic string.
- *
- * Return:
- * Nothing
- *
- * Side Effects:
- *
- * lb->brd_name gets the new name found
- */
-
-static void
-sort_nic_names(lboard_t *lb)
-{
- char *nic_str ;
- char *ptrs[MAX_NICS_PER_STRING] ;
- char name[MAX_NIC_NAME_LEN] ;
- char *tmp, *tmp1 ;
-
- *name = 0 ;
-
- /* Get the nic pointer from the lb */
-
- if ((nic_str = get_nic_string(lb)) == NULL)
- return ;
-
- tmp = get_first_string(ptrs,
- get_ptrs(nic_str, ptrs, MAX_NICS_PER_STRING, "Name:")) ;
-
- if (tmp == NULL)
- return ;
-
- if ( (tmp1 = strchr(tmp, ';')) ){
- strncpy(name, tmp, tmp1-tmp) ;
- name[tmp1-tmp] = 0 ;
- } else {
- strncpy(name, tmp, (sizeof(name) -1)) ;
- name[sizeof(name)-1] = 0 ;
- }
-
- strcpy(lb->brd_name, name) ;
-}
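
Again only a sketch: the same lowest-"Name:" selection done by get_ptrs()/get_first_string() above, rewritten as plain standalone C over a made-up NIC string:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical NIC string carrying two components */
	const char *nic = "Name:IO9;Serial:X1;Name:IO8;Serial:X2;";
	const char *p = nic, *best = NULL;

	/* walk every "Name:" field and keep the lexicographically lowest,
	 * comparing from the field start just as get_first_string() does */
	while ((p = strstr(p, "Name:")) != NULL) {
		p += strlen("Name:");
		if (*p != ';' && (!best || strncmp(p, best, 32) < 0))
			best = p;
	}
	if (best) {
		char name[32];
		size_t n = strcspn(best, ";");

		if (n >= sizeof(name))
			n = sizeof(name) - 1;
		memcpy(name, best, n);
		name[n] = '\0';
		printf("brd_name would become %s\n", name);	/* IO8 */
	}
	return 0;
}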
-
-
-
-char brick_types[MAX_BRICK_TYPES + 1] = "crikxdp789012345";
-
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
-
-/*
- * Format a module id for printing.
- */
-void
-format_module_id(char *buffer, moduleid_t m, int fmt)
-{
- int rack, position;
- char brickchar;
-
- rack = MODULE_GET_RACK(m);
- ASSERT(MODULE_GET_BTYPE(m) < MAX_BRICK_TYPES);
- brickchar = MODULE_GET_BTCHAR(m);
- position = MODULE_GET_BPOS(m);
-
- if (fmt == MODULE_FORMAT_BRIEF) {
- /* Brief module number format, eg. 002c15 */
-
- /* Decompress the rack number */
- *buffer++ = '0' + RACK_GET_CLASS(rack);
- *buffer++ = '0' + RACK_GET_GROUP(rack);
- *buffer++ = '0' + RACK_GET_NUM(rack);
-
- /* Add the brick type */
- *buffer++ = brickchar;
- }
- else if (fmt == MODULE_FORMAT_LONG) {
- /* Fuller hwgraph format, eg. rack/002/bay/15 */
-
- strcpy(buffer, EDGE_LBL_RACK "/"); buffer += strlen(buffer);
-
- *buffer++ = '0' + RACK_GET_CLASS(rack);
- *buffer++ = '0' + RACK_GET_GROUP(rack);
- *buffer++ = '0' + RACK_GET_NUM(rack);
-
- strcpy(buffer, "/" EDGE_LBL_RPOS "/"); buffer += strlen(buffer);
- }
-
- /* Add the bay position, using at least two digits */
- if (position < 10)
- *buffer++ = '0';
- sprintf(buffer, "%d", position);
-
-}
-
-/*
- * Parse a module id, in either brief or long form.
- * Returns < 0 on error.
- * The long form does not include a brick type, so it defaults to 0 (CBrick)
- */
-int
-parse_module_id(char *buffer)
-{
- unsigned int v, rack, bay, type, form;
- moduleid_t m;
- char c;
-
- if (strstr(buffer, EDGE_LBL_RACK "/") == buffer) {
- form = MODULE_FORMAT_LONG;
- buffer += strlen(EDGE_LBL_RACK "/");
-
- /* A long module ID must be exactly 5 non-template chars. */
- if (strlen(buffer) != strlen("/" EDGE_LBL_RPOS "/") + 5)
- return -1;
- }
- else {
- form = MODULE_FORMAT_BRIEF;
-
- /* A brief module id must be exactly 6 characters */
- if (strlen(buffer) != 6)
- return -2;
- }
-
- /* The rack number must be exactly 3 digits */
- if (!(isdigit(buffer[0]) && isdigit(buffer[1]) && isdigit(buffer[2])))
- return -3;
-
- rack = 0;
- v = *buffer++ - '0';
- if (v > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
- return -4;
- RACK_ADD_CLASS(rack, v);
-
- v = *buffer++ - '0';
- if (v > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
- return -5;
- RACK_ADD_GROUP(rack, v);
-
- v = *buffer++ - '0';
- /* rack numbers are 1-based */
- if (v-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
- return -6;
- RACK_ADD_NUM(rack, v);
-
- if (form == MODULE_FORMAT_BRIEF) {
- /* Next should be a module type character. Accept ucase or lcase. */
- c = *buffer++;
- if (!isalpha(c))
- return -7;
-
- /* strchr() returns a pointer into brick_types[], or NULL */
- type = (unsigned int)(strchr(brick_types, tolower(c)) - brick_types);
- if (type > MODULE_BTYPE_MASK >> MODULE_BTYPE_SHFT)
- return -8;
- }
- else {
- /* Hardcode the module type, and skip over the boilerplate */
- type = MODULE_CBRICK;
-
- if (strstr(buffer, "/" EDGE_LBL_RPOS "/") != buffer)
- return -9;
-
- buffer += strlen("/" EDGE_LBL_RPOS "/");
- }
-
- /* The bay number is last. Make sure it's exactly two digits */
-
- if (!(isdigit(buffer[0]) && isdigit(buffer[1]) && !buffer[2]))
- return -10;
-
- bay = 10 * (buffer[0] - '0') + (buffer[1] - '0');
-
- if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
- return -11;
-
- m = RBT_TO_MODULE(rack, bay, type);
-
- /* avoid sign extending the moduleid_t */
- return (int)(unsigned short)m;
-}
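
A small worked example of the brief module-id layout parsed above (three rack digits, one brick-type character, two bay digits, e.g. 002c15). The helper below is a hypothetical standalone parser, not the kernel routine:

#include <stdio.h>
#include <ctype.h>
#include <string.h>

/* standalone parser for the brief "RRRbNN" form only */
static int parse_brief(const char *s, int *rack, char *brick, int *bay)
{
	if (strlen(s) != 6 ||
	    !isdigit(s[0]) || !isdigit(s[1]) || !isdigit(s[2]) ||
	    !isalpha(s[3]) || !isdigit(s[4]) || !isdigit(s[5]))
		return -1;
	*rack  = (s[0] - '0') * 100 + (s[1] - '0') * 10 + (s[2] - '0');
	*brick = (char)tolower(s[3]);
	*bay   = (s[4] - '0') * 10 + (s[5] - '0');
	return 0;
}

int main(void)
{
	int rack, bay;
	char brick;

	if (!parse_brief("002c15", &rack, &brick, &bay))
		printf("rack %03d, %c-brick, bay %02d\n", rack, brick, bay);
	return 0;
}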
-
-#else /* CONFIG_IA64_SGI_SN1 */
-
-/*
- * Format a module id for printing.
- */
-void
-format_module_id(char *buffer, moduleid_t m, int fmt)
-{
- if (fmt == MODULE_FORMAT_BRIEF) {
- sprintf(buffer, "%d", m);
- }
- else if (fmt == MODULE_FORMAT_LONG) {
- sprintf(buffer, EDGE_LBL_MODULE "/%d", m);
- }
-}
-
-/*
- * Parse a module id, in either brief or long form.
- * Returns < 0 on error.
- */
-int
-parse_module_id(char *buffer)
-{
- moduleid_t m;
- char c;
-
- if (strstr(buffer, EDGE_LBL_MODULE "/") == buffer)
- buffer += strlen(EDGE_LBL_MODULE "/");
-
- for (m = 0; *buffer; buffer++) {
- c = *buffer;
- if (!isdigit(c))
- return -1;
- m = 10 * m + (c - '0');
- }
-
- /* avoid sign extending the moduleid_t */
- return (int)(unsigned short)m;
-}
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
diff --git a/arch/ia64/sn/io/klgraph.c b/arch/ia64/sn/io/klgraph.c
deleted file mode 100644
index dff9b11dee0d93..00000000000000
--- a/arch/ia64/sn/io/klgraph.c
+++ /dev/null
@@ -1,804 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * klgraph.c-
- * This file specifies the interface between the kernel and the PROM's
- * configuration data structures.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/kldir.h>
-#include <asm/sn/gda.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/router.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/hcl_util.h>
-
-/* #define KLGRAPH_DEBUG 1 */
-#ifdef KLGRAPH_DEBUG
-#define GRPRINTF(x) printk x
-#define CE_GRPANIC CE_PANIC
-#else
-#define GRPRINTF(x)
-#define CE_GRPANIC CE_PANIC
-#endif
-
-#include <asm/sn/sn_private.h>
-
-extern char arg_maxnodes[];
-extern u64 klgraph_addr[];
-
-/*
- * Support for verbose inventory via hardware graph.
- * klhwg_invent_alloc allocates the necessary size of inventory information
- * and fills in the generic information.
- */
-invent_generic_t *
-klhwg_invent_alloc(cnodeid_t cnode, int class, int size)
-{
- invent_generic_t *invent;
-
- invent = kern_malloc(size);
- if (!invent) return NULL;
-
- invent->ig_module = NODE_MODULEID(cnode);
- invent->ig_slot = SLOTNUM_GETSLOT(NODE_SLOTID(cnode));
- invent->ig_invclass = class;
-
- return invent;
-}
-
-/*
- * Add information about the baseio prom version number
- * as a part of detailed inventory info in the hwgraph.
- */
-void
-klhwg_baseio_inventory_add(devfs_handle_t baseio_vhdl,cnodeid_t cnode)
-{
- invent_miscinfo_t *baseio_inventory;
- unsigned char version = 0,revision = 0;
-
- /* Allocate memory for the "detailed inventory" info
- * for the baseio
- */
- baseio_inventory = (invent_miscinfo_t *)
- klhwg_invent_alloc(cnode, INV_PROM, sizeof(invent_miscinfo_t));
- baseio_inventory->im_type = INV_IO6PROM;
- /* Read the io6prom revision from the nvram */
-#ifdef LATER
- nvram_prom_version_get(&version,&revision);
-#endif
- /* Store the revision info in the inventory */
- baseio_inventory->im_version = version;
- baseio_inventory->im_rev = revision;
- /* Put the inventory info in the hardware graph */
- hwgraph_info_add_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT,
- (arbitrary_info_t) baseio_inventory);
- /* Make the information available to the user programs
- * thru hwgfs.
- */
- hwgraph_info_export_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT,
- sizeof(invent_miscinfo_t));
-}
-
-char *hub_rev[] = {
- "0.0",
- "1.0",
- "2.0",
- "2.1",
- "2.2",
- "2.3"
-};
-
-/*
- * Add detailed cpu inventory info to the hardware graph.
- */
-void
-klhwg_hub_invent_info(devfs_handle_t hubv,
- cnodeid_t cnode,
- klhub_t *hub)
-{
- invent_miscinfo_t *hub_invent;
-
- hub_invent = (invent_miscinfo_t *)
- klhwg_invent_alloc(cnode, INV_MISC, sizeof(invent_miscinfo_t));
- if (!hub_invent)
- return;
-
- if (KLCONFIG_INFO_ENABLED((klinfo_t *)hub))
- hub_invent->im_gen.ig_flag = INVENT_ENABLED;
-
- hub_invent->im_type = INV_HUB;
- hub_invent->im_rev = hub->hub_info.revision;
- hub_invent->im_speed = hub->hub_speed;
- hwgraph_info_add_LBL(hubv, INFO_LBL_DETAIL_INVENT,
- (arbitrary_info_t) hub_invent);
- hwgraph_info_export_LBL(hubv, INFO_LBL_DETAIL_INVENT,
- sizeof(invent_miscinfo_t));
-}
-
-/* ARGSUSED */
-void
-klhwg_add_hub(devfs_handle_t node_vertex, klhub_t *hub, cnodeid_t cnode)
-{
-#if defined(CONFIG_IA64_SGI_SN1)
- devfs_handle_t myhubv;
- devfs_handle_t hub_mon;
- devfs_handle_t synergy;
- devfs_handle_t fsb0;
- devfs_handle_t fsb1;
- int rc;
- extern struct file_operations hub_mon_fops;
-
- GRPRINTF(("klhwg_add_hub: adding %s\n", EDGE_LBL_HUB));
-
- (void) hwgraph_path_add(node_vertex, EDGE_LBL_HUB, &myhubv);
- rc = device_master_set(myhubv, node_vertex);
-
- /*
- * hub perf stats.
- */
- rc = hwgraph_info_add_LBL(myhubv, INFO_LBL_HUB_INFO,
- (arbitrary_info_t)(&NODEPDA(cnode)->hubstats));
-
- if (rc != GRAPH_SUCCESS) {
- printk(KERN_WARNING "klhwg_add_hub: Can't add hub info label 0x%p, code %d",
- (void *)myhubv, rc);
- }
-
- klhwg_hub_invent_info(myhubv, cnode, hub);
-
- hub_mon = hwgraph_register(myhubv, EDGE_LBL_PERFMON,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &hub_mon_fops,
- (void *)(long)cnode);
-
- init_hub_stats(cnode, NODEPDA(cnode));
-
- /*
- * synergy perf
- */
- (void) hwgraph_path_add(myhubv, EDGE_LBL_SYNERGY, &synergy);
- (void) hwgraph_path_add(synergy, "0", &fsb0);
- (void) hwgraph_path_add(synergy, "1", &fsb1);
-
- fsb0 = hwgraph_register(fsb0, EDGE_LBL_PERFMON,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &synergy_mon_fops, (void *)SYNERGY_PERF_INFO(cnode, 0));
-
- fsb1 = hwgraph_register(fsb1, EDGE_LBL_PERFMON,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &synergy_mon_fops, (void *)SYNERGY_PERF_INFO(cnode, 1));
-#endif /* CONFIG_IA64_SGI_SN1 */
-}
-
-void
-klhwg_add_xbow(cnodeid_t cnode, nasid_t nasid)
-{
- lboard_t *brd;
- klxbow_t *xbow_p;
- nasid_t hub_nasid;
- cnodeid_t hub_cnode;
- int widgetnum;
- devfs_handle_t xbow_v, hubv;
- /*REFERENCED*/
- graph_error_t err;
-
- if ((brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IOBRICK_XBOW)) == NULL)
- return;
-
- if (KL_CONFIG_DUPLICATE_BOARD(brd))
- return;
-
- GRPRINTF(("klhwg_add_xbow: adding cnode %d nasid %d xbow edges\n",
- cnode, nasid));
-
- if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
- == NULL)
- return;
-
-#ifdef LATER
- /*
- * We cannot support this function in devfs .. see below where
- * we use hwgraph_path_add() to create this vertex with a known
- * name.
- */
- err = hwgraph_vertex_create(&xbow_v);
- ASSERT(err == GRAPH_SUCCESS);
-
- xswitch_vertex_init(xbow_v);
-#endif /* LATER */
-
- for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
- if (!XBOW_PORT_TYPE_HUB(xbow_p, widgetnum))
- continue;
-
- hub_nasid = XBOW_PORT_NASID(xbow_p, widgetnum);
- if (hub_nasid == INVALID_NASID) {
- printk(KERN_WARNING "hub widget %d, skipping xbow graph\n", widgetnum);
- continue;
- }
-
- hub_cnode = NASID_TO_COMPACT_NODEID(hub_nasid);
-
- if (is_specified(arg_maxnodes) && hub_cnode == INVALID_CNODEID) {
- continue;
- }
-
- hubv = cnodeid_to_vertex(hub_cnode);
-
- err = hwgraph_path_add(hubv, EDGE_LBL_XTALK, &xbow_v);
- if (err != GRAPH_SUCCESS) {
- if (err == GRAPH_DUP)
- printk(KERN_WARNING "klhwg_add_xbow: Check for "
- "working routers and router links!");
-
- PRINT_PANIC("klhwg_add_xbow: Failed to add "
- "edge: vertex 0x%p to vertex 0x%p,"
- "error %d\n",
- (void *)hubv, (void *)xbow_v, err);
- }
- xswitch_vertex_init(xbow_v);
-
- NODEPDA(hub_cnode)->xbow_vhdl = xbow_v;
-
- /*
-		 * XXX - This won't work if we ever hook up two hubs
-		 * by crosstown through a crossbow.
- */
- if (hub_nasid != nasid) {
- NODEPDA(hub_cnode)->xbow_peer = nasid;
- NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->xbow_peer =
- hub_nasid;
- }
-
- GRPRINTF(("klhwg_add_xbow: adding port nasid %d %s to vertex 0x%p\n",
- hub_nasid, EDGE_LBL_XTALK, hubv));
-
-#ifdef LATER
- err = hwgraph_edge_add(hubv, xbow_v, EDGE_LBL_XTALK);
- if (err != GRAPH_SUCCESS) {
- if (err == GRAPH_DUP)
- printk(KERN_WARNING "klhwg_add_xbow: Check for "
- "working routers and router links!");
-
- PRINT_PANIC("klhwg_add_xbow: Failed to add "
- "edge: vertex 0x%p (0x%p) to vertex 0x%p (0x%p), "
- "error %d\n",
- hubv, hubv, xbow_v, xbow_v, err);
- }
-#endif
- }
-}
-
-
-/* ARGSUSED */
-void
-klhwg_add_node(devfs_handle_t hwgraph_root, cnodeid_t cnode, gda_t *gdap)
-{
- nasid_t nasid;
- lboard_t *brd;
- klhub_t *hub;
- devfs_handle_t node_vertex = NULL;
- char path_buffer[100];
- int rv;
- char *s;
- int board_disabled = 0;
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
- brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
- GRPRINTF(("klhwg_add_node: Adding cnode %d, nasid %d, brd 0x%p\n",
- cnode, nasid, brd));
- ASSERT(brd);
-
- do {
-
- /* Generate a hardware graph path for this board. */
- board_to_path(brd, path_buffer);
-
- GRPRINTF(("klhwg_add_node: adding %s to vertex 0x%p\n",
- path_buffer, hwgraph_root));
- rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
-
- if (rv != GRAPH_SUCCESS)
- PRINT_PANIC("Node vertex creation failed. "
- "Path == %s",
- path_buffer);
-
- hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
- ASSERT(hub);
- if(hub->hub_info.flags & KLINFO_ENABLE)
- board_disabled = 0;
- else
- board_disabled = 1;
-
- if(!board_disabled) {
- mark_nodevertex_as_node(node_vertex,
- cnode + board_disabled * numnodes);
-
- s = dev_to_name(node_vertex, path_buffer, sizeof(path_buffer));
- NODEPDA(cnode)->hwg_node_name =
- kmalloc(strlen(s) + 1,
- GFP_KERNEL);
- ASSERT_ALWAYS(NODEPDA(cnode)->hwg_node_name != NULL);
- strcpy(NODEPDA(cnode)->hwg_node_name, s);
-
- hubinfo_set(node_vertex, NODEPDA(cnode)->pdinfo);
-
- /* Set up node board's slot */
- NODEPDA(cnode)->slotdesc = brd->brd_slot;
-
- /* Set up the module we're in */
- NODEPDA(cnode)->module_id = brd->brd_module;
- NODEPDA(cnode)->module = module_lookup(brd->brd_module);
- }
-
- if(!board_disabled)
- klhwg_add_hub(node_vertex, hub, cnode);
-
- brd = KLCF_NEXT(brd);
- if (brd)
- brd = find_lboard(brd, KLTYPE_SNIA);
- else
- break;
- } while(brd);
-}
-
-
-/* ARGSUSED */
-void
-klhwg_add_all_routers(devfs_handle_t hwgraph_root)
-{
- nasid_t nasid;
- cnodeid_t cnode;
- lboard_t *brd;
- devfs_handle_t node_vertex;
- char path_buffer[100];
- int rv;
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- GRPRINTF(("klhwg_add_all_routers: adding router on cnode %d\n",
- cnode));
-
- brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
- KLTYPE_ROUTER);
-
- if (!brd)
- /* No routers stored in this node's memory */
- continue;
-
- do {
- ASSERT(brd);
- GRPRINTF(("Router board struct is %p\n", brd));
-
- /* Don't add duplicate boards. */
- if (brd->brd_flags & DUPLICATE_BOARD)
- continue;
-
- GRPRINTF(("Router 0x%p module number is %d\n", brd, brd->brd_module));
- /* Generate a hardware graph path for this board. */
- board_to_path(brd, path_buffer);
-
- GRPRINTF(("Router path is %s\n", path_buffer));
-
- /* Add the router */
- GRPRINTF(("klhwg_add_all_routers: adding %s to vertex 0x%p\n",
- path_buffer, hwgraph_root));
- rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
-
- if (rv != GRAPH_SUCCESS)
- PRINT_PANIC("Router vertex creation "
- "failed. Path == %s",
- path_buffer);
-
- GRPRINTF(("klhwg_add_all_routers: get next board from 0x%p\n",
- brd));
- /* Find the rest of the routers stored on this node. */
- } while ( (brd = find_lboard_class(KLCF_NEXT(brd),
- KLTYPE_ROUTER)) );
-
- GRPRINTF(("klhwg_add_all_routers: Done.\n"));
- }
-
-}
-
-/* ARGSUSED */
-void
-klhwg_connect_one_router(devfs_handle_t hwgraph_root, lboard_t *brd,
- cnodeid_t cnode, nasid_t nasid)
-{
- klrou_t *router;
- char path_buffer[50];
- char dest_path[50];
- devfs_handle_t router_hndl;
- devfs_handle_t dest_hndl;
- int rc;
- int port;
- lboard_t *dest_brd;
-
- GRPRINTF(("klhwg_connect_one_router: Connecting router on cnode %d\n",
- cnode));
-
- /* Don't add duplicate boards. */
- if (brd->brd_flags & DUPLICATE_BOARD) {
- GRPRINTF(("klhwg_connect_one_router: Duplicate router 0x%p on cnode %d\n",
- brd, cnode));
- return;
- }
-
- /* Generate a hardware graph path for this board. */
- board_to_path(brd, path_buffer);
-
- rc = hwgraph_traverse(hwgraph_root, path_buffer, &router_hndl);
-
- if (rc != GRAPH_SUCCESS && is_specified(arg_maxnodes))
- return;
-
- if (rc != GRAPH_SUCCESS)
- printk(KERN_WARNING "Can't find router: %s", path_buffer);
-
- /* We don't know what to do with multiple router components */
- if (brd->brd_numcompts != 1) {
- PRINT_PANIC("klhwg_connect_one_router: %d cmpts on router\n",
- brd->brd_numcompts);
- return;
- }
-
-
- /* Convert component 0 to klrou_t ptr */
- router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd),
- brd->brd_compts[0]);
-
- for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
- /* See if the port's active */
- if (router->rou_port[port].port_nasid == INVALID_NASID) {
- GRPRINTF(("klhwg_connect_one_router: port %d inactive.\n",
- port));
- continue;
- }
- if (is_specified(arg_maxnodes) && NASID_TO_COMPACT_NODEID(router->rou_port[port].port_nasid)
- == INVALID_CNODEID) {
- continue;
- }
-
- dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
- router->rou_port[port].port_nasid,
- router->rou_port[port].port_offset);
-
- /* Generate a hardware graph path for this board. */
- board_to_path(dest_brd, dest_path);
-
- rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);
-
- if (rc != GRAPH_SUCCESS) {
- if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
- continue;
- PRINT_PANIC("Can't find router: %s", dest_path);
- }
- GRPRINTF(("klhwg_connect_one_router: Link from %s/%d to %s\n",
- path_buffer, port, dest_path));
-
- sprintf(dest_path, "%d", port);
-
- rc = hwgraph_edge_add(router_hndl, dest_hndl, dest_path);
-
- if (rc == GRAPH_DUP) {
- GRPRINTF(("Skipping port %d. nasid %d %s/%s\n",
- port, router->rou_port[port].port_nasid,
- path_buffer, dest_path));
- continue;
- }
-
- if (rc != GRAPH_SUCCESS && !is_specified(arg_maxnodes))
- PRINT_PANIC("Can't create edge: %s/%s to vertex 0x%p error 0x%x\n",
- path_buffer, dest_path, (void *)dest_hndl, rc);
-
- }
-}
-
-
-void
-klhwg_connect_routers(devfs_handle_t hwgraph_root)
-{
- nasid_t nasid;
- cnodeid_t cnode;
- lboard_t *brd;
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- GRPRINTF(("klhwg_connect_routers: Connecting routers on cnode %d\n",
- cnode));
-
- brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
- KLTYPE_ROUTER);
-
- if (!brd)
- continue;
-
- do {
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- klhwg_connect_one_router(hwgraph_root, brd,
- cnode, nasid);
-
- /* Find the rest of the routers stored on this node. */
- } while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
- }
-}
-
-
-
-void
-klhwg_connect_hubs(devfs_handle_t hwgraph_root)
-{
- nasid_t nasid;
- cnodeid_t cnode;
- lboard_t *brd;
- klhub_t *hub;
- lboard_t *dest_brd;
- devfs_handle_t hub_hndl;
- devfs_handle_t dest_hndl;
- char path_buffer[50];
- char dest_path[50];
- graph_error_t rc;
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- GRPRINTF(("klhwg_connect_hubs: Connecting hubs on cnode %d\n",
- cnode));
-
- brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
- ASSERT(brd);
-
- hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
- ASSERT(hub);
-
- /* See if the port's active */
- if (hub->hub_port.port_nasid == INVALID_NASID) {
- GRPRINTF(("klhwg_connect_hubs: port inactive.\n"));
- continue;
- }
-
- if (is_specified(arg_maxnodes) && NASID_TO_COMPACT_NODEID(hub->hub_port.port_nasid) == INVALID_CNODEID)
- continue;
-
- /* Generate a hardware graph path for this board. */
- board_to_path(brd, path_buffer);
-
- GRPRINTF(("klhwg_connect_hubs: Hub path is %s.\n", path_buffer));
- rc = hwgraph_traverse(hwgraph_root, path_buffer, &hub_hndl);
-
- if (rc != GRAPH_SUCCESS)
- printk(KERN_WARNING "Can't find hub: %s", path_buffer);
-
- dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
- hub->hub_port.port_nasid,
- hub->hub_port.port_offset);
-
- /* Generate a hardware graph path for this board. */
- board_to_path(dest_brd, dest_path);
-
- rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);
-
- if (rc != GRAPH_SUCCESS) {
- if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
- continue;
- PRINT_PANIC("Can't find board: %s", dest_path);
- } else {
-
-
- GRPRINTF(("klhwg_connect_hubs: Link from %s to %s.\n",
- path_buffer, dest_path));
-
- rc = hwgraph_edge_add(hub_hndl, dest_hndl, EDGE_LBL_INTERCONNECT);
-
- if (rc != GRAPH_SUCCESS)
- PRINT_PANIC("Can't create edge: %s/%s to vertex 0x%p, error 0x%x\n",
- path_buffer, dest_path, (void *)dest_hndl, rc);
-
- }
- }
-}
-
-/* Store the pci/vme disabled board information as extended administrative
- * hints which can later be used by the drivers using the device/driver
- * admin interface.
- */
-void
-klhwg_device_disable_hints_add(void)
-{
- cnodeid_t cnode; /* node we are looking at */
- nasid_t nasid; /* nasid of the node */
- lboard_t *board; /* board we are looking at */
- int comp_index; /* component index */
- klinfo_t *component; /* component in the board we are
- * looking at
- */
- char device_name[MAXDEVNAME];
-
-#ifdef LATER
- device_admin_table_init();
-#endif
- for(cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
- board = (lboard_t *)KL_CONFIG_INFO(nasid);
- /* Check out all the board info stored on a node */
- while(board) {
- /* No need to look at duplicate boards or non-io
- * boards
- */
- if (KL_CONFIG_DUPLICATE_BOARD(board) ||
- KLCLASS(board->brd_type) != KLCLASS_IO) {
- board = KLCF_NEXT(board);
- continue;
- }
- /* Check out all the components of a board */
- for (comp_index = 0;
- comp_index < KLCF_NUM_COMPS(board);
- comp_index++) {
- component = KLCF_COMP(board,comp_index);
- /* If the component is enabled move on to
- * the next component
- */
- if (KLCONFIG_INFO_ENABLED(component))
- continue;
- /* NOTE : Since the prom only supports
- * the disabling of pci devices the following
- * piece of code makes sense.
- * Make sure that this assumption is valid
- */
- /* This component is disabled. Store this
- * hint in the extended device admin table
- */
- /* Get the canonical name of the pci device */
- device_component_canonical_name_get(board,
- component,
- device_name);
-#ifdef LATER
- device_admin_table_update(device_name,
- ADMIN_LBL_DISABLED,
- "yes");
-#endif
-#ifdef DEBUG
- printf("%s DISABLED\n",device_name);
-#endif
- }
- /* go to the next board info stored on this
- * node
- */
- board = KLCF_NEXT(board);
- }
- }
-}
-
-void
-klhwg_add_all_modules(devfs_handle_t hwgraph_root)
-{
- cmoduleid_t cm;
- char name[128];
- devfs_handle_t vhdl;
- int rc;
- char buffer[16];
-
- /* Add devices under each module */
-
- for (cm = 0; cm < nummodules; cm++) {
- /* Use module as module vertex fastinfo */
-
-#ifdef __ia64
- memset(buffer, 0, 16);
- format_module_id(buffer, modules[cm]->id, MODULE_FORMAT_BRIEF);
- sprintf(name, EDGE_LBL_MODULE "/%s", buffer);
-#else
- sprintf(name, EDGE_LBL_MODULE "/%x", modules[cm]->id);
-#endif
-
- rc = hwgraph_path_add(hwgraph_root, name, &vhdl);
- ASSERT(rc == GRAPH_SUCCESS);
- rc = rc;
-
- hwgraph_fastinfo_set(vhdl, (arbitrary_info_t) modules[cm]);
-
- /* Add system controller */
-
-#ifdef __ia64
- sprintf(name,
- EDGE_LBL_MODULE "/%s/" EDGE_LBL_L1,
- buffer);
-#else
- sprintf(name,
- EDGE_LBL_MODULE "/%x/" EDGE_LBL_L1,
- modules[cm]->id);
-#endif
-
- rc = hwgraph_path_add(hwgraph_root, name, &vhdl);
- ASSERT_ALWAYS(rc == GRAPH_SUCCESS);
- rc = rc;
-
- hwgraph_info_add_LBL(vhdl,
- INFO_LBL_ELSC,
- (arbitrary_info_t) (__psint_t) 1);
-
-#ifdef LATER
- sndrv_attach(vhdl);
-#else
- /*
- * We need to call the drivers attach routine ..
- */
- FIXME("klhwg_add_all_modules: Need code to call driver attach.\n");
-#endif
- }
-}
-
-void
-klhwg_add_all_nodes(devfs_handle_t hwgraph_root)
-{
- //gda_t *gdap = GDA;
- gda_t *gdap;
- cnodeid_t cnode;
-
- gdap = (gda_t *)0xe000000000002400;
-
- FIXME("klhwg_add_all_nodes: FIX GDA\n");
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- ASSERT(gdap->g_nasidtable[cnode] != INVALID_NASID);
- klhwg_add_node(hwgraph_root, cnode, gdap);
- }
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- ASSERT(gdap->g_nasidtable[cnode] != INVALID_NASID);
-
- klhwg_add_xbow(cnode, gdap->g_nasidtable[cnode]);
- }
-
- /*
- * As for router hardware inventory information, we set this
- * up in router.c.
- */
-
- klhwg_add_all_routers(hwgraph_root);
- klhwg_connect_routers(hwgraph_root);
- klhwg_connect_hubs(hwgraph_root);
-
- /* Assign guardian nodes to each of the
- * routers in the system.
- */
-
-#ifdef LATER
- router_guardians_set(hwgraph_root);
-#endif
-
- /* Go through the entire system's klconfig
- * to figure out which pci components have been disabled
- */
- klhwg_device_disable_hints_add();
-
-}
diff --git a/arch/ia64/sn/io/klgraph_hack.c b/arch/ia64/sn/io/klgraph_hack.c
deleted file mode 100644
index e8cefe29e4ea5f..00000000000000
--- a/arch/ia64/sn/io/klgraph_hack.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-/*
- * This is a temporary file that statically initializes the expected
- * initial klgraph information that is normally provided by prom.
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/simulator.h>
-
-extern u64 klgraph_addr[];
-void * real_port;
-void * real_io_base;
-void * real_addr;
-
-char *BW0 = NULL;
-
-kl_config_hdr_t *linux_klcfg;
-
-#ifdef DEFINE_DUMP_RTNS
-/* forward declarations */
-static void dump_ii(void), dump_crossbow(void);
-static void clear_ii_error(void);
-#endif /* DEFINE_DUMP_RTNS */
-
-#define SYNERGY_WIDGET ((char *)0xc0000e0000000000)
-#define SYNERGY_SWIZZLE ((char *)0xc0000e0000000400)
-#define HUBREG ((char *)0xc0000a0001e00000)
-#define WIDGET0 ((char *)0xc0000a0000000000)
-#define WIDGET4 ((char *)0xc0000a0000000004)
-
-#define SYNERGY_WIDGET ((char *)0xc0000e0000000000)
-#define SYNERGY_SWIZZLE ((char *)0xc0000e0000000400)
-#define HUBREG ((char *)0xc0000a0001e00000)
-#define WIDGET0 ((char *)0xc0000a0000000000)
-
-#define convert(a,b,c) temp = (u64 *)a; *temp = b; temp++; *temp = c
-
-void
-klgraph_hack_init(void)
-{
-
- u64 *temp;
-
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * We need to know whether we are booting from PROM or
- * boot from disk.
- */
- linux_klcfg = (kl_config_hdr_t *)0xe000000000030000;
- if (linux_klcfg->ch_magic == 0xbeedbabe) {
- return;
- } else {
- panic("klgraph_hack_init: Unable to locate KLCONFIG TABLE\n");
- }
-
- convert(0x0000000000030000, 0x00000000beedbabe, 0x0000004800000000);
-
-#else
-
- if (IS_RUNNING_ON_SIMULATOR()) {
-		printk("Creating FAKE Klconfig Structure for Embedded Kernel\n");
- klgraph_addr[0] = 0xe000003000030000;
-
- /*
- * klconfig entries initialization - mankato
- */
- convert(0xe000003000030000, 0x00000000beedbabe, 0x0000004800000000);
- convert(0xe000003000030010, 0x0003007000000018, 0x800002000f820178);
- convert(0xe000003000030020, 0x80000a000f024000, 0x800002000f800000);
- convert(0xe000003000030030, 0x0300fafa00012580, 0x00000000040f0000);
- convert(0xe000003000030040, 0x0000000000000000, 0x0003097000030070);
- convert(0xe000003000030050, 0x00030970000303b0, 0x0003181000033f70);
- convert(0xe000003000030060, 0x0003d51000037570, 0x0000000000038330);
- convert(0xe000003000030070, 0x0203110100030140, 0x0001000000000101);
- convert(0xe000003000030080, 0x0900000000000000, 0x000000004e465e67);
- convert(0xe000003000030090, 0x0003097000000000, 0x00030b1000030a40);
- convert(0xe0000030000300a0, 0x00030cb000030be0, 0x000315a0000314d0);
- convert(0xe0000030000300b0, 0x0003174000031670, 0x0000000000000000);
- convert(0xe000003000030100, 0x000000000000001a, 0x3350490000000000);
- convert(0xe000003000030110, 0x0000000000000037, 0x0000000000000000);
- convert(0xe000003000030140, 0x0002420100030210, 0x0001000000000101);
- convert(0xe000003000030150, 0x0100000000000000, 0xffffffffffffffff);
- convert(0xe000003000030160, 0x00030d8000000000, 0x0000000000030e50);
- convert(0xe0000030000301c0, 0x0000000000000000, 0x0000000000030070);
- convert(0xe0000030000301d0, 0x0000000000000025, 0x424f490000000000);
- convert(0xe0000030000301e0, 0x000000004b434952, 0x0000000000000000);
- convert(0xe000003000030210, 0x00027101000302e0, 0x00010000000e4101);
- convert(0xe000003000030220, 0x0200000000000000, 0xffffffffffffffff);
- convert(0xe000003000030230, 0x00030f2000000000, 0x0000000000030ff0);
- convert(0xe000003000030290, 0x0000000000000000, 0x0000000000030140);
- convert(0xe0000030000302a0, 0x0000000000000026, 0x7262490000000000);
- convert(0xe0000030000302b0, 0x00000000006b6369, 0x0000000000000000);
- convert(0xe0000030000302e0, 0x0002710100000000, 0x00010000000f3101);
- convert(0xe0000030000302f0, 0x0500000000000000, 0xffffffffffffffff);
- convert(0xe000003000030300, 0x000310c000000000, 0x0003126000031190);
- convert(0xe000003000030310, 0x0003140000031330, 0x0000000000000000);
- convert(0xe000003000030360, 0x0000000000000000, 0x0000000000030140);
- convert(0xe000003000030370, 0x0000000000000029, 0x7262490000000000);
- convert(0xe000003000030380, 0x00000000006b6369, 0x0000000000000000);
- convert(0xe000003000030970, 0x0000000002010102, 0x0000000000000000);
- convert(0xe000003000030980, 0x000000004e465e67, 0xffffffff00000000);
- /* convert(0x00000000000309a0, 0x0000000000037570, 0x0000000100000000); */
- convert(0xe0000030000309a0, 0x0000000000037570, 0xffffffff00000000);
- convert(0xe0000030000309b0, 0x0000000000030070, 0x0000000000000000);
- convert(0xe0000030000309c0, 0x000000000003f420, 0x0000000000000000);
- convert(0xe000003000030a40, 0x0000000002010125, 0x0000000000000000);
- convert(0xe000003000030a50, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe000003000030a70, 0x0000000000037b78, 0x0000000000000000);
- convert(0xe000003000030b10, 0x0000000002010125, 0x0000000000000000);
- convert(0xe000003000030b20, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe000003000030b40, 0x0000000000037d30, 0x0000000000000001);
- convert(0xe000003000030be0, 0x00000000ff010203, 0x0000000000000000);
- convert(0xe000003000030bf0, 0xffffffffffffffff, 0xffffffff000000ff);
- convert(0xe000003000030c10, 0x0000000000037ee8, 0x0100010000000200);
- convert(0xe000003000030cb0, 0x00000000ff310111, 0x0000000000000000);
- convert(0xe000003000030cc0, 0xffffffffffffffff, 0x0000000000000000);
- convert(0xe000003000030d80, 0x0000000002010104, 0x0000000000000000);
- convert(0xe000003000030d90, 0xffffffffffffffff, 0x00000000000000ff);
- convert(0xe000003000030db0, 0x0000000000037f18, 0x0000000000000000);
- convert(0xe000003000030dc0, 0x0000000000000000, 0x0003007000060000);
- convert(0xe000003000030de0, 0x0000000000000000, 0x0003021000050000);
- convert(0xe000003000030df0, 0x000302e000050000, 0x0000000000000000);
- convert(0xe000003000030e30, 0x0000000000000000, 0x000000000000000a);
- convert(0xe000003000030e50, 0x00000000ff00011a, 0x0000000000000000);
- convert(0xe000003000030e60, 0xffffffffffffffff, 0x0000000000000000);
- convert(0xe000003000030e80, 0x0000000000037fe0, 0x9e6e9e9e9e9e9e9e);
- convert(0xe000003000030e90, 0x000000000000bc6e, 0x0000000000000000);
- convert(0xe000003000030f20, 0x0000000002010205, 0x00000000d0020000);
- convert(0xe000003000030f30, 0xffffffffffffffff, 0x0000000e0000000e);
- convert(0xe000003000030f40, 0x000000000000000e, 0x0000000000000000);
- convert(0xe000003000030f50, 0x0000000000038010, 0x00000000000007ff);
- convert(0xe000003000030f70, 0x0000000000000000, 0x0000000022001077);
- convert(0xe000003000030fa0, 0x0000000000000000, 0x000000000003f4a8);
- convert(0xe000003000030ff0, 0x0000000000310120, 0x0000000000000000);
- convert(0xe000003000031000, 0xffffffffffffffff, 0xffffffff00000002);
- convert(0xe000003000031010, 0x000000000000000e, 0x0000000000000000);
- convert(0xe000003000031020, 0x0000000000038088, 0x0000000000000000);
- convert(0xe0000030000310c0, 0x0000000002010205, 0x00000000d0020000);
- convert(0xe0000030000310d0, 0xffffffffffffffff, 0x0000000f0000000f);
- convert(0xe0000030000310e0, 0x000000000000000f, 0x0000000000000000);
- convert(0xe0000030000310f0, 0x00000000000380b8, 0x00000000000007ff);
- convert(0xe000003000031120, 0x0000000022001077, 0x00000000000310a9);
- convert(0xe000003000031130, 0x00000000580211c1, 0x000000008009104c);
- convert(0xe000003000031140, 0x0000000000000000, 0x000000000003f4c0);
- convert(0xe000003000031190, 0x0000000000310120, 0x0000000000000000);
- convert(0xe0000030000311a0, 0xffffffffffffffff, 0xffffffff00000003);
- convert(0xe0000030000311b0, 0x000000000000000f, 0x0000000000000000);
- convert(0xe0000030000311c0, 0x0000000000038130, 0x0000000000000000);
- convert(0xe000003000031260, 0x0000000000110106, 0x0000000000000000);
- convert(0xe000003000031270, 0xffffffffffffffff, 0xffffffff00000004);
- convert(0xe000003000031270, 0xffffffffffffffff, 0xffffffff00000004);
- convert(0xe000003000031280, 0x000000000000000f, 0x0000000000000000);
- convert(0xe0000030000312a0, 0x00000000ff110013, 0x0000000000000000);
- convert(0xe0000030000312b0, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe0000030000312c0, 0x000000000000000f, 0x0000000000000000);
- convert(0xe0000030000312e0, 0x0000000000110012, 0x0000000000000000);
- convert(0xe0000030000312f0, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe000003000031300, 0x000000000000000f, 0x0000000000000000);
- convert(0xe000003000031310, 0x0000000000038160, 0x0000000000000000);
- convert(0xe000003000031330, 0x00000000ff310122, 0x0000000000000000);
- convert(0xe000003000031340, 0xffffffffffffffff, 0xffffffff00000005);
- convert(0xe000003000031350, 0x000000000000000f, 0x0000000000000000);
- convert(0xe000003000031360, 0x0000000000038190, 0x0000000000000000);
- convert(0xe000003000031400, 0x0000000000310121, 0x0000000000000000);
- convert(0xe000003000031400, 0x0000000000310121, 0x0000000000000000);
- convert(0xe000003000031410, 0xffffffffffffffff, 0xffffffff00000006);
- convert(0xe000003000031420, 0x000000000000000f, 0x0000000000000000);
- convert(0xe000003000031430, 0x00000000000381c0, 0x0000000000000000);
- convert(0xe0000030000314d0, 0x00000000ff010201, 0x0000000000000000);
- convert(0xe0000030000314e0, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe000003000031500, 0x00000000000381f0, 0x000030430000ffff);
- convert(0xe000003000031510, 0x000000000000ffff, 0x0000000000000000);
- convert(0xe0000030000315a0, 0x00000020ff000201, 0x0000000000000000);
- convert(0xe0000030000315b0, 0xffffffffffffffff, 0xffffffff00000001);
- convert(0xe0000030000315d0, 0x0000000000038240, 0x00003f3f0000ffff);
- convert(0xe0000030000315e0, 0x000000000000ffff, 0x0000000000000000);
- convert(0xe000003000031670, 0x00000000ff010201, 0x0000000000000000);
- convert(0xe000003000031680, 0xffffffffffffffff, 0x0000000100000002);
- convert(0xe0000030000316a0, 0x0000000000038290, 0x000030430000ffff);
- convert(0xe0000030000316b0, 0x000000000000ffff, 0x0000000000000000);
- convert(0xe000003000031740, 0x00000020ff000201, 0x0000000000000000);
- convert(0xe000003000031750, 0xffffffffffffffff, 0x0000000500000003);
- convert(0xe000003000031770, 0x00000000000382e0, 0x00003f3f0000ffff);
- convert(0xe000003000031780, 0x000000000000ffff, 0x0000000000000000);
-}
-
-#endif
-
-}
-
-
-
-
-
-#ifdef DEFINE_DUMP_RTNS
-/*
- * these were useful for printing out registers etc
- * during bringup
- */
-
-static void
-xdump(long long *addr, int count)
-{
- int ii;
- volatile long long *xx = addr;
-
- for ( ii = 0; ii < count; ii++, xx++ ) {
- printk("0x%p : 0x%p\n", (void *)xx, (void *)*xx);
- }
-}
-
-static void
-xdump32(unsigned int *addr, int count)
-{
- int ii;
- volatile unsigned int *xx = addr;
-
- for ( ii = 0; ii < count; ii++, xx++ ) {
- printk("0x%p : 0x%0x\n", (void *)xx, (int)*xx);
- }
-}
-
-static void
-clear_ii_error(void)
-{
- volatile long long *tmp;
-
- printk("... WSTAT ");
- xdump((long long *)0xc0000a0001c00008, 1);
- printk("... WCTRL ");
- xdump((long long *)0xc0000a0001c00020, 1);
- printk("... WLCSR ");
- xdump((long long *)0xc0000a0001c00128, 1);
- printk("... IIDSR ");
- xdump((long long *)0xc0000a0001c00138, 1);
- printk("... IOPRBs ");
- xdump((long long *)0xc0000a0001c00198, 9);
- printk("... IXSS ");
- xdump((long long *)0xc0000a0001c00210, 1);
- printk("... IBLS0 ");
- xdump((long long *)0xc0000a0001c10000, 1);
- printk("... IBLS1 ");
- xdump((long long *)0xc0000a0001c20000, 1);
-
- /* Write IOERR clear to clear the CRAZY bit in the status */
- tmp = (long long *)0xc0000a0001c001f8; *tmp = (long long)0xffffffff;
-
- /* dump out local block error registers */
- printk("... ");
- xdump((long long *)0xc0000a0001e04040, 1); /* LB_ERROR_BITS */
- printk("... ");
- xdump((long long *)0xc0000a0001e04050, 1); /* LB_ERROR_HDR1 */
- printk("... ");
- xdump((long long *)0xc0000a0001e04058, 1); /* LB_ERROR_HDR2 */
- /* and clear the LB_ERROR_BITS */
- tmp = (long long *)0xc0000a0001e04040; *tmp = 0x0;
- printk("clr: ");
- xdump((long long *)0xc0000a0001e04040, 1); /* LB_ERROR_BITS */
- tmp = (long long *)0xc0000a0001e04050; *tmp = 0x0;
- tmp = (long long *)0xc0000a0001e04058; *tmp = 0x0;
-}
-
-
-static void
-dump_ii(void)
-{
- printk("===== Dump the II regs =====\n");
- xdump((long long *)0xc0000a0001c00000, 2);
- xdump((long long *)0xc0000a0001c00020, 1);
- xdump((long long *)0xc0000a0001c00100, 37);
- xdump((long long *)0xc0000a0001c00300, 98);
- xdump((long long *)0xc0000a0001c10000, 6);
- xdump((long long *)0xc0000a0001c20000, 6);
- xdump((long long *)0xc0000a0001c30000, 2);
-
- xdump((long long *)0xc0000a0000000000, 1);
- xdump((long long *)0xc0000a0001000000, 1);
- xdump((long long *)0xc0000a0002000000, 1);
- xdump((long long *)0xc0000a0003000000, 1);
- xdump((long long *)0xc0000a0004000000, 1);
- xdump((long long *)0xc0000a0005000000, 1);
- xdump((long long *)0xc0000a0006000000, 1);
- xdump((long long *)0xc0000a0007000000, 1);
- xdump((long long *)0xc0000a0008000000, 1);
- xdump((long long *)0xc0000a0009000000, 1);
- xdump((long long *)0xc0000a000a000000, 1);
- xdump((long long *)0xc0000a000b000000, 1);
- xdump((long long *)0xc0000a000c000000, 1);
- xdump((long long *)0xc0000a000d000000, 1);
- xdump((long long *)0xc0000a000e000000, 1);
- xdump((long long *)0xc0000a000f000000, 1);
-}
-
-static void
-dump_crossbow(void)
-{
- printk("===== Dump the Crossbow regs =====\n");
- clear_ii_error();
- xdump32((unsigned int *)0xc0000a0000000004, 1);
- clear_ii_error();
- xdump32((unsigned int *)0xc0000a0000000000, 1);
- printk("and again..\n");
- xdump32((unsigned int *)0xc0000a0000000000, 1);
- xdump32((unsigned int *)0xc0000a0000000000, 1);
-
-
- clear_ii_error();
-
- xdump32((unsigned int *)0xc000020000000004, 1);
- clear_ii_error();
- xdump32((unsigned int *)0xc000020000000000, 1);
- clear_ii_error();
-
- xdump32((unsigned int *)0xc0000a0000800004, 1);
- clear_ii_error();
- xdump32((unsigned int *)0xc0000a0000800000, 1);
- clear_ii_error();
-
- xdump32((unsigned int *)0xc000020000800004, 1);
- clear_ii_error();
- xdump32((unsigned int *)0xc000020000800000, 1);
- clear_ii_error();
-
-
-}
-#endif /* DEFINE_DUMP_RTNS */
diff --git a/arch/ia64/sn/io/l1.c b/arch/ia64/sn/io/l1.c
deleted file mode 100644
index fb7d48539e7843..00000000000000
--- a/arch/ia64/sn/io/l1.c
+++ /dev/null
@@ -1,3056 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/* In general, this file is organized in a hierarchy from lower-level
- * to higher-level layers, as follows:
- *
- * UART routines
- * Bedrock/L1 "PPP-like" protocol implementation
- * System controller "message" interface (allows multiplexing
- * of various kinds of requests and responses with
- * console I/O)
- * Console interface:
- * "l1_cons", the glue that allows the L1 to act
- * as the system console for the stdio libraries
- *
- * Routines making use of the system controller "message"-style interface
- * can be found in l1_command.c.
- */
-
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/router.h>
-#include <asm/sn/module.h>
-#include <asm/sn/ksys/l1.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/uart16550.h>
-#include <asm/sn/simulator.h>
-
-
-/* Make all console writes atomic */
-#define SYNC_CONSOLE_WRITE 1
-
-
-/*********************************************************************
- * Hardware-level (UART) driver routines.
- */
-
-/* macros for reading/writing registers */
-
-#define LD(x) (*(volatile uint64_t *)(x))
-#define SD(x, v) (LD(x) = (uint64_t) (v))
-
-/* location of uart receive/xmit data register */
-#if defined(CONFIG_IA64_SGI_SN1)
-#define L1_UART_BASE(n) ((ulong)REMOTE_HSPEC_ADDR((n), 0x00000080))
-#define LOCK_HUB REMOTE_HUB_ADDR
-#elif defined(CONFIG_IA64_SGI_SN2)
-#define L1_UART_BASE(n) ((ulong)REMOTE_HUB((n), SH_JUNK_BUS_UART0))
-#define LOCK_HUB REMOTE_HUB
-typedef u64 rtc_time_t;
-#endif
-
-
-#define ADDR_L1_REG(n, r) ( L1_UART_BASE(n) | ( (r) << 3 ) )
-#define READ_L1_UART_REG(n, r) ( LD(ADDR_L1_REG((n), (r))) )
-#define WRITE_L1_UART_REG(n, r, v) ( SD(ADDR_L1_REG((n), (r)), (v)) )
-
-/* upper layer interface calling methods */
-#define SERIAL_INTERRUPT_MODE 0
-#define SERIAL_POLLED_MODE 1
-
-
-/* UART-related #defines */
-
-#define UART_BAUD_RATE 57600
-#define UART_FIFO_DEPTH 16
-#define UART_DELAY_SPAN 10
-#define UART_PUTC_TIMEOUT 50000
-#define UART_INIT_TIMEOUT 100000
-
-/* error codes */
-#define UART_SUCCESS 0
-#define UART_TIMEOUT (-1)
-#define UART_LINK (-2)
-#define UART_NO_CHAR (-3)
-#define UART_VECTOR (-4)
-
-#define UART_DELAY(x) udelay(x)
-
-/* Some debug counters */
-#define L1C_INTERRUPTS 0
-#define L1C_OUR_R_INTERRUPTS 1
-#define L1C_OUR_X_INTERRUPTS 2
-#define L1C_SEND_CALLUPS 3
-#define L1C_RECEIVE_CALLUPS 4
-#define L1C_SET_BAUD 5
-#define L1C_ALREADY_LOCKED L1C_SET_BAUD
-#define L1C_R_IRQ 6
-#define L1C_R_IRQ_RET 7
-#define L1C_LOCK_TIMEOUTS 8
-#define L1C_LOCK_COUNTER 9
-#define L1C_UNLOCK_COUNTER 10
-#define L1C_REC_STALLS 11
-#define L1C_CONNECT_CALLS 12
-#define L1C_SIZE L1C_CONNECT_CALLS /* Set to the last one */
-
-uint64_t L1_collectibles[L1C_SIZE + 1];
-
-
-/*
- * Some macros for handling Endian-ness
- */
-
-#define COPY_INT_TO_BUFFER(_b, _i, _n) \
- { \
- _b[_i++] = (_n >> 24) & 0xff; \
- _b[_i++] = (_n >> 16) & 0xff; \
- _b[_i++] = (_n >> 8) & 0xff; \
- _b[_i++] = _n & 0xff; \
- }
-
-#define COPY_BUFFER_TO_INT(_b, _i, _n) \
- { \
- _n = (_b[_i++] << 24) & 0xff; \
- _n |= (_b[_i++] << 16) & 0xff; \
- _n |= (_b[_i++] << 8) & 0xff; \
- _n |= _b[_i++] & 0xff; \
- }
-
-#define COPY_BUFFER_TO_BUFFER(_b, _i, _bn) \
- { \
- char *_xyz = (char *)_bn; \
- _xyz[3] = _b[_i++]; \
- _xyz[2] = _b[_i++]; \
- _xyz[1] = _b[_i++]; \
- _xyz[0] = _b[_i++]; \
- }
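
As a quick check of the byte order these macros impose, a standalone sketch of the big-endian layout COPY_INT_TO_BUFFER writes (the value below is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned char b[4];
	int i = 0;
	unsigned int n = 0x12345678;

	/* same big-endian (network order) layout as COPY_INT_TO_BUFFER */
	b[i++] = (n >> 24) & 0xff;
	b[i++] = (n >> 16) & 0xff;
	b[i++] = (n >>  8) & 0xff;
	b[i++] =  n        & 0xff;

	printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);	/* 12 34 56 78 */
	return 0;
}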
-
-void snia_kmem_free(void *where, int size);
-
-#define ALREADY_LOCKED 1
-#define NOT_LOCKED 0
-static int early_l1_serial_out(nasid_t, char *, int, int /* defines above*/ );
-
-#define BCOPY(x,y,z) memcpy(y,x,z)
-
-uint8_t L1_interrupts_connected; /* Non-zero when we are in interrupt mode */
-
-
-/*
- * Console locking defines and functions.
- *
- */
-
-uint8_t L1_cons_is_inited = 0; /* non-zero when console is init'd */
-nasid_t Master_console_nasid = (nasid_t)-1;
-extern nasid_t console_nasid;
-
-u64 ia64_sn_get_console_nasid(void);
-
-inline nasid_t
-get_master_nasid(void)
-{
-#if defined(CONFIG_IA64_SGI_SN1)
- nasid_t nasid = Master_console_nasid;
-
- if ( nasid == (nasid_t)-1 ) {
- nasid = (nasid_t)ia64_sn_get_console_nasid();
- if ( (nasid < 0) || (nasid >= MAX_NASIDS) ) {
- /* Out of bounds, use local */
- console_nasid = nasid = get_nasid();
- }
- else {
- /* Got a valid nasid, set the console_nasid */
- char xx[100];
-/* zzzzzz - force nasid to 0 for now */
- sprintf(xx, "Master console is set to nasid %d (%d)\n", 0, (int)nasid);
-nasid = 0;
-/* end zzzzzz */
- xx[99] = (char)0;
- early_l1_serial_out(nasid, xx, strlen(xx), NOT_LOCKED);
- Master_console_nasid = console_nasid = nasid;
- }
- }
- return(nasid);
-#else
- return((nasid_t)0);
-#endif /* CONFIG_IA64_SGI_SN1 */
-}
-
-
-#if defined(CONFIG_IA64_SGI_SN1)
-
-#define HUB_LOCK 16
-
-#define PRIMARY_LOCK_TIMEOUT 10000000
-#define HUB_LOCK_REG(n) LOCK_HUB(n, MD_PERF_CNT0)
-
-#define SET_BITS(reg, bits) SD(reg, LD(reg) | (bits))
-#define CLR_BITS(reg, bits) SD(reg, LD(reg) & ~(bits))
-#define TST_BITS(reg, bits) ((LD(reg) & (bits)) != 0)
-
-#define HUB_TEST_AND_SET(n) LD(LOCK_HUB(n,LB_SCRATCH_REG3_RZ))
-#define HUB_CLEAR(n) SD(LOCK_HUB(n,LB_SCRATCH_REG3),0)
-
-#define RTC_TIME_MAX ((rtc_time_t) ~0ULL)
-
-/*
- * primary_lock
- *
- * Allows CPU's 0-3 to mutually exclude the hub from one another by
- * obtaining a blocking lock. Does nothing if only one CPU is active.
- *
- * This lock should be held just long enough to set or clear a global
- * lock bit. After a relatively short timeout period, this routine
- * figures something is wrong, and steals the lock. It does not set
- * any other CPU to "dead".
- */
-inline void
-primary_lock(nasid_t nasid)
-{
- rtc_time_t expire;
-
- expire = rtc_time() + PRIMARY_LOCK_TIMEOUT;
-
- while (HUB_TEST_AND_SET(nasid)) {
- if (rtc_time() > expire) {
- HUB_CLEAR(nasid);
- }
- }
-}
-
-/*
- * primary_unlock (internal)
- *
- * Counterpart to primary_lock
- */
-
-inline void
-primary_unlock(nasid_t nasid)
-{
- HUB_CLEAR(nasid);
-}
-
-/*
- * hub_unlock
- *
- * Counterpart to hub_lock_timeout and hub_lock
- */
-
-inline void
-hub_unlock(nasid_t nasid, int level)
-{
- uint64_t mask = 1ULL << level;
-
- primary_lock(nasid);
- CLR_BITS(HUB_LOCK_REG(nasid), mask);
- primary_unlock(nasid);
-}
-
-/*
- * hub_lock_timeout
- *
- * Uses primary_lock to implement multiple lock levels.
- *
- * There are 20 lock levels from 0 to 19 (limited by the number of bits
- * in HUB_LOCK_REG). To prevent deadlock, multiple locks should be
- * obtained in order of increasingly higher level, and released in the
- * reverse order.
- *
- * A timeout value of 0 may be used for no timeout.
- *
- * Returns 0 if successful, -1 if lock times out.
- */
-
-inline int
-hub_lock_timeout(nasid_t nasid, int level, rtc_time_t timeout)
-{
- uint64_t mask = 1ULL << level;
- rtc_time_t expire = (timeout ? rtc_time() + timeout : RTC_TIME_MAX);
- int done = 0;
-
- while (! done) {
- while (TST_BITS(HUB_LOCK_REG(nasid), mask)) {
- if (rtc_time() > expire)
- return -1;
- }
-
- primary_lock(nasid);
-
- if (! TST_BITS(HUB_LOCK_REG(nasid), mask)) {
- SET_BITS(HUB_LOCK_REG(nasid), mask);
- done = 1;
- }
- primary_unlock(nasid);
- }
- return 0;
-}
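
A toy model, not kernel code: the lock-level discipline described above (acquire in increasing level order, release in reverse) sketched with a plain bitmask standing in for HUB_LOCK_REG:

#include <stdio.h>
#include <assert.h>

/* toy model of the per-hub lock word: one bit per level, 0..19 */
static unsigned int lock_reg;

static void toy_lock(int level)
{
	assert(!(lock_reg & (1u << level)));	/* level not already held */
	lock_reg |= (1u << level);
}

static void toy_unlock(int level)
{
	lock_reg &= ~(1u << level);
}

int main(void)
{
	/* acquire in increasing level order ... */
	toy_lock(3);
	toy_lock(16);		/* e.g. the level the console path uses */
	/* ... and release in the reverse order to avoid deadlock */
	toy_unlock(16);
	toy_unlock(3);
	printf("lock word now 0x%x\n", lock_reg);
	return 0;
}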
-
-
-#define LOCK_TIMEOUT (0x1500000 * 1) /* 0x1500000 is ~30 sec */
-
-void
-lock_console(nasid_t nasid)
-{
- int ret;
-
- /* If we already have it locked, just return */
- L1_collectibles[L1C_LOCK_COUNTER]++;
-
- ret = hub_lock_timeout(nasid, HUB_LOCK, (rtc_time_t)LOCK_TIMEOUT);
- if ( ret != 0 ) {
- L1_collectibles[L1C_LOCK_TIMEOUTS]++;
- /* timeout */
- hub_unlock(nasid, HUB_LOCK);
- /* If the 2nd lock fails, just pile ahead.... */
- hub_lock_timeout(nasid, HUB_LOCK, (rtc_time_t)LOCK_TIMEOUT);
- L1_collectibles[L1C_LOCK_TIMEOUTS]++;
- }
-}
-
-inline void
-unlock_console(nasid_t nasid)
-{
- L1_collectibles[L1C_UNLOCK_COUNTER]++;
- hub_unlock(nasid, HUB_LOCK);
-}
-
-#else /* SN2 */
-inline void lock_console(nasid_t n) {}
-inline void unlock_console(nasid_t n) {}
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-int
-get_L1_baud(void)
-{
- return UART_BAUD_RATE;
-}
-
-
-/* uart driver functions */
-
-static inline void
-uart_delay( rtc_time_t delay_span )
-{
- UART_DELAY( delay_span );
-}
-
-#define UART_PUTC_READY(n) (READ_L1_UART_REG((n), REG_LSR) & LSR_XHRE)
-
-static int
-uart_putc( l1sc_t *sc )
-{
- WRITE_L1_UART_REG( sc->nasid, REG_DAT, sc->send[sc->sent] );
- return UART_SUCCESS;
-}
-
-
-static int
-uart_getc( l1sc_t *sc )
-{
- u_char lsr_reg = 0;
- nasid_t nasid = sc->nasid;
-
- if( (lsr_reg = READ_L1_UART_REG( nasid, REG_LSR )) &
- (LSR_RCA | LSR_PARERR | LSR_FRMERR) )
- {
- if( lsr_reg & LSR_RCA )
- return( (u_char)READ_L1_UART_REG( nasid, REG_DAT ) );
- else if( lsr_reg & (LSR_PARERR | LSR_FRMERR) ) {
- return UART_LINK;
- }
- }
-
- return UART_NO_CHAR;
-}
-
-
-#define PROM_SER_CLK_SPEED 12000000
-#define PROM_SER_DIVISOR(x) (PROM_SER_CLK_SPEED / ((x) * 16))
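As a worked example of the divisor arithmetic (illustrative, not from the original source): with the 12 MHz junk-bus serial clock and the usual 16x UART oversampling, PROM_SER_DIVISOR(9600) = 12000000 / (9600 * 16) = 78, and uart_init() below splits that value across the divisor-latch registers as (78 >> 8) & 0xff = 0 into REG_DLH and 78 & 0xff = 78 into REG_DLL.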
-
-static void
-uart_init( l1sc_t *sc, int baud )
-{
- rtc_time_t expire;
- int clkdiv;
- nasid_t nasid;
-
- clkdiv = PROM_SER_DIVISOR(baud);
- expire = rtc_time() + UART_INIT_TIMEOUT;
- nasid = sc->nasid;
-
- /* make sure the transmit FIFO is empty */
- while( !(READ_L1_UART_REG( nasid, REG_LSR ) & LSR_XSRE) ) {
- uart_delay( UART_DELAY_SPAN );
- if( rtc_time() > expire ) {
- break;
- }
- }
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(nasid);
-
- /* Setup for the proper baud rate */
- WRITE_L1_UART_REG( nasid, REG_LCR, LCR_DLAB );
- uart_delay( UART_DELAY_SPAN );
- WRITE_L1_UART_REG( nasid, REG_DLH, (clkdiv >> 8) & 0xff );
- uart_delay( UART_DELAY_SPAN );
- WRITE_L1_UART_REG( nasid, REG_DLL, clkdiv & 0xff );
- uart_delay( UART_DELAY_SPAN );
-
- /* set operating parameters and set DLAB to 0 */
-
- /* 8 bits, one stop bit, assert request-to-send, auto flow control */
- WRITE_L1_UART_REG( nasid, REG_LCR, LCR_BITS8 | LCR_STOP1 );
- uart_delay( UART_DELAY_SPAN );
- WRITE_L1_UART_REG( nasid, REG_MCR, MCR_RTS | MCR_AFE );
- uart_delay( UART_DELAY_SPAN );
-
- /* disable interrupts */
- WRITE_L1_UART_REG( nasid, REG_ICR, 0x0 );
- uart_delay( UART_DELAY_SPAN );
-
- /* enable FIFO mode and reset both FIFOs, trigger on 1 */
- WRITE_L1_UART_REG( nasid, REG_FCR, FCR_FIFOEN );
- uart_delay( UART_DELAY_SPAN );
- WRITE_L1_UART_REG( nasid, REG_FCR, FCR_FIFOEN | FCR_RxFIFO | FCR_TxFIFO | RxLVL0);
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(nasid);
-}
-
-/* This requires the console lock */
-
-#if defined(CONFIG_IA64_SGI_SN1)
-
-static void
-uart_intr_enable( l1sc_t *sc, u_char mask )
-{
- u_char lcr_reg, icr_reg;
- nasid_t nasid = sc->nasid;
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(nasid);
-
- /* make sure that the DLAB bit in the LCR register is 0
- */
- lcr_reg = READ_L1_UART_REG( nasid, REG_LCR );
- lcr_reg &= ~(LCR_DLAB);
- WRITE_L1_UART_REG( nasid, REG_LCR, lcr_reg );
-
- /* enable indicated interrupts
- */
- icr_reg = READ_L1_UART_REG( nasid, REG_ICR );
- icr_reg |= mask;
- WRITE_L1_UART_REG( nasid, REG_ICR, icr_reg /*(ICR_RIEN | ICR_TIEN)*/ );
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(nasid);
-}
-
-/* This requires the console lock */
-static void
-uart_intr_disable( l1sc_t *sc, u_char mask )
-{
- u_char lcr_reg, icr_reg;
- nasid_t nasid = sc->nasid;
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(nasid);
-
- /* make sure that the DLAB bit in the LCR register is 0
- */
- lcr_reg = READ_L1_UART_REG( nasid, REG_LCR );
- lcr_reg &= ~(LCR_DLAB);
- WRITE_L1_UART_REG( nasid, REG_LCR, lcr_reg );
-
- /* enable indicated interrupts
- */
- icr_reg = READ_L1_UART_REG( nasid, REG_ICR );
- icr_reg &= mask;
- WRITE_L1_UART_REG( nasid, REG_ICR, icr_reg /*(ICR_RIEN | ICR_TIEN)*/ );
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(nasid);
-}
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-#define uart_enable_xmit_intr(sc) \
- uart_intr_enable((sc), ICR_TIEN)
-
-#define uart_disable_xmit_intr(sc) \
- uart_intr_disable((sc), ~(ICR_TIEN))
-
-#define uart_enable_recv_intr(sc) \
- uart_intr_enable((sc), ICR_RIEN)
-
-#define uart_disable_recv_intr(sc) \
- uart_intr_disable((sc), ~(ICR_RIEN))
-
-
-/*********************************************************************
- * Routines for accessing a remote (router) UART
- */
-
-#define READ_RTR_L1_UART_REG(p, n, r, v) \
- { \
- if( vector_read_node( (p), (n), 0, \
- RR_JBUS1(r), (v) ) ) { \
- return UART_VECTOR; \
- } \
- }
-
-#define WRITE_RTR_L1_UART_REG(p, n, r, v) \
- { \
- if( vector_write_node( (p), (n), 0, \
- RR_JBUS1(r), (v) ) ) { \
- return UART_VECTOR; \
- } \
- }
-
-#define RTR_UART_PUTC_TIMEOUT UART_PUTC_TIMEOUT*10
-#define RTR_UART_DELAY_SPAN UART_DELAY_SPAN
-#define RTR_UART_INIT_TIMEOUT UART_INIT_TIMEOUT*10
-
-static int
-rtr_uart_putc( l1sc_t *sc )
-{
- uint64_t regval, c;
- nasid_t nasid = sc->nasid;
- net_vec_t path = sc->uart;
- rtc_time_t expire = rtc_time() + RTR_UART_PUTC_TIMEOUT;
-
- c = (sc->send[sc->sent] & 0xffULL);
-
- while( 1 )
- {
- /* Check for "tx hold reg empty" bit. */
- READ_RTR_L1_UART_REG( path, nasid, REG_LSR, &regval );
- if( regval & LSR_XHRE )
- {
- WRITE_RTR_L1_UART_REG( path, nasid, REG_DAT, c );
- return UART_SUCCESS;
- }
-
- if( rtc_time() >= expire )
- {
- return UART_TIMEOUT;
- }
- uart_delay( RTR_UART_DELAY_SPAN );
- }
-}
-
-
-static int
-rtr_uart_getc( l1sc_t *sc )
-{
- uint64_t regval;
- nasid_t nasid = sc->nasid;
- net_vec_t path = sc->uart;
-
- READ_RTR_L1_UART_REG( path, nasid, REG_LSR, &regval );
- if( regval & (LSR_RCA | LSR_PARERR | LSR_FRMERR) )
- {
- if( regval & LSR_RCA )
- {
- READ_RTR_L1_UART_REG( path, nasid, REG_DAT, &regval );
- return( (int)regval );
- }
- else
- {
- return UART_LINK;
- }
- }
-
- return UART_NO_CHAR;
-}
-
-
-static int
-rtr_uart_init( l1sc_t *sc, int baud )
-{
- rtc_time_t expire;
- int clkdiv;
- nasid_t nasid;
- net_vec_t path;
- uint64_t regval;
-
- clkdiv = PROM_SER_DIVISOR(baud);
- expire = rtc_time() + RTR_UART_INIT_TIMEOUT;
- nasid = sc->nasid;
- path = sc->uart;
-
- /* make sure the transmit FIFO is empty */
- while(1) {
- READ_RTR_L1_UART_REG( path, nasid, REG_LSR, &regval );
- if( regval & LSR_XSRE ) {
- break;
- }
- if( rtc_time() > expire ) {
- break;
- }
- uart_delay( RTR_UART_DELAY_SPAN );
- }
-
- WRITE_RTR_L1_UART_REG( path, nasid, REG_LCR, LCR_DLAB );
- uart_delay( UART_DELAY_SPAN );
- WRITE_RTR_L1_UART_REG( path, nasid, REG_DLH, (clkdiv >> 8) & 0xff );
- uart_delay( UART_DELAY_SPAN );
- WRITE_RTR_L1_UART_REG( path, nasid, REG_DLL, clkdiv & 0xff );
- uart_delay( UART_DELAY_SPAN );
-
- /* set operating parameters and set DLAB to 0 */
- WRITE_RTR_L1_UART_REG( path, nasid, REG_LCR, LCR_BITS8 | LCR_STOP1 );
- uart_delay( UART_DELAY_SPAN );
- WRITE_RTR_L1_UART_REG( path, nasid, REG_MCR, MCR_RTS | MCR_AFE );
- uart_delay( UART_DELAY_SPAN );
-
- /* disable interrupts */
- WRITE_RTR_L1_UART_REG( path, nasid, REG_ICR, 0x0 );
- uart_delay( UART_DELAY_SPAN );
-
- /* enable FIFO mode and reset both FIFOs */
- WRITE_RTR_L1_UART_REG( path, nasid, REG_FCR, FCR_FIFOEN );
- uart_delay( UART_DELAY_SPAN );
- WRITE_RTR_L1_UART_REG( path, nasid, REG_FCR,
- FCR_FIFOEN | FCR_RxFIFO | FCR_TxFIFO );
-
- return 0;
-}
-
-/*********************************************************************
- * locking macros
- */
-
-#define L1SC_SEND_LOCK(l,p) { if ((l)->uart == BRL1_LOCALHUB_UART) spin_lock_irqsave(&((l)->send_lock),p); }
-#define L1SC_SEND_UNLOCK(l,p) { if ((l)->uart == BRL1_LOCALHUB_UART) spin_unlock_irqrestore(&((l)->send_lock), p); }
-#define L1SC_RECV_LOCK(l,p) { if ((l)->uart == BRL1_LOCALHUB_UART) spin_lock_irqsave(&((l)->recv_lock), p); }
-#define L1SC_RECV_UNLOCK(l,p) { if ((l)->uart == BRL1_LOCALHUB_UART) spin_unlock_irqrestore(&((l)->recv_lock), p); }
-
-
-/*********************************************************************
- * subchannel manipulation
- *
- * The SUBCH_[UN]LOCK macros are used to arbitrate subchannel
- * allocation. SUBCH_DATA_[UN]LOCK control access to data structures
- * associated with particular subchannels (e.g., receive queues).
- *
- */
-#define SUBCH_LOCK(sc, p) spin_lock_irqsave( &((sc)->subch_lock), p )
-#define SUBCH_UNLOCK(sc, p) spin_unlock_irqrestore( &((sc)->subch_lock), p )
-#define SUBCH_DATA_LOCK(sbch, p) spin_lock_irqsave( &((sbch)->data_lock), p )
-#define SUBCH_DATA_UNLOCK(sbch, p) spin_unlock_irqrestore( &((sbch)->data_lock), p )
-
-
-/*
- * set a function to be called for subchannel ch in the event of
- * a transmission low-water interrupt from the uart
- */
-void
-subch_set_tx_notify( l1sc_t *sc, int ch, brl1_notif_t func )
-{
- unsigned long pl = 0;
-
- L1SC_SEND_LOCK( sc, pl );
-#if !defined(SYNC_CONSOLE_WRITE)
- if ( func && !sc->send_in_use )
- uart_enable_xmit_intr( sc );
-#endif
- sc->subch[ch].tx_notify = func;
- L1SC_SEND_UNLOCK(sc, pl );
-}
-
-/*
- * set a function to be called for subchannel ch when data is received
- */
-void
-subch_set_rx_notify( l1sc_t *sc, int ch, brl1_notif_t func )
-{
- unsigned long pl = 0;
- brl1_sch_t *subch = &(sc->subch[ch]);
-
- SUBCH_DATA_LOCK( subch, pl );
- sc->subch[ch].rx_notify = func;
- SUBCH_DATA_UNLOCK( subch, pl );
-}
-
-/*********************************************************************
- * Queue manipulation macros
- *
- *
- */
-#define NEXT(p) (((p) + 1) & (BRL1_QSIZE-1)) /* assume power of 2 */
-
-#define cq_init(q) bzero((q), sizeof (*(q)))
-#define cq_empty(q) ((q)->ipos == (q)->opos)
-#define cq_full(q) (NEXT((q)->ipos) == (q)->opos)
-#define cq_used(q) ((q)->opos <= (q)->ipos ? \
- (q)->ipos - (q)->opos : \
- BRL1_QSIZE + (q)->ipos - (q)->opos)
-#define cq_room(q) ((q)->opos <= (q)->ipos ? \
- BRL1_QSIZE - 1 + (q)->opos - (q)->ipos : \
- (q)->opos - (q)->ipos - 1)
-#define cq_add(q, c) ((q)->buf[(q)->ipos] = (u_char) (c), \
- (q)->ipos = NEXT((q)->ipos))
-#define cq_rem(q, c) ((c) = (q)->buf[(q)->opos], \
- (q)->opos = NEXT((q)->opos))
-#define cq_discard(q) ((q)->opos = NEXT((q)->opos))
-
-#define cq_tent_full(q) (NEXT((q)->tent_next) == (q)->opos)
-#define cq_tent_len(q) ((q)->ipos <= (q)->tent_next ? \
- (q)->tent_next - (q)->ipos : \
- BRL1_QSIZE + (q)->tent_next - (q)->ipos)
-#define cq_tent_add(q, c) \
- ((q)->buf[(q)->tent_next] = (u_char) (c), \
- (q)->tent_next = NEXT((q)->tent_next))
-#define cq_commit_tent(q) \
- ((q)->ipos = (q)->tent_next)
-#define cq_discard_tent(q) \
- ((q)->tent_next = (q)->ipos)
-
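A short sketch of how the tentative-add macros above are meant to be used (illustrative only, not taken from the original file; cq_example_fill is a hypothetical helper). NEXT() works because BRL1_QSIZE is a power of two, and a packet is either committed whole or rolled back whole; brl1_receive() further below follows the same pattern.

static int cq_example_fill(sc_cq_t *q, u_char *pkt, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (cq_tent_full(q)) {
			cq_discard_tent(q);	/* roll the partial packet back */
			return -1;
		}
		cq_tent_add(q, pkt[i]);
	}
	cq_commit_tent(q);			/* now cq_rem() can see the bytes */
	return 0;
}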
-
-
-
-/*********************************************************************
- * CRC-16 (for checking bedrock/L1 packets).
- *
- * These are based on RFC 1662 ("PPP in HDLC-like framing").
- */
-
-static unsigned short fcstab[256] = {
- 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
- 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
- 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
- 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
- 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
- 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
- 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
- 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
- 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
- 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
- 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
- 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
- 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
- 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
- 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
- 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
- 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
- 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
- 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
- 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
- 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
- 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
- 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
- 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
- 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
- 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
- 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
- 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
- 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
- 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
- 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
- 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
-};
-
-#define INIT_CRC 0xFFFF /* initial CRC value */
-#define GOOD_CRC 0xF0B8 /* "good" final CRC value */
-
-static unsigned short crc16_calc( unsigned short crc, u_char c )
-{
- return( (crc >> 8) ^ fcstab[(crc ^ c) & 0xff] );
-}
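The way this table-driven CRC is used by the framing code below can be summarized in a small sketch (illustrative, not part of the original file; crc16_frame_ok is a hypothetical helper). The sender folds the CRC over the header byte and the unescaped payload, complements it, and appends the two CRC bytes; the receiver folds the CRC over the header, the payload and those two CRC bytes, and expects the fixed residue GOOD_CRC.

static int crc16_frame_ok(u_char hdr, u_char *body, int len)
{
	/* body holds the unescaped payload followed by the two CRC bytes */
	unsigned short crc = crc16_calc(INIT_CRC, hdr);
	int i;

	for (i = 0; i < len; i++)
		crc = crc16_calc(crc, body[i]);

	return crc == (unsigned short)GOOD_CRC;
}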
-
-
-/***********************************************************************
- * The following functions implement the PPP-like bedrock/L1 protocol
- * layer.
- *
- */
-
-#define BRL1_FLAG_CH 0x7e
-#define BRL1_ESC_CH 0x7d
-#define BRL1_XOR_CH 0x20
-
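The escaping rule implied by these three constants is used on both the transmit and receive paths below; a minimal sketch (illustrative, not from the original source; brl1_stuff_byte is a hypothetical helper): a flag (0x7e) or escape (0x7d) byte occurring in the payload is transmitted as BRL1_ESC_CH followed by the byte XORed with BRL1_XOR_CH, and the receiver's BRL1_ESC state undoes the XOR.

static int brl1_stuff_byte(u_char c, u_char *out)
{
	if (c == BRL1_FLAG_CH || c == BRL1_ESC_CH) {
		out[0] = BRL1_ESC_CH;
		out[1] = c ^ BRL1_XOR_CH;	/* receiver XORs again to recover c */
		return 2;
	}
	out[0] = c;
	return 1;
}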
-/* L1<->Bedrock packet types */
-#define BRL1_REQUEST 0x00
-#define BRL1_RESPONSE 0x20
-#define BRL1_EVENT 0x40
-
-#define BRL1_PKT_TYPE_MASK 0xE0
-#define BRL1_SUBCH_MASK 0x1F
-
-#define PKT_TYPE(tsb) ((tsb) & BRL1_PKT_TYPE_MASK)
-#define SUBCH(tsb) ((tsb) & BRL1_SUBCH_MASK)
-
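As a worked example of the two macros above (illustrative): a header byte of 0x24 decodes as PKT_TYPE(0x24) == BRL1_RESPONSE on subchannel SUBCH(0x24) == 4, while sc_send() further below composes headers the other way around, as BRL1_REQUEST | ch.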
-/* timeouts */
-#define BRL1_INIT_TIMEOUT 500000
-
-/*
- * brl1_discard_packet is a dummy "receive callback" used to get rid
- * of packets we don't want
- */
-void brl1_discard_packet( int dummy0, void *dummy1, struct pt_regs *dummy2, l1sc_t *sc, int ch )
-{
- unsigned long pl = 0;
- brl1_sch_t *subch = &sc->subch[ch];
-
- sc_cq_t *q = subch->iqp;
- SUBCH_DATA_LOCK( subch, pl );
- q->opos = q->ipos;
- atomic_set(&(subch->packet_arrived), 0);
- SUBCH_DATA_UNLOCK( subch, pl );
-}
-
-
-/*
- * brl1_send_chars sends the send buffer in the l1sc_t structure
- * out through the uart. Assumes that the caller has locked the
- * UART (or send buffer in the kernel).
- *
- * This routine doesn't block-- if you want it to, call it in
- * a loop.
- */
-static int
-brl1_send_chars( l1sc_t *sc )
-{
- /* We track the depth of the C brick's UART's
- * fifo in software, and only check if the UART is accepting
- * characters when our count indicates that the fifo should
- * be full.
- *
- * For remote (router) UARTs, we check with the UART before sending every
- * character.
- */
- if( sc->uart == BRL1_LOCALHUB_UART ) {
- if( !(sc->fifo_space) && UART_PUTC_READY( sc->nasid ) )
- sc->fifo_space = UART_FIFO_DEPTH;
-
- while( (sc->sent < sc->send_len) && (sc->fifo_space) ) {
- uart_putc( sc );
- sc->fifo_space--;
- sc->sent++;
- }
- }
- else {
-
- /* remote (router) UARTs */
-
- int result;
- int tries = 0;
-
- while( sc->sent < sc->send_len ) {
- result = sc->putc_f( sc );
- if( result >= 0 ) {
- (sc->sent)++;
- continue;
- }
- if( result == UART_TIMEOUT ) {
- tries++;
- /* keep retrying this character for up to TIMEOUT_RETRIES attempts... */
- if( tries < 30 /* TIMEOUT_RETRIES */ ) {
- continue;
- }
- /* ...or else... */
- else {
- /* ...drop the packet. */
- sc->sent = sc->send_len;
- return sc->send_len;
- }
- }
- if( result < 0 ) {
- return result;
- }
- }
- }
- return sc->sent;
-}
-
-
-/* brl1_send formats up a packet and (at least begins to) send it
- * to the uart. If the send buffer is in use when this routine obtains
- * the lock, it will behave differently depending on the "wait" parameter.
- * For wait == 0 (most I/O), it will return 0 (as in "zero bytes sent"),
- * hopefully encouraging the caller to back off (unlock any high-level
- * spinlocks) and allow the buffer some time to drain. For wait==1 (high-
- * priority I/O along the lines of kernel error messages), we will flush
- * the current contents of the send buffer and beat on the uart
- * until our message has been completely transmitted.
- */
-
-static int
-brl1_send( l1sc_t *sc, char *msg, int len, u_char type_and_subch, int wait )
-{
- unsigned long pl = 0;
- int index;
- int pkt_len = 0;
- unsigned short crc = INIT_CRC;
- char *send_ptr = sc->send;
-
-
- if( sc->send_in_use && !(wait) ) {
- /* A send is already in progress and the caller didn't ask to wait; return 0 so it can back off and retry */
- return 0;
- }
- else if( sc->send_in_use ) {
- /* buffer's in use, but we're doing synchronous I/O, so we're going
- * to send whatever's in there right now and take the buffer
- */
- int counter = 0;
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(sc->nasid);
- L1SC_SEND_LOCK(sc, pl);
- while( sc->sent < sc->send_len ) {
- brl1_send_chars( sc );
- if ( counter++ > 0xfffff ) {
- char *str = "Looping waiting for uart to clear (1)\n";
- early_l1_serial_out(sc->nasid, str, strlen(str), ALREADY_LOCKED);
- break;
- }
- }
- }
- else {
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(sc->nasid);
- L1SC_SEND_LOCK(sc, pl);
- sc->send_in_use = 1;
- }
- *send_ptr++ = BRL1_FLAG_CH;
- *send_ptr++ = type_and_subch;
- pkt_len += 2;
- crc = crc16_calc( crc, type_and_subch );
-
- /* limit number of characters accepted to max payload size */
- if( len > (BRL1_QSIZE - 1) )
- len = (BRL1_QSIZE - 1);
-
- /* copy in the message buffer (inserting PPP
- * framing info where necessary)
- */
- for( index = 0; index < len; index++ ) {
-
- switch( *msg ) {
-
- case BRL1_FLAG_CH:
- *send_ptr++ = BRL1_ESC_CH;
- *send_ptr++ = (*msg) ^ BRL1_XOR_CH;
- pkt_len += 2;
- break;
-
- case BRL1_ESC_CH:
- *send_ptr++ = BRL1_ESC_CH;
- *send_ptr++ = (*msg) ^ BRL1_XOR_CH;
- pkt_len += 2;
- break;
-
- default:
- *send_ptr++ = *msg;
- pkt_len++;
- }
- crc = crc16_calc( crc, *msg );
- msg++;
- }
- crc ^= 0xffff;
-
- for( index = 0; index < sizeof(crc); index++ ) {
- char crc_char = (char)(crc & 0x00FF);
- if( (crc_char == BRL1_ESC_CH) || (crc_char == BRL1_FLAG_CH) ) {
- *send_ptr++ = BRL1_ESC_CH;
- pkt_len++;
- crc_char ^= BRL1_XOR_CH;
- }
- *send_ptr++ = crc_char;
- pkt_len++;
- crc >>= 8;
- }
-
- *send_ptr++ = BRL1_FLAG_CH;
- pkt_len++;
-
- sc->send_len = pkt_len;
- sc->sent = 0;
-
- {
- int counter = 0;
- do {
- brl1_send_chars( sc );
- if ( counter++ > 0xfffff ) {
- char *str = "Looping waiting for uart to clear (2)\n";
- early_l1_serial_out(sc->nasid, str, strlen(str), ALREADY_LOCKED);
- break;
- }
- } while( (sc->sent < sc->send_len) && wait );
- }
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(sc->nasid);
-
- if( sc->sent == sc->send_len ) {
- /* success! release the send buffer and call the callup */
-#if !defined(SYNC_CONSOLE_WRITE)
- brl1_notif_t callup;
-#endif
-
- sc->send_in_use = 0;
- /* call any upper layer that's asked for notification */
-#if defined(XX_SYNC_CONSOLE_WRITE)
- /*
- * This is probably not a good idea, since the l1_ write func can be called
- * multiple times within the callup function.
- */
- callup = subch->tx_notify;
- if( callup && (SUBCH(type_and_subch) == SC_CONS_SYSTEM) ) {
- L1_collectibles[L1C_SEND_CALLUPS]++;
- (*callup)(sc->subch[SUBCH(type_and_subch)].irq_frame.bf_irq,
- sc->subch[SUBCH(type_and_subch)].irq_frame.bf_dev_id,
- sc->subch[SUBCH(type_and_subch)].irq_frame.bf_regs, sc, SUBCH(type_and_subch));
- }
-#endif /* SYNC_CONSOLE_WRITE */
- }
-#if !defined(SYNC_CONSOLE_WRITE)
- else if ( !wait ) {
- /* enable low-water interrupts so buffer will be drained */
- uart_enable_xmit_intr(sc);
- }
-#endif
-
- L1SC_SEND_UNLOCK(sc, pl);
-
- return len;
-}
-
-/* brl1_send_cont is intended to be called as an interrupt service
- * routine. It sends until the UART won't accept any more characters,
- * or until an error is encountered (in which case we surrender the
- * send buffer and give up trying to send the packet). Once the
- * last character in the packet has been sent, this routine releases
- * the send buffer and calls any previously-registered "low-water"
- * output routines.
- */
-
-#if !defined(SYNC_CONSOLE_WRITE)
-
-int
-brl1_send_cont( l1sc_t *sc )
-{
- unsigned long pl = 0;
- int done = 0;
- brl1_notif_t callups[BRL1_NUM_SUBCHANS];
- brl1_notif_t *callup;
- brl1_sch_t *subch;
- int index;
-
- /*
- * I'm not sure how this should be handled - whether the lock is held
- * across the interrupt - but it seems like a bad idea....
- */
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(sc->nasid);
- L1SC_SEND_LOCK(sc, pl);
- brl1_send_chars( sc );
- done = (sc->sent == sc->send_len);
- if( done ) {
- sc->send_in_use = 0;
-#if !defined(SYNC_CONSOLE_WRITE)
- uart_disable_xmit_intr(sc);
-#endif
- }
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(sc->nasid);
- /* Release the lock */
- L1SC_SEND_UNLOCK(sc, pl);
-
- return 0;
-}
-#endif /* SYNC_CONSOLE_WRITE */
-
-/* internal function -- used by brl1_receive to read a character
- * from the uart and check whether errors occurred in the process.
- */
-static int
-read_uart( l1sc_t *sc, int *c, int *result )
-{
- *c = sc->getc_f( sc );
-
- /* no character is available */
- if( *c == UART_NO_CHAR ) {
- *result = BRL1_NO_MESSAGE;
- return 0;
- }
-
- /* some error in UART */
- if( *c < 0 ) {
- *result = BRL1_LINK;
- return 0;
- }
-
- /* everything's fine */
- *result = BRL1_VALID;
- return 1;
-}
-
-
-/*
- * brl1_receive
- *
- * This function reads a Bedrock-L1 protocol packet into the l1sc_t
- * response buffer.
- *
- * The operation of this function can be expressed as a finite state
- * machine:
- *
-
-START STATE INPUT TRANSITION
-==========================================================
-BRL1_IDLE (reset or error) flag BRL1_FLAG
- other BRL1_IDLE@
-
-BRL1_FLAG (saw a flag (0x7e)) flag BRL1_FLAG
- escape BRL1_IDLE@
- header byte BRL1_HDR
- other BRL1_IDLE@
-
-BRL1_HDR (saw a type/subch byte)(see below) BRL1_BODY
- BRL1_HDR
-
-BRL1_BODY (reading packet body) flag BRL1_FLAG
- escape BRL1_ESC
- other BRL1_BODY
-
-BRL1_ESC (saw an escape (0x7d)) flag BRL1_FLAG@
- escape BRL1_IDLE@
- other BRL1_BODY
-==========================================================
-
-"@" denotes an error transition.
-
- * The BRL1_HDR state is a transient state which doesn't read input,
- * but just provides a way into the code which decides to whom an
- * incoming packet should be directed.
- *
- * brl1_receive can be used to poll for input from the L1, or as
- * an interrupt service routine. It reads as much data as is
- * ready from the junk bus UART and places into the appropriate
- * input queues according to subchannel. The header byte is
- * stripped from console-type data, but is retained for message-
- * type data (L1 responses). A length byte will also be
- * prepended to message-type packets.
- *
- * This routine is non-blocking; if the caller needs to block
- * for input, it must call brl1_receive in a loop.
- *
- * brl1_receive returns when there is no more input, the queue
- * for the current incoming message is full, or there is an
- * error (parity error, bad header, bad CRC, etc.).
- */
-
-#define STATE_SET(l,s) ((l)->brl1_state = (s))
-#define STATE_GET(l) ((l)->brl1_state)
-
-#define LAST_HDR_SET(l,h) ((l)->brl1_last_hdr = (h))
-#define LAST_HDR_GET(l) ((l)->brl1_last_hdr)
-
-#define VALID_HDR(c) \
- ( SUBCH((c)) <= SC_CONS_SYSTEM \
- ? PKT_TYPE((c)) == BRL1_REQUEST \
- : ( PKT_TYPE((c)) == BRL1_RESPONSE || \
- PKT_TYPE((c)) == BRL1_EVENT ) )
-
-#define IS_TTY_PKT(l) ( SUBCH(LAST_HDR_GET(l)) <= SC_CONS_SYSTEM ? 1 : 0 )
-
-
-int
-brl1_receive( l1sc_t *sc, int mode )
-{
- int result; /* value to be returned by brl1_receive */
- int c; /* most-recently-read character */
- int done; /* set done to break out of recv loop */
- unsigned long pl = 0, cpl = 0;
- sc_cq_t *q; /* pointer to queue we're working with */
-
- result = BRL1_NO_MESSAGE;
-
- L1SC_RECV_LOCK(sc, cpl);
-
- done = 0;
- while( !done )
- {
- switch( STATE_GET(sc) )
- {
-
- case BRL1_IDLE:
- /* Initial or error state. Waiting for a flag character
- * to resynchronize with the L1.
- */
-
- if( !read_uart( sc, &c, &result ) ) {
-
- /* error reading uart */
- done = 1;
- continue;
- }
-
- if( c == BRL1_FLAG_CH ) {
- /* saw a flag character */
- STATE_SET( sc, BRL1_FLAG );
- continue;
- }
- break;
-
- case BRL1_FLAG:
- /* One or more flag characters have been read; look for
- * the beginning of a packet (header byte).
- */
-
- if( !read_uart( sc, &c, &result ) ) {
-
- /* error reading uart */
- if( c != UART_NO_CHAR )
- STATE_SET( sc, BRL1_IDLE );
-
- done = 1;
- continue;
- }
-
- if( c == BRL1_FLAG_CH ) {
- /* multiple flags are OK */
- continue;
- }
-
- if( !VALID_HDR( c ) ) {
- /* if c isn't a flag it should have been
- * a valid header, so we have an error
- */
- result = BRL1_PROTOCOL;
- STATE_SET( sc, BRL1_IDLE );
- done = 1;
- continue;
- }
-
- /* we have a valid header byte */
- LAST_HDR_SET( sc, c );
- STATE_SET( sc, BRL1_HDR );
-
- break;
-
- case BRL1_HDR:
- /* A header byte has been read. Do some bookkeeping. */
- q = sc->subch[ SUBCH( LAST_HDR_GET(sc) ) ].iqp;
- ASSERT(q);
-
- if( !IS_TTY_PKT(sc) ) {
- /* if this is an event or command response rather
- * than console I/O, we need to reserve a couple
- * of extra spaces in the queue for the header
- * byte and a length byte; if we can't, stay in
- * the BRL1_HDR state.
- */
- if( cq_room( q ) < 2 ) {
- result = BRL1_FULL_Q;
- done = 1;
- continue;
- }
- cq_tent_add( q, 0 ); /* reserve length byte */
- cq_tent_add( q, LAST_HDR_GET( sc ) ); /* record header byte */
- }
- STATE_SET( sc, BRL1_BODY );
-
- break;
-
- case BRL1_BODY:
- /* A header byte has been read. We are now attempting
- * to receive the packet body.
- */
-
- q = sc->subch[ SUBCH( LAST_HDR_GET(sc) ) ].iqp;
- ASSERT(q);
-
- /* if the queue we want to write into is full, don't read from
- * the uart (this provides backpressure to the L1 side)
- */
- if( cq_tent_full( q ) ) {
- result = BRL1_FULL_Q;
- done = 1;
- continue;
- }
-
- if( !read_uart( sc, &c, &result ) ) {
-
- /* error reading uart */
- if( c != UART_NO_CHAR )
- STATE_SET( sc, BRL1_IDLE );
- done = 1;
- continue;
- }
-
- if( c == BRL1_ESC_CH ) {
- /* prepare to unescape the next character */
- STATE_SET( sc, BRL1_ESC );
- continue;
- }
-
- if( c == BRL1_FLAG_CH ) {
- /* flag signifies the end of a packet */
-
- unsigned short crc; /* holds the crc as we calculate it */
- int i; /* index variable */
- brl1_sch_t *subch; /* subchannel for received packet */
- brl1_notif_t callup; /* "data ready" callup */
-
- /* whatever else may happen, we've seen a flag and we're
- * starting a new packet
- */
- STATE_SET( sc, BRL1_FLAG );
-
- /* if the packet body has less than 2 characters,
- * it can't be a well-formed packet. Discard it.
- */
- if( cq_tent_len( q ) < /* 2 + possible length byte */
- (2 + (IS_TTY_PKT(sc) ? 0 : 1)) )
- {
- result = BRL1_PROTOCOL;
- cq_discard_tent( q );
- STATE_SET( sc, BRL1_FLAG );
- done = 1;
- continue;
- }
-
- /* check CRC */
-
- /* accumulate CRC, starting with the header byte and
- * ending with the transmitted CRC. This should
- * result in a known good value.
- */
- crc = crc16_calc( INIT_CRC, LAST_HDR_GET(sc) );
- for( i = (q->ipos + (IS_TTY_PKT(sc) ? 0 : 2)) % BRL1_QSIZE;
- i != q->tent_next;
- i = (i + 1) % BRL1_QSIZE )
- {
- crc = crc16_calc( crc, q->buf[i] );
- }
-
- /* verify the calculated crc against the "good" crc value;
- * if we fail, discard the bad packet and return an error.
- */
- if( crc != (unsigned short)GOOD_CRC ) {
- result = BRL1_CRC;
- cq_discard_tent( q );
- STATE_SET( sc, BRL1_FLAG );
- done = 1;
- continue;
- }
-
- /* so the crc check was ok. Now we discard the CRC
- * from the end of the received bytes.
- */
- q->tent_next += (BRL1_QSIZE - 2);
- q->tent_next %= BRL1_QSIZE;
-
- /* get the subchannel and lock it */
- subch = &(sc->subch[SUBCH( LAST_HDR_GET(sc) )]);
- SUBCH_DATA_LOCK( subch, pl );
-
- /* if this isn't a console packet, we need to record
- * a length byte
- */
- if( !IS_TTY_PKT(sc) ) {
- q->buf[q->ipos] = cq_tent_len( q ) - 1;
- }
-
- /* record packet for posterity */
- cq_commit_tent( q );
- result = BRL1_VALID;
-
- /* notify subchannel owner that there's something
- * on the queue for them
- */
- atomic_inc(&(subch->packet_arrived));
- callup = subch->rx_notify;
- SUBCH_DATA_UNLOCK( subch, pl );
-
- if( callup && (mode == SERIAL_INTERRUPT_MODE) ) {
- L1SC_RECV_UNLOCK( sc, cpl );
- L1_collectibles[L1C_RECEIVE_CALLUPS]++;
- (*callup)( sc->subch[SUBCH(LAST_HDR_GET(sc))].irq_frame.bf_irq,
- sc->subch[SUBCH(LAST_HDR_GET(sc))].irq_frame.bf_dev_id,
- sc->subch[SUBCH(LAST_HDR_GET(sc))].irq_frame.bf_regs,
- sc, SUBCH(LAST_HDR_GET(sc)) );
- L1SC_RECV_LOCK( sc, cpl );
- }
- continue; /* go back for more! */
- }
-
- /* none of the special cases applied; we've got a normal
- * body character
- */
- cq_tent_add( q, c );
-
- break;
-
- case BRL1_ESC:
- /* saw an escape character. The next character will need
- * to be unescaped.
- */
-
- q = sc->subch[ SUBCH( LAST_HDR_GET(sc) ) ].iqp;
- ASSERT(q);
-
- /* if the queue we want to write into is full, don't read from
- * the uart (this provides backpressure to the L1 side)
- */
- if( cq_tent_full( q ) ) {
- result = BRL1_FULL_Q;
- done = 1;
- continue;
- }
-
- if( !read_uart( sc, &c, &result ) ) {
-
- /* error reading uart */
- if( c != UART_NO_CHAR ) {
- cq_discard_tent( q );
- STATE_SET( sc, BRL1_IDLE );
- }
- done = 1;
- continue;
- }
-
- if( c == BRL1_FLAG_CH ) {
- /* flag after escape is an error */
- STATE_SET( sc, BRL1_FLAG );
- cq_discard_tent( q );
- result = BRL1_PROTOCOL;
- done = 1;
- continue;
- }
-
- if( c == BRL1_ESC_CH ) {
- /* two consecutive escapes is an error */
- STATE_SET( sc, BRL1_IDLE );
- cq_discard_tent( q );
- result = BRL1_PROTOCOL;
- done = 1;
- continue;
- }
-
- /* otherwise, we've got a character that needs
- * to be unescaped
- */
- cq_tent_add( q, (c ^ BRL1_XOR_CH) );
- STATE_SET( sc, BRL1_BODY );
-
- break;
-
- } /* end of switch( STATE_GET(sc) ) */
- } /* end of while(!done) */
-
- L1SC_RECV_UNLOCK( sc, cpl );
-
- return result;
-}
-
-
-/* brl1_init initializes the Bedrock/L1 protocol layer. This includes
- * zeroing out the send and receive state information.
- */
-
-void
-brl1_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart )
-{
- int i;
- brl1_sch_t *subch;
-
- bzero( sc, sizeof( *sc ) );
- sc->nasid = nasid;
- sc->uart = uart;
- sc->getc_f = (uart == BRL1_LOCALHUB_UART ? uart_getc : rtr_uart_getc);
- sc->putc_f = (uart == BRL1_LOCALHUB_UART ? uart_putc : rtr_uart_putc);
- sc->sol = 1;
- subch = sc->subch;
-
- /* initialize L1 subchannels
- */
-
- /* assign processor TTY channels */
- for( i = 0; i < CPUS_PER_NODE; i++, subch++ ) {
- subch->use = BRL1_SUBCH_RSVD;
- subch->packet_arrived = ATOMIC_INIT(0);
- spin_lock_init( &(subch->data_lock) );
- sv_init( &(subch->arrive_sv), &(subch->data_lock), SV_MON_SPIN | SV_ORDER_FIFO /* | SV_INTS */ );
- subch->tx_notify = NULL;
- /* (for now, drop elscuart packets in the kernel) */
- subch->rx_notify = brl1_discard_packet;
- subch->iqp = &sc->garbage_q;
- }
-
- /* assign system TTY channel (first free subchannel after each
- * processor's individual TTY channel has been assigned)
- */
- subch->use = BRL1_SUBCH_RSVD;
- subch->packet_arrived = ATOMIC_INIT(0);
- spin_lock_init( &(subch->data_lock) );
- sv_init( &(subch->arrive_sv), &subch->data_lock, SV_MON_SPIN | SV_ORDER_FIFO /* | SV_INTS */ );
- subch->tx_notify = NULL;
- if( sc->uart == BRL1_LOCALHUB_UART ) {
- subch->iqp = snia_kmem_zalloc_node( sizeof(sc_cq_t), KM_NOSLEEP, NASID_TO_COMPACT_NODEID(nasid) );
- ASSERT( subch->iqp );
- cq_init( subch->iqp );
- subch->rx_notify = NULL;
- }
- else {
- /* we shouldn't be getting console input from remote UARTs */
- subch->iqp = &sc->garbage_q;
- subch->rx_notify = brl1_discard_packet;
- }
- subch++; i++;
-
- /* "reserved" subchannels (0x05-0x0F); for now, throw away
- * incoming packets
- */
- for( ; i < 0x10; i++, subch++ ) {
- subch->use = BRL1_SUBCH_FREE;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = brl1_discard_packet;
- subch->iqp = &sc->garbage_q;
- }
-
- /* remaining subchannels are free */
- for( ; i < BRL1_NUM_SUBCHANS; i++, subch++ ) {
- subch->use = BRL1_SUBCH_FREE;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = brl1_discard_packet;
- subch->iqp = &sc->garbage_q;
- }
-
- /* initialize synchronization structures
- */
- spin_lock_init( &(sc->subch_lock) );
- spin_lock_init( &(sc->send_lock) );
- spin_lock_init( &(sc->recv_lock) );
-
- if( sc->uart == BRL1_LOCALHUB_UART ) {
- uart_init( sc, UART_BAUD_RATE );
- }
- else {
- rtr_uart_init( sc, UART_BAUD_RATE );
- }
-
- /* Set up remaining fields using L1 command functions-- elsc_module_get
- * to read the module id, elsc_debug_get to see whether or not we're
- * in verbose mode.
- */
- {
- extern int elsc_module_get(l1sc_t *);
-
- sc->modid = elsc_module_get( sc );
- sc->modid = (sc->modid < 0 ? INVALID_MODULE : sc->modid);
- sc->verbose = 1;
- }
-}
-
-/*********************************************************************
- * These are interrupt-related functions used in the kernel to service
- * the L1.
- */
-
-/*
- * brl1_intrd is the function which is called on a console interrupt.
- */
-
-#if defined(CONFIG_IA64_SGI_SN1)
-
-static void
-brl1_intrd(int irq, void *dev_id, struct pt_regs *stuff)
-{
- u_char isr_reg;
- l1sc_t *sc = get_elsc();
- int ret;
-
- L1_collectibles[L1C_INTERRUPTS]++;
- isr_reg = READ_L1_UART_REG(sc->nasid, REG_ISR);
-
- /* Save for callup args in console */
- sc->subch[SC_CONS_SYSTEM].irq_frame.bf_irq = irq;
- sc->subch[SC_CONS_SYSTEM].irq_frame.bf_dev_id = dev_id;
- sc->subch[SC_CONS_SYSTEM].irq_frame.bf_regs = stuff;
-
-#if defined(SYNC_CONSOLE_WRITE)
- while( isr_reg & ISR_RxRDY )
-#else
- while( isr_reg & (ISR_RxRDY | ISR_TxRDY) )
-#endif
- {
- if( isr_reg & ISR_RxRDY ) {
- L1_collectibles[L1C_OUR_R_INTERRUPTS]++;
- ret = brl1_receive(sc, SERIAL_INTERRUPT_MODE);
- if ( (ret != BRL1_VALID) && (ret != BRL1_NO_MESSAGE) && (ret != BRL1_PROTOCOL) && (ret != BRL1_CRC) )
- L1_collectibles[L1C_REC_STALLS] = ret;
- }
-#if !defined(SYNC_CONSOLE_WRITE)
- if( (isr_reg & ISR_TxRDY) || (sc->send_in_use && UART_PUTC_READY(sc->nasid)) ) {
- L1_collectibles[L1C_OUR_X_INTERRUPTS]++;
- brl1_send_cont(sc);
- }
-#endif /* SYNC_CONSOLE_WRITE */
- isr_reg = READ_L1_UART_REG(sc->nasid, REG_ISR);
- }
-}
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
-/*
- * Install a callback function for the system console subchannel
- * to allow an upper layer to be notified when the send buffer
- * has been emptied.
- */
-static inline void
-l1_tx_notif( brl1_notif_t func )
-{
- subch_set_tx_notify( &NODEPDA(NASID_TO_COMPACT_NODEID(get_master_nasid()))->module->elsc,
- SC_CONS_SYSTEM, func );
-}
-
-
-/*
- * Install a callback function for the system console subchannel
- * to allow an upper layer to be notified when a packet has been
- * received.
- */
-static inline void
-l1_rx_notif( brl1_notif_t func )
-{
- subch_set_rx_notify( &NODEPDA(NASID_TO_COMPACT_NODEID(get_master_nasid()))->module->elsc,
- SC_CONS_SYSTEM, func );
-}
-
-
-/* brl1_intr is called directly from the uart interrupt; after it runs, the
- * interrupt "daemon" xthread is signalled to continue.
- */
-void
-brl1_intr( void )
-{
-}
-
-#define BRL1_INTERRUPT_LEVEL 65 /* linux request_irq() value */
-
-/* Return the current interrupt level */
-
-//#define CONSOLE_POLLING_ALSO
-
-int
-l1_get_intr_value( void )
-{
-#ifdef CONSOLE_POLLING_ALSO
- return(0);
-#else
- return(BRL1_INTERRUPT_LEVEL);
-#endif
-}
-
-/* Disconnect the callup functions - throw away interrupts */
-
-void
-l1_unconnect_intr(void)
-{
- /* UnRegister the upper-level callup functions */
- l1_rx_notif((brl1_notif_t)NULL);
- l1_tx_notif((brl1_notif_t)NULL);
- /* We do NOT unregister the interrupts */
-}
-
-/* Set up uart interrupt handling for this node's uart */
-
-void
-l1_connect_intr(void *rx_notify, void *tx_notify)
-{
- l1sc_t *sc;
- nasid_t nasid;
-#if defined(CONFIG_IA64_SGI_SN1)
- int tmp;
-#endif
- nodepda_t *console_nodepda;
- int intr_connect_level(cpuid_t, int, ilvl_t, intr_func_t);
-
- if ( L1_interrupts_connected ) {
- /* Interrupts are connected, so just register the callups */
- l1_rx_notif((brl1_notif_t)rx_notify);
- l1_tx_notif((brl1_notif_t)tx_notify);
-
- L1_collectibles[L1C_CONNECT_CALLS]++;
- return;
- }
- else
- L1_interrupts_connected = 1;
-
- nasid = get_master_nasid();
- console_nodepda = NODEPDA(NASID_TO_COMPACT_NODEID(nasid));
- sc = &console_nodepda->module->elsc;
- sc->intr_cpu = console_nodepda->node_first_cpu;
-
-#if defined(CONFIG_IA64_SGI_SN1)
- if ( intr_connect_level(sc->intr_cpu, UART_INTR, INTPEND0_MAXMASK, (intr_func_t)brl1_intr) ) {
- L1_interrupts_connected = 0; /* FAILS !! */
- }
- else {
- void synergy_intr_connect(int, int);
-
- synergy_intr_connect(UART_INTR, sc->intr_cpu);
- L1_collectibles[L1C_R_IRQ]++;
- tmp = request_irq(BRL1_INTERRUPT_LEVEL, brl1_intrd, SA_INTERRUPT | SA_SHIRQ, "l1_protocol_driver", (void *)sc);
- L1_collectibles[L1C_R_IRQ_RET] = (uint64_t)tmp;
- if ( tmp ) {
- L1_interrupts_connected = 0; /* FAILS !! */
- }
- else {
- /* Register the upper-level callup functions */
- l1_rx_notif((brl1_notif_t)rx_notify);
- l1_tx_notif((brl1_notif_t)tx_notify);
-
- /* Set the uarts the way we like it */
- uart_enable_recv_intr( sc );
- uart_disable_xmit_intr( sc );
- }
- }
-#endif /* CONFIG_IA64_SGI_SN1 */
-}
-
-
-/* Set the line speed */
-
-void
-l1_set_baud(int baud)
-{
-#if 0
- nasid_t nasid;
- static void uart_init(l1sc_t *, int);
-#endif
-
- L1_collectibles[L1C_SET_BAUD]++;
-
-#if 0
- if ( L1_cons_is_inited ) {
- nasid = get_master_nasid();
- if ( NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module != (module_t *)0 )
- uart_init(&NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module->elsc, baud);
- }
-#endif
- return;
-}
-
-
-/* These are functions to use from serial_in/out when in protocol
- * mode to send and receive uart control regs. These are external
- * interfaces into the protocol driver.
- */
-
-void
-l1_control_out(int offset, int value)
-{
- nasid_t nasid = get_master_nasid();
- WRITE_L1_UART_REG(nasid, offset, value);
-}
-
-/* Console input exported interface. Return a register value. */
-
-int
-l1_control_in_polled(int offset)
-{
- static int l1_control_in_local(int, int);
-
- return(l1_control_in_local(offset, SERIAL_POLLED_MODE));
-}
-
-int
-l1_control_in(int offset)
-{
- static int l1_control_in_local(int, int);
-
- return(l1_control_in_local(offset, SERIAL_INTERRUPT_MODE));
-}
-
-static int
-l1_control_in_local(int offset, int mode)
-{
- nasid_t nasid;
- int ret, input;
- static int l1_poll(l1sc_t *, int);
-
- nasid = get_master_nasid();
- ret = READ_L1_UART_REG(nasid, offset);
-
- if ( offset == REG_LSR ) {
- ret |= (LSR_XHRE | LSR_XSRE); /* can send anytime */
- if ( L1_cons_is_inited ) {
- if ( NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module != (module_t *)0 ) {
- input = l1_poll(&NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module->elsc, mode);
- if ( input ) {
- ret |= LSR_RCA;
- }
- }
- }
- }
- return(ret);
-}
-
-/*
- * Console input exported interface. Return a character (if one is available)
- */
-
-int
-l1_serial_in_polled(void)
-{
- static int l1_serial_in_local(int mode);
-
- return(l1_serial_in_local(SERIAL_POLLED_MODE));
-}
-
-int
-l1_serial_in(void)
-{
- static int l1_serial_in_local(int mode);
-
- return(l1_serial_in_local(SERIAL_INTERRUPT_MODE));
-}
-
-static int
-l1_serial_in_local(int mode)
-{
- nasid_t nasid;
- l1sc_t *sc;
- int value;
- static int l1_getc( l1sc_t *, int );
- static inline l1sc_t *early_sc_init(nasid_t);
-
- nasid = get_master_nasid();
- sc = early_sc_init(nasid);
- if ( L1_cons_is_inited ) {
- if ( NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module != (module_t *)0 ) {
- sc = &NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module->elsc;
- }
- }
- value = l1_getc(sc, mode);
- return(value);
-}
-
-/* Console output exported interface. Write message to the console. */
-
-int
-l1_serial_out( char *str, int len )
-{
- nasid_t nasid = get_master_nasid();
- int l1_write(l1sc_t *, char *, int, int);
-
- if ( L1_cons_is_inited ) {
- if ( NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module != (module_t *)0 )
- return(l1_write(&NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module->elsc, str, len,
-#if defined(SYNC_CONSOLE_WRITE)
- 1
-#else
- !L1_interrupts_connected
-#endif
- ));
- }
- return(early_l1_serial_out(nasid, str, len, NOT_LOCKED));
-}
-
-
-/*
- * These are the 'early' functions - when we need to do things before we have
- * all the structs setup.
- */
-
-static l1sc_t Early_console; /* fake l1sc_t */
-static int Early_console_inited = 0;
-
-static void
-early_brl1_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart )
-{
- int i;
- brl1_sch_t *subch;
-
- bzero( sc, sizeof( *sc ) );
- sc->nasid = nasid;
- sc->uart = uart;
- sc->getc_f = (uart == BRL1_LOCALHUB_UART ? uart_getc : rtr_uart_getc);
- sc->putc_f = (uart == BRL1_LOCALHUB_UART ? uart_putc : rtr_uart_putc);
- sc->sol = 1;
- subch = sc->subch;
-
- /* initialize L1 subchannels
- */
-
- /* assign processor TTY channels */
- for( i = 0; i < CPUS_PER_NODE; i++, subch++ ) {
- subch->use = BRL1_SUBCH_RSVD;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = NULL;
- subch->iqp = &sc->garbage_q;
- }
-
- /* assign system TTY channel (first free subchannel after each
- * processor's individual TTY channel has been assigned)
- */
- subch->use = BRL1_SUBCH_RSVD;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = NULL;
- if( sc->uart == BRL1_LOCALHUB_UART ) {
- static sc_cq_t x_iqp;
-
- subch->iqp = &x_iqp;
- ASSERT( subch->iqp );
- cq_init( subch->iqp );
- }
- else {
- /* we shouldn't be getting console input from remote UARTs */
- subch->iqp = &sc->garbage_q;
- }
- subch++; i++;
-
- /* "reserved" subchannels (0x05-0x0F); for now, throw away
- * incoming packets
- */
- for( ; i < 0x10; i++, subch++ ) {
- subch->use = BRL1_SUBCH_FREE;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = NULL;
- subch->iqp = &sc->garbage_q;
- }
-
- /* remaining subchannels are free */
- for( ; i < BRL1_NUM_SUBCHANS; i++, subch++ ) {
- subch->use = BRL1_SUBCH_FREE;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = NULL;
- subch->iqp = &sc->garbage_q;
- }
-}
-
-static inline l1sc_t *
-early_sc_init(nasid_t nasid)
-{
- /* This is for early I/O */
- if ( Early_console_inited == 0 ) {
- early_brl1_init(&Early_console, nasid, BRL1_LOCALHUB_UART);
- Early_console_inited = 1;
- }
- return(&Early_console);
-}
-
-#define PUTCHAR(ch) \
- { \
- while( (!(READ_L1_UART_REG( nasid, REG_LSR ) & LSR_XHRE)) || \
- (!(READ_L1_UART_REG( nasid, REG_MSR ) & MSR_CTS)) ); \
- WRITE_L1_UART_REG( nasid, REG_DAT, (ch) ); \
- }
-
-static int
-early_l1_serial_out( nasid_t nasid, char *str, int len, int lock_state )
-{
- int ret, sent = 0;
- char *msg = str;
- static int early_l1_send( nasid_t nasid, char *str, int len, int lock_state );
-
- while ( sent < len ) {
- ret = early_l1_send(nasid, msg, len - sent, lock_state);
- sent += ret;
- msg += ret;
- }
- return(len);
-}
-
-static inline int
-early_l1_send( nasid_t nasid, char *str, int len, int lock_state )
-{
- int sent;
- char crc_char;
- unsigned short crc = INIT_CRC;
-
- if( len > (BRL1_QSIZE - 1) )
- len = (BRL1_QSIZE - 1);
-
- sent = len;
- if ( lock_state == NOT_LOCKED )
- lock_console(nasid);
-
- PUTCHAR( BRL1_FLAG_CH );
- PUTCHAR( BRL1_EVENT | SC_CONS_SYSTEM );
- crc = crc16_calc( crc, (BRL1_EVENT | SC_CONS_SYSTEM) );
-
- while( len ) {
-
- if( (*str == BRL1_FLAG_CH) || (*str == BRL1_ESC_CH) ) {
- PUTCHAR( BRL1_ESC_CH );
- PUTCHAR( (*str) ^ BRL1_XOR_CH );
- }
- else {
- PUTCHAR( *str );
- }
-
- crc = crc16_calc( crc, *str );
-
- str++; len--;
- }
-
- crc ^= 0xffff;
- crc_char = crc & 0xff;
- if( (crc_char == BRL1_ESC_CH) || (crc_char == BRL1_FLAG_CH) ) {
- crc_char ^= BRL1_XOR_CH;
- PUTCHAR( BRL1_ESC_CH );
- }
- PUTCHAR( crc_char );
- crc_char = (crc >> 8) & 0xff;
- if( (crc_char == BRL1_ESC_CH) || (crc_char == BRL1_FLAG_CH) ) {
- crc_char ^= BRL1_XOR_CH;
- PUTCHAR( BRL1_ESC_CH );
- }
- PUTCHAR( crc_char );
- PUTCHAR( BRL1_FLAG_CH );
-
- if ( lock_state == NOT_LOCKED )
- unlock_console(nasid);
- return sent;
-}
-
-
-/*********************************************************************
- * l1_cons functions
- *
- * These allow the L1 to act as the system console. They're intended
- * to abstract away most of the br/l1 internal details from the
- * _L1_cons_* functions (in the prom-- see "l1_console.c") and
- * l1_* functions (in the kernel-- see "sio_l1.c") that they support.
- *
- */
-
-static int
-l1_poll( l1sc_t *sc, int mode )
-{
- int ret;
-
- /* in case this gets called before the l1sc_t structure for the module_t
- * struct for this node is initialized (i.e., if we're called with a
- * zero l1sc_t pointer)...
- */
-
-
- if( !sc ) {
- return 0;
- }
-
- if( atomic_read(&sc->subch[SC_CONS_SYSTEM].packet_arrived) ) {
- return 1;
- }
-
- ret = brl1_receive( sc, mode );
- if ( (ret != BRL1_VALID) && (ret != BRL1_NO_MESSAGE) && (ret != BRL1_PROTOCOL) && (ret != BRL1_CRC) )
- L1_collectibles[L1C_REC_STALLS] = ret;
-
- if( atomic_read(&sc->subch[SC_CONS_SYSTEM].packet_arrived) ) {
- return 1;
- }
- return 0;
-}
-
-
-/* pull a character off of the system console queue (if one is available)
- */
-static int
-l1_getc( l1sc_t *sc, int mode )
-{
- unsigned long pl = 0;
- int c;
-
- brl1_sch_t *subch = &(sc->subch[SC_CONS_SYSTEM]);
- sc_cq_t *q = subch->iqp;
-
- if( !l1_poll( sc, mode ) ) {
- return 0;
- }
-
- SUBCH_DATA_LOCK( subch, pl );
- if( cq_empty( q ) ) {
- atomic_set(&subch->packet_arrived, 0);
- SUBCH_DATA_UNLOCK( subch, pl );
- return 0;
- }
- cq_rem( q, c );
- if( cq_empty( q ) )
- atomic_set(&subch->packet_arrived, 0);
- SUBCH_DATA_UNLOCK( subch, pl );
-
- return c;
-}
-
-/*
- * Write a message to the L1 on the system console subchannel.
- *
- * Danger: don't use a non-zero value for the wait parameter unless you're
- * someone important (like a kernel error message).
- */
-
-int
-l1_write( l1sc_t *sc, char *msg, int len, int wait )
-{
- int sent = 0, ret = 0;
-
- if ( wait ) {
- while ( sent < len ) {
- ret = brl1_send( sc, msg, len - sent, (SC_CONS_SYSTEM | BRL1_EVENT), wait );
- sent += ret;
- msg += ret;
- }
- ret = len;
- }
- else {
- ret = brl1_send( sc, msg, len, (SC_CONS_SYSTEM | BRL1_EVENT), wait );
- }
- return(ret);
-}
-
-/* initialize the system console subchannel
- */
-void
-l1_init(void)
-{
- /* All we do now is remember that we have been called */
- L1_cons_is_inited = 1;
-}
-
-
-/*********************************************************************
- * The following functions and definitions implement the "message"-
- * style interface to the L1 system controller.
- *
- * Note that throughout this file, "sc" generally stands for "system
- * controller", while "subchannels" tend to be represented by
- * variables with names like subch or ch.
- *
- */
-
-#ifdef L1_DEBUG
-#define L1_DBG_PRF(x) printf x
-#else
-#define L1_DBG_PRF(x)
-#endif
-
-/*
- * sc_data_ready is called to signal threads that are blocked on l1 input.
- */
-void
-sc_data_ready( int dummy0, void *dummy1, struct pt_regs *dummy2, l1sc_t *sc, int ch )
-{
- unsigned long pl = 0;
-
- brl1_sch_t *subch = &(sc->subch[ch]);
- SUBCH_DATA_LOCK( subch, pl );
- sv_signal( &(subch->arrive_sv) );
- SUBCH_DATA_UNLOCK( subch, pl );
-}
-
-/* sc_open reserves a subchannel to send a request to the L1 (the
- * L1's response will arrive on the same channel). The number
- * returned by sc_open is the system controller subchannel
- * acquired.
- */
-int
-sc_open( l1sc_t *sc, uint target )
-{
- /* The kernel version implements a locking scheme to arbitrate
- * subchannel assignment.
- */
- int ch;
- unsigned long pl = 0;
- brl1_sch_t *subch;
-
- SUBCH_LOCK( sc, pl );
-
- /* Look for a free subchannel. Subchannels 0-15 are reserved
- * for other purposes.
- */
- for( subch = &(sc->subch[BRL1_CMD_SUBCH]), ch = BRL1_CMD_SUBCH;
- ch < BRL1_NUM_SUBCHANS; subch++, ch++ ) {
- if( subch->use == BRL1_SUBCH_FREE )
- break;
- }
-
- if( ch == BRL1_NUM_SUBCHANS ) {
- /* there were no subchannels available! */
- SUBCH_UNLOCK( sc, pl );
- return SC_NSUBCH;
- }
-
- subch->use = BRL1_SUBCH_RSVD;
- SUBCH_UNLOCK( sc, pl );
-
- atomic_set(&subch->packet_arrived, 0);
- subch->target = target;
- spin_lock_init( &(subch->data_lock) );
- sv_init( &(subch->arrive_sv), &(subch->data_lock), SV_MON_SPIN | SV_ORDER_FIFO /* | SV_INTS */);
- subch->tx_notify = NULL;
- subch->rx_notify = sc_data_ready;
- subch->iqp = snia_kmem_zalloc_node( sizeof(sc_cq_t), KM_NOSLEEP,
- NASID_TO_COMPACT_NODEID(sc->nasid) );
- ASSERT( subch->iqp );
- cq_init( subch->iqp );
-
- return ch;
-}
-
-
-/* sc_close frees a Bedrock<->L1 subchannel.
- */
-int
-sc_close( l1sc_t *sc, int ch )
-{
- unsigned long pl = 0;
- brl1_sch_t *subch;
-
- SUBCH_LOCK( sc, pl );
- subch = &(sc->subch[ch]);
- if( subch->use != BRL1_SUBCH_RSVD ) {
- /* we're trying to close a subchannel that's not open */
- SUBCH_UNLOCK( sc, pl );
- return SC_NOPEN;
- }
-
- atomic_set(&subch->packet_arrived, 0);
- subch->use = BRL1_SUBCH_FREE;
-
- sv_broadcast( &(subch->arrive_sv) );
- sv_destroy( &(subch->arrive_sv) );
- spin_lock_destroy( &(subch->data_lock) );
-
- ASSERT( subch->iqp && (subch->iqp != &sc->garbage_q) );
- snia_kmem_free( subch->iqp, sizeof(sc_cq_t) );
- subch->iqp = &sc->garbage_q;
- subch->tx_notify = NULL;
- subch->rx_notify = brl1_discard_packet;
-
- SUBCH_UNLOCK( sc, pl );
-
- return SC_SUCCESS;
-}
-
-
-/* sc_construct_msg builds a bedrock-to-L1 request in the supplied
- * buffer. Returns the length of the message. The
- * safest course when passing a buffer to be filled in is to use
- * BRL1_QSIZE as the buffer size.
- *
- * Command arguments are passed as type/argument pairs, i.e., to
- * pass the number 5 as an argument to an L1 command, call
- * sc_construct_msg as follows:
- *
- * char msg[BRL1_QSIZE];
- * msg_len = sc_construct_msg( msg,
- * BRL1_QSIZE,
- * target_component,
- * L1_ADDR_TASK_BOGUSTASK,
- * L1_BOGUSTASK_REQ_BOGUSREQ,
- * 2,
- * L1_ARG_INT, 5 );
- *
- * To pass an additional ASCII argument, you'd do the following:
- *
- * char *str;
- * ... str points to a null-terminated ascii string ...
- * msg_len = sc_construct_msg( msg,
- * BRL1_QSIZE,
- * target_component,
- * L1_ADDR_TASK_BOGUSTASK,
- * L1_BOGUSTASK_REQ_BOGUSREQ,
- * 4,
- * L1_ARG_INT, 5,
- * L1_ARG_ASCII, str );
- *
- * Finally, arbitrary data of unknown type is passed using the argtype
- * code L1_ARG_UNKNOWN, a data length, and a buffer pointer, e.g.
- *
- * msg_len = sc_construct_msg( msg,
- * BRL1_QSIZE,
- * target_component,
- * L1_ADDR_TASK_BOGUSTASK,
- * L1_BOGUSTASK_REQ_BOGUSREQ,
- * 3,
- * L1_ARG_UNKNOWN, 32, bufptr );
- *
- * ...passes 32 bytes of data starting at bufptr. Note that no string or
- * "unknown"-type argument should be long enough to overflow the message
- * buffer.
- *
- * To construct a message for an L1 command that requires no arguments,
- * you'd use the following:
- *
- * msg_len = sc_construct_msg( msg,
- * BRL1_QSIZE,
- * target_component,
- * L1_ADDR_TASK_BOGUSTASK,
- * L1_BOGUSTASK_REQ_BOGUSREQ,
- * 0 );
- *
- * The final 0 means "no varargs". Notice that this parameter is used to hold
- * the number of additional arguments to sc_construct_msg, _not_ the actual
- * number of arguments used by the L1 command (so 2 per L1_ARG_[INT,ASCII]
- * type argument, and 3 per L1_ARG_UNKNOWN type argument). A call to construct
- * an L1 command which required three integer arguments and two arguments of
- * some arbitrary (unknown) type would pass 12 as the value for this parameter.
- *
- * ENDIANNESS WARNING: The following code does a lot of copying back-and-forth
- * between byte arrays and four-byte big-endian integers. Depending on the
- * system controller connection and endianness of future architectures, some
- * rewriting might be necessary.
- */
-int
-sc_construct_msg( l1sc_t *sc, /* system controller struct */
- int ch, /* subchannel for this message */
- char *msg, /* message buffer */
- int msg_len, /* size of message buffer */
- l1addr_t addr_task, /* target system controller task */
- short req_code, /* 16-bit request code */
- int req_nargs, /* # of arguments (varargs) passed */
- ... ) /* any additional parameters */
-{
- uint32_t buf32; /* 32-bit buffer used to bounce things around */
- void *bufptr; /* used to hold command argument addresses */
- va_list al; /* variable argument list */
- int index; /* current index into msg buffer */
- int argno; /* current position in varargs list */
- int l1_argno; /* running total of arguments to l1 */
- int l1_arg_t; /* argument type/length */
- int l1_argno_byte; /* offset of argument count byte */
-
- index = argno = 0;
-
- /* set up destination address */
- if( (msg_len -= sizeof( buf32 )) < 0 )
- return -1;
- L1_ADDRESS_TO_TASK( &buf32, sc->subch[ch].target, addr_task );
- COPY_INT_TO_BUFFER(msg, index, buf32);
-
- /* copy request code */
- if( (msg_len -= 2) < 0 )
- return( -1 );
- msg[index++] = ((req_code >> 8) & 0xff);
- msg[index++] = (req_code & 0xff);
-
- if( !req_nargs ) {
- return index;
- }
-
- /* reserve a byte for the argument count */
- if( (msg_len -= 1) < 0 )
- return( -1 );
- l1_argno_byte = index++;
- l1_argno = 0;
-
- /* copy additional arguments */
- va_start( al, req_nargs );
- while( argno < req_nargs ) {
- l1_argno++;
- l1_arg_t = va_arg( al, int ); argno++;
- switch( l1_arg_t )
- {
- case L1_ARG_INT:
- if( (msg_len -= (sizeof( buf32 ) + 1)) < 0 )
- return( -1 );
- msg[index++] = L1_ARG_INT;
- buf32 = (unsigned)va_arg( al, int ); argno++;
- COPY_INT_TO_BUFFER(msg, index, buf32);
- break;
-
- case L1_ARG_ASCII:
- bufptr = va_arg( al, char* ); argno++;
- if( (msg_len -= (strlen( bufptr ) + 2)) < 0 )
- return( -1 );
- msg[index++] = L1_ARG_ASCII;
- strcpy( (char *)&(msg[index]), (char *)bufptr );
- index += (strlen( bufptr ) + 1); /* include terminating null */
- break;
-
- case L1_ARG_UNKNOWN:
- {
- int arglen;
-
- arglen = va_arg( al, int ); argno++;
- bufptr = va_arg( al, void* ); argno++;
- if( (msg_len -= (arglen + 1)) < 0 )
- return( -1 );
- msg[index++] = L1_ARG_UNKNOWN | arglen;
- BCOPY( bufptr, &(msg[index]), arglen );
- index += arglen;
- break;
- }
-
- default: /* unhandled argument type */
- return -1;
- }
- }
-
- va_end( al );
- msg[l1_argno_byte] = l1_argno;
-
- return index;
-}
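For the simplest call shown in the comment above (a single L1_ARG_INT argument with value 5), the buffer built by this function lays out as a 4-byte target/task address, a 2-byte request code (most significant byte first), an argument-count byte of 1, an L1_ARG_INT tag byte and a 4-byte integer value, so the returned length is 12 bytes. This is a worked example for illustration, assuming COPY_INT_TO_BUFFER emits 4 bytes as the sizeof(buf32) accounting above implies.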
-
-
-
-/* sc_interpret_resp verifies an L1 response to a bedrock request, and
- * breaks the response data up into the constituent parts. If the
- * response message indicates error, or if a mismatch is found in the
- * expected number and type of arguments, an error is returned. The
- * arguments to this function work very much like the arguments to
- * sc_construct_msg, above, except that L1_ARG_INTs must be followed
- * by a _pointer_ to an integer that can be filled in by this function.
- */
-int
-sc_interpret_resp( char *resp, /* buffer received from L1 */
- int resp_nargs, /* number of _varargs_ passed in */
- ... )
-{
- uint32_t buf32; /* 32-bit buffer used to bounce things around */
- void *bufptr; /* used to hold response field addresses */
- va_list al; /* variable argument list */
- int index; /* current index into response buffer */
- int argno; /* current position in varargs list */
- int l1_fldno; /* number of resp fields received from l1 */
- int l1_fld_t; /* field type/length */
-
- index = argno = 0;
-
-#if defined(L1_DEBUG)
-#define DUMP_RESP \
- { \
- int ix; \
- char outbuf[512]; \
- sprintf( outbuf, "sc_interpret_resp error line %d: ", __LINE__ ); \
- for( ix = 0; ix < 16; ix++ ) { \
- sprintf( &outbuf[strlen(outbuf)], "%x ", resp[ix] ); \
- } \
- printk( "%s\n", outbuf ); \
- }
-#else
-#define DUMP_RESP
-#endif /* L1_DEBUG */
-
- /* check response code */
- COPY_BUFFER_TO_INT(resp, index, buf32);
- if( buf32 != L1_RESP_OK ) {
- DUMP_RESP;
- return buf32;
- }
-
- /* get number of response fields */
- l1_fldno = resp[index++];
-
- va_start( al, resp_nargs );
-
- /* copy out response fields */
- while( argno < resp_nargs ) {
- l1_fldno--;
- l1_fld_t = va_arg( al, int ); argno++;
- switch( l1_fld_t )
- {
- case L1_ARG_INT:
- if( resp[index++] != L1_ARG_INT ) {
- /* type mismatch */
- va_end( al );
- DUMP_RESP;
- return -1;
- }
- bufptr = va_arg( al, int* ); argno++;
- COPY_BUFFER_TO_BUFFER(resp, index, bufptr);
- break;
-
- case L1_ARG_ASCII:
- if( resp[index++] != L1_ARG_ASCII ) {
- /* type mismatch */
- va_end( al );
- DUMP_RESP;
- return -1;
- }
- bufptr = va_arg( al, char* ); argno++;
- strcpy( (char *)bufptr, (char *)&(resp[index]) );
- /* include terminating null */
- index += (strlen( &(resp[index]) ) + 1);
- break;
-
- default:
- if( (l1_fld_t & L1_ARG_UNKNOWN) == L1_ARG_UNKNOWN )
- {
- int *arglen;
-
- arglen = va_arg( al, int* ); argno++;
- bufptr = va_arg( al, void* ); argno++;
- *arglen = ((resp[index++] & ~L1_ARG_UNKNOWN) & 0xff);
- BCOPY( &(resp[index]), bufptr, *arglen );
- index += (*arglen);
- }
-
- else {
- /* unhandled type */
- va_end( al );
- DUMP_RESP;
- return -1;
- }
- }
- }
- va_end( al );
-
- if( (l1_fldno != 0) || (argno != resp_nargs) ) {
- /* wrong number of arguments */
- DUMP_RESP;
- return -1;
- }
- return 0;
-}
-
-
-
-
-/* sc_send takes as arguments a system controller struct, a
- * buffer which contains a Bedrock<->L1 "request" message,
- * the message length, and the subchannel (presumably obtained
- * from an earlier invocation of sc_open) over which the
- * message is to be sent. The final argument ("wait") indicates
- * whether the send is to be performed synchronously or not.
- *
- * sc_send returns either zero or an error value. Synchronous sends
- * (wait != 0) will not return until the data has actually been sent
- * to the UART. Synchronous sends generally receive privileged
- * treatment. The intent is that they be used sparingly, for such
- * purposes as kernel printf's (the "ducons" routines). Run-of-the-mill
- * console output and L1 requests should NOT use a non-zero value
- * for wait.
- */
-int
-sc_send( l1sc_t *sc, int ch, char *msg, int len, int wait )
-{
- char type_and_subch;
- int result;
-
- if( (ch < 0) || ( ch >= BRL1_NUM_SUBCHANS) ) {
- return SC_BADSUBCH;
- }
-
- /* Verify that this is an open subchannel
- */
- if( sc->subch[ch].use == BRL1_SUBCH_FREE ) {
- return SC_NOPEN;
- }
-
- type_and_subch = (BRL1_REQUEST | ((u_char)ch));
- result = brl1_send( sc, msg, len, type_and_subch, wait );
-
- /* If we sent as much as we asked to, return "ok". */
- if( result == len )
- return( SC_SUCCESS );
-
-    /* Or, if we sent less, then either the UART is busy or
- * we're trying to send too large a packet anyway.
- */
- else if( result >= 0 && result < len )
- return( SC_BUSY );
-
- /* Or, if something else went wrong (result < 0), then
- * return that error value.
- */
- else
- return( result );
-}
-
-
-
-/* subch_pull_msg pulls a message off the receive queue for subch
- * and places it in the buffer pointed to by msg. This routine should only
- * be called when the caller already knows a message is available on the
- * receive queue (and, in the kernel, only when the subchannel data lock
- * is held by the caller).
- */
-static void
-subch_pull_msg( brl1_sch_t *subch, char *msg, int *len )
-{
- sc_cq_t *q; /* receive queue */
- int before_wrap, /* packet may be split into two different */
- after_wrap; /* pieces to accommodate queue wraparound */
-
- /* pull message off the receive queue */
- q = subch->iqp;
-
- cq_rem( q, *len ); /* remove length byte and store */
- cq_discard( q ); /* remove type/subch byte and discard */
-
- if ( *len > 0 )
- (*len)--; /* don't count type/subch byte in length returned */
-
- if( (q->opos + (*len)) > BRL1_QSIZE ) {
- before_wrap = BRL1_QSIZE - q->opos;
- after_wrap = (*len) - before_wrap;
- }
- else {
- before_wrap = (*len);
- after_wrap = 0;
- }
-
- BCOPY( q->buf + q->opos, msg, before_wrap );
- if( after_wrap ) {
- BCOPY( q->buf, msg + before_wrap, after_wrap );
- q->opos = after_wrap;
- }
- else {
- q->opos = ((q->opos + before_wrap) & (BRL1_QSIZE - 1));
- }
- atomic_dec(&(subch->packet_arrived));
-}
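
The wraparound arithmetic above is easy to get wrong, so here is a small standalone sketch of the same split-copy idea. It is illustrative only: QSIZE is an example power-of-two size standing in for BRL1_QSIZE, and memcpy stands in for BCOPY.

#include <stdio.h>
#include <string.h>

#define QSIZE 128	/* example value; must be a power of two, like BRL1_QSIZE */

struct ring { char buf[QSIZE]; int opos; };

/* Copy "len" bytes out of the ring starting at r->opos, splitting the
 * copy in two when it crosses the end of the buffer -- the same idea
 * subch_pull_msg() uses above.
 */
static void ring_pull(struct ring *r, char *dst, int len)
{
	int before_wrap = QSIZE - r->opos;

	if (len <= before_wrap) {
		memcpy(dst, r->buf + r->opos, len);
		r->opos = (r->opos + len) & (QSIZE - 1);
	} else {
		memcpy(dst, r->buf + r->opos, before_wrap);
		memcpy(dst + before_wrap, r->buf, len - before_wrap);
		r->opos = len - before_wrap;
	}
}

int main(void)
{
	struct ring r = { .opos = QSIZE - 3 };
	char out[8];

	memcpy(r.buf + r.opos, "abc", 3);	/* tail of the message */
	memcpy(r.buf, "def", 3);		/* wrapped remainder   */
	ring_pull(&r, out, 6);
	out[6] = '\0';
	printf("%s\n", out);			/* prints "abcdef" */
	return 0;
}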
-
-
-/* sc_recv_poll can be called as a blocking or non-blocking function;
- * it attempts to pull a message off of the subchannel specified
- * in the argument list (ch).
- *
- * The "block" argument, if non-zero, is interpreted as a timeout
- * delay (to avoid permanent waiting).
- */
-
-int
-sc_recv_poll( l1sc_t *sc, int ch, char *msg, int *len, uint64_t block )
-{
- int is_msg = 0;
- unsigned long pl = 0;
- brl1_sch_t *subch = &(sc->subch[ch]);
-
- rtc_time_t exp_time = rtc_time() + block;
-
- /* sanity check-- make sure this is an open subchannel */
- if( subch->use == BRL1_SUBCH_FREE )
- return( SC_NOPEN );
-
- do {
-
- /* kick the next lower layer and see if it pulls anything in
- */
- brl1_receive( sc, SERIAL_POLLED_MODE );
- is_msg = atomic_read(&subch->packet_arrived);
-
- } while( block && !is_msg && (rtc_time() < exp_time) );
-
- if( !is_msg ) {
- /* no message and we didn't care to wait for one */
- return( SC_NMSG );
- }
-
- SUBCH_DATA_LOCK( subch, pl );
- subch_pull_msg( subch, msg, len );
- SUBCH_DATA_UNLOCK( subch, pl );
-
- return( SC_SUCCESS );
-}
-
-
-/* Like sc_recv_poll, sc_recv_intr can be called in either a blocking
- * or non-blocking mode. Rather than polling until an appointed timeout,
- * however, sc_recv_intr sleeps on a synchronization variable until a
- * signal from the lower layer tells us that a packet has arrived.
- *
- * sc_recv_intr can't be used with remote (router) L1s.
- */
-int
-sc_recv_intr( l1sc_t *sc, int ch, char *msg, int *len, uint64_t block )
-{
- int is_msg = 0;
- unsigned long pl = 0;
- brl1_sch_t *subch = &(sc->subch[ch]);
-
- do {
- SUBCH_DATA_LOCK(subch, pl);
- is_msg = atomic_read(&subch->packet_arrived);
- if( !is_msg && block ) {
- /* wake me when you've got something */
- subch->rx_notify = sc_data_ready;
- sv_wait( &(subch->arrive_sv), 0, 0);
- if( subch->use == BRL1_SUBCH_FREE ) {
- /* oops-- somebody closed our subchannel while we were
- * sleeping!
- */
-
- /* no need to unlock since the channel's closed anyhow */
- return( SC_NOPEN );
- }
- }
- } while( !is_msg && block );
-
- if( !is_msg ) {
- /* no message and we didn't care to wait for one */
- SUBCH_DATA_UNLOCK( subch, pl );
- return( SC_NMSG );
- }
-
- subch_pull_msg( subch, msg, len );
- SUBCH_DATA_UNLOCK( subch, pl );
-
- return( SC_SUCCESS );
-}
-
-/* sc_command implements a (blocking) combination of sc_send and sc_recv.
- * It is intended to be the SN1 equivalent of SN0's "elsc_command", which
- * issued a system controller command and then waited for a response from
- * the system controller before returning.
- *
- * cmd points to the outgoing command; resp points to the buffer in
- * which the response is to be stored. Both buffers are assumed to
- * be the same length; if there is any doubt as to whether the
- * response buffer is long enough to hold the L1's response, then
- * make it BRL1_QSIZE bytes-- no Bedrock<->L1 message can be any
- * bigger.
- *
- * Be careful using the same buffer for both cmd and resp; it could get
- * hairy if there were ever an L1 command request that spanned multiple
- * packets. (On the other hand, that would require some additional
- * rewriting of the L1 command interface anyway.)
- */
-#define __RETRIES 50
-#define __WAIT_SEND 1 // ( sc->uart != BRL1_LOCALHUB_UART )
-#define __WAIT_RECV 10000000
-
-
-int
-sc_command( l1sc_t *sc, int ch, char *cmd, char *resp, int *len )
-{
-#ifndef CONFIG_SERIAL_SGI_L1_PROTOCOL
- return SC_NMSG;
-#else
- int result;
- int retries;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return SC_NMSG;
-
- retries = __RETRIES;
-
- while( (result = sc_send( sc, ch, cmd, *len, __WAIT_SEND )) < 0 ) {
- if( result == SC_BUSY ) {
- retries--;
- if( retries <= 0 )
- return result;
- uart_delay(500);
- }
- else {
- return result;
- }
- }
-
- /* block on sc_recv_* */
- if( (sc->uart == BRL1_LOCALHUB_UART) && L1_interrupts_connected ) {
- return( sc_recv_intr( sc, ch, resp, len, __WAIT_RECV ) );
- }
- else {
- return( sc_recv_poll( sc, ch, resp, len, __WAIT_RECV ) );
- }
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
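
The elsc_*/iobrick_* helpers in the companion l1_command.c (deleted below) all wrap sc_command in the same five-step sequence: open a subchannel, construct a request, issue it, close the subchannel, interpret the response. A condensed, hedged sketch of that pattern follows; the opcode is only an example, and the error codes mirror the ones used in those helpers.

/* Illustrative only -- the canonical sc_command() call sequence. */
static int example_l1_request(l1sc_t *sc)
{
	char msg[BRL1_QSIZE];	/* shared request/response buffer */
	int  subch, len;

	bzero(msg, BRL1_QSIZE);
	if ((subch = sc_open(sc, L1_ADDR_LOCAL)) < 0)
		return ELSC_ERROR_CMD_SEND;

	/* L1_REQ_FW_REV used purely as an example opcode, no arguments */
	if ((len = sc_construct_msg(sc, subch, msg, BRL1_QSIZE,
				    L1_ADDR_TASK_GENERAL,
				    L1_REQ_FW_REV, 0)) < 0) {
		sc_close(sc, subch);
		return ELSC_ERROR_CMD_ARGS;
	}

	if (sc_command(sc, subch, msg, msg, &len) < 0) {
		sc_close(sc, subch);
		return ELSC_ERROR_CMD_SEND;
	}
	sc_close(sc, subch);

	return (sc_interpret_resp(msg, 0) < 0) ? ELSC_ERROR_RESP_FORMAT : 0;
}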
-
-/* sc_command_kern is a knuckle-dragging, no-patience version of sc_command
- * used in situations where the kernel has a command that shouldn't be
- * delayed until the send buffer clears. sc_command should be used instead
- * under most circumstances.
- */
-
-int
-sc_command_kern( l1sc_t *sc, int ch, char *cmd, char *resp, int *len )
-{
-#ifndef CONFIG_SERIAL_SGI_L1_PROTOCOL
- return SC_NMSG;
-#else
- int result;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return SC_NMSG;
-
- if( (result = sc_send( sc, ch, cmd, *len, 1 )) < 0 ) {
- return result;
- }
-
- return( sc_recv_poll( sc, ch, resp, len, __WAIT_RECV ) );
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-
-/* sc_poll checks the queue corresponding to the given
- * subchannel to see if there's anything available. If
- * not, it kicks the brl1 layer and then checks again.
- *
- * Returns 1 if input is available on the given queue,
- * 0 otherwise.
- */
-
-int
-sc_poll( l1sc_t *sc, int ch )
-{
- brl1_sch_t *subch = &(sc->subch[ch]);
-
- if( atomic_read(&subch->packet_arrived) )
- return 1;
-
- brl1_receive( sc, SERIAL_POLLED_MODE );
-
- if( atomic_read(&subch->packet_arrived) )
- return 1;
-
- return 0;
-}
-
-/* for now, sc_init just calls brl1_init */
-
-void
-sc_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart )
-{
- if ( !IS_RUNNING_ON_SIMULATOR() )
- brl1_init( sc, nasid, uart );
-}
-
-/* sc_dispatch_env_event handles events sent from the system control
- * network's environmental monitor tasks.
- */
-
-#if defined(LINUX_KERNEL_THREADS)
-
-static void
-sc_dispatch_env_event( uint code, int argc, char *args, int maxlen )
-{
- int j, i = 0;
- uint32_t ESPcode;
-
- switch( code ) {
- /* for now, all codes do the same thing: grab two arguments
- * and print a cmn_err_tag message */
- default:
- /* check number of arguments */
- if( argc != 2 ) {
- L1_DBG_PRF(( "sc_dispatch_env_event: "
- "expected 2 arguments, got %d\n", argc ));
- return;
- }
-
- /* get ESP code (integer argument) */
- if( args[i++] != L1_ARG_INT ) {
- L1_DBG_PRF(( "sc_dispatch_env_event: "
- "expected integer argument\n" ));
- return;
- }
- /* WARNING: highly endian */
- COPY_BUFFER_TO_INT(args, i, ESPcode);
-
- /* verify string argument */
- if( args[i++] != L1_ARG_ASCII ) {
- L1_DBG_PRF(( "sc_dispatch_env_event: "
- "expected an ASCII string\n" ));
- return;
- }
- for( j = i; j < maxlen; j++ ) {
- if( args[j] == '\0' ) break; /* found string termination */
- }
- if( j == maxlen ) {
- j--;
- L1_DBG_PRF(( "sc_dispatch_env_event: "
- "message too long-- truncating\n" ));
- }
-
- /* strip out trailing cr/lf */
- for( ;
- j > 1 && ((args[j-1] == 0xd) || (args[j-1] == 0xa));
- j-- );
- args[j] = '\0';
-
- /* strip out leading cr/lf */
- for( ;
- i < j && ((args[i] == 0xd) || (args[i] == 0xa));
- i++ );
- }
-}
-
-
-/* sc_event waits for events to arrive from the system controller, and
- * prints appropriate messages to the syslog.
- */
-
-static void
-sc_event( l1sc_t *sc, int ch )
-{
- char event[BRL1_QSIZE];
- int i;
- int result;
- int event_len;
- uint32_t ev_src;
- uint32_t ev_code;
- int ev_argc;
-
- while(1) {
-
- bzero( event, BRL1_QSIZE );
-
- /*
- * wait for an event
- */
- result = sc_recv_intr( sc, ch, event, &event_len, 1 );
- if( result != SC_SUCCESS ) {
- printk(KERN_WARNING "Error receiving sysctl event on nasid %d\n",
- sc->nasid );
- }
- else {
- /*
- * an event arrived; break it down into useful pieces
- */
-#if defined(L1_DEBUG) && 0
- int ix;
- printf( "Event packet received:\n" );
- for (ix = 0; ix < 64; ix++) {
- printf( "%x%x ", ((event[ix] >> 4) & ((uint64_t)0xf)),
- (event[ix] & ((uint64_t)0xf)) );
- if( (ix % 16) == 0xf ) printf( "\n" );
- }
-#endif /* L1_DEBUG */
-
- i = 0;
-
- /* get event source */
- COPY_BUFFER_TO_INT(event, i, ev_src);
- COPY_BUFFER_TO_INT(event, i, ev_code);
-
- /* get arg count */
- ev_argc = (event[i++] & 0xffUL);
-
- /* dispatch events by task */
- switch( (ev_src & L1_ADDR_TASK_MASK) >> L1_ADDR_TASK_SHFT )
- {
- case L1_ADDR_TASK_ENV: /* environmental monitor event */
- sc_dispatch_env_event( ev_code, ev_argc, &(event[i]),
- BRL1_QSIZE - i );
- break;
-
- default: /* unhandled task type */
- L1_DBG_PRF(( "Unhandled event type received from system "
- "controllers: source task %x\n",
- (ev_src & L1_ADDR_TASK_MASK) >> L1_ADDR_TASK_SHFT
- ));
- }
- }
-
- }
-}
-
-/* sc_listen sets up a service thread to listen for incoming events.
- */
-
-void
-sc_listen( l1sc_t *sc )
-{
- int result;
- unsigned long pl = 0;
- brl1_sch_t *subch;
-
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int ch; /* system controller subchannel used */
-
- extern int msc_shutdown_pri;
-
- /* grab the designated "event subchannel" */
- SUBCH_LOCK( sc, pl );
- subch = &(sc->subch[BRL1_EVENT_SUBCH]);
- if( subch->use != BRL1_SUBCH_FREE ) {
- SUBCH_UNLOCK( sc, pl );
- printk(KERN_WARNING "sysctl event subchannel in use! "
- "Not monitoring sysctl events.\n" );
- return;
- }
- subch->use = BRL1_SUBCH_RSVD;
- SUBCH_UNLOCK( sc, pl );
-
- atomic_set(&subch->packet_arrived, 0);
- subch->target = BRL1_LOCALHUB_UART;
- spin_lock_init( &(subch->data_lock) );
- sv_init( &(subch->arrive_sv), &(subch->data_lock), SV_MON_SPIN | SV_ORDER_FIFO /* | SV_INTS */);
- subch->tx_notify = NULL;
- subch->rx_notify = sc_data_ready;
- subch->iqp = snia_kmem_zalloc_node( sizeof(sc_cq_t), KM_NOSLEEP,
- NASID_TO_COMPACT_NODEID(sc->nasid) );
- ASSERT( subch->iqp );
- cq_init( subch->iqp );
-
- /* set up a thread to listen for events */
- sthread_create( "sysctl event handler", 0, 0, 0, msc_shutdown_pri,
- KT_PS, (st_func_t *) sc_event,
- (void *)sc, (void *)(uint64_t)BRL1_EVENT_SUBCH, 0, 0 );
-
- /* signal the L1 to begin sending events */
- bzero( msg, BRL1_QSIZE );
- ch = sc_open( sc, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( sc, ch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_EVENT_SUBCH, 2,
- L1_ARG_INT, BRL1_EVENT_SUBCH )) < 0 )
- {
- sc_close( sc, ch );
- L1_DBG_PRF(( "Failure in sc_construct_msg (%d)\n", len ));
- goto err_return;
- }
-
- result = sc_command_kern( sc, ch, msg, msg, &len );
- if( result < 0 )
- {
- sc_close( sc, ch );
- L1_DBG_PRF(( "Failure in sc_command_kern (%d)\n", result ));
- goto err_return;
- }
-
- sc_close( sc, ch );
-
- result = sc_interpret_resp( msg, 0 );
- if( result < 0 )
- {
- L1_DBG_PRF(( "Failure in sc_interpret_resp (%d)\n", result ));
- goto err_return;
- }
-
- /* everything went fine; just return */
- return;
-
-err_return:
- /* there was a problem; complain */
- printk(KERN_WARNING "failed to set sysctl event-monitoring subchannel. "
- "Sysctl events will not be monitored.\n" );
-}
-
-#endif /* LINUX_KERNEL_THREADS */
diff --git a/arch/ia64/sn/io/l1_command.c b/arch/ia64/sn/io/l1_command.c
deleted file mode 100644
index 3da21f1d7da233..00000000000000
--- a/arch/ia64/sn/io/l1_command.c
+++ /dev/null
@@ -1,1378 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000 - 2001 Silicon Graphics, Inc.
- * All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/router.h>
-#include <asm/sn/module.h>
-#include <asm/sn/ksys/l1.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/clksupport.h>
-
-#define ELSC_TIMEOUT 1000000 /* ELSC response timeout (usec) */
-#define LOCK_TIMEOUT 5000000 /* Hub lock timeout (usec) */
-
-#define LD(x) (*(volatile uint64_t *)(x))
-#define SD(x, v) (LD(x) = (uint64_t) (v))
-
-#define hub_cpu_get() 0
-
-#define LBYTE(caddr) (*(char *) caddr)
-
-extern char *bcopy(const char * src, char * dest, int count);
-
-#define LDEBUG 0
-
-/*
- * ELSC data is in NVRAM page 7 at the following offsets.
- */
-
-#define NVRAM_MAGIC_AD 0x700 /* magic number used for init */
-#define NVRAM_PASS_WD 0x701 /* password (4 bytes in length) */
-#define NVRAM_DBG1 0x705 /* virtual XOR debug switches */
-#define NVRAM_DBG2 0x706 /* physical XOR debug switches */
-#define NVRAM_CFG 0x707 /* ELSC Configuration info */
-#define NVRAM_MODULE 0x708 /* system module number */
-#define NVRAM_BIST_FLG 0x709 /* BIST flags (2 bits per nodeboard) */
-#define NVRAM_PARTITION 0x70a /* module's partition id */
-#define NVRAM_DOMAIN 0x70b /* module's domain id */
-#define NVRAM_CLUSTER 0x70c /* module's cluster id */
-#define NVRAM_CELL 0x70d /* module's cellid */
-
-#define NVRAM_MAGIC_NO 0x37 /* value of magic number */
-#define NVRAM_SIZE 16 /* 16 bytes in nvram */
-
-/*
- * Declare a static ELSC NVRAM buffer to hold all data read from
- * and written to NVRAM. This nvram "cache" will be used only during the
- * IP27prom execution.
- */
-static char elsc_nvram_buffer[NVRAM_SIZE];
-
-#define SC_COMMAND sc_command
-
-/*
- * elsc_init
- *
- * Initialize ELSC structure
- */
-
-void elsc_init(elsc_t *e, nasid_t nasid)
-{
- sc_init((l1sc_t *)e, nasid, BRL1_LOCALHUB_UART);
-}
-
-
-/*
- * elsc_errmsg
- *
- * Given a negative error code,
- * returns a corresponding static error string.
- */
-
-char *elsc_errmsg(int code)
-{
- switch (code) {
- case ELSC_ERROR_CMD_SEND:
- return "Command send error";
- case ELSC_ERROR_CMD_CHECKSUM:
- return "Command packet checksum error";
- case ELSC_ERROR_CMD_UNKNOWN:
- return "Unknown command";
- case ELSC_ERROR_CMD_ARGS:
- return "Invalid command argument(s)";
- case ELSC_ERROR_CMD_PERM:
- return "Permission denied";
- case ELSC_ERROR_RESP_TIMEOUT:
- return "System controller response timeout";
- case ELSC_ERROR_RESP_CHECKSUM:
- return "Response packet checksum error";
- case ELSC_ERROR_RESP_FORMAT:
- return "Response format error";
- case ELSC_ERROR_RESP_DIR:
- return "Response direction error";
- case ELSC_ERROR_MSG_LOST:
- return "Message lost because queue is full";
- case ELSC_ERROR_LOCK_TIMEOUT:
- return "Timed out getting ELSC lock";
- case ELSC_ERROR_DATA_SEND:
- return "Error sending data";
- case ELSC_ERROR_NIC:
- return "NIC protocol error";
- case ELSC_ERROR_NVMAGIC:
- return "Bad magic number in NVRAM";
- case ELSC_ERROR_MODULE:
- return "Module location protocol error";
- default:
- return "Unknown error";
- }
-}
-
-/*
- * elsc_nvram_init
- *
- * Initializes reads and writes to NVRAM. This will perform a single
- * read to NVRAM, getting all data at once. When the PROM tries to
- * read NVRAM, it returns the data from the buffer being read. If the
- * PROM tries to write out to NVRAM, the write is done, and the internal
- * buffer is updated.
- */
-
-void elsc_nvram_init(nasid_t nasid, uchar_t *elsc_nvram_data)
-{
- /* This might require implementation of multiple-packet request/responses
- * if it's to provide the same behavior that was available in SN0.
- */
- nasid = nasid;
- elsc_nvram_data = elsc_nvram_data;
-}
-
-/*
- * elsc_nvram_copy
- *
- * Copies the content of a buffer into the static buffer in this library.
- */
-
-void elsc_nvram_copy(uchar_t *elsc_nvram_data)
-{
- memcpy(elsc_nvram_buffer, elsc_nvram_data, NVRAM_SIZE);
-}
-
-/*
- * elsc_nvram_write
- *
- * Copies bytes from 'buf' into NVRAM, starting at NVRAM address
- * 'addr' which must be between 0 and 2047.
- *
- * If 'len' is non-negative, the routine copies 'len' bytes.
- *
- * If 'len' is negative, the routine treats the data as a string and
- * copies bytes up to and including a NUL-terminating zero, but not
- * to exceed '-len' bytes.
- */
-
-int elsc_nvram_write(elsc_t *e, int addr, char *buf, int len)
-{
- /* Here again, we might need to work out the details of a
- * multiple-packet protocol.
- */
-
- /* For now, pretend it worked. */
- e = e;
- addr = addr;
- buf = buf;
- return (len < 0 ? -len : len);
-}
-
-/*
- * elsc_nvram_read
- *
- * Copies bytes from NVRAM into 'buf', starting at NVRAM address
- * 'addr' which must be between 0 and 2047.
- *
- * If 'len' is non-negative, the routine copies 'len' bytes.
- *
- * If 'len' is negative, the routine treats the data as a string and
- * copies bytes up to and including a NUL-terminating zero, but not
- * to exceed '-len' bytes. NOTE: This method is no longer supported.
- * It was never used in the first place.
- */
-
-int elsc_nvram_read(elsc_t *e, int addr, char *buf, int len)
-{
- /* multiple packets? */
- e = e;
- addr = addr;
- buf = buf;
- len = len;
- return -1;
-}
-
-
-/*
- * Command Set
- */
-
-int elsc_version(elsc_t *e, char *result)
-{
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
- int major, /* major rev number */
- minor, /* minor rev number */
- bugfix; /* bugfix rev number */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_FW_REV, 0 )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( (l1sc_t *)e, subch, msg, msg, &len ) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( (l1sc_t *)e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 6, L1_ARG_INT, &major,
- L1_ARG_INT, &minor, L1_ARG_INT, &bugfix )
- < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- sprintf( result, "%d.%d.%d", major, minor, bugfix );
-
- return 0;
-}
-
-int elsc_debug_set(elsc_t *e, u_char byte1, u_char byte2)
-{
- /* shush compiler */
- e = e;
- byte1 = byte1;
- byte2 = byte2;
-
- /* fill in a buffer with the opcode & params; call sc_command */
-
- return 0;
-}
-
-int elsc_debug_get(elsc_t *e, u_char *byte1, u_char *byte2)
-{
- char msg[BRL1_QSIZE];
- int subch; /* system controller subchannel used */
- int dbg_sw; /* holds debug switch settings */
- int len; /* number of msg buffer bytes used */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL )) < 0 ) {
- return( ELSC_ERROR_CMD_SEND );
- }
-
- if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RDBG, 0 ) ) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( (l1sc_t *)e, subch, msg, msg, &len ) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( (l1sc_t *)e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_INT, &dbg_sw ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* copy out debug switch settings (last two bytes of the
- * integer response)
- */
- *byte1 = ((dbg_sw >> 8) & 0xFF);
- *byte2 = (dbg_sw & 0xFF);
-
- return 0;
-}
-
-
-/*
- * elsc_rack_bay_get fills in the two int * arguments with the
- * rack number and bay number of the L1 being addressed
- */
-int elsc_rack_bay_get(elsc_t *e, uint *rack, uint *bay)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
- uint32_t buf32; /* used to copy 32-bit rack/bay out of msg */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL )) < 0 ) {
- return( ELSC_ERROR_CMD_SEND );
- }
-
- if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RRACK, 0 )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
-
- /* send the request to the L1 */
- if( sc_command( (l1sc_t *)e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close(e, subch);
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_INT, &buf32 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* extract rack/bay info
- *
- * note that the 32-bit value returned by the L1 actually
- * only uses the low-order sixteen bits for rack and bay
- * information. A "normal" L1 address puts rack and bay
- * information in bit positions 12 through 28. So if
- * we initially shift the value returned 12 bits to the left,
- * we can use the L1 addressing #define's to extract the
- * values we need (see ksys/l1.h for a complete list of the
- * various fields of an L1 address).
- */
- buf32 <<= L1_ADDR_BAY_SHFT;
-
- *rack = (buf32 & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
- *bay = (buf32 & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
-
- return 0;
-}
-
-
-/* elsc_rack_bay_type_get fills in the three int * arguments with the
- * rack number, bay number and brick type of the L1 being addressed. Note
- * that if the L1 operation fails and this function returns an error value,
- * garbage may be written to brick_type.
- */
-int elsc_rack_bay_type_get( l1sc_t *sc, uint *rack,
- uint *bay, uint *brick_type )
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
- uint32_t buf32; /* used to copy 32-bit rack & bay out of msg */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( sc, L1_ADDR_LOCAL )) < 0 ) {
- return ELSC_ERROR_CMD_SEND;
- }
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RRBT, 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( sc, subch, msg, msg, &len ) ) {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 4, L1_ARG_INT, &buf32,
- L1_ARG_INT, brick_type ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* extract rack/bay info
- *
- * note that the 32-bit value returned by the L1 actually
- * only uses the low-order sixteen bits for rack and bay
- * information. A "normal" L1 address puts rack and bay
- * information in bit positions 12 through 28. So if
- * we initially shift the value returned 12 bits to the left,
- * we can use the L1 addressing #define's to extract the
- * values we need (see ksys/l1.h for a complete list of the
- * various fields of an L1 address).
- */
- buf32 <<= L1_ADDR_BAY_SHFT;
-
- *rack = (buf32 & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
- *bay = (buf32 & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
-
- /* convert brick_type to lower case */
- *brick_type = *brick_type - 'A' + 'a';
-
- return 0;
-}
-
-
-int elsc_module_get(elsc_t *e)
-{
- extern char brick_types[];
- uint rnum, rack, bay, bricktype, t;
- int ret;
-
- /* construct module ID from rack and slot info */
-
- if ((ret = elsc_rack_bay_type_get(e, &rnum, &bay, &bricktype)) < 0) {
- return ret;
- }
-
- /* report unset location info. with a special, otherwise invalid modid */
- if (rnum == 0 && bay == 0)
- return MODULE_NOT_SET;
-
- if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
- return ELSC_ERROR_MODULE;
-
- /* Build a moduleid_t-compatible rack number */
-
- rack = 0;
- t = rnum / 100; /* rack class (CPU/IO) */
- if (t > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_CLASS(rack, t);
- rnum %= 100;
-
- t = rnum / 10; /* rack group */
- if (t > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_GROUP(rack, t);
-
- t = rnum % 10; /* rack number (one-based) */
- if (t-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_NUM(rack, t);
-
- for( t = 0; t < MAX_BRICK_TYPES; t++ ) {
- if( brick_types[t] == bricktype )
- return RBT_TO_MODULE(rack, bay, t);
- }
-
- return ELSC_ERROR_MODULE;
-}
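
The rack-number decomposition above (hundreds digit for class, tens digit for group, ones digit for the one-based rack number) is repeated by router_module_get and iobrick_module_get further down. A standalone sketch of just that arithmetic, with the RACK_ADD_* packing macros omitted since their field layout lives in the headers:

#include <stdio.h>

int main(void)
{
	unsigned rnum = 127;		/* example rack number */
	unsigned rclass, rgroup, rnumber;

	rclass  = rnum / 100;		/* rack class (CPU/IO)       */
	rnum   %= 100;
	rgroup  = rnum / 10;		/* rack group                */
	rnumber = rnum % 10;		/* rack number (one-based)   */

	printf("class %u, group %u, number %u\n", rclass, rgroup, rnumber);
	/* prints: class 1, group 2, number 7 */
	return 0;
}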
-
-int elsc_partition_set(elsc_t *e, int partition)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
- return ELSC_ERROR_CMD_SEND;
- }
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_PARTITION_SET, 2,
- L1_ARG_INT, partition )) < 0 )
- {
-
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return( 0 );
-}
-
-int elsc_partition_get(elsc_t *e)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
- uint32_t partition_id; /* used to copy partition id out of msg */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
- return ELSC_ERROR_CMD_SEND;
- }
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_PARTITION_GET, 0 )) < 0 )
-
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_INT, &partition_id ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return( partition_id );
-}
-
-
-/*
- * elsc_cons_subch selects the "active" console subchannel for this node
- * (i.e., the one that will currently receive input)
- */
-int elsc_cons_subch(elsc_t *e, uint ch)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( e, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_CONS_SUBCH, 2,
- L1_ARG_INT, ch)) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-
-/*
- * elsc_cons_node should only be executed by one node. It declares to
- * the system controller that the node from which it is called will be
- * the owner of the system console.
- */
-int elsc_cons_node(elsc_t *e)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( e, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_CONS_NODE, 0 )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-
-/* elsc_display_line writes up to 12 characters to either the top or bottom
- * line of the L1 display. line points to a buffer containing the message
- * to be displayed. The zero-based line number is specified by lnum (so
- * lnum == 0 specifies the top line and lnum == 1 specifies the bottom).
- * Lines longer than 12 characters, or line numbers not less than
- * L1_DISPLAY_LINES, cause elsc_display_line to return an error.
- */
-int elsc_display_line(elsc_t *e, char *line, int lnum)
-{
- char msg[BRL1_QSIZE];
- int subch; /* system controller subchannel used */
- int len; /* number of msg buffer bytes used */
-
- /* argument sanity checking */
- if( !(lnum < L1_DISPLAY_LINES) )
- return( ELSC_ERROR_CMD_ARGS );
- if( !(strlen( line ) <= L1_DISPLAY_LINE_LENGTH) )
- return( ELSC_ERROR_CMD_ARGS );
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- (L1_REQ_DISP1+lnum), 2,
- L1_ARG_ASCII, line )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( (l1sc_t *)e, subch, msg, msg, &len ) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( (l1sc_t *)e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-
-/* elsc_display_mesg silently drops message characters beyond the 12th.
- */
-int elsc_display_mesg(elsc_t *e, char *chr)
-{
-
- char line[L1_DISPLAY_LINE_LENGTH+1];
- int numlines, i;
- int result;
-
- numlines = (strlen( chr ) + L1_DISPLAY_LINE_LENGTH - 1) /
- L1_DISPLAY_LINE_LENGTH;
-
- if( numlines > L1_DISPLAY_LINES )
- numlines = L1_DISPLAY_LINES;
-
- for( i = 0; i < numlines; i++ )
- {
- strncpy( line, chr, L1_DISPLAY_LINE_LENGTH );
- line[L1_DISPLAY_LINE_LENGTH] = '\0';
-
- /* generally we want to leave the first line of the L1 display
- * alone (so the L1 can manipulate it). If you need to be able
- * to display to both lines (for debugging purposes), define
- * L1_DISP_2LINES in irix/kern/ksys/l1.h, or add -DL1_DISP_2LINES
- * to your 'defs file.
- */
-#if defined(L1_DISP_2LINES)
- if( (result = elsc_display_line( e, line, i )) < 0 )
-#else
- if( (result = elsc_display_line( e, line, i+1 )) < 0 )
-#endif
-
- return result;
-
- chr += L1_DISPLAY_LINE_LENGTH;
- }
-
- return 0;
-}
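
To make the splitting logic above concrete, here is a small user-space sketch; LINE_LEN and NUM_LINES are example stand-ins for L1_DISPLAY_LINE_LENGTH and L1_DISPLAY_LINES, and the message text is arbitrary.

#include <stdio.h>
#include <string.h>

#define LINE_LEN  12	/* stands in for L1_DISPLAY_LINE_LENGTH */
#define NUM_LINES 2	/* stands in for L1_DISPLAY_LINES       */

/* Chop a message into at most NUM_LINES chunks of LINE_LEN characters,
 * the way elsc_display_mesg() does; excess characters are dropped.
 */
int main(void)
{
	const char *chr = "POWER ON SELF TEST OK";	/* 21 characters */
	char line[LINE_LEN + 1];
	int numlines, i;

	numlines = (strlen(chr) + LINE_LEN - 1) / LINE_LEN;
	if (numlines > NUM_LINES)
		numlines = NUM_LINES;

	for (i = 0; i < numlines; i++) {
		strncpy(line, chr, LINE_LEN);
		line[LINE_LEN] = '\0';
		printf("line %d: \"%s\"\n", i, line);	/* "POWER ON SEL", "F TEST OK" */
		chr += LINE_LEN;
	}
	return 0;
}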
-
-
-int elsc_password_set(elsc_t *e, char *password)
-{
- /* shush compiler */
- e = e;
- password = password;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-int elsc_password_get(elsc_t *e, char *password)
-{
- /* shush compiler */
- e = e;
- password = password;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-
-/*
- * sc_portspeed_get
- *
- * retrieve the current portspeed setting for the bedrock II
- */
-int sc_portspeed_get(l1sc_t *sc)
-{
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
- int portspeed_a, portspeed_b;
- /* ioport clock rates */
-
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( sc, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_PORTSPEED,
- 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( sc, subch, msg, msg, &len ) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 4,
- L1_ARG_INT, &portspeed_a,
- L1_ARG_INT, &portspeed_b ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* for the c-brick, we ignore the portspeed_b value */
- return (portspeed_a ? 600 : 400);
-}
-
-/*
- * elsc_power_query
- *
- * To be used after system reset, this command returns 1 if the reset
- * was the result of a power-on, 0 otherwise.
- *
- * The power query status is cleared to 0 after it is read.
- */
-
-int elsc_power_query(elsc_t *e)
-{
- e = e; /* shush the compiler */
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 1;
-}
-
-int elsc_rpwr_query(elsc_t *e, int is_master)
-{
- /* shush the compiler */
- e = e;
- is_master = is_master;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-/*
- * elsc_power_down
- *
- * Sets up system to shut down in "sec" seconds (or modifies the
- * shutdown time if one is already in effect). Use 0 to power
- * down immediately.
- */
-
-int elsc_power_down(elsc_t *e, int sec)
-{
- /* shush compiler */
- e = e;
- sec = sec;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-
-int elsc_system_reset(elsc_t *e)
-{
- char msg[BRL1_QSIZE];
- int subch; /* system controller subchannel used */
- int len; /* number of msg buffer bytes used */
- int result;
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
- return ELSC_ERROR_CMD_SEND;
- }
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RESET, 0 )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( (result = sc_command( e, subch, msg, msg, &len )) ) {
- sc_close( e, subch );
- if( result == SC_NMSG ) {
- /* timeout is OK. We've sent the reset. Now it's just
- * a matter of time...
- */
- return( 0 );
- }
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-
-int elsc_power_cycle(elsc_t *e)
-{
- /* shush compiler */
- e = e;
-
- /* fill in buffer with the opcode & params; call sc_command */
-
- return 0;
-}
-
-
-/*
- * L1 Support for reading
- * cbrick uid.
- */
-
-int elsc_nic_get(elsc_t *e, uint64_t *nic, int verbose)
-{
- /* this parameter included only for SN0 compatibility */
- verbose = verbose;
-
- /* We don't go straight to the bedrock/L1 protocol on this one, but let
- * the eeprom layer prepare the eeprom data as we would like it to
- * appear to the caller
- */
- return cbrick_uid_get( e->nasid, nic );
-}
-
-
-int _elsc_hbt(elsc_t *e, int ival, int rdly)
-{
- e = e;
- ival = ival;
- rdly = rdly;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-
-/* send a command string to an L1 */
-int sc_command_interp( l1sc_t *sc, l1addr_t compt, l1addr_t rack, l1addr_t bay,
- char *cmd )
-{
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
- l1addr_t target; /* target system controller for command */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
-
- L1_BUILD_ADDR( &target, compt, rack, bay, 0 );
- subch = sc_open( sc, target );
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_CMD, L1_REQ_EXEC_CMD, 2,
- L1_ARG_ASCII, cmd )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( sc, subch, msg, msg, &len ) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-/*
- * sc_power_down
- *
- * Shuts down the c-brick associated with sc, and any attached I/O bricks
- * or other c-bricks (won't go through r-bricks).
- */
-
-int sc_power_down(l1sc_t *sc)
-{
- return sc_command_interp( sc, L1_ADDR_TYPE_L1, L1_ADDR_RACK_LOCAL,
- L1_ADDR_BAY_LOCAL, "* pwr d" );
-}
-
-
-/*
- * sc_power_down_all
- *
- * Works similarly to sc_power_down, except that the request is sent to the
- * closest L2 and EVERYBODY gets turned off.
- */
-
-int sc_power_down_all(l1sc_t *sc)
-{
- if( nodepda->num_routers > 0 ) {
- return sc_command_interp( sc, L1_ADDR_TYPE_L2, L1_ADDR_RACK_LOCAL,
- L1_ADDR_BAY_LOCAL, "* pwr d" );
- }
- else {
- return sc_power_down( sc );
- }
-}
-
-
-/*
- * Routines for reading the R-brick's L1
- */
-
-int router_module_get( nasid_t nasid, net_vec_t path )
-{
- uint rnum, rack, bay, t;
- int ret;
- l1sc_t sc;
-
- /* prepare l1sc_t struct */
- sc_init( &sc, nasid, path );
-
- /* construct module ID from rack and slot info */
-
- if ((ret = elsc_rack_bay_get(&sc, &rnum, &bay)) < 0)
- return ret;
-
- /* report unset location info. with a special, otherwise invalid modid */
- if (rnum == 0 && bay == 0)
- return MODULE_NOT_SET;
-
- if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
- return ELSC_ERROR_MODULE;
-
- /* Build a moduleid_t-compatible rack number */
-
- rack = 0;
- t = rnum / 100; /* rack class (CPU/IO) */
- if (t > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_CLASS(rack, t);
- rnum %= 100;
-
- t = rnum / 10; /* rack group */
- if (t > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_GROUP(rack, t);
-
- t = rnum % 10; /* rack number (one-based) */
- if (t-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_NUM(rack, t);
-
- ret = RBT_TO_MODULE(rack, bay, MODULE_RBRICK);
- return ret;
-}
-
-
-/*
- * iobrick routines
- */
-
-/* iobrick_rack_bay_type_get fills in the three int * arguments with the
- * rack number, bay number and brick type of the L1 being addressed. Note
- * that if the L1 operation fails and this function returns an error value,
- * garbage may be written to brick_type.
- */
-int iobrick_rack_bay_type_get( l1sc_t *sc, uint *rack,
- uint *bay, uint *brick_type )
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
- uint32_t buf32; /* used to copy 32-bit rack & bay out of msg */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( sc, L1_ADDR_LOCALIO )) < 0 ) {
- return( ELSC_ERROR_CMD_SEND );
- }
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RRBT, 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( sc, subch, msg, msg, &len ) ) {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 4, L1_ARG_INT, &buf32,
- L1_ARG_INT, brick_type ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* extract rack/bay info
- *
- * note that the 32-bit value returned by the L1 actually
- * only uses the low-order sixteen bits for rack and bay
- * information. A "normal" L1 address puts rack and bay
- * information in bit positions 12 through 28. So if
- * we initially shift the value returned 12 bits to the left,
- * we can use the L1 addressing #define's to extract the
- * values we need (see ksys/l1.h for a complete list of the
- * various fields of an L1 address).
- */
- buf32 <<= L1_ADDR_BAY_SHFT;
-
- *rack = (buf32 & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
- *bay = (buf32 & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
-
- return 0;
-}
-
-
-int iobrick_module_get(l1sc_t *sc)
-{
- uint rnum, rack, bay, brick_type, t;
- int ret;
-
- /* construct module ID from rack and slot info */
-
- if ((ret = iobrick_rack_bay_type_get(sc, &rnum, &bay, &brick_type)) < 0)
- return ret;
-
- if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
- return ELSC_ERROR_MODULE;
-
- /* Build a moduleid_t-compatible rack number */
-
- rack = 0;
- t = rnum / 100; /* rack class (CPU/IO) */
- if (t > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_CLASS(rack, t);
- rnum %= 100;
-
- t = rnum / 10; /* rack group */
- if (t > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_GROUP(rack, t);
-
- t = rnum % 10; /* rack number (one-based) */
- if (t-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_NUM(rack, t);
-
- switch( brick_type ) {
- case 'I':
- brick_type = MODULE_IBRICK; break;
- case 'P':
- brick_type = MODULE_PBRICK; break;
- case 'X':
- brick_type = MODULE_XBRICK; break;
- }
-
- ret = RBT_TO_MODULE(rack, bay, brick_type);
-
- return ret;
-}
-
-/* iobrick_get_sys_snum asks the attached iobrick for the system
- * serial number. This function will only be relevant to the master
- * cbrick (the one attached to the bootmaster ibrick); other nodes
- * may call the function, but the value returned to the master node
- * will be the one used as the system serial number by the kernel.
- */
-
-int
-iobrick_get_sys_snum( l1sc_t *sc, char *snum_str )
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( sc, L1_ADDR_LOCALIO )) < 0 ) {
- return( ELSC_ERROR_CMD_SEND );
- }
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_SYS_SERIAL, 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( sc, subch, msg, msg, &len ) ) {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- return( sc_interpret_resp( msg, 2, L1_ARG_ASCII, snum_str ) );
-}
-
-
-/*
- * The following functions apply (or cut off) power to the specified
- * pci bus or slot.
- */
-
-int
-iobrick_pci_pwr( l1sc_t *sc, int bus, int slot, int req_code )
-{
-#if 0 /* The "bedrock request" method of performing this function
- * seems to be broken in the L1, so for now use the command-
- * interpreter method
- */
-
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( sc, L1_ADDR_LOCALIO );
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- req_code, 4,
- L1_ARG_INT, bus,
- L1_ARG_INT, slot )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND(sc, subch, msg, msg, &len ) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-
-#else
- char cmd[64];
- char *fxn;
-
- switch( req_code )
- {
- case L1_REQ_PCI_UP:
- fxn = "u";
- break;
- case L1_REQ_PCI_DOWN:
- fxn = "d";
- break;
- case L1_REQ_PCI_RESET:
- fxn = "rst";
- break;
- default:
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- if( slot == -1 )
- sprintf( cmd, "pci %d %s", bus, fxn );
- else
- sprintf( cmd, "pci %d %d %s", bus, slot, fxn );
-
- return sc_command_interp( sc, L1_ADDR_TYPE_IOBRICK,
- L1_ADDR_RACK_LOCAL, L1_ADDR_BAY_LOCAL, cmd );
-#endif
-}
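
Since the bedrock-request path above is disabled, the strings handed to sc_command_interp are the interface actually exercised. A small standalone sketch of the command strings the sprintf calls produce, using example bus and slot values:

#include <stdio.h>

int main(void)
{
	char cmd[64];
	int bus = 1, slot = 2;

	sprintf(cmd, "pci %d %d %s", bus, slot, "u");	/* L1_REQ_PCI_UP          */
	printf("%s\n", cmd);				/* prints "pci 1 2 u"     */

	sprintf(cmd, "pci %d %s", bus, "rst");		/* whole-bus reset (slot == -1) */
	printf("%s\n", cmd);				/* prints "pci 1 rst"     */
	return 0;
}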
-
-int
-iobrick_pci_slot_pwr( l1sc_t *sc, int bus, int slot, int up )
-{
- return iobrick_pci_pwr( sc, bus, slot, up );
-}
-
-int
-iobrick_pci_bus_pwr( l1sc_t *sc, int bus, int up )
-{
- return iobrick_pci_pwr( sc, bus, -1, up );
-}
-
-
-int
-iobrick_pci_slot_rst( l1sc_t *sc, int bus, int slot )
-{
- return iobrick_pci_pwr( sc, bus, slot, L1_REQ_PCI_RESET );
-}
-
-int
-iobrick_pci_bus_rst( l1sc_t *sc, int bus )
-{
- return iobrick_pci_pwr( sc, bus, -1, L1_REQ_PCI_RESET );
-}
-
-
-/* get the L1 firmware version for an iobrick */
-int
-iobrick_sc_version( l1sc_t *sc, char *result )
-{
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
- int major, /* major rev number */
- minor, /* minor rev number */
- bugfix; /* bugfix rev number */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( sc, L1_ADDR_LOCALIO );
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_FW_REV, 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND(sc, subch, msg, msg, &len ) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 6, L1_ARG_INT, &major,
- L1_ARG_INT, &minor, L1_ARG_INT, &bugfix )
- < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- sprintf( result, "%d.%d.%d", major, minor, bugfix );
-
- return 0;
-}
diff --git a/arch/ia64/sn/io/machvec/Makefile b/arch/ia64/sn/io/machvec/Makefile
new file mode 100644
index 00000000000000..238f3e1c5fd9a3
--- /dev/null
+++ b/arch/ia64/sn/io/machvec/Makefile
@@ -0,0 +1,12 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+EXTRA_CFLAGS := -DLITTLE_ENDIAN
+
+obj-y += pci.o pci_dma.o pci_bus_cvlink.o iomv.o
diff --git a/arch/ia64/sn/io/machvec/iomv.c b/arch/ia64/sn/io/machvec/iomv.c
new file mode 100644
index 00000000000000..e69d0b81d9531e
--- /dev/null
+++ b/arch/ia64/sn/io/machvec/iomv.c
@@ -0,0 +1,71 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/sn_cpuid.h>
+
+/**
+ * sn_io_addr - convert an in/out port to an i/o address
+ * @port: port to convert
+ *
+ * Legacy in/out instructions are converted to ld/st instructions
+ * on IA64. This routine will convert a port number into a valid
+ * SN i/o address. Used by sn_in*() and sn_out*().
+ */
+void *
+sn_io_addr(unsigned long port)
+{
+ if (!IS_RUNNING_ON_SIMULATOR()) {
+ return( (void *) (port | __IA64_UNCACHED_OFFSET));
+ } else {
+ unsigned long io_base;
+ unsigned long addr;
+
+ /*
+ * word align port, but need more than 10 bits
+ * for accessing registers in bedrock local block
+ * (so we don't do port&0xfff)
+ */
+ if ((port >= 0x1f0 && port <= 0x1f7) ||
+ port == 0x3f6 || port == 0x3f7) {
+ io_base = (0xc000000fcc000000 | ((unsigned long)get_nasid() << 38));
+ addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
+ } else {
+ addr = __ia64_get_io_port_base() | ((port >> 2) << 2);
+ }
+ return(void *) addr;
+ }
+}
+
+EXPORT_SYMBOL(sn_io_addr);
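
To make the simulator-path arithmetic in sn_io_addr() concrete, here is a small user-space sketch; it is illustrative only, with the base constant and shifts copied from the code above and get_nasid() replaced by a fixed value.

#include <stdio.h>

int main(void)
{
	unsigned long port  = 0x1f0;	/* primary IDE data port */
	unsigned long nasid = 0;	/* stand-in for get_nasid() */
	unsigned long io_base, addr;

	/* same formula sn_io_addr() uses for the IDE ports under the simulator */
	io_base = 0xc000000fcc000000UL | (nasid << 38);
	addr    = io_base | ((port >> 2) << 12) | (port & 0xfff);

	printf("port 0x%lx -> 0x%lx\n", port, addr);
	/* prints: port 0x1f0 -> 0xc000000fcc07c1f0 */
	return 0;
}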
+
+/**
+ * sn_mmiob - I/O space memory barrier
+ *
+ * Acts as a memory mapped I/O barrier for platforms that queue writes to
+ * I/O space. This ensures that subsequent writes to I/O space arrive after
+ * all previous writes. For most ia64 platforms, this is a simple
+ * 'mf.a' instruction. For other platforms, mmiob() may have to read
+ * a chipset register to ensure ordering.
+ *
+ * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
+ * See PV 871084 for details about the WAR about zero value.
+ *
+ */
+void
+sn_mmiob (void)
+{
+ while ((((volatile unsigned long) (*pda->pio_write_status_addr)) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
+ SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
+ udelay(1);
+}
diff --git a/arch/ia64/sn/io/pci.c b/arch/ia64/sn/io/machvec/pci.c
index 974272d7216f06..5bc8f37b9c2e90 100644
--- a/arch/ia64/sn/io/pci.c
+++ b/arch/ia64/sn/io/machvec/pci.c
@@ -6,7 +6,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1997, 1998, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 1997, 1998, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/types.h>
@@ -40,8 +40,8 @@
#ifdef CONFIG_PCI
-extern devfs_handle_t pci_bus_to_vertex(unsigned char);
-extern devfs_handle_t devfn_to_vertex(unsigned char bus, unsigned char devfn);
+extern vertex_hdl_t pci_bus_to_vertex(unsigned char);
+extern vertex_hdl_t devfn_to_vertex(unsigned char bus, unsigned char devfn);
/*
* snia64_read_config_byte - Read a byte from the config area of the device.
@@ -51,7 +51,7 @@ static int snia64_read_config_byte (struct pci_dev *dev,
{
unsigned long res = 0;
unsigned size = 1;
- devfs_handle_t device_vertex;
+ vertex_hdl_t device_vertex;
if ( (dev == (struct pci_dev *)0) || (val == (unsigned char *)0) ) {
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -75,7 +75,7 @@ static int snia64_read_config_word (struct pci_dev *dev,
{
unsigned long res = 0;
unsigned size = 2; /* 2 bytes */
- devfs_handle_t device_vertex;
+ vertex_hdl_t device_vertex;
if ( (dev == (struct pci_dev *)0) || (val == (unsigned short *)0) ) {
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -99,7 +99,7 @@ static int snia64_read_config_dword (struct pci_dev *dev,
{
unsigned long res = 0;
unsigned size = 4; /* 4 bytes */
- devfs_handle_t device_vertex;
+ vertex_hdl_t device_vertex;
if (where & 3) {
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -125,7 +125,7 @@ static int snia64_read_config_dword (struct pci_dev *dev,
static int snia64_write_config_byte (struct pci_dev *dev,
int where, unsigned char val)
{
- devfs_handle_t device_vertex;
+ vertex_hdl_t device_vertex;
if ( dev == (struct pci_dev *)0 ) {
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -155,7 +155,7 @@ static int snia64_write_config_byte (struct pci_dev *dev,
static int snia64_write_config_word (struct pci_dev *dev,
int where, unsigned short val)
{
- devfs_handle_t device_vertex = NULL;
+ vertex_hdl_t device_vertex = NULL;
if (where & 1) {
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -188,7 +188,7 @@ static int snia64_write_config_word (struct pci_dev *dev,
static int snia64_write_config_dword (struct pci_dev *dev,
int where, unsigned int val)
{
- devfs_handle_t device_vertex;
+ vertex_hdl_t device_vertex;
if (where & 3) {
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -276,9 +276,6 @@ pci_fixup_ioc3(struct pci_dev *d)
d->resource[i].flags = 0UL;
}
-#ifdef CONFIG_IA64_SGI_SN1
- *(volatile u32 *)0xc0000a000f000220 |= 0x90000;
-#endif
d->subsystem_vendor = 0;
d->subsystem_device = 0;
diff --git a/arch/ia64/sn/io/sn2/pci_bus_cvlink.c b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c
index 586786bc9c3286..df3e1dfe5b250b 100644
--- a/arch/ia64/sn/io/sn2/pci_bus_cvlink.c
+++ b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -15,7 +14,6 @@
#include <linux/sched.h>
#include <linux/ioport.h>
#include <asm/sn/types.h>
-#include <asm/sn/hack.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/driver.h>
@@ -38,23 +36,24 @@
#include <asm/sn/pci/pci_bus_cvlink.h>
#include <asm/sn/simulator.h>
#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/arch.h>
extern int bridge_rev_b_data_check_disable;
-devfs_handle_t busnum_to_pcibr_vhdl[MAX_PCI_XWIDGET];
+vertex_hdl_t busnum_to_pcibr_vhdl[MAX_PCI_XWIDGET];
nasid_t busnum_to_nid[MAX_PCI_XWIDGET];
void * busnum_to_atedmamaps[MAX_PCI_XWIDGET];
unsigned char num_bridges;
-static int done_probing = 0;
+static int done_probing;
+extern irqpda_t *irqpdaindr;
-static int pci_bus_map_create(devfs_handle_t xtalk, char * io_moduleid);
-devfs_handle_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
+static int pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid);
+vertex_hdl_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
-extern unsigned char Is_pic_on_this_nasid[512];
-
-extern void sn_init_irq_desc(void);
extern void register_pcibr_intr(int irq, pcibr_intr_t intr);
+void sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot);
+
/*
* For the given device, initialize whether it is a PIC device.
@@ -78,7 +77,7 @@ pci_bus_cvlink_init(void)
extern void ioconfig_bus_init(void);
- memset(busnum_to_pcibr_vhdl, 0x0, sizeof(devfs_handle_t) * MAX_PCI_XWIDGET);
+ memset(busnum_to_pcibr_vhdl, 0x0, sizeof(vertex_hdl_t) * MAX_PCI_XWIDGET);
memset(busnum_to_nid, 0x0, sizeof(nasid_t) * MAX_PCI_XWIDGET);
memset(busnum_to_atedmamaps, 0x0, sizeof(void *) * MAX_PCI_XWIDGET);
@@ -92,11 +91,11 @@ pci_bus_cvlink_init(void)
* pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated
* pci bus vertex from the SGI IO Infrastructure.
*/
-devfs_handle_t
+vertex_hdl_t
pci_bus_to_vertex(unsigned char busnum)
{
- devfs_handle_t pci_bus = NULL;
+ vertex_hdl_t pci_bus = NULL;
/*
@@ -110,15 +109,15 @@ pci_bus_to_vertex(unsigned char busnum)
* devfn_to_vertex() - returns the vertex of the device given the bus, slot,
* and function numbers.
*/
-devfs_handle_t
+vertex_hdl_t
devfn_to_vertex(unsigned char busnum, unsigned int devfn)
{
int slot = 0;
int func = 0;
char name[16];
- devfs_handle_t pci_bus = NULL;
- devfs_handle_t device_vertex = (devfs_handle_t)NULL;
+ vertex_hdl_t pci_bus = NULL;
+ vertex_hdl_t device_vertex = (vertex_hdl_t)NULL;
/*
* Go get the pci bus vertex.
@@ -126,9 +125,9 @@ devfn_to_vertex(unsigned char busnum, unsigned int devfn)
pci_bus = pci_bus_to_vertex(busnum);
if (!pci_bus) {
/*
- * During probing, the Linux pci code invents non existant
+ * During probing, the Linux pci code invents non-existent
* bus numbers and pci_dev structures and tries to access
- * them to determine existance. Don't crib during probing.
+ * them to determine existence. Don't crib during probing.
*/
if (done_probing)
printk("devfn_to_vertex: Invalid bus number %d given.\n", busnum);
@@ -219,6 +218,199 @@ printk("set_flush_addresses: xbow_buf_sync\n");
}
+struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
+
+// Initialize the data structures for flushing write buffers after a PIO read.
+// The theory is:
+// Take an unused int. pin and associate it with a pin that is in use.
+// After a PIO read, force an interrupt on the unused pin, forcing a write buffer flush
+// on the in use pin. This will prevent the race condition between PIO read responses and
+// DMA writes.
+void
+sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot) {
+ nasid_t nasid;
+ unsigned long dnasid;
+ int wid_num;
+ int bus;
+ struct sn_flush_device_list *p;
+ bridge_t *b;
+ bridgereg_t dev_sel;
+ extern int isIO9(int);
+ int bwin;
+ int i;
+
+ nasid = NASID_GET(start);
+ wid_num = SWIN_WIDGETNUM(start);
+ bus = (start >> 23) & 0x1;
+ bwin = BWIN_WINDOWNUM(start);
+
+ if (flush_nasid_list[nasid].widget_p == NULL) {
+ flush_nasid_list[nasid].widget_p = (struct sn_flush_device_list **)kmalloc((HUB_WIDGET_ID_MAX+1) *
+ sizeof(struct sn_flush_device_list *), GFP_KERNEL);
+ memset(flush_nasid_list[nasid].widget_p, 0, (HUB_WIDGET_ID_MAX+1) * sizeof(struct sn_flush_device_list *));
+ }
+ if (bwin > 0) {
+ bwin--;
+ switch (bwin) {
+ case 0:
+ flush_nasid_list[nasid].iio_itte1 = HUB_L(IIO_ITTE_GET(nasid, 0));
+ wid_num = ((flush_nasid_list[nasid].iio_itte1) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte1 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 1:
+ flush_nasid_list[nasid].iio_itte2 = HUB_L(IIO_ITTE_GET(nasid, 1));
+ wid_num = ((flush_nasid_list[nasid].iio_itte2) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte2 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 2:
+ flush_nasid_list[nasid].iio_itte3 = HUB_L(IIO_ITTE_GET(nasid, 2));
+ wid_num = ((flush_nasid_list[nasid].iio_itte3) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte3 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 3:
+ flush_nasid_list[nasid].iio_itte4 = HUB_L(IIO_ITTE_GET(nasid, 3));
+ wid_num = ((flush_nasid_list[nasid].iio_itte4) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte4 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 4:
+ flush_nasid_list[nasid].iio_itte5 = HUB_L(IIO_ITTE_GET(nasid, 4));
+ wid_num = ((flush_nasid_list[nasid].iio_itte5) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte5 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 5:
+ flush_nasid_list[nasid].iio_itte6 = HUB_L(IIO_ITTE_GET(nasid, 5));
+ wid_num = ((flush_nasid_list[nasid].iio_itte6) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte6 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 6:
+ flush_nasid_list[nasid].iio_itte7 = HUB_L(IIO_ITTE_GET(nasid, 6));
+ wid_num = ((flush_nasid_list[nasid].iio_itte7) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte7 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ }
+ }
+
+	// If it's IO9 bus 1 (the bus == 0 case here), we don't care about slots 1 and 4:
+	// these are the IOC4 slots and we don't flush them.  Slot 3 (the 12160) is handled below.
+ if (isIO9(nasid) && bus == 0 && (slot == 1 || slot == 4)) {
+ return;
+ }
+ if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) {
+ flush_nasid_list[nasid].widget_p[wid_num] = (struct sn_flush_device_list *)kmalloc(
+ DEV_PER_WIDGET * sizeof (struct sn_flush_device_list), GFP_KERNEL);
+ memset(flush_nasid_list[nasid].widget_p[wid_num], 0,
+ DEV_PER_WIDGET * sizeof (struct sn_flush_device_list));
+ p = &flush_nasid_list[nasid].widget_p[wid_num][0];
+ for (i=0; i<DEV_PER_WIDGET;i++) {
+ p->bus = -1;
+ p->pin = -1;
+ p++;
+ }
+ }
+
+ p = &flush_nasid_list[nasid].widget_p[wid_num][0];
+ for (i=0;i<DEV_PER_WIDGET; i++) {
+ if (p->pin == pin && p->bus == bus) break;
+ if (p->pin < 0) {
+ p->pin = pin;
+ p->bus = bus;
+ break;
+ }
+ p++;
+ }
+
+ for (i=0; i<PCI_ROM_RESOURCE; i++) {
+ if (p->bar_list[i].start == 0) {
+ p->bar_list[i].start = start;
+ p->bar_list[i].end = end;
+ break;
+ }
+ }
+ b = (bridge_t *)(NODE_SWIN_BASE(nasid, wid_num) | (bus << 23) );
+
+	// If it's IO9, then slot 2 maps to slot 7 and slot 6 maps to slot 8.
+	// Seeing this is non-trivial: by drawing pictures, reading manuals and talking
+	// to the hardware people, we know that on IO9 bus 1, slots 7 and 8 are always unused.
+	// Further, since we short-circuit slots 1 and 4 above and give slot 3 its own
+	// treatment below, we only have to worry about the case where a card is in slot 2.
+	// A multifunction card will also appear to be in slot 6 (from an interrupt point
+	// of view).  That is the most we have to worry about; a four-function card will
+	// overload the interrupt lines in slots 2 and 6.
+	// We also need to special-case the 12160 device in slot 3.  Fortunately, we have
+	// a spare interrupt line for pin 4, so we use that for the 12160.
+	// All other buses have slots 3 and 4 and slots 7 and 8 unused.  Since we can only
+	// see slots 1 and 2 and slots 5 and 6 coming through here for those buses (this
+	// is true only on PXbricks with 2 physical slots per bus), we just need to add
+	// 2 to the slot number to find an unused slot.
+	// We have convinced ourselves that we will never see a case where two different
+	// cards in two different slots share an interrupt line, so there is no need to
+	// special-case that.
+
+ if (isIO9(nasid) && wid_num == 0xc && bus == 0) {
+ if (slot == 2) {
+ p->force_int_addr = (unsigned long)&b->b_force_always[6].intr;
+ dev_sel = b->b_int_device;
+ dev_sel |= (1<<18);
+ b->b_int_device = dev_sel;
+ dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
+ b->p_int_addr_64[6] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+ (dnasid << 36) | (0xfUL << 48);
+ } else if (slot == 3) { /* 12160 SCSI device in IO9 */
+ p->force_int_addr = (unsigned long)&b->b_force_always[4].intr;
+ dev_sel = b->b_int_device;
+ dev_sel |= (2<<12);
+ b->b_int_device = dev_sel;
+ dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
+ b->p_int_addr_64[4] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+ (dnasid << 36) | (0xfUL << 48);
+ } else { /* slot == 6 */
+ p->force_int_addr = (unsigned long)&b->b_force_always[7].intr;
+ dev_sel = b->b_int_device;
+ dev_sel |= (5<<21);
+ b->b_int_device = dev_sel;
+ dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
+ b->p_int_addr_64[7] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+ (dnasid << 36) | (0xfUL << 48);
+ }
+ } else {
+ p->force_int_addr = (unsigned long)&b->b_force_always[pin + 2].intr;
+ dev_sel = b->b_int_device;
+ dev_sel |= ((slot - 1) << ( pin * 3) );
+ b->b_int_device = dev_sel;
+ dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
+ b->p_int_addr_64[pin + 2] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+ (dnasid << 36) | (0xfUL << 48);
+ }
+}
+
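The comment at the head of sn_dma_flush_init() above describes the scheme, but the read-side consumer of the tables it builds is not part of this diff. The sketch below only illustrates how a PIO-read path could use those structures (flush_nasid_list, bar_list, force_int_addr, flush_addr); the function name, the lookup strategy, the completion test and the field widths are assumptions rather than the kernel's actual flush routine, and big-window PIO addresses would additionally need the ITTE decode performed above.

    /*
     * Minimal sketch of a read-side flush, assuming the structures built by
     * sn_dma_flush_init().  Illustrative only; not taken from this patch.
     */
    static void example_pio_read_flush(unsigned long pio_addr)
    {
            nasid_t nasid = NASID_GET(pio_addr);
            int wid_num = SWIN_WIDGETNUM(pio_addr);
            struct sn_flush_device_list *p;
            int i, j;

            if (flush_nasid_list[nasid].widget_p == NULL ||
                flush_nasid_list[nasid].widget_p[wid_num] == NULL)
                    return;                 /* nothing registered here */

            p = &flush_nasid_list[nasid].widget_p[wid_num][0];
            for (i = 0; i < DEV_PER_WIDGET; i++, p++)
                    for (j = 0; j < PCI_ROM_RESOURCE; j++)
                            if (p->bar_list[j].start &&
                                pio_addr >= p->bar_list[j].start &&
                                pio_addr <= p->bar_list[j].end)
                                    goto found;
            return;                         /* no BAR covers this address */
    found:
            p->flush_addr = 0;
            /*
             * Poking the bridge's "force always" register raises the spare
             * interrupt pin, which flushes the write buffers and then DMAs a
             * completion value to the physical address of flush_addr that was
             * programmed into p_int_addr_64 above.
             */
            *(volatile unsigned int *)p->force_int_addr = 1;
            while (*(volatile unsigned long *)&p->flush_addr == 0)
                    cpu_relax();
    }

As an aside, the seven near-identical cases in the big-window switch above differ only in which iio_itteN field they cache; had those fields been an array indexed by window number, the switch would collapse to a couple of lines.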
/*
* Most drivers currently do not properly tell the arch specific pci dma
* interfaces whether they can handle A64. Here is where we privately
@@ -264,37 +456,22 @@ sn_pci_fixup(int arg)
struct sn_device_sysdata *device_sysdata;
pciio_intr_t intr_handle;
int cpuid, bit;
- devfs_handle_t device_vertex;
+ vertex_hdl_t device_vertex;
pciio_intr_line_t lines;
extern void sn_pci_find_bios(void);
extern int numnodes;
int cnode;
- extern void io_sh_swapper(int, int);
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- if ( !Is_pic_on_this_nasid[cnodeid_to_nasid(cnode)] )
- io_sh_swapper((cnodeid_to_nasid(cnode)), 0);
- }
if (arg == 0) {
#ifdef CONFIG_PROC_FS
extern void register_sn_procfs(void);
#endif
- sn_init_irq_desc();
sn_pci_find_bios();
for (cnode = 0; cnode < numnodes; cnode++) {
extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
intr_init_vecblk(NODEPDA(cnode), cnode, 0);
}
-
- /*
- * When we return to generic Linux, Swapper is always on ..
- */
- for (cnode = 0; cnode < numnodes; cnode++) {
- if ( !Is_pic_on_this_nasid[cnodeid_to_nasid(cnode)] )
- io_sh_swapper((cnodeid_to_nasid(cnode)), 1);
- }
#ifdef CONFIG_PROC_FS
register_sn_procfs();
#endif
@@ -323,13 +500,19 @@ sn_pci_fixup(int arg)
ioport_resource.end = 0xcfffffffffffffff;
/*
+ * Set the root start and end for Mem Resource.
+ */
+ iomem_resource.start = 0;
+ iomem_resource.end = 0xffffffffffffffff;
+
+ /*
* Initialize the device vertex in the pci_dev struct.
*/
pci_for_each_dev(device_dev) {
unsigned int irq;
int idx;
u16 cmd;
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
unsigned long size;
extern int bit_pos_to_irq(int);
@@ -423,15 +606,36 @@ sn_pci_fixup(int arg)
device_sysdata = (struct sn_device_sysdata *)device_dev->sysdata;
device_vertex = device_sysdata->vhdl;
+ irqpdaindr->current = device_dev;
intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);
- bit = intr_handle->pi_irq;
+ irq = intr_handle->pi_irq;
+ irqpdaindr->device_dev[irq] = device_dev;
cpuid = intr_handle->pi_cpu;
- irq = bit;
- irq = irq + (cpuid << 8);
pciio_intr_connect(intr_handle, (intr_func_t)0, (intr_arg_t)0);
device_dev->irq = irq;
register_pcibr_intr(irq, (pcibr_intr_t)intr_handle);
+
+ for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
+ int ibits = ((pcibr_intr_t)intr_handle)->bi_ibits;
+ int i;
+
+ size = device_dev->resource[idx].end -
+ device_dev->resource[idx].start;
+ if (size == 0) continue;
+
+ for (i=0; i<8; i++) {
+ if (ibits & (1 << i) ) {
+ sn_dma_flush_init(device_dev->resource[idx].start,
+ device_dev->resource[idx].end,
+ idx,
+ i,
+ PCI_SLOT(device_dev->devfn));
+ }
+ }
+ }
+
+ }
#ifdef ajmtestintr
{
int slot = PCI_SLOT(device_dev->devfn);
@@ -448,13 +652,6 @@ sn_pci_fixup(int arg)
request_irq(irq, intr_test_handle_intr,0,NULL, NULL);
}
#endif
-
- }
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- if ( !Is_pic_on_this_nasid[cnodeid_to_nasid(cnode)] )
- io_sh_swapper((cnodeid_to_nasid(cnode)), 1);
- }
}
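One caller-visible effect of the interrupt changes above: the old irq = bit + (cpuid << 8) encoding is gone, and device_dev->irq now holds the plain number that was handed to register_pcibr_intr(). A driver can pass it straight to request_irq(); in the hedged sketch below, mydrv_probe, my_intr_handler and the "mydrv" string are placeholder names, not anything from this patch.

    /* Illustrative driver probe built on the irq set up by sn_pci_fixup(). */
    static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            if (pci_enable_device(pdev))
                    return -EIO;
            /* pdev->irq is the pcibr interrupt number assigned above */
            if (request_irq(pdev->irq, my_intr_handler, SA_SHIRQ, "mydrv", pdev))
                    return -EBUSY;
            return 0;
    }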
/*
@@ -506,18 +703,20 @@ linux_bus_cvlink(void)
*
*/
static int
-pci_bus_map_create(devfs_handle_t xtalk, char * io_moduleid)
+pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid)
{
- devfs_handle_t master_node_vertex = NULL;
- devfs_handle_t xwidget = NULL;
- devfs_handle_t pci_bus = NULL;
+ vertex_hdl_t master_node_vertex = NULL;
+ vertex_hdl_t xwidget = NULL;
+ vertex_hdl_t pci_bus = NULL;
hubinfo_t hubinfo = NULL;
xwidgetnum_t widgetnum;
char pathname[128];
graph_error_t rv;
int bus;
int basebus_num;
+ extern void ioconfig_get_busnum(char *, int *);
+
int bus_number;
/*
@@ -666,12 +865,14 @@ int
pci_bus_to_hcl_cvlink(void)
{
- devfs_handle_t devfs_hdl = NULL;
- devfs_handle_t xtalk = NULL;
+ vertex_hdl_t devfs_hdl = NULL;
+ vertex_hdl_t xtalk = NULL;
int rv = 0;
char name[256];
char tmp_name[256];
- int i, ii;
+ int i, ii, j;
+ char *brick_name;
+ extern void ioconfig_bus_new_entries(void);
/*
* Figure out which IO Brick is connected to the Compute Bricks.
@@ -695,18 +896,28 @@ pci_bus_to_hcl_cvlink(void)
}
}
- devfs_hdl = hwgraph_path_to_vertex("/dev/hw/module");
+ devfs_hdl = hwgraph_path_to_vertex("hw/module");
for (i = 0; i < nummodules ; i++) {
+ for ( j = 0; j < 3; j++ ) {
+ if ( j == 0 )
+ brick_name = EDGE_LBL_PBRICK;
+ else if ( j == 1 )
+ brick_name = EDGE_LBL_PXBRICK;
+ else
+ brick_name = EDGE_LBL_IXBRICK;
+
for ( ii = 0; ii < 2 ; ii++ ) {
memset(name, 0, 256);
memset(tmp_name, 0, 256);
format_module_id(name, modules[i]->id, MODULE_FORMAT_BRIEF);
- sprintf(tmp_name, "/slab/%d/Pbrick/xtalk", geo_slab(modules[i]->geoid[ii]));
+ sprintf(tmp_name, "/slab/%d/%s/xtalk", geo_slab(modules[i]->geoid[ii]), brick_name);
strcat(name, tmp_name);
xtalk = NULL;
rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
- pci_bus_map_create(xtalk, (char *)&(modules[i]->io[ii].moduleid));
+ if ( rv == 0 )
+ pci_bus_map_create(xtalk, (char *)&(modules[i]->io[ii].moduleid));
}
+ }
}
/*
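A small note on the shape of the loop added above: the j index only selects one of three brick edge labels, so the same choice could be expressed as a table walk. The snippet below is a compactness illustration only, using the same EDGE_LBL_* macros and pathname format as the code above.

    static const char *brick_names[] = {
            EDGE_LBL_PBRICK, EDGE_LBL_PXBRICK, EDGE_LBL_IXBRICK,
    };

    for (j = 0; j < sizeof(brick_names) / sizeof(brick_names[0]); j++) {
            brick_name = brick_names[j];
            /* ... as above: build "<module>/slab/<n>/<brick>/xtalk" and call
             * pci_bus_map_create() only when hwgraph_edge_get() succeeds ... */
    }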
diff --git a/arch/ia64/sn/io/pci_dma.c b/arch/ia64/sn/io/machvec/pci_dma.c
index 837da01be05494..b2e0f4e2ff9323 100644
--- a/arch/ia64/sn/io/pci_dma.c
+++ b/arch/ia64/sn/io/machvec/pci_dma.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000,2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000,2002-2003 Silicon Graphics, Inc. All rights reserved.
*
* Routines for PCI DMA mapping. See Documentation/DMA-mapping.txt for
* a description of how these routines should be used.
@@ -35,14 +35,15 @@
/*
* For ATE allocations
*/
-pciio_dmamap_t get_free_pciio_dmamap(devfs_handle_t);
+pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
void free_pciio_dmamap(pcibr_dmamap_t);
static struct sn_dma_maps_s *find_sn_dma_map(dma_addr_t, unsigned char);
+void sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
/*
* Toplogy stuff
*/
-extern devfs_handle_t busnum_to_pcibr_vhdl[];
+extern vertex_hdl_t busnum_to_pcibr_vhdl[];
extern nasid_t busnum_to_nid[];
extern void * busnum_to_atedmamaps[];
@@ -54,7 +55,7 @@ extern void * busnum_to_atedmamaps[];
* by @pci_bus.
*/
pciio_dmamap_t
-get_free_pciio_dmamap(devfs_handle_t pci_bus)
+get_free_pciio_dmamap(vertex_hdl_t pci_bus)
{
int i;
struct sn_dma_maps_s *sn_dma_map = NULL;
@@ -122,50 +123,6 @@ find_sn_dma_map(dma_addr_t dma_addr, unsigned char busnum)
}
/**
- * sn_dma_sync - try to flush DMA buffers into the coherence domain
- * @hwdev: device to flush
- *
- * This routine flushes all DMA buffers for the device into the II of
- * the destination hub.
- *
- * NOTE!: this does not mean that the data is in the "coherence domain",
- * but it is very close. In other words, this routine *does not work*
- * as advertised due to hardware bugs. That said, it should be good enough for
- * most situations.
- */
-void
-sn_dma_sync(struct pci_dev *hwdev)
-{
-
-#ifdef SN_DMA_SYNC
-
- struct sn_device_sysdata *device_sysdata;
- volatile unsigned long dummy;
-
- /*
- * A DMA sync is supposed to ensure that
- * all the DMA from a particular device
- * is complete and coherent. We
- * try to do this by
- * 1. flushing the write wuffers from Bridge
- * 2. flushing the Xbow port.
- * Unfortunately, this only gets the DMA transactions 'very close' to
- * the coherence domain, but not quite in it.
- */
- device_sysdata = (struct sn_device_sysdata *)hwdev->sysdata;
- dummy = (volatile unsigned long ) *device_sysdata->dma_buf_sync;
-
- /*
- * For the Xbow port flush, we may be denied the request because
- * someone else may be flushing the port .. try again.
- */
- while((volatile unsigned long ) *device_sysdata->xbow_buf_sync) {
- udelay(2);
- }
-#endif
-}
-
-/**
* sn_pci_alloc_consistent - allocate memory for coherent DMA
* @hwdev: device to allocate for
* @size: size of the region
@@ -188,7 +145,7 @@ void *
sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
void *cpuaddr;
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
struct sn_device_sysdata *device_sysdata;
unsigned long phys_addr;
pciio_dmamap_t dma_map = 0;
@@ -227,38 +184,22 @@ sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_hand
* device on the same bus is already mapped with different
* attributes or to a different memory region.
*/
-#ifdef CONFIG_IA64_SGI_SN1
- *dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_CMD);
-#elif defined(CONFIG_IA64_SGI_SN2)
*dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_CMD);
-#else
-#error unsupported platform
-#endif
/*
* It is a 32 bit card and we cannot do direct mapping,
* so we try to use an ATE.
*/
if (!(*dma_handle)) {
-#ifdef CONFIG_IA64_SGI_SN1
- dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_CMD);
-#elif defined(CONFIG_IA64_SGI_SN2)
dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_CMD);
-#else
-#error unsupported platform
-#endif
if (!dma_map) {
printk(KERN_ERR "sn_pci_alloc_consistent: Unable to "
"allocate anymore 32 bit page map entries.\n");
- BUG();
+ return 0;
}
*dma_handle = (dma_addr_t) pciio_dmamap_addr(dma_map,phys_addr,
size);
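The error-path change above is visible to drivers: running out of 32-bit map entries now returns NULL from the allocator instead of hitting BUG(). Since this routine backs pci_alloc_consistent() on these platforms, callers should check the result, roughly as sketched below inside a driver setup path; mydev and the 4 KB size are illustrative.

    void *cpu_addr;
    dma_addr_t bus_addr;

    cpu_addr = pci_alloc_consistent(mydev, 4096, &bus_addr);
    if (cpu_addr == NULL)
            return -ENOMEM;         /* no direct map and no free ATEs left */

    /* ... give bus_addr to the device, use cpu_addr from the CPU ... */

    pci_free_consistent(mydev, 4096, cpu_addr, bus_addr);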
@@ -316,11 +257,12 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
{
int i;
- devfs_handle_t vhdl;
- dma_addr_t dma_addr;
+ vertex_hdl_t vhdl;
unsigned long phys_addr;
struct sn_device_sysdata *device_sysdata;
pciio_dmamap_t dma_map;
+ struct sn_dma_maps_s *sn_dma_map;
+ struct scatterlist *saved_sg = sg;
/* can't go anywhere w/o a direction in life */
if (direction == PCI_DMA_NONE)
@@ -337,40 +279,20 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
* scatterlist.
*/
for (i = 0; i < nents; i++, sg++) {
- /* this catches incorrectly written drivers that
- attempt to map scatterlists that they have
- previously mapped. we print a warning and
- continue, but the driver should be fixed */
- switch (((u64)sg->dma_address) >> 60) {
- case 0xa:
- case 0xb:
-#ifdef DEBUG
-/* This needs to be cleaned up at some point. */
- NAG("A PCI driver (for device at%8s) has attempted to "
- "map a scatterlist that was previously mapped at "
- "%p - this is currently being worked around.\n",
- hwdev->slot_name, (void *)sg->dma_address);
- phys_addr = (u64)sg->dma_address & TO_PHYS_MASK;
- break;
-#endif
- default: /* not previously mapped, get the phys. addr */
- phys_addr = __pa(sg->dma_address);
- break;
- }
- sg->page = NULL;
- dma_addr = 0;
+ phys_addr = __pa(sg->dma_address ? sg->dma_address :
+ page_address(sg->page) + sg->offset);
/*
* Handle the most common case: 64 bit cards. This
* call should always succeed.
*/
if (IS_PCIA64(hwdev)) {
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
+ sg->dma_address = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
sg->length,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA |
PCIIO_DMA_A64);
- sg->dma_address = (char *)dma_addr;
+ sg->dma_length = sg->length;
continue;
}
@@ -378,24 +300,15 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
* Handle 32-63 bit cards via direct mapping
*/
if (IS_PCI32G(hwdev)) {
-#ifdef CONFIG_IA64_SGI_SN1
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
- sg->length,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_DATA);
-#elif defined(CONFIG_IA64_SGI_SN2)
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
+ sg->dma_address = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
sg->length,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA);
-#else
-#error unsupported platform
-#endif
+ sg->dma_length = sg->length;
/*
* See if we got a direct map entry
*/
- if (dma_addr) {
- sg->dma_address = (char *)dma_addr;
+ if (sg->dma_address) {
continue;
}
@@ -405,27 +318,25 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
* It is a 32 bit card and we cannot do direct mapping,
* so we use an ATE.
*/
- dma_map = 0;
-#ifdef CONFIG_IA64_SGI_SN1
- dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_DATA);
-#elif defined(CONFIG_IA64_SGI_SN2)
dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA);
-#else
-#error unsupported platform
-#endif
if (!dma_map) {
printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
"anymore 32 bit page map entries.\n");
- BUG();
+ /*
+ * We will need to free all previously allocated entries.
+ */
+ if (i > 0) {
+ sn_pci_unmap_sg(hwdev, saved_sg, i, direction);
+ }
+ return (0);
}
- dma_addr = pciio_dmamap_addr(dma_map, phys_addr, sg->length);
- sg->dma_address = (char *)dma_addr;
- sg->page = (struct page *)dma_map;
-
+
+ sg->dma_address = pciio_dmamap_addr(dma_map, phys_addr, sg->length);
+ sg->dma_length = sg->length;
+ sn_dma_map = (struct sn_dma_maps_s *)dma_map;
+ sn_dma_map->dma_addr = sg->dma_address;
}
return nents;
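Likewise for scatterlists: with the hunk above, a return value of 0 from the mapping routine means the bus ran out of 32-bit map entries, and any partially built mappings have already been undone by the sn_pci_unmap_sg() call in the error path. A driver-side sketch of the contract (mydev, sglist and nents are illustrative):

    int n = pci_map_sg(mydev, sglist, nents, PCI_DMA_FROMDEVICE);
    if (n == 0)
            return -ENOMEM;         /* nothing left to clean up */

    /* program the device from sg_dma_address()/sg_dma_len() of each entry */

    pci_unmap_sg(mydev, sglist, nents, PCI_DMA_FROMDEVICE);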
@@ -453,20 +364,21 @@ sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int di
if (direction == PCI_DMA_NONE)
BUG();
- for (i = 0; i < nents; i++, sg++)
- if (sg->page) {
- /*
- * We maintain the DMA Map pointer in sg->page if
- * it is ever allocated.
- */
- sg->dma_address = 0;
- sn_dma_map = (struct sn_dma_maps_s *)sg->page;
- pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
- pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
- sn_dma_map->dma_addr = 0;
- sg->page = 0;
- }
+ for (i = 0; i < nents; i++, sg++){
+
+ if (IS_PCI32_MAPPED(sg->dma_address)) {
+ sn_dma_map = NULL;
+ sn_dma_map = find_sn_dma_map(sg->dma_address, hwdev->bus->number);
+ if (sn_dma_map) {
+ pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
+ pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
+ sn_dma_map->dma_addr = (dma_addr_t)NULL;
+ }
+ }
+ sg->dma_address = (dma_addr_t)NULL;
+ sg->dma_length = 0;
+ }
}
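The rewritten unmap path above recovers the ATE map by handle through find_sn_dma_map(), which is declared earlier in this file but whose body is not shown in this diff. Purely as a guess at the scheme implied by the data it touches (the per-bus busnum_to_atedmamaps table and the dma_addr stashed in each map by sn_pci_map_sg()), it would amount to a linear scan; the MAX_ATE_MAPS bound below is an assumption about how that table is sized.

    /* Assumed shape of the lookup; not taken from this patch. */
    static struct sn_dma_maps_s *
    find_sn_dma_map(dma_addr_t dma_addr, unsigned char busnum)
    {
            struct sn_dma_maps_s *map =
                    (struct sn_dma_maps_s *)busnum_to_atedmamaps[busnum];
            int i;

            for (i = 0; i < MAX_ATE_MAPS; i++, map++)
                    if (map->dma_addr == dma_addr)
                            return map;
            return NULL;
    }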
/**
@@ -492,7 +404,7 @@ sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int di
dma_addr_t
sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
dma_addr_t dma_addr;
unsigned long phys_addr;
struct sn_device_sysdata *device_sysdata;
@@ -534,17 +446,9 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
* First try to get a 32 bit direct map register.
*/
if (IS_PCI32G(hwdev)) {
-#ifdef CONFIG_IA64_SGI_SN1
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_DATA);
-#elif defined(CONFIG_IA64_SGI_SN2)
dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA);
-#else
-#error unsupported platform
-#endif
if (dma_addr)
return dma_addr;
}
@@ -554,21 +458,14 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
* let's use the PMU instead.
*/
dma_map = NULL;
-#ifdef CONFIG_IA64_SGI_SN1
- dma_map = pciio_dmamap_alloc(vhdl, NULL, size, PCIIO_BYTE_STREAM |
- PCIIO_DMA_DATA);
-#elif defined(CONFIG_IA64_SGI_SN2)
dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA);
-#else
-#error unsupported platform
-#endif
if (!dma_map) {
printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
"32 bit page map entries.\n");
- BUG();
+ return 0;
}
dma_addr = (dma_addr_t) pciio_dmamap_addr(dma_map, phys_addr, size);
@@ -620,17 +517,14 @@ sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int
* @direction: DMA direction
*
* This routine is supposed to sync the DMA region specified
- * by @dma_handle into the 'coherence domain'. See sn_dma_sync()
- * above for more information. Also known as
- * platform_pci_dma_sync_single() by the IA64 machvec code.
+ * by @dma_handle into the 'coherence domain'. We do not need to do
+ * anything on our platform.
*/
void
sn_pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
{
- if (direction == PCI_DMA_NONE)
- BUG();
+ return;
- sn_dma_sync(hwdev);
}
/**
@@ -641,30 +535,14 @@ sn_pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size
* @direction: DMA direction
*
* This routine is supposed to sync the DMA regions specified
- * by @sg into the 'coherence domain'. See sn_dma_sync()
- * above for more information. Also known as
- * platform_pci_dma_sync_sg() by the IA64 machvec code.
+ * by @sg into the 'coherence domain'. We do not need to do anything
+ * on our platform.
*/
void
sn_pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
- if (direction == PCI_DMA_NONE)
- BUG();
+ return;
- sn_dma_sync(hwdev);
-}
-
-/**
- * sn_dma_address - get the DMA address for the first entry of a scatterlist
- * @sg: sg to look at
- *
- * Gets the DMA address for the scatterlist @sg. Also known as
- * platform_dma_address() by the IA64 machvec code.
- */
-unsigned long
-sn_dma_address(struct scatterlist *sg)
-{
- return ((unsigned long)sg->dma_address);
}
/**
@@ -695,6 +573,5 @@ EXPORT_SYMBOL(sn_pci_map_sg);
EXPORT_SYMBOL(sn_pci_unmap_sg);
EXPORT_SYMBOL(sn_pci_alloc_consistent);
EXPORT_SYMBOL(sn_pci_free_consistent);
-EXPORT_SYMBOL(sn_dma_address);
EXPORT_SYMBOL(sn_pci_dma_supported);
diff --git a/arch/ia64/sn/io/ml_SN_init.c b/arch/ia64/sn/io/ml_SN_init.c
deleted file mode 100644
index d6bfddc90f9364..00000000000000
--- a/arch/ia64/sn/io/ml_SN_init.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/bootmem.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/snconfig.h>
-
-extern int numcpus;
-extern char arg_maxnodes[];
-extern cpuid_t master_procid;
-#if defined(CONFIG_IA64_SGI_SN1)
-extern synergy_da_t *Synergy_da_indr[];
-#endif
-
-extern int hasmetarouter;
-
-int maxcpus;
-cpumask_t boot_cpumask;
-hubreg_t region_mask = 0;
-
-
-extern xwidgetnum_t hub_widget_id(nasid_t);
-
-extern int valid_icache_reasons; /* Reasons to flush the icache */
-extern int valid_dcache_reasons; /* Reasons to flush the dcache */
-extern u_char miniroot;
-extern volatile int need_utlbmiss_patch;
-extern void iograph_early_init(void);
-
-nasid_t master_nasid = INVALID_NASID;
-
-
-/*
- * mlreset(int slave)
- * very early machine reset - at this point NO interrupts have been
- * enabled; nor is memory, tlb, p0, etc setup.
- *
- * slave is zero when mlreset is called for the master processor and
- * is nonzero thereafter.
- */
-
-
-void
-mlreset(int slave)
-{
- if (!slave) {
- /*
- * We are the master cpu and node.
- */
- master_nasid = get_nasid();
- set_master_bridge_base();
-
- /* We're the master processor */
- master_procid = smp_processor_id();
- master_nasid = cpuid_to_nasid(master_procid);
-
- /*
- * master_nasid we get back better be same as one from
- * get_nasid()
- */
- ASSERT_ALWAYS(master_nasid == get_nasid());
-
- /* early initialization of iograph */
- iograph_early_init();
-
- /* Initialize Hub Pseudodriver Management */
- hubdev_init();
-
- } else { /* slave != 0 */
- /*
- * This code is performed ONLY by slave processors.
- */
-
- }
-}
-
-
-/* XXX - Move the meat of this to intr.c ? */
-/*
- * Set up the platform-dependent fields in the nodepda.
- */
-void init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
-{
- hubinfo_t hubinfo;
-#ifdef CONFIG_IA64_SGI_SN1
- int sn;
-#endif
-
- extern void router_map_init(nodepda_t *);
- extern void router_queue_init(nodepda_t *,cnodeid_t);
- extern void intr_init_vecblk(nodepda_t *, cnodeid_t, int);
-
- /* Allocate per-node platform-dependent data */
- hubinfo = (hubinfo_t)alloc_bootmem_node(NODE_DATA(node), sizeof(struct hubinfo_s));
-
- npda->pdinfo = (void *)hubinfo;
- hubinfo->h_nodepda = npda;
- hubinfo->h_cnodeid = node;
- hubinfo->h_nasid = COMPACT_TO_NASID_NODEID(node);
-
- spin_lock_init(&hubinfo->h_crblock);
-
- hubinfo->h_widgetid = hub_widget_id(hubinfo->h_nasid);
- npda->xbow_peer = INVALID_NASID;
-
- /*
- * Initialize the linked list of
- * router info pointers to the dependent routers
- */
- npda->npda_rip_first = NULL;
-
- /*
- * npda_rip_last always points to the place
- * where the next element is to be inserted
- * into the list
- */
- npda->npda_rip_last = &npda->npda_rip_first;
- npda->module_id = INVALID_MODULE;
-
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * Initialize the interrupts.
- * On sn2, this is done at pci init time,
- * because sn2 needs the cpus checked in
- * when it initializes interrupts. This is
- * so we don't see all the nodes as headless.
- */
- for (sn=0; sn<NUM_SUBNODES; sn++) {
- intr_init_vecblk(npda, node, sn);
- }
-#endif /* CONFIG_IA64_SGI_SN1 */
-
- mutex_init_locked(&npda->xbow_sema); /* init it locked? */
-
-#ifdef LATER
-
- /* Setup the (module,slot) --> nic mapping for all the routers
- * in the system. This is useful during error handling when
- * there is no shared memory.
- */
- router_map_init(npda);
-
- /* Allocate memory for the per-node router traversal queue */
- router_queue_init(npda,node);
- npda->sbe_info = alloc_bootmem_node(NODE_DATA(node), sizeof (sbe_info_t));
- ASSERT(npda->sbe_info);
-
-#endif /* LATER */
-}
-
-/* XXX - Move the interrupt stuff to intr.c ? */
-/*
- * Set up the platform-dependent fields in the processor pda.
- * Must be done _after_ init_platform_nodepda().
- * If we need a lock here, something else is wrong!
- */
-void init_platform_pda(cpuid_t cpu)
-{
-#if defined(CONFIG_IA64_SGI_SN1)
- hub_intmasks_t *intmasks;
- int i, subnode;
- cnodeid_t cnode;
- synergy_da_t *sda;
- int which_synergy;
-
-
- cnode = cpuid_to_cnodeid(cpu);
- which_synergy = cpuid_to_synergy(cpu);
-
- sda = Synergy_da_indr[(cnode * 2) + which_synergy];
- intmasks = &sda->s_intmasks;
-
- /* Clear INT_PEND0 masks. */
- for (i = 0; i < N_INTPEND0_MASKS; i++)
- intmasks->intpend0_masks[i] = 0;
-
- /* Set up pointer to the vector block in the nodepda. */
- /* (Cant use SUBNODEPDA - not working yet) */
- subnode = cpuid_to_subnode(cpu);
- intmasks->dispatch0 = &NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch0;
- intmasks->dispatch1 = &NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch1;
- if (intmasks->dispatch0 != &SUBNODEPDA(cnode, subnode)->intr_dispatch0 ||
- intmasks->dispatch1 != &SUBNODEPDA(cnode, subnode)->intr_dispatch1)
- panic("xxx");
- intmasks->dispatch0 = &SUBNODEPDA(cnode, subnode)->intr_dispatch0;
- intmasks->dispatch1 = &SUBNODEPDA(cnode, subnode)->intr_dispatch1;
-
- /* Clear INT_PEND1 masks. */
- for (i = 0; i < N_INTPEND1_MASKS; i++)
- intmasks->intpend1_masks[i] = 0;
-#endif /* CONFIG_IA64_SGI_SN1 */
-}
-
-void
-update_node_information(cnodeid_t cnodeid)
-{
- nodepda_t *npda = NODEPDA(cnodeid);
- nodepda_router_info_t *npda_rip;
-
- /* Go through the list of router info
- * structures and copy some frequently
- * accessed info from the info hanging
- * off the corresponding router vertices
- */
- npda_rip = npda->npda_rip_first;
- while(npda_rip) {
- if (npda_rip->router_infop) {
- npda_rip->router_portmask =
- npda_rip->router_infop->ri_portmask;
- npda_rip->router_slot =
- npda_rip->router_infop->ri_slotnum;
- } else {
- /* No router, no ports. */
- npda_rip->router_portmask = 0;
- }
- npda_rip = npda_rip->router_next;
- }
-}
diff --git a/arch/ia64/sn/io/ml_iograph.c b/arch/ia64/sn/io/ml_iograph.c
deleted file mode 100644
index eb6e3e60bf87aa..00000000000000
--- a/arch/ia64/sn/io/ml_iograph.c
+++ /dev/null
@@ -1,1570 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/xtalk/xtalk_private.h>
-#include <asm/sn/xtalk/xtalkaddrs.h>
-
-/* #define IOGRAPH_DEBUG */
-#ifdef IOGRAPH_DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif /* IOGRAPH_DEBUG */
-
-/* #define PROBE_TEST */
-
-/* At most 2 hubs can be connected to an xswitch */
-#define NUM_XSWITCH_VOLUNTEER 2
-
-/*
- * Track which hubs have volunteered to manage devices hanging off of
- * a Crosstalk Switch (e.g. xbow). This structure is allocated,
- * initialized, and hung off the xswitch vertex early on when the
- * xswitch vertex is created.
- */
-typedef struct xswitch_vol_s {
- mutex_t xswitch_volunteer_mutex;
- int xswitch_volunteer_count;
- devfs_handle_t xswitch_volunteer[NUM_XSWITCH_VOLUNTEER];
-} *xswitch_vol_t;
-
-void
-xswitch_vertex_init(devfs_handle_t xswitch)
-{
- xswitch_vol_t xvolinfo;
- int rc;
-
- xvolinfo = kmalloc(sizeof(struct xswitch_vol_s), GFP_KERNEL);
- mutex_init(&xvolinfo->xswitch_volunteer_mutex);
- xvolinfo->xswitch_volunteer_count = 0;
- rc = hwgraph_info_add_LBL(xswitch,
- INFO_LBL_XSWITCH_VOL,
- (arbitrary_info_t)xvolinfo);
- ASSERT(rc == GRAPH_SUCCESS); rc = rc;
-}
-
-
-/*
- * When assignment of hubs to widgets is complete, we no longer need the
- * xswitch volunteer structure hanging around. Destroy it.
- */
-static void
-xswitch_volunteer_delete(devfs_handle_t xswitch)
-{
- xswitch_vol_t xvolinfo;
- int rc;
-
- rc = hwgraph_info_remove_LBL(xswitch,
- INFO_LBL_XSWITCH_VOL,
- (arbitrary_info_t *)&xvolinfo);
-#ifdef LATER
- ASSERT(rc == GRAPH_SUCCESS); rc = rc;
-#endif
-
- kfree(xvolinfo);
-}
-/*
- * A Crosstalk master volunteers to manage xwidgets on the specified xswitch.
- */
-/* ARGSUSED */
-static void
-volunteer_for_widgets(devfs_handle_t xswitch, devfs_handle_t master)
-{
- xswitch_vol_t xvolinfo = NULL;
-
- (void)hwgraph_info_get_LBL(xswitch,
- INFO_LBL_XSWITCH_VOL,
- (arbitrary_info_t *)&xvolinfo);
- if (xvolinfo == NULL) {
-#ifdef LATER
- if (!is_headless_node_vertex(master)) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "volunteer for widgets: vertex %v has no info label",
- xswitch);
-#else
- printk(KERN_WARNING "volunteer for widgets: vertex 0x%x has no info label",
- xswitch);
-#endif
- }
-#endif /* LATER */
- return;
- }
-
- mutex_lock(&xvolinfo->xswitch_volunteer_mutex);
- ASSERT(xvolinfo->xswitch_volunteer_count < NUM_XSWITCH_VOLUNTEER);
- xvolinfo->xswitch_volunteer[xvolinfo->xswitch_volunteer_count] = master;
- xvolinfo->xswitch_volunteer_count++;
- mutex_unlock(&xvolinfo->xswitch_volunteer_mutex);
-}
-
-extern int xbow_port_io_enabled(nasid_t nasid, int widgetnum);
-
-/*
- * Assign all the xwidgets hanging off the specified xswitch to the
- * Crosstalk masters that have volunteered for xswitch duty.
- */
-/* ARGSUSED */
-static void
-assign_widgets_to_volunteers(devfs_handle_t xswitch, devfs_handle_t hubv)
-{
- int curr_volunteer, num_volunteer;
- xwidgetnum_t widgetnum;
- xswitch_info_t xswitch_info;
- xswitch_vol_t xvolinfo = NULL;
- nasid_t nasid;
- hubinfo_t hubinfo;
-
- hubinfo_get(hubv, &hubinfo);
- nasid = hubinfo->h_nasid;
-
- xswitch_info = xswitch_info_get(xswitch);
- ASSERT(xswitch_info != NULL);
-
- (void)hwgraph_info_get_LBL(xswitch,
- INFO_LBL_XSWITCH_VOL,
- (arbitrary_info_t *)&xvolinfo);
- if (xvolinfo == NULL) {
-#ifdef LATER
- if (!is_headless_node_vertex(hubv)) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "assign_widgets_to_volunteers:vertex %v has "
- " no info label",
- xswitch);
-#else
- printk(KERN_WARNING "assign_widgets_to_volunteers:vertex 0x%x has "
- " no info label",
- xswitch);
-#endif
- }
-#endif /* LATER */
- return;
- }
-
- num_volunteer = xvolinfo->xswitch_volunteer_count;
- ASSERT(num_volunteer > 0);
- curr_volunteer = 0;
-
- /* Assign master hub for xswitch itself. */
- if (HUB_WIDGET_ID_MIN > 0) {
- hubv = xvolinfo->xswitch_volunteer[0];
- xswitch_info_master_assignment_set(xswitch_info, (xwidgetnum_t)0, hubv);
- }
-
- /*
- * TBD: Use administrative information to alter assignment of
- * widgets to hubs.
- */
- for (widgetnum=HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
-
- /*
- * Ignore disabled/empty ports.
- */
- if (!xbow_port_io_enabled(nasid, widgetnum))
- continue;
-
- /*
- * If this is the master IO board, assign it to the same
- * hub that owned it in the prom.
- */
- if (is_master_nasid_widget(nasid, widgetnum)) {
- int i;
-
- for (i=0; i<num_volunteer; i++) {
- hubv = xvolinfo->xswitch_volunteer[i];
- hubinfo_get(hubv, &hubinfo);
- nasid = hubinfo->h_nasid;
- if (nasid == get_console_nasid())
- goto do_assignment;
- }
-#ifdef LATER
- PRINT_PANIC("Nasid == %d, console nasid == %d",
- nasid, get_console_nasid());
-#endif
- }
-
-
- /*
- * Do a round-robin assignment among the volunteer nodes.
- */
- hubv = xvolinfo->xswitch_volunteer[curr_volunteer];
- curr_volunteer = (curr_volunteer + 1) % num_volunteer;
- /* fall through */
-
-do_assignment:
- /*
- * At this point, we want to make hubv the master of widgetnum.
- */
- xswitch_info_master_assignment_set(xswitch_info, widgetnum, hubv);
- }
-
- xswitch_volunteer_delete(xswitch);
-}
-
-/*
- * Early iograph initialization. Called by master CPU in mlreset().
- * Useful for including iograph.o in kernel.o.
- */
-void
-iograph_early_init(void)
-{
-/*
- * Need new way to get this information ..
- */
- cnodeid_t cnode;
- nasid_t nasid;
- lboard_t *board;
-
- /*
- * Init. the board-to-hwgraph link early, so FRU analyzer
- * doesn't trip on leftover values if we panic early on.
- */
- for(cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
- board = (lboard_t *)KL_CONFIG_INFO(nasid);
- DBG("iograph_early_init: Found board 0x%p\n", board);
-
- /* Check out all the board info stored on a node */
- while(board) {
- board->brd_graph_link = GRAPH_VERTEX_NONE;
- board = KLCF_NEXT(board);
- DBG("iograph_early_init: Found board 0x%p\n", board);
-
-
- }
- }
-
- hubio_init();
-}
-
-#ifdef LINUX_KERNEL_THREADS
-static struct semaphore io_init_sema;
-#endif
-
-/*
- * Let boot processor know that we're done initializing our node's IO
- * and then exit.
- */
-/* ARGSUSED */
-static void
-io_init_done(cnodeid_t cnodeid,cpu_cookie_t c)
-{
- /* Let boot processor know that we're done. */
-#ifdef LINUX_KERNEL_THREADS
- up(&io_init_sema);
-#endif
-#ifdef LATER
- /* This is for the setnoderun done when the io_init thread
- * started
- */
- restorenoderun(c);
- sthread_exit();
-#endif
-}
-
-/*
- * Probe to see if this hub's xtalk link is active. If so,
- * return the Crosstalk Identification of the widget that we talk to.
- * This is called before any of the Crosstalk infrastructure for
- * this hub is set up. It's usually called on the node that we're
- * probing, but not always.
- *
- * TBD: Prom code should actually do this work, and pass through
- * hwid for our use.
- */
-static void
-early_probe_for_widget(devfs_handle_t hubv, xwidget_hwid_t hwid)
-{
- hubreg_t llp_csr_reg;
- nasid_t nasid;
- hubinfo_t hubinfo;
-
- hubinfo_get(hubv, &hubinfo);
- nasid = hubinfo->h_nasid;
-
- llp_csr_reg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
- /*
- * If link is up, read the widget's part number.
- * A direct connect widget must respond to widgetnum=0.
- */
- if (llp_csr_reg & IIO_LLP_CSR_IS_UP) {
- /* TBD: Put hub into "indirect" mode */
- /*
- * We're able to read from a widget because our hub's
- * WIDGET_ID was set up earlier.
- */
- widgetreg_t widget_id = *(volatile widgetreg_t *)
- (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
-
- DBG("early_probe_for_widget: Hub Vertex 0x%p is UP widget_id = 0x%x Register 0x%p\n", hubv, widget_id,
- (volatile widgetreg_t *)(RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID) );
-
- hwid->part_num = XWIDGET_PART_NUM(widget_id);
- hwid->rev_num = XWIDGET_REV_NUM(widget_id);
- hwid->mfg_num = XWIDGET_MFG_NUM(widget_id);
-
- /* TBD: link reset */
- } else {
-
- hwid->part_num = XWIDGET_PART_NUM_NONE;
- hwid->rev_num = XWIDGET_REV_NUM_NONE;
- hwid->mfg_num = XWIDGET_MFG_NUM_NONE;
- }
-
-}
-
-/* Add inventory information to the widget vertex
- * Right now (module,slot,revision) is being
- * added as inventory information.
- */
-static void
-xwidget_inventory_add(devfs_handle_t widgetv,
- lboard_t *board,
- struct xwidget_hwid_s hwid)
-{
- if (!board)
- return;
- /* Donot add inventory information for the baseio
- * on a speedo with an xbox. It has already been
- * taken care of in SN00_vmc.
- * Speedo with xbox's baseio comes in at slot io1 (widget 9)
- */
- device_inventory_add(widgetv,INV_IOBD,board->brd_type,
- board->brd_module,
- SLOTNUM_GETSLOT(board->brd_slot),
- hwid.rev_num);
-}
-
-/*
- * io_xswitch_widget_init
- *
- */
-
-/* defined in include/linux/ctype.h */
-/* #define toupper(c) (islower(c) ? (c) - 'a' + 'A' : (c)) */
-
-void
-io_xswitch_widget_init(devfs_handle_t xswitchv,
- devfs_handle_t hubv,
- xwidgetnum_t widgetnum,
- async_attach_t aa)
-{
- xswitch_info_t xswitch_info;
- xwidgetnum_t hub_widgetid;
- devfs_handle_t widgetv;
- cnodeid_t cnode;
- widgetreg_t widget_id;
- nasid_t nasid, peer_nasid;
- struct xwidget_hwid_s hwid;
- hubinfo_t hubinfo;
- /*REFERENCED*/
- int rc;
- char slotname[SLOTNUM_MAXLENGTH];
- char pathname[128];
- char new_name[64];
- moduleid_t module;
- slotid_t slot;
- lboard_t *board = NULL;
- char buffer[16];
- slotid_t get_widget_slotnum(int xbow, int widget);
-
- DBG("\nio_xswitch_widget_init: hubv 0x%p, xswitchv 0x%p, widgetnum 0x%x\n", hubv, xswitchv, widgetnum);
- /*
- * Verify that xswitchv is indeed an attached xswitch.
- */
- xswitch_info = xswitch_info_get(xswitchv);
- ASSERT(xswitch_info != NULL);
-
- hubinfo_get(hubv, &hubinfo);
- nasid = hubinfo->h_nasid;
- cnode = NASID_TO_COMPACT_NODEID(nasid);
- hub_widgetid = hubinfo->h_widgetid;
-
-
- /* Who's the other guy on out crossbow (if anyone) */
- peer_nasid = NODEPDA(cnode)->xbow_peer;
- if (peer_nasid == INVALID_NASID)
- /* If I don't have a peer, use myself. */
- peer_nasid = nasid;
-
-
- /* Check my xbow structure and my peer's */
- if (!xbow_port_io_enabled(nasid, widgetnum) &&
- !xbow_port_io_enabled(peer_nasid, widgetnum)) {
- return;
- }
-
- if (xswitch_info_link_ok(xswitch_info, widgetnum)) {
- char name[4];
- /*
- * If the current hub is not supposed to be the master
- * for this widgetnum, then skip this widget.
- */
- if (xswitch_info_master_assignment_get(xswitch_info,
- widgetnum) != hubv) {
- return;
- }
-
- module = NODEPDA(cnode)->module_id;
-#ifdef XBRIDGE_REGS_SIM
- /* hardwire for now...could do this with something like:
- * xbow_soft_t soft = hwgraph_fastinfo_get(vhdl);
- * xbow_t xbow = soft->base;
- * xbowreg_t xwidget_id = xbow->xb_wid_id;
- * but I don't feel like figuring out vhdl right now..
- * and I know for a fact the answer is 0x2d000049
- */
- DBG("io_xswitch_widget_init: XBRIDGE_REGS_SIM FIXME: reading xwidget id: hardwired to xbridge (0x2d000049).\n");
- DBG("XWIDGET_PART_NUM(0x2d000049)= 0x%x\n", XWIDGET_PART_NUM(0x2d000049));
- if (XWIDGET_PART_NUM(0x2d000049)==XXBOW_WIDGET_PART_NUM) {
-#else
- if (nasid_has_xbridge(nasid)) {
-#endif /* XBRIDGE_REGS_SIM */
- board = find_lboard_module_class(
- (lboard_t *)KL_CONFIG_INFO(nasid),
- module,
- KLTYPE_IOBRICK);
-
-DBG("io_xswitch_widget_init: Board 0x%p\n", board);
-{
- lboard_t dummy;
-
-
- if (board) {
- DBG("io_xswitch_widget_init: Found KLTYPE_IOBRICK Board 0x%p brd_type 0x%x\n", board, board->brd_type);
- } else {
- DBG("io_xswitch_widget_init: FIXME did not find IOBOARD\n");
- board = &dummy;
- }
-
-}
-
- /*
- * Make sure we really want to say xbrick, pbrick,
- * etc. rather than XIO, graphics, etc.
- */
-
-#ifdef SUPPORT_PRINTING_M_FORMAT
- sprintf(pathname, EDGE_LBL_MODULE "/%M/"
- "%cbrick" "/%s/%d",
- NODEPDA(cnode)->module_id,
-
-#else
- memset(buffer, 0, 16);
- format_module_id(buffer, NODEPDA(cnode)->module_id, MODULE_FORMAT_BRIEF);
- sprintf(pathname, EDGE_LBL_MODULE "/%s/"
- "%cbrick" "/%s/%d",
- buffer,
-#endif
-
- (board->brd_type == KLTYPE_IBRICK) ? 'I' :
- (board->brd_type == KLTYPE_PBRICK) ? 'P' :
- (board->brd_type == KLTYPE_XBRICK) ? 'X' : '?',
- EDGE_LBL_XTALK, widgetnum);
- }
-
- DBG("io_xswitch_widget_init: path= %s\n", pathname);
- rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);
-
- ASSERT(rc == GRAPH_SUCCESS);
-
- /* This is needed to let the user programs to map the
- * module,slot numbers to the corresponding widget numbers
- * on the crossbow.
- */
- rc = device_master_set(hwgraph_connectpt_get(widgetv), hubv);
-
- /* If we are looking at the global master io6
- * then add information about the version of
- * the io6prom as a part of "detailed inventory"
- * information.
- */
- if (is_master_baseio(nasid,
- NODEPDA(cnode)->module_id,
- get_widget_slotnum(0,widgetnum))) {
- extern void klhwg_baseio_inventory_add(devfs_handle_t,
- cnodeid_t);
- module = NODEPDA(cnode)->module_id;
-
-#ifdef XBRIDGE_REGS_SIM
- DBG("io_xswitch_widget_init: XBRIDGE_REGS_SIM FIXME: reading xwidget id: hardwired to xbridge (0x2d000049).\n");
- if (XWIDGET_PART_NUM(0x2d000049)==XXBOW_WIDGET_PART_NUM) {
-#else
- if (nasid_has_xbridge(nasid)) {
-#endif /* XBRIDGE_REGS_SIM */
- board = find_lboard_module(
- (lboard_t *)KL_CONFIG_INFO(nasid),
- module);
- /*
- * Change iobrick to correct i/o brick
- */
-#ifdef SUPPORT_PRINTING_M_FORMAT
- sprintf(pathname, EDGE_LBL_MODULE "/%M/"
-#else
- sprintf(pathname, EDGE_LBL_MODULE "/%x/"
-#endif
- "iobrick" "/%s/%d",
- NODEPDA(cnode)->module_id,
- EDGE_LBL_XTALK, widgetnum);
- } else {
- slot = get_widget_slotnum(0, widgetnum);
- board = get_board_name(nasid, module, slot,
- new_name);
- /*
- * Create the vertex for the widget,
- * using the decimal
- * widgetnum as the name of the primary edge.
- */
-#ifdef SUPPORT_PRINTING_M_FORMAT
- sprintf(pathname, EDGE_LBL_MODULE "/%M/"
- EDGE_LBL_SLOT "/%s/%s",
- NODEPDA(cnode)->module_id,
- slotname, new_name);
-#else
- memset(buffer, 0, 16);
- format_module_id(buffer, NODEPDA(cnode)->module_id, MODULE_FORMAT_BRIEF);
- sprintf(pathname, EDGE_LBL_MODULE "/%s/"
- EDGE_LBL_SLOT "/%s/%s",
- buffer,
- slotname, new_name);
-#endif
- }
-
- rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);
- DBG("io_xswitch_widget_init: (2) path= %s\n", pathname);
- /*
- * This is a weird ass code needed for error injection
- * purposes.
- */
- rc = device_master_set(hwgraph_connectpt_get(widgetv), hubv);
-
- klhwg_baseio_inventory_add(widgetv,cnode);
- }
- sprintf(name, "%d", widgetnum);
- DBG("io_xswitch_widget_init: FIXME hwgraph_edge_add %s xswitchv 0x%p, widgetv 0x%p\n", name, xswitchv, widgetv);
- rc = hwgraph_edge_add(xswitchv, widgetv, name);
-
- /*
- * crosstalk switch code tracks which
- * widget is attached to each link.
- */
- xswitch_info_vhdl_set(xswitch_info, widgetnum, widgetv);
-
- /*
- * Peek at the widget to get its crosstalk part and
- * mfgr numbers, then present it to the generic xtalk
- * bus provider to have its driver attach routine
- * called (or not).
- */
-#ifdef XBRIDGE_REGS_SIM
- widget_id = 0x2d000049;
- DBG("io_xswitch_widget_init: XBRIDGE_REGS_SIM FIXME: id hardwired to widget_id\n");
-#else
- widget_id = XWIDGET_ID_READ(nasid, widgetnum);
-#endif /* XBRIDGE_REGS_SIM */
- hwid.part_num = XWIDGET_PART_NUM(widget_id);
- hwid.rev_num = XWIDGET_REV_NUM(widget_id);
- hwid.mfg_num = XWIDGET_MFG_NUM(widget_id);
- /* Store some inventory information about
- * the xwidget in the hardware graph.
- */
- xwidget_inventory_add(widgetv,board,hwid);
-
- (void)xwidget_register(&hwid, widgetv, widgetnum,
- hubv, hub_widgetid,
- aa);
-
-#ifdef SN0_USE_BTE
- bte_bpush_war(cnode, (void *)board);
-#endif
- }
-
-}
-
-
-static void
-io_init_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnode)
-{
- xwidgetnum_t widgetnum;
- async_attach_t aa;
-
- aa = async_attach_new();
-
- DBG("io_init_xswitch_widgets: xswitchv 0x%p for cnode %d\n", xswitchv, cnode);
-
- for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX;
- widgetnum++) {
- io_xswitch_widget_init(xswitchv,
- cnodeid_to_vertex(cnode),
- widgetnum, aa);
- }
- /*
- * Wait for parallel attach threads, if any, to complete.
- */
- async_attach_waitall(aa);
- async_attach_free(aa);
-}
-
-/*
- * For each PCI bridge connected to the xswitch, add a link from the
- * board's klconfig info to the bridge's hwgraph vertex. This lets
- * the FRU analyzer find the bridge without traversing the hardware
- * graph and risking hangs.
- */
-static void
-io_link_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnodeid)
-{
- xwidgetnum_t widgetnum;
- char pathname[128];
- devfs_handle_t vhdl;
- nasid_t nasid, peer_nasid;
- lboard_t *board;
-
-
-
- /* And its connected hub's nasids */
- nasid = COMPACT_TO_NASID_NODEID(cnodeid);
- peer_nasid = NODEPDA(cnodeid)->xbow_peer;
-
- /*
- * Look for paths matching "<widgetnum>/pci" under xswitchv.
- * For every widget, init. its lboard's hwgraph link. If the
- * board has a PCI bridge, point the link to it.
- */
- for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX;
- widgetnum++) {
- sprintf(pathname, "%d", widgetnum);
- if (hwgraph_traverse(xswitchv, pathname, &vhdl) !=
- GRAPH_SUCCESS)
- continue;
-
- board = find_lboard_module((lboard_t *)KL_CONFIG_INFO(nasid),
- NODEPDA(cnodeid)->module_id);
- if (board == NULL && peer_nasid != INVALID_NASID) {
- /*
- * Try to find the board on our peer
- */
- board = find_lboard_module(
- (lboard_t *)KL_CONFIG_INFO(peer_nasid),
- NODEPDA(cnodeid)->module_id);
- }
- if (board == NULL) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "Could not find PROM info for vertex %v, "
- "FRU analyzer may fail",
- vhdl);
-#else
- printk(KERN_WARNING "Could not find PROM info for vertex 0x%p, "
- "FRU analyzer may fail",
- (void *)vhdl);
-#endif
- return;
- }
-
- sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
- if (hwgraph_traverse(xswitchv, pathname, &vhdl) ==
- GRAPH_SUCCESS)
- board->brd_graph_link = vhdl;
- else
- board->brd_graph_link = GRAPH_VERTEX_NONE;
- }
-}
-
-/*
- * Initialize all I/O on the specified node.
- */
-static void
-io_init_node(cnodeid_t cnodeid)
-{
- /*REFERENCED*/
- devfs_handle_t hubv, switchv, widgetv;
- struct xwidget_hwid_s hwid;
- hubinfo_t hubinfo;
- int is_xswitch;
- nodepda_t *npdap;
- struct semaphore *peer_sema = 0;
- uint32_t widget_partnum;
- nodepda_router_info_t *npda_rip;
- cpu_cookie_t c = 0;
- extern int hubdev_docallouts(devfs_handle_t);
-
-#ifdef LATER
- /* Try to execute on the node that we're initializing. */
- c = setnoderun(cnodeid);
-#endif
- npdap = NODEPDA(cnodeid);
-
- /*
- * Get the "top" vertex for this node's hardware
- * graph; it will carry the per-hub hub-specific
- * data, and act as the crosstalk provider master.
- * It's canonical path is probably something of the
- * form /hw/module/%M/slot/%d/node
- */
- hubv = cnodeid_to_vertex(cnodeid);
- DBG("io_init_node: Initialize IO for cnode %d hubv(node) 0x%p npdap 0x%p\n", cnodeid, hubv, npdap);
-
- ASSERT(hubv != GRAPH_VERTEX_NONE);
-
- hubdev_docallouts(hubv);
-
- /*
- * Set up the dependent routers if we have any.
- */
- npda_rip = npdap->npda_rip_first;
-
- while(npda_rip) {
- /* If the router info has not been initialized
- * then we need to do the router initialization
- */
- if (!npda_rip->router_infop) {
- router_init(cnodeid,0,npda_rip);
- }
- npda_rip = npda_rip->router_next;
- }
-
- /*
- * Read mfg info on this hub
- */
-#ifdef LATER
- printk("io_init_node: FIXME need to implement HUB_VERTEX_MFG_INFO\n");
- HUB_VERTEX_MFG_INFO(hubv);
-#endif /* LATER */
-
- /*
- * If nothing connected to this hub's xtalk port, we're done.
- */
- early_probe_for_widget(hubv, &hwid);
- if (hwid.part_num == XWIDGET_PART_NUM_NONE) {
-#ifdef PROBE_TEST
- if ((cnodeid == 1) || (cnodeid == 2)) {
- int index;
-
- for (index = 0; index < 600; index++)
- DBG("Interfering with device probing!!!\n");
- }
-#endif
- /* io_init_done takes cpu cookie as 2nd argument
- * to do a restorenoderun for the setnoderun done
- * at the start of this thread
- */
-
- DBG("**** io_init_node: Node's 0x%p hub widget has XWIDGET_PART_NUM_NONE ****\n", hubv);
- return;
- /* NOTREACHED */
- }
-
- /*
- * attach our hub_provider information to hubv,
- * so we can use it as a crosstalk provider "master"
- * vertex.
- */
- xtalk_provider_register(hubv, &hub_provider);
- xtalk_provider_startup(hubv);
-
- /*
- * Create a vertex to represent the crosstalk bus
- * attached to this hub, and a vertex to be used
- * as the connect point for whatever is out there
- * on the other side of our crosstalk connection.
- *
- * Crosstalk Switch drivers "climb up" from their
- * connection point to try and take over the switch
- * point.
- *
- * Of course, the edges and verticies may already
- * exist, in which case our net effect is just to
- * associate the "xtalk_" driver with the connection
- * point for the device.
- */
-
- (void)hwgraph_path_add(hubv, EDGE_LBL_XTALK, &switchv);
-
- DBG("io_init_node: Created 'xtalk' entry to '../node/' xtalk vertex 0x%p\n", switchv);
-
- ASSERT(switchv != GRAPH_VERTEX_NONE);
-
- (void)hwgraph_edge_add(hubv, switchv, EDGE_LBL_IO);
-
- DBG("io_init_node: Created symlink 'io' from ../node/io to ../node/xtalk \n");
-
- /*
- * We need to find the widget id and update the basew_id field
- * accordingly. In particular, SN00 has direct connected bridge,
- * and hence widget id is Not 0.
- */
-
- widget_partnum = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + WIDGET_ID))) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
-
- if (widget_partnum == BRIDGE_WIDGET_PART_NUM ||
- widget_partnum == XBRIDGE_WIDGET_PART_NUM){
- npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
-
- DBG("io_init_node: Found XBRIDGE widget_partnum= 0x%x\n", widget_partnum);
-
- } else if (widget_partnum == XBOW_WIDGET_PART_NUM ||
- widget_partnum == XXBOW_WIDGET_PART_NUM) {
- /*
- * Xbow control register does not have the widget ID field.
- * So, hard code the widget ID to be zero.
- */
- DBG("io_init_node: Found XBOW widget_partnum= 0x%x\n", widget_partnum);
- npdap->basew_id = 0;
-
- } else if (widget_partnum == XG_WIDGET_PART_NUM) {
- /*
- * OK, WTF do we do here if we have an XG direct connected to a HUB/Bedrock???
- * So, hard code the widget ID to be zero?
- */
- npdap->basew_id = 0;
- npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
- } else {
- npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
-
- panic(" ****io_init_node: Unknown Widget Part Number 0x%x Widgt ID 0x%x attached to Hubv 0x%p ****\n", widget_partnum, npdap->basew_id, (void *)hubv);
-
- /*NOTREACHED*/
- }
- {
- char widname[10];
- sprintf(widname, "%x", npdap->basew_id);
- (void)hwgraph_path_add(switchv, widname, &widgetv);
- DBG("io_init_node: Created '%s' to '..node/xtalk/' vertex 0x%p\n", widname, widgetv);
- ASSERT(widgetv != GRAPH_VERTEX_NONE);
- }
-
- nodepda->basew_xc = widgetv;
-
- is_xswitch = xwidget_hwid_is_xswitch(&hwid);
-
- /*
- * Try to become the master of the widget. If this is an xswitch
- * with multiple hubs connected, only one will succeed. Mastership
- * of an xswitch is used only when touching registers on that xswitch.
- * The slave xwidgets connected to the xswitch can be owned by various
- * masters.
- */
- if (device_master_set(widgetv, hubv) == 0) {
-
- /* Only one hub (thread) per Crosstalk device or switch makes
- * it to here.
- */
-
- /*
- * Initialize whatever xwidget is hanging off our hub.
- * Whatever it is, it's accessible through widgetnum 0.
- */
- hubinfo_get(hubv, &hubinfo);
-
- (void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid, NULL);
-
- if (!is_xswitch) {
- /* io_init_done takes cpu cookie as 2nd argument
- * to do a restorenoderun for the setnoderun done
- * at the start of this thread
- */
- io_init_done(cnodeid,c);
- /* NOTREACHED */
- }
-
- /*
- * Special handling for Crosstalk Switches (e.g. xbow).
- * We need to do things in roughly the following order:
- * 1) Initialize xswitch hardware (done above)
- * 2) Determine which hubs are available to be widget masters
- * 3) Discover which links are active from the xswitch
- * 4) Assign xwidgets hanging off the xswitch to hubs
- * 5) Initialize all xwidgets on the xswitch
- */
-
- volunteer_for_widgets(switchv, hubv);
-
- /* If there's someone else on this crossbow, recognize him */
- if (npdap->xbow_peer != INVALID_NASID) {
- nodepda_t *peer_npdap = NODEPDA(NASID_TO_COMPACT_NODEID(npdap->xbow_peer));
- peer_sema = &peer_npdap->xbow_sema;
- volunteer_for_widgets(switchv, peer_npdap->node_vertex);
- }
-
- assign_widgets_to_volunteers(switchv, hubv);
-
- /* Signal that we're done */
- if (peer_sema) {
- mutex_unlock(peer_sema);
- }
-
- }
- else {
- /* Wait 'til master is done assigning widgets. */
- mutex_lock(&npdap->xbow_sema);
- }
-
-#ifdef PROBE_TEST
- if ((cnodeid == 1) || (cnodeid == 2)) {
- int index;
-
- for (index = 0; index < 500; index++)
- DBG("Interfering with device probing!!!\n");
- }
-#endif
- /* Now both nodes can safely inititialize widgets */
- io_init_xswitch_widgets(switchv, cnodeid);
- io_link_xswitch_widgets(switchv, cnodeid);
-
- /* io_init_done takes cpu cookie as 2nd argument
- * to do a restorenoderun for the setnoderun done
- * at the start of this thread
- */
- io_init_done(cnodeid,c);
-
- DBG("\nio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n", cnodeid);
-}
-
-
-#define IOINIT_STKSZ (16 * 1024)
-
-#define __DEVSTR1 "/../.master/"
-#define __DEVSTR2 "/target/"
-#define __DEVSTR3 "/lun/0/disk/partition/"
-#define __DEVSTR4 "/../ef"
-
-#if defined(CONFIG_IA64_SGI_SN1)
-/*
- * Currently, we need to allow for 5 IBrick slots with 1 FC each
- * plus an internal 1394.
- *
- * ioconfig starts numbering SCSI's at NUM_BASE_IO_SCSI_CTLR.
- */
-#define NUM_BASE_IO_SCSI_CTLR 6
-#else
-#define NUM_BASE_IO_SCSI_CTLR 6
-#endif
-/*
- * This tells ioconfig where it can start numbering scsi controllers.
- * Below this base number, platform-specific code handles the numbering.
- * XXX Irix legacy .. controller numbering should be part of devfsd's job
- */
-int num_base_io_scsi_ctlr = 2; /* used by syssgi */
-devfs_handle_t base_io_scsi_ctlr_vhdl[NUM_BASE_IO_SCSI_CTLR];
-static devfs_handle_t baseio_enet_vhdl,baseio_console_vhdl;
-
-/*
- * Put the logical controller number information in the
- * scsi controller vertices for each scsi controller that
- * is in a "fixed position".
- */
-static void
-scsi_ctlr_nums_add(devfs_handle_t pci_vhdl)
-{
- {
- int i;
-
- num_base_io_scsi_ctlr = NUM_BASE_IO_SCSI_CTLR;
-
- /* Initialize base_io_scsi_ctlr_vhdl array */
- for (i=0; i<NUM_BASE_IO_SCSI_CTLR; i++)
- base_io_scsi_ctlr_vhdl[i] = GRAPH_VERTEX_NONE;
- }
- {
- /*
- * May want to consider changing the SN0 code, above, to work more like
- * the way this works.
- */
- devfs_handle_t base_ibrick_xbridge_vhdl;
- devfs_handle_t base_ibrick_xtalk_widget_vhdl;
- devfs_handle_t scsi_ctlr_vhdl;
- int i;
- graph_error_t rv;
-
- /*
- * This is a table of "well-known" SCSI controllers and their well-known
- * controller numbers. The names in the table start from the base IBrick's
- * Xbridge vertex, so the first component is the xtalk widget number.
- */
- static struct {
- char *base_ibrick_scsi_path;
- int controller_number;
- } hardwired_scsi_controllers[] = {
- {"15/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 0},
- {"15/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 1},
- {"15/" EDGE_LBL_PCI "/3/" EDGE_LBL_SCSI_CTLR "/0", 2},
- {"14/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 3},
- {"14/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 4},
- {"15/" EDGE_LBL_PCI "/6/ohci/0/" EDGE_LBL_SCSI_CTLR "/0", 5},
- {NULL, -1} /* must be last */
- };
-
- base_ibrick_xtalk_widget_vhdl = hwgraph_connectpt_get(pci_vhdl);
- ASSERT_ALWAYS(base_ibrick_xtalk_widget_vhdl != GRAPH_VERTEX_NONE);
-
- base_ibrick_xbridge_vhdl = hwgraph_connectpt_get(base_ibrick_xtalk_widget_vhdl);
- ASSERT_ALWAYS(base_ibrick_xbridge_vhdl != GRAPH_VERTEX_NONE);
- hwgraph_vertex_unref(base_ibrick_xtalk_widget_vhdl);
-
- /*
- * Iterate through the list of well-known SCSI controllers.
-	 * For each controller found, set its controller number according
- * to the table.
- */
- for (i=0; hardwired_scsi_controllers[i].base_ibrick_scsi_path != NULL; i++) {
- rv = hwgraph_path_lookup(base_ibrick_xbridge_vhdl,
- hardwired_scsi_controllers[i].base_ibrick_scsi_path, &scsi_ctlr_vhdl, NULL);
-
- if (rv != GRAPH_SUCCESS) /* No SCSI at this path */
- continue;
-
- ASSERT(hardwired_scsi_controllers[i].controller_number < NUM_BASE_IO_SCSI_CTLR);
- base_io_scsi_ctlr_vhdl[hardwired_scsi_controllers[i].controller_number] = scsi_ctlr_vhdl;
- device_controller_num_set(scsi_ctlr_vhdl, hardwired_scsi_controllers[i].controller_number);
- hwgraph_vertex_unref(scsi_ctlr_vhdl); /* (even though we're actually keeping a reference) */
- }
-
- hwgraph_vertex_unref(base_ibrick_xbridge_vhdl);
- }
-}
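As a worked example of the table above (paths illustrative, with "pci" and
"scsi_ctlr" standing in for EDGE_LBL_PCI and EDGE_LBL_SCSI_CTLR):

	/*
	 * A controller found at "<Xbridge>/15/pci/1/scsi_ctlr/0" is recorded as
	 * base_io_scsi_ctlr_vhdl[0] and tagged with controller number 0 via
	 * device_controller_num_set(); one at "<Xbridge>/14/pci/2/scsi_ctlr/0"
	 * becomes controller 4.
	 */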
-
-
-#include <asm/sn/ioerror_handling.h>
-devfs_handle_t sys_critical_graph_root = GRAPH_VERTEX_NONE;
-
-/* Define the system critical vertices and connect them through
- * a canonical parent-child relationships for easy traversal
- * during io error handling.
- */
-static void
-sys_critical_graph_init(void)
-{
- devfs_handle_t bridge_vhdl,master_node_vhdl;
- devfs_handle_t xbow_vhdl = GRAPH_VERTEX_NONE;
- extern devfs_handle_t hwgraph_root;
- devfs_handle_t pci_slot_conn;
- int slot;
- devfs_handle_t baseio_console_conn;
-
- DBG("sys_critical_graph_init: FIXME.\n");
- baseio_console_conn = hwgraph_connectpt_get(baseio_console_vhdl);
-
- if (baseio_console_conn == NULL) {
- return;
- }
-
- /* Get the vertex handle for the baseio bridge */
- bridge_vhdl = device_master_get(baseio_console_conn);
-
- /* Get the master node of the baseio card */
- master_node_vhdl = cnodeid_to_vertex(
- master_node_get(baseio_console_vhdl));
-
- /* Add the "root->node" part of the system critical graph */
-
- sys_critical_graph_vertex_add(hwgraph_root,master_node_vhdl);
-
- /* Check if we have a crossbow */
- if (hwgraph_traverse(master_node_vhdl,
- EDGE_LBL_XTALK"/0",
- &xbow_vhdl) == GRAPH_SUCCESS) {
-		/* We have a crossbow.  Add the "node->xbow" part of the system
- * critical graph.
- */
- sys_critical_graph_vertex_add(master_node_vhdl,xbow_vhdl);
-
- /* Add "xbow->baseio bridge" of the system critical graph */
- sys_critical_graph_vertex_add(xbow_vhdl,bridge_vhdl);
-
- hwgraph_vertex_unref(xbow_vhdl);
- } else
-		/* We do not have a crossbow. Add the "node->baseio_bridge"
- * part of the system critical graph.
- */
- sys_critical_graph_vertex_add(master_node_vhdl,bridge_vhdl);
-
- /* Add all the populated PCI slot vertices to the system critical
- * graph with the bridge vertex as the parent.
- */
- for (slot = 0 ; slot < 8; slot++) {
- char slot_edge[10];
-
- sprintf(slot_edge,"%d",slot);
- if (hwgraph_traverse(bridge_vhdl,slot_edge, &pci_slot_conn)
- != GRAPH_SUCCESS)
- continue;
- sys_critical_graph_vertex_add(bridge_vhdl,pci_slot_conn);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- hwgraph_vertex_unref(bridge_vhdl);
-
- /* Add the "ioc3 pci connection point -> console ioc3" part
- * of the system critical graph
- */
-
- if (hwgraph_traverse(baseio_console_vhdl,"..",&pci_slot_conn) ==
- GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- baseio_console_vhdl);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- /* Add the "ethernet pci connection point -> base ethernet" part of
- * the system critical graph
- */
- if (hwgraph_traverse(baseio_enet_vhdl,"..",&pci_slot_conn) ==
- GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- baseio_enet_vhdl);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- /* Add the "scsi controller pci connection point -> base scsi
- * controller" part of the system critical graph
- */
- if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[0],
- "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- base_io_scsi_ctlr_vhdl[0]);
- hwgraph_vertex_unref(pci_slot_conn);
- }
- if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[1],
- "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- base_io_scsi_ctlr_vhdl[1]);
- hwgraph_vertex_unref(pci_slot_conn);
- }
- hwgraph_vertex_unref(baseio_console_conn);
-
-}
-
-static void
-baseio_ctlr_num_set(void)
-{
- char name[MAXDEVNAME];
- devfs_handle_t console_vhdl, pci_vhdl, enet_vhdl;
- devfs_handle_t ioc3_console_vhdl_get(void);
-
-
- DBG("baseio_ctlr_num_set; FIXME\n");
- console_vhdl = ioc3_console_vhdl_get();
- if (console_vhdl == GRAPH_VERTEX_NONE)
- return;
- /* Useful for setting up the system critical graph */
- baseio_console_vhdl = console_vhdl;
-
- vertex_to_name(console_vhdl,name,MAXDEVNAME);
-
- strcat(name,__DEVSTR1);
- pci_vhdl = hwgraph_path_to_vertex(name);
- scsi_ctlr_nums_add(pci_vhdl);
- /* Unref the pci_vhdl due to the reference by hwgraph_path_to_vertex
- */
- hwgraph_vertex_unref(pci_vhdl);
-
- vertex_to_name(console_vhdl, name, MAXDEVNAME);
- strcat(name, __DEVSTR4);
- enet_vhdl = hwgraph_path_to_vertex(name);
-
- /* Useful for setting up the system critical graph */
- baseio_enet_vhdl = enet_vhdl;
-
- device_controller_num_set(enet_vhdl, 0);
- /* Unref the enet_vhdl due to the reference by hwgraph_path_to_vertex
- */
- hwgraph_vertex_unref(enet_vhdl);
-}
-/* #endif */
-
-void
-sn00_rrb_alloc(devfs_handle_t vhdl, int *vendor_list)
-{
- /* REFERENCED */
- int rtn_val;
-
- /*
- ** sn00 population: errb orrb
- ** 0- ql 3+?
- ** 1- ql 2
- ** 2- ioc3 ethernet 2+?
- ** 3- ioc3 secondary 1
- ** 4- 0
- ** 5- PCI slot
- ** 6- PCI slot
- ** 7- PCI slot
- */
-
- /* The following code implements this heuristic for getting
- * maximum usage out of the rrbs
- *
- * constraints:
- * 8 bit ql1 needs 1+1
- * ql0 or ql5,6,7 wants 1+2
- * ethernet wants 2 or more
- *
- * rules for even rrbs:
- * if nothing in slot 6
- * 4 rrbs to 0 and 2 (0xc8889999)
- * else
- * 3 2 3 to slots 0 2 6 (0xc8899bbb)
- *
- * rules for odd rrbs
- * if nothing in slot 5 or 7 (0xc8889999)
- * 4 rrbs to 1 and 3
- * else if 1 thing in 5 or 7 (0xc8899aaa) or (0xc8899bbb)
- * 3 2 3 to slots 1 3 5|7
- * else
- * 2 1 3 2 to slots 1 3 5 7 (note: if there's a ql card in 7 this
- * (0xc89aaabb) may short what it wants therefore the
- * rule should be to plug pci slots in order)
- */
-
-
- if (vendor_list[6] != PCIIO_VENDOR_ID_NONE) {
- /* something in slot 6 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 0, 3,1, 2,0, 0,0, 3,0);
- }
- else {
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 0, 4,1, 4,0, 0,0, 0,0);
- }
- if (rtn_val)
- printk(KERN_WARNING "sn00_rrb_alloc: pcibr_alloc_all_rrbs failed");
-
- if ((vendor_list[5] != PCIIO_VENDOR_ID_NONE) &&
- (vendor_list[7] != PCIIO_VENDOR_ID_NONE)) {
-		/* something in slots 5 and 7 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 2,1, 1,0, 3,0, 2,0);
- }
- else if (vendor_list[5] != PCIIO_VENDOR_ID_NONE) {
-		/* something in slot 5 but not 7 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 3,1, 2,0, 3,0, 0,0);
- }
- else if (vendor_list[7] != PCIIO_VENDOR_ID_NONE) {
-		/* something in slot 7 but not 5 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 3,1, 2,0, 0,0, 3,0);
- }
- else {
- /* nothing in slot 5 or 7 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 4,1, 4,0, 0,0, 0,0);
- }
- if (rtn_val)
- printk(KERN_WARNING "sn00_rrb_alloc: pcibr_alloc_all_rrbs failed");
-}
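A minimal sketch of the odd-RRB branch of the heuristic above, pulled out as a
standalone helper so the slot-occupancy cases can be checked in isolation; the
function and parameter names here are illustrative, not part of the original
driver:

	/* Hypothetical helper mirroring the odd-RRB cases of sn00_rrb_alloc(). */
	static void
	sn00_odd_rrb_split(const int *vendor_list, int *s1, int *s3, int *s5, int *s7)
	{
		int in5 = (vendor_list[5] != PCIIO_VENDOR_ID_NONE);
		int in7 = (vendor_list[7] != PCIIO_VENDOR_ID_NONE);

		if (in5 && in7) {		/* 2 1 3 2 to slots 1 3 5 7 (0xc89aaabb) */
			*s1 = 2; *s3 = 1; *s5 = 3; *s7 = 2;
		} else if (in5) {		/* 3 2 3 to slots 1 3 5 (0xc8899aaa) */
			*s1 = 3; *s3 = 2; *s5 = 3; *s7 = 0;
		} else if (in7) {		/* 3 2 3 to slots 1 3 7 (0xc8899bbb) */
			*s1 = 3; *s3 = 2; *s5 = 0; *s7 = 3;
		} else {			/* 4 rrbs each to slots 1 and 3 (0xc8889999) */
			*s1 = 4; *s3 = 4; *s5 = 0; *s7 = 0;
		}
	}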
-
-
-/*
- * Initialize all I/O devices. Starting closest to nodes, probe and
- * initialize outward.
- */
-void
-init_all_devices(void)
-{
- /* Governor on init threads..bump up when safe
- * (beware many devfs races)
- */
-#ifdef LATER
- int io_init_node_threads = 2;
-#endif
- cnodeid_t cnodeid, active;
-
-#ifdef LINUX_KERNEL_THREADS
- sema_init(&io_init_sema, 0);
-#endif
-
- active = 0;
- for (cnodeid = 0; cnodeid < numnodes; cnodeid++) {
-#ifdef LINUX_KERNEL_THREADS
- char thread_name[16];
- extern int io_init_pri;
-
- /*
- * Spawn a service thread for each node to initialize all
- * I/O on that node. Each thread attempts to bind itself
- * to the node whose I/O it's initializing.
- */
- sprintf(thread_name, "IO_init[%d]", cnodeid);
-
- (void)sthread_create(thread_name, 0, IOINIT_STKSZ, 0,
- io_init_pri, KT_PS, (st_func_t *)io_init_node,
- (void *)(long)cnodeid, 0, 0, 0);
-#else
- DBG("init_all_devices: Calling io_init_node() for cnode %d\n", cnodeid);
- io_init_node(cnodeid);
-
- DBG("init_all_devices: Done io_init_node() for cnode %d\n", cnodeid);
-
-#endif /* LINUX_KERNEL_THREADS */
-
-#ifdef LINUX_KERNEL_THREADS
- /* Limit how many nodes go at once, to not overload hwgraph */
- /* TBD: Should timeout */
- DBG("started thread for cnode %d\n", cnodeid);
- active++;
- if (io_init_node_threads &&
- active >= io_init_node_threads) {
- down(&io_init_sema);
- active--;
- }
-#endif /* LINUX_KERNEL_THREADS */
- }
-
-#ifdef LINUX_KERNEL_THREADS
- /* Wait until all IO_init threads are done */
-
- while (active > 0) {
-#ifdef AA_DEBUG
- DBG("waiting, %d still active\n", active);
-#endif
- down(&io_init_sema);
- active--;
- }
-
-#endif /* LINUX_KERNEL_THREADS */
-
- for (cnodeid = 0; cnodeid < numnodes; cnodeid++)
- /*
- * Update information generated by IO init.
- */
- update_node_information(cnodeid);
-
- baseio_ctlr_num_set();
- /* Setup the system critical graph (which is a subgraph of the
- * main hwgraph). This information is useful during io error
- * handling.
- */
- sys_critical_graph_init();
-
-#if HWG_PRINT
- hwgraph_print();
-#endif
-
-}
-
-#define toint(x) ((int)(x) - (int)('0'))
-
-void
-devnamefromarcs(char *devnm)
-{
- int val;
- char tmpnm[MAXDEVNAME];
- char *tmp1, *tmp2;
-
- val = strncmp(devnm, "dks", 3);
- if (val != 0)
- return;
- tmp1 = devnm + 3;
- if (!isdigit(*tmp1))
- return;
-
- val = 0;
- while (isdigit(*tmp1)) {
- val = 10*val+toint(*tmp1);
- tmp1++;
- }
-
- if(*tmp1 != 'd')
- return;
- else
- tmp1++;
-
- if ((val < 0) || (val >= NUM_BASE_IO_SCSI_CTLR)) {
- int i;
- int viable_found = 0;
-
- DBG("Only controller numbers 0..%d are supported for\n", NUM_BASE_IO_SCSI_CTLR-1);
- DBG("prom \"root\" variables of the form dksXdXsX.\n");
- DBG("To use another disk you must use the full hardware graph path\n\n");
- DBG("Possible controller numbers for use in 'dksXdXsX' on this system: ");
- for (i=0; i<NUM_BASE_IO_SCSI_CTLR; i++) {
- if (base_io_scsi_ctlr_vhdl[i] != GRAPH_VERTEX_NONE) {
- DBG("%d ", i);
- viable_found=1;
- }
- }
- if (viable_found)
- DBG("\n");
- else
- DBG("none found!\n");
-
-#ifdef LATER
- if (kdebug)
- debug("ring");
-#endif
- DELAY(15000000);
- //prom_reboot();
- panic("FIXME: devnamefromarcs: should call prom_reboot here.\n");
- /* NOTREACHED */
- }
-
- ASSERT(base_io_scsi_ctlr_vhdl[val] != GRAPH_VERTEX_NONE);
- vertex_to_name(base_io_scsi_ctlr_vhdl[val],
- tmpnm,
- MAXDEVNAME);
- tmp2 = tmpnm + strlen(tmpnm);
- strcpy(tmp2, __DEVSTR2);
- tmp2 += strlen(__DEVSTR2);
- while (*tmp1 != 's') {
- if((*tmp2++ = *tmp1++) == '\0')
- return;
- }
- tmp1++;
- strcpy(tmp2, __DEVSTR3);
- tmp2 += strlen(__DEVSTR3);
- while ( (*tmp2++ = *tmp1++) )
- ;
- tmp2--;
- *tmp2++ = '/';
- strcpy(tmp2, EDGE_LBL_BLOCK);
- strcpy(devnm,tmpnm);
-}
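A worked example of the translation above, assuming the PROM handed us the name
"dks0d1s2" and controller 0's vertex resolves to an arbitrary hardware-graph
path (shown here as <ctlr0>):

	/*
	 * "dks0d1s2"
	 *     -> "<ctlr0>/target/1/lun/0/disk/partition/2/<EDGE_LBL_BLOCK>"
	 * i.e. the controller number selects base_io_scsi_ctlr_vhdl[0], the
	 * digits after 'd' become the target, the digits after 's' become the
	 * partition, and EDGE_LBL_BLOCK is appended for the block device.
	 */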
-
-static
-struct io_brick_map_s io_brick_tab[] = {
-
-/* Ibrick widget number to PCI bus number map */
- {
- 'I', /* Ibrick type */
- /* PCI Bus # Widget # */
- { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
- 0, /* 0x8 */
- 0, /* 0x9 */
- 0, 0, /* 0xa - 0xb */
- 0, /* 0xc */
- 0, /* 0xd */
- 2, /* 0xe */
- 1 /* 0xf */
- }
- },
-
-/* Pbrick widget number to PCI bus number map */
- {
- 'P', /* Pbrick type */
- /* PCI Bus # Widget # */
- { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
- 2, /* 0x8 */
- 1, /* 0x9 */
- 0, 0, /* 0xa - 0xb */
- 5, /* 0xc */
- 6, /* 0xd */
- 4, /* 0xe */
- 3 /* 0xf */
- }
- },
-
-/* Xbrick widget to XIO slot map */
- {
- 'X', /* Xbrick type */
- /* XIO Slot # Widget # */
- { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
- 1, /* 0x8 */
- 2, /* 0x9 */
- 0, 0, /* 0xa - 0xb */
- 3, /* 0xc */
- 4, /* 0xd */
- 0, /* 0xe */
- 0 /* 0xf */
- }
- }
-};
-
-/*
- * Use the brick's type to map a widget number to a meaningful int
- */
-int
-io_brick_map_widget(char brick_type, int widget_num)
-{
- int num_bricks, i;
-
- /* Calculate number of bricks in table */
- num_bricks = sizeof(io_brick_tab)/sizeof(io_brick_tab[0]);
-
- /* Look for brick prefix in table */
- for (i = 0; i < num_bricks; i++) {
- if (brick_type == io_brick_tab[i].ibm_type)
- return(io_brick_tab[i].ibm_map_wid[widget_num]);
- }
-
- return 0;
-
-}
-
-/*
- * Use the device's vertex to map the device's widget to a meaningful int
- */
-int
-io_path_map_widget(devfs_handle_t vertex)
-{
- char hw_path_name[MAXDEVNAME];
- char *wp, *bp, *sp = NULL;
- int widget_num;
- long atoi(char *);
- int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
-
-
- /* Get the full path name of the vertex */
- if (GRAPH_SUCCESS != hwgraph_vertex_name_get(vertex, hw_path_name,
- MAXDEVNAME))
- return 0;
-
- /* Find the widget number in the path name */
- wp = strstr(hw_path_name, "/"EDGE_LBL_XTALK"/");
- if (wp == NULL)
- return 0;
- widget_num = atoi(wp+7);
- if (widget_num < XBOW_PORT_8 || widget_num > XBOW_PORT_F)
- return 0;
-
- /* Find "brick" in the path name */
- bp = strstr(hw_path_name, "brick");
- if (bp == NULL)
- return 0;
-
- /* Find preceding slash */
- sp = bp;
- while (sp > hw_path_name) {
- sp--;
- if (*sp == '/')
- break;
- }
-
- /* Invalid if no preceding slash */
- if (!sp)
- return 0;
-
- /* Bump slash pointer to "brick" prefix */
- sp++;
- /*
-	 * Verify "brick" prefix length; valid examples:
- * 'I' from "/Ibrick"
- * 'P' from "/Pbrick"
- * 'X' from "/Xbrick"
- */
- if ((bp - sp) != 1)
- return 0;
-
- return (io_brick_map_widget(*sp, widget_num));
-
-}
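A worked example of the two lookups above, assuming a hardware-graph path of the
usual ".../Pbrick/xtalk/<widget>/..." form (the concrete path is illustrative):

	/*
	 * "/hw/module/001c01/Pbrick/xtalk/14/pci/1"
	 *     io_path_map_widget() parses widget_num = 14 (0xe) after "/xtalk/"
	 *     and brick type 'P' from "/Pbrick", so it returns
	 *     io_brick_map_widget('P', 14) == 4, i.e. PCI bus 4 in io_brick_tab[].
	 */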
diff --git a/arch/ia64/sn/io/module.c b/arch/ia64/sn/io/module.c
deleted file mode 100644
index b53648a9101211..00000000000000
--- a/arch/ia64/sn/io/module.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn1/hubdev.h>
-#include <asm/sn/module.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/sn_cpuid.h>
-
-
-/* #define LDEBUG 1 */
-
-#ifdef LDEBUG
-#define DPRINTF printk
-#define printf printk
-#else
-#define DPRINTF(x...)
-#endif
-
-module_t *modules[MODULE_MAX];
-int nummodules;
-
-#define SN00_SERIAL_FUDGE 0x3b1af409d513c2
-#define SN0_SERIAL_FUDGE 0x6e
-
-void
-encode_int_serial(uint64_t src,uint64_t *dest)
-{
- uint64_t val;
- int i;
-
- val = src + SN00_SERIAL_FUDGE;
-
-
- for (i = 0; i < sizeof(long long); i++) {
- ((char*)dest)[i] =
- ((char*)&val)[sizeof(long long)/2 +
- ((i%2) ? ((i/2 * -1) - 1) : (i/2))];
- }
-}
-
-
-void
-decode_int_serial(uint64_t src, uint64_t *dest)
-{
- uint64_t val;
- int i;
-
- for (i = 0; i < sizeof(long long); i++) {
- ((char*)&val)[sizeof(long long)/2 +
- ((i%2) ? ((i/2 * -1) - 1) : (i/2))] =
- ((char*)&src)[i];
- }
-
- *dest = val - SN00_SERIAL_FUDGE;
-}
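A minimal sketch of the round-trip property of the pair above; the input value
is arbitrary and used only for illustration:

	{
		uint64_t raw = 0x0123456789abcdefULL, fudged, recovered;

		encode_int_serial(raw, &fudged);	/* add fudge, swizzle the bytes */
		decode_int_serial(fudged, &recovered);	/* unswizzle, subtract the fudge */
		ASSERT(recovered == raw);		/* the byte permutation is a bijection */
	}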
-
-
-void
-encode_str_serial(const char *src, char *dest)
-{
- int i;
-
- for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++) {
-
- dest[i] = src[MAX_SERIAL_NUM_SIZE/2 +
- ((i%2) ? ((i/2 * -1) - 1) : (i/2))] +
- SN0_SERIAL_FUDGE;
- }
-}
-
-void
-decode_str_serial(const char *src, char *dest)
-{
- int i;
-
- for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++) {
- dest[MAX_SERIAL_NUM_SIZE/2 +
- ((i%2) ? ((i/2 * -1) - 1) : (i/2))] = src[i] -
- SN0_SERIAL_FUDGE;
- }
-}
-
-
-module_t *module_lookup(moduleid_t id)
-{
- int i;
-
- for (i = 0; i < nummodules; i++)
- if (modules[i]->id == id) {
- DPRINTF("module_lookup: found m=0x%p\n", modules[i]);
- return modules[i];
- }
-
- return NULL;
-}
-
-/*
- * module_add_node
- *
- * The first time a new module number is seen, a module structure is
- * inserted into the module list in order sorted by module number
- * and the structure is initialized.
- *
- * The node number is added to the list of nodes in the module.
- */
-
-module_t *module_add_node(moduleid_t id, cnodeid_t n)
-{
- module_t *m;
- int i;
- char buffer[16];
-
-#ifdef __ia64
- memset(buffer, 0, 16);
- format_module_id(buffer, id, MODULE_FORMAT_BRIEF);
- DPRINTF("module_add_node: id=%s node=%d\n", buffer, n);
-#endif
-
- if ((m = module_lookup(id)) == 0) {
-#ifdef LATER
- m = kmem_zalloc_node(sizeof (module_t), KM_NOSLEEP, n);
-#else
- m = kmalloc(sizeof (module_t), GFP_KERNEL);
- memset(m, 0 , sizeof(module_t));
-#endif
- ASSERT_ALWAYS(m);
-
- m->id = id;
- spin_lock_init(&m->lock);
-
- mutex_init_locked(&m->thdcnt);
-
-// set_elsc(&m->elsc);
- elsc_init(&m->elsc, COMPACT_TO_NASID_NODEID(n));
- spin_lock_init(&m->elsclock);
-
- /* Insert in sorted order by module number */
-
- for (i = nummodules; i > 0 && modules[i - 1]->id > id; i--)
- modules[i] = modules[i - 1];
-
- modules[i] = m;
- nummodules++;
- }
-
- m->nodes[m->nodecnt++] = n;
-
- DPRINTF("module_add_node: module %s now has %d nodes\n", buffer, m->nodecnt);
-
- return m;
-}
-
-int module_probe_snum(module_t *m, nasid_t nasid)
-{
- lboard_t *board;
- klmod_serial_num_t *comp;
- char * bcopy(const char * src, char * dest, int count);
- char serial_number[16];
-
- /*
- * record brick serial number
- */
- board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
-
- if (! board || KL_CONFIG_DUPLICATE_BOARD(board))
- {
-#if LDEBUG
- printf ("module_probe_snum: no IP35 board found!\n");
-#endif
- return 0;
- }
-
- board_serial_number_get( board, serial_number );
- if( serial_number[0] != '\0' ) {
- encode_str_serial( serial_number, m->snum.snum_str );
- m->snum_valid = 1;
- }
-#if LDEBUG
- else {
- printf("module_probe_snum: brick serial number is null!\n");
- }
- printf("module_probe_snum: brick serial number == %s\n", serial_number);
-#endif /* DEBUG */
-
- board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid),
- KLTYPE_IOBRICK_XBOW);
-
- if (! board || KL_CONFIG_DUPLICATE_BOARD(board))
- return 0;
-
- comp = GET_SNUM_COMP(board);
-
- if (comp) {
-#if LDEBUG
- int i;
-
- printf("********found module with id %x and string", m->id);
-
- for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++)
- printf(" %x ", comp->snum.snum_str[i]);
-
- printf("\n"); /* Fudged string is not ASCII */
-#endif
-
- if (comp->snum.snum_str[0] != '\0') {
- bcopy(comp->snum.snum_str,
- m->sys_snum,
- MAX_SERIAL_NUM_SIZE);
- m->sys_snum_valid = 1;
- }
- }
-
- if (m->sys_snum_valid)
- return 1;
- else {
- DPRINTF("Invalid serial number for module %d, "
- "possible missing or invalid NIC.", m->id);
- return 0;
- }
-}
-
-void
-io_module_init(void)
-{
- cnodeid_t node;
- lboard_t *board;
- nasid_t nasid;
- int nserial;
- module_t *m;
-
- DPRINTF("*******module_init\n");
-
- nserial = 0;
-
- for (node = 0; node < numnodes; node++) {
- nasid = COMPACT_TO_NASID_NODEID(node);
-
- board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
- ASSERT(board);
-
- m = module_add_node(board->brd_module, node);
-
- if (! m->snum_valid && module_probe_snum(m, nasid))
- nserial++;
- }
-
- DPRINTF("********found total of %d serial numbers in the system\n",
- nserial);
-
- if (nserial == 0)
- printk(KERN_WARNING "io_module_init: No serial number found.\n");
-}
-
-elsc_t *get_elsc(void)
-{
- return &NODEPDA(cpuid_to_cnodeid(smp_processor_id()))->module->elsc;
-}
-
-int
-get_kmod_info(cmoduleid_t cmod, module_info_t *mod_info)
-{
- int i;
-
- if (cmod < 0 || cmod >= nummodules)
- return EINVAL;
-
- if (! modules[cmod]->snum_valid)
- return ENXIO;
-
- mod_info->mod_num = modules[cmod]->id;
- {
- char temp[MAX_SERIAL_NUM_SIZE];
-
- decode_str_serial(modules[cmod]->snum.snum_str, temp);
-
- /* if this is an invalid serial number return an error */
- if (temp[0] != 'K')
- return ENXIO;
-
- mod_info->serial_num = 0;
-
- for (i = 0; i < MAX_SERIAL_NUM_SIZE && temp[i] != '\0'; i++) {
- mod_info->serial_num <<= 4;
- mod_info->serial_num |= (temp[i] & 0xf);
-
- mod_info->serial_str[i] = temp[i];
- }
-
- mod_info->serial_str[i] = '\0';
- }
-
- return 0;
-}
diff --git a/arch/ia64/sn/io/pci_bus_cvlink.c b/arch/ia64/sn/io/pci_bus_cvlink.c
deleted file mode 100644
index 3f8f282dc88753..00000000000000
--- a/arch/ia64/sn/io/pci_bus_cvlink.c
+++ /dev/null
@@ -1,737 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <asm/sn/types.h>
-#include <asm/sn/hack.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/iograph.h>
-#include <asm/param.h>
-#include <asm/sn/pio.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/xtalk/xtalkaddrs.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/pci/pci_bus_cvlink.h>
-#include <asm/sn/simulator.h>
-#include <asm/sn/sn_cpuid.h>
-
-extern int bridge_rev_b_data_check_disable;
-
-devfs_handle_t busnum_to_pcibr_vhdl[MAX_PCI_XWIDGET];
-nasid_t busnum_to_nid[MAX_PCI_XWIDGET];
-void * busnum_to_atedmamaps[MAX_PCI_XWIDGET];
-unsigned char num_bridges;
-static int done_probing = 0;
-
-static int pci_bus_map_create(devfs_handle_t xtalk);
-devfs_handle_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
-
-#define SN1_IOPORTS_UNIT 256
-#define MAX_IOPORTS 0xffff
-#define MAX_IOPORTS_CHUNKS (MAX_IOPORTS / SN1_IOPORTS_UNIT)
-struct ioports_to_tlbs_s ioports_to_tlbs[MAX_IOPORTS_CHUNKS];
-unsigned long sn1_allocate_ioports(unsigned long pci_address);
-
-extern void sn1_init_irq_desc(void);
-
-
-
-/*
- * pci_bus_cvlink_init() - To be called once during initialization before
- * SGI IO Infrastructure init is called.
- */
-void
-pci_bus_cvlink_init(void)
-{
- memset(busnum_to_pcibr_vhdl, 0x0, sizeof(devfs_handle_t) * MAX_PCI_XWIDGET);
- memset(busnum_to_nid, 0x0, sizeof(nasid_t) * MAX_PCI_XWIDGET);
-
- memset(busnum_to_atedmamaps, 0x0, sizeof(void *) * MAX_PCI_XWIDGET);
-
- memset(ioports_to_tlbs, 0x0, sizeof(ioports_to_tlbs));
-
- num_bridges = 0;
-}
-
-/*
- * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated
- * pci bus vertex from the SGI IO Infrastructure.
- */
-devfs_handle_t
-pci_bus_to_vertex(unsigned char busnum)
-{
-
- devfs_handle_t pci_bus = NULL;
-
-
- /*
- * First get the xwidget vertex.
- */
- pci_bus = busnum_to_pcibr_vhdl[busnum];
- return(pci_bus);
-}
-
-/*
- * devfn_to_vertex() - returns the vertex of the device given the bus, slot,
- * and function numbers.
- */
-devfs_handle_t
-devfn_to_vertex(unsigned char busnum, unsigned int devfn)
-{
-
- int slot = 0;
- int func = 0;
- char name[16];
- devfs_handle_t pci_bus = NULL;
- devfs_handle_t device_vertex = (devfs_handle_t)NULL;
-
- /*
- * Go get the pci bus vertex.
- */
- pci_bus = pci_bus_to_vertex(busnum);
- if (!pci_bus) {
- /*
- * During probing, the Linux pci code invents non-existent
- * bus numbers and pci_dev structures and tries to access
-		 * them to determine existence.  Don't complain during probing.
- */
- if (done_probing)
- printk("devfn_to_vertex: Invalid bus number %d given.\n", busnum);
- return(NULL);
- }
-
-
- /*
- * Go get the slot&function vertex.
- * Should call pciio_slot_func_to_name() when ready.
- */
- slot = PCI_SLOT(devfn);
- func = PCI_FUNC(devfn);
-
- /*
-	 * For a non-multifunction card, the name of the device looks like:
- * ../pci/1, ../pci/2 ..
- */
- if (func == 0) {
- sprintf(name, "%d", slot);
- if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
- GRAPH_SUCCESS) {
- if (device_vertex) {
- return(device_vertex);
- }
- }
- }
-
- /*
-	 * This may be a multifunction card.  Its names look like:
- * ../pci/1a, ../pci/1b, etc.
- */
- sprintf(name, "%d%c", slot, 'a'+func);
- if (hwgraph_traverse(pci_bus, name, &device_vertex) != GRAPH_SUCCESS) {
- if (!device_vertex) {
- return(NULL);
- }
- }
-
- return(device_vertex);
-}
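For example, with the naming convention described above (the leading path is
illustrative):

	/*
	 * devfn_to_vertex(0, PCI_DEVFN(1, 0)) resolves ".../pci/1"
	 *     (single-function card in slot 1);
	 * devfn_to_vertex(0, PCI_DEVFN(1, 1)) falls through to ".../pci/1b"
	 *     (function 1 of a multifunction card in slot 1).
	 */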
-
-/*
- * For the given device, initialize the addresses for both the Device(x) Flush
- * Write Buffer register and the Xbow Flush Register for the port the PCI bus
- * is connected.
- */
-static void
-set_flush_addresses(struct pci_dev *device_dev,
- struct sn1_device_sysdata *device_sysdata)
-{
- pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- device_sysdata->dma_buf_sync = (volatile unsigned int *)
- &(bridge->b_wr_req_buf[pciio_slot].reg);
- device_sysdata->xbow_buf_sync = (volatile unsigned int *)
- XBOW_PRIO_LINKREGS_PTR(NODE_SWIN_BASE(get_nasid(), 0),
- pcibr_soft->bs_xid);
-#ifdef DEBUG
-
- printk("set_flush_addresses: dma_buf_sync %p xbow_buf_sync %p\n",
- device_sysdata->dma_buf_sync, device_sysdata->xbow_buf_sync);
-
- while((volatile unsigned int )*device_sysdata->dma_buf_sync);
- while((volatile unsigned int )*device_sysdata->xbow_buf_sync);
-#endif
-
-}
-
-/*
- * Most drivers currently do not properly tell the arch specific pci dma
- * interfaces whether they can handle A64. Here is where we privately
- * keep track of this.
- */
-static void __init
-set_sn1_pci64(struct pci_dev *dev)
-{
- unsigned short vendor = dev->vendor;
- unsigned short device = dev->device;
-
- if (vendor == PCI_VENDOR_ID_QLOGIC) {
- if ((device == PCI_DEVICE_ID_QLOGIC_ISP2100) ||
- (device == PCI_DEVICE_ID_QLOGIC_ISP2200)) {
- SET_PCIA64(dev);
- return;
- }
- }
-
- if (vendor == PCI_VENDOR_ID_SGI) {
- if (device == PCI_DEVICE_ID_SGI_IOC3) {
- SET_PCIA64(dev);
- return;
- }
- }
-
-}
-
-/*
- * sn1_allocate_ioports() - This routine provides the allocation and
- *	mapping of Linux-style IOPORTs.
- *
- *	For simplicity's sake, SN1 allocates IOPORTs in chunks of
- *	256 bytes, irrespective of what the card desires.  This may
- *	have to change when we understand how to deal with legacy ioports
- *	which are hardcoded in some drivers, e.g. SVGA.
- *
- *	Of course, the SN1 IO Infrastructure has no concept of IOPORT
- *	numbers, and it will remain so.  The IO Infrastructure will continue
- *	to map IO resources just like IRIX.  When this is done, we map IOPORT
- *	chunks to these resources.  The Linux drivers will see and use real
- *	IOPORT numbers.  The various IOPORT access macros, e.g. inb/outb,
- *	do the munging of these IOPORT numbers to make an Uncached Virtual
- *	Address.  This address, via the TLB entries, generates the PCI address
- *	allocated by the SN1 IO Infrastructure Layer.
- */
-static unsigned long sn1_ioport_num = 0x1000; /* Reserve room for Legacy stuff */
-unsigned long
-sn1_allocate_ioports(unsigned long pci_address)
-{
-
- unsigned long ioport_index;
-
- /*
- * Just some idiot checking ..
- */
- if ( sn1_ioport_num > 0xffff ) {
- printk("sn1_allocate_ioports: No more IO PORTS available\n");
- return(-1);
- }
-
- /*
-	 * See Section 4.1.1.5 of the Intel IA-64 Architecture Software Developer's
- * Manual for details.
- */
- ioport_index = sn1_ioport_num / SN1_IOPORTS_UNIT;
-
- ioports_to_tlbs[ioport_index].p = 1; /* Present Bit */
- ioports_to_tlbs[ioport_index].rv_1 = 0; /* 1 Bit */
- ioports_to_tlbs[ioport_index].ma = 4; /* Memory Attributes 3 bits*/
- ioports_to_tlbs[ioport_index].a = 1; /* Set Data Access Bit Fault 1 Bit*/
- ioports_to_tlbs[ioport_index].d = 1; /* Dirty Bit */
- ioports_to_tlbs[ioport_index].pl = 0;/* Privilege Level - All levels can R/W*/
- ioports_to_tlbs[ioport_index].ar = 3; /* Access Rights - R/W only*/
- ioports_to_tlbs[ioport_index].ppn = pci_address >> 12; /* 4K page size */
- ioports_to_tlbs[ioport_index].ed = 0; /* Exception Deferral Bit */
- ioports_to_tlbs[ioport_index].ig = 0; /* Ignored */
-
- /* printk("sn1_allocate_ioports: ioport_index 0x%x ioports_to_tlbs 0x%p\n", ioport_index, ioports_to_tlbs[ioport_index]); */
-
- sn1_ioport_num += SN1_IOPORTS_UNIT;
-
- return(sn1_ioport_num - SN1_IOPORTS_UNIT);
-}
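A small usage sketch, assuming the static counter is still at its initial value
of 0x1000 and SN1_IOPORTS_UNIT is 256 (0x100); pci_addr_a and pci_addr_b are
placeholder PCI addresses:

	unsigned long io0 = sn1_allocate_ioports(pci_addr_a);	/* returns 0x1000 */
	unsigned long io1 = sn1_allocate_ioports(pci_addr_b);	/* returns 0x1100 */
	/* Each call fills one ioports_to_tlbs[] entry with ppn = pci_addr >> 12. */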
-
-/*
- * sn1_pci_fixup() - This routine is called when platform_pci_fixup() is
- * invoked at the end of pcibios_init() to link the Linux pci
- *	infrastructure to the SGI IO Infrastructure - ia64/kernel/pci.c
- *
- * Other platform specific fixup can also be done here.
- */
-void
-sn1_pci_fixup(int arg)
-{
- struct list_head *ln;
- struct pci_bus *pci_bus = NULL;
- struct pci_dev *device_dev = NULL;
- struct sn1_widget_sysdata *widget_sysdata;
- struct sn1_device_sysdata *device_sysdata;
-#ifdef SN1_IOPORTS
- unsigned long ioport;
-#endif
- pciio_intr_t intr_handle;
- int cpuid, bit;
- devfs_handle_t device_vertex;
- pciio_intr_line_t lines;
- extern void sn1_pci_find_bios(void);
-#ifdef CONFIG_IA64_SGI_SN2
- extern int numnodes;
- int cnode;
-#endif /* CONFIG_IA64_SGI_SN2 */
-
-
- if (arg == 0) {
- sn1_init_irq_desc();
- sn1_pci_find_bios();
-#ifdef CONFIG_IA64_SGI_SN2
- for (cnode = 0; cnode < numnodes; cnode++) {
- extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
- intr_init_vecblk(NODEPDA(cnode), cnode, 0);
- }
-#endif /* CONFIG_IA64_SGI_SN2 */
- return;
- }
-
-#if 0
-{
- devfs_handle_t bridge_vhdl = pci_bus_to_vertex(0);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) hwgraph_fastinfo_get(bridge_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
- printk("pci_fixup_ioc3: Before devreg fixup\n");
- printk("pci_fixup_ioc3: Devreg 0 0x%x\n", bridge->b_device[0].reg);
- printk("pci_fixup_ioc3: Devreg 1 0x%x\n", bridge->b_device[1].reg);
- printk("pci_fixup_ioc3: Devreg 2 0x%x\n", bridge->b_device[2].reg);
- printk("pci_fixup_ioc3: Devreg 3 0x%x\n", bridge->b_device[3].reg);
- printk("pci_fixup_ioc3: Devreg 4 0x%x\n", bridge->b_device[4].reg);
- printk("pci_fixup_ioc3: Devreg 5 0x%x\n", bridge->b_device[5].reg);
- printk("pci_fixup_ioc3: Devreg 6 0x%x\n", bridge->b_device[6].reg);
- printk("pci_fixup_ioc3: Devreg 7 0x%x\n", bridge->b_device[7].reg);
-}
-#endif
- done_probing = 1;
-
- /*
- * Initialize the pci bus vertex in the pci_bus struct.
- */
- for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
- pci_bus = pci_bus_b(ln);
- widget_sysdata = kmalloc(sizeof(struct sn1_widget_sysdata),
- GFP_KERNEL);
- widget_sysdata->vhdl = pci_bus_to_vertex(pci_bus->number);
- pci_bus->sysdata = (void *)widget_sysdata;
- }
-
- /*
- * set the root start and end so that drivers calling check_region()
- * won't see a conflict
- */
-#ifdef SN1_IOPORTS
- ioport_resource.start = sn1_ioport_num;
- ioport_resource.end = 0xffff;
-#else
-#if defined(CONFIG_IA64_SGI_SN1)
- if ( IS_RUNNING_ON_SIMULATOR() ) {
- /*
- * IDE legacy IO PORTs are supported in Medusa.
- * Just open up IO PORTs from 0 .. ioport_resource.end.
- */
- ioport_resource.start = 0;
- } else {
- /*
- * We do not support Legacy IO PORT numbers.
- */
- ioport_resource.start |= IO_SWIZ_BASE | __IA64_UNCACHED_OFFSET;
- }
- ioport_resource.end |= (HSPEC_SWIZ_BASE-1) | __IA64_UNCACHED_OFFSET;
-#else
- // Need something here for sn2.... ZXZXZX
-#endif
-#endif
-
- /*
- * Initialize the device vertex in the pci_dev struct.
- */
- pci_for_each_dev(device_dev) {
- unsigned int irq;
- int idx;
- u16 cmd;
- devfs_handle_t vhdl;
- unsigned long size;
- extern int bit_pos_to_irq(int);
-
- if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
- device_dev->device == PCI_DEVICE_ID_SGI_IOC3) {
- extern void pci_fixup_ioc3(struct pci_dev *d);
- pci_fixup_ioc3(device_dev);
- }
-
- /* Set the device vertex */
-
- device_sysdata = kmalloc(sizeof(struct sn1_device_sysdata),
- GFP_KERNEL);
- device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
- device_sysdata->isa64 = 0;
- /*
- * Set the xbridge Device(X) Write Buffer Flush and Xbow Flush
- * register addresses.
- */
- (void) set_flush_addresses(device_dev, device_sysdata);
-
- device_dev->sysdata = (void *) device_sysdata;
- set_sn1_pci64(device_dev);
- pci_read_config_word(device_dev, PCI_COMMAND, &cmd);
-
- /*
-		 * Set the resource addresses correctly.  The assumption here
-		 * is that the addresses in the resource structure have been
-		 * read from the card and were set in the card by our
-		 * Infrastructure.
- */
- vhdl = device_sysdata->vhdl;
- for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
- size = 0;
- size = device_dev->resource[idx].end -
- device_dev->resource[idx].start;
- if (size) {
- device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(vhdl, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, PCIIO_BYTE_STREAM);
- device_dev->resource[idx].start |= __IA64_UNCACHED_OFFSET;
- }
- else
- continue;
-
- device_dev->resource[idx].end =
- device_dev->resource[idx].start + size;
-
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * Adjust the addresses to go to the SWIZZLE ..
- */
- device_dev->resource[idx].start =
- device_dev->resource[idx].start & 0xfffff7ffffffffff;
- device_dev->resource[idx].end =
- device_dev->resource[idx].end & 0xfffff7ffffffffff;
-#endif
-
- if (device_dev->resource[idx].flags & IORESOURCE_IO) {
- cmd |= PCI_COMMAND_IO;
-#ifdef SN1_IOPORTS
- ioport = sn1_allocate_ioports(device_dev->resource[idx].start);
- if (ioport < 0) {
- printk("sn1_pci_fixup: PCI Device 0x%x on PCI Bus %d not mapped to IO PORTs .. IO PORTs exhausted\n", device_dev->devfn, device_dev->bus->number);
- continue;
- }
- pciio_config_set(vhdl, (unsigned) PCI_BASE_ADDRESS_0 + (idx * 4), 4, (res + (ioport & 0xfff)));
-
-printk("sn1_pci_fixup: ioport number %d mapped to pci address 0x%lx\n", ioport, (res + (ioport & 0xfff)));
-
- device_dev->resource[idx].start = ioport;
- device_dev->resource[idx].end = ioport + SN1_IOPORTS_UNIT;
-#endif
- }
- if (device_dev->resource[idx].flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- /*
- * Now handle the ROM resource ..
- */
- size = device_dev->resource[PCI_ROM_RESOURCE].end -
- device_dev->resource[PCI_ROM_RESOURCE].start;
-
- if (size) {
- device_dev->resource[PCI_ROM_RESOURCE].start =
- (unsigned long) pciio_pio_addr(vhdl, 0, PCIIO_SPACE_ROM, 0,
- size, 0, PCIIO_BYTE_STREAM);
- device_dev->resource[PCI_ROM_RESOURCE].start |= __IA64_UNCACHED_OFFSET;
- device_dev->resource[PCI_ROM_RESOURCE].end =
- device_dev->resource[PCI_ROM_RESOURCE].start + size;
-
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * go through synergy swizzled space
- */
- device_dev->resource[PCI_ROM_RESOURCE].start &= 0xfffff7ffffffffffUL;
- device_dev->resource[PCI_ROM_RESOURCE].end &= 0xfffff7ffffffffffUL;
-#endif
-
- }
-
- /*
- * Update the Command Word on the Card.
- */
- cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
- /* bit gets dropped .. no harm */
- pci_write_config_word(device_dev, PCI_COMMAND, cmd);
-
- pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
- if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
- device_dev->device == PCI_DEVICE_ID_SGI_IOC3 ) {
- lines = 1;
- }
-
- device_sysdata = (struct sn1_device_sysdata *)device_dev->sysdata;
- device_vertex = device_sysdata->vhdl;
-
- intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);
-
- bit = intr_handle->pi_irq;
- cpuid = intr_handle->pi_cpu;
-#ifdef CONFIG_IA64_SGI_SN1
- irq = bit_pos_to_irq(bit);
-#else /* SN2 */
- irq = bit;
-#endif
- irq = irq + (cpuid << 8);
- pciio_intr_connect(intr_handle);
- device_dev->irq = irq;
-#ifdef ajmtestintr
- {
- int slot = PCI_SLOT(device_dev->devfn);
- static int timer_set = 0;
- pcibr_intr_t pcibr_intr = (pcibr_intr_t)intr_handle;
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- extern void intr_test_handle_intr(int, void*, struct pt_regs *);
-
- if (!timer_set) {
- intr_test_set_timer();
- timer_set = 1;
- }
- intr_test_register_irq(irq, pcibr_soft, slot);
- request_irq(irq, intr_test_handle_intr,0,NULL, NULL);
- }
-#endif
-
- }
-
-#if 0
-
-{
- devfs_handle_t bridge_vhdl = pci_bus_to_vertex(0);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) hwgraph_fastinfo_get(bridge_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- printk("pci_fixup_ioc3: Before devreg fixup\n");
- printk("pci_fixup_ioc3: Devreg 0 0x%x\n", bridge->b_device[0].reg);
- printk("pci_fixup_ioc3: Devreg 1 0x%x\n", bridge->b_device[1].reg);
- printk("pci_fixup_ioc3: Devreg 2 0x%x\n", bridge->b_device[2].reg);
- printk("pci_fixup_ioc3: Devreg 3 0x%x\n", bridge->b_device[3].reg);
- printk("pci_fixup_ioc3: Devreg 4 0x%x\n", bridge->b_device[4].reg);
- printk("pci_fixup_ioc3: Devreg 5 0x%x\n", bridge->b_device[5].reg);
- printk("pci_fixup_ioc3: Devreg 6 0x%x\n", bridge->b_device[6].reg);
- printk("pci_fixup_ioc3: Devreg 7 0x%x\n", bridge->b_device[7].reg);
-}
-
-printk("testing Big Window: 0xC0000200c0000000 %p\n", *( (volatile uint64_t *)0xc0000200a0000000));
-printk("testing Big Window: 0xC0000200c0000008 %p\n", *( (volatile uint64_t *)0xc0000200a0000008));
-
-#endif
-
-}
-
-/*
- * pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
- *
- * Linux PCI Bus numbers are assigned from lowest module_id numbers
- * (rack/slot etc.) starting from HUB_WIDGET_ID_MAX down to
- * HUB_WIDGET_ID_MIN:
- * widgetnum 15 gets lower Bus Number than widgetnum 14 etc.
- *
- * Given 2 modules 001c01 and 001c02 we get the following mappings:
- * 001c01, widgetnum 15 = Bus number 0
- * 001c01, widgetnum 14 = Bus number 1
- * 001c02, widgetnum 15 = Bus number 3
- * 001c02, widgetnum 14 = Bus number 4
- * etc.
- *
- * The rationale for starting Bus Number 0 with Widget number 15 is that
- * the system boot disks are always connected via Widget 15 Slot 0 of the
- * I-brick.  Linux creates /dev/sd* device names starting from Bus Number 0.
- * Therefore, /dev/sda1 will be the first disk, on Widget 15 of the lowest
- * module id (Master Cnode) of the system.
- *
- */
-static int
-pci_bus_map_create(devfs_handle_t xtalk)
-{
-
- devfs_handle_t master_node_vertex = NULL;
- devfs_handle_t xwidget = NULL;
- devfs_handle_t pci_bus = NULL;
- hubinfo_t hubinfo = NULL;
- xwidgetnum_t widgetnum;
- char pathname[128];
- graph_error_t rv;
-
- /*
-	 * Loop through this vertex and get the Xwidgets ..
- */
- for (widgetnum = HUB_WIDGET_ID_MAX; widgetnum >= HUB_WIDGET_ID_MIN; widgetnum--) {
-#if 0
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(xtalk, dname, 256);
- printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
- }
-#endif
-
- sprintf(pathname, "%d", widgetnum);
- xwidget = NULL;
-
- /*
- * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
- * /hw/module/001c16/Pbrick/xtalk/8/pci/1 is device
- */
- rv = hwgraph_traverse(xtalk, pathname, &xwidget);
- if ( (rv != GRAPH_SUCCESS) ) {
- if (!xwidget)
- continue;
- }
-
- sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
- pci_bus = NULL;
- if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
- if (!pci_bus)
- continue;
-
- /*
- * Assign the correct bus number and also the nasid of this
- * pci Xwidget.
- *
- * Should not be any race here ...
- */
- num_bridges++;
- busnum_to_pcibr_vhdl[num_bridges - 1] = pci_bus;
-
- /*
- * Get the master node and from there get the NASID.
- */
- master_node_vertex = device_master_get(xwidget);
- if (!master_node_vertex) {
- printk("WARNING: pci_bus_map_create: Unable to get .master for vertex 0x%p\n", (void *)xwidget);
- }
-
- hubinfo_get(master_node_vertex, &hubinfo);
- if (!hubinfo) {
- printk("WARNING: pci_bus_map_create: Unable to get hubinfo for master node vertex 0x%p\n", (void *)master_node_vertex);
- return(1);
- } else {
- busnum_to_nid[num_bridges - 1] = hubinfo->h_nasid;
- }
-
- /*
- * Pre assign DMA maps needed for 32 Bits Page Map DMA.
- */
- busnum_to_atedmamaps[num_bridges - 1] = (void *) kmalloc(
- sizeof(struct sn1_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
- if (!busnum_to_atedmamaps[num_bridges - 1])
- printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
-
- memset(busnum_to_atedmamaps[num_bridges - 1], 0x0,
- sizeof(struct sn1_dma_maps_s) * MAX_ATE_MAPS);
-
- }
-
- return(0);
-}
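A sketch of the resulting tables after the first call, for a single xtalk vertex
that exposes PCI bridges at widgets 15 and 14 only (vertex names illustrative):

	/*
	 * After pci_bus_map_create(xtalk):
	 *     busnum_to_pcibr_vhdl[0] = <.../xtalk/15/pci>, busnum_to_nid[0] = <nasid>
	 *     busnum_to_pcibr_vhdl[1] = <.../xtalk/14/pci>, busnum_to_nid[1] = <nasid>
	 * and each bus gets a zeroed busnum_to_atedmamaps[] array of MAX_ATE_MAPS
	 * sn1_dma_maps_s entries for 32-bit page-mapped DMA.
	 */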
-
-/*
- * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
- * initialization has completed to set up the mappings between Xbridge
- * and logical pci bus numbers. We also set up the NASID for each of these
- * xbridges.
- *
- * Must be called before pci_init() is invoked.
- */
-int
-pci_bus_to_hcl_cvlink(void)
-{
-
- devfs_handle_t devfs_hdl = NULL;
- devfs_handle_t xtalk = NULL;
- int rv = 0;
- char name[256];
- int master_iobrick;
- int i;
-
- /*
-	 * Iterate through each xtalk link in the system ..
- * /hw/module/001c01/node/xtalk/ 8|9|10|11|12|13|14|15
- *
- * /hw/module/001c01/node/xtalk/15 -> /hw/module/001c01/Ibrick/xtalk/15
- *
- * What if it is not pci?
- */
- devfs_hdl = hwgraph_path_to_vertex("/dev/hw/module");
-
- /*
-	 * To provide consistent (not persistent) device naming, we need to start
-	 * bus number allocation from the C-Brick with the lowest module id, e.g. 001c01
- * with an attached I-Brick. Find the master_iobrick.
- */
- master_iobrick = -1;
- for (i = 0; i < nummodules; i++) {
- moduleid_t iobrick_id;
- iobrick_id = iobrick_module_get(&modules[i]->elsc);
- if (iobrick_id > 0) { /* Valid module id */
- if (MODULE_GET_BTYPE(iobrick_id) == MODULE_IBRICK) {
- master_iobrick = i;
- break;
- }
- }
- }
-
- /*
- * The master_iobrick gets bus 0 and 1.
- */
- if (master_iobrick >= 0) {
- memset(name, 0, 256);
- format_module_id(name, modules[master_iobrick]->id, MODULE_FORMAT_BRIEF);
- strcat(name, "/node/xtalk");
- xtalk = NULL;
- rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
- pci_bus_map_create(xtalk);
- }
-
- /*
- * Now go do the rest of the modules, starting from the C-Brick with the lowest
- * module id, remembering to skip the master_iobrick, which was done above.
- */
- for (i = 0; i < nummodules; i++) {
- if (i == master_iobrick) {
- continue; /* Did the master_iobrick already. */
- }
-
- memset(name, 0, 256);
- format_module_id(name, modules[i]->id, MODULE_FORMAT_BRIEF);
- strcat(name, "/node/xtalk");
- xtalk = NULL;
- rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
- pci_bus_map_create(xtalk);
- }
-
- return(0);
-}
diff --git a/arch/ia64/sn/io/pciio.c b/arch/ia64/sn/io/pciio.c
deleted file mode 100644
index a6f130e702cd56..00000000000000
--- a/arch/ia64/sn/io/pciio.c
+++ /dev/null
@@ -1,1507 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#define USRPCI 0
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/xtalk/xbow.h> /* Must be before iograph.h to get MAX_PORT_NUM */
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/ioerror_handling.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pciio_private.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/pci/pci_bus_cvlink.h>
-#include <asm/sn/simulator.h>
-
-#define DEBUG_PCIIO
-#undef DEBUG_PCIIO /* turn this on for yet more console output */
-
-
-#define GET_NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
-#define DO_DEL(ptr) (kfree(ptr))
-
-char pciio_info_fingerprint[] = "pciio_info";
-
-cdl_p pciio_registry = NULL;
-
-int
-badaddr_val(volatile void *addr, int len, volatile void *ptr)
-{
- int ret = 0;
- volatile void *new_addr;
-
- switch (len) {
- case 4:
- new_addr = (void *)(((u64) addr)^4);
- ret = ia64_sn_probe_io_slot((long)new_addr, len, (void *)ptr);
- break;
- default:
- printk(KERN_WARNING "badaddr_val given len %x but supports len of 4 only\n", len);
- }
-
- if (ret < 0)
- panic("badaddr_val: unexpected status (%d) in probing", ret);
- return(ret);
-
-}
-
-
-nasid_t
-get_console_nasid(void)
-{
- extern nasid_t console_nasid;
- if (console_nasid < 0) {
- console_nasid = ia64_sn_get_console_nasid();
- if (console_nasid < 0) {
-// ZZZ What do we do if we don't get a console nasid on the hardware????
- if (IS_RUNNING_ON_SIMULATOR() )
- console_nasid = master_nasid;
- }
- }
- return console_nasid;
-}
-
-int
-hub_dma_enabled(devfs_handle_t xconn_vhdl)
-{
- return(0);
-}
-
-int
-hub_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
-{
- return(0);
-}
-
-void
-ioerror_dump(char *name, int error_code, int error_mode, ioerror_t *ioerror)
-{
-}
-
-/******
- ****** end hack defines ......
- ******/
-
-
-
-
-/* =====================================================================
- * PCI Generic Bus Provider
- * Implement PCI provider operations. The pciio* layer provides a
- * platform-independent interface for PCI devices. This layer
- * switches among the possible implementations of a PCI adapter.
- */
-
-/* =====================================================================
- * Provider Function Location SHORTCUT
- *
- * On platforms with only one possible PCI provider, macros can be
- * set up at the top that cause the table lookups and indirections to
- * completely disappear.
- */
-
-#if defined(CONFIG_IA64_SGI_SN1)
-/*
- * For the moment, we will assume that IP27
- * only uses Bridge ASICs to provide PCI support.
- */
-#include <asm/sn/pci/pcibr.h>
-#define DEV_FUNC(dev,func) pcibr_##func
-#define CAST_PIOMAP(x) ((pcibr_piomap_t)(x))
-#define CAST_DMAMAP(x) ((pcibr_dmamap_t)(x))
-#define CAST_INTR(x) ((pcibr_intr_t)(x))
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-/* =====================================================================
- * Function Table of Contents
- */
-
-#if !defined(DEV_FUNC)
-static pciio_provider_t *pciio_to_provider_fns(devfs_handle_t dev);
-#endif
-
-pciio_piomap_t pciio_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
-void pciio_piomap_free(pciio_piomap_t);
-caddr_t pciio_piomap_addr(pciio_piomap_t, iopaddr_t, size_t);
-
-void pciio_piomap_done(pciio_piomap_t);
-caddr_t pciio_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-caddr_t pciio_pio_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, pciio_piomap_t *, unsigned);
-
-iopaddr_t pciio_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
-void pciio_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
-
-pciio_dmamap_t pciio_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
-void pciio_dmamap_free(pciio_dmamap_t);
-iopaddr_t pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
-alenlist_t pciio_dmamap_list(pciio_dmamap_t, alenlist_t, unsigned);
-void pciio_dmamap_done(pciio_dmamap_t);
-iopaddr_t pciio_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t pciio_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
-void pciio_dmamap_drain(pciio_dmamap_t);
-void pciio_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
-void pciio_dmalist_drain(devfs_handle_t, alenlist_t);
-iopaddr_t pciio_dma_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, pciio_dmamap_t *, unsigned);
-
-pciio_intr_t pciio_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
-void pciio_intr_free(pciio_intr_t);
-int pciio_intr_connect(pciio_intr_t);
-void pciio_intr_disconnect(pciio_intr_t);
-devfs_handle_t pciio_intr_cpu_get(pciio_intr_t);
-
-void pciio_slot_func_to_name(char *, pciio_slot_t, pciio_function_t);
-
-void pciio_provider_startup(devfs_handle_t);
-void pciio_provider_shutdown(devfs_handle_t);
-
-pciio_endian_t pciio_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
-pciio_priority_t pciio_priority_set(devfs_handle_t, pciio_priority_t);
-devfs_handle_t pciio_intr_dev_get(pciio_intr_t);
-
-devfs_handle_t pciio_pio_dev_get(pciio_piomap_t);
-pciio_slot_t pciio_pio_slot_get(pciio_piomap_t);
-pciio_space_t pciio_pio_space_get(pciio_piomap_t);
-iopaddr_t pciio_pio_pciaddr_get(pciio_piomap_t);
-ulong pciio_pio_mapsz_get(pciio_piomap_t);
-caddr_t pciio_pio_kvaddr_get(pciio_piomap_t);
-
-devfs_handle_t pciio_dma_dev_get(pciio_dmamap_t);
-pciio_slot_t pciio_dma_slot_get(pciio_dmamap_t);
-
-pciio_info_t pciio_info_chk(devfs_handle_t);
-pciio_info_t pciio_info_get(devfs_handle_t);
-void pciio_info_set(devfs_handle_t, pciio_info_t);
-devfs_handle_t pciio_info_dev_get(pciio_info_t);
-pciio_slot_t pciio_info_slot_get(pciio_info_t);
-pciio_function_t pciio_info_function_get(pciio_info_t);
-pciio_vendor_id_t pciio_info_vendor_id_get(pciio_info_t);
-pciio_device_id_t pciio_info_device_id_get(pciio_info_t);
-devfs_handle_t pciio_info_master_get(pciio_info_t);
-arbitrary_info_t pciio_info_mfast_get(pciio_info_t);
-pciio_provider_t *pciio_info_pops_get(pciio_info_t);
-error_handler_f *pciio_info_efunc_get(pciio_info_t);
-error_handler_arg_t *pciio_info_einfo_get(pciio_info_t);
-pciio_space_t pciio_info_bar_space_get(pciio_info_t, int);
-iopaddr_t pciio_info_bar_base_get(pciio_info_t, int);
-size_t pciio_info_bar_size_get(pciio_info_t, int);
-iopaddr_t pciio_info_rom_base_get(pciio_info_t);
-size_t pciio_info_rom_size_get(pciio_info_t);
-
-void pciio_init(void);
-int pciio_attach(devfs_handle_t);
-
-void pciio_provider_register(devfs_handle_t, pciio_provider_t *pciio_fns);
-void pciio_provider_unregister(devfs_handle_t);
-pciio_provider_t *pciio_provider_fns_get(devfs_handle_t);
-
-int pciio_driver_register(pciio_vendor_id_t, pciio_device_id_t, char *driver_prefix, unsigned);
-void pciio_driver_unregister(char *driver_prefix);
-
-devfs_handle_t pciio_device_register(devfs_handle_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-
-void pciio_device_unregister(devfs_handle_t);
-pciio_info_t pciio_device_info_new(pciio_info_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-void pciio_device_info_free(pciio_info_t);
-devfs_handle_t pciio_device_info_register(devfs_handle_t, pciio_info_t);
-void pciio_device_info_unregister(devfs_handle_t, pciio_info_t);
-int pciio_device_attach(devfs_handle_t, int);
-int pciio_device_detach(devfs_handle_t, int);
-void pciio_error_register(devfs_handle_t, error_handler_f *, error_handler_arg_t);
-
-int pciio_reset(devfs_handle_t);
-int pciio_write_gather_flush(devfs_handle_t);
-int pciio_slot_inuse(devfs_handle_t);
-
-/* =====================================================================
- * Provider Function Location
- *
- * If there is more than one possible provider for
- * this platform, we need to examine the master
- * vertex of the current vertex for a provider
- * function structure, and indirect through the
- * appropriately named member.
- */
-
-#if !defined(DEV_FUNC)
-
-static pciio_provider_t *
-pciio_to_provider_fns(devfs_handle_t dev)
-{
- pciio_info_t card_info;
- pciio_provider_t *provider_fns;
-
- /*
- * We're called with two types of vertices, one is
- * the bridge vertex (ends with "pci") and the other is the
- * pci slot vertex (ends with "pci/[0-8]"). For the first type
- * we need to get the provider from the PFUNCS label. For
- * the second we get it from fastinfo/c_pops.
- */
- provider_fns = pciio_provider_fns_get(dev);
- if (provider_fns == NULL) {
- card_info = pciio_info_get(dev);
- if (card_info != NULL) {
- provider_fns = pciio_info_pops_get(card_info);
- }
- }
-
- if (provider_fns == NULL)
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- PRINT_PANIC("%v: provider_fns == NULL", dev);
-#else
- PRINT_PANIC("0x%p: provider_fns == NULL", (void *)dev);
-#endif
-
- return provider_fns;
-
-}
-
-#define DEV_FUNC(dev,func) pciio_to_provider_fns(dev)->func
-#define CAST_PIOMAP(x) ((pciio_piomap_t)(x))
-#define CAST_DMAMAP(x) ((pciio_dmamap_t)(x))
-#define CAST_INTR(x) ((pciio_intr_t)(x))
-#endif
-
-/*
- * Many functions are not passed their vertex
- * information directly; rather, they must
- * dive through a resource map. These macros
- * are available to coordinate this detail.
- */
-#define PIOMAP_FUNC(map,func) DEV_FUNC((map)->pp_dev,func)
-#define DMAMAP_FUNC(map,func) DEV_FUNC((map)->pd_dev,func)
-#define INTR_FUNC(intr_hdl,func) DEV_FUNC((intr_hdl)->pi_dev,func)
-
-/* =====================================================================
- * PIO MANAGEMENT
- *
- * For mapping system virtual address space to
- * pciio space on a specified card
- */
-
-pciio_piomap_t
-pciio_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
- device_desc_t dev_desc, /* device descriptor */
- pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
- iopaddr_t addr, /* lowest address (or offset in window) */
- size_t byte_count, /* size of region containing our mappings */
- size_t byte_count_max, /* maximum size of a mapping */
- unsigned flags)
-{ /* defined in sys/pio.h */
- return (pciio_piomap_t) DEV_FUNC(dev, piomap_alloc)
- (dev, dev_desc, space, addr, byte_count, byte_count_max, flags);
-}
-
-void
-pciio_piomap_free(pciio_piomap_t pciio_piomap)
-{
- PIOMAP_FUNC(pciio_piomap, piomap_free)
- (CAST_PIOMAP(pciio_piomap));
-}
-
-caddr_t
-pciio_piomap_addr(pciio_piomap_t pciio_piomap, /* mapping resources */
- iopaddr_t pciio_addr, /* map for this pciio address */
- size_t byte_count)
-{ /* map this many bytes */
- pciio_piomap->pp_kvaddr = PIOMAP_FUNC(pciio_piomap, piomap_addr)
- (CAST_PIOMAP(pciio_piomap), pciio_addr, byte_count);
-
- return pciio_piomap->pp_kvaddr;
-}
-
-void
-pciio_piomap_done(pciio_piomap_t pciio_piomap)
-{
- PIOMAP_FUNC(pciio_piomap, piomap_done)
- (CAST_PIOMAP(pciio_piomap));
-}
-
-caddr_t
-pciio_piotrans_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
- iopaddr_t addr, /* starting address (or offset in window) */
- size_t byte_count, /* map this many bytes */
- unsigned flags)
-{ /* (currently unused) */
- return DEV_FUNC(dev, piotrans_addr)
- (dev, dev_desc, space, addr, byte_count, flags);
-}
-
-caddr_t
-pciio_pio_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
- iopaddr_t addr, /* starting address (or offset in window) */
- size_t byte_count, /* map this many bytes */
- pciio_piomap_t *mapp, /* where to return the map pointer */
- unsigned flags)
-{ /* PIO flags */
- pciio_piomap_t map = 0;
- int errfree = 0;
- caddr_t res;
-
- if (mapp) {
- map = *mapp; /* possible pre-allocated map */
- *mapp = 0; /* record "no map used" */
- }
-
- res = pciio_piotrans_addr
- (dev, dev_desc, space, addr, byte_count, flags);
- if (res)
- return res; /* pciio_piotrans worked */
-
- if (!map) {
- map = pciio_piomap_alloc
- (dev, dev_desc, space, addr, byte_count, byte_count, flags);
- if (!map)
- return res; /* pciio_piomap_alloc failed */
- errfree = 1;
- }
-
- res = pciio_piomap_addr
- (map, addr, byte_count);
- if (!res) {
- if (errfree)
- pciio_piomap_free(map);
- return res; /* pciio_piomap_addr failed */
- }
- if (mapp)
- *mapp = map; /* pass back map used */
-
- return res; /* pciio_piomap_addr succeeded */
-}
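A minimal caller sketch (not from this patch) showing how pciio_pio_addr() hides whether the provider translated directly or consumed a piomap; the helper name, the window offset/size and the PCIIO_SPACE_MEM32 constant are illustrative assumptions:

	/* Hypothetical helper, for illustration only. */
	static caddr_t
	example_map_regs(devfs_handle_t pconn, pciio_piomap_t *mapp)
	{
		caddr_t regs;

		*mapp = NULL;		/* let pciio decide whether a map is needed */
		regs = pciio_pio_addr(pconn, NULL, PCIIO_SPACE_MEM32,
				      0, 0x100, mapp, 0);
		/* NULL means neither direct translation nor a map worked.
		 * *mapp comes back non-NULL only when the slow path consumed
		 * a map; the caller then owns pciio_piomap_done()/_free().
		 */
		return regs;
	}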
-
-iopaddr_t
-pciio_piospace_alloc(devfs_handle_t dev, /* Device requiring space */
- device_desc_t dev_desc, /* Device descriptor */
- pciio_space_t space, /* MEM32/MEM64/IO */
- size_t byte_count, /* Size of mapping */
- size_t align)
-{ /* Alignment needed */
- if (align < NBPP)
- align = NBPP;
- return DEV_FUNC(dev, piospace_alloc)
- (dev, dev_desc, space, byte_count, align);
-}
-
-void
-pciio_piospace_free(devfs_handle_t dev, /* Device freeing space */
- pciio_space_t space, /* Type of space */
- iopaddr_t pciaddr, /* starting address */
- size_t byte_count)
-{ /* Range of address */
- DEV_FUNC(dev, piospace_free)
- (dev, space, pciaddr, byte_count);
-}
-
-/* =====================================================================
- * DMA MANAGEMENT
- *
- * For mapping from pci space to system
- * physical space.
- */
-
-pciio_dmamap_t
-pciio_dmamap_alloc(devfs_handle_t dev, /* set up mappings for this device */
- device_desc_t dev_desc, /* device descriptor */
- size_t byte_count_max, /* max size of a mapping */
- unsigned flags)
-{ /* defined in dma.h */
- return (pciio_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
- (dev, dev_desc, byte_count_max, flags);
-}
-
-void
-pciio_dmamap_free(pciio_dmamap_t pciio_dmamap)
-{
- DMAMAP_FUNC(pciio_dmamap, dmamap_free)
- (CAST_DMAMAP(pciio_dmamap));
-}
-
-iopaddr_t
-pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap, /* use these mapping resources */
- paddr_t paddr, /* map for this address */
- size_t byte_count)
-{ /* map this many bytes */
- return DMAMAP_FUNC(pciio_dmamap, dmamap_addr)
- (CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
-}
-
-alenlist_t
-pciio_dmamap_list(pciio_dmamap_t pciio_dmamap, /* use these mapping resources */
- alenlist_t alenlist, /* map this Address/Length List */
- unsigned flags)
-{
- return DMAMAP_FUNC(pciio_dmamap, dmamap_list)
- (CAST_DMAMAP(pciio_dmamap), alenlist, flags);
-}
-
-void
-pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
-{
- DMAMAP_FUNC(pciio_dmamap, dmamap_done)
- (CAST_DMAMAP(pciio_dmamap));
-}
-
-iopaddr_t
-pciio_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- paddr_t paddr, /* system physical address */
- size_t byte_count, /* length */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_addr)
- (dev, dev_desc, paddr, byte_count, flags);
-}
-
-alenlist_t
-pciio_dmatrans_list(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- alenlist_t palenlist, /* system address/length list */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_list)
- (dev, dev_desc, palenlist, flags);
-}
-
-iopaddr_t
-pciio_dma_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- paddr_t paddr, /* system physical address */
- size_t byte_count, /* length */
- pciio_dmamap_t *mapp, /* map to use, then map we used */
- unsigned flags)
-{ /* PIO flags */
- pciio_dmamap_t map = 0;
- int errfree = 0;
- iopaddr_t res;
-
- if (mapp) {
- map = *mapp; /* possible pre-allocated map */
- *mapp = 0; /* record "no map used" */
- }
-
- res = pciio_dmatrans_addr
- (dev, dev_desc, paddr, byte_count, flags);
- if (res)
- return res; /* pciio_dmatrans worked */
-
- if (!map) {
- map = pciio_dmamap_alloc
- (dev, dev_desc, byte_count, flags);
- if (!map)
- return res; /* pciio_dmamap_alloc failed */
- errfree = 1;
- }
-
- res = pciio_dmamap_addr
- (map, paddr, byte_count);
- if (!res) {
- if (errfree)
- pciio_dmamap_free(map);
- return res; /* pciio_dmamap_addr failed */
- }
- if (mapp)
- *mapp = map; /* pass back map used */
-
- return res; /* pciio_dmamap_addr succeeded */
-}
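pciio_dma_addr() follows the same try-translate-then-map pattern for DMA. A hedged sketch (the helper name, buf, len and the 0 flags value are assumptions; real drivers would pass the appropriate pciio DMA flags):

	/* Hypothetical helper, for illustration only. */
	static iopaddr_t
	example_map_dma(devfs_handle_t pconn, void *buf, size_t len, pciio_dmamap_t *mapp)
	{
		iopaddr_t pci_addr;

		*mapp = NULL;
		pci_addr = pciio_dma_addr(pconn, NULL, __pa(buf), len, mapp, 0);
		/* 0 means no direct translation and no dmamap could be set up.
		 * When *mapp comes back non-NULL, the caller later calls
		 * pciio_dmamap_done() and pciio_dmamap_free().
		 */
		return pci_addr;
	}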
-
-void
-pciio_dmamap_drain(pciio_dmamap_t map)
-{
- DMAMAP_FUNC(map, dmamap_drain)
- (CAST_DMAMAP(map));
-}
-
-void
-pciio_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
-{
- DEV_FUNC(dev, dmaaddr_drain)
- (dev, addr, size);
-}
-
-void
-pciio_dmalist_drain(devfs_handle_t dev, alenlist_t list)
-{
- DEV_FUNC(dev, dmalist_drain)
- (dev, list);
-}
-
-/* =====================================================================
- * INTERRUPT MANAGEMENT
- *
- * Allow crosstalk devices to establish interrupts
- */
-
-/*
- * Allocate resources required for an interrupt as specified in intr_desc.
- * Return resource handle in intr_hdl.
- */
-pciio_intr_t
-pciio_intr_alloc(devfs_handle_t dev, /* which Crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- pciio_intr_line_t lines, /* INTR line(s) to attach */
- devfs_handle_t owner_dev)
-{ /* owner of this interrupt */
- return (pciio_intr_t) DEV_FUNC(dev, intr_alloc)
- (dev, dev_desc, lines, owner_dev);
-}
-
-/*
- * Free resources consumed by intr_alloc.
- */
-void
-pciio_intr_free(pciio_intr_t intr_hdl)
-{
- INTR_FUNC(intr_hdl, intr_free)
- (CAST_INTR(intr_hdl));
-}
-
-/*
- * Associate resources allocated with a previous pciio_intr_alloc call with the
- * described handler, arg, name, etc.
- *
- * Returns 0 on success, returns <0 on failure.
- */
-int
-pciio_intr_connect(pciio_intr_t intr_hdl) /* pciio intr resource handle */
-{
- return INTR_FUNC(intr_hdl, intr_connect)
- (CAST_INTR(intr_hdl));
-}
-
-/*
- * Disassociate handler with the specified interrupt.
- */
-void
-pciio_intr_disconnect(pciio_intr_t intr_hdl)
-{
- INTR_FUNC(intr_hdl, intr_disconnect)
- (CAST_INTR(intr_hdl));
-}
-
-/*
- * Return a hwgraph vertex that represents the CPU currently
- * targeted by an interrupt.
- */
-devfs_handle_t
-pciio_intr_cpu_get(pciio_intr_t intr_hdl)
-{
- return INTR_FUNC(intr_hdl, intr_cpu_get)
- (CAST_INTR(intr_hdl));
-}
-
-void
-pciio_slot_func_to_name(char *name,
- pciio_slot_t slot,
- pciio_function_t func)
-{
- /*
- * standard connection points:
- *
- * PCIIO_SLOT_NONE: .../pci/direct
- * PCIIO_FUNC_NONE: .../pci/<SLOT> i.e. .../pci/3
- * multifunction: .../pci/<SLOT><FUNC> i.e. .../pci/3c
- */
-
- if (slot == PCIIO_SLOT_NONE)
- sprintf(name, "direct");
- else if (func == PCIIO_FUNC_NONE)
- sprintf(name, "%d", slot);
- else
- sprintf(name, "%d%c", slot, 'a'+func);
-}
-
-/* =====================================================================
- * CONFIGURATION MANAGEMENT
- */
-
-/*
- * Startup a crosstalk provider
- */
-void
-pciio_provider_startup(devfs_handle_t pciio_provider)
-{
- DEV_FUNC(pciio_provider, provider_startup)
- (pciio_provider);
-}
-
-/*
- * Shutdown a crosstalk provider
- */
-void
-pciio_provider_shutdown(devfs_handle_t pciio_provider)
-{
- DEV_FUNC(pciio_provider, provider_shutdown)
- (pciio_provider);
-}
-
-/*
- * Specify endianness constraints. The driver tells us what the device
- * does and how it would like to see things in memory. We reply with
- * how things will actually appear in memory.
- */
-pciio_endian_t
-pciio_endian_set(devfs_handle_t dev,
- pciio_endian_t device_end,
- pciio_endian_t desired_end)
-{
- ASSERT((device_end == PCIDMA_ENDIAN_BIG) || (device_end == PCIDMA_ENDIAN_LITTLE));
- ASSERT((desired_end == PCIDMA_ENDIAN_BIG) || (desired_end == PCIDMA_ENDIAN_LITTLE));
-
-#if DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_ALERT "%v: pciio_endian_set is going away.\n"
- "\tplease use PCIIO_BYTE_STREAM or PCIIO_WORD_VALUES in your\n"
- "\tpciio_dmamap_alloc and pciio_dmatrans calls instead.\n",
- dev);
-#else
- printk(KERN_ALERT "0x%x: pciio_endian_set is going away.\n"
- "\tplease use PCIIO_BYTE_STREAM or PCIIO_WORD_VALUES in your\n"
- "\tpciio_dmamap_alloc and pciio_dmatrans calls instead.\n",
- dev);
-#endif
-#endif
-
- return DEV_FUNC(dev, endian_set)
- (dev, device_end, desired_end);
-}
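A small usage sketch (not from this patch; pconn and the helper name are assumptions): a card that produces big-endian data whose driver wants little-endian values in memory negotiates as below, then checks what the provider actually arranged:

	/* Hypothetical negotiation, for illustration only. */
	static void
	example_set_endianness(devfs_handle_t pconn)
	{
		pciio_endian_t actual;

		actual = pciio_endian_set(pconn, PCIDMA_ENDIAN_BIG, PCIDMA_ENDIAN_LITTLE);
		if (actual != PCIDMA_ENDIAN_LITTLE) {
			/* provider could not swap; driver must byte-swap in software */
		}
	}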
-
-/*
- * Specify PCI arbitration priority.
- */
-pciio_priority_t
-pciio_priority_set(devfs_handle_t dev,
- pciio_priority_t device_prio)
-{
- ASSERT((device_prio == PCI_PRIO_HIGH) || (device_prio == PCI_PRIO_LOW));
-
- return DEV_FUNC(dev, priority_set)
- (dev, device_prio);
-}
-
-/*
- * Read value of configuration register
- */
-uint64_t
-pciio_config_get(devfs_handle_t dev,
- unsigned reg,
- unsigned size)
-{
- uint64_t value = 0;
- unsigned shift = 0;
-
- /* handle accesses that cross words here,
- * since that's common code between all
- * possible providers.
- */
- while (size > 0) {
- unsigned biw = 4 - (reg&3);
- if (biw > size)
- biw = size;
-
- value |= DEV_FUNC(dev, config_get)
- (dev, reg, biw) << shift;
-
- shift += 8*biw;
- reg += biw;
- size -= biw;
- }
- return value;
-}
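To make the split concrete, a worked example of the bytes-in-word (biw) loop above, with assumed values:

	/*
	 * pciio_config_get(dev, 0x6, 4) is performed as two provider calls:
	 *   pass 1: reg = 0x6, biw = 4 - (0x6 & 3) = 2 -> read 2 bytes, shift 0
	 *   pass 2: reg = 0x8, biw = 4 - (0x8 & 3) = 4, capped to the remaining
	 *           size of 2          -> read 2 bytes, shift 16
	 * so no single access crosses a 32-bit word, yet the caller still sees
	 * one combined 32-bit value.
	 */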
-
-/*
- * Change value of configuration register
- */
-void
-pciio_config_set(devfs_handle_t dev,
- unsigned reg,
- unsigned size,
- uint64_t value)
-{
- /* handle accesses that cross words here,
- * since that's common code between all
- * possible providers.
- */
- while (size > 0) {
- unsigned biw = 4 - (reg&3);
- if (biw > size)
- biw = size;
-
- DEV_FUNC(dev, config_set)
- (dev, reg, biw, value);
- reg += biw;
- size -= biw;
- value >>= biw * 8;
- }
-}
-
-/* =====================================================================
- * GENERIC PCI SUPPORT FUNCTIONS
- */
-
-/*
- * Issue a hardware reset to a card.
- */
-int
-pciio_reset(devfs_handle_t dev)
-{
- return DEV_FUNC(dev, reset) (dev);
-}
-
-/*
- * flush write gather buffers
- */
-int
-pciio_write_gather_flush(devfs_handle_t dev)
-{
- return DEV_FUNC(dev, write_gather_flush) (dev);
-}
-
-devfs_handle_t
-pciio_intr_dev_get(pciio_intr_t pciio_intr)
-{
- return (pciio_intr->pi_dev);
-}
-
-/****** Generic crosstalk pio interfaces ******/
-devfs_handle_t
-pciio_pio_dev_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_dev);
-}
-
-pciio_slot_t
-pciio_pio_slot_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_slot);
-}
-
-pciio_space_t
-pciio_pio_space_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_space);
-}
-
-iopaddr_t
-pciio_pio_pciaddr_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_pciaddr);
-}
-
-ulong
-pciio_pio_mapsz_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_mapsz);
-}
-
-caddr_t
-pciio_pio_kvaddr_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_kvaddr);
-}
-
-/****** Generic crosstalk dma interfaces ******/
-devfs_handle_t
-pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap)
-{
- return (pciio_dmamap->pd_dev);
-}
-
-pciio_slot_t
-pciio_dma_slot_get(pciio_dmamap_t pciio_dmamap)
-{
- return (pciio_dmamap->pd_slot);
-}
-
-/****** Generic pci slot information interfaces ******/
-
-pciio_info_t
-pciio_info_chk(devfs_handle_t pciio)
-{
- arbitrary_info_t ainfo = 0;
-
- hwgraph_info_get_LBL(pciio, INFO_LBL_PCIIO, &ainfo);
- return (pciio_info_t) ainfo;
-}
-
-pciio_info_t
-pciio_info_get(devfs_handle_t pciio)
-{
- pciio_info_t pciio_info;
-
- pciio_info = (pciio_info_t) hwgraph_fastinfo_get(pciio);
-
-#ifdef DEBUG_PCIIO
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(pciio, dname, 256);
- printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
- }
-#endif /* DEBUG_PCIIO */
-
- if ((pciio_info != NULL) &&
- (pciio_info->c_fingerprint != pciio_info_fingerprint)
- && (pciio_info->c_fingerprint != NULL)) {
-
- return((pciio_info_t)-1); /* Should panic .. */
- }
-
-
- return pciio_info;
-}
-
-void
-pciio_info_set(devfs_handle_t pciio, pciio_info_t pciio_info)
-{
- if (pciio_info != NULL)
- pciio_info->c_fingerprint = pciio_info_fingerprint;
- hwgraph_fastinfo_set(pciio, (arbitrary_info_t) pciio_info);
-
- /* Also, mark this vertex as a PCI slot
- * and use the pciio_info, so pciio_info_chk
- * can work (and be fairly efficient).
- */
- hwgraph_info_add_LBL(pciio, INFO_LBL_PCIIO,
- (arbitrary_info_t) pciio_info);
-}
-
-devfs_handle_t
-pciio_info_dev_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_vertex);
-}
-
-/*ARGSUSED*/
-pciio_bus_t
-pciio_info_bus_get(pciio_info_t pciio_info)
-{
- /* XXX for now O2 always gets back bus 0 */
- return (pciio_bus_t)0;
-}
-
-pciio_slot_t
-pciio_info_slot_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_slot);
-}
-
-pciio_function_t
-pciio_info_function_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_func);
-}
-
-pciio_vendor_id_t
-pciio_info_vendor_id_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_vendor);
-}
-
-pciio_device_id_t
-pciio_info_device_id_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_device);
-}
-
-devfs_handle_t
-pciio_info_master_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_master);
-}
-
-arbitrary_info_t
-pciio_info_mfast_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_mfast);
-}
-
-pciio_provider_t *
-pciio_info_pops_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_pops);
-}
-
-error_handler_f *
-pciio_info_efunc_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_efunc);
-}
-
-error_handler_arg_t *
-pciio_info_einfo_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_einfo);
-}
-
-pciio_space_t
-pciio_info_bar_space_get(pciio_info_t info, int win)
-{
- return info->c_window[win].w_space;
-}
-
-iopaddr_t
-pciio_info_bar_base_get(pciio_info_t info, int win)
-{
- return info->c_window[win].w_base;
-}
-
-size_t
-pciio_info_bar_size_get(pciio_info_t info, int win)
-{
- return info->c_window[win].w_size;
-}
-
-iopaddr_t
-pciio_info_rom_base_get(pciio_info_t info)
-{
- return info->c_rbase;
-}
-
-size_t
-pciio_info_rom_size_get(pciio_info_t info)
-{
- return info->c_rsize;
-}
-
-
-/* =====================================================================
- * GENERIC PCI INITIALIZATION FUNCTIONS
- */
-
-/*
- * pciio_init: called once during device driver
- * initialization if this driver is configured into
- * the system.
- */
-void
-pciio_init(void)
-{
- cdl_p cp;
-
-#if DEBUG && ATTACH_DEBUG
- printf("pciio_init\n");
-#endif
- /* Allocate the registry.
- * We might already have one.
- * If we don't, go get one.
- * MPness: someone might have
- * set one up for us while we
- * were not looking; use an atomic
- * compare-and-swap to commit to
- * using the new registry if and
- * only if nobody else did first.
- * If someone did get there first,
- * toss the one we allocated back
- * into the pool.
- */
- if (pciio_registry == NULL) {
- cp = cdl_new(EDGE_LBL_PCI, "vendor", "device");
- if (!compare_and_swap_ptr((void **) &pciio_registry, NULL, (void *) cp)) {
- cdl_del(cp);
- }
- }
- ASSERT(pciio_registry != NULL);
-}
-
-/*
- * pciioattach: called for each vertex in the graph
- * that is a PCI provider.
- */
-/*ARGSUSED */
-int
-pciio_attach(devfs_handle_t pciio)
-{
-#if DEBUG && ATTACH_DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk("%v: pciio_attach\n", pciio);
-#else
- printk("0x%x: pciio_attach\n", pciio);
-#endif
-#endif
- return 0;
-}
-
-/*
- * Associate a set of pciio_provider functions with a vertex.
- */
-void
-pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns)
-{
- hwgraph_info_add_LBL(provider, INFO_LBL_PFUNCS, (arbitrary_info_t) pciio_fns);
-}
-
-/*
- * Disassociate a set of pciio_provider functions with a vertex.
- */
-void
-pciio_provider_unregister(devfs_handle_t provider)
-{
- arbitrary_info_t ainfo;
-
- hwgraph_info_remove_LBL(provider, INFO_LBL_PFUNCS, (long *) &ainfo);
-}
-
-/*
- * Obtain a pointer to the pciio_provider functions for a specified Crosstalk
- * provider.
- */
-pciio_provider_t *
-pciio_provider_fns_get(devfs_handle_t provider)
-{
- arbitrary_info_t ainfo = 0;
-
- (void) hwgraph_info_get_LBL(provider, INFO_LBL_PFUNCS, &ainfo);
- return (pciio_provider_t *) ainfo;
-}
-
-/*ARGSUSED4 */
-int
-pciio_driver_register(
- pciio_vendor_id_t vendor_id,
- pciio_device_id_t device_id,
- char *driver_prefix,
- unsigned flags)
-{
- /* a driver's init routine might call
- * pciio_driver_register before the
- * system calls pciio_init; so we
- * make the init call ourselves here.
- */
- if (pciio_registry == NULL)
- pciio_init();
-
- return cdl_add_driver(pciio_registry,
- vendor_id, device_id,
- driver_prefix, flags, NULL);
-}
-
-/*
- * Remove an initialization function.
- */
-void
-pciio_driver_unregister(
- char *driver_prefix)
-{
- /* before a driver calls unregister,
- * it must have called register; so
- * we can assume we have a registry here.
- */
- ASSERT(pciio_registry != NULL);
-
- cdl_del_driver(pciio_registry, driver_prefix, NULL);
-}
-
-/*
- * Set the slot status for a device supported by the
- * driver being registered.
- */
-void
-pciio_driver_reg_callback(
- devfs_handle_t pconn_vhdl,
- int key1,
- int key2,
- int error)
-{
-}
-
-/*
- * Set the slot status for a device supported by the
- * driver being unregistered.
- */
-void
-pciio_driver_unreg_callback(
- devfs_handle_t pconn_vhdl,
- int key1,
- int key2,
- int error)
-{
-}
-
-/*
- * Call some function with each vertex that
- * might be one of this driver's attach points.
- */
-void
-pciio_iterate(char *driver_prefix,
- pciio_iter_f * func)
-{
- /* a driver's init routine might call
- * pciio_iterate before the
- * system calls pciio_init; so we
- * make the init call ourselves here.
- */
- if (pciio_registry == NULL)
- pciio_init();
-
- ASSERT(pciio_registry != NULL);
-
- cdl_iterate(pciio_registry, driver_prefix, (cdl_iter_f *) func);
-}
-
-devfs_handle_t
-pciio_device_register(
- devfs_handle_t connectpt, /* vertex for /hw/.../pciio/%d */
- devfs_handle_t master, /* card's master ASIC (PCI provider) */
- pciio_slot_t slot, /* card's slot */
- pciio_function_t func, /* card's func */
- pciio_vendor_id_t vendor_id,
- pciio_device_id_t device_id)
-{
- return pciio_device_info_register
- (connectpt, pciio_device_info_new (NULL, master, slot, func,
- vendor_id, device_id));
-}
-
-void
-pciio_device_unregister(devfs_handle_t pconn)
-{
- DEV_FUNC(pconn,device_unregister)(pconn);
-}
-
-pciio_info_t
-pciio_device_info_new(
- pciio_info_t pciio_info,
- devfs_handle_t master,
- pciio_slot_t slot,
- pciio_function_t func,
- pciio_vendor_id_t vendor_id,
- pciio_device_id_t device_id)
-{
- if (!pciio_info)
- GET_NEW(pciio_info);
- ASSERT(pciio_info != NULL);
-
- pciio_info->c_slot = slot;
- pciio_info->c_func = func;
- pciio_info->c_vendor = vendor_id;
- pciio_info->c_device = device_id;
- pciio_info->c_master = master;
- pciio_info->c_mfast = hwgraph_fastinfo_get(master);
- pciio_info->c_pops = pciio_provider_fns_get(master);
- pciio_info->c_efunc = 0;
- pciio_info->c_einfo = 0;
-
- return pciio_info;
-}
-
-void
-pciio_device_info_free(pciio_info_t pciio_info)
-{
- /* NOTE : pciio_info is a structure within the pcibr_info
- * and not a pointer to memory allocated on the heap !!
- */
- BZERO((char *)pciio_info,sizeof(pciio_info));
-}
-
-devfs_handle_t
-pciio_device_info_register(
- devfs_handle_t connectpt, /* vertex at center of bus */
- pciio_info_t pciio_info) /* details about the connectpt */
-{
- char name[32];
- devfs_handle_t pconn;
- int device_master_set(devfs_handle_t, devfs_handle_t);
-
- pciio_slot_func_to_name(name,
- pciio_info->c_slot,
- pciio_info->c_func);
-
- if (GRAPH_SUCCESS !=
- hwgraph_path_add(connectpt, name, &pconn))
- return pconn;
-
- pciio_info->c_vertex = pconn;
- pciio_info_set(pconn, pciio_info);
-#ifdef DEBUG_PCIIO
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(pconn, dname, 256);
- printk("%s : pconn path= %s \n", __FUNCTION__, &dname[pos]);
- }
-#endif /* DEBUG_PCIIO */
-
- /*
- * create link to our pci provider
- */
-
- device_master_set(pconn, pciio_info->c_master);
-
-#if USRPCI
- /*
- * Call into usrpci provider to let it initialize for
- * the given slot.
- */
- if (pciio_info->c_slot != PCIIO_SLOT_NONE)
- usrpci_device_register(pconn, pciio_info->c_master, pciio_info->c_slot);
-#endif
-
- return pconn;
-}
-
-void
-pciio_device_info_unregister(devfs_handle_t connectpt,
- pciio_info_t pciio_info)
-{
- char name[32];
- devfs_handle_t pconn;
-
- if (!pciio_info)
- return;
-
- pciio_slot_func_to_name(name,
- pciio_info->c_slot,
- pciio_info->c_func);
-
- hwgraph_edge_remove(connectpt,name,&pconn);
- pciio_info_set(pconn,0);
-
- /* Remove the link to our pci provider */
- hwgraph_edge_remove(pconn, EDGE_LBL_MASTER, NULL);
-
-
- hwgraph_vertex_unref(pconn);
- hwgraph_vertex_destroy(pconn);
-
-}
-/* Add the pci card inventory information to the hwgraph
- */
-static void
-pciio_device_inventory_add(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
-
- ASSERT(pciio_info);
- ASSERT(pciio_info->c_vertex == pconn_vhdl);
-
- /* Do not add inventory for non-existent devices */
- if ((pciio_info->c_vendor == PCIIO_VENDOR_ID_NONE) ||
- (pciio_info->c_device == PCIIO_DEVICE_ID_NONE))
- return;
- device_inventory_add(pconn_vhdl,INV_IOBD,INV_PCIADAP,
- pciio_info->c_vendor,pciio_info->c_device,
- pciio_info->c_slot);
-}
-
-static void
-pciio_device_inventory_remove(devfs_handle_t pconn_vhdl)
-{
-#ifdef LATER
- hwgraph_inventory_remove(pconn_vhdl,-1,-1,-1,-1,-1);
-#endif
-}
-
-/*ARGSUSED */
-int
-pciio_device_attach(devfs_handle_t pconn,
- int drv_flags)
-{
- pciio_info_t pciio_info;
- pciio_vendor_id_t vendor_id;
- pciio_device_id_t device_id;
-
-
- pciio_device_inventory_add(pconn);
- pciio_info = pciio_info_get(pconn);
-
- vendor_id = pciio_info->c_vendor;
- device_id = pciio_info->c_device;
-
- /* we don't start attaching things until
- * all the driver init routines (including
- * pciio_init) have been called; so we
- * can assume here that we have a registry.
- */
- ASSERT(pciio_registry != NULL);
-
- return(cdl_add_connpt(pciio_registry, vendor_id, device_id, pconn, drv_flags));
-}
-
-int
-pciio_device_detach(devfs_handle_t pconn,
- int drv_flags)
-{
- pciio_info_t pciio_info;
- pciio_vendor_id_t vendor_id;
- pciio_device_id_t device_id;
-
- pciio_device_inventory_remove(pconn);
- pciio_info = pciio_info_get(pconn);
-
- vendor_id = pciio_info->c_vendor;
- device_id = pciio_info->c_device;
-
- /* we don't start attaching things until
- * all the driver init routines (including
- * pciio_init) have been called; so we
- * can assume here that we have a registry.
- */
- ASSERT(pciio_registry != NULL);
-
- return(cdl_del_connpt(pciio_registry, vendor_id, device_id,
- pconn, drv_flags));
-
-}
-
-/*
- * pciio_error_register:
- * arrange for a function to be called with
- * a specified first parameter plus other
- * information when an error is encountered
- * and traced to the pci slot corresponding
- * to the connection point pconn.
- *
- * may also be called with a null function
- * pointer to "unregister" the error handler.
- *
- * NOTE: subsequent calls silently overwrite
- * previous data for this vertex. We assume that
- * cooperating drivers, well, cooperate ...
- */
-void
-pciio_error_register(devfs_handle_t pconn,
- error_handler_f *efunc,
- error_handler_arg_t einfo)
-{
- pciio_info_t pciio_info;
-
- pciio_info = pciio_info_get(pconn);
- ASSERT(pciio_info != NULL);
- pciio_info->c_efunc = efunc;
- pciio_info->c_einfo = einfo;
-}
-
-/*
- * Check whether any device has been found in this slot and return
- * 1 (true) or 0 (false).
- * pconn_vhdl is the vertex for the slot.
- */
-int
-pciio_slot_inuse(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
-
- ASSERT(pciio_info);
- ASSERT(pciio_info->c_vertex == pconn_vhdl);
- if (pciio_info->c_vendor) {
- /*
- * A non-zero vendor value indicates that
- * a board was found in this slot.
- */
- return 1;
- }
- return 0;
-}
-
-int
-pciio_dma_enabled(devfs_handle_t pconn_vhdl)
-{
- return DEV_FUNC(pconn_vhdl, dma_enabled)(pconn_vhdl);
-}
-
-/*
- * These are complementary Linux interfaces that take a struct pci_dev * as
- * the first argument instead of a devfs_handle_t.
- */
-iopaddr_t snia_pciio_dmatrans_addr(struct pci_dev *, device_desc_t, paddr_t, size_t, unsigned);
-pciio_dmamap_t snia_pciio_dmamap_alloc(struct pci_dev *, device_desc_t, size_t, unsigned);
-void snia_pciio_dmamap_free(pciio_dmamap_t);
-iopaddr_t snia_pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
-void snia_pciio_dmamap_done(pciio_dmamap_t);
-pciio_endian_t snia_pciio_endian_set(struct pci_dev *pci_dev, pciio_endian_t device_end,
- pciio_endian_t desired_end);
-
-#include <linux/module.h>
-EXPORT_SYMBOL(snia_pciio_dmatrans_addr);
-EXPORT_SYMBOL(snia_pciio_dmamap_alloc);
-EXPORT_SYMBOL(snia_pciio_dmamap_free);
-EXPORT_SYMBOL(snia_pciio_dmamap_addr);
-EXPORT_SYMBOL(snia_pciio_dmamap_done);
-EXPORT_SYMBOL(snia_pciio_endian_set);
-
-pciio_endian_t
-snia_pciio_endian_set(struct pci_dev *pci_dev,
- pciio_endian_t device_end,
- pciio_endian_t desired_end)
-{
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
-
- return DEV_FUNC(dev, endian_set)
- (dev, device_end, desired_end);
-}
-
-iopaddr_t
-snia_pciio_dmatrans_addr(struct pci_dev *pci_dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- paddr_t paddr, /* system physical address */
- size_t byte_count, /* length */
- unsigned flags)
-{ /* defined in dma.h */
-
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
-
- return DEV_FUNC(dev, dmatrans_addr)
- (dev, dev_desc, paddr, byte_count, flags);
-}
-
-pciio_dmamap_t
-snia_pciio_dmamap_alloc(struct pci_dev *pci_dev, /* set up mappings for this device */
- device_desc_t dev_desc, /* device descriptor */
- size_t byte_count_max, /* max size of a mapping */
- unsigned flags)
-{ /* defined in dma.h */
-
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
-
- return (pciio_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
- (dev, dev_desc, byte_count_max, flags);
-}
-
-void
-snia_pciio_dmamap_free(pciio_dmamap_t pciio_dmamap)
-{
- DMAMAP_FUNC(pciio_dmamap, dmamap_free)
- (CAST_DMAMAP(pciio_dmamap));
-}
-
-iopaddr_t
-snia_pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap, /* use these mapping resources */
- paddr_t paddr, /* map for this address */
- size_t byte_count)
-{ /* map this many bytes */
- return DMAMAP_FUNC(pciio_dmamap, dmamap_addr)
- (CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
-}
-
-void
-snia_pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
-{
- DMAMAP_FUNC(pciio_dmamap, dmamap_done)
- (CAST_DMAMAP(pciio_dmamap));
-}
-
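A hedged example (not from this patch) of how an ordinary Linux driver, holding only a struct pci_dev, might use these wrappers; the helper name, pdev, buf, len and the 0 flags value are assumptions:

	/* Hypothetical helper, for illustration only. */
	static dma_addr_t
	example_snia_dma(struct pci_dev *pdev, void *buf, size_t len)
	{
		dma_addr_t bus;

		bus = (dma_addr_t) snia_pciio_dmatrans_addr(pdev, NULL, __pa(buf), len, 0);
		if (bus == 0) {
			/* direct translation failed; fall back to an explicit map,
			 * which the caller would keep around for
			 * snia_pciio_dmamap_done()/_free() later.
			 */
			pciio_dmamap_t map = snia_pciio_dmamap_alloc(pdev, NULL, len, 0);
			if (map != NULL)
				bus = (dma_addr_t) snia_pciio_dmamap_addr(map, __pa(buf), len);
		}
		return bus;
	}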
diff --git a/arch/ia64/sn/io/platform_init/Makefile b/arch/ia64/sn/io/platform_init/Makefile
new file mode 100644
index 00000000000000..05ffc316269c04
--- /dev/null
+++ b/arch/ia64/sn/io/platform_init/Makefile
@@ -0,0 +1,12 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 platform initialization routines.
+
+EXTRA_CFLAGS := -DLITTLE_ENDIAN
+
+obj-y += sgi_io_init.o irix_io_init.o
diff --git a/arch/ia64/sn/io/platform_init/irix_io_init.c b/arch/ia64/sn/io/platform_init/irix_io_init.c
new file mode 100644
index 00000000000000..3078774b30e353
--- /dev/null
+++ b/arch/ia64/sn/io/platform_init/irix_io_init.c
@@ -0,0 +1,89 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/pci/pciba.h>
+#include <linux/smp.h>
+#include <asm/sn/simulator.h>
+
+extern void init_all_devices(void);
+extern void klhwg_add_all_modules(vertex_hdl_t);
+extern void klhwg_add_all_nodes(vertex_hdl_t);
+
+extern vertex_hdl_t hwgraph_root;
+extern void io_module_init(void);
+extern int pci_bus_to_hcl_cvlink(void);
+extern void mlreset(void);
+
+/* #define DEBUG_IO_INIT 1 */
+#ifdef DEBUG_IO_INIT
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif /* DEBUG_IO_INIT */
+
+/*
+ * This routine is responsible for the setup of all the IRIX hwgraph style
+ * stuff that's been pulled into linux. It's called by sn_pci_find_bios which
+ * is called just before the generic Linux PCI layer does its probing (by
+ * platform_pci_fixup aka sn_pci_fixup).
+ *
+ * It is very IMPORTANT that this call is only made by the Master CPU!
+ *
+ */
+
+void
+irix_io_init(void)
+{
+ cnodeid_t cnode;
+
+ /*
+ * This is the Master CPU. Emulate mlsetup and main.c in Irix.
+ */
+ mlreset();
+
+ /*
+ * Initialize platform-dependent vertices in the hwgraph:
+ * module
+ * node
+ * cpu
+ * memory
+ * slot
+ * hub
+ * router
+ * xbow
+ */
+
+ io_module_init(); /* Used to be called module_init() .. */
+ klhwg_add_all_modules(hwgraph_root);
+ klhwg_add_all_nodes(hwgraph_root);
+
+ for (cnode = 0; cnode < numnodes; cnode++) {
+ extern void per_hub_init(cnodeid_t);
+ per_hub_init(cnode);
+ }
+
+ /* We can do headless hub cnodes here .. */
+
+ /*
+ *
+ * Our IO Infrastructure drivers are in place ..
+ * Initialize the whole IO Infrastructure .. xwidget/device probes.
+ *
+ */
+ init_all_devices();
+ pci_bus_to_hcl_cvlink();
+}
diff --git a/arch/ia64/sn/io/platform_init/sgi_io_init.c b/arch/ia64/sn/io/platform_init/sgi_io_init.c
new file mode 100644
index 00000000000000..3583456b1ccc3f
--- /dev/null
+++ b/arch/ia64/sn/io/platform_init/sgi_io_init.c
@@ -0,0 +1,109 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/pda.h>
+#include <linux/smp.h>
+
+extern int init_hcl(void);
+
+/*
+ * per_hub_init
+ *
+ * This code is executed once for each Hub chip.
+ */
+void
+per_hub_init(cnodeid_t cnode)
+{
+ nasid_t nasid;
+ nodepda_t *npdap;
+ ii_icmr_u_t ii_icmr;
+ ii_ibcr_u_t ii_ibcr;
+ ii_ilcsr_u_t ii_ilcsr;
+
+ nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+ ASSERT(nasid != INVALID_NASID);
+ ASSERT(NASID_TO_COMPACT_NODEID(nasid) == cnode);
+
+ npdap = NODEPDA(cnode);
+
+ /* Disable the request and reply errors. */
+ REMOTE_HUB_S(nasid, IIO_IWEIM, 0xC000);
+
+ /*
+ * Set the total number of CRBs that can be used.
+ */
+ ii_icmr.ii_icmr_regval= 0x0;
+ ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xf;
+ if (enable_shub_wars_1_1() ) {
+ // Set bit one of ICMR to prevent the II from sending an interrupt (II bug workaround).
+ ii_icmr.ii_icmr_regval |= 0x1;
+ }
+ REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);
+
+ /*
+ * Set the number of CRBs that both of the BTEs combined
+ * can use minus 1.
+ */
+ ii_ibcr.ii_ibcr_regval= 0x0;
+ ii_ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
+ if (ii_ilcsr.ii_ilcsr_fld_s.i_llp_stat & LNK_STAT_WORKING) {
+ ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
+ } else {
+ /*
+ * if the LLP is down, there is no attached I/O, so
+ * give BTE all the CRBs.
+ */
+ ii_ibcr.ii_ibcr_fld_s.i_count = 0x14;
+ }
+ REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);
+
+ /*
+ * Set CRB timeout to be 10ms.
+ */
+ REMOTE_HUB_S(nasid, IIO_ICTP, 0xffffff );
+ REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
+
+ /* Initialize error interrupts for this hub. */
+ hub_error_init(cnode);
+}
+
+/*
+ * This routine is responsible for the setup of all the IRIX hwgraph style
+ * stuff that's been pulled into linux. It's called by sn_pci_find_bios which
+ * is called just before the generic Linux PCI layer does its probing (by
+ * platform_pci_fixup aka sn_pci_fixup).
+ *
+ * It is very IMPORTANT that this call is only made by the Master CPU!
+ *
+ */
+
+void
+sgi_master_io_infr_init(void)
+{
+ extern void irix_io_init(void);
+
+ init_hcl(); /* Sets up the hwgraph compatibility layer with devfs */
+ irix_io_init(); /* Do IRIX Compatibility IO Init */
+
+#ifdef CONFIG_KDB
+ {
+ extern void kdba_io_init(void);
+ kdba_io_init();
+ }
+#endif
+
+}
diff --git a/arch/ia64/sn/io/sgi_if.c b/arch/ia64/sn/io/sgi_if.c
index 2303cba48f3c4b..a4d6539011327e 100644
--- a/arch/ia64/sn/io/sgi_if.c
+++ b/arch/ia64/sn/io/sgi_if.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -20,8 +20,6 @@
#include <asm/sn/pci/pciio.h>
#include <asm/sn/slotnum.h>
-unsigned char Is_pic_on_this_nasid[512]; /* non-0 when this is a pic shub */
-
void *
snia_kmem_zalloc(size_t size, int flag)
{
@@ -37,13 +35,6 @@ snia_kmem_free(void *ptr, size_t size)
kfree(ptr);
}
-int
-nic_vertex_info_match(devfs_handle_t v, char *s)
-{
- /* we don't support this */
- return(0);
-}
-
/*
* the alloc/free_node routines do a simple kmalloc for now ..
*/
@@ -104,34 +95,6 @@ atoi(register char *p)
return (neg ? n : -n);
}
-char *
-strtok_r(char *string, const char *sepset, char **lasts)
-{
- register char *q, *r;
-
- /*first or subsequent call*/
- if (string == NULL)
- string = *lasts;
-
- if(string == 0) /* return if no tokens remaining */
- return(NULL);
-
- q = string + strspn(string, sepset); /* skip leading separators */
-
- if(*q == '\0') { /* return if no tokens remaining */
- *lasts = 0; /* indicate this is last token */
- return(NULL);
- }
-
- if((r = strpbrk(q, sepset)) == NULL) /* move past token */
- *lasts = 0; /* indicate this is last token */
- else {
- *r = '\0';
- *lasts = r+1;
- }
- return(q);
-}
-
/*
* print_register() allows formatted printing of bit fields. individual
* bit fields are described by a struct reg_desc, multiple bit fields within
diff --git a/arch/ia64/sn/io/sgi_io_init.c b/arch/ia64/sn/io/sgi_io_init.c
deleted file mode 100644
index c984fe6b7118c7..00000000000000
--- a/arch/ia64/sn/io/sgi_io_init.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/pci/pciba.h>
-#include <linux/smp.h>
-
-extern void mlreset(int );
-extern int init_hcl(void);
-extern void klgraph_hack_init(void);
-extern void hubspc_init(void);
-extern void pciio_init(void);
-extern void pcibr_init(void);
-extern void xtalk_init(void);
-extern void xbow_init(void);
-extern void xbmon_init(void);
-extern void pciiox_init(void);
-extern void usrpci_init(void);
-extern void ioc3_init(void);
-extern void initialize_io(void);
-#if defined(CONFIG_IA64_SGI_SN1)
-extern void intr_clear_all(nasid_t);
-#endif
-extern void klhwg_add_all_modules(devfs_handle_t);
-extern void klhwg_add_all_nodes(devfs_handle_t);
-
-void sn_mp_setup(void);
-extern devfs_handle_t hwgraph_root;
-extern void io_module_init(void);
-extern void pci_bus_cvlink_init(void);
-extern void temp_hack(void);
-
-extern int pci_bus_to_hcl_cvlink(void);
-
-/* #define DEBUG_IO_INIT */
-#ifdef DEBUG_IO_INIT
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif /* DEBUG_IO_INIT */
-
-/*
- * per_hub_init
- *
- * This code is executed once for each Hub chip.
- */
-static void
-per_hub_init(cnodeid_t cnode)
-{
- nasid_t nasid;
- nodepda_t *npdap;
- ii_icmr_u_t ii_icmr;
- ii_ibcr_u_t ii_ibcr;
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- ASSERT(nasid != INVALID_NASID);
- ASSERT(NASID_TO_COMPACT_NODEID(nasid) == cnode);
-
- npdap = NODEPDA(cnode);
-
-#if defined(CONFIG_IA64_SGI_SN1)
- /* initialize per-node synergy perf instrumentation */
- npdap->synergy_perf_enabled = 0; /* off by default */
- npdap->synergy_perf_lock = SPIN_LOCK_UNLOCKED;
- npdap->synergy_perf_freq = SYNERGY_PERF_FREQ_DEFAULT;
- npdap->synergy_inactive_intervals = 0;
- npdap->synergy_active_intervals = 0;
- npdap->synergy_perf_data = NULL;
- npdap->synergy_perf_first = NULL;
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
- /*
- * Set the total number of CRBs that can be used.
- */
- ii_icmr.ii_icmr_regval= 0x0;
- ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xF;
- REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);
-
- /*
- * Set the number of CRBs that both of the BTEs combined
- * can use minus 1.
- */
- ii_ibcr.ii_ibcr_regval= 0x0;
- ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
- REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);
-
- /*
- * Set CRB timeout to be 10ms.
- */
- REMOTE_HUB_S(nasid, IIO_ICTP, 0x1000 );
- REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
-
-
-#if defined(CONFIG_IA64_SGI_SN1)
- /* Reserve all of the hardwired interrupt levels. */
- intr_reserve_hardwired(cnode);
-#endif
-
- /* Initialize error interrupts for this hub. */
- hub_error_init(cnode);
-}
-
-/*
- * This routine is responsible for the setup of all the IRIX hwgraph style
- * stuff that's been pulled into linux. It's called by sn1_pci_find_bios which
- * is called just before the generic Linux PCI layer does its probing (by
- * platform_pci_fixup aka sn1_pci_fixup).
- *
- * It is very IMPORTANT that this call is only made by the Master CPU!
- *
- */
-
-void
-sgi_master_io_infr_init(void)
-{
- int cnode;
-
- /*
- * Do any early init stuff .. einit_tbl[] etc.
- */
- DBG("--> sgi_master_io_infr_init: calling init_hcl().\n");
- init_hcl(); /* Sets up the hwgraph compatibility layer with devfs */
-
- /*
- * initialize the Linux PCI to xwidget vertexes ..
- */
- DBG("--> sgi_master_io_infr_init: calling pci_bus_cvlink_init().\n");
- pci_bus_cvlink_init();
-
-#ifdef BRINGUP
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * Hack to provide statically initialized klgraph entries.
- */
- DBG("--> sgi_master_io_infr_init: calling klgraph_hack_init()\n");
- klgraph_hack_init();
-#endif /* CONFIG_IA64_SGI_SN1 */
-#endif /* BRINGUP */
-
- /*
- * This is the Master CPU. Emulate mlsetup and main.c in Irix.
- */
- DBG("--> sgi_master_io_infr_init: calling mlreset(0).\n");
- mlreset(0); /* Master .. */
-
- /*
- * allowboot() is called by kern/os/main.c in main()
- * Emulate allowboot() ...
- * per_cpu_init() - only need per_hub_init()
- * cpu_io_setup() - Nothing to do.
- *
- */
- DBG("--> sgi_master_io_infr_init: calling sn_mp_setup().\n");
- sn_mp_setup();
-
- DBG("--> sgi_master_io_infr_init: calling per_hub_init(0).\n");
- for (cnode = 0; cnode < numnodes; cnode++) {
- per_hub_init(cnode);
- }
-
- /* We can do headless hub cnodes here .. */
-
- /*
- * io_init[] stuff.
- *
- * Get SGI IO Infrastructure drivers to init and register with
- * each other etc.
- */
-
- DBG("--> sgi_master_io_infr_init: calling hubspc_init()\n");
- hubspc_init();
-
- DBG("--> sgi_master_io_infr_init: calling pciio_init()\n");
- pciio_init();
-
- DBG("--> sgi_master_io_infr_init: calling pcibr_init()\n");
- pcibr_init();
-
- DBG("--> sgi_master_io_infr_init: calling xtalk_init()\n");
- xtalk_init();
-
- DBG("--> sgi_master_io_infr_init: calling xbow_init()\n");
- xbow_init();
-
- DBG("--> sgi_master_io_infr_init: calling xbmon_init()\n");
- xbmon_init();
-
- DBG("--> sgi_master_io_infr_init: calling pciiox_init()\n");
- pciiox_init();
-
- DBG("--> sgi_master_io_infr_init: calling usrpci_init()\n");
- usrpci_init();
-
- DBG("--> sgi_master_io_infr_init: calling ioc3_init()\n");
- ioc3_init();
-
- /*
- *
- * Our IO Infrastructure drivers are in place ..
- * Initialize the whole IO Infrastructure .. xwidget/device probes.
- *
- */
- DBG("--> sgi_master_io_infr_init: Start Probe and IO Initialization\n");
- initialize_io();
-
- DBG("--> sgi_master_io_infr_init: Setting up SGI IO Links for Linux PCI\n");
- pci_bus_to_hcl_cvlink();
-
-#ifdef CONFIG_PCIBA
- DBG("--> sgi_master_io_infr_init: calling pciba_init()\n");
- pciba_init();
-#endif
-
- DBG("--> Leave sgi_master_io_infr_init: DONE setting up SGI Links for PCI\n");
-}
-
-/*
- * sgi_slave_io_infr_init - This routine must be called on all cpus except
- * the Master CPU.
- */
-void
-sgi_slave_io_infr_init(void)
-{
- /* Emulate cboot() .. */
- mlreset(1); /* This is a slave cpu */
-
- // per_hub_init(0); /* Need to get and send in actual cnode number */
-
- /* Done */
-}
-
-/*
- * One-time setup for MP SN.
- * Allocate per-node data, slurp prom klconfig information and
- * convert it to hwgraph information.
- */
-void
-sn_mp_setup(void)
-{
- cnodeid_t cnode;
- cpuid_t cpu;
-
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- /* Skip holes in CPU space */
- if (cpu_enabled(cpu)) {
- init_platform_pda(cpu);
- }
- }
-
- /*
- * Initialize platform-dependent vertices in the hwgraph:
- * module
- * node
- * cpu
- * memory
- * slot
- * hub
- * router
- * xbow
- */
-
- DBG("sn_mp_io_setup: calling io_module_init()\n");
- io_module_init(); /* Used to be called module_init() .. */
-
- DBG("sn_mp_setup: calling klhwg_add_all_modules()\n");
- klhwg_add_all_modules(hwgraph_root);
- DBG("sn_mp_setup: calling klhwg_add_all_nodes()\n");
- klhwg_add_all_nodes(hwgraph_root);
-
-
- for (cnode = 0; cnode < numnodes; cnode++) {
-
- /*
- * This routine clears the Hub's Interrupt registers.
- */
- /*
- * We need to move this intr_clear_all() routine
- * from SN/intr.c to a more appropriate file.
- * Talk to Al Mayer.
- */
-#if defined(CONFIG_IA64_SGI_SN1)
- intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
-#endif
- /* now init the hub */
- // per_hub_init(cnode);
-
- }
-
-#if defined(CONFIG_IA64_SGI_SN1)
- synergy_perf_init();
-#endif
-
-}
diff --git a/arch/ia64/sn/io/sgi_io_sim.c b/arch/ia64/sn/io/sgi_io_sim.c
index ba119a2cd8e4b7..056fb53d09a537 100644
--- a/arch/ia64/sn/io/sgi_io_sim.c
+++ b/arch/ia64/sn/io/sgi_io_sim.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
@@ -15,18 +15,11 @@
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/module.h>
-#include <asm/sn/nic.h>
#include <asm/sn/sn_private.h>
-cpuid_t master_procid = 0;
+cpuid_t master_procid;
char arg_maxnodes[4];
-extern void init_all_devices(void);
-
-#if defined(CONFIG_IA64_SGI_SN1)
-synergy_da_t *Synergy_da_indr[MAX_COMPACT_NODES * 2];
-#endif
-
/*
* Return non-zero if the given variable was specified
*/
@@ -36,44 +29,12 @@ is_specified(char *s)
return (strlen(s) != 0);
}
-void xbmon_init(void)
-{
- FIXME("xbmon_init : no-op\n");
-
-}
-
-void pciiox_init(void)
-{
- FIXME("pciiox_init : no-op\n");
-
-}
-
-void usrpci_init(void)
-{
- FIXME("usrpci_init : no-op\n");
-
-}
-
-void ioc3_init(void)
-{
- FIXME("ioc3_init : no-op\n");
-
-}
-
-void initialize_io(void)
-{
-
- init_all_devices();
-}
-
/*
* Routines provided by ml/SN/promif.c.
*/
-static __psunsigned_t master_bridge_base = (__psunsigned_t)NULL;
+static __psunsigned_t master_bridge_base;
nasid_t console_nasid = (nasid_t)-1;
-#if !defined(CONFIG_IA64_SGI_SN1)
char master_baseio_wid;
-#endif
static char console_wid;
static char console_pcislot;
@@ -95,27 +56,6 @@ check_nasid_equiv(nasid_t nasida, nasid_t nasidb)
return 0;
}
-#if defined(CONFIG_IA64_SGI_SN1)
-int
-is_master_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid)
-{
-
- /*
- * If the widget numbers are different, we're not the master.
- */
- if (test_wid != (xwidgetnum_t)console_wid)
- return 0;
-
- /*
- * If the NASIDs are the same or equivalent, we're the master.
- */
- if (check_nasid_equiv(test_nasid, console_nasid)) {
- return 1;
- } else {
- return 0;
- }
-}
-#else
int
is_master_baseio_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid)
{
@@ -137,14 +77,3 @@ is_master_baseio_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid)
return 0;
}
}
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-/*
- * Routines provided by ml/SN/nvram.c
- */
-void
-nvram_baseinit(void)
-{
- FIXME("nvram_baseinit : no-op\n");
-
-}
diff --git a/arch/ia64/sn/io/sn1/hub_intr.c b/arch/ia64/sn/io/sn1/hub_intr.c
deleted file mode 100644
index 474f6c6b28524d..00000000000000
--- a/arch/ia64/sn/io/sn1/hub_intr.c
+++ /dev/null
@@ -1,307 +0,0 @@
-/* $Id: hub_intr.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/types.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/iograph.h>
-#include <asm/param.h>
-#include <asm/sn/pio.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/xtalk/xtalkaddrs.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_cpuid.h>
-
-extern xtalk_provider_t hub_provider;
-
-/* ARGSUSED */
-void
-hub_intr_init(devfs_handle_t hubv)
-{
-}
-
-/*
- * hub_device_desc_update
- * Update the passed in device descriptor with the actual the
- * target cpu number and interrupt priority level.
- * NOTE : These might be the same as the ones passed in thru
- * the descriptor.
- */
-static void
-hub_device_desc_update(device_desc_t dev_desc,
- ilvl_t intr_swlevel,
- cpuid_t cpu)
-{
-}
-
-int allocate_my_bit = INTRCONNECT_ANYBIT;
-
-/*
- * Allocate resources required for an interrupt as specified in dev_desc.
- * Returns a hub interrupt handle on success, or 0 on failure.
- */
-static hub_intr_t
-do_hub_intr_alloc(devfs_handle_t dev, /* which crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev, /* owner of this interrupt, if known */
- int uncond_nothread) /* unconditionally non-threaded */
-{
- cpuid_t cpu = (cpuid_t)0; /* cpu to receive interrupt */
- int cpupicked = 0;
- int bit; /* interrupt vector */
- /*REFERENCED*/
- int intr_resflags = 0;
- hub_intr_t intr_hdl;
- cnodeid_t nodeid; /* node to receive interrupt */
- /*REFERENCED*/
- nasid_t nasid; /* nasid to receive interrupt */
- struct xtalk_intr_s *xtalk_info;
- iopaddr_t xtalk_addr; /* xtalk addr on hub to set intr */
- xwidget_info_t xwidget_info; /* standard crosstalk widget info handle */
- char *intr_name = NULL;
- ilvl_t intr_swlevel = (ilvl_t)0;
- extern int default_intr_pri;
- extern void synergy_intr_alloc(int, int);
-
-
- if (dev_desc) {
- if (dev_desc->flags & D_INTR_ISERR) {
- intr_resflags = II_ERRORINT;
- } else if (!uncond_nothread && !(dev_desc->flags & D_INTR_NOTHREAD)) {
- intr_resflags = II_THREADED;
- } else {
- /* Neither an error nor a thread. */
- intr_resflags = 0;
- }
- } else {
- intr_swlevel = default_intr_pri;
- if (!uncond_nothread)
- intr_resflags = II_THREADED;
- }
-
- /* XXX - Need to determine if the interrupt should be threaded. */
-
- /* If the cpu has not been picked already then choose a candidate
- * interrupt target and reserve the interrupt bit
- */
- if (!cpupicked) {
- cpu = intr_heuristic(dev,dev_desc,allocate_my_bit,
- intr_resflags,owner_dev,
- intr_name,&bit);
- }
-
- /* At this point we SHOULD have a valid cpu */
- if (cpu == CPU_NONE) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "%v hub_intr_alloc could not allocate interrupt\n",
- owner_dev);
-#else
- printk(KERN_WARNING "%p hub_intr_alloc could not allocate interrupt\n",
- (void *)owner_dev);
-#endif
- return(0);
-
- }
-
- /* If the cpu has been picked already (due to the bridge data
- * corruption bug) then try to reserve an interrupt bit.
- */
- if (cpupicked) {
- bit = intr_reserve_level(cpu, allocate_my_bit,
- intr_resflags,
- owner_dev, intr_name);
- if (bit < 0) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "Could not reserve an interrupt bit for cpu "
- " %d and dev %v\n",
- cpu,owner_dev);
-#else
- printk(KERN_WARNING "Could not reserve an interrupt bit for cpu "
- " %d and dev %p\n",
- (int)cpu, (void *)owner_dev);
-#endif
-
- return(0);
- }
- }
-
- nodeid = cpuid_to_cnodeid(cpu);
- nasid = cpuid_to_nasid(cpu);
- xtalk_addr = HUBREG_AS_XTALKADDR(nasid, PIREG(PI_INT_PEND_MOD, cpuid_to_subnode(cpu)));
-
- /*
- * Allocate an interrupt handle, and fill it in. There are two
- * pieces to an interrupt handle: the piece needed by generic
- * xtalk code which is used by crosstalk device drivers, and
- * the piece needed by low-level IP27 hardware code.
- */
- intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, nodeid);
- ASSERT_ALWAYS(intr_hdl);
-
- /*
- * Fill in xtalk information for generic xtalk interfaces that
- * operate on xtalk_intr_hdl's.
- */
- xtalk_info = &intr_hdl->i_xtalk_info;
- xtalk_info->xi_dev = dev;
- xtalk_info->xi_vector = bit;
- xtalk_info->xi_addr = xtalk_addr;
-
- /*
- * Regardless of which CPU we ultimately interrupt, a given crosstalk
- * widget always handles interrupts (and PIO and DMA) through its
- * designated "master" crosstalk provider.
- */
- xwidget_info = xwidget_info_get(dev);
- if (xwidget_info)
- xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);
-
- /* Fill in low level hub information for hub_* interrupt interface */
- intr_hdl->i_swlevel = intr_swlevel;
- intr_hdl->i_cpuid = cpu;
- intr_hdl->i_bit = bit;
- intr_hdl->i_flags = HUB_INTR_IS_ALLOCED;
-
- /* Store the actual interrupt priority level & interrupt target
- * cpu back in the device descriptor.
- */
- hub_device_desc_update(dev_desc, intr_swlevel, cpu);
- synergy_intr_alloc((int)bit, (int)cpu);
- return(intr_hdl);
-}
-
-/*
- * Allocate resources required for an interrupt as specified in dev_desc.
- * Returns a hub interrupt handle on success, or 0 on failure.
- */
-hub_intr_t
-hub_intr_alloc( devfs_handle_t dev, /* which crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev) /* owner of this interrupt, if known */
-{
- return(do_hub_intr_alloc(dev, dev_desc, owner_dev, 0));
-}
-
-/*
- * Allocate resources required for an interrupt as specified in dev_desc.
- * Unconditionally request non-threaded, regardless of what the device
- * descriptor might say.
- * Returns a hub interrupt handle on success, or 0 on failure.
- */
-hub_intr_t
-hub_intr_alloc_nothd(devfs_handle_t dev, /* which crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev) /* owner of this interrupt, if known */
-{
- return(do_hub_intr_alloc(dev, dev_desc, owner_dev, 1));
-}
-
-/*
- * Free resources consumed by intr_alloc.
- */
-void
-hub_intr_free(hub_intr_t intr_hdl)
-{
- cpuid_t cpu = intr_hdl->i_cpuid;
- int bit = intr_hdl->i_bit;
- xtalk_intr_t xtalk_info;
-
- if (intr_hdl->i_flags & HUB_INTR_IS_CONNECTED) {
- /* Setting the following fields in the xtalk interrupt info
- * clears the interrupt target register in the xtalk user
- */
- xtalk_info = &intr_hdl->i_xtalk_info;
- xtalk_info->xi_dev = NODEV;
- xtalk_info->xi_vector = 0;
- xtalk_info->xi_addr = 0;
- hub_intr_disconnect(intr_hdl);
- }
-
- if (intr_hdl->i_flags & HUB_INTR_IS_ALLOCED)
- kfree(intr_hdl);
-
- intr_unreserve_level(cpu, bit);
-}
-
-
-/*
- * Associate resources allocated with a previous hub_intr_alloc call with the
- * described handler, arg, name, etc.
- */
-/*ARGSUSED*/
-int
-hub_intr_connect( hub_intr_t intr_hdl, /* xtalk intr resource handle */
- xtalk_intr_setfunc_t setfunc, /* func to set intr hw */
- void *setfunc_arg) /* arg to setfunc */
-{
- int rv;
- cpuid_t cpu = intr_hdl->i_cpuid;
- int bit = intr_hdl->i_bit;
- extern int synergy_intr_connect(int, int);
-
- ASSERT(intr_hdl->i_flags & HUB_INTR_IS_ALLOCED);
-
- rv = intr_connect_level(cpu, bit, intr_hdl->i_swlevel, NULL);
- if (rv < 0)
- return(rv);
-
- intr_hdl->i_xtalk_info.xi_setfunc = setfunc;
- intr_hdl->i_xtalk_info.xi_sfarg = setfunc_arg;
-
- if (setfunc) (*setfunc)((xtalk_intr_t)intr_hdl);
-
- intr_hdl->i_flags |= HUB_INTR_IS_CONNECTED;
- return(synergy_intr_connect((int)bit, (int)cpu));
-}
-
-
-/*
- * Disassociate handler with the specified interrupt.
- */
-void
-hub_intr_disconnect(hub_intr_t intr_hdl)
-{
- /*REFERENCED*/
- int rv;
- cpuid_t cpu = intr_hdl->i_cpuid;
- int bit = intr_hdl->i_bit;
- xtalk_intr_setfunc_t setfunc;
-
- setfunc = intr_hdl->i_xtalk_info.xi_setfunc;
-
- /* TBD: send disconnected interrupts somewhere harmless */
- if (setfunc) (*setfunc)((xtalk_intr_t)intr_hdl);
-
- rv = intr_disconnect_level(cpu, bit);
- ASSERT(rv == 0);
- intr_hdl->i_flags &= ~HUB_INTR_IS_CONNECTED;
-}
-
-
-/*
- * Return a hwgraph vertex that represents the CPU currently
- * targeted by an interrupt.
- */
-devfs_handle_t
-hub_intr_cpu_get(hub_intr_t intr_hdl)
-{
- cpuid_t cpuid = intr_hdl->i_cpuid;
- ASSERT(cpuid != CPU_NONE);
-
- return(cpuid_to_vertex(cpuid));
-}
diff --git a/arch/ia64/sn/io/sn1/hubcounters.c b/arch/ia64/sn/io/sn1/hubcounters.c
deleted file mode 100644
index 0d3717fb69bbdb..00000000000000
--- a/arch/ia64/sn/io/sn1/hubcounters.c
+++ /dev/null
@@ -1,283 +0,0 @@
-/* $Id: hubcounters.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997,2000-2002 Silicon Graphics, Inc.
- * All rights reserved.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <asm/types.h>
-#include <asm/sn/io.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/router.h>
-#include <asm/sn/snconfig.h>
-#include <asm/sn/slotnum.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/sndrv.h>
-
-extern void hubni_error_handler(char *, int); /* huberror.c */
-
-static int hubstats_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
-struct file_operations hub_mon_fops = {
- ioctl: hubstats_ioctl,
-};
-
-#define HUB_CAPTURE_TICKS (2 * HZ)
-
-#define HUB_ERR_THRESH 500
-#define USEC_PER_SEC 1000000
-#define NSEC_PER_SEC USEC_PER_SEC*1000
-
-volatile int hub_print_usecs = 600 * USEC_PER_SEC;
-
-/* Return success if the hub's crosstalk link is working */
-int
-hub_xtalk_link_up(nasid_t nasid)
-{
- hubreg_t llp_csr_reg;
-
- /* Read the IO LLP control status register */
- llp_csr_reg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
-
- /* Check if the xtalk link is working */
- if (llp_csr_reg & IIO_LLP_CSR_IS_UP)
- return(1);
-
- return(0);
-
-
-}
-
-static char *error_flag_to_type(unsigned char error_flag)
-{
- switch(error_flag) {
- case 0x1: return ("NI retries");
- case 0x2: return ("NI SN errors");
- case 0x4: return ("NI CB errors");
- case 0x8: return ("II CB errors");
- case 0x10: return ("II SN errors");
- default: return ("Errors");
- }
-}
-
-int
-print_hub_error(hubstat_t *hsp, hubreg_t reg,
- int64_t delta, unsigned char error_flag)
-{
- int64_t rate;
-
- reg *= hsp->hs_per_minute; /* Convert to minutes */
- rate = reg / delta;
-
- if (rate > HUB_ERR_THRESH) {
-
- if(hsp->hs_maint & error_flag)
- {
- printk( "Excessive %s (%ld/min) on %s",
- error_flag_to_type(error_flag), rate, hsp->hs_name);
- }
- else
- {
- hsp->hs_maint |= error_flag;
- printk( "Excessive %s (%ld/min) on %s",
- error_flag_to_type(error_flag), rate, hsp->hs_name);
- }
- return 1;
- } else {
- return 0;
- }
-}
-
-
-int
-check_hub_error_rates(hubstat_t *hsp)
-{
- int64_t delta = hsp->hs_timestamp - hsp->hs_timebase;
- int printed = 0;
-
- printed += print_hub_error(hsp, hsp->hs_ni_retry_errors,
- delta, 0x1);
-
-#if 0
- printed += print_hub_error(hsp, hsp->hs_ni_sn_errors,
- delta, 0x2);
-#endif
-
- printed += print_hub_error(hsp, hsp->hs_ni_cb_errors,
- delta, 0x4);
-
-
- /* If the hub's xtalk link is not working there is
- * no need to print the "Excessive..." warning
- * messages
- */
- if (!hub_xtalk_link_up(hsp->hs_nasid))
- return(printed);
-
-
- printed += print_hub_error(hsp, hsp->hs_ii_cb_errors,
- delta, 0x8);
-
- printed += print_hub_error(hsp, hsp->hs_ii_sn_errors,
- delta, 0x10);
-
- return printed;
-}
-
-
-void
-capture_hub_stats(cnodeid_t cnodeid, struct nodepda_s *npda)
-{
- nasid_t nasid;
- hubstat_t *hsp = &(npda->hubstats);
- hubreg_t port_error;
- ii_illr_u_t illr;
- int count;
- int overflow = 0;
-
- /*
- * If our link wasn't up at boot time, don't worry about error rates.
- */
- if (!(hsp->hs_ni_port_status & NPS_LINKUP_MASK)) {
- printk("capture_hub_stats: cnode=%d hs_ni_port_status=0x%016lx : link is not up\n",
- cnodeid, hsp->hs_ni_port_status);
- return;
- }
-
- nasid = COMPACT_TO_NASID_NODEID(cnodeid);
-
- hsp->hs_timestamp = GET_RTC_COUNTER();
-
- port_error = REMOTE_HUB_L(nasid, NI_PORT_ERROR_CLEAR);
- count = ((port_error & NPE_RETRYCOUNT_MASK) >> NPE_RETRYCOUNT_SHFT);
- hsp->hs_ni_retry_errors += count;
- if (count == NPE_COUNT_MAX)
- overflow = 1;
- count = ((port_error & NPE_SNERRCOUNT_MASK) >> NPE_SNERRCOUNT_SHFT);
- hsp->hs_ni_sn_errors += count;
- if (count == NPE_COUNT_MAX)
- overflow = 1;
- count = ((port_error & NPE_CBERRCOUNT_MASK) >> NPE_CBERRCOUNT_SHFT);
- hsp->hs_ni_cb_errors += count;
- if (overflow || count == NPE_COUNT_MAX)
- hsp->hs_ni_overflows++;
-
- if (port_error & NPE_FATAL_ERRORS) {
-#ifdef ajm
- hubni_error_handler("capture_hub_stats", 1);
-#else
- printk("Error: hubni_error_handler in capture_hub_stats");
-#endif
- }
-
- illr.ii_illr_regval = REMOTE_HUB_L(nasid, IIO_LLP_LOG);
- REMOTE_HUB_S(nasid, IIO_LLP_LOG, 0);
-
- hsp->hs_ii_sn_errors += illr.ii_illr_fld_s.i_sn_cnt;
- hsp->hs_ii_cb_errors += illr.ii_illr_fld_s.i_cb_cnt;
- if ((illr.ii_illr_fld_s.i_sn_cnt == IIO_LLP_SN_MAX) ||
- (illr.ii_illr_fld_s.i_cb_cnt == IIO_LLP_CB_MAX))
- hsp->hs_ii_overflows++;
-
- if (hsp->hs_print) {
- if (check_hub_error_rates(hsp)) {
- hsp->hs_last_print = GET_RTC_COUNTER();
- hsp->hs_print = 0;
- }
- } else {
- if ((GET_RTC_COUNTER() -
- hsp->hs_last_print) > hub_print_usecs)
- hsp->hs_print = 1;
- }
-
- npda->hubticks = HUB_CAPTURE_TICKS;
-}
-
-
-void
-init_hub_stats(cnodeid_t cnodeid, struct nodepda_s *npda)
-{
- hubstat_t *hsp = &(npda->hubstats);
- nasid_t nasid = cnodeid_to_nasid(cnodeid);
- bzero(&(npda->hubstats), sizeof(hubstat_t));
-
- hsp->hs_version = HUBSTAT_VERSION;
- hsp->hs_cnode = cnodeid;
- hsp->hs_nasid = nasid;
- hsp->hs_timebase = GET_RTC_COUNTER();
- hsp->hs_ni_port_status = REMOTE_HUB_L(nasid, NI_PORT_STATUS);
-
- /* Clear the II error counts. */
- REMOTE_HUB_S(nasid, IIO_LLP_LOG, 0);
-
- /* Clear the NI counts. */
- REMOTE_HUB_L(nasid, NI_PORT_ERROR_CLEAR);
-
- hsp->hs_per_minute = (long long)RTC_CYCLES_PER_SEC * 60LL;
-
- npda->hubticks = HUB_CAPTURE_TICKS;
-
- /* XX should use kmem_alloc_node */
- hsp->hs_name = (char *)kmalloc(MAX_HUB_PATH, GFP_KERNEL);
- ASSERT_ALWAYS(hsp->hs_name);
-
- sprintf(hsp->hs_name, "/dev/hw/" EDGE_LBL_MODULE "/%03d/"
- EDGE_LBL_NODE "/" EDGE_LBL_HUB,
- npda->module_id);
-
- hsp->hs_last_print = 0;
- hsp->hs_print = 1;
-
- hub_print_usecs = hub_print_usecs;
-
-#if 0
- printk("init_hub_stats: cnode=%d nasid=%d hs_version=%d hs_ni_port_status=0x%016lx\n",
- cnodeid, nasid, hsp->hs_version, hsp->hs_ni_port_status);
-#endif
-}
-
-static int
-hubstats_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- cnodeid_t cnode;
- nodepda_t *npdap;
- uint64_t longarg;
- devfs_handle_t d;
-
- if ((d = devfs_get_handle_from_inode(inode)) == NULL)
- return -ENODEV;
- cnode = (cnodeid_t)hwgraph_fastinfo_get(d);
- npdap = NODEPDA(cnode);
-
- if (npdap->hubstats.hs_version != HUBSTAT_VERSION) {
- init_hub_stats(cnode, npdap);
- }
-
- switch (cmd) {
- case SNDRV_GET_INFOSIZE:
- longarg = sizeof(hubstat_t);
- if (copy_to_user((void *)arg, &longarg, sizeof(longarg))) {
- return -EFAULT;
- }
- break;
-
- case SNDRV_GET_HUBINFO:
- /* refresh npda->hubstats */
- capture_hub_stats(cnode, npdap);
- if (copy_to_user((void *)arg, &npdap->hubstats, sizeof(hubstat_t))) {
- return -EFAULT;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
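
For reference, print_hub_error() above converts a raw error count into an errors-per-minute rate by scaling with hs_per_minute (RTC_CYCLES_PER_SEC * 60, per init_hub_stats()) and dividing by the elapsed RTC cycle count, then compares the result against HUB_ERR_THRESH. A minimal standalone sketch of that arithmetic, with an assumed cycle frequency standing in for RTC_CYCLES_PER_SEC:

#include <stdio.h>
#include <stdint.h>

#define ERR_THRESH        500            /* mirrors HUB_ERR_THRESH */
#define CYCLES_PER_SEC    800000000LL    /* assumed RTC frequency, for illustration only */

/* Convert a raw error count sampled over 'delta_cycles' RTC cycles into an
 * errors-per-minute rate and report whether it exceeds the threshold. */
static int rate_exceeds_thresh(int64_t count, int64_t delta_cycles)
{
        int64_t per_minute = CYCLES_PER_SEC * 60LL;     /* like hs_per_minute */
        int64_t rate = (count * per_minute) / delta_cycles;

        if (rate > ERR_THRESH) {
                printf("excessive errors: %lld/min\n", (long long)rate);
                return 1;
        }
        return 0;
}

int main(void)
{
        /* 10 retries seen over one second's worth of cycles -> 600/min */
        rate_exceeds_thresh(10, CYCLES_PER_SEC);
        return 0;
}

With 10 retries observed over one second of cycles, the computed rate is 600/min, which trips the 500/min threshold just as the code above would.
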
diff --git a/arch/ia64/sn/io/sn1/huberror.c b/arch/ia64/sn/io/sn1/huberror.c
deleted file mode 100644
index 67780a759e6979..00000000000000
--- a/arch/ia64/sn/io/sn1/huberror.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/* $Id: huberror.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/smp.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/intr.h>
-
-extern void hubni_eint_init(cnodeid_t cnode);
-extern void hubii_eint_init(cnodeid_t cnode);
-extern void hubii_eint_handler (int irq, void *arg, struct pt_regs *ep);
-extern void snia_error_intr_handler(int irq, void *devid, struct pt_regs *pt_regs);
-
-extern int maxcpus;
-
-#define HUB_ERROR_PERIOD (120 * HZ) /* 2 minutes */
-
-
-void
-hub_error_clear(nasid_t nasid)
-{
- int i;
- hubreg_t idsr;
- int sn;
-
- for(sn=0; sn<NUM_SUBNODES; sn++) {
- REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_PEND, -1);
- REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STATUS0_A_CLR, -1);
- REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STATUS0_B_CLR, -1);
- REMOTE_HUB_PI_S(nasid, sn, PI_SPURIOUS_HDR_0, 0);
- REMOTE_HUB_PI_S(nasid, sn, PI_SPURIOUS_HDR_1, 0);
- }
-
- REMOTE_HUB_L(nasid, MD_DIR_ERROR_CLR);
- REMOTE_HUB_L(nasid, MD_MEM_ERROR_CLR);
- REMOTE_HUB_L(nasid, MD_MISC1_ERROR_CLR);
- REMOTE_HUB_L(nasid, MD_PROTOCOL_ERR_CLR);
-
- /*
- * Make sure spurious write response errors are cleared
- * (values are from hub_set_prb())
- */
- for (i = 0; i <= HUB_WIDGET_ID_MAX - HUB_WIDGET_ID_MIN + 1; i++) {
- iprb_t prb;
-
- prb.iprb_regval = REMOTE_HUB_L(nasid, IIO_IOPRB_0 + (i * sizeof(hubreg_t)));
-
- /* Clear out some fields */
- prb.iprb_ovflow = 1;
- prb.iprb_bnakctr = 0;
- prb.iprb_anakctr = 0;
-
- /*
- * PIO reads in fire-and-forget mode on bedrock 1.0 don't
- * frob the credit count properly, making the responses appear
- * spurious. So don't use fire-and-forget mode. Bug 761802.
- */
- prb.iprb_ff = 0; /* disable fire-and-forget mode by default */
-
- prb.iprb_xtalkctr = 3; /* approx. PIO credits for the widget */
-
- REMOTE_HUB_S(nasid, IIO_IOPRB_0 + (i * sizeof(hubreg_t)), prb.iprb_regval);
- }
-
- REMOTE_HUB_S(nasid, IIO_IO_ERR_CLR, -1);
- idsr = REMOTE_HUB_L(nasid, IIO_IIDSR);
- REMOTE_HUB_S(nasid, IIO_IIDSR, (idsr & ~(IIO_IIDSR_SENT_MASK)));
-
- REMOTE_HUB_L(nasid, NI_PORT_ERROR_CLEAR);
- /* No need to clear NI_PORT_HEADER regs; they are continually overwritten*/
-
- REMOTE_HUB_S(nasid, LB_ERROR_MASK_CLR, -1);
- REMOTE_HUB_S(nasid, LB_ERROR_HDR1, 0);
-
- /* Clear XB error regs, in order */
- for (i = 0;
- i <= XB_FIRST_ERROR_CLEAR - XB_POQ0_ERROR_CLEAR;
- i += sizeof(hubreg_t)) {
- REMOTE_HUB_S(nasid, XB_POQ0_ERROR_CLEAR + i, 0);
- }
-}
-
-
-/*
- * Function : hub_error_init
- * Purpose : initialize the error handling requirements for a given hub.
- * Parameters : cnode, the compact nodeid.
- * Assumptions : Called only once per hub, either by a local cpu or by a
- * remote cpu when this hub is headless (cpuless).
- * Returns : None
- */
-
-void
-hub_error_init(cnodeid_t cnode)
-{
- nasid_t nasid;
-
- nasid = cnodeid_to_nasid(cnode);
- hub_error_clear(nasid);
-
- /*
- * Now setup the hub ii and ni error interrupt handler.
- */
-
- hubii_eint_init(cnode);
- hubni_eint_init(cnode);
-
- return;
-}
-
-/*
- * Function : hubii_eint_init
- * Parameters : cnode
- * Purpose : to initialize the hub iio error interrupt.
- * Assumptions : Called once per hub, by the cpu which will ultimately
- * handle this interrupt.
- * Returns : None.
- */
-
-
-void
-hubii_eint_init(cnodeid_t cnode)
-{
- int bit, rv;
- ii_iidsr_u_t hubio_eint;
- hubinfo_t hinfo;
- cpuid_t intr_cpu;
- devfs_handle_t hub_v;
- ii_ilcsr_u_t ilcsr;
- int bit_pos_to_irq(int bit);
- int synergy_intr_connect(int bit, int cpuid);
-
-
- hub_v = (devfs_handle_t)cnodeid_to_vertex(cnode);
- ASSERT_ALWAYS(hub_v);
- hubinfo_get(hub_v, &hinfo);
-
- ASSERT(hinfo);
- ASSERT(hinfo->h_cnodeid == cnode);
-
- ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(hinfo->h_nasid, IIO_ILCSR);
-
- if ((ilcsr.ii_ilcsr_fld_s.i_llp_stat & 0x2) == 0) {
- /*
- * HUB II link is not up.
- * Just disable LLP, and don't connect any interrupts.
- */
- ilcsr.ii_ilcsr_fld_s.i_llp_en = 0;
- REMOTE_HUB_S(hinfo->h_nasid, IIO_ILCSR, ilcsr.ii_ilcsr_regval);
- return;
- }
- /* Select a possible interrupt target where there is a free interrupt
- * bit and also reserve the interrupt bit for this IO error interrupt
- */
- intr_cpu = intr_heuristic(hub_v,0,INTRCONNECT_ANYBIT,II_ERRORINT,hub_v,
- "HUB IO error interrupt",&bit);
- if (intr_cpu == CPU_NONE) {
- printk("hubii_eint_init: intr_reserve_level failed, cnode %d", cnode);
- return;
- }
-
- rv = intr_connect_level(intr_cpu, bit, 0, NULL);
- synergy_intr_connect(bit, intr_cpu);
- request_irq(bit_pos_to_irq(bit) + (intr_cpu << 8), hubii_eint_handler, 0, "SN hub error", (void *)hub_v);
- ASSERT_ALWAYS(rv >= 0);
- hubio_eint.ii_iidsr_regval = 0;
- hubio_eint.ii_iidsr_fld_s.i_enable = 1;
- hubio_eint.ii_iidsr_fld_s.i_level = bit;/* Take the least significant bits*/
- hubio_eint.ii_iidsr_fld_s.i_node = COMPACT_TO_NASID_NODEID(cnode);
- hubio_eint.ii_iidsr_fld_s.i_pi_id = cpuid_to_subnode(intr_cpu);
- REMOTE_HUB_S(hinfo->h_nasid, IIO_IIDSR, hubio_eint.ii_iidsr_regval);
-
-}
-
-void
-hubni_eint_init(cnodeid_t cnode)
-{
- int intr_bit;
- cpuid_t targ;
-
-
- if ((targ = cnodeid_to_cpuid(cnode)) == CPU_NONE)
- return;
-
- /* The prom chooses which cpu gets these interrupts, but we
- * don't know which one it chose. We will register all of the
- * cpus to be sure. This only costs us an irqaction per cpu.
- */
- for (; targ < CPUS_PER_NODE; targ++) {
- if (!cpu_enabled(targ) ) continue;
- /* connect the INTEND1 bits. */
- for (intr_bit = XB_ERROR; intr_bit <= MSC_PANIC_INTR; intr_bit++) {
- intr_connect_level(targ, intr_bit, II_ERRORINT, NULL);
- }
- request_irq(SGI_HUB_ERROR_IRQ + (targ << 8), snia_error_intr_handler, 0, "SN hub error", NULL);
- /* synergy masks are initialized in the prom to enable all interrupts. */
- /* We'll just leave them that way, here, for these interrupts. */
- }
-}
-
-
-/*ARGSUSED*/
-void
-hubii_eint_handler (int irq, void *arg, struct pt_regs *ep)
-{
-
- panic("Hubii interrupt\n");
-}
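
hub_error_clear() above leans on the register-overlay idiom used throughout this code: a union of the raw 64-bit register value and a bitfield struct (iprb_t, ii_illr_u_t, ii_iidsr_u_t), so a register can be read once, have individual fields adjusted, and be written back whole. A standalone sketch of that read-modify-write pattern; the field layout below is invented for illustration and is not the real IOPRB format:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical 64-bit register overlay; the real iprb_t layout differs. */
typedef union demo_prb {
        uint64_t regval;
        struct {
                uint64_t xtalkctr : 4;   /* outstanding-credit count      */
                uint64_t ff       : 1;   /* fire-and-forget enable        */
                uint64_t ovflow   : 1;   /* overflow flag (write 1 to clear) */
                uint64_t unused   : 58;
        } fld;
} demo_prb_t;

static uint64_t fake_register = 0x2dULL;  /* stands in for REMOTE_HUB_L() */

int main(void)
{
        demo_prb_t prb;

        prb.regval = fake_register;      /* read the whole register      */
        prb.fld.ovflow = 1;              /* request the overflow clear   */
        prb.fld.ff = 0;                  /* disable fire-and-forget      */
        prb.fld.xtalkctr = 3;            /* restore default credits      */
        fake_register = prb.regval;      /* write it back in one shot    */

        printf("register now 0x%llx\n", (unsigned long long)fake_register);
        return 0;
}
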
diff --git a/arch/ia64/sn/io/sn1/ip37.c b/arch/ia64/sn/io/sn1/ip37.c
deleted file mode 100644
index 2ec567d54984c6..00000000000000
--- a/arch/ia64/sn/io/sn1/ip37.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * ip37.c
- * Support for IP35/IP37 machines
- */
-
-#include <linux/types.h>
-
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/pci/bridge.h> /* for bridge_t */
-
-
-xwidgetnum_t
-hub_widget_id(nasid_t nasid)
-{
- hubii_wcr_t ii_wcr; /* the control status register */
-
- ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid,IIO_WCR);
-
- return ii_wcr.wcr_fields_s.wcr_widget_id;
-}
-
-int
-is_fine_dirmode(void)
-{
- return (((LOCAL_HUB_L(LB_REV_ID) & LRI_SYSTEM_SIZE_MASK)
- >> LRI_SYSTEM_SIZE_SHFT) == SYSTEM_SIZE_SMALL);
-
-}
-
-
-void
-ni_reset_port(void)
-{
- LOCAL_HUB_S(NI_RESET_ENABLE, NRE_RESETOK);
- LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
-}
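
hub_widget_id() and is_fine_dirmode() above are plain mask-and-shift field extractions from hub registers. A tiny standalone equivalent, with invented mask, shift, and value constants standing in for LRI_SYSTEM_SIZE_MASK, LRI_SYSTEM_SIZE_SHFT, and SYSTEM_SIZE_SMALL:

#include <stdio.h>
#include <stdint.h>

/* Invented encoding: bits [5:4] of the register hold the "system size". */
#define SYS_SIZE_MASK   0x30ULL
#define SYS_SIZE_SHFT   4
#define SYS_SIZE_SMALL  0

static int is_small_system(uint64_t rev_id_reg)
{
        return ((rev_id_reg & SYS_SIZE_MASK) >> SYS_SIZE_SHFT) == SYS_SIZE_SMALL;
}

int main(void)
{
        printf("%d %d\n", is_small_system(0x0f), is_small_system(0x1f));
        return 0;
}
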
diff --git a/arch/ia64/sn/io/sn1/mem_refcnt.c b/arch/ia64/sn/io/sn1/mem_refcnt.c
deleted file mode 100644
index c39f6d2249720d..00000000000000
--- a/arch/ia64/sn/io/sn1/mem_refcnt.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/* $Id: mem_refcnt.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/sn1/mem_refcnt.h>
-#include <asm/sn/sn1/hwcntrs.h>
-#include <asm/sn/sn1/hubspc.h>
-// From numa_hw.h
-
-#define MIGR_COUNTER_MAX_GET(nodeid) \
- (NODEPDA_MCD((nodeid))->migr_system_kparms.migr_threshold_reference)
-/*
- * Get the Absolute Threshold
- */
-#define MIGR_THRESHOLD_ABS_GET(nodeid) ( \
- MD_MIG_VALUE_THRESH_GET(COMPACT_TO_NASID_NODEID(nodeid)))
-/*
- * Get the current Differential Threshold
- */
-#define MIGR_THRESHOLD_DIFF_GET(nodeid) \
- (NODEPDA_MCD(nodeid)->migr_as_kparms.migr_base_threshold)
-
-#define NUM_OF_HW_PAGES_PER_SW_PAGE() (NBPP / MD_PAGE_SIZE)
-
-// #include "migr_control.h"
-
-int
-mem_refcnt_attach(devfs_handle_t hub)
-{
-#if 0
- devfs_handle_t refcnt_dev;
-
- hwgraph_char_device_add(hub,
- "refcnt",
- "hubspc_",
- &refcnt_dev);
- device_info_set(refcnt_dev, (void*)(ulong)HUBSPC_REFCOUNTERS);
-#endif
-
- return (0);
-}
-
-
-/*ARGSUSED*/
-int
-mem_refcnt_open(devfs_handle_t *devp, mode_t oflag, int otyp, cred_t *crp)
-{
- cnodeid_t node;
-
- node = master_node_get(*devp);
-
- ASSERT( (node >= 0) && (node < numnodes) );
-
- if (NODEPDA(node)->migr_refcnt_counterbuffer == NULL) {
- return (ENODEV);
- }
-
- ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
- ASSERT( NODEPDA(node)->migr_refcnt_cbsize != (size_t)0 );
-
- return (0);
-}
-
-/*ARGSUSED*/
-int
-mem_refcnt_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return 0;
-}
-
-/*ARGSUSED*/
-int
-mem_refcnt_mmap(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- cnodeid_t node;
- int errcode;
- char* buffer;
- size_t blen;
-
- node = master_node_get(dev);
-
- ASSERT( (node >= 0) && (node < numnodes) );
-
- ASSERT( NODEPDA(node)->migr_refcnt_counterbuffer != NULL);
- ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
- ASSERT( NODEPDA(node)->migr_refcnt_cbsize != 0 );
-
- /*
- * XXXX deal with prot's somewhere around here....
- */
-
- buffer = NODEPDA(node)->migr_refcnt_counterbuffer;
- blen = NODEPDA(node)->migr_refcnt_cbsize;
-
- /*
- * Force offset to be a multiple of sizeof(refcnt_t)
- * We round up.
- */
-
- off = (((off - 1)/sizeof(refcnt_t)) + 1) * sizeof(refcnt_t);
-
- if ( ((buffer + blen) - (buffer + off + len)) < 0 ) {
- return (EPERM);
- }
-
- errcode = v_mapphys(vt,
- buffer + off,
- len);
-
- return errcode;
-}
-
-/*ARGSUSED*/
-int
-mem_refcnt_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- return 0;
-}
-
-/* ARGSUSED */
-int
-mem_refcnt_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int mode,
- cred_t *cred_p,
- int *rvalp)
-{
- cnodeid_t node;
- int errcode;
- extern int numnodes;
-
- node = master_node_get(dev);
-
- ASSERT( (node >= 0) && (node < numnodes) );
-
- ASSERT( NODEPDA(node)->migr_refcnt_counterbuffer != NULL);
- ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
- ASSERT( NODEPDA(node)->migr_refcnt_cbsize != 0 );
-
- errcode = 0;
-
- switch (cmd) {
- case RCB_INFO_GET:
- {
- rcb_info_t rcb;
-
- rcb.rcb_len = NODEPDA(node)->migr_refcnt_cbsize;
-
- rcb.rcb_sw_sets = NODEPDA(node)->migr_refcnt_numsets;
- rcb.rcb_sw_counters_per_set = numnodes;
- rcb.rcb_sw_counter_size = sizeof(refcnt_t);
-
- rcb.rcb_base_pages = NODEPDA(node)->migr_refcnt_numsets /
- NUM_OF_HW_PAGES_PER_SW_PAGE();
- rcb.rcb_base_page_size = NBPP;
- rcb.rcb_base_paddr = ctob(slot_getbasepfn(node, 0));
-
- rcb.rcb_cnodeid = node;
- rcb.rcb_granularity = MD_PAGE_SIZE;
-#ifdef LATER
- rcb.rcb_hw_counter_max = MIGR_COUNTER_MAX_GET(node);
- rcb.rcb_diff_threshold = MIGR_THRESHOLD_DIFF_GET(node);
-#endif
- rcb.rcb_abs_threshold = MIGR_THRESHOLD_ABS_GET(node);
- rcb.rcb_num_slots = MAX_MEM_SLOTS;
-
- if (COPYOUT(&rcb, arg, sizeof(rcb_info_t))) {
- errcode = EFAULT;
- }
-
- break;
- }
- case RCB_SLOT_GET:
- {
- rcb_slot_t slot[MAX_MEM_SLOTS];
- int s;
- int nslots;
-
- nslots = MAX_MEM_SLOTS;
- ASSERT(nslots <= MAX_MEM_SLOTS);
- for (s = 0; s < nslots; s++) {
- slot[s].base = (uint64_t)ctob(slot_getbasepfn(node, s));
-#ifdef LATER
- slot[s].size = (uint64_t)ctob(slot_getsize(node, s));
-#else
- slot[s].size = (uint64_t)1;
-#endif
- }
- if (COPYOUT(&slot[0], arg, nslots * sizeof(rcb_slot_t))) {
- errcode = EFAULT;
- }
-
- *rvalp = nslots;
- break;
- }
-
- default:
- errcode = EINVAL;
- break;
-
- }
-
- return errcode;
-}
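
mem_refcnt_mmap() above rounds the requested offset up to a multiple of sizeof(refcnt_t) and rejects requests that would run past the end of the counter buffer. A standalone sketch of those two checks; it uses the more common ((off + unit - 1) / unit) * unit round-up form, which matches the expression in the code above for any offset >= 1:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t refcnt_t;      /* stand-in; the real refcnt_t is defined elsewhere */

/* Round 'off' up to a multiple of sizeof(refcnt_t) and verify that
 * [off, off + len) still fits inside a buffer of 'blen' bytes.
 * Returns the adjusted offset, or -1 if the request is out of range. */
static long long check_map_request(long long off, size_t len, size_t blen)
{
        long long unit = (long long)sizeof(refcnt_t);

        if (off < 0)
                return -1;
        off = ((off + unit - 1) / unit) * unit;         /* round up */
        if ((size_t)off + len > blen)
                return -1;
        return off;
}

int main(void)
{
        printf("%lld\n", check_map_request(5, 64, 4096));      /* 8  */
        printf("%lld\n", check_map_request(4090, 64, 4096));   /* -1 */
        return 0;
}
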
diff --git a/arch/ia64/sn/io/sn1/ml_SN_intr.c b/arch/ia64/sn/io/sn1/ml_SN_intr.c
deleted file mode 100644
index d768bef601250b..00000000000000
--- a/arch/ia64/sn/io/sn1/ml_SN_intr.c
+++ /dev/null
@@ -1,1154 +0,0 @@
-/* $Id: ml_SN_intr.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * intr.c-
- * This file contains all of the routines necessary to set up and
- * handle interrupts on an IP27 board.
- */
-
-#ident "$Revision: 1.1 $"
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/smp.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/intr.h>
-
-
-#if DEBUG_INTR_TSTAMP_DEBUG
-#include <sys/debug.h>
-#include <sys/idbg.h>
-#include <sys/inst.h>
-void do_splx_log(int, int);
-void spldebug_log_event(int);
-#endif
-
-#ifdef CONFIG_SMP
-extern unsigned long cpu_online_map;
-#endif
-#define cpu_allows_intr(cpu) (1)
-// If I understand what's going on with this, 32 should work.
-// physmem_maxradius seems to be the maximum number of router
-// hops to get from one end of the system to the other. With
-// a maximally configured machine, with the dumbest possible
-// topology, we would make 32 router hops. For what we're using
-// it for, the dumbest possible should suffice.
-#define physmem_maxradius() 32
-
-#define SUBNODE_ANY (-1)
-
-extern int nmied;
-extern int hub_intr_wakeup_cnt;
-extern synergy_da_t *Synergy_da_indr[];
-extern cpuid_t master_procid;
-
-extern cnodeid_t master_node_get(devfs_handle_t vhdl);
-
-extern void snia_error_intr_handler(int irq, void *devid, struct pt_regs *pt_regs);
-
-
-#define INTR_LOCK(vecblk) \
- (s = mutex_spinlock(&(vecblk)->vector_lock))
-#define INTR_UNLOCK(vecblk) \
- mutex_spinunlock(&(vecblk)->vector_lock, s)
-
-/*
- * REACT/Pro
- */
-
-
-
-/*
- * Find first bit set
- * Used outside this file also
- */
-int ms1bit(unsigned long x)
-{
- int b;
-
- if (x >> 32) b = 32, x >>= 32;
- else b = 0;
- if (x >> 16) b += 16, x >>= 16;
- if (x >> 8) b += 8, x >>= 8;
- if (x >> 4) b += 4, x >>= 4;
- if (x >> 2) b += 2, x >>= 2;
-
- return b + (int) (x >> 1);
-}
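
ms1bit() above finds the index of the most significant set bit by successively halving the search window (32, 16, 8, 4, 2, then the final bit). A standalone copy of the same approach, with a naive reference loop as a sanity check; uint64_t is used so the 32-bit shift is well defined on any host:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

/* Same successive-halving approach as ms1bit() above. */
static int msb_index(uint64_t x)
{
        int b = 0;

        if (x >> 32) { b = 32; x >>= 32; }
        if (x >> 16) { b += 16; x >>= 16; }
        if (x >> 8)  { b += 8;  x >>= 8; }
        if (x >> 4)  { b += 4;  x >>= 4; }
        if (x >> 2)  { b += 2;  x >>= 2; }
        return b + (int)(x >> 1);
}

/* Naive reference: index of the highest set bit (0 for x == 0 or x == 1). */
static int msb_naive(uint64_t x)
{
        int b = 0;

        while (x >>= 1)
                b++;
        return b;
}

int main(void)
{
        uint64_t tests[] = { 1, 2, 3, 0x80, 0x100, 0xdeadbeefULL, ~0ULL };
        unsigned int i;

        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                assert(msb_index(tests[i]) == msb_naive(tests[i]));
        printf("msb_index(0xdeadbeef) = %d\n", msb_index(0xdeadbeefULL));
        return 0;
}
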
-
-/* ARGSUSED */
-void
-intr_stray(void *lvl)
-{
- printk(KERN_WARNING "Stray Interrupt - level %ld to cpu %d", (long)lvl, smp_processor_id());
-}
-
-#if defined(DEBUG)
-
-/* Infrastructure to gather the device - target cpu mapping info */
-#define MAX_DEVICES 1000 /* Reasonably large number. Need not be
- * the exact maximum # of devices possible.
- */
-#define MAX_NAME 100
-typedef struct {
- dev_t dev; /* device */
- cpuid_t cpuid; /* target cpu */
- cnodeid_t cnodeid;/* node on which the target cpu is present */
- int bit; /* intr bit reserved */
- char intr_name[MAX_NAME]; /* name of the interrupt */
-} intr_dev_targ_map_t;
-
-intr_dev_targ_map_t intr_dev_targ_map[MAX_DEVICES];
-uint64_t intr_dev_targ_map_size;
-spinlock_t intr_dev_targ_map_lock;
-
-/* Print out the device - target cpu mapping.
- * This routine is used only in the idbg command
- * "intrmap"
- */
-void
-intr_dev_targ_map_print(cnodeid_t cnodeid)
-{
- int i,j,size = 0;
- int print_flag = 0,verbose = 0;
- char node_name[10];
-
- if (cnodeid != CNODEID_NONE) {
- nodepda_t *npda;
-
- npda = NODEPDA(cnodeid);
- for (j=0; j<NUM_SUBNODES; j++) {
- qprintf("\n SUBNODE %d\n INT_PEND0: ", j);
- for(i = 0 ; i < N_INTPEND_BITS ; i++)
- qprintf("%d",SNPDA(npda,j)->intr_dispatch0.info[i].ii_flags);
- qprintf("\n INT_PEND1: ");
- for(i = 0 ; i < N_INTPEND_BITS ; i++)
- qprintf("%d",SNPDA(npda,j)->intr_dispatch1.info[i].ii_flags);
- }
- verbose = 1;
- }
- qprintf("\n Device - Target Map [Interrupts: %s Node%s]\n\n",
- (verbose ? "All" : "Non-hardwired"),
- (cnodeid == CNODEID_NONE) ? "s: All" : node_name);
-
- qprintf("Device\tCpu\tCnode\tIntr_bit\tIntr_name\n");
- for (i = 0 ; i < intr_dev_targ_map_size ; i++) {
-
- print_flag = 0;
- if (verbose) {
- if (cnodeid != CNODEID_NONE) {
- if (cnodeid == intr_dev_targ_map[i].cnodeid)
- print_flag = 1;
- } else {
- print_flag = 1;
- }
- } else {
- if (intr_dev_targ_map[i].dev != 0) {
- if (cnodeid != CNODEID_NONE) {
- if (cnodeid ==
- intr_dev_targ_map[i].cnodeid)
- print_flag = 1;
- } else {
- print_flag = 1;
- }
- }
- }
- if (print_flag) {
- size++;
- qprintf("%d\t%d\t%d\t%d\t%s\n",
- intr_dev_targ_map[i].dev,
- intr_dev_targ_map[i].cpuid,
- intr_dev_targ_map[i].cnodeid,
- intr_dev_targ_map[i].bit,
- intr_dev_targ_map[i].intr_name);
- }
-
- }
- qprintf("\nTotal : %d\n",size);
-}
-#endif /* DEBUG */
-
-/*
- * The spinlocks have already been initialized. Now initialize the interrupt
- * vectors. One processor on each hub does the work.
- */
-void
-intr_init_vecblk(nodepda_t *npda, cnodeid_t node, int sn)
-{
- int i, ip=0;
- intr_vecblk_t *vecblk;
- subnode_pda_t *snpda;
-
-
- snpda = SNPDA(npda,sn);
- do {
- if (ip == 0) {
- vecblk = &snpda->intr_dispatch0;
- } else {
- vecblk = &snpda->intr_dispatch1;
- }
-
- /* Initialize this vector. */
- for (i = 0; i < N_INTPEND_BITS; i++) {
- vecblk->vectors[i].iv_func = intr_stray;
- vecblk->vectors[i].iv_prefunc = NULL;
- vecblk->vectors[i].iv_arg = (void *)(__psint_t)(ip * N_INTPEND_BITS + i);
-
- vecblk->info[i].ii_owner_dev = 0;
- strcpy(vecblk->info[i].ii_name, "Unused");
- vecblk->info[i].ii_flags = 0; /* No flags */
- vecblk->vectors[i].iv_mustruncpu = -1; /* No CPU yet. */
-
- }
-
- mutex_spinlock_init(&vecblk->vector_lock);
-
- vecblk->vector_count = 0;
- for (i = 0; i < CPUS_PER_SUBNODE; i++)
- vecblk->cpu_count[i] = 0;
-
- vecblk->vector_state = VECTOR_UNINITED;
-
- } while (++ip < 2);
-
-}
-
-
-/*
- * do_intr_reserve_level(cpuid_t cpu, int bit, int resflags, int reserve,
- * devfs_handle_t owner_dev, char *name)
- * Internal work routine to reserve or unreserve an interrupt level.
- * cpu is the CPU to which the interrupt will be sent.
- * bit is the level bit to reserve. -1 means any level
- * resflags should include II_ERRORINT if this is an
- * error interrupt, II_THREADED if the interrupt handler
- * will be threaded, or 0 otherwise.
- * reserve should be set to II_RESERVE or II_UNRESERVE
- * to get or clear a reservation.
- * owner_dev is the device that "owns" this interrupt, if supplied
- * name is a human-readable name for this interrupt, if supplied
- * intr_reserve_level returns the bit reserved or -1 to indicate an error
- */
-static int
-do_intr_reserve_level(cpuid_t cpu, int bit, int resflags, int reserve,
- devfs_handle_t owner_dev, char *name)
-{
- intr_vecblk_t *vecblk;
- hub_intmasks_t *hub_intmasks;
- unsigned long s;
- int rv = 0;
- int ip;
- synergy_da_t *sda;
- int which_synergy;
- cnodeid_t cnode;
-
- ASSERT(bit < N_INTPEND_BITS * 2);
-
- cnode = cpuid_to_cnodeid(cpu);
- which_synergy = cpuid_to_synergy(cpu);
- sda = Synergy_da_indr[(cnode * 2) + which_synergy];
- hub_intmasks = &sda->s_intmasks;
- // hub_intmasks = &pdaindr[cpu].pda->p_intmasks;
-
- // if (pdaindr[cpu].pda == NULL) return -1;
- if ((bit < N_INTPEND_BITS) && !(resflags & II_ERRORINT)) {
- vecblk = hub_intmasks->dispatch0;
- ip = 0;
- } else {
- ASSERT((bit >= N_INTPEND_BITS) || (bit == -1));
- bit -= N_INTPEND_BITS; /* Get position relative to INT_PEND1 reg. */
- vecblk = hub_intmasks->dispatch1;
- ip = 1;
- }
-
- INTR_LOCK(vecblk);
-
- if (bit <= -1) {
- bit = 0;
- ASSERT(reserve == II_RESERVE);
- /* Choose any available level */
- for (; bit < N_INTPEND_BITS; bit++) {
- if (!(vecblk->info[bit].ii_flags & II_RESERVE)) {
- rv = bit;
- break;
- }
- }
-
- /* Return -1 if all interrupt levels in this register are taken. */
- if (bit == N_INTPEND_BITS)
- rv = -1;
-
- } else {
- /* Reserve a particular level if it's available. */
- if ((vecblk->info[bit].ii_flags & II_RESERVE) == reserve) {
- /* Can't (un)reserve a level that's already (un)reserved. */
- rv = -1;
- } else {
- rv = bit;
- }
- }
-
- /* Reserve the level and bump the count. */
- if (rv != -1) {
- if (reserve) {
- int maxlen = sizeof(vecblk->info[bit].ii_name) - 1;
- int namelen;
- vecblk->info[bit].ii_flags |= (II_RESERVE | resflags);
- vecblk->info[bit].ii_owner_dev = owner_dev;
- /* Copy in the name. */
- namelen = name ? strlen(name) : 0;
- strncpy(vecblk->info[bit].ii_name, name, min(namelen, maxlen));
- vecblk->info[bit].ii_name[maxlen] = '\0';
- vecblk->vector_count++;
- } else {
- vecblk->info[bit].ii_flags = 0; /* Clear all the flags */
- vecblk->info[bit].ii_owner_dev = 0;
- /* Clear the name. */
- vecblk->info[bit].ii_name[0] = '\0';
- vecblk->vector_count--;
- }
- }
-
- INTR_UNLOCK(vecblk);
-
-#if defined(DEBUG)
- if (rv >= 0) {
- int namelen = name ? strlen(name) : 0;
- /* Gather this device - target cpu mapping information
- * in a table which can be used later by the idbg "intrmap"
- * command
- */
- s = mutex_spinlock(&intr_dev_targ_map_lock);
- if (intr_dev_targ_map_size < MAX_DEVICES) {
- intr_dev_targ_map_t *p;
-
- p = &intr_dev_targ_map[intr_dev_targ_map_size];
- p->dev = owner_dev;
- p->cpuid = cpu;
- p->cnodeid = cpuid_to_cnodeid(cpu);
- p->bit = ip * N_INTPEND_BITS + rv;
- strncpy(p->intr_name,
- name,
- min(MAX_NAME,namelen));
- intr_dev_targ_map_size++;
- }
- mutex_spinunlock(&intr_dev_targ_map_lock,s);
- }
-#endif /* DEBUG */
-
- return (((rv == -1) ? rv : (ip * N_INTPEND_BITS) + rv)) ;
-}
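
do_intr_reserve_level() above either scans for the first unreserved bit (when bit is -1) or checks that a specific bit can change state, then updates the flags, owner, name, and vector count under the vector lock. A standalone sketch of that allocation pattern over a small table; the structure and sizes are illustrative, not the real intr_vecblk_t layout, and locking is omitted:

#include <stdio.h>
#include <string.h>

#define NBITS     8          /* stand-in for N_INTPEND_BITS */
#define F_RESERVE 0x1

struct slot {
        int  flags;
        char name[16];
};

static struct slot table[NBITS];
static int reserved_count;

/* Reserve 'bit', or the first free bit if bit == -1.
 * Returns the bit reserved, or -1 on failure. */
static int reserve_bit(int bit, const char *name)
{
        if (bit == -1) {
                for (bit = 0; bit < NBITS; bit++)
                        if (!(table[bit].flags & F_RESERVE))
                                break;
                if (bit == NBITS)
                        return -1;              /* all levels taken */
        } else if (table[bit].flags & F_RESERVE) {
                return -1;                      /* already reserved */
        }

        table[bit].flags |= F_RESERVE;
        strncpy(table[bit].name, name, sizeof(table[bit].name) - 1);
        reserved_count++;
        return bit;
}

static void unreserve_bit(int bit)
{
        table[bit].flags = 0;
        table[bit].name[0] = '\0';
        reserved_count--;
}

int main(void)
{
        printf("%d\n", reserve_bit(-1, "uart"));   /* 0  */
        printf("%d\n", reserve_bit(3,  "gfx"));    /* 3  */
        printf("%d\n", reserve_bit(3,  "dup"));    /* -1 */
        unreserve_bit(3);
        return 0;
}
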
-
-
-/*
- * WARNING: This routine should only be called from within ml/SN.
- * Reserve an interrupt level.
- */
-int
-intr_reserve_level(cpuid_t cpu, int bit, int resflags, devfs_handle_t owner_dev, char *name)
-{
- return(do_intr_reserve_level(cpu, bit, resflags, II_RESERVE, owner_dev, name));
-}
-
-
-/*
- * WARNING: This routine should only be called from within ml/SN.
- * Unreserve an interrupt level.
- */
-void
-intr_unreserve_level(cpuid_t cpu, int bit)
-{
- (void)do_intr_reserve_level(cpu, bit, 0, II_UNRESERVE, 0, NULL);
-}
-
-/*
- * Get values that vary depending on which CPU and bit we're operating on
- */
-static hub_intmasks_t *
-intr_get_ptrs(cpuid_t cpu, int bit,
- int *new_bit, /* Bit relative to the register */
- hubreg_t **intpend_masks, /* Masks for this register */
- intr_vecblk_t **vecblk, /* Vecblock for this interrupt */
- int *ip) /* Which intpend register */
-{
- hub_intmasks_t *hub_intmasks;
- synergy_da_t *sda;
- int which_synergy;
- cnodeid_t cnode;
-
- ASSERT(bit < N_INTPEND_BITS * 2);
-
- cnode = cpuid_to_cnodeid(cpu);
- which_synergy = cpuid_to_synergy(cpu);
- sda = Synergy_da_indr[(cnode * 2) + which_synergy];
- hub_intmasks = &sda->s_intmasks;
-
- // hub_intmasks = &pdaindr[cpu].pda->p_intmasks;
-
- if (bit < N_INTPEND_BITS) {
- *intpend_masks = hub_intmasks->intpend0_masks;
- *vecblk = hub_intmasks->dispatch0;
- *ip = 0;
- *new_bit = bit;
- } else {
- *intpend_masks = hub_intmasks->intpend1_masks;
- *vecblk = hub_intmasks->dispatch1;
- *ip = 1;
- *new_bit = bit - N_INTPEND_BITS;
- }
-
- return hub_intmasks;
-}
-
-
-/*
- * intr_connect_level(cpuid_t cpu, int bit, ilvl_t intr_swlevel,
- * intr_func_t intr_func, void *intr_arg);
- * This is the lowest-level interface to the interrupt code. It shouldn't
- * be called from outside the ml/SN directory.
- * intr_connect_level hooks up an interrupt to a particular bit in
- * the INT_PEND0/1 masks. Returns 0 on success.
- * cpu is the CPU to which the interrupt will be sent.
- * bit is the level bit to connect to
- * intr_swlevel tells which software level to use
- * intr_func is the interrupt handler
- * intr_arg is an arbitrary argument interpreted by the handler
- * intr_prefunc is a prologue function, to be called
- * with interrupts disabled, to disable
- * the interrupt at source. It is called
- * with the same argument. Should be NULL for
- * typical interrupts, which can be masked
- * by the infrastructure at the level bit.
- * intr_connect_level returns 0 on success or nonzero on an error
- */
-/* ARGSUSED */
-int
-intr_connect_level(cpuid_t cpu, int bit, ilvl_t intr_swlevel, intr_func_t intr_prefunc)
-{
- intr_vecblk_t *vecblk;
- hubreg_t *intpend_masks;
- int rv = 0;
- int ip;
- unsigned long s;
-
- ASSERT(bit < N_INTPEND_BITS * 2);
-
- (void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks,
- &vecblk, &ip);
-
- INTR_LOCK(vecblk);
-
- if ((vecblk->info[bit].ii_flags & II_INUSE) ||
- (!(vecblk->info[bit].ii_flags & II_RESERVE))) {
- /* Can't assign to a level that's in use or isn't reserved. */
- rv = -1;
- } else {
- /* Stuff parameters into vector and info */
- vecblk->vectors[bit].iv_prefunc = intr_prefunc;
- vecblk->info[bit].ii_flags |= II_INUSE;
- }
-
- /* Now stuff the masks if everything's okay. */
- if (!rv) {
- int lslice;
- volatile hubreg_t *mask_reg;
- // nasid_t nasid = COMPACT_TO_NASID_NODEID(cpuid_to_cnodeid(cpu));
- nasid_t nasid = cpuid_to_nasid(cpu);
- int subnode = cpuid_to_subnode(cpu);
-
- /* Make sure it's not already pending when we connect it. */
- REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit + ip * N_INTPEND_BITS);
-
- if (bit >= GFX_INTR_A && bit <= CC_PEND_B) {
- intpend_masks[0] |= (1ULL << (uint64_t)bit);
- }
-
- lslice = cpuid_to_localslice(cpu);
- vecblk->cpu_count[lslice]++;
-#if SN1
- /*
- * On SN1, there are 8 interrupt mask registers per node:
- * PI_0 MASK_0 A
- * PI_0 MASK_1 A
- * PI_0 MASK_0 B
- * PI_0 MASK_1 B
- * PI_1 MASK_0 A
- * PI_1 MASK_1 A
- * PI_1 MASK_0 B
- * PI_1 MASK_1 B
- */
-#endif
- if (ip == 0) {
- mask_reg = REMOTE_HUB_PI_ADDR(nasid, subnode,
- PI_INT_MASK0_A + PI_INT_MASK_OFFSET * lslice);
- } else {
- mask_reg = REMOTE_HUB_PI_ADDR(nasid, subnode,
- PI_INT_MASK1_A + PI_INT_MASK_OFFSET * lslice);
- }
-
- HUB_S(mask_reg, intpend_masks[0]);
- }
-
- INTR_UNLOCK(vecblk);
-
- return rv;
-}
-
-
-/*
- * intr_disconnect_level(cpuid_t cpu, int bit)
- *
- * This is the lowest-level interface to the interrupt code. It should
- * not be called from outside the ml/SN directory.
- * intr_disconnect_level removes a particular bit from an interrupt in
- * the INT_PEND0/1 masks. Returns 0 on success or nonzero on failure.
- */
-int
-intr_disconnect_level(cpuid_t cpu, int bit)
-{
- intr_vecblk_t *vecblk;
- hubreg_t *intpend_masks;
- unsigned long s;
- int rv = 0;
- int ip;
-
- (void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks,
- &vecblk, &ip);
-
- INTR_LOCK(vecblk);
-
- if ((vecblk->info[bit].ii_flags & (II_RESERVE | II_INUSE)) !=
- ((II_RESERVE | II_INUSE))) {
- /* Can't remove a level that's not in use or isn't reserved. */
- rv = -1;
- } else {
- /* Stuff parameters into vector and info */
- vecblk->vectors[bit].iv_func = (intr_func_t)NULL;
- vecblk->vectors[bit].iv_prefunc = (intr_func_t)NULL;
- vecblk->vectors[bit].iv_arg = 0;
- vecblk->info[bit].ii_flags &= ~II_INUSE;
-#ifdef BASE_ITHRTEAD
- vecblk->vectors[bit].iv_mustruncpu = -1; /* No mustrun CPU any more. */
-#endif
- }
-
- /* Now clear the masks if everything's okay. */
- if (!rv) {
- int lslice;
- volatile hubreg_t *mask_reg;
-
- intpend_masks[0] &= ~(1ULL << (uint64_t)bit);
- lslice = cpuid_to_localslice(cpu);
- vecblk->cpu_count[lslice]--;
- mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cpuid_to_cnodeid(cpu)),
- cpuid_to_subnode(cpu),
- ip == 0 ? PI_INT_MASK0_A : PI_INT_MASK1_A);
- mask_reg = (volatile hubreg_t *)((__psunsigned_t)mask_reg +
- (PI_INT_MASK_OFFSET * lslice));
- *mask_reg = intpend_masks[0];
- }
-
- INTR_UNLOCK(vecblk);
-
- return rv;
-}
-
-/*
- * Actually block or unblock an interrupt
- */
-void
-do_intr_block_bit(cpuid_t cpu, int bit, int block)
-{
- intr_vecblk_t *vecblk;
- int ip;
- unsigned long s;
- hubreg_t *intpend_masks;
- volatile hubreg_t mask_value;
- volatile hubreg_t *mask_reg;
-
- intr_get_ptrs(cpu, bit, &bit, &intpend_masks, &vecblk, &ip);
-
- INTR_LOCK(vecblk);
-
- if (block)
- /* Block */
- intpend_masks[0] &= ~(1ULL << (uint64_t)bit);
- else
- /* Unblock */
- intpend_masks[0] |= (1ULL << (uint64_t)bit);
-
- if (ip == 0) {
- mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cpuid_to_cnodeid(cpu)),
- cpuid_to_subnode(cpu), PI_INT_MASK0_A);
- } else {
- mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cpuid_to_cnodeid(cpu)),
- cpuid_to_subnode(cpu), PI_INT_MASK1_A);
- }
-
- HUB_S(mask_reg, intpend_masks[0]);
-
- /*
- * Wait for it to take effect. (One read should suffice.)
- * This is only necessary when blocking an interrupt
- */
- if (block)
- while ((mask_value = HUB_L(mask_reg)) != intpend_masks[0])
- ;
-
- INTR_UNLOCK(vecblk);
-}
-
-
-/*
- * Block a particular interrupt (cpu/bit pair).
- */
-/* ARGSUSED */
-void
-intr_block_bit(cpuid_t cpu, int bit)
-{
- do_intr_block_bit(cpu, bit, 1);
-}
-
-
-/*
- * Unblock a particular interrupt (cpu/bit pair).
- */
-/* ARGSUSED */
-void
-intr_unblock_bit(cpuid_t cpu, int bit)
-{
- do_intr_block_bit(cpu, bit, 0);
-}
-
-
-/* verifies that the specified CPUID is on the specified SUBNODE (if any) */
-#define cpu_on_subnode(cpuid, which_subnode) \
- (((which_subnode) == SUBNODE_ANY) || (cpuid_to_subnode(cpuid) == (which_subnode)))
-
-
-/*
- * Choose one of the CPUs on a specified node or subnode to receive
- * interrupts. Don't pick a cpu which has been specified as a NOINTR cpu.
- *
- * Among all acceptable CPUs, the CPU that has the fewest total number
- * of interrupts targetted towards it is chosen. Note that we never
- * consider how frequent each of these interrupts might occur, so a rare
- * hardware error interrupt is weighted equally with a disk interrupt.
- */
-static cpuid_t
-do_intr_cpu_choose(cnodeid_t cnode, int which_subnode)
-{
- cpuid_t cpu, best_cpu = CPU_NONE;
- int slice, min_count=1000;
-
- min_count = 1000;
- for (slice=0; slice < CPUS_PER_NODE; slice++) {
- intr_vecblk_t *vecblk0, *vecblk1;
- int total_intrs_to_slice;
- subnode_pda_t *snpda;
- int local_cpu_num;
-
- cpu = cnode_slice_to_cpuid(cnode, slice);
- if (cpu == CPU_NONE)
- continue;
-
- /* If this cpu isn't enabled for interrupts, skip it */
- if (!cpu_enabled(cpu) || !cpu_allows_intr(cpu))
- continue;
-
- /* If this isn't the right subnode, skip it */
- if (!cpu_on_subnode(cpu, which_subnode))
- continue;
-
- /* OK, this one's a potential CPU for interrupts */
- snpda = SUBNODEPDA(cnode,SUBNODE(slice));
- vecblk0 = &snpda->intr_dispatch0;
- vecblk1 = &snpda->intr_dispatch1;
- local_cpu_num = LOCALCPU(slice);
- total_intrs_to_slice = vecblk0->cpu_count[local_cpu_num] +
- vecblk1->cpu_count[local_cpu_num];
-
- if (min_count > total_intrs_to_slice) {
- min_count = total_intrs_to_slice;
- best_cpu = cpu;
- }
- }
- return best_cpu;
-}
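
do_intr_cpu_choose() above walks the CPUs of a node and settles on the enabled one with the fewest interrupts already targeted at it, with no weighting for how often each interrupt fires. A standalone sketch of that minimum-load selection; the per-CPU enable flags and counts are made up, and the subnode filtering is omitted:

#include <stdio.h>

#define NCPUS    4
#define CPU_NONE (-1)

struct cpu_info {
        int enabled;
        int intr_count;   /* interrupts already targeted at this cpu */
};

/* Return the enabled cpu with the lowest interrupt count, or CPU_NONE. */
static int choose_intr_cpu(const struct cpu_info *cpus, int ncpus)
{
        int best = CPU_NONE, min_count = 1000;
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
                if (!cpus[cpu].enabled)
                        continue;
                if (cpus[cpu].intr_count < min_count) {
                        min_count = cpus[cpu].intr_count;
                        best = cpu;
                }
        }
        return best;
}

int main(void)
{
        struct cpu_info cpus[NCPUS] = {
                { 1, 7 }, { 0, 0 }, { 1, 2 }, { 1, 5 },
        };

        printf("target cpu %d\n", choose_intr_cpu(cpus, NCPUS)); /* cpu 2 */
        return 0;
}
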
-
-/*
- * Choose an appropriate interrupt target CPU on a specified node.
- * If which_subnode is SUBNODE_ANY, then subnode is not considered.
- * Otherwise, the chosen CPU must be on the specified subnode.
- */
-static cpuid_t
-intr_cpu_choose_from_node(cnodeid_t cnode, int which_subnode)
-{
- return(do_intr_cpu_choose(cnode, which_subnode));
-}
-
-
-/* Make it easy to identify subnode vertices in the hwgraph */
-void
-mark_subnodevertex_as_subnode(devfs_handle_t vhdl, int which_subnode)
-{
- graph_error_t rv;
-
- ASSERT(0 <= which_subnode);
- ASSERT(which_subnode < NUM_SUBNODES);
-
- rv = hwgraph_info_add_LBL(vhdl, INFO_LBL_CPUBUS, (arbitrary_info_t)which_subnode);
- ASSERT_ALWAYS(rv == GRAPH_SUCCESS);
-
- rv = hwgraph_info_export_LBL(vhdl, INFO_LBL_CPUBUS, sizeof(arbitrary_info_t));
- ASSERT_ALWAYS(rv == GRAPH_SUCCESS);
-}
-
-
-/*
- * Given a device descriptor, extract interrupt target information and
- * choose an appropriate CPU. Return CPU_NONE if we can't make sense
- * out of the target information.
- * TBD: Should this be considered platform-independent code?
- */
-
-
-/*
- * intr_bit_reserve_test(cpuid,which_subnode,cnode,req_bit,intr_resflags,
- * owner_dev,intr_name,*resp_bit)
- * Either cpuid is not CPU_NONE or cnodeid is not CNODEID_NONE, but
- * not both.
- * 1. If cpuid is specified, this routine tests if this cpu can be a valid
- * interrupt target candidate.
- * 2. If cnodeid is specified, this routine tests if there is a cpu on
- * this node which can be a valid interrupt target candidate.
- * 3. If a valid interrupt target cpu candidate is found then an attempt at
- * reserving an interrupt bit on the corresponding cnode is made.
- *
- * If steps 1 & 2 both fail, or step 3 fails, we are not able to get a valid
- * interrupt target cpu and the routine returns CPU_NONE (failure).
- * Otherwise the routine returns the cpuid of the interrupt target (success).
- */
-static cpuid_t
-intr_bit_reserve_test(cpuid_t cpuid,
- int favor_subnode,
- cnodeid_t cnodeid,
- int req_bit,
- int intr_resflags,
- devfs_handle_t owner_dev,
- char *intr_name,
- int *resp_bit)
-{
-
- ASSERT((cpuid==CPU_NONE) || (cnodeid==CNODEID_NONE));
-
- if (cnodeid != CNODEID_NONE) {
- /* Try to choose an interrupt cpu candidate */
- cpuid = intr_cpu_choose_from_node(cnodeid, favor_subnode);
- }
-
- if (cpuid != CPU_NONE) {
- /* Try to reserve an interrupt bit on the hub
- * corresponding to the candidate cnode. If we
- * are successful then we have a cpu which can
- * act as an interrupt target for the io device.
- * Otherwise we need to continue the search
- * further.
- */
- *resp_bit = do_intr_reserve_level(cpuid,
- req_bit,
- intr_resflags,
- II_RESERVE,
- owner_dev,
- intr_name);
-
- if (*resp_bit >= 0)
- /* The interrupt target specified was fine */
- return(cpuid);
- }
- return(CPU_NONE);
-}
-/*
- * intr_heuristic(dev_t dev,device_desc_t dev_desc,
- * int req_bit,int intr_resflags,dev_t owner_dev,
- * char *intr_name,int *resp_bit)
- *
- * Choose an interrupt destination for an interrupt.
- * dev is the device for which the interrupt is being set up
- * dev_desc is a description of hardware and policy that could
- * help determine where this interrupt should go
- * req_bit is the interrupt bit requested
- * (can be INTRCONNECT_ANY_BIT, in which case the first available
- * interrupt bit is used)
- * intr_resflags indicates whether we want to (un)reserve bit
- * owner_dev is the owner device
- * intr_name is the readable interrupt name
- * resp_bit indicates whether we succeeded in getting the required
- * action { (un)reservation} done
- * negative value indicates failure
- *
- */
-/* ARGSUSED */
-cpuid_t
-intr_heuristic(devfs_handle_t dev,
- device_desc_t dev_desc,
- int req_bit,
- int intr_resflags,
- devfs_handle_t owner_dev,
- char *intr_name,
- int *resp_bit)
-{
- cpuid_t cpuid; /* possible intr targ*/
- cnodeid_t candidate; /* possible candidate */
- int which_subnode = SUBNODE_ANY;
-
-/* SN1 + pcibr Addressing Limitation */
- {
- devfs_handle_t pconn_vhdl;
- pcibr_soft_t pcibr_soft;
-
- /*
- * This combination of SN1 and Bridge hardware has an odd "limitation".
- * Due to the choice of addresses for PI0 and PI1 registers on SN1
- * and historical limitations in Bridge, Bridge is unable to
- * send interrupts to both PI0 CPUs and PI1 CPUs -- we have
- * to choose one set or the other. That choice is implicitly
- * made when Bridge first attaches its error interrupt. After
- * that point, all subsequent interrupts are restricted to the
- * same PI number (though it's possible to send interrupts to
- * the same PI number on a different node).
- *
- * Since neither SN1 nor Bridge designers are willing to admit a
- * bug, we can't really call this a "workaround". It's a permanent
- * solution for an SN1-specific and Bridge-specific hardware
- * limitation that won't ever be lifted.
- */
- if ((hwgraph_edge_get(dev, EDGE_LBL_PCI, &pconn_vhdl) == GRAPH_SUCCESS) &&
- ((pcibr_soft = pcibr_soft_get(pconn_vhdl)) != NULL)) {
- /*
- * We "know" that the error interrupt is the first
- * interrupt set up by pcibr_attach. Send all interrupts
- * on this bridge to the same subnode number.
- */
- if (pcibr_soft->bsi_err_intr) {
- which_subnode = cpuid_to_subnode(((hub_intr_t) pcibr_soft->bsi_err_intr)->i_cpuid);
- }
- }
- }
-
- /* Check if we can find a valid interrupt target candidate on
- * the master node for the device.
- */
- cpuid = intr_bit_reserve_test(CPU_NONE,
- which_subnode,
- master_node_get(dev),
- req_bit,
- intr_resflags,
- owner_dev,
- intr_name,
- resp_bit);
-
- if (cpuid != CPU_NONE) {
- if (cpu_on_subnode(cpuid, which_subnode))
- return(cpuid); /* got a valid interrupt target */
- else
- intr_unreserve_level(cpuid, *resp_bit);
- }
-
- printk(KERN_WARNING "Cannot target interrupts to closest node(%d): (0x%lx)\n",
- master_node_get(dev),(unsigned long)owner_dev);
-
- /* Fall through into the default algorithm
- * (exhaustive-search-for-the-nearest-possible-interrupt-target)
- * for finding the interrupt target
- */
-
- {
- /*
- * Do a stupid round-robin assignment of the node.
- * (Should do a "nearest neighbor", but not for SN1.)
- */
- static cnodeid_t last_node = -1;
-
- if (last_node >= numnodes) last_node = 0;
- for (candidate = last_node + 1; candidate != last_node; candidate++) {
- if (candidate == numnodes) candidate = 0;
- cpuid = intr_bit_reserve_test(CPU_NONE,
- which_subnode,
- candidate,
- req_bit,
- intr_resflags,
- owner_dev,
- intr_name,
- resp_bit);
-
- if (cpuid != CPU_NONE) {
- if (cpu_on_subnode(cpuid, which_subnode)) {
- last_node = candidate;
- return(cpuid); /* got a valid interrupt target */
- }
- else
- intr_unreserve_level(cpuid, *resp_bit);
- }
- }
- last_node = candidate;
- }
-
- printk(KERN_WARNING "Cannot target interrupts to any close node: %ld (0x%lx)\n",
- (long)owner_dev, (unsigned long)owner_dev);
-
- /* In the worst case try to allocate interrupt bits on the
- * master processor's node. We may get here during error interrupt
- * allocation phase, when the topology matrix is not yet set up
- * and hence we cannot do an exhaustive search.
- */
- ASSERT(cpu_allows_intr(master_procid));
- cpuid = intr_bit_reserve_test(master_procid,
- which_subnode,
- CNODEID_NONE,
- req_bit,
- intr_resflags,
- owner_dev,
- intr_name,
- resp_bit);
-
- if (cpuid != CPU_NONE) {
- if (cpu_on_subnode(cpuid, which_subnode))
- return(cpuid);
- else
- intr_unreserve_level(cpuid, *resp_bit);
- }
-
- printk(KERN_WARNING "Cannot target interrupts: (0x%lx)\n",
- (unsigned long)owner_dev);
-
- return(CPU_NONE); /* Should never get here */
-}
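
When the device's home node cannot take the interrupt, intr_heuristic() above falls back to a round-robin sweep over all nodes, remembering where it stopped in a static last_node so successive allocations spread out. A standalone sketch of that rotation, with a trivial per-node capacity check standing in for intr_bit_reserve_test():

#include <stdio.h>

#define NUMNODES 4

/* Pretend capacity per node; a successful decrement stands in for a
 * successful interrupt-bit reservation on that node. */
static int free_slots[NUMNODES] = { 0, 2, 1, 0 };

static int node_can_accept(int node)
{
        if (free_slots[node] > 0) {
                free_slots[node]--;
                return 1;
        }
        return 0;
}

/* Round-robin over nodes, starting just past wherever we stopped last time. */
static int pick_node_round_robin(void)
{
        static int last_node = -1;
        int candidate;

        if (last_node >= NUMNODES)
                last_node = 0;
        for (candidate = last_node + 1; candidate != last_node; candidate++) {
                if (candidate == NUMNODES)
                        candidate = 0;
                if (node_can_accept(candidate)) {
                        last_node = candidate;
                        return candidate;
                }
        }
        return -1;   /* nothing available anywhere */
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++)
                printf("allocation %d -> node %d\n", i, pick_node_round_robin());
        return 0;
}
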
-
-struct hardwired_intr_s {
- signed char level;
- int flags;
- char *name;
-} const hardwired_intr[] = {
- { INT_PEND0_BASELVL + RESERVED_INTR, 0, "Reserved" },
- { INT_PEND0_BASELVL + GFX_INTR_A, 0, "Gfx A" },
- { INT_PEND0_BASELVL + GFX_INTR_B, 0, "Gfx B" },
- { INT_PEND0_BASELVL + PG_MIG_INTR, II_THREADED, "Migration" },
- { INT_PEND0_BASELVL + UART_INTR, II_THREADED, "Bedrock/L1" },
- { INT_PEND0_BASELVL + CC_PEND_A, 0, "Crosscall A" },
- { INT_PEND0_BASELVL + CC_PEND_B, 0, "Crosscall B" },
- { INT_PEND1_BASELVL + CLK_ERR_INTR, II_ERRORINT, "Clock Error" },
- { INT_PEND1_BASELVL + COR_ERR_INTR_A, II_ERRORINT, "Correctable Error A" },
- { INT_PEND1_BASELVL + COR_ERR_INTR_B, II_ERRORINT, "Correctable Error B" },
- { INT_PEND1_BASELVL + MD_COR_ERR_INTR, II_ERRORINT, "MD Correct. Error" },
- { INT_PEND1_BASELVL + NI_ERROR_INTR, II_ERRORINT, "NI Error" },
- { INT_PEND1_BASELVL + NI_BRDCAST_ERR_A, II_ERRORINT, "Remote NI Error"},
- { INT_PEND1_BASELVL + NI_BRDCAST_ERR_B, II_ERRORINT, "Remote NI Error"},
- { INT_PEND1_BASELVL + MSC_PANIC_INTR, II_ERRORINT, "MSC Panic" },
- { INT_PEND1_BASELVL + LLP_PFAIL_INTR_A, II_ERRORINT, "LLP Pfail WAR" },
- { INT_PEND1_BASELVL + LLP_PFAIL_INTR_B, II_ERRORINT, "LLP Pfail WAR" },
- { INT_PEND1_BASELVL + NACK_INT_A, 0, "CPU A Nack count == NACK_CMP" },
- { INT_PEND1_BASELVL + NACK_INT_B, 0, "CPU B Nack count == NACK_CMP" },
- { INT_PEND1_BASELVL + LB_ERROR, 0, "Local Block Error" },
- { INT_PEND1_BASELVL + XB_ERROR, 0, "Local XBar Error" },
- { -1, 0, (char *)NULL},
-};
-
-/*
- * Reserve all of the hardwired interrupt levels so they're not used as
- * general purpose bits later.
- */
-void
-intr_reserve_hardwired(cnodeid_t cnode)
-{
- cpuid_t cpu;
- int level;
- int i;
- char subnode_done[NUM_SUBNODES];
-
- // cpu = cnodetocpu(cnode);
- for (cpu = 0; cpu < smp_num_cpus; cpu++) {
- if (cpuid_to_cnodeid(cpu) == cnode) {
- break;
- }
- }
- if (cpu == smp_num_cpus) cpu = CPU_NONE;
- if (cpu == CPU_NONE) {
- printk("Node %d has no CPUs", cnode);
- return;
- }
-
- for (i=0; i<NUM_SUBNODES; i++)
- subnode_done[i] = 0;
-
- for (; cpu<smp_num_cpus && cpu_enabled(cpu) && cpuid_to_cnodeid(cpu) == cnode; cpu++) {
- int which_subnode = cpuid_to_subnode(cpu);
- if (subnode_done[which_subnode])
- continue;
- subnode_done[which_subnode] = 1;
-
- for (i = 0; hardwired_intr[i].level != -1; i++) {
- level = hardwired_intr[i].level;
-
- if (level != intr_reserve_level(cpu, level,
- hardwired_intr[i].flags,
- (devfs_handle_t) NULL,
- hardwired_intr[i].name))
- panic("intr_reserve_hardwired: Can't reserve level %d, cpu %ld.", level, cpu);
- }
- }
-}
-
-
-/*
- * Check and clear interrupts.
- */
-/*ARGSUSED*/
-static void
-intr_clear_bits(nasid_t nasid, volatile hubreg_t *pend, int base_level,
- char *name)
-{
- volatile hubreg_t bits;
- int i;
-
- /* Check pending interrupts */
- if ((bits = HUB_L(pend)) != 0) {
- for (i = 0; i < N_INTPEND_BITS; i++) {
- if (bits & (1 << i)) {
-#ifdef INTRDEBUG
- printk(KERN_WARNING "Nasid %d interrupt bit %d set in %s",
- nasid, i, name);
-#endif
- LOCAL_HUB_CLR_INTR(base_level + i);
- }
- }
- }
-}
-
-/*
- * Clear out our interrupt registers.
- */
-void
-intr_clear_all(nasid_t nasid)
-{
- int sn;
-
- for(sn=0; sn<NUM_SUBNODES; sn++) {
- REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK0_A, 0);
- REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK0_B, 0);
- REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK1_A, 0);
- REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK1_B, 0);
-
- intr_clear_bits(nasid, REMOTE_HUB_PI_ADDR(nasid, sn, PI_INT_PEND0),
- INT_PEND0_BASELVL, "INT_PEND0");
- intr_clear_bits(nasid, REMOTE_HUB_PI_ADDR(nasid, sn, PI_INT_PEND1),
- INT_PEND1_BASELVL, "INT_PEND1");
- }
-}
-
-/*
- * Dump information about a particular interrupt vector.
- */
-static void
-dump_vector(intr_info_t *info, intr_vector_t *vector, int bit, hubreg_t ip,
- hubreg_t ima, hubreg_t imb, void (*pf)(char *, ...))
-{
- hubreg_t value = 1LL << bit;
-
- pf(" Bit %02d: %s: func 0x%x arg 0x%x prefunc 0x%x\n",
- bit, info->ii_name,
- vector->iv_func, vector->iv_arg, vector->iv_prefunc);
- pf(" vertex 0x%x %s%s",
- info->ii_owner_dev,
- ((info->ii_flags) & II_RESERVE) ? "R" : "U",
- ((info->ii_flags) & II_INUSE) ? "C" : "-");
- pf("%s%s%s%s",
- ip & value ? "P" : "-",
- ima & value ? "A" : "-",
- imb & value ? "B" : "-",
- ((info->ii_flags) & II_ERRORINT) ? "E" : "-");
- pf("\n");
-}
-
-
-/*
- * Dump information about interrupt vector assignment.
- */
-void
-intr_dumpvec(cnodeid_t cnode, void (*pf)(char *, ...))
-{
- nodepda_t *npda;
- int ip, sn, bit;
- intr_vecblk_t *dispatch;
- hubreg_t ipr, ima, imb;
- nasid_t nasid;
-
- if ((cnode < 0) || (cnode >= numnodes)) {
- pf("intr_dumpvec: cnodeid out of range: %d\n", cnode);
- return ;
- }
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- if (nasid == INVALID_NASID) {
- pf("intr_dumpvec: Bad cnodeid: %d\n", cnode);
- return ;
- }
-
-
- npda = NODEPDA(cnode);
-
- for (sn = 0; sn < NUM_SUBNODES; sn++) {
- for (ip = 0; ip < 2; ip++) {
- dispatch = ip ? &(SNPDA(npda,sn)->intr_dispatch1) : &(SNPDA(npda,sn)->intr_dispatch0);
- ipr = REMOTE_HUB_PI_L(nasid, sn, ip ? PI_INT_PEND1 : PI_INT_PEND0);
- ima = REMOTE_HUB_PI_L(nasid, sn, ip ? PI_INT_MASK1_A : PI_INT_MASK0_A);
- imb = REMOTE_HUB_PI_L(nasid, sn, ip ? PI_INT_MASK1_B : PI_INT_MASK0_B);
-
- pf("Node %d INT_PEND%d:\n", cnode, ip);
-
- if (dispatch->ithreads_enabled)
- pf(" Ithreads enabled\n");
- else
- pf(" Ithreads disabled\n");
- pf(" vector_count = %d, vector_state = %d\n",
- dispatch->vector_count,
- dispatch->vector_state);
- pf(" CPU A count %d, CPU B count %d\n",
- dispatch->cpu_count[0],
- dispatch->cpu_count[1]);
- pf(" &vector_lock = 0x%x\n",
- &(dispatch->vector_lock));
- for (bit = 0; bit < N_INTPEND_BITS; bit++) {
- if ((dispatch->info[bit].ii_flags & II_RESERVE) ||
- (ipr & (1L << bit))) {
- dump_vector(&(dispatch->info[bit]),
- &(dispatch->vectors[bit]),
- bit, ipr, ima, imb, pf);
- }
- }
- pf("\n");
- }
- }
-}
-
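
intr_clear_all() above zeroes the interrupt mask registers and then has intr_clear_bits() read each pending register once and clear every bit still set. A standalone sketch of that scan-and-clear loop, with a plain variable standing in for the INT_PEND register and for LOCAL_HUB_CLR_INTR():

#include <stdio.h>
#include <stdint.h>

#define N_BITS 64

static uint64_t fake_pend_reg = 0x0000000000010005ULL;  /* pretend pending bits */

/* Stand-in for LOCAL_HUB_CLR_INTR(): clear one pending bit. */
static void clear_intr(int level)
{
        fake_pend_reg &= ~(1ULL << level);
        printf("cleared interrupt bit %d\n", level);
}

/* Walk the pending register and clear every bit that is set. */
static void clear_pending_bits(void)
{
        uint64_t bits = fake_pend_reg;   /* one read, like HUB_L(pend) */
        int i;

        if (bits == 0)
                return;
        for (i = 0; i < N_BITS; i++)
                if (bits & (1ULL << i))
                        clear_intr(i);
}

int main(void)
{
        clear_pending_bits();
        printf("pending register now 0x%llx\n",
               (unsigned long long)fake_pend_reg);
        return 0;
}
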
diff --git a/arch/ia64/sn/io/sn1/pcibr.c b/arch/ia64/sn/io/sn1/pcibr.c
deleted file mode 100644
index 92594ce11da607..00000000000000
--- a/arch/ia64/sn/io/sn1/pcibr.c
+++ /dev/null
@@ -1,7704 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-int NeedXbridgeSwap = 0;
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
-
-#ifdef __ia64
-#define rmallocmap atemapalloc
-#define rmfreemap atemapfree
-#define rmfree atefree
-#define rmalloc atealloc
-#endif
-
-extern boolean_t is_sys_critical_vertex(devfs_handle_t);
-
-#undef PCIBR_ATE_DEBUG
-
-#if 0
-#define DEBUG 1 /* To avoid lots of bad printk() formats leave off */
-#endif
-#define PCI_DEBUG 1
-#define ATTACH_DEBUG 1
-#define PCIBR_SOFT_LIST 1
-
-#ifndef LOCAL
-#define LOCAL static
-#endif
-
-/*
- * Macros related to the Lucent USS 302/312 usb timeout workaround. It
- * appears that the Lucent part can get into a retry loop if it sees a
- * DAC on the bus during a pio read retry. The loop is broken after about
- * 1ms, so we need to set up bridges holding this part to allow at least
- * 1ms for pio.
- */
-
-#define USS302_TIMEOUT_WAR
-
-#ifdef USS302_TIMEOUT_WAR
-#define LUCENT_USBHC_VENDOR_ID_NUM 0x11c1
-#define LUCENT_USBHC302_DEVICE_ID_NUM 0x5801
-#define LUCENT_USBHC312_DEVICE_ID_NUM 0x5802
-#define USS302_BRIDGE_TIMEOUT_HLD 4
-#endif
-
-#define PCIBR_LLP_CONTROL_WAR
-#if defined (PCIBR_LLP_CONTROL_WAR)
-int pcibr_llp_control_war_cnt;
-#endif /* PCIBR_LLP_CONTROL_WAR */
-
-int pcibr_devflag = D_MP;
-
-#ifdef LATER
-#define F(s,n) { 1l<<(s),-(s), n }
-
-struct reg_desc bridge_int_status_desc[] =
-{
- F(31, "MULTI_ERR"),
- F(30, "PMU_ESIZE_EFAULT"),
- F(29, "UNEXPECTED_RESP"),
- F(28, "BAD_XRESP_PACKET"),
- F(27, "BAD_XREQ_PACKET"),
- F(26, "RESP_XTALK_ERROR"),
- F(25, "REQ_XTALK_ERROR"),
- F(24, "INVALID_ADDRESS"),
- F(23, "UNSUPPORTED_XOP"),
- F(22, "XREQ_FIFO_OFLOW"),
- F(21, "LLP_REC_SNERROR"),
- F(20, "LLP_REC_CBERROR"),
- F(19, "LLP_RCTY"),
- F(18, "LLP_TX_RETRY"),
- F(17, "LLP_TCTY"),
- F(16, "SSRAM_PERR"),
- F(15, "PCI_ABORT"),
- F(14, "PCI_PARITY"),
- F(13, "PCI_SERR"),
- F(12, "PCI_PERR"),
- F(11, "PCI_MASTER_TOUT"),
- F(10, "PCI_RETRY_CNT"),
- F(9, "XREAD_REQ_TOUT"),
- F(8, "GIO_BENABLE_ERR"),
- F(7, "INT7"),
- F(6, "INT6"),
- F(5, "INT5"),
- F(4, "INT4"),
- F(3, "INT3"),
- F(2, "INT2"),
- F(1, "INT1"),
- F(0, "INT0"),
- {0}
-};
-
-struct reg_values space_v[] =
-{
- {PCIIO_SPACE_NONE, "none"},
- {PCIIO_SPACE_ROM, "ROM"},
- {PCIIO_SPACE_IO, "I/O"},
- {PCIIO_SPACE_MEM, "MEM"},
- {PCIIO_SPACE_MEM32, "MEM(32)"},
- {PCIIO_SPACE_MEM64, "MEM(64)"},
- {PCIIO_SPACE_CFG, "CFG"},
- {PCIIO_SPACE_WIN(0), "WIN(0)"},
- {PCIIO_SPACE_WIN(1), "WIN(1)"},
- {PCIIO_SPACE_WIN(2), "WIN(2)"},
- {PCIIO_SPACE_WIN(3), "WIN(3)"},
- {PCIIO_SPACE_WIN(4), "WIN(4)"},
- {PCIIO_SPACE_WIN(5), "WIN(5)"},
- {PCIIO_SPACE_BAD, "BAD"},
- {0}
-};
-
-struct reg_desc space_desc[] =
-{
- {0xFF, 0, "space", 0, space_v},
- {0}
-};
-
-#if DEBUG
-#define device_desc device_bits
-LOCAL struct reg_desc device_bits[] =
-{
- {BRIDGE_DEV_ERR_LOCK_EN, 0, "ERR_LOCK_EN"},
- {BRIDGE_DEV_PAGE_CHK_DIS, 0, "PAGE_CHK_DIS"},
- {BRIDGE_DEV_FORCE_PCI_PAR, 0, "FORCE_PCI_PAR"},
- {BRIDGE_DEV_VIRTUAL_EN, 0, "VIRTUAL_EN"},
- {BRIDGE_DEV_PMU_WRGA_EN, 0, "PMU_WRGA_EN"},
- {BRIDGE_DEV_DIR_WRGA_EN, 0, "DIR_WRGA_EN"},
- {BRIDGE_DEV_DEV_SIZE, 0, "DEV_SIZE"},
- {BRIDGE_DEV_RT, 0, "RT"},
- {BRIDGE_DEV_SWAP_PMU, 0, "SWAP_PMU"},
- {BRIDGE_DEV_SWAP_DIR, 0, "SWAP_DIR"},
- {BRIDGE_DEV_PREF, 0, "PREF"},
- {BRIDGE_DEV_PRECISE, 0, "PRECISE"},
- {BRIDGE_DEV_COH, 0, "COH"},
- {BRIDGE_DEV_BARRIER, 0, "BARRIER"},
- {BRIDGE_DEV_GBR, 0, "GBR"},
- {BRIDGE_DEV_DEV_SWAP, 0, "DEV_SWAP"},
- {BRIDGE_DEV_DEV_IO_MEM, 0, "DEV_IO_MEM"},
- {BRIDGE_DEV_OFF_MASK, BRIDGE_DEV_OFF_ADDR_SHFT, "DEV_OFF", "%x"},
- {0}
-};
-#endif /* DEBUG */
-
-#ifdef SUPPORT_PRINTING_R_FORMAT
-LOCAL struct reg_values xio_cmd_pactyp[] =
-{
- {0x0, "RdReq"},
- {0x1, "RdResp"},
- {0x2, "WrReqWithResp"},
- {0x3, "WrResp"},
- {0x4, "WrReqNoResp"},
- {0x5, "Reserved(5)"},
- {0x6, "FetchAndOp"},
- {0x7, "Reserved(7)"},
- {0x8, "StoreAndOp"},
- {0x9, "Reserved(9)"},
- {0xa, "Reserved(a)"},
- {0xb, "Reserved(b)"},
- {0xc, "Reserved(c)"},
- {0xd, "Reserved(d)"},
- {0xe, "SpecialReq"},
- {0xf, "SpecialResp"},
- {0}
-};
-
-LOCAL struct reg_desc xio_cmd_bits[] =
-{
- {WIDGET_DIDN, -28, "DIDN", "%x"},
- {WIDGET_SIDN, -24, "SIDN", "%x"},
- {WIDGET_PACTYP, -20, "PACTYP", 0, xio_cmd_pactyp},
- {WIDGET_TNUM, -15, "TNUM", "%x"},
- {WIDGET_COHERENT, 0, "COHERENT"},
- {WIDGET_DS, 0, "DS"},
- {WIDGET_GBR, 0, "GBR"},
- {WIDGET_VBPM, 0, "VBPM"},
- {WIDGET_ERROR, 0, "ERROR"},
- {WIDGET_BARRIER, 0, "BARRIER"},
- {0}
-};
-#endif /* SUPPORT_PRINTING_R_FORMAT */
-
-#if PCIBR_FREEZE_TIME || PCIBR_ATE_DEBUG
-LOCAL struct reg_desc ate_bits[] =
-{
- {0xFFFF000000000000ull, -48, "RMF", "%x"},
- {~(IOPGSIZE - 1) & /* may trim off some low bits */
- 0x0000FFFFFFFFF000ull, 0, "XIO", "%x"},
- {0x0000000000000F00ull, -8, "port", "%x"},
- {0x0000000000000010ull, 0, "Barrier"},
- {0x0000000000000008ull, 0, "Prefetch"},
- {0x0000000000000004ull, 0, "Precise"},
- {0x0000000000000002ull, 0, "Coherent"},
- {0x0000000000000001ull, 0, "Valid"},
- {0}
-};
-#endif
-
-#if PCIBR_ATE_DEBUG
-LOCAL struct reg_values ssram_sizes[] =
-{
- {BRIDGE_CTRL_SSRAM_512K, "512k"},
- {BRIDGE_CTRL_SSRAM_128K, "128k"},
- {BRIDGE_CTRL_SSRAM_64K, "64k"},
- {BRIDGE_CTRL_SSRAM_1K, "1k"},
- {0}
-};
-
-LOCAL struct reg_desc control_bits[] =
-{
- {BRIDGE_CTRL_FLASH_WR_EN, 0, "FLASH_WR_EN"},
- {BRIDGE_CTRL_EN_CLK50, 0, "EN_CLK50"},
- {BRIDGE_CTRL_EN_CLK40, 0, "EN_CLK40"},
- {BRIDGE_CTRL_EN_CLK33, 0, "EN_CLK33"},
- {BRIDGE_CTRL_RST_MASK, -24, "RST", "%x"},
- {BRIDGE_CTRL_IO_SWAP, 0, "IO_SWAP"},
- {BRIDGE_CTRL_MEM_SWAP, 0, "MEM_SWAP"},
- {BRIDGE_CTRL_PAGE_SIZE, 0, "PAGE_SIZE"},
- {BRIDGE_CTRL_SS_PAR_BAD, 0, "SS_PAR_BAD"},
- {BRIDGE_CTRL_SS_PAR_EN, 0, "SS_PAR_EN"},
- {BRIDGE_CTRL_SSRAM_SIZE_MASK, 0, "SSRAM_SIZE", 0, ssram_sizes},
- {BRIDGE_CTRL_F_BAD_PKT, 0, "F_BAD_PKT"},
- {BRIDGE_CTRL_LLP_XBAR_CRD_MASK, -12, "LLP_XBAR_CRD", "%d"},
- {BRIDGE_CTRL_CLR_RLLP_CNT, 0, "CLR_RLLP_CNT"},
- {BRIDGE_CTRL_CLR_TLLP_CNT, 0, "CLR_TLLP_CNT"},
- {BRIDGE_CTRL_SYS_END, 0, "SYS_END"},
- {BRIDGE_CTRL_MAX_TRANS_MASK, -4, "MAX_TRANS", "%d"},
- {BRIDGE_CTRL_WIDGET_ID_MASK, 0, "WIDGET_ID", "%x"},
- {0}
-};
-#endif
-#endif /* LATER */
-
-/* kbrick widgetnum-to-bus layout */
-int p_busnum[MAX_PORT_NUM] = { /* widget# */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
- 2, /* 0x8 */
- 1, /* 0x9 */
- 0, 0, /* 0xa - 0xb */
- 5, /* 0xc */
- 6, /* 0xd */
- 4, /* 0xe */
- 3, /* 0xf */
-};
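
p_busnum[] above is a straight widget-number-to-bus lookup table for the kbrick layout. A minimal sketch of how such a table might be consulted; the bounds check and the table size of 16 are assumptions here, since MAX_PORT_NUM is defined elsewhere:

#include <stdio.h>

#define DEMO_MAX_PORT 16   /* table covers widget numbers 0x0 - 0xf */

/* Same widget# -> bus# layout as p_busnum[] above. */
static const int demo_busnum[DEMO_MAX_PORT] = {
        0, 0, 0, 0, 0, 0, 0, 0,   /* 0x0 - 0x7 */
        2,                        /* 0x8 */
        1,                        /* 0x9 */
        0, 0,                     /* 0xa - 0xb */
        5,                        /* 0xc */
        6,                        /* 0xd */
        4,                        /* 0xe */
        3,                        /* 0xf */
};

/* Look up the bus for a widget, returning -1 for an out-of-range widget. */
static int widget_to_bus(unsigned int widget)
{
        if (widget >= DEMO_MAX_PORT)
                return -1;
        return demo_busnum[widget];
}

int main(void)
{
        printf("widget 0xc -> bus %d\n", widget_to_bus(0xc));  /* bus 5 */
        return 0;
}
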
-
-/*
- * Additional PIO spaces per slot are
- * recorded in this structure.
- */
-struct pciio_piospace_s {
- pciio_piospace_t next; /* another space for this device */
- char free; /* 1 if free, 0 if in use */
- pciio_space_t space; /* Which space is in use */
- iopaddr_t start; /* Starting address of the PIO space */
- size_t count; /* size of PIO space */
-};
-
-#if PCIBR_SOFT_LIST
-pcibr_list_p pcibr_list = 0;
-#endif
-
-#define INFO_LBL_PCIBR_ASIC_REV "_pcibr_asic_rev"
-
-#define PCIBR_D64_BASE_UNSET (0xFFFFFFFFFFFFFFFF)
-#define PCIBR_D32_BASE_UNSET (0xFFFFFFFF)
-
-#define PCIBR_VALID_SLOT(s) ((s) < 8)
-
-#ifdef SN_XXX
-extern int hub_device_flags_set(devfs_handle_t widget_dev,
- hub_widget_flags_t flags);
-#endif
-extern pciio_dmamap_t get_free_pciio_dmamap(devfs_handle_t);
-extern void free_pciio_dmamap(pcibr_dmamap_t);
-
-/*
- * This is the file operation table for the pcibr driver.
- * As each of the functions is implemented, put the
- * appropriate function name below.
- */
-struct file_operations pcibr_fops = {
- owner: THIS_MODULE,
- llseek: NULL,
- read: NULL,
- write: NULL,
- readdir: NULL,
- poll: NULL,
- ioctl: NULL,
- mmap: NULL,
- open: NULL,
- flush: NULL,
- release: NULL,
- fsync: NULL,
- fasync: NULL,
- lock: NULL,
- readv: NULL,
- writev: NULL
-};
-
-extern devfs_handle_t hwgraph_root;
-extern graph_error_t hwgraph_vertex_unref(devfs_handle_t vhdl);
-extern int cap_able(uint64_t x);
-extern uint64_t rmalloc(struct map *mp, size_t size);
-extern void rmfree(struct map *mp, size_t size, uint64_t a);
-extern int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
-extern long atoi(register char *p);
-extern char *dev_to_name(devfs_handle_t dev, char *buf, uint buflen);
-extern cnodeid_t nodevertex_to_cnodeid(devfs_handle_t vhdl);
-extern graph_error_t hwgraph_edge_remove(devfs_handle_t from, char *name, devfs_handle_t *toptr);
-extern struct map *rmallocmap(uint64_t mapsiz);
-extern void rmfreemap(struct map *mp);
-extern int compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr);
-extern int io_path_map_widget(devfs_handle_t vertex);
-
-
-
-/* =====================================================================
- * Function Table of Contents
- *
- * The order of functions in this file has stopped
- * making much sense. We might want to take a look
- * at it some time and bring back some sanity, or
- * perhaps bust this file into smaller chunks.
- */
-
-LOCAL void do_pcibr_rrb_clear(bridge_t *, int);
-LOCAL void do_pcibr_rrb_flush(bridge_t *, int);
-LOCAL int do_pcibr_rrb_count_valid(bridge_t *, pciio_slot_t);
-LOCAL int do_pcibr_rrb_count_avail(bridge_t *, pciio_slot_t);
-LOCAL int do_pcibr_rrb_alloc(bridge_t *, pciio_slot_t, int);
-LOCAL int do_pcibr_rrb_free(bridge_t *, pciio_slot_t, int);
-
-LOCAL void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int);
-
-int pcibr_wrb_flush(devfs_handle_t);
-int pcibr_rrb_alloc(devfs_handle_t, int *, int *);
-int pcibr_rrb_check(devfs_handle_t, int *, int *, int *, int *);
-int pcibr_alloc_all_rrbs(devfs_handle_t, int, int, int, int, int, int, int, int, int);
-void pcibr_rrb_flush(devfs_handle_t);
-
-LOCAL int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
-void pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
-
-LOCAL void pcibr_clearwidint(bridge_t *);
-LOCAL void pcibr_setwidint(xtalk_intr_t);
-LOCAL int pcibr_probe_slot(bridge_t *, cfg_p, unsigned *);
-
-void pcibr_init(void);
-int pcibr_attach(devfs_handle_t);
-int pcibr_detach(devfs_handle_t);
-int pcibr_open(devfs_handle_t *, int, int, cred_t *);
-int pcibr_close(devfs_handle_t, int, int, cred_t *);
-int pcibr_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
-int pcibr_unmap(devfs_handle_t, vhandl_t *);
-int pcibr_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
-
-void pcibr_freeblock_sub(iopaddr_t *, iopaddr_t *, iopaddr_t, size_t);
-
-LOCAL int pcibr_init_ext_ate_ram(bridge_t *);
-LOCAL int pcibr_ate_alloc(pcibr_soft_t, int);
-LOCAL void pcibr_ate_free(pcibr_soft_t, int, int);
-
-LOCAL pcibr_info_t pcibr_info_get(devfs_handle_t);
-LOCAL pcibr_info_t pcibr_device_info_new(pcibr_soft_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-LOCAL void pcibr_device_info_free(devfs_handle_t, pciio_slot_t);
-LOCAL iopaddr_t pcibr_addr_pci_to_xio(devfs_handle_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-
-pcibr_piomap_t pcibr_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
-void pcibr_piomap_free(pcibr_piomap_t);
-caddr_t pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
-void pcibr_piomap_done(pcibr_piomap_t);
-caddr_t pcibr_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-iopaddr_t pcibr_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
-void pcibr_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
-
-LOCAL iopaddr_t pcibr_flags_to_d64(unsigned, pcibr_soft_t);
-LOCAL bridge_ate_t pcibr_flags_to_ate(unsigned);
-
-pcibr_dmamap_t pcibr_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
-void pcibr_dmamap_free(pcibr_dmamap_t);
-LOCAL bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
-LOCAL iopaddr_t pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
-iopaddr_t pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
-alenlist_t pcibr_dmamap_list(pcibr_dmamap_t, alenlist_t, unsigned);
-void pcibr_dmamap_done(pcibr_dmamap_t);
-cnodeid_t pcibr_get_dmatrans_node(devfs_handle_t);
-iopaddr_t pcibr_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t pcibr_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
-void pcibr_dmamap_drain(pcibr_dmamap_t);
-void pcibr_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
-void pcibr_dmalist_drain(devfs_handle_t, alenlist_t);
-iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
-
-static unsigned pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines);
-pcibr_intr_t pcibr_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
-void pcibr_intr_free(pcibr_intr_t);
-LOCAL void pcibr_setpciint(xtalk_intr_t);
-int pcibr_intr_connect(pcibr_intr_t);
-void pcibr_intr_disconnect(pcibr_intr_t);
-
-devfs_handle_t pcibr_intr_cpu_get(pcibr_intr_t);
-void pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
-void pcibr_intr_func(intr_arg_t);
-
-void pcibr_provider_startup(devfs_handle_t);
-void pcibr_provider_shutdown(devfs_handle_t);
-
-int pcibr_reset(devfs_handle_t);
-pciio_endian_t pcibr_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
-int pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
-pciio_priority_t pcibr_priority_set(devfs_handle_t, pciio_priority_t);
-int pcibr_device_flags_set(devfs_handle_t, pcibr_device_flags_t);
-
-LOCAL cfg_p pcibr_config_addr(devfs_handle_t, unsigned);
-uint64_t pcibr_config_get(devfs_handle_t, unsigned, unsigned);
-LOCAL uint64_t do_pcibr_config_get(cfg_p, unsigned, unsigned);
-void pcibr_config_set(devfs_handle_t, unsigned, unsigned, uint64_t);
-LOCAL void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
-
-LOCAL pcibr_hints_t pcibr_hints_get(devfs_handle_t, int);
-void pcibr_hints_fix_rrbs(devfs_handle_t);
-void pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
-void pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
-void pcibr_set_rrb_callback(devfs_handle_t, rrb_alloc_funct_t);
-void pcibr_hints_handsoff(devfs_handle_t);
-void pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, ulong);
-
-LOCAL int pcibr_slot_info_init(devfs_handle_t,pciio_slot_t);
-LOCAL int pcibr_slot_info_free(devfs_handle_t,pciio_slot_t);
-
-#ifdef LATER
-LOCAL int pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
- pcibr_slot_info_resp_t);
-LOCAL void pcibr_slot_func_info_return(pcibr_info_h, int,
- pcibr_slot_func_info_resp_t);
-#endif /* LATER */
-
-LOCAL int pcibr_slot_addr_space_init(devfs_handle_t,pciio_slot_t);
-LOCAL int pcibr_slot_device_init(devfs_handle_t, pciio_slot_t);
-LOCAL int pcibr_slot_guest_info_init(devfs_handle_t,pciio_slot_t);
-LOCAL int pcibr_slot_initial_rrb_alloc(devfs_handle_t,pciio_slot_t);
-LOCAL int pcibr_slot_call_device_attach(devfs_handle_t,
- pciio_slot_t, int);
-LOCAL int pcibr_slot_call_device_detach(devfs_handle_t,
- pciio_slot_t, int);
-
-LOCAL int pcibr_slot_detach(devfs_handle_t, pciio_slot_t, int);
-LOCAL int pcibr_is_slot_sys_critical(devfs_handle_t, pciio_slot_t);
-#ifdef LATER
-LOCAL int pcibr_slot_query(devfs_handle_t, pcibr_slot_info_req_t);
-#endif
-
-/* =====================================================================
- * RRB management
- */
-
-#define LSBIT(word) ((word) &~ ((word)-1))
-
-#define PCIBR_RRB_SLOT_VIRTUAL 8
-
-LOCAL void
-do_pcibr_rrb_clear(bridge_t *bridge, int rrb)
-{
- bridgereg_t status;
-
- /* bridge_lock must be held;
- * this RRB must be disabled.
- */
-
- /* wait until RRB has no outstanding XIO packets. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
-
- /* if the RRB has data, drain it. */
- if (status & BRIDGE_RRB_VALID(rrb)) {
- bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
-
- /* wait until RRB is no longer valid. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
- }
-}
-
-LOCAL void
-do_pcibr_rrb_flush(bridge_t *bridge, int rrbn)
-{
- reg_p rrbp = &bridge->b_rrb_map[rrbn & 1].reg;
- bridgereg_t rrbv;
- int shft = 4 * (rrbn >> 1);
- unsigned ebit = BRIDGE_RRB_EN << shft;
-
- rrbv = *rrbp;
- if (rrbv & ebit)
- *rrbp = rrbv & ~ebit;
-
- do_pcibr_rrb_clear(bridge, rrbn);
-
- if (rrbv & ebit)
- *rrbp = rrbv;
-}
-
-/*
- * pcibr_rrb_count_valid: count how many RRBs are
- * marked valid for the specified PCI slot on this
- * bridge.
- *
- * NOTE: The "slot" parameter for all pcibr_rrb
- * management routines must include the "virtual"
- * bit; when managing both the normal and the
- * virtual channel, separate calls to these
- * routines must be made. To denote the virtual
- * channel, add PCIBR_RRB_SLOT_VIRTUAL to the slot
- * number.
- *
- * IMPL NOTE: The obvious algorithm is to iterate
- * through the RRB fields, incrementing a count if
- * the RRB is valid and matches the slot. However,
- * it is much simpler to use an algorithm derived
- * from the "partitioned add" idea. First, XOR in a
- * pattern such that the fields that match this
- * slot come up "all ones" and all other fields
- * have zeros in the mismatching bits. Then AND
- * together the bits in the field, so we end up
- * with one bit turned on for each field that
- * matched. Now we need to count these bits. This
- * can be done either with a series of shift/add
- * instructions or by using "tmp % 15"; I expect
- * that the cascaded shift/add will be faster.
- */
-
-LOCAL int
-do_pcibr_rrb_count_valid(bridge_t *bridge,
- pciio_slot_t slot)
-{
- bridgereg_t tmp;
-
- tmp = bridge->b_rrb_map[slot & 1].reg;
- tmp ^= 0x11111111 * (7 - slot / 2);
- tmp &= (0xCCCCCCCC & tmp) >> 2;
- tmp &= (0x22222222 & tmp) >> 1;
- tmp += tmp >> 4;
- tmp += tmp >> 8;
- tmp += tmp >> 16;
- return tmp & 15;
-}
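The "partitioned add" trick described above generalizes to counting how many 4-bit fields of a 32-bit word equal an arbitrary target nibble. A minimal standalone sketch, not part of the driver (the function name and field layout are illustrative):

#include <stdint.h>

/* Count the 4-bit fields of "word" that are equal to "target" (0..15). */
static int count_matching_nibbles(uint32_t word, uint32_t target)
{
        uint32_t tmp = word;

        /* XOR with the complement pattern: matching fields become 0xF */
        tmp ^= 0x11111111u * (~target & 0xFu);
        /* AND each field's four bits down into its bit 0 */
        tmp &= (0xCCCCCCCCu & tmp) >> 2;
        tmp &= (0x22222222u & tmp) >> 1;
        /* cascaded shift/add: sum the eight 0/1 fields into the low nibble */
        tmp += tmp >> 4;
        tmp += tmp >> 8;
        tmp += tmp >> 16;
        return tmp & 15;
}

In do_pcibr_rrb_count_valid() the target nibble already has its valid bit set, which is why only the low three bits are complemented there (7 - slot / 2).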
-
-/*
- * do_pcibr_rrb_count_avail: count how many RRBs are
- * available to be allocated for the specified slot.
- *
- * IMPL NOTE: similar to the above, except we are
- * just counting how many fields have the valid bit
- * turned off.
- */
-LOCAL int
-do_pcibr_rrb_count_avail(bridge_t *bridge,
- pciio_slot_t slot)
-{
- bridgereg_t tmp;
-
- tmp = bridge->b_rrb_map[slot & 1].reg;
- tmp = (0x88888888 & ~tmp) >> 3;
- tmp += tmp >> 4;
- tmp += tmp >> 8;
- tmp += tmp >> 16;
- return tmp & 15;
-}
-
-/*
- * do_pcibr_rrb_alloc: allocate some additional RRBs
- * for the specified slot. Returns -1 if there were
- * insufficient free RRBs to satisfy the request,
- * or 0 if the request was fulfilled.
- *
- * Note that if a request can be partially filled,
- * it will be, even if we return failure.
- *
- * IMPL NOTE: again we avoid iterating across all
- * the RRBs; instead, we form up a word containing
- * one bit for each free RRB, then peel the bits
- * off from the low end.
- */
-LOCAL int
-do_pcibr_rrb_alloc(bridge_t *bridge,
- pciio_slot_t slot,
- int more)
-{
- int rv = 0;
- bridgereg_t reg, tmp, bit;
-
- reg = bridge->b_rrb_map[slot & 1].reg;
- tmp = (0x88888888 & ~reg) >> 3;
- while (more-- > 0) {
- bit = LSBIT(tmp);
- if (!bit) {
- rv = -1;
- break;
- }
- tmp &= ~bit;
- reg = ((reg & ~(bit * 15)) | (bit * (8 + slot / 2)));
- }
- bridge->b_rrb_map[slot & 1].reg = reg;
- return rv;
-}
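The allocation loop above peels free fields off from the low end using LSBIT(). A compact sketch of the same pattern with a hypothetical free-bit word (not driver code):

#include <stdint.h>

/* Claim "wanted" items from a word that has one bit set per free item.
 * Returns 0 on success, -1 if the set bits run out first.
 */
static int claim_from_low_end(uint32_t free_bits, int wanted)
{
        while (wanted-- > 0) {
                uint32_t bit = free_bits & ~(free_bits - 1);    /* LSBIT */

                if (!bit)
                        return -1;              /* nothing left to claim */
                free_bits &= ~bit;              /* mark this item taken */
        }
        return 0;
}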
-
-/*
- * do_pcibr_rrb_free: release some of the RRBs that
- * have been allocated for the specified
- * slot. Returns zero for success, or negative if
- * it was unable to free that many RRBs.
- *
- * IMPL NOTE: We form up a bit for each RRB
- * allocated to the slot, aligned with the VALID
- * bitfield this time; then we peel bits off one at
- * a time, releasing the corresponding RRB.
- */
-LOCAL int
-do_pcibr_rrb_free(bridge_t *bridge,
- pciio_slot_t slot,
- int less)
-{
- int rv = 0;
- bridgereg_t reg, tmp, clr, bit;
- int i;
-
- clr = 0;
- reg = bridge->b_rrb_map[slot & 1].reg;
-
- /* This needs to be done, otherwise the RRBs on the virtual channel
- * for this slot won't be freed!
- */
- tmp = reg & 0xbbbbbbbb;
-
- tmp ^= (0x11111111 * (7 - slot / 2));
- tmp &= (0x33333333 & tmp) << 2;
- tmp &= (0x44444444 & tmp) << 1;
- while (less-- > 0) {
- bit = LSBIT(tmp);
- if (!bit) {
- rv = -1;
- break;
- }
- tmp &= ~bit;
- reg &= ~bit;
- clr |= bit;
- }
- bridge->b_rrb_map[slot & 1].reg = reg;
-
- for (i = 0; i < 8; i++)
- if (clr & (8 << (4 * i)))
- do_pcibr_rrb_clear(bridge, (2 * i) + (slot & 1));
-
- return rv;
-}
-
-LOCAL void
-do_pcibr_rrb_autoalloc(pcibr_soft_t pcibr_soft,
- int slot,
- int more_rrbs)
-{
- bridge_t *bridge = pcibr_soft->bs_base;
- int got;
-
- for (got = 0; got < more_rrbs; ++got) {
- if (pcibr_soft->bs_rrb_res[slot & 7] > 0)
- pcibr_soft->bs_rrb_res[slot & 7]--;
- else if (pcibr_soft->bs_rrb_avail[slot & 1] > 0)
- pcibr_soft->bs_rrb_avail[slot & 1]--;
- else
- break;
- if (do_pcibr_rrb_alloc(bridge, slot, 1) < 0)
- break;
-#if PCIBR_RRB_DEBUG
- printk( "do_pcibr_rrb_autoalloc: add one to slot %d%s\n",
- slot & 7, slot & 8 ? "v" : "");
-#endif
- pcibr_soft->bs_rrb_valid[slot]++;
- }
-#if PCIBR_RRB_DEBUG
- printk("%s: %d+%d free RRBs. Allocation list:\n", pcibr_soft->bs_name,
- pcibr_soft->bs_rrb_avail[0],
- pcibr_soft->bs_rrb_avail[1]);
- for (slot = 0; slot < 8; ++slot)
- printk("\t%d+%d+%d",
- 0xFFF & pcibr_soft->bs_rrb_valid[slot],
- 0xFFF & pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
- printk("\n");
-#endif
-}
-
-/*
- * Device driver interface to flush the write buffers for a specified
- * device hanging off the bridge.
- */
-int
-pcibr_wrb_flush(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
- volatile bridgereg_t *wrb_flush;
-
- wrb_flush = &(bridge->b_wr_req_buf[pciio_slot].reg);
- while (*wrb_flush);
-
- return(0);
-}
-/*
- * Device driver interface to request RRBs for a specified device
- * hanging off a Bridge. The driver requests the total number of
- * RRBs it would like for the normal channel (vchan0) and for the
- * "virtual channel" (vchan1). The actual number allocated to each
- * channel is returned.
- *
- * If we cannot allocate at least one RRB to a channel that needs
- * at least one, return -1 (failure). Otherwise, satisfy the request
- * as best we can and return 0.
- */
-int
-pcibr_rrb_alloc(devfs_handle_t pconn_vhdl,
- int *count_vchan0,
- int *count_vchan1)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
- int desired_vchan0;
- int desired_vchan1;
- int orig_vchan0;
- int orig_vchan1;
- int delta_vchan0;
- int delta_vchan1;
- int final_vchan0;
- int final_vchan1;
- int avail_rrbs;
- unsigned long s;
- int error;
-
- /*
- * TBD: temper request with admin info about RRB allocation,
- * and according to demand from other devices on this Bridge.
- *
- * One way of doing this would be to allocate two RRBs
- * for each device on the bus, before any drivers start
- * asking for extras. This has the weakness that one
- * driver might not give back an "extra" RRB until after
- * another driver has already failed to get one that
- * it wanted.
- */
-
- s = pcibr_lock(pcibr_soft);
-
- /* How many RRBs do we own? */
- orig_vchan0 = pcibr_soft->bs_rrb_valid[pciio_slot];
- orig_vchan1 = pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL];
-
- /* How many RRBs do we want? */
- desired_vchan0 = count_vchan0 ? *count_vchan0 : orig_vchan0;
- desired_vchan1 = count_vchan1 ? *count_vchan1 : orig_vchan1;
-
- /* How many RRBs are free? */
- avail_rrbs = pcibr_soft->bs_rrb_avail[pciio_slot & 1]
- + pcibr_soft->bs_rrb_res[pciio_slot];
-
- /* Figure desired deltas */
- delta_vchan0 = desired_vchan0 - orig_vchan0;
- delta_vchan1 = desired_vchan1 - orig_vchan1;
-
- /* Trim back deltas to something
- * that we can actually meet, by
- * decreasing the ending allocation
- * for whichever channel wants
- * more RRBs. If both want the same
- * number, cut the second channel.
- * NOTE: do not change the allocation for
- * a channel that was passed as NULL.
- */
- while ((delta_vchan0 + delta_vchan1) > avail_rrbs) {
- if (count_vchan0 &&
- (!count_vchan1 ||
- ((orig_vchan0 + delta_vchan0) >
- (orig_vchan1 + delta_vchan1))))
- delta_vchan0--;
- else
- delta_vchan1--;
- }
-
- /* Figure final RRB allocations
- */
- final_vchan0 = orig_vchan0 + delta_vchan0;
- final_vchan1 = orig_vchan1 + delta_vchan1;
-
- /* If either channel wants RRBs but our actions
- * would leave it with none, declare an error,
- * but DO NOT change any RRB allocations.
- */
- if ((desired_vchan0 && !final_vchan0) ||
- (desired_vchan1 && !final_vchan1)) {
-
- error = -1;
-
- } else {
-
- /* Commit the allocations: free, then alloc.
- */
- if (delta_vchan0 < 0)
- (void) do_pcibr_rrb_free(bridge, pciio_slot, -delta_vchan0);
- if (delta_vchan1 < 0)
- (void) do_pcibr_rrb_free(bridge, PCIBR_RRB_SLOT_VIRTUAL + pciio_slot, -delta_vchan1);
-
- if (delta_vchan0 > 0)
- (void) do_pcibr_rrb_alloc(bridge, pciio_slot, delta_vchan0);
- if (delta_vchan1 > 0)
- (void) do_pcibr_rrb_alloc(bridge, PCIBR_RRB_SLOT_VIRTUAL + pciio_slot, delta_vchan1);
-
- /* Return final values to caller.
- */
- if (count_vchan0)
- *count_vchan0 = final_vchan0;
- if (count_vchan1)
- *count_vchan1 = final_vchan1;
-
- /* prevent automatic changes to this slot's RRBs
- */
- pcibr_soft->bs_rrb_fixed |= 1 << pciio_slot;
-
- /* Track the actual allocations, release
- * any further reservations, and update the
- * number of available RRBs.
- */
-
- pcibr_soft->bs_rrb_valid[pciio_slot] = final_vchan0;
- pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL] = final_vchan1;
- pcibr_soft->bs_rrb_avail[pciio_slot & 1] =
- pcibr_soft->bs_rrb_avail[pciio_slot & 1]
- + pcibr_soft->bs_rrb_res[pciio_slot]
- - delta_vchan0
- - delta_vchan1;
- pcibr_soft->bs_rrb_res[pciio_slot] = 0;
-
-#if PCIBR_RRB_DEBUG
- printk("pcibr_rrb_alloc: slot %d set to %d+%d; %d+%d free\n",
- pciio_slot, final_vchan0, final_vchan1,
- pcibr_soft->bs_rrb_avail[0],
- pcibr_soft->bs_rrb_avail[1]);
- for (pciio_slot = 0; pciio_slot < 8; ++pciio_slot)
- printk("\t%d+%d+%d",
- 0xFFF & pcibr_soft->bs_rrb_valid[pciio_slot],
- 0xFFF & pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[pciio_slot]);
- printk("\n");
-#endif
-
- error = 0;
- }
-
- pcibr_unlock(pcibr_soft, s);
- return error;
-}
-
-/*
- * Device driver interface to check the current state
- * of the RRB allocations.
- *
- * pconn_vhdl is your PCI connection point (specifies which
- * PCI bus and which slot).
- *
- * count_vchan0 points to where to return the number of RRBs
- * assigned to the primary DMA channel, used by all DMA
- * that does not explicitly ask for the alternate virtual
- * channel.
- *
- * count_vchan1 points to where to return the number of RRBs
- * assigned to the secondary DMA channel, used when
- * PCIBR_VCHAN1 and PCIIO_DMA_A64 are specified.
- *
- * count_reserved points to where to return the number of RRBs
- * that have been automatically reserved for your device at
- * startup, but which have not been assigned to a
- * channel. RRBs must be assigned to a channel to be used;
- * this can be done either with an explicit pcibr_rrb_alloc
- * call, or automatically by the infrastructure when a DMA
- * translation is constructed. Any call to pcibr_rrb_alloc
- * will release any unassigned reserved RRBs back to the
- * free pool.
- *
- * count_pool points to where to return the number of RRBs
- * that are currently unassigned and unreserved. This
- * number can (and will) change as other drivers make calls
- * to pcibr_rrb_alloc, or automatically allocate RRBs for
- * DMA beyond their initial reservation.
- *
- * NULL may be passed for any of the return value pointers
- * the caller is not interested in.
- *
- * The return value is "0" if all went well, or "-1" if
- * there is a problem. Additionally, if the wrong vertex
- * is passed in, one of the subsidiary support functions
- * could panic with a "bad pciio fingerprint."
- */
-
-int
-pcibr_rrb_check(devfs_handle_t pconn_vhdl,
- int *count_vchan0,
- int *count_vchan1,
- int *count_reserved,
- int *count_pool)
-{
- pciio_info_t pciio_info;
- pciio_slot_t pciio_slot;
- pcibr_soft_t pcibr_soft;
- unsigned long s;
- int error = -1;
-
- if ((pciio_info = pciio_info_get(pconn_vhdl)) &&
- (pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info)) &&
- ((pciio_slot = pciio_info_slot_get(pciio_info)) < 8)) {
-
- s = pcibr_lock(pcibr_soft);
-
- if (count_vchan0)
- *count_vchan0 =
- pcibr_soft->bs_rrb_valid[pciio_slot];
-
- if (count_vchan1)
- *count_vchan1 =
- pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL];
-
- if (count_reserved)
- *count_reserved =
- pcibr_soft->bs_rrb_res[pciio_slot];
-
- if (count_pool)
- *count_pool =
- pcibr_soft->bs_rrb_avail[pciio_slot & 1];
-
- error = 0;
-
- pcibr_unlock(pcibr_soft, s);
- }
- return error;
-}
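A usage sketch of the two driver-facing calls above, as a hypothetical client driver might issue them (the function name and connection-point variable are illustrative; only the pcibr_rrb_check()/pcibr_rrb_alloc() signatures come from this file):

/* Hypothetical driver code: inspect the RRB state for a connection
 * point, then request two RRBs on the normal channel only.
 */
static void example_rrb_setup(devfs_handle_t my_conn)
{
        int vchan0, vchan1, reserved, pool;

        if (pcibr_rrb_check(my_conn, &vchan0, &vchan1, &reserved, &pool) != 0)
                return;                 /* not a usable PCI connection point */

        vchan0 = 2;                     /* desired RRBs for the normal channel */
        (void) pcibr_rrb_alloc(my_conn, &vchan0, NULL);
        /* vchan0 now holds the count actually granted; passing NULL for
         * the virtual channel leaves its allocation untouched.
         */
}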
-
-/* pcibr_alloc_all_rrbs allocates all the available RRBs in the quantities
- * requested for each of the devices.  The even_odd argument selects whether
- * the even or odd RRB register is being set up; the next four pairs of
- * arguments give the number of RRBs to assign to each device (they should
- * sum to <= 8) and whether to set the virtual bit for that device
- * (1 indicates yes, 0 indicates no).  The devices, in order, are either
- * slots 0, 2, 4, 6 (even_odd even) or 1, 3, 5, 7 (even_odd odd).
- * Returns 0 on success, -1 on error.
- */
-
-int
-pcibr_alloc_all_rrbs(devfs_handle_t vhdl, int even_odd,
- int dev_1_rrbs, int virt1, int dev_2_rrbs, int virt2,
- int dev_3_rrbs, int virt3, int dev_4_rrbs, int virt4)
-{
- devfs_handle_t pcibr_vhdl;
- pcibr_soft_t pcibr_soft = NULL;
- bridge_t *bridge = NULL;
-
- uint32_t rrb_setting = 0;
- int rrb_shift = 7;
- uint32_t cur_rrb;
- int dev_rrbs[4];
- int virt[4];
- int i, j;
- unsigned long s;
-
- if (GRAPH_SUCCESS ==
- hwgraph_traverse(vhdl, EDGE_LBL_PCI, &pcibr_vhdl)) {
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (pcibr_soft)
- bridge = pcibr_soft->bs_base;
- hwgraph_vertex_unref(pcibr_vhdl);
- }
- if (bridge == NULL)
- bridge = (bridge_t *) xtalk_piotrans_addr
- (vhdl, NULL, 0, sizeof(bridge_t), 0);
-
- even_odd &= 1;
-
- dev_rrbs[0] = dev_1_rrbs;
- dev_rrbs[1] = dev_2_rrbs;
- dev_rrbs[2] = dev_3_rrbs;
- dev_rrbs[3] = dev_4_rrbs;
-
- virt[0] = virt1;
- virt[1] = virt2;
- virt[2] = virt3;
- virt[3] = virt4;
-
- if ((dev_1_rrbs + dev_2_rrbs + dev_3_rrbs + dev_4_rrbs) > 8) {
- return -1;
- }
- if ((dev_1_rrbs < 0) || (dev_2_rrbs < 0) || (dev_3_rrbs < 0) || (dev_4_rrbs < 0)) {
- return -1;
- }
- /* walk through rrbs */
- for (i = 0; i < 4; i++) {
- if (virt[i]) {
- cur_rrb = i | 0xc;
- cur_rrb = cur_rrb << (rrb_shift * 4);
- rrb_shift--;
- rrb_setting = rrb_setting | cur_rrb;
- dev_rrbs[i] = dev_rrbs[i] - 1;
- }
- for (j = 0; j < dev_rrbs[i]; j++) {
- cur_rrb = i | 0x8;
- cur_rrb = cur_rrb << (rrb_shift * 4);
- rrb_shift--;
- rrb_setting = rrb_setting | cur_rrb;
- }
- }
-
- if (pcibr_soft)
- s = pcibr_lock(pcibr_soft);
-
- bridge->b_rrb_map[even_odd].reg = rrb_setting;
-
- if (pcibr_soft) {
-
- pcibr_soft->bs_rrb_fixed |= 0x55 << even_odd;
-
- /* since we've "FIXED" the allocations
- * for these slots, we probably can dispense
- * with tracking avail/res/valid data, but
- * keeping it up to date helps debugging.
- */
-
- pcibr_soft->bs_rrb_avail[even_odd] =
- 8 - (dev_1_rrbs + dev_2_rrbs + dev_3_rrbs + dev_4_rrbs);
-
- pcibr_soft->bs_rrb_res[even_odd + 0] = 0;
- pcibr_soft->bs_rrb_res[even_odd + 2] = 0;
- pcibr_soft->bs_rrb_res[even_odd + 4] = 0;
- pcibr_soft->bs_rrb_res[even_odd + 6] = 0;
-
- pcibr_soft->bs_rrb_valid[even_odd + 0] = dev_1_rrbs - virt1;
- pcibr_soft->bs_rrb_valid[even_odd + 2] = dev_2_rrbs - virt2;
- pcibr_soft->bs_rrb_valid[even_odd + 4] = dev_3_rrbs - virt3;
- pcibr_soft->bs_rrb_valid[even_odd + 6] = dev_4_rrbs - virt4;
-
- pcibr_soft->bs_rrb_valid[even_odd + 0 + PCIBR_RRB_SLOT_VIRTUAL] = virt1;
- pcibr_soft->bs_rrb_valid[even_odd + 2 + PCIBR_RRB_SLOT_VIRTUAL] = virt2;
- pcibr_soft->bs_rrb_valid[even_odd + 4 + PCIBR_RRB_SLOT_VIRTUAL] = virt3;
- pcibr_soft->bs_rrb_valid[even_odd + 6 + PCIBR_RRB_SLOT_VIRTUAL] = virt4;
-
- pcibr_unlock(pcibr_soft, s);
- }
- return 0;
-}
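A worked example of how the loop above assembles the even RRB map register (the call itself is hypothetical):

/* Hypothetical call: give each of the even-numbered slots two RRBs,
 * with the first RRB of the first device placed on the virtual channel:
 *
 *      pcibr_alloc_all_rrbs(vhdl, 0,  2, 1,  2, 0,  2, 0,  2, 0);
 *
 * Starting at nibble 7 (bits 31..28) and working down, the loop emits
 * 0xC (enabled + virtual) and 0x8 (enabled) for device 0, then 0x9,
 * 0x9, 0xA, 0xA, 0xB, 0xB for the remaining devices, so
 *
 *      rrb_setting = 0xC899AABB
 *
 * is written to b_rrb_map[0].
 */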
-
-/*
- * pcibr_rrb_flush: chase down all the RRBs assigned
- * to the specified connection point, and flush
- * them.
- */
-void
-pcibr_rrb_flush(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
- unsigned long s;
- reg_p rrbp;
- unsigned rrbm;
- int i;
- int rrbn;
- unsigned sval;
- unsigned mask;
-
- sval = BRIDGE_RRB_EN | (pciio_slot >> 1);
- mask = BRIDGE_RRB_EN | BRIDGE_RRB_PDEV;
- rrbn = pciio_slot & 1;
- rrbp = &bridge->b_rrb_map[rrbn].reg;
-
- s = pcibr_lock(pcibr_soft);
- rrbm = *rrbp;
- for (i = 0; i < 8; ++i) {
- if ((rrbm & mask) == sval)
- do_pcibr_rrb_flush(bridge, rrbn);
- rrbm >>= 4;
- rrbn += 2;
- }
- pcibr_unlock(pcibr_soft, s);
-}
-
-/* =====================================================================
- * Device(x) register management
- */
-
-/* pcibr_try_set_device: attempt to modify Device(x)
- * for the specified slot on the specified bridge
- * as requested in flags, limited to the specified
- * bits. Returns which BRIDGE bits were in conflict,
- * or ZERO if everything went OK.
- *
- * Caller MUST hold pcibr_lock when calling this function.
- */
-LOCAL int
-pcibr_try_set_device(pcibr_soft_t pcibr_soft,
- pciio_slot_t slot,
- unsigned flags,
- bridgereg_t mask)
-{
- bridge_t *bridge;
- pcibr_soft_slot_t slotp;
- bridgereg_t old;
- bridgereg_t new;
- bridgereg_t chg;
- bridgereg_t bad;
- bridgereg_t badpmu;
- bridgereg_t badd32;
- bridgereg_t badd64;
- bridgereg_t fix;
- unsigned long s;
- bridgereg_t xmask;
-
- xmask = mask;
- if (pcibr_soft->bs_xbridge) {
- if (mask == BRIDGE_DEV_PMU_BITS)
- xmask = XBRIDGE_DEV_PMU_BITS;
- if (mask == BRIDGE_DEV_D64_BITS)
- xmask = XBRIDGE_DEV_D64_BITS;
- }
-
- slotp = &pcibr_soft->bs_slot[slot];
-
- s = pcibr_lock(pcibr_soft);
-
- bridge = pcibr_soft->bs_base;
-
- old = slotp->bss_device;
-
- /* figure out what the desired
- * Device(x) bits are based on
- * the flags specified.
- */
-
- new = old;
-
- /* Currently, we inherit anything that
- * the new caller has not specified in
- * one way or another, unless we take
- * action here to not inherit.
- *
- * This is needed for the "swap" stuff,
- * since it could have been set via
- * pcibr_endian_set -- although note that
- * any explicit PCIBR_BYTE_STREAM or
- * PCIBR_WORD_VALUES will freely override
- * the effect of that call (and vice
- * versa, no protection either way).
- *
- * I want to get rid of pcibr_endian_set
- * in favor of tracking DMA endianness
- * using the flags specified when DMA
- * channels are created.
- */
-
-#define BRIDGE_DEV_WRGA_BITS (BRIDGE_DEV_PMU_WRGA_EN | BRIDGE_DEV_DIR_WRGA_EN)
-#define BRIDGE_DEV_SWAP_BITS (BRIDGE_DEV_SWAP_PMU | BRIDGE_DEV_SWAP_DIR)
-
- /* Do not use Barrier, Write Gather,
- * or Prefetch unless asked.
- * Leave everything else as it
- * was from the last time.
- */
- new = new
- & ~BRIDGE_DEV_BARRIER
- & ~BRIDGE_DEV_WRGA_BITS
- & ~BRIDGE_DEV_PREF
- ;
-
- /* Generic macro flags
- */
- if (flags & PCIIO_DMA_DATA) {
- new = (new
- & ~BRIDGE_DEV_BARRIER) /* barrier off */
- | BRIDGE_DEV_PREF; /* prefetch on */
-
- }
- if (flags & PCIIO_DMA_CMD) {
- new = ((new
- & ~BRIDGE_DEV_PREF) /* prefetch off */
- & ~BRIDGE_DEV_WRGA_BITS) /* write gather off */
- | BRIDGE_DEV_BARRIER; /* barrier on */
- }
- /* Generic detail flags
- */
- if (flags & PCIIO_WRITE_GATHER)
- new |= BRIDGE_DEV_WRGA_BITS;
- if (flags & PCIIO_NOWRITE_GATHER)
- new &= ~BRIDGE_DEV_WRGA_BITS;
-
- if (flags & PCIIO_PREFETCH)
- new |= BRIDGE_DEV_PREF;
- if (flags & PCIIO_NOPREFETCH)
- new &= ~BRIDGE_DEV_PREF;
-
- if (flags & PCIBR_WRITE_GATHER)
- new |= BRIDGE_DEV_WRGA_BITS;
- if (flags & PCIBR_NOWRITE_GATHER)
- new &= ~BRIDGE_DEV_WRGA_BITS;
-
- if (flags & PCIIO_BYTE_STREAM)
- new |= (pcibr_soft->bs_xbridge) ?
- BRIDGE_DEV_SWAP_DIR : BRIDGE_DEV_SWAP_BITS;
- if (flags & PCIIO_WORD_VALUES)
- new &= (pcibr_soft->bs_xbridge) ?
- ~BRIDGE_DEV_SWAP_DIR : ~BRIDGE_DEV_SWAP_BITS;
-
- /* Provider-specific flags
- */
- if (flags & PCIBR_PREFETCH)
- new |= BRIDGE_DEV_PREF;
- if (flags & PCIBR_NOPREFETCH)
- new &= ~BRIDGE_DEV_PREF;
-
- if (flags & PCIBR_PRECISE)
- new |= BRIDGE_DEV_PRECISE;
- if (flags & PCIBR_NOPRECISE)
- new &= ~BRIDGE_DEV_PRECISE;
-
- if (flags & PCIBR_BARRIER)
- new |= BRIDGE_DEV_BARRIER;
- if (flags & PCIBR_NOBARRIER)
- new &= ~BRIDGE_DEV_BARRIER;
-
- if (flags & PCIBR_64BIT)
- new |= BRIDGE_DEV_DEV_SIZE;
- if (flags & PCIBR_NO64BIT)
- new &= ~BRIDGE_DEV_DEV_SIZE;
-
- chg = old ^ new; /* what are we changing, */
- chg &= xmask; /* of the interesting bits */
-
- if (chg) {
-
- badd32 = slotp->bss_d32_uctr ? (BRIDGE_DEV_D32_BITS & chg) : 0;
- if (pcibr_soft->bs_xbridge) {
- badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
- badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
- } else {
- badpmu = slotp->bss_pmu_uctr ? (BRIDGE_DEV_PMU_BITS & chg) : 0;
- badd64 = slotp->bss_d64_uctr ? (BRIDGE_DEV_D64_BITS & chg) : 0;
- }
- bad = badpmu | badd32 | badd64;
-
- if (bad) {
-
- /* some conflicts can be resolved by
- * forcing the bit on. this may cause
- * some performance degradation in
- * the stream(s) that want the bit off,
- * but the alternative is not allowing
- * the new stream at all.
- */
- if ( (fix = bad & (BRIDGE_DEV_PRECISE |
- BRIDGE_DEV_BARRIER)) ){
- bad &= ~fix;
- /* don't change these bits if
- * they are already set in "old"
- */
- chg &= ~(fix & old);
- }
- /* some conflicts can be resolved by
- * forcing the bit off. this may cause
- * some performance degradation in
- * the stream(s) that want the bit on,
- * but the alternative is not allowing
- * the new stream at all.
- */
- if ( (fix = bad & (BRIDGE_DEV_WRGA_BITS |
- BRIDGE_DEV_PREF)) ) {
- bad &= ~fix;
- /* don't change these bits if
- * we wanted to turn them on.
- */
- chg &= ~(fix & new);
- }
- /* conflicts in other bits mean
- * we can not establish this DMA
- * channel while the other(s) are
- * still present.
- */
- if (bad) {
- pcibr_unlock(pcibr_soft, s);
-#if (DEBUG && PCIBR_DEV_DEBUG)
- printk("pcibr_try_set_device: mod blocked by %R\n", bad, device_bits);
-#endif
- return bad;
- }
- }
- }
- if (mask == BRIDGE_DEV_PMU_BITS)
- slotp->bss_pmu_uctr++;
- if (mask == BRIDGE_DEV_D32_BITS)
- slotp->bss_d32_uctr++;
- if (mask == BRIDGE_DEV_D64_BITS)
- slotp->bss_d64_uctr++;
-
- /* the value we want to write is the
- * original value, with the bits for
- * our selected changes flipped, and
- * with any disabled features turned off.
- */
- new = old ^ chg; /* only change what we want to change */
-
- if (slotp->bss_device == new) {
- pcibr_unlock(pcibr_soft, s);
- return 0;
- }
- bridge->b_device[slot].reg = new;
- slotp->bss_device = new;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- pcibr_unlock(pcibr_soft, s);
-#if DEBUG && PCIBR_DEV_DEBUG
- printk("pcibr Device(%d): 0x%p\n", slot, bridge->b_device[slot].reg);
-#endif
-
- return 0;
-}
-
-void
-pcibr_release_device(pcibr_soft_t pcibr_soft,
- pciio_slot_t slot,
- bridgereg_t mask)
-{
- pcibr_soft_slot_t slotp;
- unsigned long s;
-
- slotp = &pcibr_soft->bs_slot[slot];
-
- s = pcibr_lock(pcibr_soft);
-
- if (mask == BRIDGE_DEV_PMU_BITS)
- slotp->bss_pmu_uctr--;
- if (mask == BRIDGE_DEV_D32_BITS)
- slotp->bss_d32_uctr--;
- if (mask == BRIDGE_DEV_D64_BITS)
- slotp->bss_d64_uctr--;
-
- pcibr_unlock(pcibr_soft, s);
-}
-
-/*
- * flush write gather buffer for slot
- */
-LOCAL void
-pcibr_device_write_gather_flush(pcibr_soft_t pcibr_soft,
- pciio_slot_t slot)
-{
- bridge_t *bridge;
- unsigned long s;
- volatile uint32_t wrf;
- s = pcibr_lock(pcibr_soft);
- bridge = pcibr_soft->bs_base;
- wrf = bridge->b_wr_req_buf[slot].reg;
- pcibr_unlock(pcibr_soft, s);
-}
-
-/* =====================================================================
- * Bridge (pcibr) "Device Driver" entry points
- */
-
-/*
- * pcibr_probe_slot: read a config space word
- * while trapping any errors; return zero if
- * all went OK, or nonzero if there was an error.
- * The value read, if any, is passed back
- * through the valp parameter.
- */
-LOCAL int
-pcibr_probe_slot(bridge_t *bridge,
- cfg_p cfg,
- unsigned *valp)
-{
- int rv;
- bridgereg_t old_enable, new_enable;
- int badaddr_val(volatile void *, int, volatile void *);
-
-
- old_enable = bridge->b_int_enable;
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
-
- bridge->b_int_enable = new_enable;
-
- /*
- * The xbridge doesn't clear b_err_int_view unless
- * multi-err is cleared...
- */
- if (is_xbridge(bridge))
- if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT) {
- bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
- }
-
- if (bridge->b_int_status & BRIDGE_IRR_PCI_GRP) {
- bridge->b_int_rst_stat = BRIDGE_IRR_PCI_GRP_CLR;
- (void) bridge->b_wid_tflush; /* flushbus */
- }
- rv = badaddr_val((void *) cfg, 4, valp);
-
- /*
- * The xbridge doesn't set master timeout in b_int_status
- * here. Fortunately it's in error_interrupt_view.
- */
- if (is_xbridge(bridge))
- if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT) {
- bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
- rv = 1; /* unoccupied slot */
- }
-
- bridge->b_int_enable = old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
- return rv;
-}
-
-/*
- * pcibr_init: called once during system startup or
- * when a loadable driver is loaded.
- *
- * The driver_register function should normally
- * be in _reg, not _init. But the pcibr driver is
- * required by devinit before the _reg routines
- * are called, so this is an exception.
- */
-void
-pcibr_init(void)
-{
-#if DEBUG && ATTACH_DEBUG
- printk("pcibr_init\n");
-#endif
-
- xwidget_driver_register(XBRIDGE_WIDGET_PART_NUM,
- XBRIDGE_WIDGET_MFGR_NUM,
- "pcibr_",
- 0);
- xwidget_driver_register(BRIDGE_WIDGET_PART_NUM,
- BRIDGE_WIDGET_MFGR_NUM,
- "pcibr_",
- 0);
-}
-
-/*
- * The open/close and mmap/munmap interfaces are used by processes
- * that plan to map the PCI bridge and muck around with its
- * registers. This is dangerous to do, and will be allowed
- * only for a select set of programs. Typically these are
- * diagnostics programs, or some user level commands we may
- * write to do some weird things.
- * To start with, expect them to have root privileges.
- * We will ask for more later.
- */
-/* ARGSUSED */
-int
-pcibr_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-pcibr_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-pcibr_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- int error;
- devfs_handle_t vhdl = dev_to_vhdl(dev);
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get(vhdl);
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- hwgraph_vertex_unref(pcibr_vhdl);
-
- ASSERT(pcibr_soft);
- len = ctob(btoc(len)); /* Make len page aligned */
- error = v_mapphys(vt, (void *) ((__psunsigned_t) bridge + off), len);
-
- /*
- * If the offset being mapped corresponds to the flash prom
- * base, and if the mapping succeeds, and if the user
- * has requested the protections to be WRITE, enable the
- * flash prom to be written.
- *
- * XXX- deprecate this in favor of using the
- * real flash driver ...
- */
- if (!error &&
- ((off == BRIDGE_EXTERNAL_FLASH) ||
- (len > BRIDGE_EXTERNAL_FLASH))) {
- int s;
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
- s = splhi();
- bridge->b_wid_control |= BRIDGE_CTRL_FLASH_WR_EN;
- bridge->b_wid_control; /* inval addr bug war */
- splx(s);
- }
- return error;
-}
-
-/*ARGSUSED */
-int
-pcibr_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get((devfs_handle_t) dev);
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- hwgraph_vertex_unref(pcibr_vhdl);
-
- /*
- * If flashprom write was enabled, disable it, as
- * this is the last unmap.
- */
- if (bridge->b_wid_control & BRIDGE_CTRL_FLASH_WR_EN) {
- int s;
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
- s = splhi();
- bridge->b_wid_control &= ~BRIDGE_CTRL_FLASH_WR_EN;
- bridge->b_wid_control; /* inval addr bug war */
- splx(s);
- }
- return 0;
-}
-
-/* This is special case code used by grio. There are plans to make
- * this a bit more general in the future, but till then this should
- * be sufficient.
- */
-pciio_slot_t
-pcibr_device_slot_get(devfs_handle_t dev_vhdl)
-{
- char devname[MAXDEVNAME];
- devfs_handle_t tdev;
- pciio_info_t pciio_info;
- pciio_slot_t slot = PCIIO_SLOT_NONE;
-
- vertex_to_name(dev_vhdl, devname, MAXDEVNAME);
-
- /* run back along the canonical path
- * until we find a PCI connection point.
- */
- tdev = hwgraph_connectpt_get(dev_vhdl);
- while (tdev != GRAPH_VERTEX_NONE) {
- pciio_info = pciio_info_chk(tdev);
- if (pciio_info) {
- slot = pciio_info_slot_get(pciio_info);
- break;
- }
- hwgraph_vertex_unref(tdev);
- tdev = hwgraph_connectpt_get(tdev);
- }
- hwgraph_vertex_unref(tdev);
-
- return slot;
-}
-
-/*==========================================================================
- * BRIDGE PCI SLOT RELATED IOCTLs
- */
-char *pci_space_name[] = {"NONE",
- "ROM",
- "IO",
- "",
- "MEM",
- "MEM32",
- "MEM64",
- "CFG",
- "WIN0",
- "WIN1",
- "WIN2",
- "WIN3",
- "WIN4",
- "WIN5",
- "",
- "BAD"};
-
-
-/*ARGSUSED */
-int
-pcibr_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int flag,
- struct cred *cr,
- int *rvalp)
-{
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get((devfs_handle_t)dev);
-#ifdef LATER
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-#endif
- int error = 0;
-
- hwgraph_vertex_unref(pcibr_vhdl);
-
- switch (cmd) {
-#ifdef LATER
- case GIOCSETBW:
- {
- grio_ioctl_info_t info;
- pciio_slot_t slot = 0;
-
- if (!cap_able((uint64_t)CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
- if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
- error = EFAULT;
- break;
- }
-#ifdef GRIO_DEBUG
- printk("pcibr:: prev_vhdl: %d reqbw: %lld\n",
- info.prev_vhdl, info.reqbw);
-#endif /* GRIO_DEBUG */
-
- if ((slot = pcibr_device_slot_get(info.prev_vhdl)) ==
- PCIIO_SLOT_NONE) {
- error = EIO;
- break;
- }
- if (info.reqbw)
- pcibr_priority_bits_set(pcibr_soft, slot, PCI_PRIO_HIGH);
- break;
- }
-
- case GIOCRELEASEBW:
- {
- grio_ioctl_info_t info;
- pciio_slot_t slot = 0;
-
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
- if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
- error = EFAULT;
- break;
- }
-#ifdef GRIO_DEBUG
- printk("pcibr:: prev_vhdl: %d reqbw: %lld\n",
- info.prev_vhdl, info.reqbw);
-#endif /* GRIO_DEBUG */
-
- if ((slot = pcibr_device_slot_get(info.prev_vhdl)) ==
- PCIIO_SLOT_NONE) {
- error = EIO;
- break;
- }
- if (info.reqbw)
- pcibr_priority_bits_set(pcibr_soft, slot, PCI_PRIO_LOW);
- break;
- }
-
- case PCIBR_SLOT_POWERUP:
- {
- pciio_slot_t slot;
-
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
-
- slot = (pciio_slot_t)(uint64_t)arg;
- error = pcibr_slot_powerup(pcibr_vhdl,slot);
- break;
- }
- case PCIBR_SLOT_SHUTDOWN:
- {
- pciio_slot_t slot;
-
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
-
- slot = (pciio_slot_t)(uint64_t)arg;
- error = pcibr_slot_shutdown(pcibr_vhdl,slot);
- break;
- }
- case PCIBR_SLOT_QUERY:
- {
- struct pcibr_slot_info_req_s req;
-
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
-
- if (COPYIN(arg, &req, sizeof(req))) {
- error = EFAULT;
- break;
- }
-
- error = pcibr_slot_query(pcibr_vhdl, &req);
- break;
- }
-#endif /* LATER */
- default:
- break;
-
- }
-
- return error;
-}
-
-void
-pcibr_freeblock_sub(iopaddr_t *free_basep,
- iopaddr_t *free_lastp,
- iopaddr_t base,
- size_t size)
-{
- iopaddr_t free_base = *free_basep;
- iopaddr_t free_last = *free_lastp;
- iopaddr_t last = base + size - 1;
-
- if ((last < free_base) || (base > free_last)); /* free block outside arena */
-
- else if ((base <= free_base) && (last >= free_last))
- /* free block contains entire arena */
- *free_basep = *free_lastp = 0;
-
- else if (base <= free_base)
- /* free block is head of arena */
- *free_basep = last + 1;
-
- else if (last >= free_last)
- /* free block is tail of arena */
- *free_lastp = base - 1;
-
- /*
- * We are left with two regions: the free area
- * in the arena "below" the block, and the free
- * area in the arena "above" the block. Keep
- * the one that is bigger.
- */
-
- else if ((base - free_base) > (free_last - last))
- *free_lastp = base - 1; /* keep lower chunk */
- else
- *free_basep = last + 1; /* keep upper chunk */
-}
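A small worked example of the carve-out above (illustrative addresses, hypothetical wrapper function):

static void example_freeblock_sub(void)
{
        /* Free arena 0x1000..0x7FFF; an existing window occupies
         * 0x2000..0x2FFF.  The region above the window is larger than
         * the region below it, so the free base is moved past it.
         */
        iopaddr_t fb = 0x1000, fl = 0x7FFF;

        pcibr_freeblock_sub(&fb, &fl, 0x2000, 0x1000);
        /* fb is now 0x3000; fl stays 0x7FFF */
}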
-
-/* Convert from ssram_bits in control register to number of SSRAM entries */
-#define ATE_NUM_ENTRIES(n) _ate_info[n]
-
-/* Possible choices for number of ATE entries in Bridge's SSRAM */
-LOCAL int _ate_info[] =
-{
- 0, /* 0 entries */
- 8 * 1024, /* 8K entries */
- 16 * 1024, /* 16K entries */
- 64 * 1024 /* 64K entries */
-};
-
-#define ATE_NUM_SIZES (sizeof(_ate_info) / sizeof(int))
-#define ATE_PROBE_VALUE 0x0123456789abcdefULL
-
-/*
- * Determine the size of this bridge's external mapping SSRAM, and set
- * the control register appropriately to reflect this size, and initialize
- * the external SSRAM.
- */
-LOCAL int
-pcibr_init_ext_ate_ram(bridge_t *bridge)
-{
- int largest_working_size = 0;
- int num_entries, entry;
- int i, j;
- bridgereg_t old_enable, new_enable;
- int s;
-
- /* Probe SSRAM to determine its size. */
- old_enable = bridge->b_int_enable;
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- bridge->b_int_enable = new_enable;
-
- for (i = 1; i < ATE_NUM_SIZES; i++) {
- /* Try writing a value */
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
-
- /* Guard against wrap */
- for (j = 1; j < i; j++)
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(j) - 1] = 0;
-
- /* See if value was written */
- if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
- largest_working_size = i;
- }
- bridge->b_int_enable = old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
-
- s = splhi();
- bridge->b_wid_control = (bridge->b_wid_control
- & ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
- | BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
- bridge->b_wid_control; /* inval addr bug war */
- splx(s);
-
- num_entries = ATE_NUM_ENTRIES(largest_working_size);
-
-#if PCIBR_ATE_DEBUG
- if (num_entries)
- printk("bridge at 0x%x: clearing %d external ATEs\n", bridge, num_entries);
- else
- printk("bridge at 0x%x: no externa9422l ATE RAM found\n", bridge);
-#endif
-
- /* Initialize external mapping entries */
- for (entry = 0; entry < num_entries; entry++)
- bridge->b_ext_ate_ram[entry] = 0;
-
- return (num_entries);
-}
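The probe/guard/read-back pattern used above to size the SSRAM can be sketched on its own; the RAM pointer, entry table, and function name here are hypothetical:

#include <stdint.h>

#define PROBE_VALUE     0x0123456789abcdefULL

/* Return the index of the largest candidate size (entries[i] slots)
 * that the RAM really decodes, or 0 if none work.  A RAM that wraps
 * at a smaller size sees its probe clobbered by the guard writes to
 * the smaller sizes' last entries.
 */
static int probe_largest_size(volatile uint64_t *ram,
                              const int *entries, int nsizes)
{
        int best = 0, i, j;

        for (i = 1; i < nsizes; i++) {
                ram[entries[i] - 1] = PROBE_VALUE;      /* probe */
                for (j = 1; j < i; j++)
                        ram[entries[j] - 1] = 0;        /* wrap guard */
                if (ram[entries[i] - 1] == PROBE_VALUE)
                        best = i;                       /* this size works */
        }
        return best;
}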
-
-/*
- * Allocate "count" contiguous Bridge Address Translation Entries
- * on the specified bridge to be used for PCI to XTALK mappings.
- * Indices in the rm map range from 1..num_entries.  Indices returned
- * to caller range from 0..num_entries-1.
- *
- * Return the start index on success, -1 on failure.
- */
-LOCAL int
-pcibr_ate_alloc(pcibr_soft_t pcibr_soft, int count)
-{
- int index = 0;
-
- index = (int) rmalloc(pcibr_soft->bs_int_ate_map, (size_t) count);
-/* printk("Colin: pcibr_ate_alloc - index %d count %d \n", index, count); */
-
- if (!index && pcibr_soft->bs_ext_ate_map)
- index = (int) rmalloc(pcibr_soft->bs_ext_ate_map, (size_t) count);
-
- /* rmalloc manages resources in the 1..n
- * range, with 0 being failure.
- * pcibr_ate_alloc manages resources
- * in the 0..n-1 range, with -1 being failure.
- */
- return index - 1;
-}
-
-LOCAL void
-pcibr_ate_free(pcibr_soft_t pcibr_soft, int index, int count)
-/* Who says there's no such thing as a free meal? :-) */
-{
- /* note the "+1" since rmalloc handles 1..n but
- * we start counting ATEs at zero.
- */
-/* printk("Colin: pcibr_ate_free - index %d count %d\n", index, count); */
-
- rmfree((index < pcibr_soft->bs_int_ate_size)
- ? pcibr_soft->bs_int_ate_map
- : pcibr_soft->bs_ext_ate_map,
- count, index + 1);
-}
-
-LOCAL pcibr_info_t
-pcibr_info_get(devfs_handle_t vhdl)
-{
- return (pcibr_info_t) pciio_info_get(vhdl);
-}
-
-pcibr_info_t
-pcibr_device_info_new(
- pcibr_soft_t pcibr_soft,
- pciio_slot_t slot,
- pciio_function_t rfunc,
- pciio_vendor_id_t vendor,
- pciio_device_id_t device)
-{
- pcibr_info_t pcibr_info;
- pciio_function_t func;
- int ibit;
-
- func = (rfunc == PCIIO_FUNC_NONE) ? 0 : rfunc;
-
- NEW(pcibr_info);
- pciio_device_info_new(&pcibr_info->f_c,
- pcibr_soft->bs_vhdl,
- slot, rfunc,
- vendor, device);
-
- if (slot != PCIIO_SLOT_NONE) {
-
- /*
- * Currently favored mapping from PCI
- * slot number and INTA/B/C/D to Bridge
- * PCI Interrupt Bit Number:
- *
- * SLOT A B C D
- * 0 0 4 0 4
- * 1 1 5 1 5
- * 2 2 6 2 6
- * 3 3 7 3 7
- * 4 4 0 4 0
- * 5 5 1 5 1
- * 6 6 2 6 2
- * 7 7 3 7 3
- *
- * XXX- allow pcibr_hints to override default
- * XXX- allow ADMIN to override pcibr_hints
- */
- for (ibit = 0; ibit < 4; ++ibit)
- pcibr_info->f_ibit[ibit] =
- (slot + 4 * ibit) & 7;
-
- /*
- * Record the info in the sparse func info space.
- */
- if (func < pcibr_soft->bs_slot[slot].bss_ninfo)
- pcibr_soft->bs_slot[slot].bss_infos[func] = pcibr_info;
- }
- return pcibr_info;
-}
-
-void
-pcibr_device_info_free(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- pcibr_info_t pcibr_info;
- pciio_function_t func;
- pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[slot];
- int nfunc = slotp->bss_ninfo;
-
-
- for (func = 0; func < nfunc; func++) {
- pcibr_info = slotp->bss_infos[func];
-
- if (!pcibr_info)
- continue;
-
- slotp->bss_infos[func] = 0;
- pciio_device_info_unregister(pcibr_vhdl, &pcibr_info->f_c);
- pciio_device_info_free(&pcibr_info->f_c);
- DEL(pcibr_info);
- }
-
- /* Clear the DEVIO(x) for this slot */
- slotp->bss_devio.bssd_space = PCIIO_SPACE_NONE;
- slotp->bss_devio.bssd_base = PCIBR_D32_BASE_UNSET;
- slotp->bss_device = 0;
-
-
- /* Reset the mapping usage counters */
- slotp->bss_pmu_uctr = 0;
- slotp->bss_d32_uctr = 0;
- slotp->bss_d64_uctr = 0;
-
- /* Clear the Direct translation info */
- slotp->bss_d64_base = PCIBR_D64_BASE_UNSET;
- slotp->bss_d64_flags = 0;
- slotp->bss_d32_base = PCIBR_D32_BASE_UNSET;
- slotp->bss_d32_flags = 0;
-
- /* Clear out shadow info necessary for the external SSRAM workaround */
- slotp->bss_ext_ates_active = ATOMIC_INIT(0);
- slotp->bss_cmd_pointer = 0;
- slotp->bss_cmd_shadow = 0;
-
-}
-
-/*
- * PCI_ADDR_SPACE_LIMITS_LOAD
- * Gets the current values of
- * pci io base,
- * pci io last,
- * pci low memory base,
- * pci low memory last,
- * pci high memory base,
- * pci high memory last
- */
-#define PCI_ADDR_SPACE_LIMITS_LOAD() \
- pci_io_fb = pcibr_soft->bs_spinfo.pci_io_base; \
- pci_io_fl = pcibr_soft->bs_spinfo.pci_io_last; \
- pci_lo_fb = pcibr_soft->bs_spinfo.pci_swin_base; \
- pci_lo_fl = pcibr_soft->bs_spinfo.pci_swin_last; \
- pci_hi_fb = pcibr_soft->bs_spinfo.pci_mem_base; \
- pci_hi_fl = pcibr_soft->bs_spinfo.pci_mem_last;
-/*
- * PCI_ADDR_SPACE_LIMITS_STORE
- * Sets the current values of
- * pci io base,
- * pci io last,
- * pci low memory base,
- * pci low memory last,
- * pci high memory base,
- * pci high memory last
- */
-#define PCI_ADDR_SPACE_LIMITS_STORE() \
- pcibr_soft->bs_spinfo.pci_io_base = pci_io_fb; \
- pcibr_soft->bs_spinfo.pci_io_last = pci_io_fl; \
- pcibr_soft->bs_spinfo.pci_swin_base = pci_lo_fb; \
- pcibr_soft->bs_spinfo.pci_swin_last = pci_lo_fl; \
- pcibr_soft->bs_spinfo.pci_mem_base = pci_hi_fb; \
- pcibr_soft->bs_spinfo.pci_mem_last = pci_hi_fl;
-
-#define PCI_ADDR_SPACE_LIMITS_PRINT() \
- printf("+++++++++++++++++++++++\n" \
- "IO base 0x%x last 0x%x\n" \
- "SWIN base 0x%x last 0x%x\n" \
- "MEM base 0x%x last 0x%x\n" \
- "+++++++++++++++++++++++\n", \
- pcibr_soft->bs_spinfo.pci_io_base, \
- pcibr_soft->bs_spinfo.pci_io_last, \
- pcibr_soft->bs_spinfo.pci_swin_base, \
- pcibr_soft->bs_spinfo.pci_swin_last, \
- pcibr_soft->bs_spinfo.pci_mem_base, \
- pcibr_soft->bs_spinfo.pci_mem_last);
-
-/*
- * pcibr_slot_info_init
- * Probe for this slot and see if it is populated.
- * If it is populated initialize the generic PCI infrastructural
- * information associated with this particular PCI device.
- */
-int
-pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- bridge_t *bridge;
- cfg_p cfgw;
- unsigned idword;
- unsigned pfail;
- unsigned idwords[8];
- pciio_vendor_id_t vendor;
- pciio_device_id_t device;
- unsigned htype;
- cfg_p wptr;
- int win;
- pciio_space_t space;
- iopaddr_t pci_io_fb, pci_io_fl;
- iopaddr_t pci_lo_fb, pci_lo_fl;
- iopaddr_t pci_hi_fb, pci_hi_fl;
- int nfunc;
- pciio_function_t rfunc;
- int func;
- devfs_handle_t conn_vhdl;
- pcibr_soft_slot_t slotp;
-
- /* Get the basic software information required to proceed */
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (!pcibr_soft)
- return(EINVAL);
-
- bridge = pcibr_soft->bs_base;
- if (!PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- /* If we have a host slot (e.g. the IOC3 has 2 PCI slots and the initialization
- * is done by the host slot), then we are done.
- */
- if (pcibr_soft->bs_slot[slot].has_host) {
- return(0);
- }
-
- /* Check for a slot with any system critical functions */
- if (pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
- return(EPERM);
-
- /* Load the current values of allocated PCI address spaces */
- PCI_ADDR_SPACE_LIMITS_LOAD();
-
- /* Try to read the device-id/vendor-id from the config space */
- cfgw = bridge->b_type0_cfg_dev[slot].l;
-
- if (pcibr_probe_slot(bridge, cfgw, &idword))
- return(ENODEV);
-
- slotp = &pcibr_soft->bs_slot[slot];
- slotp->slot_status |= SLOT_POWER_UP;
-
- vendor = 0xFFFF & idword;
- /* If the vendor id is not valid then the slot is not populated
- * and we are done.
- */
- if (vendor == 0xFFFF)
- return(ENODEV);
-
- device = 0xFFFF & (idword >> 16);
- htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
-
- nfunc = 1;
- rfunc = PCIIO_FUNC_NONE;
- pfail = 0;
-
- /* NOTE: if a card claims to be multifunction
- * but only responds to config space 0, treat
- * it as a unifunction card.
- */
-
- if (htype & 0x80) { /* MULTIFUNCTION */
- for (func = 1; func < 8; ++func) {
- cfgw = bridge->b_type0_cfg_dev[slot].f[func].l;
- if (pcibr_probe_slot(bridge, cfgw, &idwords[func])) {
- pfail |= 1 << func;
- continue;
- }
- vendor = 0xFFFF & idwords[func];
- if (vendor == 0xFFFF) {
- pfail |= 1 << func;
- continue;
- }
- nfunc = func + 1;
- rfunc = 0;
- }
- cfgw = bridge->b_type0_cfg_dev[slot].l;
- }
- NEWA(pcibr_infoh, nfunc);
-
- pcibr_soft->bs_slot[slot].bss_ninfo = nfunc;
- pcibr_soft->bs_slot[slot].bss_infos = pcibr_infoh;
-
- for (func = 0; func < nfunc; ++func) {
- unsigned cmd_reg;
-
- if (func) {
- if (pfail & (1 << func))
- continue;
-
- idword = idwords[func];
- cfgw = bridge->b_type0_cfg_dev[slot].f[func].l;
-
- device = 0xFFFF & (idword >> 16);
- htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
- rfunc = func;
- }
- htype &= 0x7f;
- if (htype != 0x00) {
- printk(KERN_WARNING "%s pcibr: pci slot %d func %d has strange header type 0x%x\n",
- pcibr_soft->bs_name, slot, func, htype);
- continue;
- }
-#if DEBUG && ATTACH_DEBUG
- printk(KERN_NOTICE
- "%s pcibr: pci slot %d func %d: vendor 0x%x device 0x%x",
- pcibr_soft->bs_name, slot, func, vendor, device);
-#endif
-
- pcibr_info = pcibr_device_info_new
- (pcibr_soft, slot, rfunc, vendor, device);
- conn_vhdl = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);
- if (func == 0)
- slotp->slot_conn = conn_vhdl;
-
-#ifdef LITTLE_ENDIAN
- cmd_reg = cfgw[(PCI_CFG_COMMAND ^ 4) / 4];
-#else
- cmd_reg = cfgw[PCI_CFG_COMMAND / 4];
-#endif
-
- wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
-
- for (win = 0; win < PCI_CFG_BASE_ADDRS; ++win) {
- iopaddr_t base, mask, code;
- size_t size;
-
- /*
- * GET THE BASE & SIZE OF THIS WINDOW:
- *
- * The low two or four bits of the BASE register
- * determines which address space we are in; the
- * rest is a base address. BASE registers
- * determine windows that are power-of-two sized
- * and naturally aligned, so we can get the size
- * of a window by writing all-ones to the
- * register, reading it back, and seeing which
- * bits are used for decode; the least
- * significant nonzero bit is also the size of
- * the window.
- *
- * WARNING: someone may already have allocated
- * some PCI space to this window, and in fact
- * PIO may be in process at this very moment
- * from another processor (or even from this
- * one, if we get interrupted)! So, if the BASE
- * already has a nonzero address, be generous
- * and use the LSBit of that address as the
- * size; this could overstate the window size.
- * Usually, when one card is set up, all are set
- * up; so, since we don't bitch about
- * overlapping windows, we are ok.
- *
- * UNFORTUNATELY, some cards do not clear their
- * BASE registers on reset. I have two heuristics
- * that can detect such cards: first, if the
- * decode enable is turned off for the space
- * that the window uses, we can disregard the
- * initial value. second, if the address is
- * outside the range that we use, we can disregard
- * it as well.
- *
- * This is looking very PCI generic. Except for
- * knowing how many slots and where their config
- * spaces are, this window loop and the next one
- * could probably be shared with other PCI host
- * adapters. It would be interesting to see if
- * this could be pushed up into pciio, when we
- * start supporting more PCI providers.
- */
-#ifdef LITTLE_ENDIAN
- base = wptr[((win*4)^4)/4];
-#else
- base = wptr[win];
-#endif
-
- if (base & PCI_BA_IO_SPACE) {
- /* BASE is in I/O space. */
- space = PCIIO_SPACE_IO;
- mask = -4;
- code = base & 3;
- base = base & mask;
- if (base == 0) {
- ; /* not assigned */
- } else if (!(cmd_reg & PCI_CMD_IO_SPACE)) {
- base = 0; /* decode not enabled */
- }
- } else {
- /* BASE is in MEM space. */
- space = PCIIO_SPACE_MEM;
- mask = -16;
- code = base & PCI_BA_MEM_LOCATION; /* extract BAR type */
- base = base & mask;
- if (base == 0) {
- ; /* not assigned */
- } else if (!(cmd_reg & PCI_CMD_MEM_SPACE)) {
- base = 0; /* decode not enabled */
- } else if (base & 0xC0000000) {
- base = 0; /* outside permissible range */
- } else if ((code == PCI_BA_MEM_64BIT) &&
-#ifdef LITTLE_ENDIAN
- (wptr[(((win + 1)*4)^4)/4] != 0)) {
-#else
- (wptr[win + 1] != 0)) {
-#endif /* LITTLE_ENDIAN */
- base = 0; /* outside permissible range */
- }
- }
-
- if (base != 0) { /* estimate size */
- size = base & -base;
- } else { /* calculate size */
-#ifdef LITTLE_ENDIAN
- wptr[((win*4)^4)/4] = ~0; /* turn on all bits */
- size = wptr[((win*4)^4)/4]; /* get stored bits */
-#else
- wptr[win] = ~0; /* turn on all bits */
- size = wptr[win]; /* get stored bits */
-#endif /* LITTLE_ENDIAN */
- size &= mask; /* keep addr */
- size &= -size; /* keep lsbit */
- if (size == 0)
- continue;
- }
-
- pcibr_info->f_window[win].w_space = space;
- pcibr_info->f_window[win].w_base = base;
- pcibr_info->f_window[win].w_size = size;
-
- /*
- * If this window already has PCI space
- * allocated for it, "subtract" that space from
- * our running freeblocks. Don't worry about
- * overlaps in existing allocated windows; we
- * may be overstating their sizes anyway.
- */
-
- if (base && size) {
- if (space == PCIIO_SPACE_IO) {
- pcibr_freeblock_sub(&pci_io_fb,
- &pci_io_fl,
- base, size);
- } else {
- pcibr_freeblock_sub(&pci_lo_fb,
- &pci_lo_fl,
- base, size);
- pcibr_freeblock_sub(&pci_hi_fb,
- &pci_hi_fl,
- base, size);
- }
- }
-#if defined(IOC3_VENDOR_ID_NUM) && defined(IOC3_DEVICE_ID_NUM)
- /*
- * IOC3 BASE_ADDR* BUG WORKAROUND
- *
- * If we write to BASE1 on the IOC3, the
- * data in BASE0 is replaced. The
- * original workaround was to remember
- * the value of BASE0 and restore it
- * when we ran off the end of the BASE
- * registers; however, a later
- * workaround was added (I think it was
- * rev 1.44) to avoid setting up
- * anything but BASE0, with the comment
- * that writing all ones to BASE1 set
- * the enable-parity-error test feature
- * in IOC3's SCR bit 14.
- *
- * So, unless we defer doing any PCI
- * space allocation until drivers
- * attach, and set up a way for drivers
- * (the IOC3 in particular) to tell us
- * generically to keep our hands off
- * BASE registers, we gotta "know" about
- * the IOC3 here.
- *
- * Too bad the PCI folks didn't reserve the
- * all-zero value for 'no BASE here' (it is a
- * valid code for an uninitialized BASE in
- * 32-bit PCI memory space).
- */
-
- if ((vendor == IOC3_VENDOR_ID_NUM) &&
- (device == IOC3_DEVICE_ID_NUM))
- break;
-#endif
- if (code == PCI_BA_MEM_64BIT) {
- win++; /* skip upper half */
-#ifdef LITTLE_ENDIAN
- wptr[((win*4)^4)/4] = 0; /* which must be zero */
-#else
- wptr[win] = 0; /* which must be zero */
-#endif /* LITTLE_ENDIAN */
- }
- } /* next win */
- } /* next func */
-
- /* Store back the values for allocated PCI address spaces */
- PCI_ADDR_SPACE_LIMITS_STORE();
- return(0);
-}
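
The block comment in the window loop above describes the standard PCI trick for sizing a BAR: write all-ones, read the register back, strip the type bits, and the least significant bit still set is the window size. A minimal standalone sketch of just that arithmetic (bar_size() and its arguments are illustrative helpers, not part of the driver):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Return the decode size implied by a BAR value read back after
     * writing all-ones to it: I/O BARs keep their low 2 bits for type
     * information, memory BARs their low 4 bits. */
    static uint32_t bar_size(uint32_t probed, int is_io)
    {
            uint32_t bits = probed & (is_io ? ~0x3u : ~0xFu);

            return bits & -bits;    /* least significant decode bit == size */
    }

    int main(void)
    {
            /* A 32-bit memory BAR reading back 0xFFF00000 decodes 1 MB. */
            printf("size = 0x%" PRIx32 "\n", bar_size(0xFFF00000u, 0));
            return 0;
    }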
-
-/*
- * pcibr_slot_info_free
- * Remove all the PCI infrastructural information associated
- * with a particular PCI device.
- */
-int
-pcibr_slot_info_free(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- int nfunc;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
-
- pcibr_device_info_free(pcibr_vhdl, slot);
-
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
- DELA(pcibr_infoh,nfunc);
- pcibr_soft->bs_slot[slot].bss_ninfo = 0;
-
- return(0);
-}
-
-int as_debug = 0;
-/*
- * pcibr_slot_addr_space_init
- * Reserve chunks of PCI address space as required by
- * the base registers in the card.
- */
-int
-pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- bridge_t *bridge;
- iopaddr_t pci_io_fb, pci_io_fl;
- iopaddr_t pci_lo_fb, pci_lo_fl;
- iopaddr_t pci_hi_fb, pci_hi_fl;
- size_t align;
- iopaddr_t mask;
- int nbars;
- int nfunc;
- int func;
- int win;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- bridge = pcibr_soft->bs_base;
-
- /* Get the current values for the allocated PCI address spaces */
- PCI_ADDR_SPACE_LIMITS_LOAD();
-
-#ifdef LATER
- if (as_debug)
- PCI_ADDR_SPACE_LIMITS_PRINT();
-#endif
- /* allocate address space,
- * for windows that have not been
- * previously assigned.
- */
- if (pcibr_soft->bs_slot[slot].has_host) {
- return(0);
- }
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- if (nfunc < 1)
- return(EINVAL);
-
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
- if (!pcibr_infoh)
- return(EINVAL);
-
- /*
- * Try to make the DevIO windows not
- * overlap by pushing the "io" and "hi"
- * allocation areas up to the next one
- * or two megabyte bound. This also
- * keeps them from being zero.
- *
- * DO NOT do this with "pci_lo" since
- * the entire "lo" area is only a
- * megabyte, total ...
- */
- align = (slot < 2) ? 0x200000 : 0x100000;
- mask = -align;
- pci_io_fb = (pci_io_fb + align - 1) & mask;
- pci_hi_fb = (pci_hi_fb + align - 1) & mask;
-
- for (func = 0; func < nfunc; ++func) {
- cfg_p cfgw;
- cfg_p wptr;
- pciio_space_t space;
- iopaddr_t base;
- size_t size;
- cfg_p pci_cfg_cmd_reg_p;
- unsigned pci_cfg_cmd_reg;
- unsigned pci_cfg_cmd_reg_add = 0;
-
- pcibr_info = pcibr_infoh[func];
-
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- cfgw = bridge->b_type0_cfg_dev[slot].f[func].l;
- wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
-
- nbars = PCI_CFG_BASE_ADDRS;
-
- for (win = 0; win < nbars; ++win) {
-
- space = pcibr_info->f_window[win].w_space;
- base = pcibr_info->f_window[win].w_base;
- size = pcibr_info->f_window[win].w_size;
-
- if (size < 1)
- continue;
-
- if (base >= size) {
-#if DEBUG && PCI_DEBUG
- printk("pcibr: slot %d func %d window %d is in %d[0x%x..0x%x], alloc by prom\n",
- slot, func, win, space, base, base + size - 1);
-#endif
- continue; /* already allocated */
- }
- align = size; /* ie. 0x00001000 */
- if (align < _PAGESZ)
- align = _PAGESZ; /* ie. 0x00004000 */
- mask = -align; /* ie. 0xFFFFC000 */
-
- switch (space) {
- case PCIIO_SPACE_IO:
- base = (pci_io_fb + align - 1) & mask;
- if ((base + size) > pci_io_fl) {
- base = 0;
- break;
- }
- pci_io_fb = base + size;
- break;
-
- case PCIIO_SPACE_MEM:
-#ifdef LITTLE_ENDIAN
- if ((wptr[((win*4)^4)/4] & PCI_BA_MEM_LOCATION) ==
-#else
- if ((wptr[win] & PCI_BA_MEM_LOCATION) ==
-#endif /* LITTLE_ENDIAN */
- PCI_BA_MEM_1MEG) {
- /* allocate from 20-bit PCI space */
- base = (pci_lo_fb + align - 1) & mask;
- if ((base + size) > pci_lo_fl) {
- base = 0;
- break;
- }
- pci_lo_fb = base + size;
- } else {
- /* allocate from 32-bit or 64-bit PCI space */
- base = (pci_hi_fb + align - 1) & mask;
- if ((base + size) > pci_hi_fl) {
- base = 0;
- break;
- }
- pci_hi_fb = base + size;
- }
- break;
-
- default:
- base = 0;
-#if DEBUG && PCI_DEBUG
- printk("pcibr: slot %d window %d had bad space code %d\n",
- slot, win, space);
-#endif
- }
- pcibr_info->f_window[win].w_base = base;
-#ifdef LITTLE_ENDIAN
- wptr[((win*4)^4)/4] = base;
-#if DEBUG && PCI_DEBUG
- printk("Setting base address 0x%p base 0x%x\n", &(wptr[((win*4)^4)/4]), base);
-#endif
-#else
- wptr[win] = base;
-#endif /* LITTLE_ENDIAN */
-
-#if DEBUG && PCI_DEBUG
- if (base >= size)
- printk("pcibr: slot %d func %d window %d is in %d [0x%x..0x%x], alloc by pcibr\n",
- slot, func, win, space, base, base + size - 1);
- else
- printk("pcibr: slot %d func %d window %d, unable to alloc 0x%x in 0x%p\n",
- slot, func, win, size, space);
-#endif
- } /* next base */
-
- /*
- * Allocate space for the EXPANSION ROM
- * NOTE: DO NOT DO THIS ON AN IOC3,
- * as it blows the system away.
- */
- base = size = 0;
- if ((pcibr_soft->bs_slot[slot].bss_vendor_id != IOC3_VENDOR_ID_NUM) ||
- (pcibr_soft->bs_slot[slot].bss_device_id != IOC3_DEVICE_ID_NUM)) {
-
- wptr = cfgw + PCI_EXPANSION_ROM / 4;
-#ifdef LITTLE_ENDIAN
- wptr[1] = 0xFFFFF000;
- mask = wptr[1];
-#else
- *wptr = 0xFFFFF000;
- mask = *wptr;
-#endif /* LITTLE_ENDIAN */
- if (mask & 0xFFFFF000) {
- size = mask & -mask;
- align = size;
- if (align < _PAGESZ)
- align = _PAGESZ;
- mask = -align;
- base = (pci_hi_fb + align - 1) & mask;
- if ((base + size) > pci_hi_fl)
- base = size = 0;
- else {
- pci_hi_fb = base + size;
-#ifdef LITTLE_ENDIAN
- wptr[1] = base;
-#else
- *wptr = base;
-#endif /* LITTLE_ENDIAN */
-#if DEBUG && PCI_DEBUG
- printk("%s/%d ROM in 0x%lx..0x%lx (alloc by pcibr)\n",
- pcibr_soft->bs_name, slot,
- base, base + size - 1);
-#endif
- }
- }
- }
- pcibr_info->f_rbase = base;
- pcibr_info->f_rsize = size;
-
- /*
- * if necessary, update the board's
- * command register to enable decoding
- * in the windows we added.
- *
- * There are some bits we always want to
- * be sure are set.
- */
- pci_cfg_cmd_reg_add |= PCI_CMD_IO_SPACE;
-
- /*
- * The Adaptec 1160 FC Controller WAR #767995:
- * The part incorrectly ignores the upper 32 bits of a 64-bit
- * address when decoding references to its registers, so to
- * keep it from responding to a bus cycle that it shouldn't,
- * we only use I/O space to get at its registers. Don't
- * enable memory space accesses on that PCI device.
- */
- #define FCADP_VENDID 0x9004 /* Adaptec Vendor ID from fcadp.h */
- #define FCADP_DEVID 0x1160 /* Adaptec 1160 Device ID from fcadp.h */
-
- if ((pcibr_info->f_vendor != FCADP_VENDID) ||
- (pcibr_info->f_device != FCADP_DEVID))
- pci_cfg_cmd_reg_add |= PCI_CMD_MEM_SPACE;
-
- pci_cfg_cmd_reg_add |= PCI_CMD_BUS_MASTER;
-
- pci_cfg_cmd_reg_p = cfgw + PCI_CFG_COMMAND / 4;
- pci_cfg_cmd_reg = *pci_cfg_cmd_reg_p;
-#if PCI_FBBE /* XXX- check here to see if dev can do fast-back-to-back */
- if (!((pci_cfg_cmd_reg >> 16) & PCI_STAT_F_BK_BK_CAP))
- fast_back_to_back_enable = 0;
-#endif
- pci_cfg_cmd_reg &= 0xFFFF;
- if (pci_cfg_cmd_reg_add & ~pci_cfg_cmd_reg)
- *pci_cfg_cmd_reg_p = pci_cfg_cmd_reg | pci_cfg_cmd_reg_add;
-
- } /* next func */
-
- /* Now that we have allocated new chunks of PCI address spaces to this
- * card we need to update the bookkeeping values which indicate
- * the current PCI address space allocations.
- */
- PCI_ADDR_SPACE_LIMITS_STORE();
- return(0);
-}
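
The allocation pattern used throughout the routine above is a simple bump allocator: round the free-block cursor up to the window's natural power-of-two alignment and fail (leaving base at zero, the "unassigned" convention) if the window would overrun the free block. A self-contained sketch of that pattern, with alloc_window and its cursor/limit arguments being illustrative names only:

    #include <stdint.h>
    #include <stdio.h>

    /* Carve a naturally aligned window of 'size' bytes out of the free
     * block [*cursor, limit); return 0 if it does not fit. */
    static uint64_t alloc_window(uint64_t *cursor, uint64_t limit,
                                 uint64_t size, uint64_t align)
    {
            uint64_t base = (*cursor + align - 1) & ~(align - 1);

            if (base + size > limit)
                    return 0;
            *cursor = base + size;          /* bump the cursor past the window */
            return base;
    }

    int main(void)
    {
            uint64_t hi_fb = 0x00200000, hi_fl = 0x3FFFFFFF;

            printf("base = 0x%llx\n", (unsigned long long)
                   alloc_window(&hi_fb, hi_fl, 0x4000, 0x4000));
            return 0;
    }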
-
-/*
- * pcibr_slot_device_init
- * Setup the device register in the bridge for this PCI slot.
- */
-int
-pcibr_slot_device_init(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge;
- bridgereg_t devreg;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- bridge = pcibr_soft->bs_base;
-
- /*
- * Adjustments to Device(x)
- * and init of bss_device shadow
- */
- devreg = bridge->b_device[slot].reg;
- devreg &= ~BRIDGE_DEV_PAGE_CHK_DIS;
- devreg |= BRIDGE_DEV_COH | BRIDGE_DEV_VIRTUAL_EN;
-#ifdef LITTLE_ENDIAN
- devreg |= BRIDGE_DEV_DEV_SWAP;
-#endif
- pcibr_soft->bs_slot[slot].bss_device = devreg;
- bridge->b_device[slot].reg = devreg;
-
-#if DEBUG && PCI_DEBUG
- printk("pcibr Device(%d): 0x%lx\n", slot, bridge->b_device[slot].reg);
-#endif
-
-#if DEBUG && PCI_DEBUG
- printk("pcibr: PCI space allocation done.\n");
-#endif
-
- return(0);
-}
-
-/*
- * pcibr_slot_guest_info_init
- * Setup the host/guest relations for a PCI slot.
- */
-int
-pcibr_slot_guest_info_init(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- pcibr_soft_slot_t slotp;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- slotp = &pcibr_soft->bs_slot[slot];
-
- /* create info and vertices for guest slots;
- * for compatibility macros, create info
- * even for unpopulated slots (but do not
- * build vertices for them).
- */
- if (pcibr_soft->bs_slot[slot].bss_ninfo < 1) {
- NEWA(pcibr_infoh, 1);
- pcibr_soft->bs_slot[slot].bss_ninfo = 1;
- pcibr_soft->bs_slot[slot].bss_infos = pcibr_infoh;
-
- pcibr_info = pcibr_device_info_new
- (pcibr_soft, slot, PCIIO_FUNC_NONE,
- PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
-
- if (pcibr_soft->bs_slot[slot].has_host) {
- slotp->slot_conn = pciio_device_info_register
- (pcibr_vhdl, &pcibr_info->f_c);
- }
- }
-
- /* generate host/guest relations
- */
- if (pcibr_soft->bs_slot[slot].has_host) {
- int host = pcibr_soft->bs_slot[slot].host_slot;
- pcibr_soft_slot_t host_slotp = &pcibr_soft->bs_slot[host];
-
- hwgraph_edge_add(slotp->slot_conn,
- host_slotp->slot_conn,
- EDGE_LBL_HOST);
-
- /* XXX- only gives us one guest edge per
- * host. If/when we have a host with more than
- * one guest, we will need to figure out how
- * the host finds all its guests, and sorts
- * out which one is which.
- */
- hwgraph_edge_add(host_slotp->slot_conn,
- slotp->slot_conn,
- EDGE_LBL_GUEST);
- }
-
- return(0);
-}
-
-/*
- * pcibr_slot_initial_rrb_alloc
- * Allocate a default number of rrbs for this slot on
- * the two channels. This is dictated by the rrb allocation
- * strategy routine defined per platform.
- */
-
-int
-pcibr_slot_initial_rrb_alloc(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- bridge_t *bridge;
- int c0, c1;
- int r;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- bridge = pcibr_soft->bs_base;
-
- /* How many RRBs are on this slot?
- */
- c0 = do_pcibr_rrb_count_valid(bridge, slot);
- c1 = do_pcibr_rrb_count_valid(bridge, slot + PCIBR_RRB_SLOT_VIRTUAL);
-
-#if PCIBR_RRB_DEBUG
- printk("pcibr_attach: slot %d started with %d+%d\n", slot, c0, c1);
-#endif
-
- /* Do we really need any?
- */
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
- pcibr_info = pcibr_infoh[0];
- if ((pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) &&
- !pcibr_soft->bs_slot[slot].has_host) {
- if (c0 > 0)
- do_pcibr_rrb_free(bridge, slot, c0);
- if (c1 > 0)
- do_pcibr_rrb_free(bridge, slot + PCIBR_RRB_SLOT_VIRTUAL, c1);
- pcibr_soft->bs_rrb_valid[slot] = 0x1000;
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL] = 0x1000;
- return(ENODEV);
- }
-
- pcibr_soft->bs_rrb_avail[slot & 1] -= c0 + c1;
- pcibr_soft->bs_rrb_valid[slot] = c0;
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL] = c1;
-
- pcibr_soft->bs_rrb_avail[0] = do_pcibr_rrb_count_avail(bridge, 0);
- pcibr_soft->bs_rrb_avail[1] = do_pcibr_rrb_count_avail(bridge, 1);
-
- r = 3 - (c0 + c1);
-
- if (r > 0) {
- pcibr_soft->bs_rrb_res[slot] = r;
- pcibr_soft->bs_rrb_avail[slot & 1] -= r;
- }
-
-#if PCIBR_RRB_DEBUG
- printk("\t%d+%d+%d",
- 0xFFF & pcibr_soft->bs_rrb_valid[slot],
- 0xFFF & pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
- printk("\n");
-#endif
-
- return(0);
-}
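
The bookkeeping above tops a populated slot up to at least three RRBs across its two channels: whatever is already programmed on the slot counts first, and only the shortfall is noted in bs_rrb_res. A tiny model of that arithmetic (rrb_shortfall is a hypothetical helper name):

    #include <stdio.h>

    /* Given the RRB counts already present on the slot's normal and
     * virtual channels, return how many more to reserve so the slot
     * starts out with at least three in total. */
    static int rrb_shortfall(int c0, int c1)
    {
            int r = 3 - (c0 + c1);

            return r > 0 ? r : 0;
    }

    int main(void)
    {
            printf("started with 1+0 -> reserve %d more\n", rrb_shortfall(1, 0));
            printf("started with 2+2 -> reserve %d more\n", rrb_shortfall(2, 2));
            return 0;
    }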
-
-/*
- * pcibr_slot_call_device_attach
- * This calls the associated driver attach routine for the PCI
- * card in this slot.
- */
-int
-pcibr_slot_call_device_attach(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot,
- int drv_flags)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- async_attach_t aa = NULL;
- int func;
- devfs_handle_t xconn_vhdl,conn_vhdl;
- int nfunc;
- int error_func;
- int error_slot = 0;
- int error = ENODEV;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
-
- if (pcibr_soft->bs_slot[slot].has_host) {
- return(EPERM);
- }
-
- xconn_vhdl = pcibr_soft->bs_conn;
- aa = async_attach_get_info(xconn_vhdl);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
-
- for (func = 0; func < nfunc; ++func) {
-
- pcibr_info = pcibr_infoh[func];
-
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- conn_vhdl = pcibr_info->f_vertex;
-
-#ifdef LATER
- /*
- * Activate if and when we support cdl.
- */
- if (aa)
- async_attach_add_info(conn_vhdl, aa);
-#endif /* LATER */
-
- error_func = pciio_device_attach(conn_vhdl, drv_flags);
-
- pcibr_info->f_att_det_error = error_func;
-
- if (error_func)
- error_slot = error_func;
-
- error = error_slot;
-
- } /* next func */
-
- if (error) {
- if ((error != ENODEV) && (error != EUNATCH))
- pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_INCMPLT;
- } else {
- pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
- }
-
- return(error);
-}
-
-/*
- * pcibr_slot_call_device_detach
- * This calls the associated driver detach routine for the PCI
- * card in this slot.
- */
-int
-pcibr_slot_call_device_detach(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot,
- int drv_flags)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- int func;
- devfs_handle_t conn_vhdl = GRAPH_VERTEX_NONE;
- int nfunc;
- int error_func;
- int error_slot = 0;
- int error = ENODEV;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- if (pcibr_soft->bs_slot[slot].has_host)
- return(EPERM);
-
- /* Make sure that we do not detach a system critical function vertex */
- if(pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
- return(EPERM);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
-
- for (func = 0; func < nfunc; ++func) {
-
- pcibr_info = pcibr_infoh[func];
-
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- conn_vhdl = pcibr_info->f_vertex;
-
- error_func = pciio_device_detach(conn_vhdl, drv_flags);
-
- pcibr_info->f_att_det_error = error_func;
-
- if (error_func)
- error_slot = error_func;
-
- error = error_slot;
-
- } /* next func */
-
- pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
-
- if (error) {
- if ((error != ENODEV) && (error != EUNATCH))
- pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_INCMPLT;
- } else {
- if (conn_vhdl != GRAPH_VERTEX_NONE)
- pcibr_device_unregister(conn_vhdl);
- pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
- }
-
- return(error);
-}
-
-/*
- * pcibr_slot_detach
- * This is a place holder routine to keep track of all the
- * slot-specific freeing that needs to be done.
- */
-int
-pcibr_slot_detach(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot,
- int drv_flags)
-{
- int error;
-
- /* Call the device detach function */
- error = (pcibr_slot_call_device_detach(pcibr_vhdl, slot, drv_flags));
- return (error);
-
-}
-
-/*
- * pcibr_is_slot_sys_critical
- * Check slot for any functions that are system critical.
- * Return 1 if any are system critical or 0 otherwise.
- *
- * This function will always return 0 when called by
- * pcibr_attach() because the system critical vertices
- * have not yet been set in the hwgraph.
- */
-int
-pcibr_is_slot_sys_critical(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- devfs_handle_t conn_vhdl = GRAPH_VERTEX_NONE;
- int nfunc;
- int func;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(0);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
-
- for (func = 0; func < nfunc; ++func) {
-
- pcibr_info = pcibr_infoh[func];
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- conn_vhdl = pcibr_info->f_vertex;
- if (is_sys_critical_vertex(conn_vhdl)) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "%v is a system critical device vertex\n", conn_vhdl);
-#else
- printk(KERN_WARNING "%p is a system critical device vertex\n", (void *)conn_vhdl);
-#endif
- return(1);
- }
-
- }
-
- return(0);
-}
-
-/*
- * pcibr_device_unregister
- * This frees up any hardware resources reserved for this PCI device
- * and removes any PCI infrastructural information setup for it.
- * This is usually used at the time of shutting down of the PCI card.
- */
-int
-pcibr_device_unregister(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info;
- devfs_handle_t pcibr_vhdl;
- pciio_slot_t slot;
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge;
- int error_call;
- int error = 0;
-
- pciio_info = pciio_info_get(pconn_vhdl);
-
- pcibr_vhdl = pciio_info_master_get(pciio_info);
- slot = pciio_info_slot_get(pciio_info);
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge = pcibr_soft->bs_base;
-
- /* Clear all the hardware xtalk resources for this device */
- xtalk_widgetdev_shutdown(pcibr_soft->bs_conn, slot);
-
- /* Flush all the rrbs */
- pcibr_rrb_flush(pconn_vhdl);
-
- /* Free the rrbs allocated to this slot */
- error_call = do_pcibr_rrb_free(bridge, slot,
- pcibr_soft->bs_rrb_valid[slot] +
- pcibr_soft->bs_rrb_valid[slot +
- PCIBR_RRB_SLOT_VIRTUAL]);
-
- if (error_call)
- error = ERANGE;
-
- pcibr_soft->bs_rrb_valid[slot] = 0;
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL] = 0;
- pcibr_soft->bs_rrb_res[slot] = 0;
-
- /* Flush the write buffers !! */
- error_call = pcibr_wrb_flush(pconn_vhdl);
-
- if (error_call)
- error = error_call;
-
- /* Clear the information specific to the slot */
- error_call = pcibr_slot_info_free(pcibr_vhdl, slot);
-
- if (error_call)
- error = error_call;
-
- return(error);
-
-}
-
-/*
- * build a convenience link path in the
- * form of ".../<iobrick>/bus/<busnum>"
- *
- * returns 1 on success, 0 otherwise
- *
- * depends on hwgraph separator == '/'
- */
-int
-pcibr_bus_cnvlink(devfs_handle_t f_c, int slot)
-{
- char dst[MAXDEVNAME];
- char *dp = dst;
- char *cp, *xp;
- int widgetnum;
- char pcibus[8];
- devfs_handle_t nvtx, svtx;
- int rv;
-
-#if DEBUG
- printk("pcibr_bus_cnvlink: slot= %d f_c= %p\n",
- slot, f_c);
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(f_c, dname, 256);
- printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
- }
-#endif
-
- if (GRAPH_SUCCESS != hwgraph_vertex_name_get(f_c, dst, MAXDEVNAME))
- return 0;
-
- /* dst example == /hw/module/001c02/Pbrick/xtalk/8/pci/direct */
-
- /* find the widget number */
- xp = strstr(dst, "/"EDGE_LBL_XTALK"/");
- if (xp == NULL)
- return 0;
- widgetnum = atoi(xp+7);
- if (widgetnum < XBOW_PORT_8 || widgetnum > XBOW_PORT_F)
- return 0;
-
- /* remove "/pci/direct" from path */
- cp = strstr(dst, "/" EDGE_LBL_PCI "/" "direct");
- if (cp == NULL)
- return 0;
- *cp = '\0';
-
- /* get the vertex for the widget */
- if (GRAPH_SUCCESS != hwgraph_traverse(NULL, dp, &svtx))
- return 0;
-
- *xp = '\0'; /* remove "/xtalk/..." from path */
-
- /* dst example now == /hw/module/001c02/Pbrick */
-
- /* get the bus number */
- strcat(dst, "/bus");
- sprintf(pcibus, "%d", p_busnum[widgetnum]);
-
- /* link to bus to widget */
- rv = hwgraph_path_add(NULL, dp, &nvtx);
- if (GRAPH_SUCCESS == rv)
- rv = hwgraph_edge_add(nvtx, svtx, pcibus);
-
- return (rv == GRAPH_SUCCESS);
-}
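
pcibr_bus_cnvlink() above does its work with plain string surgery on the hwgraph path: locate "/xtalk/" to read the widget number, then truncate the copy at "/pci/direct" before re-walking the graph. A standalone sketch of just the string handling, using the example path from the function's own comment:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            /* Example path taken from the comment in pcibr_bus_cnvlink(). */
            char dst[] = "/hw/module/001c02/Pbrick/xtalk/8/pci/direct";
            char *xp = strstr(dst, "/xtalk/");
            char *cp = strstr(dst, "/pci/direct");
            int widgetnum;

            if (!xp || !cp)
                    return 1;
            widgetnum = atoi(xp + strlen("/xtalk/"));
            *cp = '\0';                     /* dst is now ".../Pbrick/xtalk/8" */
            printf("widget %d under %s\n", widgetnum, dst);
            return 0;
    }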
-
-
-/*
- * pcibr_attach: called every time the crosstalk
- * infrastructure is asked to initialize a widget
- * that matches the part number we handed to the
- * registration routine above.
- */
-/*ARGSUSED */
-int
-pcibr_attach(devfs_handle_t xconn_vhdl)
-{
- /* REFERENCED */
- graph_error_t rc;
- devfs_handle_t pcibr_vhdl;
- devfs_handle_t ctlr_vhdl;
- bridge_t *bridge = NULL;
- bridgereg_t id;
- int rev;
- pcibr_soft_t pcibr_soft;
- pcibr_info_t pcibr_info;
- xwidget_info_t info;
- xtalk_intr_t xtalk_intr;
- device_desc_t dev_desc = (device_desc_t)0;
- int slot;
- int ibit;
- devfs_handle_t noslot_conn;
- char devnm[MAXDEVNAME], *s;
- pcibr_hints_t pcibr_hints;
- bridgereg_t b_int_enable;
- unsigned rrb_fixed = 0;
-
- iopaddr_t pci_io_fb, pci_io_fl;
- iopaddr_t pci_lo_fb, pci_lo_fl;
- iopaddr_t pci_hi_fb, pci_hi_fl;
-
- int spl_level;
-#ifdef LATER
- char *nicinfo = (char *)0;
-#endif
-
-#if PCI_FBBE
- int fast_back_to_back_enable;
-#endif
- l1sc_t *scp;
- nasid_t nasid;
-
- async_attach_t aa = NULL;
-
- aa = async_attach_get_info(xconn_vhdl);
-
-#if DEBUG && ATTACH_DEBUG
- printk("pcibr_attach: xconn_vhdl= %p\n", xconn_vhdl);
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(xconn_vhdl, dname, 256);
- printk("%s : path= %s \n", __FUNCTION__, &dname[pos]);
- }
-#endif
-
- /* Set up the PRB for the bridge in CONVEYOR BELT
- * mode. PRBs are set up in the default FIRE-AND-FORGET
- * mode during initialization.
- */
- hub_device_flags_set(xconn_vhdl, HUB_PIO_CONVEYOR);
-
- bridge = (bridge_t *)
- xtalk_piotrans_addr(xconn_vhdl, NULL,
- 0, sizeof(bridge_t), 0);
-
-#ifndef MEDUSA_HACK
- if ((bridge->b_wid_stat & BRIDGE_STAT_PCI_GIO_N) == 0)
- return -1; /* someone else handles GIO bridges. */
-#endif
-
- if (XWIDGET_PART_REV_NUM(bridge->b_wid_id) == XBRIDGE_PART_REV_A)
- NeedXbridgeSwap = 1;
-
- /*
- * Create the vertex for the PCI bus, which we
- * will also use to hold the pcibr_soft and
- * which will be the "master" vertex for all the
- * pciio connection points we will hang off it.
- * This needs to happen before we call nic_bridge_vertex_info,
- * as some of the *_vmc functions need access to the edges.
- *
- * Opening this vertex will provide access to
- * the Bridge registers themselves.
- */
- rc = hwgraph_path_add(xconn_vhdl, EDGE_LBL_PCI, &pcibr_vhdl);
- ASSERT(rc == GRAPH_SUCCESS);
-
- ctlr_vhdl = NULL;
- ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &pcibr_fops, NULL);
-
- ASSERT(ctlr_vhdl != NULL);
-
- /*
- * decode the nic, and hang its stuff off our
- * connection point where other drivers can get
- * at it.
- */
-#ifdef LATER
- nicinfo = BRIDGE_VERTEX_MFG_INFO(xconn_vhdl, (nic_data_t) & bridge->b_nic);
-#endif
-
- /*
- * Get the hint structure; if some NIC callback
- * marked this vertex as "hands-off" then we
- * just return here, before doing anything else.
- */
- pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
-
- if (pcibr_hints && pcibr_hints->ph_hands_off)
- return -1; /* generic operations disabled */
-
- id = bridge->b_wid_id;
- rev = XWIDGET_PART_REV_NUM(id);
-
- hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, (arbitrary_info_t) rev);
-
- /*
- * allocate soft state structure, fill in some
- * fields, and hook it up to our vertex.
- */
- NEW(pcibr_soft);
- BZERO(pcibr_soft, sizeof *pcibr_soft);
- pcibr_soft_set(pcibr_vhdl, pcibr_soft);
-
- pcibr_soft->bs_conn = xconn_vhdl;
- pcibr_soft->bs_vhdl = pcibr_vhdl;
- pcibr_soft->bs_base = bridge;
- pcibr_soft->bs_rev_num = rev;
- pcibr_soft->bs_intr_bits = pcibr_intr_bits;
- if (is_xbridge(bridge)) {
- pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
- pcibr_soft->bs_xbridge = 1;
- } else {
- pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
- pcibr_soft->bs_xbridge = 0;
- }
-
- nasid = NASID_GET(bridge);
- scp = &NODEPDA( NASID_TO_COMPACT_NODEID(nasid) )->module->elsc;
- pcibr_soft->bs_l1sc = scp;
- pcibr_soft->bs_moduleid = iobrick_module_get(scp);
- pcibr_soft->bsi_err_intr = 0;
-
- /* Bridges up through REV C
- * are unable to set the direct
- * byteswappers to BYTE_STREAM.
- */
- if (pcibr_soft->bs_rev_num <= BRIDGE_PART_REV_C) {
- pcibr_soft->bs_pio_end_io = PCIIO_WORD_VALUES;
- pcibr_soft->bs_pio_end_mem = PCIIO_WORD_VALUES;
- }
-#if PCIBR_SOFT_LIST
- {
- pcibr_list_p self;
-
- NEW(self);
- self->bl_soft = pcibr_soft;
- self->bl_vhdl = pcibr_vhdl;
- self->bl_next = pcibr_list;
- pcibr_list = self;
- }
-#endif
-
- /*
- * get the name of this bridge vertex and keep the info. Use this
- * only where it is really needed now: like error interrupts.
- */
- s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
- pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
- strcpy(pcibr_soft->bs_name, s);
-
-#if SHOW_REVS || DEBUG
-#if !DEBUG
- if (kdebug)
-#endif
- printk("%sBridge ASIC: rev %s (code=0x%x) at %s\n",
- is_xbridge(bridge) ? "X" : "",
- (rev == BRIDGE_PART_REV_A) ? "A" :
- (rev == BRIDGE_PART_REV_B) ? "B" :
- (rev == BRIDGE_PART_REV_C) ? "C" :
- (rev == BRIDGE_PART_REV_D) ? "D" :
- (rev == XBRIDGE_PART_REV_A) ? "A" :
- (rev == XBRIDGE_PART_REV_B) ? "B" :
- "unknown",
- rev, pcibr_soft->bs_name);
-#endif
-
- info = xwidget_info_get(xconn_vhdl);
- pcibr_soft->bs_xid = xwidget_info_id_get(info);
- pcibr_soft->bs_master = xwidget_info_master_get(info);
- pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
-
- /*
- * Init bridge lock.
- */
- spin_lock_init(&pcibr_soft->bs_lock);
-
- /*
- * If we have one, process the hints structure.
- */
- if (pcibr_hints) {
- rrb_fixed = pcibr_hints->ph_rrb_fixed;
-
- pcibr_soft->bs_rrb_fixed = rrb_fixed;
-
- if (pcibr_hints->ph_intr_bits)
- pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
-
- for (slot = 0; slot < 8; ++slot) {
- int hslot = pcibr_hints->ph_host_slot[slot] - 1;
-
- if (hslot < 0) {
- pcibr_soft->bs_slot[slot].host_slot = slot;
- } else {
- pcibr_soft->bs_slot[slot].has_host = 1;
- pcibr_soft->bs_slot[slot].host_slot = hslot;
- }
- }
- }
- /*
- * set up initial values for state fields
- */
- for (slot = 0; slot < 8; ++slot) {
- pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
- pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
- pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
- pcibr_soft->bs_slot[slot].bss_ext_ates_active = ATOMIC_INIT(0);
- }
-
- for (ibit = 0; ibit < 8; ++ibit) {
- pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_stat =
- &(bridge->b_int_status);
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
- }
-
- /*
- * Initialize various Bridge registers.
- */
-
- /*
- * On pre-Rev.D bridges, set the PCI_RETRY_CNT
- * to zero to avoid dropping stores. (#475347)
- */
- if (rev < BRIDGE_PART_REV_D)
- bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_MASK;
-
- /*
- * Clear all pending interrupts.
- */
- bridge->b_int_rst_stat = (BRIDGE_IRR_ALL_CLR);
-
- /*
- * Until otherwise set up,
- * assume all interrupts are
- * from slot 7.
- */
- bridge->b_int_device = (uint32_t) 0xffffffff;
-
- {
- bridgereg_t dirmap;
- paddr_t paddr;
- iopaddr_t xbase;
- xwidgetnum_t xport;
- iopaddr_t offset;
- int num_entries = 0;
- int entry;
- cnodeid_t cnodeid;
- nasid_t nasid;
-
- /* Set the Bridge's 32-bit PCI to XTalk
- * Direct Map register to the most useful
- * value we can determine. Note that we
- * must use a single xid for all of:
- * direct-mapped 32-bit DMA accesses
- * direct-mapped 64-bit DMA accesses
- * DMA accesses through the PMU
- * interrupts
- * This is the only way to guarantee that
- * completion interrupts will reach a CPU
- * after all DMA data has reached memory.
- * (Of course, there may be a few special
- * drivers/controllers that explicitly manage
- * this ordering problem.)
- */
-
- cnodeid = 0; /* default node id */
- /*
- * Determine the base address node id to be used for all 32-bit
- * Direct Mapping I/O. The default is node 0, but this can be changed
- * via a DEVICE_ADMIN directive and the PCIBUS_DMATRANS_NODE
- * attribute in the irix.sm config file. A device driver can obtain
- * this node value via a call to pcibr_get_dmatrans_node().
- */
- nasid = COMPACT_TO_NASID_NODEID(cnodeid);
- paddr = NODE_OFFSET(nasid) + 0;
-
- /* currently, we just assume that if we ask
- * for a DMA mapping to "zero" the XIO
- * host will transmute this into a request
- * for the lowest hunk of memory.
- */
- xbase = xtalk_dmatrans_addr(xconn_vhdl, 0,
- paddr, _PAGESZ, 0);
-
- if (xbase != XIO_NOWHERE) {
- if (XIO_PACKED(xbase)) {
- xport = XIO_PORT(xbase);
- xbase = XIO_ADDR(xbase);
- } else
- xport = pcibr_soft->bs_mxid;
-
- offset = xbase & ((1ull << BRIDGE_DIRMAP_OFF_ADDRSHFT) - 1ull);
- xbase >>= BRIDGE_DIRMAP_OFF_ADDRSHFT;
-
- dirmap = xport << BRIDGE_DIRMAP_W_ID_SHFT;
-
- if (xbase)
- dirmap |= BRIDGE_DIRMAP_OFF & xbase;
- else if (offset >= (512 << 20))
- dirmap |= BRIDGE_DIRMAP_ADD512;
-
- bridge->b_dir_map = dirmap;
- }
- /*
- * Set bridge's idea of page size according to the system's
- * idea of "IO page size". TBD: The idea of IO page size
- * should really go away.
- */
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge WAR
- */
- spl_level = splhi();
-#if IOPGSIZE == 4096
- bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
-#elif IOPGSIZE == 16384
- bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
-#else
- <<<Unable to deal with IOPGSIZE >>>;
-#endif
- bridge->b_wid_control; /* inval addr bug war */
- splx(spl_level);
-
- /* Initialize internal mapping entries */
- for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++)
- bridge->b_int_ate_ram[entry].wr = 0;
-
- /*
- * Determine if there's external mapping SSRAM on this
- * bridge. Set up Bridge control register appropriately,
- * initialize SSRAM, and set software up to manage RAM
- * entries as an allocatable resource.
- *
- * Currently, we just use the rm* routines to manage ATE
- * allocation. We should probably replace this with a
- * Best Fit allocator.
- *
- * For now, if we have external SSRAM, avoid using
- * the internal ssram: we can't turn PREFETCH on
- * when we use the internal SSRAM; and besides,
- * this also guarantees that no allocation will
- * straddle the internal/external line, so we
- * can increment ATE write addresses rather than
- * recomparing against BRIDGE_INTERNAL_ATES every
- * time.
- */
- if (is_xbridge(bridge))
- num_entries = 0;
- else
- num_entries = pcibr_init_ext_ate_ram(bridge);
-
- /* we always have 128 ATEs (512 for Xbridge) inside the chip
- * even if disabled for debugging.
- */
- pcibr_soft->bs_int_ate_map = rmallocmap(pcibr_soft->bs_int_ate_size);
- pcibr_ate_free(pcibr_soft, 0, pcibr_soft->bs_int_ate_size);
-#if PCIBR_ATE_DEBUG
- printk("pcibr_attach: %d INTERNAL ATEs\n", pcibr_soft->bs_int_ate_size);
-#endif
-
- if (num_entries > pcibr_soft->bs_int_ate_size) {
-#if PCIBR_ATE_NOTBOTH /* for debug -- forces us to use external ates */
- printk("pcibr_attach: disabling internal ATEs.\n");
- pcibr_ate_alloc(pcibr_soft, pcibr_soft->bs_int_ate_size);
-#endif
- pcibr_soft->bs_ext_ate_map = rmallocmap(num_entries);
- pcibr_ate_free(pcibr_soft, pcibr_soft->bs_int_ate_size,
- num_entries - pcibr_soft->bs_int_ate_size);
-#if PCIBR_ATE_DEBUG
- printk("pcibr_attach: %d EXTERNAL ATEs\n",
- num_entries - pcibr_soft->bs_int_ate_size);
-#endif
- }
- }
-
- {
- bridgereg_t dirmap;
- iopaddr_t xbase;
-
- /*
- * now figure the *real* xtalk base address
- * that dirmap sends us to.
- */
- dirmap = bridge->b_dir_map;
- if (dirmap & BRIDGE_DIRMAP_OFF)
- xbase = (iopaddr_t)(dirmap & BRIDGE_DIRMAP_OFF)
- << BRIDGE_DIRMAP_OFF_ADDRSHFT;
- else if (dirmap & BRIDGE_DIRMAP_ADD512)
- xbase = 512 << 20;
- else
- xbase = 0;
-
- pcibr_soft->bs_dir_xbase = xbase;
-
- /* it is entirely possible that we may, at this
- * point, have our dirmap pointing somewhere
- * other than our "master" port.
- */
- pcibr_soft->bs_dir_xport =
- (dirmap & BRIDGE_DIRMAP_W_ID) >> BRIDGE_DIRMAP_W_ID_SHFT;
- }
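
The two blocks above split a crosstalk base address around BRIDGE_DIRMAP_OFF_ADDRSHFT when programming b_dir_map and then reverse the shift to recover bs_dir_xbase. The round trip below shows only that split-and-recombine arithmetic; the shift and field mask are illustrative stand-ins, not the bridge's real register layout:

    #include <stdint.h>
    #include <stdio.h>

    #define DIRMAP_SHIFT 31                 /* illustrative: a 2 GB direct-mapped window */
    #define DIRMAP_OFF   0x000fffffull      /* illustrative field mask, not the real one */

    int main(void)
    {
            uint64_t xbase = 0x80000000ull;                 /* crosstalk target base */
            uint64_t field = (xbase >> DIRMAP_SHIFT) & DIRMAP_OFF;
            uint64_t back  = field << DIRMAP_SHIFT;         /* what the decode path recovers */

            printf("field 0x%llx -> base 0x%llx\n",
                   (unsigned long long)field, (unsigned long long)back);
            return 0;
    }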
-
- /* pcibr sources an error interrupt;
- * figure out where to send it.
- *
- * If any interrupts are enabled in bridge,
- * then the prom set us up and our interrupt
- * has already been reconnected in mlreset
- * above.
- *
- * Need to set the D_INTR_ISERR flag
- * in the dev_desc used for allocating the
- * error interrupt, so our interrupt will
- * be properly routed and prioritized.
- *
- * If our crosstalk provider wants to
- * fix widget error interrupts to specific
- * destinations, D_INTR_ISERR is how it
- * knows to do this.
- */
-
- xtalk_intr = xtalk_intr_alloc(xconn_vhdl, dev_desc, pcibr_vhdl);
- ASSERT(xtalk_intr != NULL);
-
- pcibr_soft->bsi_err_intr = xtalk_intr;
-
- /*
- * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
- * in order to work around some addressing limitations. In order
- * for that firewall to work properly, we need to make sure we
- * start from a known clean state.
- */
- pcibr_clearwidint(bridge);
-
- xtalk_intr_connect(xtalk_intr, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);
-
- /*
- * now we can start handling error interrupts;
- * enable all of them.
- * NOTE: some PCI ints may already be enabled.
- */
- b_int_enable = bridge->b_int_enable | BRIDGE_ISR_ERRORS;
-
-
- bridge->b_int_enable = b_int_enable;
- bridge->b_int_mode = 0; /* do not send "clear interrupt" packets */
-
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
- /*
- * Depending on the rev of bridge, disable certain features.
- * Easiest way seems to be to force the PCIBR_NOwhatever
- * flag to be on for all DMA calls, which overrides any
- * PCIBR_whatever flag or even the setting of whatever
- * from the PCIIO_DMA_class flags (or even from the other
- * PCIBR flags, since NO overrides YES).
- */
- pcibr_soft->bs_dma_flags = 0;
-
- /* PREFETCH:
- * Always completely disabled for REV.A;
- * at "pcibr_prefetch_enable_rev", anyone
- * asking for PCIIO_PREFETCH gets it.
- * Between these two points, you have to ask
- * for PCIBR_PREFETCH, which promises that
- * your driver knows about known Bridge WARs.
- */
- if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
- pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
- else if (pcibr_soft->bs_rev_num <
- (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_prefetch_enable_rev))
- pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
-
- /* WRITE_GATHER:
- * Disabled up to but not including the
- * rev number in pcibr_wg_enable_rev. There
- * is no "WAR range" as with prefetch.
- */
- if (pcibr_soft->bs_rev_num <
- (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_wg_enable_rev))
- pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
-
- pciio_provider_register(pcibr_vhdl, &pcibr_provider);
- pciio_provider_startup(pcibr_vhdl);
-
- pci_io_fb = 0x00000004; /* I/O FreeBlock Base */
- pci_io_fl = 0xFFFFFFFF; /* I/O FreeBlock Last */
-
- pci_lo_fb = 0x00000010; /* Low Memory FreeBlock Base */
- pci_lo_fl = 0x001FFFFF; /* Low Memory FreeBlock Last */
-
- pci_hi_fb = 0x00200000; /* High Memory FreeBlock Base */
- pci_hi_fl = 0x3FFFFFFF; /* High Memory FreeBlock Last */
-
-
- PCI_ADDR_SPACE_LIMITS_STORE();
-
- /* build "no-slot" connection point
- */
- pcibr_info = pcibr_device_info_new
- (pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
- PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
- noslot_conn = pciio_device_info_register
- (pcibr_vhdl, &pcibr_info->f_c);
-
- /* Remember the no slot connection point info for tearing it
- * down during detach.
- */
- pcibr_soft->bs_noslot_conn = noslot_conn;
- pcibr_soft->bs_noslot_info = pcibr_info;
-#if PCI_FBBE
- fast_back_to_back_enable = 1;
-#endif
-
-#if PCI_FBBE
- if (fast_back_to_back_enable) {
- /*
- * All devices on the bus are capable of fast back to back, so
- * we need to set the fast back to back bit in all devices on
- * the bus that are capable of doing such accesses.
- */
- }
-#endif
-
-#ifdef LATER
- /* If the bridge has been reset then there is no need to reset
- * the individual PCI slots.
- */
- for (slot = 0; slot < 8; ++slot)
- /* Reset all the slots */
- (void)pcibr_slot_reset(pcibr_vhdl, slot);
-#endif
-
- for (slot = 0; slot < 8; ++slot)
- /* Find out what is out there */
- (void)pcibr_slot_info_init(pcibr_vhdl,slot);
-
- for (slot = 0; slot < 8; ++slot)
- /* Set up the address space for this slot in the pci land */
- (void)pcibr_slot_addr_space_init(pcibr_vhdl,slot);
-
- for (slot = 0; slot < 8; ++slot)
- /* Setup the device register */
- (void)pcibr_slot_device_init(pcibr_vhdl, slot);
-
-#ifndef __ia64
- for (slot = 0; slot < 8; ++slot)
- /* Set up convenience links */
- if (is_xbridge(bridge))
- if (pcibr_soft->bs_slot[slot].bss_ninfo > 0) /* if occupied */
- pcibr_bus_cnvlink(pcibr_info->f_vertex, slot);
-#endif
-
- for (slot = 0; slot < 8; ++slot)
- /* Setup host/guest relations */
- (void)pcibr_slot_guest_info_init(pcibr_vhdl,slot);
-
- for (slot = 0; slot < 8; ++slot)
- /* Initial RRB management */
- (void)pcibr_slot_initial_rrb_alloc(pcibr_vhdl,slot);
-
- /* driver attach routines should be called out from generic linux code */
- for (slot = 0; slot < 8; ++slot)
- /* Call the device attach */
- (void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
-
- /*
- * Each Pbrick PCI bus only has slots 1 and 2. Similarly for
- * widget 0xe on Ibricks. Allocate RRB's accordingly.
- */
- if (pcibr_soft->bs_moduleid > 0) {
- switch (MODULE_GET_BTCHAR(pcibr_soft->bs_moduleid)) {
- case 'p': /* Pbrick */
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, 8);
- do_pcibr_rrb_autoalloc(pcibr_soft, 2, 8);
- break;
- case 'i': /* Ibrick */
- /* port 0xe on the Ibrick only has slots 1 and 2 */
- if (pcibr_soft->bs_xid == 0xe) {
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, 8);
- do_pcibr_rrb_autoalloc(pcibr_soft, 2, 8);
- }
- else {
- /* allocate one RRB for the serial port */
- do_pcibr_rrb_autoalloc(pcibr_soft, 0, 1);
- }
- break;
- } /* switch */
- }
-
-#ifdef LATER
- if (strstr(nicinfo, XTALK_PCI_PART_NUM)) {
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, 8);
-#if PCIBR_RRB_DEBUG
- printf("\n\nFound XTALK_PCI (030-1275) at %v\n", xconn_vhdl);
-
- printf("pcibr_attach: %v Shoebox RRB MANAGEMENT: %d+%d free\n",
- pcibr_vhdl,
- pcibr_soft->bs_rrb_avail[0],
- pcibr_soft->bs_rrb_avail[1]);
-
- for (slot = 0; slot < 8; ++slot)
- printf("\t%d+%d+%d",
- 0xFFF & pcibr_soft->bs_rrb_valid[slot],
- 0xFFF & pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
-
- printf("\n");
-#endif
- }
-#else
- FIXME("pcibr_attach: Call do_pcibr_rrb_autoalloc nicinfo\n");
-#endif
-
- if (aa)
- async_attach_add_info(noslot_conn, aa);
-
- pciio_device_attach(noslot_conn, 0);
-
-
- /*
- * Tear down pointer to async attach info -- async threads for
- * bridge's descendants may be running but the bridge's work is done.
- */
- if (aa)
- async_attach_del_info(xconn_vhdl);
-
- return 0;
-}
-/*
- * pcibr_detach:
- * Detach the bridge device from the hwgraph after cleaning out all the
- * underlying vertices.
- */
-int
-pcibr_detach(devfs_handle_t xconn)
-{
- pciio_slot_t slot;
- devfs_handle_t pcibr_vhdl;
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge;
-
- /* Get the bridge vertex from its xtalk connection point */
- if (hwgraph_traverse(xconn, EDGE_LBL_PCI, &pcibr_vhdl) != GRAPH_SUCCESS)
- return(1);
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge = pcibr_soft->bs_base;
-
- /* Disable the interrupts from the bridge */
- bridge->b_int_enable = 0;
-
- /* Detach all the PCI devices talking to this bridge */
- for(slot = 0; slot < 8; slot++) {
-#ifdef DEBUG
- printk("pcibr_device_detach called for %p/%d\n",
- pcibr_vhdl,slot);
-#endif
- pcibr_slot_detach(pcibr_vhdl, slot, 0);
- }
-
- /* Unregister the no-slot connection point */
- pciio_device_info_unregister(pcibr_vhdl,
- &(pcibr_soft->bs_noslot_info->f_c));
-
- spin_lock_destroy(&pcibr_soft->bs_lock);
- kfree(pcibr_soft->bs_name);
-
- /* Error handler gets unregistered when the widget info is
- * cleaned
- */
- /* Free the soft ATE maps */
- if (pcibr_soft->bs_int_ate_map)
- rmfreemap(pcibr_soft->bs_int_ate_map);
- if (pcibr_soft->bs_ext_ate_map)
- rmfreemap(pcibr_soft->bs_ext_ate_map);
-
- /* Disconnect the error interrupt and free the xtalk resources
- * associated with it.
- */
- xtalk_intr_disconnect(pcibr_soft->bsi_err_intr);
- xtalk_intr_free(pcibr_soft->bsi_err_intr);
-
- /* Clear the software state maintained by the bridge driver for this
- * bridge.
- */
- DEL(pcibr_soft);
- /* Remove the Bridge revision labelled info */
- (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);
- /* Remove the character device associated with this bridge */
- (void)hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);
- /* Remove the PCI bridge vertex */
- (void)hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);
-
- return(0);
-}
-
-int
-pcibr_asic_rev(devfs_handle_t pconn_vhdl)
-{
- devfs_handle_t pcibr_vhdl;
- arbitrary_info_t ainfo;
-
- if (GRAPH_SUCCESS !=
- hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
- return -1;
-
- if (GRAPH_SUCCESS !=
- hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo))
- return -1;
-
- return (int) ainfo;
-}
-
-int
-pcibr_write_gather_flush(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- pciio_slot_t slot;
- slot = pciio_info_slot_get(pciio_info);
- pcibr_device_write_gather_flush(pcibr_soft, slot);
- return 0;
-}
-
-/* =====================================================================
- * PIO MANAGEMENT
- */
-
-LOCAL iopaddr_t
-pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
- pciio_slot_t slot,
- pciio_space_t space,
- iopaddr_t pci_addr,
- size_t req_size,
- unsigned flags)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pciio_info_t pciio_info = &pcibr_info->f_c;
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- unsigned bar; /* which BASE reg on device is decoding */
- iopaddr_t xio_addr = XIO_NOWHERE;
-
- pciio_space_t wspace; /* which space device is decoding */
- iopaddr_t wbase; /* base of device decode on PCI */
- size_t wsize; /* size of device decode on PCI */
-
- int try; /* DevIO(x) window scanning order control */
- int win; /* which DevIO(x) window is being used */
- pciio_space_t mspace; /* target space for devio(x) register */
- iopaddr_t mbase; /* base of devio(x) mapped area on PCI */
- size_t msize; /* size of devio(x) mapped area on PCI */
- size_t mmask; /* addr bits stored in Device(x) */
-
- unsigned long s;
-
- s = pcibr_lock(pcibr_soft);
-
- if (pcibr_soft->bs_slot[slot].has_host) {
- slot = pcibr_soft->bs_slot[slot].host_slot;
- pcibr_info = pcibr_soft->bs_slot[slot].bss_infos[0];
- }
- if (space == PCIIO_SPACE_NONE)
- goto done;
-
- if (space == PCIIO_SPACE_CFG) {
- /*
- * Usually, the first mapping
- * established to a PCI device
- * is to its config space.
- *
- * In any case, we definitely
- * do NOT need to worry about
- * PCI BASE registers, and
- * MUST NOT attempt to point
- * the DevIO(x) window at
- * this access ...
- */
- if (((flags & PCIIO_BYTE_STREAM) == 0) &&
- ((pci_addr + req_size) <= BRIDGE_TYPE0_CFG_FUNC_OFF))
- xio_addr = pci_addr + BRIDGE_TYPE0_CFG_DEV(slot);
-
- goto done;
- }
- if (space == PCIIO_SPACE_ROM) {
- /* PIO to the Expansion Rom.
- * Driver is responsible for
- * enabling and disabling
- * decodes properly.
- */
- wbase = pcibr_info->f_rbase;
- wsize = pcibr_info->f_rsize;
-
- /*
- * While the driver should know better
- * than to attempt to map more space
- * than the device is decoding, he might
- * do it; better to bail out here.
- */
- if ((pci_addr + req_size) > wsize)
- goto done;
-
- pci_addr += wbase;
- space = PCIIO_SPACE_MEM;
- }
- /*
- * reduce window mappings to raw
- * space mappings (maybe allocating
- * windows), and try for DevIO(x)
- * usage (setting it if it is available).
- */
- bar = space - PCIIO_SPACE_WIN0;
- if (bar < 6) {
- wspace = pcibr_info->f_window[bar].w_space;
- if (wspace == PCIIO_SPACE_NONE)
- goto done;
-
- /* get PCI base and size */
- wbase = pcibr_info->f_window[bar].w_base;
- wsize = pcibr_info->f_window[bar].w_size;
-
- /*
- * While the driver should know better
- * than to attempt to map more space
- * than the device is decoding, he might
- * do it; better to bail out here.
- */
- if ((pci_addr + req_size) > wsize)
- goto done;
-
- /* shift from window relative to
- * decoded space relative.
- */
- pci_addr += wbase;
- space = wspace;
- } else
- bar = -1;
-
- /* Scan all the DevIO(x) windows twice looking for one
- * that can satisfy our request. The first time through,
- * only look at assigned windows; the second time, also
- * look at PCIIO_SPACE_NONE windows. Arrange the order
- * so we always look at our own window first.
- *
- * We will not attempt to satisfy a single request
- * by concatenating multiple windows.
- */
- for (try = 0; try < 16; ++try) {
- bridgereg_t devreg;
- unsigned offset;
-
- win = (try + slot) % 8;
-
- /* If this DevIO(x) mapping area can provide
- * a mapping to this address, use it.
- */
- msize = (win < 2) ? 0x200000 : 0x100000;
- mmask = -msize;
- if (space != PCIIO_SPACE_IO)
- mmask &= 0x3FFFFFFF;
-
- offset = pci_addr & (msize - 1);
-
- /* If this window can't possibly handle that request,
- * go on to the next window.
- */
- if (((pci_addr & (msize - 1)) + req_size) > msize)
- continue;
-
- devreg = pcibr_soft->bs_slot[win].bss_device;
-
- /* Is this window "nailed down"?
- * If not, maybe we can use it.
- * (only check this the second time through)
- */
- mspace = pcibr_soft->bs_slot[win].bss_devio.bssd_space;
- if ((try > 7) && (mspace == PCIIO_SPACE_NONE)) {
-
- /* If this is the primary DevIO(x) window
- * for some other device, skip it.
- */
- if ((win != slot) &&
- (PCIIO_VENDOR_ID_NONE !=
- pcibr_soft->bs_slot[win].bss_vendor_id))
- continue;
-
- /* It's a free window, and we fit in it.
- * Set up Device(win) to our taste.
- */
- mbase = pci_addr & mmask;
-
- /* check that we would really get from
- * here to there.
- */
- if ((mbase | offset) != pci_addr)
- continue;
-
- devreg &= ~BRIDGE_DEV_OFF_MASK;
- if (space != PCIIO_SPACE_IO)
- devreg |= BRIDGE_DEV_DEV_IO_MEM;
- else
- devreg &= ~BRIDGE_DEV_DEV_IO_MEM;
- devreg |= (mbase >> 20) & BRIDGE_DEV_OFF_MASK;
-
- /* default is WORD_VALUES.
- * if you specify both,
- * operation is undefined.
- */
- if (flags & PCIIO_BYTE_STREAM)
- devreg |= BRIDGE_DEV_DEV_SWAP;
- else
- devreg &= ~BRIDGE_DEV_DEV_SWAP;
-
- if (pcibr_soft->bs_slot[win].bss_device != devreg) {
- bridge->b_device[win].reg = devreg;
- pcibr_soft->bs_slot[win].bss_device = devreg;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
-#if DEBUG && PCI_DEBUG
- printk("pcibr Device(%d): 0x%lx\n", win, bridge->b_device[win].reg);
-#endif
- }
- pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
- pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
- xio_addr = BRIDGE_DEVIO(win) + (pci_addr - mbase);
-
-#if DEBUG && PCI_DEBUG
- printk("%s LINE %d map to space %d space desc 0x%x[%lx..%lx] for slot %d allocates DevIO(%d) devreg 0x%x\n",
- __FUNCTION__, __LINE__, space, space_desc,
- pci_addr, pci_addr + req_size - 1,
- slot, win, devreg);
-#endif
-
- goto done;
- } /* endif DevIO(x) not pointed */
- mbase = pcibr_soft->bs_slot[win].bss_devio.bssd_base;
-
- /* Now check for request incompat with DevIO(x)
- */
- if ((mspace != space) ||
- (pci_addr < mbase) ||
- ((pci_addr + req_size) > (mbase + msize)) ||
- ((flags & PCIIO_BYTE_STREAM) && !(devreg & BRIDGE_DEV_DEV_SWAP)) ||
- (!(flags & PCIIO_BYTE_STREAM) && (devreg & BRIDGE_DEV_DEV_SWAP)))
- continue;
-
- /* DevIO(x) window is pointed at PCI space
- * that includes our target. Calculate the
- * final XIO address, release the lock and
- * return.
- */
- xio_addr = BRIDGE_DEVIO(win) + (pci_addr - mbase);
-
-#if DEBUG && PCI_DEBUG
- printk("%s LINE %d map to space %d [0x%p..0x%p] for slot %d uses DevIO(%d)\n",
- __FUNCTION__, __LINE__, space, pci_addr, pci_addr + req_size - 1, slot, win);
-#endif
- goto done;
- }
-
- switch (space) {
- /*
- * Accesses to device decode
- * areas that do not fit
- * within the DevIO(x) space are
- * modified to be accesses via
- * the direct mapping areas.
- *
- * If necessary, drivers can
- * explicitly ask for mappings
- * into these address spaces,
- * but this should never be needed.
- */
- case PCIIO_SPACE_MEM: /* "mem space" */
- case PCIIO_SPACE_MEM32: /* "mem, use 32-bit-wide bus" */
- if ((pci_addr + BRIDGE_PCI_MEM32_BASE + req_size - 1) <=
- BRIDGE_PCI_MEM32_LIMIT)
- xio_addr = pci_addr + BRIDGE_PCI_MEM32_BASE;
- break;
-
- case PCIIO_SPACE_MEM64: /* "mem, use 64-bit-wide bus" */
- if ((pci_addr + BRIDGE_PCI_MEM64_BASE + req_size - 1) <=
- BRIDGE_PCI_MEM64_LIMIT)
- xio_addr = pci_addr + BRIDGE_PCI_MEM64_BASE;
- break;
-
- case PCIIO_SPACE_IO: /* "i/o space" */
- /* Bridge Hardware Bug WAR #482741:
- * The 4G area that maps directly from
- * XIO space to PCI I/O space is busted
- * until Bridge Rev D.
- */
- if ((pcibr_soft->bs_rev_num > BRIDGE_PART_REV_C) &&
- ((pci_addr + BRIDGE_PCI_IO_BASE + req_size - 1) <=
- BRIDGE_PCI_IO_LIMIT))
- xio_addr = pci_addr + BRIDGE_PCI_IO_BASE;
- break;
- }
-
- /* Check that "Direct PIO" byteswapping matches,
- * try to change it if it does not.
- */
- if (xio_addr != XIO_NOWHERE) {
- unsigned bst; /* nonzero to set bytestream */
- unsigned *bfp; /* addr of record of how swapper is set */
- unsigned swb; /* which control bit to mung */
- unsigned bfo; /* current swapper setting */
- unsigned bfn; /* desired swapper setting */
-
- bfp = ((space == PCIIO_SPACE_IO)
- ? (&pcibr_soft->bs_pio_end_io)
- : (&pcibr_soft->bs_pio_end_mem));
-
- bfo = *bfp;
-
- bst = flags & PCIIO_BYTE_STREAM;
-
- bfn = bst ? PCIIO_BYTE_STREAM : PCIIO_WORD_VALUES;
-
- if (bfn == bfo) { /* we already match. */
- ;
- } else if (bfo != 0) { /* we have a conflict. */
-#if DEBUG && PCI_DEBUG
- printk("pcibr_addr_pci_to_xio: swap conflict in space %d , was%s%s, want%s%s\n",
- space,
- bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
- bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
- bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
- bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
-#endif
- xio_addr = XIO_NOWHERE;
- } else { /* OK to make the change. */
- bridgereg_t octl, nctl;
-
- swb = (space == PCIIO_SPACE_IO) ? BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
- octl = bridge->b_wid_control;
- nctl = bst ? octl | swb : octl & ~swb;
-
- if (octl != nctl) /* make the change if any */
- bridge->b_wid_control = nctl;
-
- *bfp = bfn; /* record the assignment */
-
-#if DEBUG && PCI_DEBUG
- printk("pcibr_addr_pci_to_xio: swap for space %d set to%s%s\n",
- space,
- bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
- bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
-#endif
- }
- }
- done:
- pcibr_unlock(pcibr_soft, s);
- return xio_addr;
-}
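
The DevIO(x) search above makes sixteen passes over the eight windows, starting at the requesting slot so a device's own window is always examined first; windows still marked PCIIO_SPACE_NONE only become eligible for capture on the second pass (try > 7). A small sketch of that scan order:

    #include <stdio.h>

    int main(void)
    {
            int slot = 3, try;

            for (try = 0; try < 16; ++try) {
                    int win = (try + slot) % 8;

                    printf("try %2d -> DevIO(%d)%s\n", try, win,
                           try > 7 ? " (may claim if unassigned)" : "");
            }
            return 0;
    }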
-
-/*ARGSUSED6 */
-pcibr_piomap_t
-pcibr_piomap_alloc(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- pciio_space_t space,
- iopaddr_t pci_addr,
- size_t req_size,
- size_t req_size_max,
- unsigned flags)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pciio_info_t pciio_info = &pcibr_info->f_c;
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
-
- pcibr_piomap_t *mapptr;
- pcibr_piomap_t maplist;
- pcibr_piomap_t pcibr_piomap;
- iopaddr_t xio_addr;
- xtalk_piomap_t xtalk_piomap;
- unsigned long s;
-
- /* Make sure that the req sizes are non-zero */
- if ((req_size < 1) || (req_size_max < 1))
- return NULL;
-
- /*
- * Code to translate slot/space/addr
- * into xio_addr is common between
- * this routine and pcibr_piotrans_addr.
- */
- xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
-
- if (xio_addr == XIO_NOWHERE)
- return NULL;
-
- /* Check the piomap list to see if there is already an allocated
- * piomap entry but not in use. If so use that one. Otherwise
- * allocate a new piomap entry and add it to the piomap list
- */
- mapptr = &(pcibr_info->f_piomap);
-
- s = pcibr_lock(pcibr_soft);
- for (pcibr_piomap = *mapptr;
- pcibr_piomap != NULL;
- pcibr_piomap = pcibr_piomap->bp_next) {
- if (pcibr_piomap->bp_mapsz == 0)
- break;
- }
-
- if (pcibr_piomap)
- mapptr = NULL;
- else {
- pcibr_unlock(pcibr_soft, s);
- NEW(pcibr_piomap);
- }
-
- pcibr_piomap->bp_dev = pconn_vhdl;
- pcibr_piomap->bp_slot = pciio_slot;
- pcibr_piomap->bp_flags = flags;
- pcibr_piomap->bp_space = space;
- pcibr_piomap->bp_pciaddr = pci_addr;
- pcibr_piomap->bp_mapsz = req_size;
- pcibr_piomap->bp_soft = pcibr_soft;
- pcibr_piomap->bp_toc[0] = ATOMIC_INIT(0);
-
- if (mapptr) {
- s = pcibr_lock(pcibr_soft);
- maplist = *mapptr;
- pcibr_piomap->bp_next = maplist;
- *mapptr = pcibr_piomap;
- }
- pcibr_unlock(pcibr_soft, s);
-
-
- if (pcibr_piomap) {
- xtalk_piomap =
- xtalk_piomap_alloc(xconn_vhdl, 0,
- xio_addr,
- req_size, req_size_max,
- flags & PIOMAP_FLAGS);
- if (xtalk_piomap) {
- pcibr_piomap->bp_xtalk_addr = xio_addr;
- pcibr_piomap->bp_xtalk_pio = xtalk_piomap;
- } else {
- pcibr_piomap->bp_mapsz = 0;
- pcibr_piomap = 0;
- }
- }
- return pcibr_piomap;
-}
-
-/*ARGSUSED */
-void
-pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)
-{
- xtalk_piomap_free(pcibr_piomap->bp_xtalk_pio);
- pcibr_piomap->bp_xtalk_pio = 0;
- pcibr_piomap->bp_mapsz = 0;
-}
-
-/*ARGSUSED */
-caddr_t
-pcibr_piomap_addr(pcibr_piomap_t pcibr_piomap,
- iopaddr_t pci_addr,
- size_t req_size)
-{
- return xtalk_piomap_addr(pcibr_piomap->bp_xtalk_pio,
- pcibr_piomap->bp_xtalk_addr +
- pci_addr - pcibr_piomap->bp_pciaddr,
- req_size);
-}
-
-/*ARGSUSED */
-void
-pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
-{
- xtalk_piomap_done(pcibr_piomap->bp_xtalk_pio);
-}
-
-/*ARGSUSED */
-caddr_t
-pcibr_piotrans_addr(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- pciio_space_t space,
- iopaddr_t pci_addr,
- size_t req_size,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
-
- iopaddr_t xio_addr;
-
- xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
-
- if (xio_addr == XIO_NOWHERE)
- return NULL;
-
- return xtalk_piotrans_addr(xconn_vhdl, 0, xio_addr, req_size, flags & PIOMAP_FLAGS);
-}
-
-/*
- * PIO Space allocation and management.
- * Allocate and Manage the PCI PIO space (mem and io space)
- * This routine is pretty simplistic at this time, and
- * does pretty trivial management of allocation and freeing.
- * The current scheme is prone to fragmentation.
- * Change the scheme to use bitmaps.
- */
-
-/*ARGSUSED */
-iopaddr_t
-pcibr_piospace_alloc(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- pciio_space_t space,
- size_t req_size,
- size_t alignment)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pciio_info_t pciio_info = &pcibr_info->f_c;
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
- pciio_piospace_t piosp;
- unsigned long s;
-
- iopaddr_t *pciaddr, *pcilast;
- iopaddr_t start_addr;
- size_t align_mask;
-
- /*
- * Check for proper alignment
- */
- ASSERT(alignment >= NBPP);
- ASSERT((alignment & (alignment - 1)) == 0);
-
- align_mask = alignment - 1;
- s = pcibr_lock(pcibr_soft);
-
- /*
-     * First check whether a previously allocated chunk exists.
- */
- if ((piosp = pcibr_info->f_piospace)) {
- /*
-	 * Look through the list for a right-sized free chunk.
- */
- do {
- if (piosp->free &&
- (piosp->space == space) &&
- (piosp->count >= req_size) &&
- !(piosp->start & align_mask)) {
- piosp->free = 0;
- pcibr_unlock(pcibr_soft, s);
- return piosp->start;
- }
- piosp = piosp->next;
- } while (piosp);
- }
- ASSERT(!piosp);
-
- switch (space) {
- case PCIIO_SPACE_IO:
- pciaddr = &pcibr_soft->bs_spinfo.pci_io_base;
- pcilast = &pcibr_soft->bs_spinfo.pci_io_last;
- break;
- case PCIIO_SPACE_MEM:
- case PCIIO_SPACE_MEM32:
- pciaddr = &pcibr_soft->bs_spinfo.pci_mem_base;
- pcilast = &pcibr_soft->bs_spinfo.pci_mem_last;
- break;
- default:
- ASSERT(0);
- pcibr_unlock(pcibr_soft, s);
- return 0;
- }
-
- start_addr = *pciaddr;
-
- /*
- * Align start_addr.
- */
- if (start_addr & align_mask)
- start_addr = (start_addr + align_mask) & ~align_mask;
-
- if ((start_addr + req_size) > *pcilast) {
- /*
- * If too big a request, reject it.
- */
- pcibr_unlock(pcibr_soft, s);
- return 0;
- }
- *pciaddr = (start_addr + req_size);
-
- NEW(piosp);
- piosp->free = 0;
- piosp->space = space;
- piosp->start = start_addr;
- piosp->count = req_size;
- piosp->next = pcibr_info->f_piospace;
- pcibr_info->f_piospace = piosp;
-
- pcibr_unlock(pcibr_soft, s);
- return start_addr;
-}
-
-/*ARGSUSED */
-void
-pcibr_piospace_free(devfs_handle_t pconn_vhdl,
- pciio_space_t space,
- iopaddr_t pciaddr,
- size_t req_size)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
-
- pciio_piospace_t piosp;
- unsigned long s;
- char name[1024];
-
- /*
- * Look through the bridge data structures for the pciio_piospace_t
- * structure corresponding to 'pciaddr'
- */
- s = pcibr_lock(pcibr_soft);
- piosp = pcibr_info->f_piospace;
- while (piosp) {
- /*
-	 * A piospace free can only be done for the
-	 * complete chunk, not for parts of it.
- */
- if (piosp->start == pciaddr) {
- if (piosp->count == req_size)
- break;
- /*
-	     * Improper size passed for freeing.
-	     * Print a message and break.
- */
- hwgraph_vertex_name_get(pconn_vhdl, name, 1024);
-	    printk(KERN_WARNING "pcibr_piospace_free: error\n");
-	    printk(KERN_WARNING "Device %s freeing size (0x%lx) different than allocated (0x%lx)\n",
-		   name, req_size, piosp->count);
-	    printk(KERN_WARNING "Freeing 0x%lx instead\n", piosp->count);
- break;
- }
- piosp = piosp->next;
- }
-
- if (!piosp) {
- printk(KERN_WARNING
- "pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
- pciaddr, req_size);
- pcibr_unlock(pcibr_soft, s);
- return;
- }
- piosp->free = 1;
- pcibr_unlock(pcibr_soft, s);
- return;
-}
-
-/* =====================================================================
- * DMA MANAGEMENT
- *
- *      The Bridge ASIC provides three methods of doing
- *      DMA: via a "direct map" register available in
- *      32-bit PCI space (which selects a contiguous 2G
- *      address space on some other widget), via
- *      "direct" addressing in 64-bit PCI space (all
- *      destination information comes from the PCI
- *      address, including transfer attributes), and via
- *      a "mapped" region that allows a number of
- *      different small mappings to be established with
- *      the PMU.
- *
- *      For efficiency, we prefer to use the 32-bit
- *      direct mapping facility, since it requires no
- *      resource allocation. The advantage of using the
- *      PMU over the 64-bit direct map is that single-cycle
- *      PCI addressing can be used; the advantage of
- *      using 64-bit direct over PMU addressing is that
- *      we do not have to allocate entries in the PMU.
- */
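-/*
- * In rough terms, the allocation code below tries these mechanisms in
- * order: 64-bit direct when the caller passes PCIIO_DMA_A64, 32-bit
- * direct when PCIIO_FIXED is requested (or, for pcibr_dmatrans_addr(),
- * when the target falls inside the direct32 window), and otherwise
- * PMU-mapped ATEs, the only method that consumes map-RAM resources.
- */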
-
-/*
- * Convert PCI-generic software flags and Bridge-specific software flags
- * into Bridge-specific Direct Map attribute bits.
- */
-LOCAL iopaddr_t
-pcibr_flags_to_d64(unsigned flags, pcibr_soft_t pcibr_soft)
-{
- iopaddr_t attributes = 0;
-
- /* Sanity check: Bridge only allows use of VCHAN1 via 64-bit addrs */
-#ifdef LATER
- ASSERT_ALWAYS(!(flags & PCIBR_VCHAN1) || (flags & PCIIO_DMA_A64));
-#endif
-
- /* Generic macro flags
- */
- if (flags & PCIIO_DMA_DATA) { /* standard data channel */
- attributes &= ~PCI64_ATTR_BAR; /* no barrier bit */
- attributes |= PCI64_ATTR_PREF; /* prefetch on */
- }
- if (flags & PCIIO_DMA_CMD) { /* standard command channel */
- attributes |= PCI64_ATTR_BAR; /* barrier bit on */
- attributes &= ~PCI64_ATTR_PREF; /* disable prefetch */
- }
- /* Generic detail flags
- */
- if (flags & PCIIO_PREFETCH)
- attributes |= PCI64_ATTR_PREF;
- if (flags & PCIIO_NOPREFETCH)
- attributes &= ~PCI64_ATTR_PREF;
-
- /* the swap bit is in the address attributes for xbridge */
- if (pcibr_soft->bs_xbridge) {
- if (flags & PCIIO_BYTE_STREAM)
- attributes |= PCI64_ATTR_SWAP;
- if (flags & PCIIO_WORD_VALUES)
- attributes &= ~PCI64_ATTR_SWAP;
- }
-
- /* Provider-specific flags
- */
- if (flags & PCIBR_BARRIER)
- attributes |= PCI64_ATTR_BAR;
- if (flags & PCIBR_NOBARRIER)
- attributes &= ~PCI64_ATTR_BAR;
-
- if (flags & PCIBR_PREFETCH)
- attributes |= PCI64_ATTR_PREF;
- if (flags & PCIBR_NOPREFETCH)
- attributes &= ~PCI64_ATTR_PREF;
-
- if (flags & PCIBR_PRECISE)
- attributes |= PCI64_ATTR_PREC;
- if (flags & PCIBR_NOPRECISE)
- attributes &= ~PCI64_ATTR_PREC;
-
- if (flags & PCIBR_VCHAN1)
- attributes |= PCI64_ATTR_VIRTUAL;
- if (flags & PCIBR_VCHAN0)
- attributes &= ~PCI64_ATTR_VIRTUAL;
-
- return (attributes);
-}
-
-/*
- * Convert PCI-generic software flags and Bridge-specific software flags
- * into Bridge-specific Address Translation Entry attribute bits.
- */
-LOCAL bridge_ate_t
-pcibr_flags_to_ate(unsigned flags)
-{
- bridge_ate_t attributes;
-
- /* default if nothing specified:
- * NOBARRIER
- * NOPREFETCH
- * NOPRECISE
- * COHERENT
- * Plus the valid bit
- */
- attributes = ATE_CO | ATE_V;
-
- /* Generic macro flags
- */
- if (flags & PCIIO_DMA_DATA) { /* standard data channel */
- attributes &= ~ATE_BAR; /* no barrier */
- attributes |= ATE_PREF; /* prefetch on */
- }
- if (flags & PCIIO_DMA_CMD) { /* standard command channel */
- attributes |= ATE_BAR; /* barrier bit on */
- attributes &= ~ATE_PREF; /* disable prefetch */
- }
- /* Generic detail flags
- */
- if (flags & PCIIO_PREFETCH)
- attributes |= ATE_PREF;
- if (flags & PCIIO_NOPREFETCH)
- attributes &= ~ATE_PREF;
-
- /* Provider-specific flags
- */
- if (flags & PCIBR_BARRIER)
- attributes |= ATE_BAR;
- if (flags & PCIBR_NOBARRIER)
- attributes &= ~ATE_BAR;
-
- if (flags & PCIBR_PREFETCH)
- attributes |= ATE_PREF;
- if (flags & PCIBR_NOPREFETCH)
- attributes &= ~ATE_PREF;
-
- if (flags & PCIBR_PRECISE)
- attributes |= ATE_PREC;
- if (flags & PCIBR_NOPRECISE)
- attributes &= ~ATE_PREC;
-
- return (attributes);
-}
-
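-/*
- * Hypothetical usage sketch (not taken from a real caller): a driver
- * would typically pair these entry points roughly as
- *
- *	map     = pcibr_dmamap_alloc(pconn_vhdl, 0, nbytes, PCIIO_DMA_DATA);
- *	busaddr = pcibr_dmamap_addr(map, paddr, nbytes);
- *	... let the device DMA to/from busaddr ...
- *	pcibr_dmamap_done(map);
- *	pcibr_dmamap_free(map);
- */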
-/*ARGSUSED */
-pcibr_dmamap_t
-pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- size_t req_size_max,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- pciio_slot_t slot;
- xwidgetnum_t xio_port;
-
- xtalk_dmamap_t xtalk_dmamap;
- pcibr_dmamap_t pcibr_dmamap;
- int ate_count;
- int ate_index;
-
- /* merge in forced flags */
- flags |= pcibr_soft->bs_dma_flags;
-
-#ifdef IRIX
- NEWf(pcibr_dmamap, flags);
-#else
- /*
- * On SNIA64, these maps are pre-allocated because pcibr_dmamap_alloc()
- * can be called within an interrupt thread.
- */
- pcibr_dmamap = (pcibr_dmamap_t)get_free_pciio_dmamap(pcibr_soft->bs_vhdl);
-#endif
-
- if (!pcibr_dmamap)
- return 0;
-
- xtalk_dmamap = xtalk_dmamap_alloc(xconn_vhdl, dev_desc, req_size_max,
- flags & DMAMAP_FLAGS);
- if (!xtalk_dmamap) {
-#if PCIBR_ATE_DEBUG
-		printk("pcibr_dmamap_alloc: xtalk_dmamap_alloc failed\n");
-#endif
-#ifdef IRIX
- DEL(pcibr_dmamap);
-#else
- free_pciio_dmamap(pcibr_dmamap);
-#endif
- return 0;
- }
- xio_port = pcibr_soft->bs_mxid;
- slot = pciio_info_slot_get(pciio_info);
-
- pcibr_dmamap->bd_dev = pconn_vhdl;
- pcibr_dmamap->bd_slot = slot;
- pcibr_dmamap->bd_soft = pcibr_soft;
- pcibr_dmamap->bd_xtalk = xtalk_dmamap;
- pcibr_dmamap->bd_max_size = req_size_max;
- pcibr_dmamap->bd_xio_port = xio_port;
-
- if (flags & PCIIO_DMA_A64) {
- if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D64_BITS)) {
- iopaddr_t pci_addr;
- int have_rrbs;
- int min_rrbs;
-
- /* Device is capable of A64 operations,
- * and the attributes of the DMA are
- * consistent with any previous DMA
- * mappings using shared resources.
- */
-
- pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
-
- pcibr_dmamap->bd_flags = flags;
- pcibr_dmamap->bd_xio_addr = 0;
- pcibr_dmamap->bd_pci_addr = pci_addr;
-
- /* Make sure we have an RRB (or two).
- */
- if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
- if (flags & PCIBR_VCHAN1)
- slot += PCIBR_RRB_SLOT_VIRTUAL;
- have_rrbs = pcibr_soft->bs_rrb_valid[slot];
- if (have_rrbs < 2) {
- if (pci_addr & PCI64_ATTR_PREF)
- min_rrbs = 2;
- else
- min_rrbs = 1;
- if (have_rrbs < min_rrbs)
- do_pcibr_rrb_autoalloc(pcibr_soft, slot, min_rrbs - have_rrbs);
- }
- }
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: using direct64\n");
-#endif
- return pcibr_dmamap;
- }
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: unable to use direct64\n");
-#endif
- flags &= ~PCIIO_DMA_A64;
- }
- if (flags & PCIIO_FIXED) {
- /* warning: mappings may fail later,
- * if direct32 can't get to the address.
- */
- if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) {
- /* User desires DIRECT A32 operations,
- * and the attributes of the DMA are
- * consistent with any previous DMA
- * mappings using shared resources.
- * Mapping calls may fail if target
- * is outside the direct32 range.
- */
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: using direct32\n");
-#endif
- pcibr_dmamap->bd_flags = flags;
- pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
- pcibr_dmamap->bd_pci_addr = PCI32_DIRECT_BASE;
- return pcibr_dmamap;
- }
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: unable to use direct32\n");
-#endif
-	/* If the user demands FIXED and we
-	 * cannot provide it, fail.
- */
- xtalk_dmamap_free(xtalk_dmamap);
-#ifdef IRIX
- DEL(pcibr_dmamap);
-#else
- free_pciio_dmamap(pcibr_dmamap);
-#endif
- return 0;
- }
- /*
- * Allocate Address Translation Entries from the mapping RAM.
- * Unless the PCIBR_NO_ATE_ROUNDUP flag is specified,
- * the maximum number of ATEs is based on the worst-case
-     * scenario, where the requested target starts at the
-     * last byte of an I/O page; thus, mapping IOPGSIZE+2
-     * bytes does end up requiring three ATEs.
- */
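-    /*
-     * Worked example (illustrative only, assuming IOPGSIZE == 4096):
-     * an IOPGSIZE+2 == 4098 byte request whose target starts at page
-     * offset 4095 covers byte offsets 4095..8192, i.e. I/O pages 0, 1
-     * and 2, so three ATEs are needed.  The rounded-up formula below
-     * computes IOPG(4095 + 4098 - 1) + 1 = IOPG(8192) + 1 = 3.
-     */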
- if (!(flags & PCIBR_NO_ATE_ROUNDUP)) {
- ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
- +req_size_max /* max mapping bytes */
- - 1) + 1; /* round UP */
- } else { /* assume requested target is page aligned */
- ate_count = IOPG(req_size_max /* max mapping bytes */
- - 1) + 1; /* round UP */
- }
-
- ate_index = pcibr_ate_alloc(pcibr_soft, ate_count);
-
- if (ate_index != -1) {
- if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
- bridge_ate_t ate_proto;
- int have_rrbs;
- int min_rrbs;
-
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: using PMU\n");
-#endif
-
- ate_proto = pcibr_flags_to_ate(flags);
-
- pcibr_dmamap->bd_flags = flags;
- pcibr_dmamap->bd_pci_addr =
- PCI32_MAPPED_BASE + IOPGSIZE * ate_index;
- /*
- * for xbridge the byte-swap bit == bit 29 of PCI address
- */
- if (pcibr_soft->bs_xbridge) {
- if (flags & PCIIO_BYTE_STREAM)
- ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
- /*
- * If swap was set in bss_device in pcibr_endian_set()
- * we need to change the address bit.
- */
- if (pcibr_soft->bs_slot[slot].bss_device &
- BRIDGE_DEV_SWAP_PMU)
- ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
- if (flags & PCIIO_WORD_VALUES)
- ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
- }
- pcibr_dmamap->bd_xio_addr = 0;
- pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
- pcibr_dmamap->bd_ate_index = ate_index;
- pcibr_dmamap->bd_ate_count = ate_count;
- pcibr_dmamap->bd_ate_proto = ate_proto;
-
- /* Make sure we have an RRB (or two).
- */
- if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
- have_rrbs = pcibr_soft->bs_rrb_valid[slot];
- if (have_rrbs < 2) {
- if (ate_proto & ATE_PREF)
- min_rrbs = 2;
- else
- min_rrbs = 1;
- if (have_rrbs < min_rrbs)
- do_pcibr_rrb_autoalloc(pcibr_soft, slot, min_rrbs - have_rrbs);
- }
- }
- if (ate_index >= pcibr_soft->bs_int_ate_size &&
- !pcibr_soft->bs_xbridge) {
- bridge_t *bridge = pcibr_soft->bs_base;
- volatile unsigned *cmd_regp;
- unsigned cmd_reg;
- unsigned long s;
-
- pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_SSRAM;
-
- s = pcibr_lock(pcibr_soft);
- cmd_regp = &(bridge->
- b_type0_cfg_dev[slot].
- l[PCI_CFG_COMMAND / 4]);
- cmd_reg = *cmd_regp;
- pcibr_soft->bs_slot[slot].bss_cmd_pointer = cmd_regp;
- pcibr_soft->bs_slot[slot].bss_cmd_shadow = cmd_reg;
- pcibr_unlock(pcibr_soft, s);
- }
- return pcibr_dmamap;
- }
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: unable to use PMU\n");
-#endif
- pcibr_ate_free(pcibr_soft, ate_index, ate_count);
- }
- /* total failure: sorry, you just can't
- * get from here to there that way.
- */
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: complete failure.\n");
-#endif
- xtalk_dmamap_free(xtalk_dmamap);
-#ifdef IRIX
- DEL(pcibr_dmamap);
-#else
- free_pciio_dmamap(pcibr_dmamap);
-#endif
- return 0;
-}
-
-/*ARGSUSED */
-void
-pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
-{
- pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
- pciio_slot_t slot = pcibr_dmamap->bd_slot;
-
- unsigned flags = pcibr_dmamap->bd_flags;
-
- /* Make sure that bss_ext_ates_active
- * is properly kept up to date.
- */
-
- if (PCIBR_DMAMAP_BUSY & flags)
- if (PCIBR_DMAMAP_SSRAM & flags)
- atomic_dec(&(pcibr_soft->bs_slot[slot]. bss_ext_ates_active));
-
- xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
-
- if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
- pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_D64_BITS);
- }
- if (pcibr_dmamap->bd_ate_count) {
- pcibr_ate_free(pcibr_dmamap->bd_soft,
- pcibr_dmamap->bd_ate_index,
- pcibr_dmamap->bd_ate_count);
- pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_PMU_BITS);
- }
-#ifdef IRIX
- DEL(pcibr_dmamap);
-#else
- free_pciio_dmamap(pcibr_dmamap);
-#endif
-}
-
-/*
- * Return a pointer to the specified Address Translation Entry, in either
- * the Bridge internal ATE RAM or the external map RAM, as appropriate.
- */
-LOCAL bridge_ate_p
-pcibr_ate_addr(pcibr_soft_t pcibr_soft,
- int ate_index)
-{
- bridge_t *bridge = pcibr_soft->bs_base;
-
- return (ate_index < pcibr_soft->bs_int_ate_size)
- ? &(bridge->b_int_ate_ram[ate_index].wr)
- : &(bridge->b_ext_ate_ram[ate_index]);
-}
-
-/*
- * pcibr_addr_xio_to_pci: given a PIO range, hand
- * back the corresponding base PCI MEM address;
- * this is used to short-circuit DMA requests that
- * loop back onto this PCI bus.
- */
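-/*
- * The lookup below first checks the fixed MEM32 and MEM64 windows and
- * then the per-slot DEVIO windows; a DEVIO hit only yields an address
- * when the Device(x) register says that window maps PCI MEM rather
- * than PCI I/O, in which case the base comes from the Device(x) offset
- * field.
- */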
-LOCAL iopaddr_t
-pcibr_addr_xio_to_pci(pcibr_soft_t soft,
- iopaddr_t xio_addr,
- size_t req_size)
-{
- iopaddr_t xio_lim = xio_addr + req_size - 1;
- iopaddr_t pci_addr;
- pciio_slot_t slot;
-
- if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
- (xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
- pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
- return pci_addr;
- }
- if ((xio_addr >= BRIDGE_PCI_MEM64_BASE) &&
- (xio_lim <= BRIDGE_PCI_MEM64_LIMIT)) {
- pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
- return pci_addr;
- }
- for (slot = 0; slot < 8; ++slot)
- if ((xio_addr >= BRIDGE_DEVIO(slot)) &&
- (xio_lim < BRIDGE_DEVIO(slot + 1))) {
- bridgereg_t dev;
-
- dev = soft->bs_slot[slot].bss_device;
- pci_addr = dev & BRIDGE_DEV_OFF_MASK;
- pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
- pci_addr += xio_addr - BRIDGE_DEVIO(slot);
- return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
- }
- return 0;
-}
-
-/* We are starting to get more complexity
- * surrounding writing ATEs, so pull
- * the writing code into this new function.
- */
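-/* The three macros below are used together by pcibr_dmamap_addr() and
- * pcibr_dmamap_list(): ATE_FREEZE() quiesces DMA through external ATEs
- * (taking the bridge lock when the map uses the external SSRAM),
- * ATE_WRITE() fills consecutive map entries, and ATE_THAW() restores
- * the saved command registers and releases the lock.
- */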
-
-#if PCIBR_FREEZE_TIME
-#define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
-#else
-#define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, cmd_regs)
-#endif
-
-LOCAL unsigned
-ate_freeze(pcibr_dmamap_t pcibr_dmamap,
-#if PCIBR_FREEZE_TIME
- unsigned *freeze_time_ptr,
-#endif
- unsigned *cmd_regs)
-{
- pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
-#ifdef LATER
- int dma_slot = pcibr_dmamap->bd_slot;
-#endif
- int ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
- int slot;
-
- unsigned long s;
- unsigned cmd_reg;
- volatile unsigned *cmd_lwa;
- unsigned cmd_lwd;
-
- if (!ext_ates)
- return 0;
-
- /* Bridge Hardware Bug WAR #484930:
- * Bridge can't handle updating External ATEs
- * while DMA is occurring that uses External ATEs,
- * even if the particular ATEs involved are disjoint.
- */
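-    /* The workaround below clears the bus-master enable bit in the
-     * command register of each slot with active external ATEs, reads
-     * the last cleared register back and flushes the write buffers to
-     * quiesce outstanding DMA, and relies on ate_thaw() to restore the
-     * saved command registers afterwards.
-     */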
-
- /* need to prevent anyone else from
- * unfreezing the grant while we
- * are working; also need to prevent
- * this thread from being interrupted
- * to keep PCI grant freeze time
- * at an absolute minimum.
- */
- s = pcibr_lock(pcibr_soft);
-
-#ifdef LATER
- /* just in case pcibr_dmamap_done was not called */
- if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
- pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
- if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
- atomic_dec(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));
- xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
- }
-#endif /* LATER */
-#if PCIBR_FREEZE_TIME
- *freeze_time_ptr = get_timestamp();
-#endif
-
- cmd_lwa = 0;
- for (slot = 0; slot < 8; ++slot)
- if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
- cmd_reg = pcibr_soft->
- bs_slot[slot].
- bss_cmd_shadow;
- if (cmd_reg & PCI_CMD_BUS_MASTER) {
- cmd_lwa = pcibr_soft->
- bs_slot[slot].
- bss_cmd_pointer;
- cmd_lwd = cmd_reg ^ PCI_CMD_BUS_MASTER;
- cmd_lwa[0] = cmd_lwd;
- }
- cmd_regs[slot] = cmd_reg;
- } else
- cmd_regs[slot] = 0;
-
- if (cmd_lwa) {
- bridge_t *bridge = pcibr_soft->bs_base;
-
- /* Read the last master bit that has been cleared. This PIO read
- * on the PCI bus is to ensure the completion of any DMAs that
- * are due to bus requests issued by PCI devices before the
- * clearing of master bits.
- */
- cmd_lwa[0];
-
- /* Flush all the write buffers in the bridge */
- for (slot = 0; slot < 8; ++slot)
- if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
- /* Flush the write buffer associated with this
- * PCI device which might be using dma map RAM.
- */
- bridge->b_wr_req_buf[slot].reg;
- }
- }
- return s;
-}
-
-#define ATE_WRITE() ate_write(ate_ptr, ate_count, ate)
-
-LOCAL void
-ate_write(bridge_ate_p ate_ptr,
- int ate_count,
- bridge_ate_t ate)
-{
- while (ate_count-- > 0) {
- *ate_ptr++ = ate;
- ate += IOPGSIZE;
- }
-}
-
-
-#if PCIBR_FREEZE_TIME
-#define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
-#else
-#define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
-#endif
-
-LOCAL void
-ate_thaw(pcibr_dmamap_t pcibr_dmamap,
- int ate_index,
-#if PCIBR_FREEZE_TIME
- bridge_ate_t ate,
- int ate_total,
- unsigned freeze_time_start,
-#endif
- unsigned *cmd_regs,
- unsigned s)
-{
- pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
- int dma_slot = pcibr_dmamap->bd_slot;
- int slot;
- bridge_t *bridge = pcibr_soft->bs_base;
- int ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
-
- unsigned cmd_reg;
-
-#if PCIBR_FREEZE_TIME
- unsigned freeze_time;
- static unsigned max_freeze_time = 0;
- static unsigned max_ate_total;
-#endif
-
- if (!ext_ates)
- return;
-
- /* restore cmd regs */
- for (slot = 0; slot < 8; ++slot)
- if ((cmd_reg = cmd_regs[slot]) & PCI_CMD_BUS_MASTER)
- bridge->b_type0_cfg_dev[slot].l[PCI_CFG_COMMAND / 4] = cmd_reg;
-
- pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_BUSY;
- atomic_inc(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));
-
-#if PCIBR_FREEZE_TIME
- freeze_time = get_timestamp() - freeze_time_start;
-
- if ((max_freeze_time < freeze_time) ||
- (max_ate_total < ate_total)) {
- if (max_freeze_time < freeze_time)
- max_freeze_time = freeze_time;
- if (max_ate_total < ate_total)
- max_ate_total = ate_total;
- pcibr_unlock(pcibr_soft, s);
- printk("%s: pci freeze time %d usec for %d ATEs\n"
- "\tfirst ate: %R\n",
- pcibr_soft->bs_name,
- freeze_time * 1000 / 1250,
- ate_total,
- ate, ate_bits);
- } else
-#endif
- pcibr_unlock(pcibr_soft, s);
-}
-
-/*ARGSUSED */
-iopaddr_t
-pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
- paddr_t paddr,
- size_t req_size)
-{
- pcibr_soft_t pcibr_soft;
- iopaddr_t xio_addr;
- xwidgetnum_t xio_port;
- iopaddr_t pci_addr;
- unsigned flags;
-
- ASSERT(pcibr_dmamap != NULL);
- ASSERT(req_size > 0);
- ASSERT(req_size <= pcibr_dmamap->bd_max_size);
-
- pcibr_soft = pcibr_dmamap->bd_soft;
-
- flags = pcibr_dmamap->bd_flags;
-
- xio_addr = xtalk_dmamap_addr(pcibr_dmamap->bd_xtalk, paddr, req_size);
- if (XIO_PACKED(xio_addr)) {
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_dmamap->bd_xio_port;
-
- /* If this DMA is to an address that
- * refers back to this Bridge chip,
- * reduce it back to the correct
- * PCI MEM address.
- */
- if (xio_port == pcibr_soft->bs_xid) {
- pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
- } else if (flags & PCIIO_DMA_A64) {
- /* A64 DMA:
- * always use 64-bit direct mapping,
- * which always works.
- * Device(x) was set up during
- * dmamap allocation.
- */
-
- /* attributes are already bundled up into bd_pci_addr.
- */
- pci_addr = pcibr_dmamap->bd_pci_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT)
- | xio_addr;
-
- /* Bridge Hardware WAR #482836:
- * If the transfer is not cache aligned
- * and the Bridge Rev is <= B, force
- * prefetch to be off.
- */
- if (flags & PCIBR_NOPREFETCH)
- pci_addr &= ~PCI64_ATTR_PREF;
-
-#if DEBUG && PCIBR_DMA_DEBUG
- printk("pcibr_dmamap_addr (direct64):\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\tXIO port 0x%x offset 0x%x\n"
- "\treturning PCI 0x%x\n",
- paddr, paddr + req_size - 1,
- xio_port, xio_addr, pci_addr);
-#endif
- } else if (flags & PCIIO_FIXED) {
- /* A32 direct DMA:
- * always use 32-bit direct mapping,
- * which may fail.
- * Device(x) was set up during
- * dmamap allocation.
- */
-
- if (xio_port != pcibr_soft->bs_dir_xport)
- pci_addr = 0; /* wrong DIDN */
- else if (xio_addr < pcibr_dmamap->bd_xio_addr)
- pci_addr = 0; /* out of range */
- else if ((xio_addr + req_size) >
- (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
- pci_addr = 0; /* out of range */
- else
- pci_addr = pcibr_dmamap->bd_pci_addr +
- xio_addr - pcibr_dmamap->bd_xio_addr;
-
-#if DEBUG && PCIBR_DMA_DEBUG
- printk("pcibr_dmamap_addr (direct32):\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\tXIO port 0x%x offset 0x%x\n"
- "\treturning PCI 0x%x\n",
- paddr, paddr + req_size - 1,
- xio_port, xio_addr, pci_addr);
-#endif
- } else {
- bridge_t *bridge = pcibr_soft->bs_base;
- iopaddr_t offset = IOPGOFF(xio_addr);
- bridge_ate_t ate_proto = pcibr_dmamap->bd_ate_proto;
- int ate_count = IOPG(offset + req_size - 1) + 1;
-
- int ate_index = pcibr_dmamap->bd_ate_index;
- unsigned cmd_regs[8];
- unsigned s;
-
-#if PCIBR_FREEZE_TIME
- int ate_total = ate_count;
- unsigned freeze_time;
-#endif
-
-#if PCIBR_ATE_DEBUG
- bridge_ate_t ate_cmp;
- bridge_ate_p ate_cptr;
- unsigned ate_lo, ate_hi;
- int ate_bad = 0;
- int ate_rbc = 0;
-#endif
- bridge_ate_p ate_ptr = pcibr_dmamap->bd_ate_ptr;
- bridge_ate_t ate;
-
- /* Bridge Hardware WAR #482836:
- * If the transfer is not cache aligned
- * and the Bridge Rev is <= B, force
- * prefetch to be off.
- */
- if (flags & PCIBR_NOPREFETCH)
- ate_proto &= ~ATE_PREF;
-
- ate = ate_proto
- | (xio_port << ATE_TIDSHIFT)
- | (xio_addr - offset);
-
- pci_addr = pcibr_dmamap->bd_pci_addr + offset;
-
- /* Fill in our mapping registers
- * with the appropriate xtalk data,
- * and hand back the PCI address.
- */
-
- ASSERT(ate_count > 0);
- if (ate_count <= pcibr_dmamap->bd_ate_count) {
- ATE_FREEZE();
- ATE_WRITE();
- ATE_THAW();
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- } else {
-	    /* The number of ATEs required is greater than the number
-	     * allocated for this map. One way this can happen is if
-	     * pcibr_dmamap_alloc() was called with the PCIBR_NO_ATE_ROUNDUP
-	     * flag, and then when that map is used (right now), the
-	     * target address tells us we really did need to round up.
- * The other possibility is that the map is just plain too
- * small to handle the requested target area.
- */
-#if PCIBR_ATE_DEBUG
- printk(KERN_WARNING "pcibr_dmamap_addr :\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\tate_count 0x%x bd_ate_count 0x%x\n"
- "\tATE's required > number allocated\n",
- paddr, paddr + req_size - 1,
- ate_count, pcibr_dmamap->bd_ate_count);
-#endif
- pci_addr = 0;
- }
-
- }
- return pci_addr;
-}
-
-/*ARGSUSED */
-alenlist_t
-pcibr_dmamap_list(pcibr_dmamap_t pcibr_dmamap,
- alenlist_t palenlist,
- unsigned flags)
-{
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge=NULL;
-
- unsigned al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
- int inplace = flags & PCIIO_INPLACE;
-
- alenlist_t pciio_alenlist = 0;
- alenlist_t xtalk_alenlist;
- size_t length;
- iopaddr_t offset;
- unsigned direct64;
- int ate_index = 0;
- int ate_count = 0;
- int ate_total = 0;
- bridge_ate_p ate_ptr = (bridge_ate_p)0;
- bridge_ate_t ate_proto = (bridge_ate_t)0;
- bridge_ate_t ate_prev;
- bridge_ate_t ate;
- alenaddr_t xio_addr;
- xwidgetnum_t xio_port;
- iopaddr_t pci_addr;
- alenaddr_t new_addr;
-
- unsigned cmd_regs[8];
- unsigned s = 0;
-
-#if PCIBR_FREEZE_TIME
- unsigned freeze_time;
-#endif
- int ate_freeze_done = 0; /* To pair ATE_THAW
- * with an ATE_FREEZE
- */
-
- pcibr_soft = pcibr_dmamap->bd_soft;
-
- xtalk_alenlist = xtalk_dmamap_list(pcibr_dmamap->bd_xtalk, palenlist,
- flags & DMAMAP_FLAGS);
- if (!xtalk_alenlist)
- goto fail;
-
- alenlist_cursor_init(xtalk_alenlist, 0, NULL);
-
- if (inplace) {
- pciio_alenlist = xtalk_alenlist;
- } else {
- pciio_alenlist = alenlist_create(al_flags);
- if (!pciio_alenlist)
- goto fail;
- }
-
- direct64 = pcibr_dmamap->bd_flags & PCIIO_DMA_A64;
- if (!direct64) {
- bridge = pcibr_soft->bs_base;
- ate_ptr = pcibr_dmamap->bd_ate_ptr;
- ate_index = pcibr_dmamap->bd_ate_index;
- ate_proto = pcibr_dmamap->bd_ate_proto;
- ATE_FREEZE();
- ate_freeze_done = 1; /* Remember that we need to do an ATE_THAW */
- }
- pci_addr = pcibr_dmamap->bd_pci_addr;
-
- ate_prev = 0; /* matches no valid ATEs */
- while (ALENLIST_SUCCESS ==
- alenlist_get(xtalk_alenlist, NULL, 0,
- &xio_addr, &length, al_flags)) {
- if (XIO_PACKED(xio_addr)) {
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_dmamap->bd_xio_port;
-
- if (xio_port == pcibr_soft->bs_xid) {
- new_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, length);
- if (new_addr == PCI_NOWHERE)
- goto fail;
- } else if (direct64) {
- new_addr = pci_addr | xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
-
- /* Bridge Hardware WAR #482836:
- * If the transfer is not cache aligned
- * and the Bridge Rev is <= B, force
- * prefetch to be off.
- */
- if (flags & PCIBR_NOPREFETCH)
- new_addr &= ~PCI64_ATTR_PREF;
-
- } else {
-	    /* Calculate the ATE value for
-	     * the first address. If it
-	     * matches the previous
-	     * ATE written (i.e. we had
- * multiple blocks in the
- * same IOPG), then back up
- * and reuse that ATE.
- *
- * We are NOT going to
- * aggressively try to
- * reuse any other ATEs.
- */
- offset = IOPGOFF(xio_addr);
- ate = ate_proto
- | (xio_port << ATE_TIDSHIFT)
- | (xio_addr - offset);
- if (ate == ate_prev) {
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_list: ATE share\n");
-#endif
- ate_ptr--;
- ate_index--;
- pci_addr -= IOPGSIZE;
- }
- new_addr = pci_addr + offset;
-
- /* Fill in the hardware ATEs
- * that contain this block.
- */
- ate_count = IOPG(offset + length - 1) + 1;
- ate_total += ate_count;
-
-	    /* Ensure that this map contains enough ATEs */
- if (ate_total > pcibr_dmamap->bd_ate_count) {
-#if PCIBR_ATE_DEBUG
- printk(KERN_WARNING "pcibr_dmamap_list :\n"
- "\twanted xio_addr [0x%x..0x%x]\n"
- "\tate_total 0x%x bd_ate_count 0x%x\n"
- "\tATE's required > number allocated\n",
- xio_addr, xio_addr + length - 1,
- ate_total, pcibr_dmamap->bd_ate_count);
-#endif
- goto fail;
- }
-
- ATE_WRITE();
-
- ate_index += ate_count;
- ate_ptr += ate_count;
-
- ate_count <<= IOPFNSHIFT;
- ate += ate_count;
- pci_addr += ate_count;
- }
-
- /* write the PCI DMA address
- * out to the scatter-gather list.
- */
- if (inplace) {
- if (ALENLIST_SUCCESS !=
- alenlist_replace(pciio_alenlist, NULL,
- &new_addr, &length, al_flags))
- goto fail;
- } else {
- if (ALENLIST_SUCCESS !=
- alenlist_append(pciio_alenlist,
- new_addr, length, al_flags))
- goto fail;
- }
- }
- if (!inplace)
- alenlist_done(xtalk_alenlist);
-
-    /* Reset the internal cursor of the alenlist to be returned
-     * to the caller.
- */
- alenlist_cursor_init(pciio_alenlist, 0, NULL);
-
-    /* If an ATE_FREEZE was done, do the ATE_THAW to unroll all the
-     * changes that ATE_FREEZE made to implement the external SSRAM
- * changes that ATE_FREEZE has done to implement the external SSRAM
- * bug workaround.
- */
- if (ate_freeze_done) {
- ATE_THAW();
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- return pciio_alenlist;
-
- fail:
-    /* There are various points of failure after doing an ATE_FREEZE.
-     * We need to do an ATE_THAW here, otherwise the ATEs are locked
-     * forever. The decision to do an ATE_THAW needs to be based on
-     * whether an ATE_FREEZE was done before.
- */
- if (ate_freeze_done) {
- ATE_THAW();
- bridge->b_wid_tflush;
- }
- if (pciio_alenlist && !inplace)
- alenlist_destroy(pciio_alenlist);
- return 0;
-}
-
-/*ARGSUSED */
-void
-pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
-{
- /*
- * We could go through and invalidate ATEs here;
- * for performance reasons, we don't.
- * We also don't enforce the strict alternation
- * between _addr/_list and _done, but Hub does.
- */
-
- if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
- pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
-
- if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
- atomic_dec(&(pcibr_dmamap->bd_soft->bs_slot[pcibr_dmamap->bd_slot]. bss_ext_ates_active));
- }
-
- xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
-}
-
-
-/*
- * For each bridge, the DIR_OFF value in the Direct Mapping Register
- * determines the PCI to Crosstalk memory mapping to be used for all
- * 32-bit Direct Mapping memory accesses. This mapping can be to any
- * node in the system. This function will return that compact node id.
- */
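-/*
- * The node is recovered below from the bridge's direct-map XIO base
- * address (bs_dir_xbase): the NASID embedded in that address names the
- * target node, which is then converted to a compact node id.
- */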
-
-/*ARGSUSED */
-cnodeid_t
-pcibr_get_dmatrans_node(devfs_handle_t pconn_vhdl)
-{
-
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
- return(NASID_TO_COMPACT_NODEID(NASID_GET(pcibr_soft->bs_dir_xbase)));
-}
-
-/*ARGSUSED */
-iopaddr_t
-pcibr_dmatrans_addr(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- paddr_t paddr,
- size_t req_size,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
-
- xwidgetnum_t xio_port;
- iopaddr_t xio_addr;
- iopaddr_t pci_addr;
-
- int have_rrbs;
- int min_rrbs;
-
- /* merge in forced flags */
- flags |= pcibr_soft->bs_dma_flags;
-
- xio_addr = xtalk_dmatrans_addr(xconn_vhdl, 0, paddr, req_size,
- flags & DMAMAP_FLAGS);
-
- if (!xio_addr) {
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- return 0;
- }
- /*
- * find which XIO port this goes to.
- */
- if (XIO_PACKED(xio_addr)) {
- if (xio_addr == XIO_NOWHERE) {
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- return 0;
- }
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
-
- } else
- xio_port = pcibr_soft->bs_mxid;
-
- /*
- * If this DMA comes back to us,
- * return the PCI MEM address on
- * which it would land, or NULL
- * if the target is something
-     * on the bridge other than PCI MEM.
- */
- if (xio_port == pcibr_soft->bs_xid) {
- pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
- return pci_addr;
- }
- /* If the caller can use A64, try to
- * satisfy the request with the 64-bit
- * direct map. This can fail if the
- * configuration bits in Device(x)
- * conflict with our flags.
- */
-
- if (flags & PCIIO_DMA_A64) {
- pci_addr = slotp->bss_d64_base;
- if (!(flags & PCIBR_VCHAN1))
- flags |= PCIBR_VCHAN0;
- if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
- (flags == slotp->bss_d64_flags)) {
-
- pci_addr |= xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
-
-#if DEBUG && PCIBR_DMA_DEBUG
-#if HWG_PERF_CHECK
- if (xio_addr != 0x20000000)
-#endif
- printk("pcibr_dmatrans_addr: [reuse]\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tdirect 64bit address is 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr, pci_addr);
-#endif
- return (pci_addr);
- }
- if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
- pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
- slotp->bss_d64_flags = flags;
- slotp->bss_d64_base = pci_addr;
- pci_addr |= xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
-
- /* Make sure we have an RRB (or two).
- */
- if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
- if (flags & PCIBR_VCHAN1)
- pciio_slot += PCIBR_RRB_SLOT_VIRTUAL;
- have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot];
- if (have_rrbs < 2) {
- if (pci_addr & PCI64_ATTR_PREF)
- min_rrbs = 2;
- else
- min_rrbs = 1;
- if (have_rrbs < min_rrbs)
- do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, min_rrbs - have_rrbs);
- }
- }
-#if PCIBR_DMA_DEBUG
-#if HWG_PERF_CHECK
- if (xio_addr != 0x20000000)
-#endif
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tdirect 64bit address is 0x%x\n"
- "\tnew flags: 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr, pci_addr, (uint64_t) flags);
-#endif
- return (pci_addr);
- }
- /* our flags conflict with Device(x).
- */
- flags = flags
- & ~PCIIO_DMA_A64
- & ~PCIBR_VCHAN0
- ;
-
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tUnable to set Device(x) bits for Direct-64\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- }
- /* Try to satisfy the request with the 32-bit direct
- * map. This can fail if the configuration bits in
- * Device(x) conflict with our flags, or if the
- * target address is outside where DIR_OFF points.
- */
- {
- size_t map_size = 1ULL << 31;
- iopaddr_t xio_base = pcibr_soft->bs_dir_xbase;
- iopaddr_t offset = xio_addr - xio_base;
- iopaddr_t endoff = req_size + offset;
-
- if ((req_size > map_size) ||
- (xio_addr < xio_base) ||
- (xio_port != pcibr_soft->bs_dir_xport) ||
- (endoff > map_size)) {
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\txio region outside direct32 target\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- } else {
- pci_addr = slotp->bss_d32_base;
- if ((pci_addr != PCIBR_D32_BASE_UNSET) &&
- (flags == slotp->bss_d32_flags)) {
-
- pci_addr |= offset;
-
-#if DEBUG && PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr: [reuse]\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tmapped via direct32 offset 0x%x\n"
- "\twill DMA via pci addr 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr, offset, pci_addr);
-#endif
- return (pci_addr);
- }
- if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS)) {
-
- pci_addr = PCI32_DIRECT_BASE;
- slotp->bss_d32_flags = flags;
- slotp->bss_d32_base = pci_addr;
- pci_addr |= offset;
-
- /* Make sure we have an RRB (or two).
- */
- if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
- have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot];
- if (have_rrbs < 2) {
- if (slotp->bss_device & BRIDGE_DEV_PREF)
- min_rrbs = 2;
- else
- min_rrbs = 1;
- if (have_rrbs < min_rrbs)
- do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, min_rrbs - have_rrbs);
- }
- }
-#if PCIBR_DMA_DEBUG
-#if HWG_PERF_CHECK
- if (xio_addr != 0x20000000)
-#endif
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tmapped via direct32 offset 0x%x\n"
- "\twill DMA via pci addr 0x%x\n"
- "\tnew flags: 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr, offset, pci_addr, (uint64_t) flags);
-#endif
- return (pci_addr);
- }
- /* our flags conflict with Device(x).
- */
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tUnable to set Device(x) bits for Direct-32\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- }
- }
-
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tno acceptable PCI address found or constructable\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
-
- return 0;
-}
-
-/*ARGSUSED */
-alenlist_t
-pcibr_dmatrans_list(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- alenlist_t palenlist,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
- xwidgetnum_t xio_port;
-
- alenlist_t pciio_alenlist = 0;
- alenlist_t xtalk_alenlist = 0;
-
- int inplace;
- unsigned direct64;
- unsigned al_flags;
-
- iopaddr_t xio_base;
- alenaddr_t xio_addr;
- size_t xio_size;
-
- size_t map_size;
- iopaddr_t pci_base;
- alenaddr_t pci_addr;
-
- unsigned relbits = 0;
-
- /* merge in forced flags */
- flags |= pcibr_soft->bs_dma_flags;
-
- inplace = flags & PCIIO_INPLACE;
- direct64 = flags & PCIIO_DMA_A64;
- al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
-
- if (direct64) {
- map_size = 1ull << 48;
- xio_base = 0;
- pci_base = slotp->bss_d64_base;
- if ((pci_base != PCIBR_D64_BASE_UNSET) &&
- (flags == slotp->bss_d64_flags)) {
- /* reuse previous base info */
- } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS) < 0) {
- /* DMA configuration conflict */
- goto fail;
- } else {
- relbits = BRIDGE_DEV_D64_BITS;
- pci_base =
- pcibr_flags_to_d64(flags, pcibr_soft);
- }
- } else {
- xio_base = pcibr_soft->bs_dir_xbase;
- map_size = 1ull << 31;
- pci_base = slotp->bss_d32_base;
- if ((pci_base != PCIBR_D32_BASE_UNSET) &&
- (flags == slotp->bss_d32_flags)) {
- /* reuse previous base info */
- } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS) < 0) {
- /* DMA configuration conflict */
- goto fail;
- } else {
- relbits = BRIDGE_DEV_D32_BITS;
- pci_base = PCI32_DIRECT_BASE;
- }
- }
-
- xtalk_alenlist = xtalk_dmatrans_list(xconn_vhdl, 0, palenlist,
- flags & DMAMAP_FLAGS);
- if (!xtalk_alenlist)
- goto fail;
-
- alenlist_cursor_init(xtalk_alenlist, 0, NULL);
-
- if (inplace) {
- pciio_alenlist = xtalk_alenlist;
- } else {
- pciio_alenlist = alenlist_create(al_flags);
- if (!pciio_alenlist)
- goto fail;
- }
-
- while (ALENLIST_SUCCESS ==
- alenlist_get(xtalk_alenlist, NULL, 0,
- &xio_addr, &xio_size, al_flags)) {
-
- /*
- * find which XIO port this goes to.
- */
- if (XIO_PACKED(xio_addr)) {
- if (xio_addr == XIO_NOWHERE) {
-#if PCIBR_DMA_DEBUG
-		printk("pcibr_dmatrans_list:\n"
-		       "\tpciio connection point %v\n"
-		       "\txtalk connection point %v\n"
-		       "\txtalk_dmatrans_list returned XIO_NOWHERE\n",
-		       pconn_vhdl, xconn_vhdl);
-#endif
- return 0;
- }
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_soft->bs_mxid;
-
- /*
- * If this DMA comes back to us,
- * return the PCI MEM address on
- * which it would land, or NULL
- * if the target is something
-	 * on the bridge other than PCI MEM.
- */
- if (xio_port == pcibr_soft->bs_xid) {
- pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, xio_size);
- if ( (pci_addr == (alenaddr_t)NULL) )
- goto fail;
- } else if (direct64) {
- ASSERT(xio_port != 0);
- pci_addr = pci_base | xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
- } else {
- iopaddr_t offset = xio_addr - xio_base;
- iopaddr_t endoff = xio_size + offset;
-
- if ((xio_size > map_size) ||
- (xio_addr < xio_base) ||
- (xio_port != pcibr_soft->bs_dir_xport) ||
- (endoff > map_size))
- goto fail;
-
- pci_addr = pci_base + (xio_addr - xio_base);
- }
-
- /* write the PCI DMA address
- * out to the scatter-gather list.
- */
- if (inplace) {
- if (ALENLIST_SUCCESS !=
- alenlist_replace(pciio_alenlist, NULL,
- &pci_addr, &xio_size, al_flags))
- goto fail;
- } else {
- if (ALENLIST_SUCCESS !=
- alenlist_append(pciio_alenlist,
- pci_addr, xio_size, al_flags))
- goto fail;
- }
- }
-
- if (relbits) {
- if (direct64) {
- slotp->bss_d64_flags = flags;
- slotp->bss_d64_base = pci_base;
- } else {
- slotp->bss_d32_flags = flags;
- slotp->bss_d32_base = pci_base;
- }
- }
- if (!inplace)
- alenlist_done(xtalk_alenlist);
-
-    /* Reset the internal cursor of the alenlist to be returned
-     * to the caller.
- */
- alenlist_cursor_init(pciio_alenlist, 0, NULL);
- return pciio_alenlist;
-
- fail:
- if (relbits)
- pcibr_release_device(pcibr_soft, pciio_slot, relbits);
- if (pciio_alenlist && !inplace)
- alenlist_destroy(pciio_alenlist);
- return 0;
-}
-
-void
-pcibr_dmamap_drain(pcibr_dmamap_t map)
-{
- xtalk_dmamap_drain(map->bd_xtalk);
-}
-
-void
-pcibr_dmaaddr_drain(devfs_handle_t pconn_vhdl,
- paddr_t paddr,
- size_t bytes)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
-
- xtalk_dmaaddr_drain(xconn_vhdl, paddr, bytes);
-}
-
-void
-pcibr_dmalist_drain(devfs_handle_t pconn_vhdl,
- alenlist_t list)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
-
- xtalk_dmalist_drain(xconn_vhdl, list);
-}
-
-/*
- * Get the starting PCI bus address out of the given DMA map.
- * This function is intended for callers intimately familiar with the PCI
- * bridge, since it relies on the fact that the starting address of the map
- * is fixed at allocation time in the current implementation.
- */
-iopaddr_t
-pcibr_dmamap_pciaddr_get(pcibr_dmamap_t pcibr_dmamap)
-{
- return (pcibr_dmamap->bd_pci_addr);
-}
-
-/*
- * There are end cases where a deadlock can occur if interrupt
- * processing completes and the Bridge b_int_status bit is still set.
- *
- * One scenario is if a second PCI interrupt occurs within 60ns of
- * the previous interrupt being cleared. In this case the Bridge
- * does not detect the transition, the Bridge b_int_status bit
- * remains set, and because no transition was detected no interrupt
- * packet is sent to the Hub/Heart.
- *
- * A second scenario is possible when a b_int_status bit is being
- * shared by multiple devices:
- * Device #1 generates interrupt
- * Bridge b_int_status bit set
- * Device #2 generates interrupt
- * interrupt processing begins
- * ISR for device #1 runs and
- * clears interrupt
- * Device #1 generates interrupt
- * ISR for device #2 runs and
- * clears interrupt
- * (b_int_status bit still set)
- * interrupt processing completes
- *
- * Interrupt processing is now complete, but an interrupt is still
- * outstanding for Device #1. But because there was no transition of
- * the b_int_status bit, no interrupt packet will be generated and
- * a deadlock will occur.
- *
- * To avoid these deadlock situations, this function is used
- * to check if a specific Bridge b_int_status bit is set, and if so,
- * cause the setting of the corresponding interrupt bit.
- *
- * On an XBridge (IP35), we do this by writing the appropriate Bridge Force
- * Interrupt register.
- */
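-/*
- * On an older Bridge there is no force-interrupt register, so when the
- * b_int_status bit is still set the code below looks up the xtalk
- * interrupt vector and target CPU for that bit and resends the
- * interrupt directly with REMOTE_CPU_SEND_INTR.
- */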
-void
-pcibr_force_interrupt(pcibr_intr_wrap_t wrap)
-{
- unsigned bit;
- pcibr_soft_t pcibr_soft = wrap->iw_soft;
- bridge_t *bridge = pcibr_soft->bs_base;
- cpuid_t cpuvertex_to_cpuid(devfs_handle_t vhdl);
-
- bit = wrap->iw_intr;
-
- if (pcibr_soft->bs_xbridge) {
- bridge->b_force_pin[bit].intr = 1;
- } else if ((1 << bit) & *wrap->iw_stat) {
- cpuid_t cpu;
- unsigned intr_bit;
- xtalk_intr_t xtalk_intr =
- pcibr_soft->bs_intr[bit].bsi_xtalk_intr;
-
- intr_bit = (short) xtalk_intr_vector_get(xtalk_intr);
- cpu = cpuvertex_to_cpuid(xtalk_intr_cpu_get(xtalk_intr));
- REMOTE_CPU_SEND_INTR(cpu, intr_bit);
- }
-}
-
-/* =====================================================================
- * INTERRUPT MANAGEMENT
- */
-
-static unsigned
-pcibr_intr_bits(pciio_info_t info,
- pciio_intr_line_t lines)
-{
- pciio_slot_t slot = pciio_info_slot_get(info);
- unsigned bbits = 0;
-
- /*
- * Currently favored mapping from PCI
- * slot number and INTA/B/C/D to Bridge
- * PCI Interrupt Bit Number:
- *
- * SLOT A B C D
- * 0 0 4 0 4
- * 1 1 5 1 5
- * 2 2 6 2 6
- * 3 3 7 3 7
- * 4 4 0 4 0
- * 5 5 1 5 1
- * 6 6 2 6 2
- * 7 7 3 7 3
- */
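-    /*
-     * For example, slot 2 with INTA maps to bridge bit 2, while slot 2
-     * with INTB maps to bridge bit 2 ^ 4 = 6, matching the table above.
-     */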
-
- if (slot < 8) {
- if (lines & (PCIIO_INTR_LINE_A| PCIIO_INTR_LINE_C))
- bbits |= 1 << slot;
- if (lines & (PCIIO_INTR_LINE_B| PCIIO_INTR_LINE_D))
- bbits |= 1 << (slot ^ 4);
- }
- return bbits;
-}
-
-
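-/*
- * Hypothetical usage sketch (not taken from a real caller): a device
- * attach path would typically do something like
- *
- *	intr = pcibr_intr_alloc(pconn_vhdl, 0, PCIIO_INTR_LINE_A, owner);
- *	... register a handler for intr->bi_irq on intr->bi_cpu ...
- *	pcibr_intr_connect(intr);
- *
- * and undo it with pcibr_intr_disconnect() and pcibr_intr_free().
- */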
-/*ARGSUSED */
-pcibr_intr_t
-pcibr_intr_alloc(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- pciio_intr_line_t lines,
- devfs_handle_t owner_dev)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pcibr_info->f_slot;
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- bridge_t *bridge = pcibr_soft->bs_base;
- int is_threaded = 0;
- int thread_swlevel;
-
- xtalk_intr_t *xtalk_intr_p;
- pcibr_intr_t *pcibr_intr_p;
- pcibr_intr_list_t *intr_list_p;
-
- unsigned pcibr_int_bits;
- unsigned pcibr_int_bit;
- xtalk_intr_t xtalk_intr = (xtalk_intr_t)0;
- hub_intr_t hub_intr;
- pcibr_intr_t pcibr_intr;
- pcibr_intr_list_t intr_entry;
- pcibr_intr_list_t intr_list;
- bridgereg_t int_dev;
-
-#if DEBUG && INTR_DEBUG
- printk("%v: pcibr_intr_alloc\n"
- "%v:%s%s%s%s%s\n",
- owner_dev, pconn_vhdl,
- !(lines & 15) ? " No INTs?" : "",
- lines & 1 ? " INTA" : "",
- lines & 2 ? " INTB" : "",
- lines & 4 ? " INTC" : "",
- lines & 8 ? " INTD" : "");
-#endif
-
- NEW(pcibr_intr);
- if (!pcibr_intr)
- return NULL;
-
- if (dev_desc) {
- cpuid_t intr_target_from_desc(device_desc_t, int);
- } else {
- extern int default_intr_pri;
-
- is_threaded = 1; /* PCI interrupts are threaded, by default */
- thread_swlevel = default_intr_pri;
- }
-
- pcibr_intr->bi_dev = pconn_vhdl;
- pcibr_intr->bi_lines = lines;
- pcibr_intr->bi_soft = pcibr_soft;
- pcibr_intr->bi_ibits = 0; /* bits will be added below */
- pcibr_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
- pcibr_intr->bi_mustruncpu = CPU_NONE;
- mutex_spinlock_init(&pcibr_intr->bi_ibuf.ib_lock);
-
- pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, lines);
-
-
- /*
- * For each PCI interrupt line requested, figure
- * out which Bridge PCI Interrupt Line it maps
- * to, and make sure there are xtalk resources
- * allocated for it.
- */
-#if DEBUG && INTR_DEBUG
- printk("pcibr_int_bits: 0x%X\n", pcibr_int_bits);
-#endif
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit ++) {
- if (pcibr_int_bits & (1 << pcibr_int_bit)) {
- xtalk_intr_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
-
- xtalk_intr = *xtalk_intr_p;
-
- if (xtalk_intr == NULL) {
- /*
- * This xtalk_intr_alloc is constrained for two reasons:
- * 1) Normal interrupts and error interrupts need to be delivered
- * through a single xtalk target widget so that there aren't any
- * ordering problems with DMA, completion interrupts, and error
- * interrupts. (Use of xconn_vhdl forces this.)
- *
-			 * 2) Addressing constraints on IP35 and Bridge force
- * us to use a single PI number for all interrupts from a
- * single Bridge. (IP35-specific code forces this, and we
- * verify in pcibr_setwidint.)
- */
-
- /*
- * All code dealing with threaded PCI interrupt handlers
- * is located at the pcibr level. Because of this,
- * we always want the lower layers (hub/heart_intr_alloc,
- * intr_level_connect) to treat us as non-threaded so we
- * don't set up a duplicate threaded environment. We make
- * this happen by calling a special xtalk interface.
- */
- xtalk_intr = xtalk_intr_alloc_nothd(xconn_vhdl, dev_desc,
- owner_dev);
-#if DEBUG && INTR_DEBUG
- printk("%v: xtalk_intr=0x%X\n", xconn_vhdl, xtalk_intr);
-#endif
-
- /* both an assert and a runtime check on this:
- * we need to check in non-DEBUG kernels, and
- * the ASSERT gets us more information when
- * we use DEBUG kernels.
- */
- ASSERT(xtalk_intr != NULL);
- if (xtalk_intr == NULL) {
- /* it is quite possible that our
- * xtalk_intr_alloc failed because
- * someone else got there first,
- * and we can find their results
- * in xtalk_intr_p.
- */
- if (!*xtalk_intr_p) {
-#ifdef SUPPORT_PRINTING_V_FORMAT
- printk(KERN_ALERT
- "pcibr_intr_alloc %v: unable to get xtalk interrupt resources",
- xconn_vhdl);
-#else
- printk(KERN_ALERT
- "pcibr_intr_alloc 0x%p: unable to get xtalk interrupt resources",
- (void *)xconn_vhdl);
-#endif
- /* yes, we leak resources here. */
- return 0;
- }
- } else if (compare_and_swap_ptr((void **) xtalk_intr_p, NULL, xtalk_intr)) {
- /*
- * now tell the bridge which slot is
- * using this interrupt line.
- */
- int_dev = bridge->b_int_device;
- int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
- int_dev |= pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit);
- bridge->b_int_device = int_dev; /* XXXMP */
-
-#if DEBUG && INTR_DEBUG
- printk("%v: bridge intr bit %d clears my wrb\n",
- pconn_vhdl, pcibr_int_bit);
-#endif
- } else {
- /* someone else got one allocated first;
- * free the one we just created, and
- * retrieve the one they allocated.
- */
- xtalk_intr_free(xtalk_intr);
- xtalk_intr = *xtalk_intr_p;
-#if PARANOID
- /* once xtalk_intr is set, we never clear it,
- * so if the CAS fails above, this condition
- * can "never happen" ...
- */
- if (!xtalk_intr) {
- printk(KERN_ALERT
- "pcibr_intr_alloc %v: unable to set xtalk interrupt resources",
- xconn_vhdl);
- /* yes, we leak resources here. */
- return 0;
- }
-#endif
- }
- }
-
- pcibr_intr->bi_ibits |= 1 << pcibr_int_bit;
-
- NEW(intr_entry);
- intr_entry->il_next = NULL;
- intr_entry->il_intr = pcibr_intr;
- intr_entry->il_wrbf = &(bridge->b_wr_req_buf[pciio_slot].reg);
- intr_list_p =
- &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
-#if DEBUG && INTR_DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk("0x%x: Bridge bit %d wrap=0x%x\n",
- pconn_vhdl, pcibr_int_bit,
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap);
-#else
- printk("%v: Bridge bit %d wrap=0x%x\n",
- pconn_vhdl, pcibr_int_bit,
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap);
-#endif
-#endif
-
- if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
- /* we are the first interrupt on this bridge bit.
- */
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) allocated [FIRST]\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- continue;
- }
- intr_list = *intr_list_p;
- pcibr_intr_p = &intr_list->il_intr;
- if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
- /* first entry on list was erased,
- * and we replaced it, so we
- * don't need our intr_entry.
- */
- DEL(intr_entry);
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) replaces erased first\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- continue;
- }
- intr_list_p = &intr_list->il_next;
- if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
- /* we are the new second interrupt on this bit.
- */
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared = 1;
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) is new SECOND\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- continue;
- }
- while (1) {
- pcibr_intr_p = &intr_list->il_intr;
- if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
- /* an entry on list was erased,
- * and we replaced it, so we
- * don't need our intr_entry.
- */
- DEL(intr_entry);
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) replaces erased Nth\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- break;
- }
- intr_list_p = &intr_list->il_next;
- if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
- /* entry appended to share list
- */
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) is new Nth\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- break;
- }
- /* step to next record in chain
- */
- intr_list = *intr_list_p;
- }
- }
- }
-
-#if DEBUG && INTR_DEBUG
- printk("%v pcibr_intr_alloc complete\n", pconn_vhdl);
-#endif
- hub_intr = (hub_intr_t)xtalk_intr;
- pcibr_intr->bi_irq = hub_intr->i_bit;
- pcibr_intr->bi_cpu = hub_intr->i_cpuid;
- return pcibr_intr;
-}
-
-/*ARGSUSED */
-void
-pcibr_intr_free(pcibr_intr_t pcibr_intr)
-{
- unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- unsigned pcibr_int_bit;
- pcibr_intr_list_t intr_list;
- int intr_shared;
- xtalk_intr_t *xtalk_intrp;
-
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++) {
- if (pcibr_int_bits & (1 << pcibr_int_bit)) {
- for (intr_list =
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
- intr_list != NULL;
- intr_list = intr_list->il_next)
- if (compare_and_swap_ptr((void **) &intr_list->il_intr,
- pcibr_intr,
- NULL)) {
-#if DEBUG && INTR_DEBUG
- printk("%s: cleared a handler from bit %d\n",
- pcibr_soft->bs_name, pcibr_int_bit);
-#endif
- }
- /* If this interrupt line is not being shared between multiple
-	     * devices, release the xtalk interrupt resources.
- */
- intr_shared =
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared;
- xtalk_intrp = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
-
- if ((!intr_shared) && (*xtalk_intrp)) {
-
- bridge_t *bridge = pcibr_soft->bs_base;
- bridgereg_t int_dev;
-
- xtalk_intr_free(*xtalk_intrp);
- *xtalk_intrp = 0;
-
- /* Clear the PCI device interrupt to bridge interrupt pin
- * mapping.
- */
- int_dev = bridge->b_int_device;
- int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
- bridge->b_int_device = int_dev;
-
- }
- }
- }
- DEL(pcibr_intr);
-}
-
-LOCAL void
-pcibr_setpciint(xtalk_intr_t xtalk_intr)
-{
- iopaddr_t addr = xtalk_intr_addr_get(xtalk_intr);
- xtalk_intr_vector_t vect = xtalk_intr_vector_get(xtalk_intr);
- bridgereg_t *int_addr = (bridgereg_t *)
- xtalk_intr_sfarg_get(xtalk_intr);
-
- *int_addr = ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
- (BRIDGE_INT_ADDR_FLD & vect));
-}
-
-/*ARGSUSED */
-int
-pcibr_intr_connect(pcibr_intr_t pcibr_intr)
-{
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- bridge_t *bridge = pcibr_soft->bs_base;
- unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
- unsigned pcibr_int_bit;
- bridgereg_t b_int_enable;
- unsigned long s;
-
- if (pcibr_intr == NULL)
- return -1;
-
-#if DEBUG && INTR_DEBUG
- printk("%v: pcibr_intr_connect\n",
- pcibr_intr->bi_dev);
-#endif
-
- *((volatile unsigned *)&pcibr_intr->bi_flags) |= PCIIO_INTR_CONNECTED;
-
- /*
- * For each PCI interrupt line requested, figure
- * out which Bridge PCI Interrupt Line it maps
- * to, and make sure there are xtalk resources
- * allocated for it.
- */
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
- if (pcibr_int_bits & (1 << pcibr_int_bit)) {
- xtalk_intr_t xtalk_intr;
-
- xtalk_intr = pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
-
- /*
- * If this interrupt line is being shared and the connect has
- * already been done, no need to do it again.
- */
- if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected)
- continue;
-
-
- /*
- * Use the pcibr wrapper function to handle all Bridge interrupts
- * regardless of whether the interrupt line is shared or not.
- */
- xtalk_intr_connect(xtalk_intr, (xtalk_intr_setfunc_t) pcibr_setpciint,
- (void *)&(bridge->b_int_addr[pcibr_int_bit].addr));
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 1;
-
-#if DEBUG && INTR_DEBUG
- printk("%v bridge bit %d wrapper connected\n",
- pcibr_intr->bi_dev, pcibr_int_bit);
-#endif
- }
- s = pcibr_lock(pcibr_soft);
- b_int_enable = bridge->b_int_enable;
- b_int_enable |= pcibr_int_bits;
- bridge->b_int_enable = b_int_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- pcibr_unlock(pcibr_soft, s);
-
- return 0;
-}
-
-/*ARGSUSED */
-void
-pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
-{
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- bridge_t *bridge = pcibr_soft->bs_base;
- unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
- unsigned pcibr_int_bit;
- bridgereg_t b_int_enable;
- unsigned long s;
-
- /* Stop calling the function. Now.
- */
- *((volatile unsigned *)&pcibr_intr->bi_flags) &= ~PCIIO_INTR_CONNECTED;
- /*
- * For each PCI interrupt line requested, figure
- * out which Bridge PCI Interrupt Line it maps
- * to, and disconnect the interrupt.
- */
-
- /* don't disable interrupts for lines that
- * are shared between devices.
- */
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
- if ((pcibr_int_bits & (1 << pcibr_int_bit)) &&
- (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared))
- pcibr_int_bits &= ~(1 << pcibr_int_bit);
- if (!pcibr_int_bits)
- return;
-
- s = pcibr_lock(pcibr_soft);
- b_int_enable = bridge->b_int_enable;
- b_int_enable &= ~pcibr_int_bits;
- bridge->b_int_enable = b_int_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- pcibr_unlock(pcibr_soft, s);
-
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
- if (pcibr_int_bits & (1 << pcibr_int_bit)) {
- /* if the interrupt line is now shared,
- * do not disconnect it.
- */
- if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
- continue;
-
- xtalk_intr_disconnect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 0;
-
-#if DEBUG && INTR_DEBUG
- printk("%s: xtalk disconnect done for Bridge bit %d\n",
- pcibr_soft->bs_name, pcibr_int_bit);
-#endif
-
- /* if we are sharing the interrupt line,
- * connect us up; this closes the hole
-		     * where another pcibr_intr_alloc()
- * was in progress as we disconnected.
- */
- if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
- continue;
-
- xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
- (xtalk_intr_setfunc_t)pcibr_setpciint,
- (void *) &(bridge->b_int_addr[pcibr_int_bit].addr));
- }
-}
-
-/*ARGSUSED */
-devfs_handle_t
-pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
-{
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
- unsigned pcibr_int_bit;
-
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
- if (pcibr_int_bits & (1 << pcibr_int_bit))
- return xtalk_intr_cpu_get(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
- return 0;
-}
-
-/* =====================================================================
- * INTERRUPT HANDLING
- */
-LOCAL void
-pcibr_clearwidint(bridge_t *bridge)
-{
- bridge->b_wid_int_upper = 0;
- bridge->b_wid_int_lower = 0;
-}
-
-LOCAL void
-pcibr_setwidint(xtalk_intr_t intr)
-{
- xwidgetnum_t targ = xtalk_intr_target_get(intr);
- iopaddr_t addr = xtalk_intr_addr_get(intr);
- xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
- widgetreg_t NEW_b_wid_int_upper, NEW_b_wid_int_lower;
- widgetreg_t OLD_b_wid_int_upper, OLD_b_wid_int_lower;
-
- bridge_t *bridge = (bridge_t *)xtalk_intr_sfarg_get(intr);
-
- NEW_b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
- XTALK_ADDR_TO_UPPER(addr));
- NEW_b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
-
- OLD_b_wid_int_upper = bridge->b_wid_int_upper;
- OLD_b_wid_int_lower = bridge->b_wid_int_lower;
-
- /* Verify that all interrupts from this Bridge are using a single PI */
- if ((OLD_b_wid_int_upper != 0) && (OLD_b_wid_int_lower != 0)) {
- /*
- * Once set, these registers shouldn't change; they should
- * be set multiple times with the same values.
- *
- * If we're attempting to change these registers, it means
- * that our heuristics for allocating interrupts in a way
- * appropriate for IP35 have failed, and the admin needs to
- * explicitly direct some interrupts (or we need to make the
- * heuristics more clever).
- *
- * In practice, we hope this doesn't happen very often, if
- * at all.
- */
- if ((OLD_b_wid_int_upper != NEW_b_wid_int_upper) ||
- (OLD_b_wid_int_lower != NEW_b_wid_int_lower)) {
- printk(KERN_WARNING "Interrupt allocation is too complex.\n");
-			printk(KERN_WARNING "Use explicit administrative interrupt targeting.\n");
- printk(KERN_WARNING "bridge=0x%lx targ=0x%x\n", (unsigned long)bridge, targ);
- printk(KERN_WARNING "NEW=0x%x/0x%x OLD=0x%x/0x%x\n",
- NEW_b_wid_int_upper, NEW_b_wid_int_lower,
- OLD_b_wid_int_upper, OLD_b_wid_int_lower);
-			PRINT_PANIC("PCI Bridge interrupt targeting error\n");
- }
- }
-
- bridge->b_wid_int_upper = NEW_b_wid_int_upper;
- bridge->b_wid_int_lower = NEW_b_wid_int_lower;
- bridge->b_int_host_err = vect;
-}
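The check above treats b_wid_int_upper/lower as effectively write-once: repeated calls must program the same target, or the driver complains and panics. A small self-contained sketch of that pattern (the structure and register values here are made up):

#include <stdio.h>
#include <stdint.h>

struct widint { uint32_t upper, lower; };	/* stand-in for the registers */

static int set_widint_once(struct widint *r, uint32_t up, uint32_t lo)
{
	if (r->upper != 0 && r->lower != 0 &&
	    (r->upper != up || r->lower != lo)) {
		fprintf(stderr, "interrupt target already set differently\n");
		return -1;			/* the real code panics here */
	}
	r->upper = up;				/* re-programming the same value is fine */
	r->lower = lo;
	return 0;
}

int main(void)
{
	struct widint w = { 0, 0 };
	set_widint_once(&w, 0x000c1234, 0x5678);	/* first programming */
	return set_widint_once(&w, 0x000c1234, 0x5678);	/* same target: ok */
}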
-
-/*
- * pcibr_xintr_preset: called during mlreset time
- * if the platform specific code needs to route
- * one of the Bridge's xtalk interrupts before the
- * xtalk infrastructure is available.
- */
-void
-pcibr_xintr_preset(void *which_widget,
- int which_widget_intr,
- xwidgetnum_t targ,
- iopaddr_t addr,
- xtalk_intr_vector_t vect)
-{
- bridge_t *bridge = (bridge_t *) which_widget;
-
- if (which_widget_intr == -1) {
- /* bridge widget error interrupt */
- bridge->b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
- XTALK_ADDR_TO_UPPER(addr));
- bridge->b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
- bridge->b_int_host_err = vect;
-
- /* turn on all interrupts except
- * the PCI interrupt requests,
- * at least at heart.
- */
- bridge->b_int_enable |= ~BRIDGE_IMR_INT_MSK;
-
- } else {
- /* routing a PCI device interrupt.
- * targ and low 38 bits of addr must
- * be the same as the already set
- * value for the widget error interrupt.
- */
- bridge->b_int_addr[which_widget_intr].addr =
- ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
- (BRIDGE_INT_ADDR_FLD & vect));
- /*
- * now bridge can let it through;
- * NB: still should be blocked at
- * xtalk provider end, until the service
- * function is set.
- */
- bridge->b_int_enable |= 1 << vect;
- }
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-}
-
-
-/*
- * pcibr_intr_func()
- *
- * This is the pcibr interrupt "wrapper" function that is called,
- * in interrupt context, to initiate the interrupt handler(s) registered
- * (via pcibr_intr_alloc/connect) for the occurring interrupt. Non-threaded
- * handlers will be called directly, and threaded handlers will have their
- * thread woken up.
- */
-void
-pcibr_intr_func(intr_arg_t arg)
-{
- pcibr_intr_wrap_t wrap = (pcibr_intr_wrap_t) arg;
- reg_p wrbf;
- pcibr_intr_t intr;
- pcibr_intr_list_t list;
- int clearit;
- int do_nonthreaded = 1;
- int is_threaded = 0;
- int x = 0;
-
- /*
- * If any handler is still running from a previous interrupt
- * just return. If there's a need to call the handler(s) again,
- * another interrupt will be generated either by the device or by
- * pcibr_force_interrupt().
- */
-
- if (wrap->iw_hdlrcnt) {
- return;
- }
-
- /*
- * Call all interrupt handlers registered.
- * First, the pcibr_intrd threads for any threaded handlers will be
- * awoken, then any non-threaded handlers will be called sequentially.
- */
-
- clearit = 1;
- while (do_nonthreaded) {
- for (list = wrap->iw_list; list != NULL; list = list->il_next) {
- if ((intr = list->il_intr) &&
- (intr->bi_flags & PCIIO_INTR_CONNECTED)) {
-
- /*
- * This device may have initiated write
- * requests since the bridge last saw
- * an edge on this interrupt input; flushing
- * the buffer prior to invoking the handler
- * should help but may not be sufficient if we
- * get more requests after the flush, followed
- * by the card deciding it wants service, before
- * the interrupt handler checks to see if things need
- * to be done.
- *
- * There is a similar race condition if
- * an interrupt handler loops around and
- * notices further service is required.
- * Perhaps we need to have an explicit
- * call that interrupt handlers need to
- * do between noticing that DMA to memory
- * has completed, but before observing the
- * contents of memory?
- */
-
- if ((do_nonthreaded) && (!is_threaded)) {
- /* Non-threaded.
- * Call the interrupt handler at interrupt level
- */
-
- /* Only need to flush write buffers if sharing */
-
- if ((wrap->iw_shared) && (wrbf = list->il_wrbf)) {
- if ((x = *wrbf)) /* write request buffer flush */
-#ifdef SUPPORT_PRINTING_V_FORMAT
- printk(KERN_ALERT "pcibr_intr_func %v: \n"
- "write buffer flush failed, wrbf=0x%x\n",
- list->il_intr->bi_dev, wrbf);
-#else
- printk(KERN_ALERT "pcibr_intr_func %p: \n"
- "write buffer flush failed, wrbf=0x%lx\n",
- (void *)list->il_intr->bi_dev, (long) wrbf);
-#endif
- }
- }
-
- clearit = 0;
- }
- }
-
- do_nonthreaded = 0;
- /*
- * If the non-threaded handler was the last to complete,
- * (i.e., no threaded handlers still running) force an
- * interrupt to avoid a potential deadlock situation.
- */
- if (wrap->iw_hdlrcnt == 0) {
- pcibr_force_interrupt(wrap);
- }
- }
-
- /* If there were no handlers,
- * disable the interrupt and return.
- * It will get enabled again after
- * a handler is connected.
- * If we don't do this, we would
- * sit here and spin through the
- * list forever.
- */
- if (clearit) {
- pcibr_soft_t pcibr_soft = wrap->iw_soft;
- bridge_t *bridge = pcibr_soft->bs_base;
- bridgereg_t b_int_enable;
- bridgereg_t mask = 1 << wrap->iw_intr;
- unsigned long s;
-
- s = pcibr_lock(pcibr_soft);
- b_int_enable = bridge->b_int_enable;
- b_int_enable &= ~mask;
- bridge->b_int_enable = b_int_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- pcibr_unlock(pcibr_soft, s);
- return;
- }
-}
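A condensed, self-contained sketch of the dispatch pattern the comments above describe: walk the share list, drain the device's write request buffer when the line is shared, then call each connected handler. The types and fields below are illustrative, not the driver's own:

#include <stdio.h>
#include <stdint.h>

struct fake_intr {
	int                connected;
	void             (*handler)(void *arg);
	void              *arg;
	volatile uint64_t *wrbf;		/* write request buffer flush reg */
	struct fake_intr  *next;
};

static void dispatch(struct fake_intr *list, int shared)
{
	struct fake_intr *i;

	for (i = list; i != NULL; i = i->next) {
		if (!i->connected)
			continue;
		if (shared && i->wrbf)
			(void)*i->wrbf;		/* drain posted DMA writes first */
		i->handler(i->arg);
	}
}

static void say(void *arg) { printf("handler: %s\n", (const char *)arg); }

int main(void)
{
	uint64_t wrbf = 0;
	struct fake_intr b = { 1, say, "dev B", &wrbf, NULL };
	struct fake_intr a = { 1, say, "dev A", &wrbf, &b };
	dispatch(&a, 1);
	return 0;
}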
-
-/* =====================================================================
- * CONFIGURATION MANAGEMENT
- */
-/*ARGSUSED */
-void
-pcibr_provider_startup(devfs_handle_t pcibr)
-{
-}
-
-/*ARGSUSED */
-void
-pcibr_provider_shutdown(devfs_handle_t pcibr)
-{
-}
-
-int
-pcibr_reset(devfs_handle_t conn)
-{
- pciio_info_t pciio_info = pciio_info_get(conn);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
- bridgereg_t ctlreg;
- unsigned cfgctl[8];
- unsigned long s;
- int f, nf;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- int win;
-
- if (pcibr_soft->bs_slot[pciio_slot].has_host) {
- pciio_slot = pcibr_soft->bs_slot[pciio_slot].host_slot;
- pcibr_info = pcibr_soft->bs_slot[pciio_slot].bss_infos[0];
- }
- if (pciio_slot < 4) {
- s = pcibr_lock(pcibr_soft);
- nf = pcibr_soft->bs_slot[pciio_slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[pciio_slot].bss_infos;
- for (f = 0; f < nf; ++f)
- if (pcibr_infoh[f])
- cfgctl[f] = bridge->b_type0_cfg_dev[pciio_slot].f[f].l[PCI_CFG_COMMAND / 4];
-
- ctlreg = bridge->b_wid_control;
- bridge->b_wid_control = ctlreg | BRIDGE_CTRL_RST(pciio_slot);
- /* XXX delay? */
- bridge->b_wid_control = ctlreg;
- /* XXX delay? */
-
- for (f = 0; f < nf; ++f)
- if ((pcibr_info = pcibr_infoh[f]))
- for (win = 0; win < 6; ++win)
- if (pcibr_info->f_window[win].w_base != 0)
- bridge->b_type0_cfg_dev[pciio_slot].f[f].l[PCI_CFG_BASE_ADDR(win) / 4] =
- pcibr_info->f_window[win].w_base;
- for (f = 0; f < nf; ++f)
- if (pcibr_infoh[f])
- bridge->b_type0_cfg_dev[pciio_slot].f[f].l[PCI_CFG_COMMAND / 4] = cfgctl[f];
- pcibr_unlock(pcibr_soft, s);
-
- return 0;
- }
-#ifdef SUPPORT_PRINTING_V_FORMAT
- printk(KERN_WARNING "%v: pcibr_reset unimplemented for slot %d\n",
- conn, pciio_slot);
-#endif
- return -1;
-}
-
-pciio_endian_t
-pcibr_endian_set(devfs_handle_t pconn_vhdl,
- pciio_endian_t device_end,
- pciio_endian_t desired_end)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridgereg_t devreg;
- unsigned long s;
-
- /*
- * Bridge supports hardware swapping; so we can always
- * arrange for the caller's desired endianness.
- */
-
- s = pcibr_lock(pcibr_soft);
- devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
- if (device_end != desired_end)
- devreg |= BRIDGE_DEV_SWAP_BITS;
- else
- devreg &= ~BRIDGE_DEV_SWAP_BITS;
-
- /* NOTE- if we ever put SWAP bits
- * onto the disabled list, we will
- * have to change the logic here.
- */
- if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
- bridge_t *bridge = pcibr_soft->bs_base;
-
- bridge->b_device[pciio_slot].reg = devreg;
- pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- pcibr_unlock(pcibr_soft, s);
-
-#if DEBUG && PCIBR_DEV_DEBUG
- printk("pcibr Device(%d): 0x%p\n", pciio_slot, bridge->b_device[pciio_slot].reg);
-#endif
-
- return desired_end;
-}
-
-/* This (re)sets the GBR and REALTIME bits and also keeps track of how
- * many sets are outstanding. Reset succeeds only if the number of outstanding
- * sets == 1.
- */
-int
-pcibr_priority_bits_set(pcibr_soft_t pcibr_soft,
- pciio_slot_t pciio_slot,
- pciio_priority_t device_prio)
-{
- unsigned long s;
- int *counter;
- bridgereg_t rtbits = 0;
- bridgereg_t devreg;
- int rc = PRIO_SUCCESS;
-
- /* in dual-slot configurations, the host and the
- * guest have separate DMA resources, so they
- * have separate requirements for priority bits.
- */
-
- counter = &(pcibr_soft->bs_slot[pciio_slot].bss_pri_uctr);
-
- /*
- * Bridge supports PCI notions of LOW and HIGH priority
- * arbitration rings via a "REAL_TIME" bit in the per-device
- * Bridge register. The "GBR" bit controls access to the GBR
- * ring on the xbow. These two bits are (re)set together.
- *
- * XXX- Bug in Rev B Bridge Si:
- * Symptom: Prefetcher starts operating incorrectly. This happens
- * due to corruption of the address storage ram in the prefetcher
- * when a non-real time PCI request is pulled and a real-time one is
- *	put in its place. Workaround: Use only a single arbitration ring
- * on PCI bus. GBR and RR can still be uniquely used per
- * device. NETLIST MERGE DONE, WILL BE FIXED IN REV C.
- */
-
- if (pcibr_soft->bs_rev_num != BRIDGE_PART_REV_B)
- rtbits |= BRIDGE_DEV_RT;
-
- /* NOTE- if we ever put DEV_RT or DEV_GBR on
- * the disabled list, we will have to take
- * it into account here.
- */
-
- s = pcibr_lock(pcibr_soft);
- devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
- if (device_prio == PCI_PRIO_HIGH) {
- if ((++*counter == 1)) {
- if (rtbits)
- devreg |= rtbits;
- else
- rc = PRIO_FAIL;
- }
- } else if (device_prio == PCI_PRIO_LOW) {
- if (*counter <= 0)
- rc = PRIO_FAIL;
- else if (--*counter == 0)
- if (rtbits)
- devreg &= ~rtbits;
- }
- if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
- bridge_t *bridge = pcibr_soft->bs_base;
-
- bridge->b_device[pciio_slot].reg = devreg;
- pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- pcibr_unlock(pcibr_soft, s);
-
- return rc;
-}
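A stripped-down sketch of the counting scheme the comment above describes: the RT/GBR bits go on with the first outstanding HIGH request and off only when the last one is released. This ignores the Rev B silicon workaround and the locking; names and values are illustrative:

#include <stdio.h>

enum { PRIO_SUCCESS = 0, PRIO_FAIL = -1 };

static int prio_set(int *counter, unsigned *devreg, unsigned rtbits, int high)
{
	if (high) {
		if (++*counter == 1)
			*devreg |= rtbits;	/* first setter enables */
	} else {
		if (*counter <= 0)
			return PRIO_FAIL;	/* unmatched release */
		if (--*counter == 0)
			*devreg &= ~rtbits;	/* last release disables */
	}
	return PRIO_SUCCESS;
}

int main(void)
{
	int ctr = 0; unsigned devreg = 0;
	prio_set(&ctr, &devreg, 0x100, 1);
	prio_set(&ctr, &devreg, 0x100, 1);
	prio_set(&ctr, &devreg, 0x100, 0);
	printf("counter=%d devreg=0x%x\n", ctr, devreg);	/* 1, 0x100 */
	return 0;
}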
-
-pciio_priority_t
-pcibr_priority_set(devfs_handle_t pconn_vhdl,
- pciio_priority_t device_prio)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
- (void) pcibr_priority_bits_set(pcibr_soft, pciio_slot, device_prio);
-
- return device_prio;
-}
-
-/*
- * Interfaces to allow special (e.g. SGI) drivers to set/clear
- * Bridge-specific device flags. Many flags are modified through
- * PCI-generic interfaces; we don't allow them to be directly
- * manipulated here. Only flags that at this point seem pretty
- * Bridge-specific can be set through these special interfaces.
- * We may add more flags as the need arises, or remove flags and
- * create PCI-generic interfaces as the need arises.
- *
- * Returns 0 on failure, 1 on success
- */
-int
-pcibr_device_flags_set(devfs_handle_t pconn_vhdl,
- pcibr_device_flags_t flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridgereg_t set = 0;
- bridgereg_t clr = 0;
-
- ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
-
- if (flags & PCIBR_WRITE_GATHER)
- set |= BRIDGE_DEV_PMU_WRGA_EN;
- if (flags & PCIBR_NOWRITE_GATHER)
- clr |= BRIDGE_DEV_PMU_WRGA_EN;
-
- if (flags & PCIBR_WRITE_GATHER)
- set |= BRIDGE_DEV_DIR_WRGA_EN;
- if (flags & PCIBR_NOWRITE_GATHER)
- clr |= BRIDGE_DEV_DIR_WRGA_EN;
-
- if (flags & PCIBR_PREFETCH)
- set |= BRIDGE_DEV_PREF;
- if (flags & PCIBR_NOPREFETCH)
- clr |= BRIDGE_DEV_PREF;
-
- if (flags & PCIBR_PRECISE)
- set |= BRIDGE_DEV_PRECISE;
- if (flags & PCIBR_NOPRECISE)
- clr |= BRIDGE_DEV_PRECISE;
-
- if (flags & PCIBR_BARRIER)
- set |= BRIDGE_DEV_BARRIER;
- if (flags & PCIBR_NOBARRIER)
- clr |= BRIDGE_DEV_BARRIER;
-
- if (flags & PCIBR_64BIT)
- set |= BRIDGE_DEV_DEV_SIZE;
- if (flags & PCIBR_NO64BIT)
- clr |= BRIDGE_DEV_DEV_SIZE;
-
- if (set || clr) {
- bridgereg_t devreg;
- unsigned long s;
-
- s = pcibr_lock(pcibr_soft);
- devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
- devreg = (devreg & ~clr) | set;
- if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
- bridge_t *bridge = pcibr_soft->bs_base;
-
- bridge->b_device[pciio_slot].reg = devreg;
- pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- pcibr_unlock(pcibr_soft, s);
-#if DEBUG && PCIBR_DEV_DEBUG
-	printk("pcibr Device(%d): %R\n", pciio_slot, bridge->b_device[pciio_slot].reg, device_bits);
-#endif
- }
- return (1);
-}
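A tiny standalone illustration of the set/clear pairing used above: each feature has an enable flag and a disable flag, and the shadow device register is only written back when the merged value actually changes. The bit values below are invented for the example:

#include <stdio.h>

#define FLAG_WRITE_GATHER	0x01	/* illustrative flag bits */
#define FLAG_NOWRITE_GATHER	0x02
#define DEV_WRGA_EN		0x100	/* illustrative device register bit */

int main(void)
{
	unsigned flags = FLAG_WRITE_GATHER;
	unsigned set = 0, clr = 0, devreg = 0;

	if (flags & FLAG_WRITE_GATHER)   set |= DEV_WRGA_EN;
	if (flags & FLAG_NOWRITE_GATHER) clr |= DEV_WRGA_EN;

	unsigned newreg = (devreg & ~clr) | set;
	if (newreg != devreg) {
		devreg = newreg;	/* hardware write + shadow update */
		printf("devreg now 0x%x\n", devreg);
	}
	return 0;
}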
-
-#ifdef LITTLE_ENDIAN
-/*
- * on sn-ia we need to twiddle the addresses going out
- * the pci bus because we use the unswizzled synergy space
- * (the alternative is to use the swizzled synergy space
- * and byte swap the data)
- */
-#define CB(b,r) (((volatile uint8_t *) b)[((r)^4)])
-#define CS(b,r) (((volatile uint16_t *) b)[((r^4)/2)])
-#define CW(b,r) (((volatile uint32_t *) b)[((r^4)/4)])
-#else
-#define CB(b,r) (((volatile uint8_t *) cfgbase)[(r)^3])
-#define CS(b,r) (((volatile uint16_t *) cfgbase)[((r)/2)^1])
-#define CW(b,r) (((volatile uint32_t *) cfgbase)[(r)/4])
-#endif /* LITTLE_ENDIAN */
-
-
-LOCAL cfg_p
-pcibr_config_addr(devfs_handle_t conn,
- unsigned reg)
-{
- pcibr_info_t pcibr_info;
- pciio_slot_t pciio_slot;
- pciio_function_t pciio_func;
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge;
- cfg_p cfgbase = (cfg_p)0;
-
- pcibr_info = pcibr_info_get(conn);
-
- pciio_slot = pcibr_info->f_slot;
- if (pciio_slot == PCIIO_SLOT_NONE)
- pciio_slot = PCI_TYPE1_SLOT(reg);
-
- pciio_func = pcibr_info->f_func;
- if (pciio_func == PCIIO_FUNC_NONE)
- pciio_func = PCI_TYPE1_FUNC(reg);
-
- pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
-
- bridge = pcibr_soft->bs_base;
-
- cfgbase = bridge->b_type0_cfg_dev[pciio_slot].f[pciio_func].l;
-
- return cfgbase;
-}
-
-uint64_t
-pcibr_config_get(devfs_handle_t conn,
- unsigned reg,
- unsigned size)
-{
- return do_pcibr_config_get(pcibr_config_addr(conn, reg),
- PCI_TYPE1_REG(reg), size);
-}
-
-LOCAL uint64_t
-do_pcibr_config_get(
- cfg_p cfgbase,
- unsigned reg,
- unsigned size)
-{
- unsigned value;
-
-
- value = CW(cfgbase, reg);
-
- if (reg & 3)
- value >>= 8 * (reg & 3);
- if (size < 4)
- value &= (1 << (8 * size)) - 1;
-
- return value;
-}
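A worked, standalone example of the shift-and-mask that do_pcibr_config_get applies to the aligned 32-bit word; the word value and register offset below are made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* the driver always reads the aligned dword, then shifts and masks
 * down to the requested byte or halfword */
static uint32_t extract(uint32_t word, unsigned reg, unsigned size)
{
	uint32_t value = word;

	if (reg & 3)
		value >>= 8 * (reg & 3);		/* align requested offset */
	if (size < 4)
		value &= (1u << (8 * size)) - 1;	/* keep only 'size' bytes */
	return value;
}

int main(void)
{
	/* pretend the dword at config offset 0x0c reads back as 0x00804000 */
	uint32_t word = 0x00804000;

	/* a one-byte read at offset 0x0e (header type) extracts bits 16..23 */
	printf("header type = 0x%x\n", extract(word, 0x0e, 1));	/* 0x80 */
	return 0;
}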
-
-void
-pcibr_config_set(devfs_handle_t conn,
- unsigned reg,
- unsigned size,
- uint64_t value)
-{
- do_pcibr_config_set(pcibr_config_addr(conn, reg),
- PCI_TYPE1_REG(reg), size, value);
-}
-
-LOCAL void
-do_pcibr_config_set(cfg_p cfgbase,
- unsigned reg,
- unsigned size,
- uint64_t value)
-{
- switch (size) {
- case 1:
- CB(cfgbase, reg) = value;
- break;
- case 2:
- if (reg & 1) {
- CB(cfgbase, reg) = value;
- CB(cfgbase, reg + 1) = value >> 8;
- } else
- CS(cfgbase, reg) = value;
- break;
- case 3:
- if (reg & 1) {
- CB(cfgbase, reg) = value;
- CS(cfgbase, (reg + 1)) = value >> 8;
- } else {
- CS(cfgbase, reg) = value;
- CB(cfgbase, reg + 2) = value >> 16;
- }
- break;
-
- case 4:
- CW(cfgbase, reg) = value;
- break;
- }
-}
-
-pciio_provider_t pcibr_provider =
-{
- (pciio_piomap_alloc_f *) pcibr_piomap_alloc,
- (pciio_piomap_free_f *) pcibr_piomap_free,
- (pciio_piomap_addr_f *) pcibr_piomap_addr,
- (pciio_piomap_done_f *) pcibr_piomap_done,
- (pciio_piotrans_addr_f *) pcibr_piotrans_addr,
- (pciio_piospace_alloc_f *) pcibr_piospace_alloc,
- (pciio_piospace_free_f *) pcibr_piospace_free,
-
- (pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
- (pciio_dmamap_free_f *) pcibr_dmamap_free,
- (pciio_dmamap_addr_f *) pcibr_dmamap_addr,
- (pciio_dmamap_list_f *) pcibr_dmamap_list,
- (pciio_dmamap_done_f *) pcibr_dmamap_done,
- (pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
- (pciio_dmatrans_list_f *) pcibr_dmatrans_list,
- (pciio_dmamap_drain_f *) pcibr_dmamap_drain,
- (pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
- (pciio_dmalist_drain_f *) pcibr_dmalist_drain,
-
- (pciio_intr_alloc_f *) pcibr_intr_alloc,
- (pciio_intr_free_f *) pcibr_intr_free,
- (pciio_intr_connect_f *) pcibr_intr_connect,
- (pciio_intr_disconnect_f *) pcibr_intr_disconnect,
- (pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,
-
- (pciio_provider_startup_f *) pcibr_provider_startup,
- (pciio_provider_shutdown_f *) pcibr_provider_shutdown,
- (pciio_reset_f *) pcibr_reset,
- (pciio_write_gather_flush_f *) pcibr_write_gather_flush,
- (pciio_endian_set_f *) pcibr_endian_set,
- (pciio_priority_set_f *) pcibr_priority_set,
- (pciio_config_get_f *) pcibr_config_get,
- (pciio_config_set_f *) pcibr_config_set,
-
- (pciio_error_devenable_f *) 0,
- (pciio_error_extract_f *) 0,
-
-#ifdef LATER
- (pciio_driver_reg_callback_f *) pcibr_driver_reg_callback,
- (pciio_driver_unreg_callback_f *) pcibr_driver_unreg_callback,
-#else
- (pciio_driver_reg_callback_f *) 0,
- (pciio_driver_unreg_callback_f *) 0,
-#endif
- (pciio_device_unregister_f *) pcibr_device_unregister,
- (pciio_dma_enabled_f *) pcibr_dma_enabled,
-};
-
-LOCAL pcibr_hints_t
-pcibr_hints_get(devfs_handle_t xconn_vhdl, int alloc)
-{
- arbitrary_info_t ainfo = 0;
- graph_error_t rv;
- pcibr_hints_t hint;
-
- rv = hwgraph_info_get_LBL(xconn_vhdl, INFO_LBL_PCIBR_HINTS, &ainfo);
-
- if (alloc && (rv != GRAPH_SUCCESS)) {
-
- NEW(hint);
- hint->rrb_alloc_funct = NULL;
- hint->ph_intr_bits = NULL;
- rv = hwgraph_info_add_LBL(xconn_vhdl,
- INFO_LBL_PCIBR_HINTS,
- (arbitrary_info_t) hint);
- if (rv != GRAPH_SUCCESS)
- goto abnormal_exit;
-
- rv = hwgraph_info_get_LBL(xconn_vhdl, INFO_LBL_PCIBR_HINTS, &ainfo);
-
- if (rv != GRAPH_SUCCESS)
- goto abnormal_exit;
-
- if (ainfo != (arbitrary_info_t) hint)
- goto abnormal_exit;
- }
- return (pcibr_hints_t) ainfo;
-
-abnormal_exit:
-#ifdef LATER
- printf("SHOULD NOT BE HERE\n");
-#endif
- DEL(hint);
- return(NULL);
-
-}
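pcibr_hints_get follows a get-or-allocate pattern: look the hints label up, and only when the caller asked for allocation create one, attach it, then read it back to confirm the attach took. A much-simplified standalone sketch, with a single global slot standing in for the hwgraph label:

#include <stdio.h>
#include <stdlib.h>

struct hints { unsigned rrb_fixed; };

static struct hints *label;		/* stand-in for INFO_LBL_PCIBR_HINTS */

static struct hints *hints_get(int alloc)
{
	if (label == NULL && alloc) {
		struct hints *h = calloc(1, sizeof(*h));
		if (h == NULL)
			return NULL;
		label = h;		/* plays the role of hwgraph_info_add_LBL() */
		/* the real code re-reads the label afterwards and bails out
		 * if what comes back is not the structure it just added */
	}
	return label;			/* may still be NULL when !alloc */
}

int main(void)
{
	struct hints *h = hints_get(1);
	if (h)
		h->rrb_fixed = 0xff;	/* what pcibr_hints_fix_rrbs() stores */
	printf("rrb_fixed=0x%x\n", hints_get(0) ? hints_get(0)->rrb_fixed : 0);
	return 0;
}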
-
-void
-pcibr_hints_fix_some_rrbs(devfs_handle_t xconn_vhdl, unsigned mask)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->ph_rrb_fixed = mask;
-#if DEBUG
- else
- printk("pcibr_hints_fix_rrbs: pcibr_hints_get failed at\n"
- "\t%p\n", xconn_vhdl);
-#endif
-}
-
-void
-pcibr_hints_fix_rrbs(devfs_handle_t xconn_vhdl)
-{
- pcibr_hints_fix_some_rrbs(xconn_vhdl, 0xFF);
-}
-
-void
-pcibr_hints_dualslot(devfs_handle_t xconn_vhdl,
- pciio_slot_t host,
- pciio_slot_t guest)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->ph_host_slot[guest] = host + 1;
-#if DEBUG
- else
- printk("pcibr_hints_dualslot: pcibr_hints_get failed at\n"
- "\t%p\n", xconn_vhdl);
-#endif
-}
-
-void
-pcibr_hints_intr_bits(devfs_handle_t xconn_vhdl,
- pcibr_intr_bits_f *xxx_intr_bits)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->ph_intr_bits = xxx_intr_bits;
-#if DEBUG
- else
- printk("pcibr_hints_intr_bits: pcibr_hints_get failed at\n"
- "\t%p\n", xconn_vhdl);
-#endif
-}
-
-void
-pcibr_set_rrb_callback(devfs_handle_t xconn_vhdl, rrb_alloc_funct_t rrb_alloc_funct)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->rrb_alloc_funct = rrb_alloc_funct;
-}
-
-void
-pcibr_hints_handsoff(devfs_handle_t xconn_vhdl)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->ph_hands_off = 1;
-#if DEBUG
- else
- printk("pcibr_hints_handsoff: pcibr_hints_get failed at\n"
- "\t%p\n", xconn_vhdl);
-#endif
-}
-
-void
-pcibr_hints_subdevs(devfs_handle_t xconn_vhdl,
- pciio_slot_t slot,
- uint64_t subdevs)
-{
- arbitrary_info_t ainfo = 0;
- char sdname[16];
- devfs_handle_t pconn_vhdl = GRAPH_VERTEX_NONE;
-
- sprintf(sdname, "pci/%d", slot);
- (void) hwgraph_path_add(xconn_vhdl, sdname, &pconn_vhdl);
- if (pconn_vhdl == GRAPH_VERTEX_NONE) {
-#if DEBUG
- printk("pcibr_hints_subdevs: hwgraph_path_create failed at\n"
- "\t%p (seeking %s)\n", xconn_vhdl, sdname);
-#endif
- return;
- }
- hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
- if (ainfo == 0) {
- uint64_t *subdevp;
-
- NEW(subdevp);
- if (!subdevp) {
-#if DEBUG
- printk("pcibr_hints_subdevs: subdev ptr alloc failed at\n"
- "\t%p\n", pconn_vhdl);
-#endif
- return;
- }
- *subdevp = subdevs;
- hwgraph_info_add_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, (arbitrary_info_t) subdevp);
- hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
- if (ainfo == (arbitrary_info_t) subdevp)
- return;
- DEL(subdevp);
- if (ainfo == (arbitrary_info_t) NULL) {
-#if DEBUG
- printk("pcibr_hints_subdevs: null subdevs ptr at\n"
- "\t%p\n", pconn_vhdl);
-#endif
- return;
- }
-#if DEBUG
-		printk("pcibr_hints_subdevs: dup subdev add_LBL at\n"
- "\t%p\n", pconn_vhdl);
-#endif
- }
- *(uint64_t *) ainfo = subdevs;
-}
-
-
-#ifdef LATER
-
-#include <sys/idbg.h>
-#include <sys/idbgentry.h>
-
-char *pci_space[] = {"NONE",
- "ROM",
- "IO",
- "",
- "MEM",
- "MEM32",
- "MEM64",
- "CFG",
- "WIN0",
- "WIN1",
- "WIN2",
- "WIN3",
- "WIN4",
- "WIN5",
- "",
- "BAD"};
-
-void
-idbg_pss_func(pcibr_info_h pcibr_infoh, int func)
-{
- pcibr_info_t pcibr_info = pcibr_infoh[func];
- char name[MAXDEVNAME];
- int win;
-
- if (!pcibr_info)
- return;
- qprintf("Per-slot Function Info\n");
-#ifdef SUPPORT_PRINTING_V_FORMAT
- sprintf(name, "%v", pcibr_info->f_vertex);
-#endif
- qprintf("\tSlot Name : %s\n",name);
- qprintf("\tPCI Bus : %d ",pcibr_info->f_bus);
- qprintf("Slot : %d ", pcibr_info->f_slot);
- qprintf("Function : %d ", pcibr_info->f_func);
- qprintf("VendorId : 0x%x " , pcibr_info->f_vendor);
- qprintf("DeviceId : 0x%x\n", pcibr_info->f_device);
-#ifdef SUPPORT_PRINTING_V_FORMAT
- sprintf(name, "%v", pcibr_info->f_master);
-#endif
- qprintf("\tBus provider : %s\n",name);
- qprintf("\tProvider Fns : 0x%x ", pcibr_info->f_pops);
- qprintf("Error Handler : 0x%x Arg 0x%x\n",
- pcibr_info->f_efunc,pcibr_info->f_einfo);
- for(win = 0 ; win < 6 ; win++)
- qprintf("\tBase Reg #%d space %s base 0x%x size 0x%x\n",
- win,pci_space[pcibr_info->f_window[win].w_space],
- pcibr_info->f_window[win].w_base,
- pcibr_info->f_window[win].w_size);
-
- qprintf("\tRom base 0x%x size 0x%x\n",
- pcibr_info->f_rbase,pcibr_info->f_rsize);
-
- qprintf("\tInterrupt Bit Map\n");
- qprintf("\t\tPCI Int#\tBridge Pin#\n");
- for (win = 0 ; win < 4; win++)
- qprintf("\t\tINT%c\t\t%d\n",win+'A',pcibr_info->f_ibit[win]);
- qprintf("\n");
-}
-
-
-void
-idbg_pss_info(pcibr_soft_t pcibr_soft, pciio_slot_t slot)
-{
- pcibr_soft_slot_t pss;
- char slot_conn_name[MAXDEVNAME];
- int func;
-
- pss = &pcibr_soft->bs_slot[slot];
- qprintf("PCI INFRASTRUCTURAL INFO FOR SLOT %d\n", slot);
- qprintf("\tHost Present ? %s ", pss->has_host ? "yes" : "no");
- qprintf("\tHost Slot : %d\n",pss->host_slot);
- sprintf(slot_conn_name, "%v", pss->slot_conn);
- qprintf("\tSlot Conn : %s\n",slot_conn_name);
- qprintf("\t#Functions : %d\n",pss->bss_ninfo);
- for (func = 0; func < pss->bss_ninfo; func++)
- idbg_pss_func(pss->bss_infos,func);
- qprintf("\tSpace : %s ",pci_space[pss->bss_devio.bssd_space]);
- qprintf("\tBase : 0x%x ", pss->bss_devio.bssd_base);
- qprintf("\tShadow Devreg : 0x%x\n", pss->bss_device);
- qprintf("\tUsage counts : pmu %d d32 %d d64 %d\n",
- pss->bss_pmu_uctr,pss->bss_d32_uctr,pss->bss_d64_uctr);
-
- qprintf("\tDirect Trans Info : d64_base 0x%x d64_flags 0x%x"
- "d32_base 0x%x d32_flags 0x%x\n",
- pss->bss_d64_base, pss->bss_d64_flags,
- pss->bss_d32_base, pss->bss_d32_flags);
-
- qprintf("\tExt ATEs active ? %s",
- atomic_read(&pss->bss_ext_ates_active) ? "yes" : "no");
- qprintf(" Command register : 0x%x ", pss->bss_cmd_pointer);
- qprintf(" Shadow command val : 0x%x\n", pss->bss_cmd_shadow);
-
- qprintf("\tRRB Info : Valid %d+%d Reserved %d\n",
- pcibr_soft->bs_rrb_valid[slot],
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
-
-}
-
-int ips = 0;
-
-void
-idbg_pss(pcibr_soft_t pcibr_soft)
-{
- pciio_slot_t slot;
-
-
- if (ips >= 0 && ips < 8)
- idbg_pss_info(pcibr_soft,ips);
- else if (ips < 0)
- for (slot = 0; slot < 8; slot++)
- idbg_pss_info(pcibr_soft,slot);
- else
- qprintf("Invalid ips %d\n",ips);
-}
-
-#endif /* LATER */
-
-int
-pcibr_dma_enabled(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
-
- return xtalk_dma_enabled(pcibr_soft->bs_conn);
-}
diff --git a/arch/ia64/sn/io/sn2/Makefile b/arch/ia64/sn/io/sn2/Makefile
index 106bd31b96db69..f8521c8bcea3fb 100644
--- a/arch/ia64/sn/io/sn2/Makefile
+++ b/arch/ia64/sn/io/sn2/Makefile
@@ -11,10 +11,9 @@
EXTRA_CFLAGS := -DLITTLE_ENDIAN
-obj-y += pcibr/ bte_error.o geo_op.o klconflib.o klgraph.o l1.o \
- l1_command.o ml_iograph.o ml_SN_init.o ml_SN_intr.o module.o \
- pci_bus_cvlink.o pciio.o pic.o sgi_io_init.o shub.o shuberror.o \
- shub_intr.o shubio.o xbow.o xtalk.o
+obj-y += pcibr/ ml_SN_intr.o shub_intr.o shuberror.o shub.o bte_error.o \
+ pic.o geo_op.o l1.o l1_command.o klconflib.o klgraph.o ml_SN_init.o \
+ ml_iograph.o module.o pciio.o xbow.o xtalk.o shubio.o
obj-$(CONFIG_KDB) += kdba_io.o
obj-$(CONFIG_SHUB_1_0_SPECIFIC) += efi-rtc.o
diff --git a/arch/ia64/sn/io/sn2/bte_error.c b/arch/ia64/sn/io/sn2/bte_error.c
index 8e086e1c1b4f09..4ab2cb0e9938ca 100644
--- a/arch/ia64/sn/io/sn2/bte_error.c
+++ b/arch/ia64/sn/io/sn2/bte_error.c
@@ -1,10 +1,35 @@
-/* $Id: bte_error.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
+/*
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*
- * Copyright (C) 1992 - 1997, 2000,2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
@@ -29,121 +54,208 @@
#include <asm/sn/sn2/shubio.h>
#include <asm/sn/bte.h>
-/************************************************************************
- * *
- * BTE ERROR RECOVERY *
- * *
- * Given a BTE error, the node causing the error must do the following: *
- * a) Clear all crbs relating to that BTE *
- * 1) Read CRBA value for crb in question *
- * 2) Mark CRB as VALID, store local physical *
- * address known to be good in the address field *
- * (bte_notification_targ is a known good local *
- * address). *
- * 3) Write CRBA *
- * 4) Using ICCR, FLUSH the CRB, and wait for it to *
- * complete. *
- * ... BTE BUSY bit should now be clear (or at least *
- * should be after ALL CRBs associated with the *
- * transfer are complete. *
- * *
- * b) Re-enable BTE *
- * 1) Write IMEM with BTE Enable + XXX bits
- * 2) Write IECLR with BTE clear bits
- * 3) Clear IIDSR INT_SENT bits.
- * *
- ************************************************************************/
-
-/*
- * >>> bte_crb_error_handler needs to be broken into two parts. The
- * first should cleanup the CRB. The second should wait until all bte
- * related CRB's are complete and then do the error reset.
+
+/*
+ * Bte error handling is done in two parts. The first captures
+ * any crb related errors. Since there can be multiple crbs per
+ * interface and multiple interfaces active, we need to wait until
+ * all active crbs are completed. This is the first job of the
+ * second part error handler. When all bte related CRBs are cleanly
+ * completed, it resets the interfaces and gets them ready for new
+ * transfers to be queued.
*/
-void
-bte_crb_error_handler(devfs_handle_t hub_v, int btenum,
- int crbnum, ioerror_t *ioe, int bteop)
+
+
+void bte_error_handler(unsigned long);
+
+
/*
- * Function: bte_crb_error_handler
- * Purpose: Process a CRB for a specific HUB/BTE
- * Parameters: hub_v - vertex of hub in HW graph
- * btenum - bte number on hub (0 == a, 1 == b)
- * crbnum - crb number being processed
- * Notes:
- * This routine assumes serialization at a higher level. A CRB
- * should not be processed more than once. The error recovery
- * follows the following sequence - if you change this, be real
- * sure about what you are doing.
- *
+ * First part error handler. This is called whenever any error CRB interrupt
+ * is generated by the II.
*/
+void
+bte_crb_error_handler(vertex_hdl_t hub_v, int btenum,
+ int crbnum, ioerror_t * ioe, int bteop)
{
- hubinfo_t hinfo;
- icrba_t crba;
- icrbb_t crbb;
- nasid_t n;
- hubreg_t iidsr, imem, ieclr;
+ hubinfo_t hinfo;
+ struct bteinfo_s *bte;
+
hubinfo_get(hub_v, &hinfo);
+ bte = &hinfo->h_nodepda->bte_if[btenum];
+
+ /*
+ * The caller has already figured out the error type, we save that
+	 * in the bte handle structure for the thread exercising the
+ * interface to consume.
+ */
+ switch (ioe->ie_errortype) {
+ case IIO_ICRB_ECODE_PERR:
+ bte->bh_error = BTEFAIL_POISON;
+ break;
+ case IIO_ICRB_ECODE_WERR:
+ bte->bh_error = BTEFAIL_PROT;
+ break;
+ case IIO_ICRB_ECODE_AERR:
+ bte->bh_error = BTEFAIL_ACCESS;
+ break;
+ case IIO_ICRB_ECODE_TOUT:
+ bte->bh_error = BTEFAIL_TOUT;
+ break;
+ case IIO_ICRB_ECODE_XTERR:
+ bte->bh_error = BTEFAIL_XTERR;
+ break;
+ case IIO_ICRB_ECODE_DERR:
+ bte->bh_error = BTEFAIL_DIR;
+ break;
+ case IIO_ICRB_ECODE_PWERR:
+ case IIO_ICRB_ECODE_PRERR:
+ /* NO BREAK */
+ default:
+ bte->bh_error = BTEFAIL_ERROR;
+ }
+ bte->bte_error_count++;
+
+ BTE_PRINTK(("Got an error on cnode %d bte %d\n",
+ bte->bte_cnode, bte->bte_num));
+ bte_error_handler((unsigned long) hinfo->h_nodepda);
+}
- n = hinfo->h_nasid;
-
+/*
+ * Second part error handler. Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+void
+bte_error_handler(unsigned long _nodepda)
+{
+ struct nodepda_s *err_nodepda = (struct nodepda_s *) _nodepda;
+ spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
+ struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
+ nasid_t nasid;
+ int i;
+ int valid_crbs;
+ unsigned long irq_flags;
+ volatile u64 *notify;
+ bte_result_t bh_error;
+ ii_imem_u_t imem; /* II IMEM Register */
+ ii_icrb0_d_u_t icrbd; /* II CRB Register D */
+ ii_ibcr_u_t ibcr;
+ ii_icmr_u_t icmr;
+
+
+ BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+ smp_processor_id()));
+
+ spin_lock_irqsave(recovery_lock, irq_flags);
+
+ if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
+ (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
+ BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
+ smp_processor_id()));
+ spin_unlock_irqrestore(recovery_lock, irq_flags);
+ return;
+ }
/*
- * The following 10 lines (or so) are adapted from IRIXs
- * bte_crb_error function. No clear documentation tells
- * why the crb needs to complete normally in order for
- * the BTE to resume normal operations. This first step
- * appears vital!
+ * Lock all interfaces on this node to prevent new transfers
+ * from being queued.
*/
+ for (i = 0; i < BTES_PER_NODE; i++) {
+ if (err_nodepda->bte_if[i].cleanup_active) {
+ continue;
+ }
+ spin_lock(&err_nodepda->bte_if[i].spinlock);
+ BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
+ smp_processor_id(), i));
+ err_nodepda->bte_if[i].cleanup_active = 1;
+ }
+
+ /* Determine information about our hub */
+ nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
/*
- * Zero error and error code to prevent error_dump complaining
- * about these CRBs. Copy the CRB to the notification line.
- * The crb address is in shub format (physical address shifted
- * right by cacheline size).
+ * A BTE transfer can use multiple CRBs. We need to make sure
+ * that all the BTE CRBs are complete (or timed out) before
+ * attempting to clean up the error. Resetting the BTE while
+ * there are still BTE CRBs active will hang the BTE.
+ * We should look at all the CRBs to see if they are allocated
+ * to the BTE and see if they are still active. When none
+ * are active, we can continue with the cleanup.
+ *
+ * We also want to make sure that the local NI port is up.
+ * When a router resets the NI port can go down, while it
+ * goes through the LLP handshake, but then comes back up.
*/
- crbb.ii_icrb0_b_regval = REMOTE_HUB_L(n, IIO_ICRB_B(crbnum));
- crbb.b_error=0;
- crbb.b_ecode=0;
- REMOTE_HUB_S(n, IIO_ICRB_B(crbnum), crbb.ii_icrb0_b_regval);
-
- crba.ii_icrb0_a_regval = REMOTE_HUB_L(n, IIO_ICRB_A(crbnum));
- crba.a_addr = TO_PHYS((u64)&nodepda->bte_if[btenum].notify) >> 3;
- crba.a_valid = 1;
- REMOTE_HUB_S(n, IIO_ICRB_A(crbnum), crba.ii_icrb0_a_regval);
-
- REMOTE_HUB_S(n, IIO_ICCR,
- IIO_ICCR_PENDING | IIO_ICCR_CMD_FLUSH | crbnum);
-
- while (REMOTE_HUB_L(n, IIO_ICCR) & IIO_ICCR_PENDING)
- ;
-
-
- /* Terminate the BTE. */
- /* >>> The other bte transfer will need to be restarted. */
- HUB_L((shubreg_t *)((nodepda->bte_if[btenum].bte_base_addr +
- IIO_IBCT0 - IIO_IBLS0)));
-
- imem = REMOTE_HUB_L(n, IIO_IMEM);
- ieclr = REMOTE_HUB_L(n, IIO_IECLR);
- if (btenum == 0) {
- imem |= IIO_IMEM_W0ESD | IIO_IMEM_B0ESD;
- ieclr|= IECLR_BTE0;
- } else {
- imem |= IIO_IMEM_W0ESD | IIO_IMEM_B1ESD;
- ieclr|= IECLR_BTE1;
+ icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
+ if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
+ /*
+ * There are errors which still need to be cleaned up by
+ * hubiio_crb_error_handler
+ */
+ mod_timer(recovery_timer, HZ * 5);
+ BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
+ smp_processor_id()));
+ spin_unlock_irqrestore(recovery_lock, irq_flags);
+ return;
}
- REMOTE_HUB_S(n, IIO_IMEM, imem);
- REMOTE_HUB_S(n, IIO_IECLR, ieclr);
-
- iidsr = REMOTE_HUB_L(n, IIO_IIDSR);
- iidsr &= ~IIO_IIDSR_SENT_MASK;
- iidsr |= IIO_IIDSR_ENB_MASK;
- REMOTE_HUB_S(n, IIO_IIDSR, iidsr);
+ if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
+ valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;
- bte_reset_nasid(n);
+ for (i = 0; i < IIO_NUM_CRBS; i++) {
+ if (!((1 << i) & valid_crbs)) {
+ /* This crb was not marked as valid, ignore */
+ continue;
+ }
+ icrbd.ii_icrb0_d_regval =
+ REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
+ if (icrbd.d_bteop) {
+ mod_timer(recovery_timer, HZ * 5);
+ BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
+ err_nodepda, smp_processor_id(), i));
+ spin_unlock_irqrestore(recovery_lock,
+ irq_flags);
+ return;
+ }
+ }
+ }
- *nodepda->bte_if[btenum].most_rcnt_na = IBLS_ERROR;
-}
+ BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda,
+ smp_processor_id()));
+ /* Reenable both bte interfaces */
+ imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
+ imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
+ REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
+
+ /* Reinitialize both BTE state machines. */
+ ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
+ ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
+ REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
+
+
+ for (i = 0; i < BTES_PER_NODE; i++) {
+ bh_error = err_nodepda->bte_if[i].bh_error;
+ if (bh_error != BTE_SUCCESS) {
+ /* There is an error which needs to be notified */
+ notify = err_nodepda->bte_if[i].most_rcnt_na;
+ BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
+ err_nodepda->bte_if[i].bte_cnode,
+ err_nodepda->bte_if[i].bte_num,
+ IBLS_ERROR | (u64) bh_error));
+ *notify = IBLS_ERROR | bh_error;
+ err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
+ }
+
+ err_nodepda->bte_if[i].cleanup_active = 0;
+ BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
+ smp_processor_id(), i));
+ spin_unlock(&pda->cpu_bte_if[i]->spinlock);
+ }
+
+ del_timer(recovery_timer);
+
+ spin_unlock_irqrestore(recovery_lock, irq_flags);
+}
diff --git a/arch/ia64/sn/io/sn2/kdba_io.c b/arch/ia64/sn/io/sn2/kdba_io.c
new file mode 100644
index 00000000000000..51f03577f8e081
--- /dev/null
+++ b/arch/ia64/sn/io/sn2/kdba_io.c
@@ -0,0 +1,76 @@
+/*
+ * Kernel Debugger Architecture Dependent POD functions.
+ *
+ * Copyright (C) 1999-2003 Silicon Graphics, Inc. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#include <linux/types.h>
+#include <linux/kdb.h>
+//#include <linux/kdbprivate.h>
+
+/**
+ * kdba_io - enter POD mode from kdb
+ * @argc: arg count
+ * @argv: arg values
+ * @envp: kdb env. vars
+ * @regs: current register state
+ *
+ * Enter POD mode from kdb using SGI SN specific SAL function call.
+ */
+static int
+kdba_io(int argc, const char **argv, const char **envp, struct pt_regs *regs)
+{
+ kdb_printf("kdba_io entered with addr 0x%p\n", (void *) regs);
+
+ return(0);
+}
+
+/**
+ * kdba_io_init - register 'io' command with kdb
+ *
+ * Register the 'io' command with kdb at load time.
+ */
+void
+kdba_io_init(void)
+{
+ kdb_register("io", kdba_io, "<vaddr>", "Display IO Contents", 0);
+}
+
+/**
+ * kdba_exit - unregister the 'io' command
+ *
+ * Tell kdb that the 'io' command is no longer available.
+ */
+static void __exit
+kdba_exit(void)
+{
+ kdb_unregister("io");
+}
diff --git a/arch/ia64/sn/io/sn2/klconflib.c b/arch/ia64/sn/io/sn2/klconflib.c
index 4d1a92a00ed7db..d3a48ada4ad815 100644
--- a/arch/ia64/sn/io/sn2/klconflib.c
+++ b/arch/ia64/sn/io/sn2/klconflib.c
@@ -24,8 +24,6 @@
#include <asm/sn/router.h>
#include <asm/sn/xtalk/xbow.h>
-#define printf printk
-int hasmetarouter;
#define LDEBUG 0
#define NIC_UNKNOWN ((nic_t) -1)
@@ -37,10 +35,11 @@ int hasmetarouter;
#define DBG(x...)
#endif /* DEBUG_KLGRAPH */
-static void sort_nic_names(lboard_t *) ;
-
u64 klgraph_addr[MAX_COMPACT_NODES];
-int module_number = 0;
+static int hasmetarouter;
+
+
+char brick_types[MAX_BRICK_TYPES + 1] = "crikxdpn%#=012345";
lboard_t *
find_lboard(lboard_t *start, unsigned char brd_type)
@@ -135,23 +134,6 @@ find_lboard_module(lboard_t *start, geoid_t geoid)
return (lboard_t *)NULL;
}
-lboard_t *
-find_lboard_module_class(lboard_t *start, geoid_t geoid,
- unsigned char brd_type)
-{
- while (start) {
- DBG("find_lboard_module_class: lboard 0x%p, start->brd_geoid 0x%x, mod 0x%x, start->brd_type 0x%x, brd_type 0x%x\n", start, start->brd_geoid, geoid, start->brd_type, brd_type);
-
- if (geo_cmp(start->brd_geoid, geoid) &&
- (KLCLASS(start->brd_type) == KLCLASS(brd_type)))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
/*
* Convert a NIC name to a name for use in the hardware graph.
*/
@@ -205,63 +187,6 @@ nic_name_convert(char *old_name, char *new_name)
}
/*
- * Find the lboard structure and get the board name.
- * If we can't find the structure or it's too low a revision,
- * use default name.
- */
-lboard_t *
-get_board_name(nasid_t nasid, geoid_t geoid, slotid_t slot, char *name)
-{
- lboard_t *brd;
-
- brd = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid),
- geoid);
-
-#ifndef _STANDALONE
- {
- cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
-
- if (!brd && (NODEPDA(cnode)->xbow_peer != INVALID_NASID))
- brd = find_lboard_modslot((lboard_t *)
- KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
- geoid);
- }
-#endif
-
- if (!brd || (brd->brd_sversion < 2)) {
- strcpy(name, EDGE_LBL_XWIDGET);
- } else {
- nic_name_convert(brd->brd_name, name);
- }
-
- /*
- * PV # 540860
- * If the name is not 'baseio'
- * get the lowest of all the names in the nic string.
- * This is needed for boards like divo, which can have
- * a bunch of daughter cards, but would like to be called
- * divo. We could do this for baseio
- * but it has some special case names that we would not
- * like to disturb at this point.
- */
-
- /* gfx boards don't need any of this name scrambling */
- if (brd && (KLCLASS(brd->brd_type) == KLCLASS_GFX)) {
- return(brd);
- }
-
- if (!(!strcmp(name, "baseio") )) {
- if (brd) {
- sort_nic_names(brd) ;
- /* Convert to small case, '-' to '_' etc */
- nic_name_convert(brd->brd_name, name) ;
- }
- }
-
- return(brd);
-}
-
-/*
* get_actual_nasid
*
* Completely disabled brds have their klconfig on
@@ -341,12 +266,20 @@ board_to_path(lboard_t *brd, char *path)
board_name = EDGE_LBL_IO;
break;
case KLCLASS_IOBRICK:
- if (brd->brd_type == KLTYPE_PBRICK)
+ if (brd->brd_type == KLTYPE_PXBRICK)
+ board_name = EDGE_LBL_PXBRICK;
+ else if (brd->brd_type == KLTYPE_IXBRICK)
+ board_name = EDGE_LBL_IXBRICK;
+ else if (brd->brd_type == KLTYPE_PBRICK)
board_name = EDGE_LBL_PBRICK;
else if (brd->brd_type == KLTYPE_IBRICK)
board_name = EDGE_LBL_IBRICK;
else if (brd->brd_type == KLTYPE_XBRICK)
board_name = EDGE_LBL_XBRICK;
+ else if (brd->brd_type == KLTYPE_PEBRICK)
+ board_name = EDGE_LBL_PEBRICK;
+ else if (brd->brd_type == KLTYPE_CGBRICK)
+ board_name = EDGE_LBL_CGBRICK;
else
board_name = EDGE_LBL_IOBRICK;
break;
@@ -623,185 +556,6 @@ board_serial_number_get(lboard_t *board,char *serial_number)
#include "asm/sn/sn_private.h"
-xwidgetnum_t
-nodevertex_widgetnum_get(devfs_handle_t node_vtx)
-{
- hubinfo_t hubinfo_p;
-
- hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
- (arbitrary_info_t *) &hubinfo_p);
- return(hubinfo_p->h_widgetid);
-}
-
-devfs_handle_t
-nodevertex_xbow_peer_get(devfs_handle_t node_vtx)
-{
- hubinfo_t hubinfo_p;
- nasid_t xbow_peer_nasid;
- cnodeid_t xbow_peer;
-
- hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
- (arbitrary_info_t *) &hubinfo_p);
- xbow_peer_nasid = hubinfo_p->h_nodepda->xbow_peer;
- if(xbow_peer_nasid == INVALID_NASID)
- return ( (devfs_handle_t)-1);
- xbow_peer = NASID_TO_COMPACT_NODEID(xbow_peer_nasid);
- return(NODEPDA(xbow_peer)->node_vertex);
-}
-
-/* NIC Sorting Support */
-
-#define MAX_NICS_PER_STRING 32
-#define MAX_NIC_NAME_LEN 32
-
-static char *
-get_nic_string(lboard_t *lb)
-{
- int i;
- klinfo_t *k = NULL ;
- klconf_off_t mfg_off = 0 ;
- char *mfg_nic = NULL ;
-
- for (i = 0; i < KLCF_NUM_COMPS(lb); i++) {
- k = KLCF_COMP(lb, i) ;
- switch(k->struct_type) {
- case KLSTRUCT_BRI:
- mfg_off = ((klbri_t *)k)->bri_mfg_nic ;
- break ;
-
- case KLSTRUCT_HUB:
- mfg_off = ((klhub_t *)k)->hub_mfg_nic ;
- break ;
-
- case KLSTRUCT_ROU:
- mfg_off = ((klrou_t *)k)->rou_mfg_nic ;
- break ;
-
- case KLSTRUCT_GFX:
- mfg_off = ((klgfx_t *)k)->gfx_mfg_nic ;
- break ;
-
- case KLSTRUCT_TPU:
- mfg_off = ((kltpu_t *)k)->tpu_mfg_nic ;
- break ;
-
- case KLSTRUCT_GSN_A:
- case KLSTRUCT_GSN_B:
- mfg_off = ((klgsn_t *)k)->gsn_mfg_nic ;
- break ;
-
- case KLSTRUCT_XTHD:
- mfg_off = ((klxthd_t *)k)->xthd_mfg_nic ;
- break;
-
- default:
- mfg_off = 0 ;
- break ;
- }
- if (mfg_off)
- break ;
- }
-
- if ((mfg_off) && (k))
- mfg_nic = (char *)NODE_OFFSET_TO_K0(k->nasid, mfg_off) ;
-
- return mfg_nic ;
-}
-
-char *
-get_first_string(char **ptrs, int n)
-{
- int i ;
- char *tmpptr ;
-
- if ((ptrs == NULL) || (n == 0))
- return NULL ;
-
- tmpptr = ptrs[0] ;
-
- if (n == 1)
- return tmpptr ;
-
- for (i = 0 ; i < n ; i++) {
- if (strcmp(tmpptr, ptrs[i]) > 0)
- tmpptr = ptrs[i] ;
- }
-
- return tmpptr ;
-}
-
-int
-get_ptrs(char *idata, char **ptrs, int n, char *label)
-{
- int i = 0 ;
- char *tmp = idata ;
-
- if ((ptrs == NULL) || (idata == NULL) || (label == NULL) || (n == 0))
- return 0 ;
-
- while ( (tmp = strstr(tmp, label)) ){
- tmp += strlen(label) ;
- /* check for empty name field, and last NULL ptr */
- if ((i < (n-1)) && (*tmp != ';')) {
- ptrs[i++] = tmp ;
- }
- }
-
- ptrs[i] = NULL ;
-
- return i ;
-}
-
-/*
- * sort_nic_names
- *
- * Does not really do sorting. Find the alphabetically lowest
- * name among all the nic names found in a nic string.
- *
- * Return:
- * Nothing
- *
- * Side Effects:
- *
- * lb->brd_name gets the new name found
- */
-
-static void
-sort_nic_names(lboard_t *lb)
-{
- char *nic_str ;
- char *ptrs[MAX_NICS_PER_STRING] ;
- char name[MAX_NIC_NAME_LEN] ;
- char *tmp, *tmp1 ;
-
- *name = 0 ;
-
- /* Get the nic pointer from the lb */
-
- if ((nic_str = get_nic_string(lb)) == NULL)
- return ;
-
- tmp = get_first_string(ptrs,
- get_ptrs(nic_str, ptrs, MAX_NICS_PER_STRING, "Name:")) ;
-
- if (tmp == NULL)
- return ;
-
- if ( (tmp1 = strchr(tmp, ';')) ){
- strncpy(name, tmp, tmp1-tmp) ;
- name[tmp1-tmp] = 0 ;
- } else {
- strncpy(name, tmp, (sizeof(name) -1)) ;
- name[sizeof(name)-1] = 0 ;
- }
-
- strcpy(lb->brd_name, name) ;
-}
-
-
-
-char brick_types[MAX_BRICK_TYPES + 1] = "crikxdpn%#012345";
-
/*
* Format a module id for printing.
*/
@@ -814,6 +568,7 @@ format_module_id(char *buffer, moduleid_t m, int fmt)
rack = MODULE_GET_RACK(m);
ASSERT(MODULE_GET_BTYPE(m) < MAX_BRICK_TYPES);
brickchar = MODULE_GET_BTCHAR(m);
+
position = MODULE_GET_BPOS(m);
if (fmt == MODULE_FORMAT_BRIEF) {
diff --git a/arch/ia64/sn/io/sn2/klgraph.c b/arch/ia64/sn/io/sn2/klgraph.c
index 532a8a73693b0e..010b06956bf383 100644
--- a/arch/ia64/sn/io/sn2/klgraph.c
+++ b/arch/ia64/sn/io/sn2/klgraph.c
@@ -23,7 +23,6 @@
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/kldir.h>
-#include <asm/sn/gda.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/router.h>
#include <asm/sn/xtalk/xbow.h>
@@ -42,7 +41,7 @@
extern char arg_maxnodes[];
extern u64 klgraph_addr[];
-void mark_cpuvertex_as_cpu(devfs_handle_t vhdl, cpuid_t cpuid);
+void mark_cpuvertex_as_cpu(vertex_hdl_t vhdl, cpuid_t cpuid);
/*
@@ -69,7 +68,7 @@ klhwg_invent_alloc(cnodeid_t cnode, int class, int size)
* Add detailed disabled cpu inventory info to the hardware graph.
*/
void
-klhwg_disabled_cpu_invent_info(devfs_handle_t cpuv,
+klhwg_disabled_cpu_invent_info(vertex_hdl_t cpuv,
cnodeid_t cnode,
klcpu_t *cpu, slotid_t slot)
{
@@ -118,7 +117,7 @@ klhwg_disabled_cpu_invent_info(devfs_handle_t cpuv,
* Add detailed cpu inventory info to the hardware graph.
*/
void
-klhwg_cpu_invent_info(devfs_handle_t cpuv,
+klhwg_cpu_invent_info(vertex_hdl_t cpuv,
cnodeid_t cnode,
klcpu_t *cpu)
{
@@ -153,7 +152,7 @@ klhwg_cpu_invent_info(devfs_handle_t cpuv,
* as a part of detailed inventory info in the hwgraph.
*/
void
-klhwg_baseio_inventory_add(devfs_handle_t baseio_vhdl,cnodeid_t cnode)
+klhwg_baseio_inventory_add(vertex_hdl_t baseio_vhdl,cnodeid_t cnode)
{
invent_miscinfo_t *baseio_inventory;
unsigned char version = 0,revision = 0;
@@ -177,20 +176,11 @@ klhwg_baseio_inventory_add(devfs_handle_t baseio_vhdl,cnodeid_t cnode)
sizeof(invent_miscinfo_t));
}
-char *hub_rev[] = {
- "0.0",
- "1.0",
- "2.0",
- "2.1",
- "2.2",
- "2.3"
-};
-
/*
* Add detailed cpu inventory info to the hardware graph.
*/
void
-klhwg_hub_invent_info(devfs_handle_t hubv,
+klhwg_hub_invent_info(vertex_hdl_t hubv,
cnodeid_t cnode,
klhub_t *hub)
{
@@ -215,10 +205,10 @@ klhwg_hub_invent_info(devfs_handle_t hubv,
/* ARGSUSED */
void
-klhwg_add_hub(devfs_handle_t node_vertex, klhub_t *hub, cnodeid_t cnode)
+klhwg_add_hub(vertex_hdl_t node_vertex, klhub_t *hub, cnodeid_t cnode)
{
- devfs_handle_t myhubv;
- devfs_handle_t hub_mon;
+ vertex_hdl_t myhubv;
+ vertex_hdl_t hub_mon;
int rc;
extern struct file_operations shub_mon_fops;
@@ -234,9 +224,9 @@ klhwg_add_hub(devfs_handle_t node_vertex, klhub_t *hub, cnodeid_t cnode)
/* ARGSUSED */
void
-klhwg_add_disabled_cpu(devfs_handle_t node_vertex, cnodeid_t cnode, klcpu_t *cpu, slotid_t slot)
+klhwg_add_disabled_cpu(vertex_hdl_t node_vertex, cnodeid_t cnode, klcpu_t *cpu, slotid_t slot)
{
- devfs_handle_t my_cpu;
+ vertex_hdl_t my_cpu;
char name[120];
cpuid_t cpu_id;
nasid_t nasid;
@@ -257,9 +247,9 @@ klhwg_add_disabled_cpu(devfs_handle_t node_vertex, cnodeid_t cnode, klcpu_t *cpu
/* ARGSUSED */
void
-klhwg_add_cpu(devfs_handle_t node_vertex, cnodeid_t cnode, klcpu_t *cpu)
+klhwg_add_cpu(vertex_hdl_t node_vertex, cnodeid_t cnode, klcpu_t *cpu)
{
- devfs_handle_t my_cpu, cpu_dir;
+ vertex_hdl_t my_cpu, cpu_dir;
char name[120];
cpuid_t cpu_id;
nasid_t nasid;
@@ -295,7 +285,7 @@ klhwg_add_xbow(cnodeid_t cnode, nasid_t nasid)
nasid_t hub_nasid;
cnodeid_t hub_cnode;
int widgetnum;
- devfs_handle_t xbow_v, hubv;
+ vertex_hdl_t xbow_v, hubv;
/*REFERENCED*/
graph_error_t err;
@@ -363,12 +353,12 @@ klhwg_add_xbow(cnodeid_t cnode, nasid_t nasid)
/* ARGSUSED */
void
-klhwg_add_node(devfs_handle_t hwgraph_root, cnodeid_t cnode, gda_t *gdap)
+klhwg_add_node(vertex_hdl_t hwgraph_root, cnodeid_t cnode)
{
nasid_t nasid;
lboard_t *brd;
klhub_t *hub;
- devfs_handle_t node_vertex = NULL;
+ vertex_hdl_t node_vertex = NULL;
char path_buffer[100];
int rv;
char *s;
@@ -382,7 +372,7 @@ klhwg_add_node(devfs_handle_t hwgraph_root, cnodeid_t cnode, gda_t *gdap)
ASSERT(brd);
do {
- devfs_handle_t cpu_dir;
+ vertex_hdl_t cpu_dir;
/* Generate a hardware graph path for this board. */
board_to_path(brd, path_buffer);
@@ -443,7 +433,7 @@ klhwg_add_node(devfs_handle_t hwgraph_root, cnodeid_t cnode, gda_t *gdap)
while (cpu) {
cpuid_t cpu_id;
cpu_id = nasid_slice_to_cpuid(nasid,cpu->cpu_info.physid);
- if (cpu_enabled(cpu_id))
+ if (cpu_online(cpu_id))
klhwg_add_cpu(node_vertex, cnode, cpu);
else
klhwg_add_disabled_cpu(node_vertex, cnode, cpu, brd->brd_slot);
@@ -466,12 +456,12 @@ klhwg_add_node(devfs_handle_t hwgraph_root, cnodeid_t cnode, gda_t *gdap)
/* ARGSUSED */
void
-klhwg_add_all_routers(devfs_handle_t hwgraph_root)
+klhwg_add_all_routers(vertex_hdl_t hwgraph_root)
{
nasid_t nasid;
cnodeid_t cnode;
lboard_t *brd;
- devfs_handle_t node_vertex;
+ vertex_hdl_t node_vertex;
char path_buffer[100];
int rv;
@@ -525,14 +515,14 @@ klhwg_add_all_routers(devfs_handle_t hwgraph_root)
/* ARGSUSED */
void
-klhwg_connect_one_router(devfs_handle_t hwgraph_root, lboard_t *brd,
+klhwg_connect_one_router(vertex_hdl_t hwgraph_root, lboard_t *brd,
cnodeid_t cnode, nasid_t nasid)
{
klrou_t *router;
char path_buffer[50];
char dest_path[50];
- devfs_handle_t router_hndl;
- devfs_handle_t dest_hndl;
+ vertex_hdl_t router_hndl;
+ vertex_hdl_t dest_hndl;
int rc;
int port;
lboard_t *dest_brd;
@@ -619,7 +609,7 @@ klhwg_connect_one_router(devfs_handle_t hwgraph_root, lboard_t *brd,
void
-klhwg_connect_routers(devfs_handle_t hwgraph_root)
+klhwg_connect_routers(vertex_hdl_t hwgraph_root)
{
nasid_t nasid;
cnodeid_t cnode;
@@ -652,15 +642,15 @@ klhwg_connect_routers(devfs_handle_t hwgraph_root)
void
-klhwg_connect_hubs(devfs_handle_t hwgraph_root)
+klhwg_connect_hubs(vertex_hdl_t hwgraph_root)
{
nasid_t nasid;
cnodeid_t cnode;
lboard_t *brd;
klhub_t *hub;
lboard_t *dest_brd;
- devfs_handle_t hub_hndl;
- devfs_handle_t dest_hndl;
+ vertex_hdl_t hub_hndl;
+ vertex_hdl_t dest_hndl;
char path_buffer[50];
char dest_path[50];
graph_error_t rc;
@@ -796,12 +786,12 @@ klhwg_device_disable_hints_add(void)
}
void
-klhwg_add_all_modules(devfs_handle_t hwgraph_root)
+klhwg_add_all_modules(vertex_hdl_t hwgraph_root)
{
cmoduleid_t cm;
char name[128];
- devfs_handle_t vhdl;
- devfs_handle_t module_vhdl;
+ vertex_hdl_t vhdl;
+ vertex_hdl_t module_vhdl;
int rc;
char buffer[16];
@@ -837,12 +827,12 @@ klhwg_add_all_modules(devfs_handle_t hwgraph_root)
}
void
-klhwg_add_all_nodes(devfs_handle_t hwgraph_root)
+klhwg_add_all_nodes(vertex_hdl_t hwgraph_root)
{
cnodeid_t cnode;
for (cnode = 0; cnode < numnodes; cnode++) {
- klhwg_add_node(hwgraph_root, cnode, NULL);
+ klhwg_add_node(hwgraph_root, cnode);
}
for (cnode = 0; cnode < numnodes; cnode++) {
diff --git a/arch/ia64/sn/io/sn2/l1.c b/arch/ia64/sn/io/sn2/l1.c
index c83e1c0ade72fc..6576b4ca0947e9 100644
--- a/arch/ia64/sn/io/sn2/l1.c
+++ b/arch/ia64/sn/io/sn2/l1.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
+#include <asm/io.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/iograph.h>
@@ -36,7 +37,6 @@
#include <asm/sn/hcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/router.h>
#include <asm/sn/module.h>
#include <asm/sn/ksys/l1.h>
@@ -50,6 +50,9 @@
#define UART_BAUD_RATE 57600
+static int L1_connected; /* non-zero when interrupts are enabled */
+
+
int
get_L1_baud(void)
{
@@ -62,7 +65,23 @@ get_L1_baud(void)
int
l1_get_intr_value( void )
{
- return(0);
+ cpuid_t intr_cpuid;
+ nasid_t console_nasid;
+ int major, minor;
+ extern nasid_t get_console_nasid(void);
+
+ /* if it is an old prom, run in poll mode */
+
+ major = sn_sal_rev_major();
+ minor = sn_sal_rev_minor();
+ if ( (major < 1) || ((major == 1) && (minor < 10)) ) {
+ /* interrupt mode does not work before PROM version 1.10 */
+ return (0);
+ }
+
+ console_nasid = get_console_nasid();
+ intr_cpuid = NODEPDA(NASID_TO_COMPACT_NODEID(console_nasid))->node_first_cpu;
+ return CPU_VECTOR_TO_IRQ(intr_cpuid, SGI_UART_VECTOR);
}
/* Disconnect the callup functions - throw away interrupts */
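
The new l1_get_intr_value() only hands back a real IRQ when the PROM is at least revision 1.10 (as read via sn_sal_rev_major()/sn_sal_rev_minor()); older PROMs make it return 0 so the console stays in poll mode. A minimal standalone sketch of that version gate, with a hypothetical helper name and the revision passed in rather than read from SAL:

#include <stdio.h>

/* Non-zero when the PROM revision supports the UART interrupt path;
 * revisions before 1.10 must run in poll mode.  Standalone sketch only. */
static int prom_supports_uart_intr(int major, int minor)
{
        return !((major < 1) || (major == 1 && minor < 10));
}

int main(void)
{
        printf("%d\n", prom_supports_uart_intr(1, 9));   /* 0: poll mode */
        printf("%d\n", prom_supports_uart_intr(1, 10));  /* 1: interrupt mode */
        return 0;
}
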
@@ -74,19 +93,45 @@ l1_unconnect_intr(void)
/* Set up uart interrupt handling for this node's uart */
-void
-l1_connect_intr(void *rx_notify, void *tx_notify)
+int
+l1_connect_intr(void *intr_func, void *arg, struct pt_regs *ep)
{
-#if 0
- // Will need code here for sn2 - something like this
- console_nodepda = NODEPDA(NASID_TO_COMPACT_NODEID(get_master_nasid());
- intr_connect_level(console_nodepda->node_first_cpu,
- SGI_UART_VECTOR, INTPEND0_MAXMASK,
- dummy_intr_func);
- request_irq(SGI_UART_VECTOR | (console_nodepda->node_first_cpu << 8),
- intr_func, SA_INTERRUPT | SA_SHIRQ,
- "l1_protocol_driver", (void *)sc);
-#endif
+ cpuid_t intr_cpuid;
+ nasid_t console_nasid;
+ unsigned int console_irq;
+ int result;
+ extern int intr_connect_level(cpuid_t, int, ilvl_t, intr_func_t);
+ extern nasid_t get_console_nasid(void);
+
+
+ /* don't connect more than once - changing the handler is not supported */
+
+ if ( !L1_connected ) {
+ L1_connected++;
+ console_nasid = get_console_nasid();
+ intr_cpuid = NODEPDA(NASID_TO_COMPACT_NODEID(console_nasid))->node_first_cpu;
+ console_irq = CPU_VECTOR_TO_IRQ(intr_cpuid, SGI_UART_VECTOR);
+ result = intr_connect_level(intr_cpuid, SGI_UART_VECTOR,
+ 0 /*not used*/, 0 /*not used*/);
+ if (result != SGI_UART_VECTOR) {
+ if (result < 0)
+ printk(KERN_WARNING "L1 console driver : intr_connect_level failed %d\n", result);
+ else
+ printk(KERN_WARNING "L1 console driver : intr_connect_level returns wrong bit %d\n", result);
+ return (-1);
+ }
+
+ result = request_irq(console_irq, intr_func, SA_INTERRUPT,
+ "SGI L1 console driver", (void *)arg);
+ if (result < 0) {
+ printk(KERN_WARNING "L1 console driver : request_irq failed %d\n", result);
+ return (-1);
+ }
+
+ /* ask SAL to turn on interrupts in the UART itself */
+ ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+ }
+ return (0);
}
@@ -195,7 +240,7 @@ l1_serial_in_local(void)
int
l1_serial_out( char *str, int len )
{
- int counter = len;
+ int tmp;
/* Ignore empty messages */
if ( len == 0 )
@@ -216,6 +261,8 @@ l1_serial_out( char *str, int len )
if ( IS_RUNNING_ON_SIMULATOR() ) {
extern u64 master_node_bedrock_address;
void early_sn_setup(void);
+ int counter = len;
+
if (!master_node_bedrock_address)
early_sn_setup();
if ( master_node_bedrock_address != (u64)0 ) {
@@ -237,8 +284,9 @@ l1_serial_out( char *str, int len )
}
/* Attempt to write things out thru the sal */
- if ( ia64_sn_console_putb(str, len) )
- return(0);
-
- return((counter <= 0) ? 0 : (len - counter));
+ if ( L1_connected )
+ tmp = ia64_sn_console_xmit_chars(str, len);
+ else
+ tmp = ia64_sn_console_putb(str, len);
+ return ((tmp < 0) ? 0 : tmp);
}
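
With this change l1_serial_out() routes output through the interrupt-driven SAL call (ia64_sn_console_xmit_chars) once the driver is connected, falls back to ia64_sn_console_putb otherwise, and reports the number of characters accepted, clamping SAL error codes to 0. A standalone sketch of that selection, with stub functions standing in for the two SAL wrappers:

#include <stdio.h>
#include <string.h>

static int l1_connected;                    /* set once interrupts are enabled */

/* Stubs standing in for the SAL wrappers; both return the number of
 * characters accepted, or < 0 on error. */
static int sal_xmit_chars(const char *s, int len) { (void)s; return len; }
static int sal_putb(const char *s, int len)       { (void)s; return len; }

static int serial_out(const char *str, int len)
{
        int tmp;

        if (len == 0)
                return 0;
        tmp = l1_connected ? sal_xmit_chars(str, len) : sal_putb(str, len);
        return (tmp < 0) ? 0 : tmp;         /* callers see chars sent, never an error code */
}

int main(void)
{
        const char *msg = "hello\n";
        printf("sent %d chars\n", serial_out(msg, (int)strlen(msg)));
        return 0;
}
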
diff --git a/arch/ia64/sn/io/sn2/l1_command.c b/arch/ia64/sn/io/sn2/l1_command.c
index 9826308a6edea8..280d2bb2ad02b3 100644
--- a/arch/ia64/sn/io/sn2/l1_command.c
+++ b/arch/ia64/sn/io/sn2/l1_command.c
@@ -16,7 +16,6 @@
#include <asm/sn/hcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/router.h>
#include <asm/sn/module.h>
#include <asm/sn/ksys/l1.h>
@@ -26,37 +25,6 @@
#include <asm/sn/sn_sal.h>
#include <linux/ctype.h>
-#define ELSC_TIMEOUT 1000000 /* ELSC response timeout (usec) */
-#define LOCK_TIMEOUT 5000000 /* Hub lock timeout (usec) */
-
-#define hub_cpu_get() 0
-
-#define LBYTE(caddr) (*(char *) caddr)
-
-extern char *bcopy(const char * src, char * dest, int count);
-
-#define LDEBUG 0
-
-/*
- * ELSC data is in NVRAM page 7 at the following offsets.
- */
-
-#define NVRAM_MAGIC_AD 0x700 /* magic number used for init */
-#define NVRAM_PASS_WD 0x701 /* password (4 bytes in length) */
-#define NVRAM_DBG1 0x705 /* virtual XOR debug switches */
-#define NVRAM_DBG2 0x706 /* physical XOR debug switches */
-#define NVRAM_CFG 0x707 /* ELSC Configuration info */
-#define NVRAM_MODULE 0x708 /* system module number */
-#define NVRAM_BIST_FLG 0x709 /* BIST flags (2 bits per nodeboard) */
-#define NVRAM_PARTITION 0x70a /* module's partition id */
-#define NVRAM_DOMAIN 0x70b /* module's domain id */
-#define NVRAM_CLUSTER 0x70c /* module's cluster id */
-#define NVRAM_CELL 0x70d /* module's cellid */
-
-#define NVRAM_MAGIC_NO 0x37 /* value of magic number */
-#define NVRAM_SIZE 16 /* 16 bytes in nvram */
-
-
/* elsc_display_line writes up to 12 characters to either the top or bottom
* line of the L1 display. line points to a buffer containing the message
* to be displayed. The zero-based line number is specified by lnum (so
@@ -69,6 +37,7 @@ int elsc_display_line(nasid_t nasid, char *line, int lnum)
return 0;
}
+
/*
* iobrick routines
*/
@@ -88,9 +57,9 @@ int iobrick_rack_bay_type_get( nasid_t nasid, uint *rack,
if ( ia64_sn_sysctl_iobrick_module_get(nasid, &result) )
return( ELSC_ERROR_CMD_SEND );
- *rack = (result & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
- *bay = (result & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
- *brick_type = (result & L1_ADDR_TYPE_MASK) >> L1_ADDR_TYPE_SHFT;
+ *rack = (result & MODULE_RACK_MASK) >> MODULE_RACK_SHFT;
+ *bay = (result & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT;
+ *brick_type = (result & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT;
*brick_type = toupper(*brick_type);
return 0;
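
iobrick_rack_bay_type_get() now decodes the module id returned by ia64_sn_sysctl_iobrick_module_get() with the generic MODULE_* masks and shifts instead of the old L1_ADDR_* ones. A standalone sketch of that mask/shift decode; the field layout and mask values below are placeholders only, the real MODULE_RACK/BPOS/BTYPE definitions live in the SN module header:

#include <stdio.h>

/* Placeholder layout -- only the decode pattern matters here. */
#define MOD_BTYPE_MASK 0x0000f000u
#define MOD_BTYPE_SHFT 12
#define MOD_RACK_MASK  0x00000fc0u
#define MOD_RACK_SHFT  6
#define MOD_BPOS_MASK  0x0000003fu
#define MOD_BPOS_SHFT  0

static void decode_module(unsigned int module, unsigned int *rack,
                          unsigned int *bay, unsigned int *btype)
{
        *rack  = (module & MOD_RACK_MASK)  >> MOD_RACK_SHFT;
        *bay   = (module & MOD_BPOS_MASK)  >> MOD_BPOS_SHFT;
        *btype = (module & MOD_BTYPE_MASK) >> MOD_BTYPE_SHFT;
}

int main(void)
{
        unsigned int rack, bay, btype;

        decode_module(0x4a57, &rack, &bay, &btype);
        printf("rack %u bay %u type %#x\n", rack, bay, btype);
        return 0;
}
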
@@ -99,14 +68,12 @@ int iobrick_rack_bay_type_get( nasid_t nasid, uint *rack,
int iomoduleid_get(nasid_t nasid)
{
-
int result = 0;
if ( ia64_sn_sysctl_iobrick_module_get(nasid, &result) )
return( ELSC_ERROR_CMD_SEND );
return result;
-
}
int iobrick_module_get(nasid_t nasid)
@@ -142,11 +109,15 @@ int iobrick_module_get(nasid_t nasid)
RACK_ADD_NUM(rack, t);
switch( brick_type ) {
- case 'I':
+ case L1_BRICKTYPE_IX:
+ brick_type = MODULE_IXBRICK; break;
+ case L1_BRICKTYPE_PX:
+ brick_type = MODULE_PXBRICK; break;
+ case L1_BRICKTYPE_I:
brick_type = MODULE_IBRICK; break;
- case 'P':
+ case L1_BRICKTYPE_P:
brick_type = MODULE_PBRICK; break;
- case 'X':
+ case L1_BRICKTYPE_X:
brick_type = MODULE_XBRICK; break;
}
@@ -154,7 +125,7 @@ int iobrick_module_get(nasid_t nasid)
return ret;
}
-#ifdef CONFIG_PCI
+
/*
* iobrick_module_get_nasid() returns a module_id which has the brick
* type encoded in bits 15-12, but this is not the true brick type...
@@ -179,29 +150,54 @@ iobrick_type_get_nasid(nasid_t nasid)
/* convert to a module.h brick type */
for( t = 0; t < MAX_BRICK_TYPES; t++ ) {
- if( brick_types[t] == type )
+ if( brick_types[t] == type ) {
return t;
+ }
}
return -1; /* unknown brick */
}
-#endif
+
int iobrick_module_get_nasid(nasid_t nasid)
{
int io_moduleid;
-#ifdef PIC_LATER
- uint rack, bay;
+ io_moduleid = iobrick_module_get(nasid);
+ return io_moduleid;
+}
+
+/*
+ * given a L1 bricktype, return a bricktype string. This string is the
+ * string that will be used in the hwpath for I/O bricks
+ */
+char *
+iobrick_L1bricktype_to_name(int type)
+{
+ switch (type)
+ {
+ default:
+ return("Unknown");
+
+ case L1_BRICKTYPE_X:
+ return("Xbrick");
- if (PEBRICK_NODE(nasid)) {
- if (peer_iobrick_rack_bay_get(nasid, &rack, &bay)) {
- printf("Could not read rack and bay location "
- "of PEBrick at nasid %d\n", nasid);
- }
+ case L1_BRICKTYPE_I:
+ return("Ibrick");
- io_moduleid = peer_iobrick_module_get(sc, rack, bay);
+ case L1_BRICKTYPE_P:
+ return("Pbrick");
+
+ case L1_BRICKTYPE_PX:
+ return("PXbrick");
+
+ case L1_BRICKTYPE_IX:
+ return("IXbrick");
+
+ case L1_BRICKTYPE_C:
+ return("Cbrick");
+
+ case L1_BRICKTYPE_R:
+ return("Rbrick");
}
-#endif /* PIC_LATER */
- io_moduleid = iobrick_module_get(nasid);
- return io_moduleid;
}
+
diff --git a/arch/ia64/sn/io/sn2/ml_SN_init.c b/arch/ia64/sn/io/sn2/ml_SN_init.c
index 51829ce6e02ed3..f085fceadd9b1f 100644
--- a/arch/ia64/sn/io/sn2/ml_SN_init.c
+++ b/arch/ia64/sn/io/sn2/ml_SN_init.c
@@ -19,25 +19,12 @@
#include <asm/sn/sn_private.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/snconfig.h>
-extern int numcpus;
-extern char arg_maxnodes[];
extern cpuid_t master_procid;
-
-extern int hasmetarouter;
-
int maxcpus;
-cpumask_t boot_cpumask;
-hubreg_t region_mask = 0;
-
extern xwidgetnum_t hub_widget_id(nasid_t);
-extern int valid_icache_reasons; /* Reasons to flush the icache */
-extern int valid_dcache_reasons; /* Reasons to flush the dcache */
-extern u_char miniroot;
-extern volatile int need_utlbmiss_patch;
extern void iograph_early_init(void);
nasid_t master_nasid = INVALID_NASID; /* This is the partition master nasid */
@@ -123,16 +110,6 @@ void init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
mutex_init_locked(&npda->xbow_sema); /* init it locked? */
}
-/* XXX - Move the interrupt stuff to intr.c ? */
-/*
- * Set up the platform-dependent fields in the processor pda.
- * Must be done _after_ init_platform_nodepda().
- * If we need a lock here, something else is wrong!
- */
-void init_platform_pda(cpuid_t cpu)
-{
-}
-
void
update_node_information(cnodeid_t cnodeid)
{
diff --git a/arch/ia64/sn/io/sn2/ml_SN_intr.c b/arch/ia64/sn/io/sn2/ml_SN_intr.c
index e42a347cac05e0..31da1ccb1bc552 100644
--- a/arch/ia64/sn/io/sn2/ml_SN_intr.c
+++ b/arch/ia64/sn/io/sn2/ml_SN_intr.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
@@ -40,11 +40,14 @@
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
-extern irqpda_t *irqpdaindr[];
-extern cnodeid_t master_node_get(devfs_handle_t vhdl);
+extern irqpda_t *irqpdaindr;
+extern cnodeid_t master_node_get(vertex_hdl_t vhdl);
extern nasid_t master_nasid;
// Initialize some shub registers for interrupts, both IO and error.
+//
+
+
void
intr_init_vecblk( nodepda_t *npda,
@@ -58,6 +61,8 @@ intr_init_vecblk( nodepda_t *npda,
nodepda_t *lnodepda;
sh_ii_int0_enable_u_t ii_int_enable;
sh_int_node_id_config_u_t node_id_config;
+ sh_local_int5_config_u_t local5_config;
+ sh_local_int5_enable_u_t local5_enable;
extern void sn_init_cpei_timer(void);
static int timer_added = 0;
@@ -93,6 +98,19 @@ intr_init_vecblk( nodepda_t *npda,
HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_ERROR_MASK), 0);
HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_CRBP_ERROR_MASK), 0);
+ // Configure and enable the UART interrupt on all nodes.
+
+ local5_config.sh_local_int5_config_regval = 0;
+ local5_config.sh_local_int5_config_s.idx = SGI_UART_VECTOR;
+ local5_config.sh_local_int5_config_s.pid = cpu0;
+ HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_CONFIG),
+ local5_config.sh_local_int5_config_regval);
+
+ local5_enable.sh_local_int5_enable_regval = 0;
+ local5_enable.sh_local_int5_enable_s.uart_int = 1;
+ HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_ENABLE),
+ local5_enable.sh_local_int5_enable_regval);
+
// The II_INT_CONFIG register for cpu 0.
ii_int_config.sh_ii_int0_config_regval = 0;
@@ -119,13 +137,6 @@ intr_init_vecblk( nodepda_t *npda,
// Enable interrupts for II_INT0 and 1.
ii_int_enable.sh_ii_int0_enable_regval = 0;
ii_int_enable.sh_ii_int0_enable_s.ii_enable = 1;
-#ifdef BUS_INT_WAR
- /* Dont enable any ints from II. We will poll for interrupts. */
- ii_int_enable.sh_ii_int0_enable_s.ii_enable = 0;
-
- /* Enable IPIs. We use them ONLY for send INITs to hung cpus */
- *(volatile long*)GLOBAL_MMR_ADDR(nasid, SH_IPI_INT_ENABLE) = 1;
-#endif
HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_ENABLE),
ii_int_enable.sh_ii_int0_enable_regval);
@@ -147,7 +158,8 @@ do_intr_reserve_level(cpuid_t cpu,
int reserve)
{
int i;
- irqpda_t *irqs = irqpdaindr[cpu];
+ irqpda_t *irqs = irqpdaindr;
+ int min_shared;
if (reserve) {
if (bit < 0) {
@@ -158,8 +170,32 @@ do_intr_reserve_level(cpuid_t cpu,
}
}
}
- if (bit < 0) {
- return -1;
+ if (bit < 0) { /* ran out of irqs. Have to share. This will be rare. */
+ min_shared = 256;
+ for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
+ /* Share with the same device class */
+ if (irqpdaindr->current->vendor == irqpdaindr->device_dev[i]->vendor &&
+ irqpdaindr->current->device == irqpdaindr->device_dev[i]->device &&
+ irqpdaindr->share_count[i] < min_shared) {
+ min_shared = irqpdaindr->share_count[i];
+ bit = i;
+ }
+ }
+ min_shared = 256;
+ if (bit < 0) { /* didn't find a matching device, just pick one. This will be */
+ /* exceptionally rare. */
+ for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
+ if (irqpdaindr->share_count[i] < min_shared) {
+ min_shared = irqpdaindr->share_count[i];
+ bit = i;
+ }
+ }
+ }
+ irqpdaindr->share_count[bit]++;
+ }
+ if (irqs->irq_flags[bit] & SN2_IRQ_SHARED) {
+ irqs->irq_flags[bit] |= SN2_IRQ_RESERVED;
+ return bit;
}
if (irqs->irq_flags[bit] & SN2_IRQ_RESERVED) {
return -1;
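
When every device vector is already reserved, do_intr_reserve_level() now shares one: it first looks for a vector used by the same vendor/device pair, and failing that takes the vector with the lowest share count. A standalone sketch of the second, "least shared" pass, assuming a plain array of per-vector share counts; the vector range constants are illustrative stand-ins for IA64_SN2_FIRST/LAST_DEVICE_VECTOR:

#include <stdio.h>

#define FIRST_DEVICE_VECTOR 0x30    /* illustrative range only */
#define LAST_DEVICE_VECTOR  0xe0

static int share_count[256];

/* Pick the vector in [FIRST, LAST) with the fewest current users and bump
 * its share count, mirroring the fallback pass in do_intr_reserve_level(). */
static int pick_shared_vector(void)
{
        int i, bit = -1, min_shared = 256;

        for (i = FIRST_DEVICE_VECTOR; i < LAST_DEVICE_VECTOR; i++) {
                if (share_count[i] < min_shared) {
                        min_shared = share_count[i];
                        bit = i;
                }
        }
        if (bit >= 0)
                share_count[bit]++;
        return bit;
}

int main(void)
{
        share_count[FIRST_DEVICE_VECTOR] = 3;   /* first vector already busy */
        printf("picked vector %#x\n", pick_shared_vector());
        return 0;
}
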
@@ -183,7 +219,7 @@ int
intr_reserve_level(cpuid_t cpu,
int bit,
int resflags,
- devfs_handle_t owner_dev,
+ vertex_hdl_t owner_dev,
char *name)
{
return(do_intr_reserve_level(cpu, bit, 1));
@@ -203,9 +239,13 @@ do_intr_connect_level(cpuid_t cpu,
int bit,
int connect)
{
- irqpda_t *irqs = irqpdaindr[cpu];
+ irqpda_t *irqs = irqpdaindr;
if (connect) {
+ if (irqs->irq_flags[bit] & SN2_IRQ_SHARED) {
+ irqs->irq_flags[bit] |= SN2_IRQ_CONNECTED;
+ return bit;
+ }
if (irqs->irq_flags[bit] & SN2_IRQ_CONNECTED) {
return -1;
} else {
@@ -248,24 +288,29 @@ do_intr_cpu_choose(cnodeid_t cnode) {
int slice, min_count = 1000;
irqpda_t *irqs;
- for (slice = 0; slice < CPUS_PER_NODE; slice++) {
+ for (slice = CPUS_PER_NODE - 1; slice >= 0; slice--) {
int intrs;
cpu = cnode_slice_to_cpuid(cnode, slice);
- if (cpu == CPU_NONE) {
+ if (cpu == num_online_cpus()) {
continue;
}
- if (!cpu_enabled(cpu)) {
+ if (!cpu_online(cpu)) {
continue;
}
- irqs = irqpdaindr[cpu];
+ irqs = irqpdaindr;
intrs = irqs->num_irq_used;
if (min_count > intrs) {
min_count = intrs;
best_cpu = cpu;
+ if ( enable_shub_wars_1_1() ) {
+ /* Rather than finding the best cpu, always return the first cpu */
+ /* This forces all interrupts to the same cpu */
+ break;
+ }
}
}
return best_cpu;
@@ -285,7 +330,7 @@ intr_bit_reserve_test(cpuid_t cpu,
cnodeid_t cnode,
int req_bit,
int resflags,
- devfs_handle_t owner_dev,
+ vertex_hdl_t owner_dev,
char *name,
int *resp_bit)
{
@@ -307,18 +352,18 @@ intr_bit_reserve_test(cpuid_t cpu,
// Find the node to assign for this interrupt.
cpuid_t
-intr_heuristic(devfs_handle_t dev,
+intr_heuristic(vertex_hdl_t dev,
device_desc_t dev_desc,
int req_bit,
int resflags,
- devfs_handle_t owner_dev,
+ vertex_hdl_t owner_dev,
char *name,
int *resp_bit)
{
cpuid_t cpuid;
cpuid_t candidate = CPU_NONE;
cnodeid_t candidate_node;
- devfs_handle_t pconn_vhdl;
+ vertex_hdl_t pconn_vhdl;
pcibr_soft_t pcibr_soft;
int bit;
@@ -369,8 +414,8 @@ intr_heuristic(devfs_handle_t dev,
if (candidate != CPU_NONE) {
printk("Cannot target interrupt to target node (%ld).\n",candidate);
return CPU_NONE; } else {
- printk("Cannot target interrupt to closest node (%d) 0x%p\n",
- master_node_get(dev), (void *)owner_dev);
+ /* printk("Cannot target interrupt to closest node (%d) 0x%p\n",
+ master_node_get(dev), (void *)owner_dev); */
}
// We couldn't put it on the closest node. Try to find another one.
diff --git a/arch/ia64/sn/io/sn2/ml_iograph.c b/arch/ia64/sn/io/sn2/ml_iograph.c
index 83599fafa98cb6..3af17cd392afe6 100644
--- a/arch/ia64/sn/io/sn2/ml_iograph.c
+++ b/arch/ia64/sn/io/sn2/ml_iograph.c
@@ -22,7 +22,6 @@
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/klconfig.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xtalk.h>
@@ -43,8 +42,6 @@
/* At most 2 hubs can be connected to an xswitch */
#define NUM_XSWITCH_VOLUNTEER 2
-extern unsigned char Is_pic_on_this_nasid[512];
-
/*
* Track which hubs have volunteered to manage devices hanging off of
* a Crosstalk Switch (e.g. xbow). This structure is allocated,
@@ -54,11 +51,11 @@ extern unsigned char Is_pic_on_this_nasid[512];
typedef struct xswitch_vol_s {
mutex_t xswitch_volunteer_mutex;
int xswitch_volunteer_count;
- devfs_handle_t xswitch_volunteer[NUM_XSWITCH_VOLUNTEER];
+ vertex_hdl_t xswitch_volunteer[NUM_XSWITCH_VOLUNTEER];
} *xswitch_vol_t;
void
-xswitch_vertex_init(devfs_handle_t xswitch)
+xswitch_vertex_init(vertex_hdl_t xswitch)
{
xswitch_vol_t xvolinfo;
int rc;
@@ -78,7 +75,7 @@ xswitch_vertex_init(devfs_handle_t xswitch)
* xswitch volunteer structure hanging around. Destroy it.
*/
static void
-xswitch_volunteer_delete(devfs_handle_t xswitch)
+xswitch_volunteer_delete(vertex_hdl_t xswitch)
{
xswitch_vol_t xvolinfo;
int rc;
@@ -94,10 +91,10 @@ xswitch_volunteer_delete(devfs_handle_t xswitch)
*/
/* ARGSUSED */
static void
-volunteer_for_widgets(devfs_handle_t xswitch, devfs_handle_t master)
+volunteer_for_widgets(vertex_hdl_t xswitch, vertex_hdl_t master)
{
xswitch_vol_t xvolinfo = NULL;
- devfs_handle_t hubv;
+ vertex_hdl_t hubv;
hubinfo_t hubinfo;
(void)hwgraph_info_get_LBL(xswitch,
@@ -140,7 +137,7 @@ extern int xbow_port_io_enabled(nasid_t nasid, int widgetnum);
*/
/* ARGSUSED */
static void
-assign_widgets_to_volunteers(devfs_handle_t xswitch, devfs_handle_t hubv)
+assign_widgets_to_volunteers(vertex_hdl_t xswitch, vertex_hdl_t hubv)
{
xswitch_info_t xswitch_info;
xswitch_vol_t xvolinfo = NULL;
@@ -223,18 +220,6 @@ assign_widgets_to_volunteers(devfs_handle_t xswitch, devfs_handle_t hubv)
bt = iobrick_type_get_nasid(nasid);
if (bt >= 0) {
- /*
- * PXBRICK has two busses per widget so this
- * algorithm wouldn't work (all busses would
- * be assigned to one volunteer). Change the
- * bricktype to PBRICK whose mapping is setup
- * suchthat 2 of the PICs will be assigned to
- * one volunteer and the other one will be
- * assigned to the other volunteer.
- */
- if (bt == MODULE_PXBRICK)
- bt = MODULE_PBRICK;
-
i = io_brick_map_widget(bt, widgetnum) & 1;
}
}
@@ -281,8 +266,6 @@ iograph_early_init(void)
DBG("iograph_early_init: Found board 0x%p\n", board);
}
}
-
- hubio_init();
}
/*
@@ -307,7 +290,7 @@ io_init_done(cnodeid_t cnodeid,cpu_cookie_t c)
* hwid for our use.
*/
static void
-early_probe_for_widget(devfs_handle_t hubv, xwidget_hwid_t hwid)
+early_probe_for_widget(vertex_hdl_t hubv, xwidget_hwid_t hwid)
{
hubreg_t llp_csr_reg;
nasid_t nasid;
@@ -351,7 +334,7 @@ early_probe_for_widget(devfs_handle_t hubv, xwidget_hwid_t hwid)
* added as inventory information.
*/
static void
-xwidget_inventory_add(devfs_handle_t widgetv,
+xwidget_inventory_add(vertex_hdl_t widgetv,
lboard_t *board,
struct xwidget_hwid_s hwid)
{
@@ -374,14 +357,13 @@ xwidget_inventory_add(devfs_handle_t widgetv,
*/
void
-io_xswitch_widget_init(devfs_handle_t xswitchv,
- devfs_handle_t hubv,
- xwidgetnum_t widgetnum,
- async_attach_t aa)
+io_xswitch_widget_init(vertex_hdl_t xswitchv,
+ vertex_hdl_t hubv,
+ xwidgetnum_t widgetnum)
{
xswitch_info_t xswitch_info;
xwidgetnum_t hub_widgetid;
- devfs_handle_t widgetv;
+ vertex_hdl_t widgetv;
cnodeid_t cnode;
widgetreg_t widget_id;
nasid_t nasid, peer_nasid;
@@ -427,6 +409,7 @@ io_xswitch_widget_init(devfs_handle_t xswitchv,
char name[4];
lboard_t dummy;
+
/*
* If the current hub is not supposed to be the master
* for this widgetnum, then skip this widget.
@@ -470,12 +453,15 @@ io_xswitch_widget_init(devfs_handle_t xswitchv,
memset(buffer, 0, 16);
format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);
- sprintf(pathname, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d" "/%cbrick" "/%s/%d",
+
+ sprintf(pathname, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d" "/%s" "/%s/%d",
buffer,
geo_slab(board->brd_geoid),
- (board->brd_type == KLTYPE_IBRICK) ? 'I' :
- (board->brd_type == KLTYPE_PBRICK) ? 'P' :
- (board->brd_type == KLTYPE_XBRICK) ? 'X' : '?',
+ (board->brd_type == KLTYPE_IBRICK) ? EDGE_LBL_IBRICK :
+ (board->brd_type == KLTYPE_PBRICK) ? EDGE_LBL_PBRICK :
+ (board->brd_type == KLTYPE_PXBRICK) ? EDGE_LBL_PXBRICK :
+ (board->brd_type == KLTYPE_IXBRICK) ? EDGE_LBL_IXBRICK :
+ (board->brd_type == KLTYPE_XBRICK) ? EDGE_LBL_XBRICK : "?brick",
EDGE_LBL_XTALK, widgetnum);
DBG("io_xswitch_widget_init: path= %s\n", pathname);
@@ -514,36 +500,46 @@ io_xswitch_widget_init(devfs_handle_t xswitchv,
xwidget_inventory_add(widgetv,board,hwid);
(void)xwidget_register(&hwid, widgetv, widgetnum,
- hubv, hub_widgetid,
- aa);
+ hubv, hub_widgetid);
ia64_sn_sysctl_iobrick_module_get(nasid, &io_module);
if (io_module >= 0) {
char buffer[16];
- devfs_handle_t to, from;
+ vertex_hdl_t to, from;
+ char *brick_name;
+ extern char *iobrick_L1bricktype_to_name(int type);
+
memset(buffer, 0, 16);
format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);
- bt = toupper(MODULE_GET_BTCHAR(io_module));
+ if ( islower(MODULE_GET_BTCHAR(io_module)) ) {
+ bt = toupper(MODULE_GET_BTCHAR(io_module));
+ }
+ else {
+ bt = MODULE_GET_BTCHAR(io_module);
+ }
+
+ brick_name = iobrick_L1bricktype_to_name(bt);
+
/* Add a helper vertex so xbow monitoring
* can identify the brick type. It's simply
* an edge from the widget 0 vertex to the
* brick vertex.
*/
- sprintf(pathname, "/dev/hw/" EDGE_LBL_MODULE "/%s/"
+ sprintf(pathname, EDGE_LBL_HW "/" EDGE_LBL_MODULE "/%s/"
EDGE_LBL_SLAB "/%d/"
EDGE_LBL_NODE "/" EDGE_LBL_XTALK "/"
"0",
buffer, geo_slab(board->brd_geoid));
from = hwgraph_path_to_vertex(pathname);
ASSERT_ALWAYS(from);
- sprintf(pathname, "/dev/hw/" EDGE_LBL_MODULE "/%s/"
+ sprintf(pathname, EDGE_LBL_HW "/" EDGE_LBL_MODULE "/%s/"
EDGE_LBL_SLAB "/%d/"
- "%cbrick",
- buffer, geo_slab(board->brd_geoid), bt);
+ "%s",
+ buffer, geo_slab(board->brd_geoid), brick_name);
to = hwgraph_path_to_vertex(pathname);
ASSERT_ALWAYS(to);
@@ -566,12 +562,9 @@ link_done:
static void
-io_init_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnode)
+io_init_xswitch_widgets(vertex_hdl_t xswitchv, cnodeid_t cnode)
{
xwidgetnum_t widgetnum;
- async_attach_t aa;
-
- aa = async_attach_new();
DBG("io_init_xswitch_widgets: xswitchv 0x%p for cnode %d\n", xswitchv, cnode);
@@ -579,13 +572,8 @@ io_init_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnode)
widgetnum++) {
io_xswitch_widget_init(xswitchv,
cnodeid_to_vertex(cnode),
- widgetnum, aa);
+ widgetnum);
}
- /*
- * Wait for parallel attach threads, if any, to complete.
- */
- async_attach_waitall(aa);
- async_attach_free(aa);
}
/*
@@ -595,11 +583,11 @@ io_init_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnode)
* graph and risking hangs.
*/
static void
-io_link_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnodeid)
+io_link_xswitch_widgets(vertex_hdl_t xswitchv, cnodeid_t cnodeid)
{
xwidgetnum_t widgetnum;
char pathname[128];
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
nasid_t nasid, peer_nasid;
lboard_t *board;
@@ -638,21 +626,12 @@ io_link_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnodeid)
return;
}
- if ( Is_pic_on_this_nasid[nasid] ) {
- /* Check both buses */
- sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
- if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
- board->brd_graph_link = vhdl;
- else {
- sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
- if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
- board->brd_graph_link = vhdl;
- else
- board->brd_graph_link = GRAPH_VERTEX_NONE;
- }
- }
+ /* Check both buses */
+ sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
+ if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
+ board->brd_graph_link = vhdl;
else {
- sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
+ sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
board->brd_graph_link = vhdl;
else
@@ -668,16 +647,15 @@ static void
io_init_node(cnodeid_t cnodeid)
{
/*REFERENCED*/
- devfs_handle_t hubv, switchv, widgetv;
+ vertex_hdl_t hubv, switchv, widgetv;
struct xwidget_hwid_s hwid;
hubinfo_t hubinfo;
int is_xswitch;
nodepda_t *npdap;
struct semaphore *peer_sema = 0;
uint32_t widget_partnum;
- nodepda_router_info_t *npda_rip;
cpu_cookie_t c = 0;
- extern int hubdev_docallouts(devfs_handle_t);
+ extern int hubdev_docallouts(vertex_hdl_t);
npdap = NODEPDA(cnodeid);
@@ -696,21 +674,6 @@ io_init_node(cnodeid_t cnodeid)
hubdev_docallouts(hubv);
/*
- * Set up the dependent routers if we have any.
- */
- npda_rip = npdap->npda_rip_first;
-
- while(npda_rip) {
- /* If the router info has not been initialized
- * then we need to do the router initialization
- */
- if (!npda_rip->router_infop) {
- router_init(cnodeid,0,npda_rip);
- }
- npda_rip = npda_rip->router_next;
- }
-
- /*
* Read mfg info on this hub
*/
@@ -833,7 +796,7 @@ io_init_node(cnodeid_t cnodeid)
*/
hubinfo_get(hubv, &hubinfo);
- (void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid, NULL);
+ (void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid);
if (!is_xswitch) {
/* io_init_done takes cpu cookie as 2nd argument
@@ -915,231 +878,9 @@ io_init_node(cnodeid_t cnodeid)
* XXX Irix legacy..controller numbering should be part of devfsd's job
*/
int num_base_io_scsi_ctlr = 2; /* used by syssgi */
-devfs_handle_t base_io_scsi_ctlr_vhdl[NUM_BASE_IO_SCSI_CTLR];
-static devfs_handle_t baseio_enet_vhdl,baseio_console_vhdl;
-
-/*
- * Put the logical controller number information in the
- * scsi controller vertices for each scsi controller that
- * is in a "fixed position".
- */
-static void
-scsi_ctlr_nums_add(devfs_handle_t pci_vhdl)
-{
- {
- int i;
-
- num_base_io_scsi_ctlr = NUM_BASE_IO_SCSI_CTLR;
-
- /* Initialize base_io_scsi_ctlr_vhdl array */
- for (i=0; i<num_base_io_scsi_ctlr; i++)
- base_io_scsi_ctlr_vhdl[i] = GRAPH_VERTEX_NONE;
- }
- {
- /*
- * May want to consider changing the SN0 code, above, to work more like
- * the way this works.
- */
- devfs_handle_t base_ibrick_xbridge_vhdl;
- devfs_handle_t base_ibrick_xtalk_widget_vhdl;
- devfs_handle_t scsi_ctlr_vhdl;
- int i;
- graph_error_t rv;
-
- /*
- * This is a table of "well-known" SCSI controllers and their well-known
- * controller numbers. The names in the table start from the base IBrick's
- * Xbridge vertex, so the first component is the xtalk widget number.
- */
- static struct {
- char *base_ibrick_scsi_path;
- int controller_number;
- } hardwired_scsi_controllers[] = {
- {"15/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 0},
- {"15/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 1},
- {"15/" EDGE_LBL_PCI "/3/" EDGE_LBL_SCSI_CTLR "/0", 2},
- {"14/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 3},
- {"14/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 4},
- {"15/" EDGE_LBL_PCI "/6/ohci/0/" EDGE_LBL_SCSI_CTLR "/0", 5},
- {NULL, -1} /* must be last */
- };
-
- base_ibrick_xtalk_widget_vhdl = hwgraph_connectpt_get(pci_vhdl);
- ASSERT_ALWAYS(base_ibrick_xtalk_widget_vhdl != GRAPH_VERTEX_NONE);
-
- base_ibrick_xbridge_vhdl = hwgraph_connectpt_get(base_ibrick_xtalk_widget_vhdl);
- ASSERT_ALWAYS(base_ibrick_xbridge_vhdl != GRAPH_VERTEX_NONE);
- hwgraph_vertex_unref(base_ibrick_xtalk_widget_vhdl);
-
- /*
- * Iterate through the list of well-known SCSI controllers.
- * For each controller found, set it's controller number according
- * to the table.
- */
- for (i=0; hardwired_scsi_controllers[i].base_ibrick_scsi_path != NULL; i++) {
- rv = hwgraph_path_lookup(base_ibrick_xbridge_vhdl,
- hardwired_scsi_controllers[i].base_ibrick_scsi_path, &scsi_ctlr_vhdl, NULL);
-
- if (rv != GRAPH_SUCCESS) /* No SCSI at this path */
- continue;
-
- ASSERT(hardwired_scsi_controllers[i].controller_number < NUM_BASE_IO_SCSI_CTLR);
- base_io_scsi_ctlr_vhdl[hardwired_scsi_controllers[i].controller_number] = scsi_ctlr_vhdl;
- device_controller_num_set(scsi_ctlr_vhdl, hardwired_scsi_controllers[i].controller_number);
- hwgraph_vertex_unref(scsi_ctlr_vhdl); /* (even though we're actually keeping a reference) */
- }
-
- hwgraph_vertex_unref(base_ibrick_xbridge_vhdl);
- }
-}
-
+vertex_hdl_t base_io_scsi_ctlr_vhdl[NUM_BASE_IO_SCSI_CTLR];
#include <asm/sn/ioerror_handling.h>
-devfs_handle_t sys_critical_graph_root = GRAPH_VERTEX_NONE;
-
-/* Define the system critical vertices and connect them through
- * a canonical parent-child relationships for easy traversal
- * during io error handling.
- */
-static void
-sys_critical_graph_init(void)
-{
- devfs_handle_t bridge_vhdl,master_node_vhdl;
- devfs_handle_t xbow_vhdl = GRAPH_VERTEX_NONE;
- extern devfs_handle_t hwgraph_root;
- devfs_handle_t pci_slot_conn;
- int slot;
- devfs_handle_t baseio_console_conn;
-
- DBG("sys_critical_graph_init: FIXME.\n");
- baseio_console_conn = hwgraph_connectpt_get(baseio_console_vhdl);
-
- if (baseio_console_conn == NULL) {
- return;
- }
-
- /* Get the vertex handle for the baseio bridge */
- bridge_vhdl = device_master_get(baseio_console_conn);
-
- /* Get the master node of the baseio card */
- master_node_vhdl = cnodeid_to_vertex(
- master_node_get(baseio_console_vhdl));
-
- /* Add the "root->node" part of the system critical graph */
-
- sys_critical_graph_vertex_add(hwgraph_root,master_node_vhdl);
-
- /* Check if we have a crossbow */
- if (hwgraph_traverse(master_node_vhdl,
- EDGE_LBL_XTALK"/0",
- &xbow_vhdl) == GRAPH_SUCCESS) {
- /* We have a crossbow.Add "node->xbow" part of the system
- * critical graph.
- */
- sys_critical_graph_vertex_add(master_node_vhdl,xbow_vhdl);
-
- /* Add "xbow->baseio bridge" of the system critical graph */
- sys_critical_graph_vertex_add(xbow_vhdl,bridge_vhdl);
-
- hwgraph_vertex_unref(xbow_vhdl);
- } else
- /* We donot have a crossbow. Add "node->baseio_bridge"
- * part of the system critical graph.
- */
- sys_critical_graph_vertex_add(master_node_vhdl,bridge_vhdl);
-
- /* Add all the populated PCI slot vertices to the system critical
- * graph with the bridge vertex as the parent.
- */
- for (slot = 0 ; slot < 8; slot++) {
- char slot_edge[10];
-
- sprintf(slot_edge,"%d",slot);
- if (hwgraph_traverse(bridge_vhdl,slot_edge, &pci_slot_conn)
- != GRAPH_SUCCESS)
- continue;
- sys_critical_graph_vertex_add(bridge_vhdl,pci_slot_conn);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- hwgraph_vertex_unref(bridge_vhdl);
-
- /* Add the "ioc3 pci connection point -> console ioc3" part
- * of the system critical graph
- */
-
- if (hwgraph_traverse(baseio_console_vhdl,"..",&pci_slot_conn) ==
- GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- baseio_console_vhdl);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- /* Add the "ethernet pci connection point -> base ethernet" part of
- * the system critical graph
- */
- if (hwgraph_traverse(baseio_enet_vhdl,"..",&pci_slot_conn) ==
- GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- baseio_enet_vhdl);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- /* Add the "scsi controller pci connection point -> base scsi
- * controller" part of the system critical graph
- */
- if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[0],
- "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- base_io_scsi_ctlr_vhdl[0]);
- hwgraph_vertex_unref(pci_slot_conn);
- }
- if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[1],
- "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- base_io_scsi_ctlr_vhdl[1]);
- hwgraph_vertex_unref(pci_slot_conn);
- }
- hwgraph_vertex_unref(baseio_console_conn);
-
-}
-
-static void
-baseio_ctlr_num_set(void)
-{
- char name[MAXDEVNAME];
- devfs_handle_t console_vhdl, pci_vhdl, enet_vhdl;
- devfs_handle_t ioc3_console_vhdl_get(void);
-
-
- DBG("baseio_ctlr_num_set; FIXME\n");
- console_vhdl = ioc3_console_vhdl_get();
- if (console_vhdl == GRAPH_VERTEX_NONE)
- return;
- /* Useful for setting up the system critical graph */
- baseio_console_vhdl = console_vhdl;
-
- vertex_to_name(console_vhdl,name,MAXDEVNAME);
-
- strcat(name,__DEVSTR1);
- pci_vhdl = hwgraph_path_to_vertex(name);
- scsi_ctlr_nums_add(pci_vhdl);
- /* Unref the pci_vhdl due to the reference by hwgraph_path_to_vertex
- */
- hwgraph_vertex_unref(pci_vhdl);
-
- vertex_to_name(console_vhdl, name, MAXDEVNAME);
- strcat(name, __DEVSTR4);
- enet_vhdl = hwgraph_path_to_vertex(name);
-
- /* Useful for setting up the system critical graph */
- baseio_enet_vhdl = enet_vhdl;
-
- device_controller_num_set(enet_vhdl, 0);
- /* Unref the enet_vhdl due to the reference by hwgraph_path_to_vertex
- */
- hwgraph_vertex_unref(enet_vhdl);
-}
/* #endif */
/*
@@ -1168,13 +909,6 @@ init_all_devices(void)
*/
update_node_information(cnodeid);
- baseio_ctlr_num_set();
- /* Setup the system critical graph (which is a subgraph of the
- * main hwgraph). This information is useful during io error
- * handling.
- */
- sys_critical_graph_init();
-
#if HWG_PRINT
hwgraph_print();
#endif
@@ -1300,6 +1034,20 @@ struct io_brick_map_s io_brick_tab[] = {
}
},
+/* IXbrick widget number to PCI bus number map */
+ { MODULE_IXBRICK, /* IXbrick type */
+ /* PCI Bus # Widget # */
+ { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
+ 0, /* 0x8 */
+ 0, /* 0x9 */
+ 0, 0, /* 0xa - 0xb */
+ 1, /* 0xc */
+ 5, /* 0xd */
+ 0, /* 0xe */
+ 3 /* 0xf */
+ }
+ },
+
/* Xbrick widget to XIO slot map */
{ MODULE_XBRICK, /* Xbrick type */
/* XIO Slot # Widget # */
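
The new MODULE_IXBRICK entry extends io_brick_tab[], which maps a widget number to a PCI bus number (or XIO slot) per brick type; io_brick_map_widget() simply indexes this table. A standalone sketch of that lookup, with a trimmed table containing only the IXbrick row shown above; the struct field names, MAX_PORT_NUM and the MODULE_IXBRICK value are simplified stand-ins for the kernel definitions:

#include <stdio.h>

#define MAX_PORT_NUM   16
#define MODULE_IXBRICK 1               /* placeholder value for the sketch */

struct brick_map {
        int type;                       /* brick type */
        int bus_for_widget[MAX_PORT_NUM]; /* bus number indexed by widget number */
};

static struct brick_map brick_tab[] = {
        { MODULE_IXBRICK,
          { 0, 0, 0, 0, 0, 0, 0, 0,     /* widgets 0x0 - 0x7 */
            0, 0, 0, 0,                 /* 0x8 - 0xb */
            1,                          /* 0xc */
            5,                          /* 0xd */
            0,                          /* 0xe */
            3 }                         /* 0xf */
        },
};

static int brick_map_widget(int brick_type, int widget_num)
{
        unsigned int i;

        for (i = 0; i < sizeof(brick_tab) / sizeof(brick_tab[0]); i++)
                if (brick_tab[i].type == brick_type)
                        return brick_tab[i].bus_for_widget[widget_num];
        return 0;                       /* unknown brick */
}

int main(void)
{
        printf("IXbrick widget 0xd -> PCI bus %d\n",
               brick_map_widget(MODULE_IXBRICK, 0xd));
        return 0;
}
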
@@ -1335,61 +1083,3 @@ io_brick_map_widget(int brick_type, int widget_num)
return 0;
}
-
-/*
- * Use the device's vertex to map the device's widget to a meaningful int
- */
-int
-io_path_map_widget(devfs_handle_t vertex)
-{
- char hw_path_name[MAXDEVNAME];
- char *wp, *bp, *sp = NULL;
- int widget_num;
- long atoi(char *);
- int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
-
-
- /* Get the full path name of the vertex */
- if (GRAPH_SUCCESS != hwgraph_vertex_name_get(vertex, hw_path_name,
- MAXDEVNAME))
- return 0;
-
- /* Find the widget number in the path name */
- wp = strstr(hw_path_name, "/"EDGE_LBL_XTALK"/");
- if (wp == NULL)
- return 0;
- widget_num = atoi(wp+7);
- if (widget_num < XBOW_PORT_8 || widget_num > XBOW_PORT_F)
- return 0;
-
- /* Find "brick" in the path name */
- bp = strstr(hw_path_name, "brick");
- if (bp == NULL)
- return 0;
-
- /* Find preceding slash */
- sp = bp;
- while (sp > hw_path_name) {
- sp--;
- if (*sp == '/')
- break;
- }
-
- /* Invalid if no preceding slash */
- if (!sp)
- return 0;
-
- /* Bump slash pointer to "brick" prefix */
- sp++;
- /*
- * Verify "brick" prefix length; valid exaples:
- * 'I' from "/Ibrick"
- * 'P' from "/Pbrick"
- * 'X' from "/Xbrick"
- */
- if ((bp - sp) != 1)
- return 0;
-
- return (io_brick_map_widget((int)*sp, widget_num));
-
-}
diff --git a/arch/ia64/sn/io/sn2/module.c b/arch/ia64/sn/io/sn2/module.c
index 9b01b6144f307c..4679cf22e69812 100644
--- a/arch/ia64/sn/io/sn2/module.c
+++ b/arch/ia64/sn/io/sn2/module.c
@@ -18,7 +18,6 @@
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/klconfig.h>
-#include <asm/sn/sn1/hubdev.h>
#include <asm/sn/module.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xswitch.h>
diff --git a/arch/ia64/sn/io/sn2/pcibr/Makefile b/arch/ia64/sn/io/sn2/pcibr/Makefile
index 0b384ee57a066e..c7fdccba1ed1ae 100644
--- a/arch/ia64/sn/io/sn2/pcibr/Makefile
+++ b/arch/ia64/sn/io/sn2/pcibr/Makefile
@@ -9,13 +9,9 @@
# Makefile for the sn2 specific pci bridge routines.
#
-EXTRA_CFLAGS := -DLITTLE_ENDIAN
+EXTRA_CFLAGS := -DLITTLE_ENDIAN -DSHUB_SWAP_WAR
-ifdef CONFIG_IA64_SGI_SN2
-EXTRA_CFLAGS += -DSHUB_SWAP_WAR
-endif
-
-obj-$(CONFIG_IA64_SGI_SN2) += pcibr_dvr.o pcibr_ate.o pcibr_config.o \
+obj-$(CONFIG_IA64_SGI_SN2) += pcibr_ate.o pcibr_config.o \
pcibr_dvr.o pcibr_hints.o \
pcibr_intr.o pcibr_rrb.o pcibr_slot.o \
pcibr_error.o
diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c
index 5b8460ee01d124..ed31eedfab93c7 100644
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -27,7 +27,6 @@
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
@@ -101,73 +100,26 @@ pcibr_init_ext_ate_ram(bridge_t *bridge)
int i, j;
bridgereg_t old_enable, new_enable;
int s;
- int this_is_pic = is_pic(bridge);
/* Probe SSRAM to determine its size. */
- if ( this_is_pic ) {
- old_enable = bridge->b_int_enable;
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- bridge->b_int_enable = new_enable;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- old_enable = BRIDGE_REG_GET32((&bridge->b_int_enable));
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- BRIDGE_REG_SET32((&bridge->b_int_enable)) = new_enable;
- }
- else {
- old_enable = bridge->b_int_enable;
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- bridge->b_int_enable = new_enable;
- }
- }
+ old_enable = bridge->b_int_enable;
+ new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
+ bridge->b_int_enable = new_enable;
for (i = 1; i < ATE_NUM_SIZES; i++) {
/* Try writing a value */
- if ( this_is_pic ) {
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge)))
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = __swab64(ATE_PROBE_VALUE);
- else
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
- }
+ bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
/* Guard against wrap */
for (j = 1; j < i; j++)
bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(j) - 1] = 0;
/* See if value was written */
- if ( this_is_pic ) {
- if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
+ if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
largest_working_size = i;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == __swab64(ATE_PROBE_VALUE))
- largest_working_size = i;
- else {
- if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
- largest_working_size = i;
- }
- }
- }
- }
- if ( this_is_pic ) {
- bridge->b_int_enable = old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&bridge->b_int_enable)) = old_enable;
- BRIDGE_REG_GET32((&bridge->b_wid_tflush)); /* wait until Bridge PIO complete */
- }
- else {
- bridge->b_int_enable = old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
}
+ bridge->b_int_enable = old_enable;
+ bridge->b_wid_tflush; /* wait until Bridge PIO complete */
/*
* ensure that we write and read without any interruption.
@@ -175,26 +127,10 @@ pcibr_init_ext_ate_ram(bridge_t *bridge)
*/
s = splhi();
- if ( this_is_pic ) {
- bridge->b_wid_control = (bridge->b_wid_control
+ bridge->b_wid_control = (bridge->b_wid_control
& ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
| BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
- bridge->b_wid_control; /* inval addr bug war */
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&(bridge->b_wid_control))) =
- __swab32((BRIDGE_REG_GET32((&bridge->b_wid_control))
- & ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
- | BRIDGE_CTRL_SSRAM_SIZE(largest_working_size));
- BRIDGE_REG_GET32((&bridge->b_wid_control));/* inval addr bug war */
- }
- else {
- bridge->b_wid_control = (bridge->b_wid_control & ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
- | BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
- bridge->b_wid_control; /* inval addr bug war */
- }
- }
+ bridge->b_wid_control; /* inval addr bug war */
splx(s);
num_entries = ATE_NUM_ENTRIES(largest_working_size);
@@ -423,16 +359,7 @@ ate_freeze(pcibr_dmamap_t pcibr_dmamap,
/* Flush the write buffer associated with this
* PCI device which might be using dma map RAM.
*/
- if ( is_pic(bridge) ) {
- bridge->b_wr_req_buf[slot].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge)) ) {
- BRIDGE_REG_GET32((&bridge->b_wr_req_buf[slot].reg));
- }
- else
- bridge->b_wr_req_buf[slot].reg;
- }
+ bridge->b_wr_req_buf[slot].reg;
}
}
}
diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_config.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_config.c
index d3f3913b05de96..77a9f9a3686d94 100644
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_config.c
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_config.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -28,19 +28,16 @@
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
-extern pcibr_info_t pcibr_info_get(devfs_handle_t);
+extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
-uint64_t pcibr_config_get(devfs_handle_t, unsigned, unsigned);
-uint64_t do_pcibr_config_get(int, cfg_p, unsigned, unsigned);
-void pcibr_config_set(devfs_handle_t, unsigned, unsigned, uint64_t);
-void do_pcibr_config_set(int, cfg_p, unsigned, unsigned, uint64_t);
-static void swap_do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
+uint64_t pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
+uint64_t do_pcibr_config_get(cfg_p, unsigned, unsigned);
+void pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
+void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
-#ifdef LITTLE_ENDIAN
/*
* on sn-ia we need to twiddle the the addresses going out
* the pci bus because we use the unswizzled synergy space
@@ -51,18 +48,13 @@ static void swap_do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
#define CS(b,r) (((volatile uint16_t *) b)[((r^4)/2)])
#define CW(b,r) (((volatile uint32_t *) b)[((r^4)/4)])
-#define CBP(b,r) (((volatile uint8_t *) b)[(r)^3])
-#define CSP(b,r) (((volatile uint16_t *) b)[((r)/2)^1])
+#define CBP(b,r) (((volatile uint8_t *) b)[(r)])
+#define CSP(b,r) (((volatile uint16_t *) b)[((r)/2)])
#define CWP(b,r) (((volatile uint32_t *) b)[(r)/4])
#define SCB(b,r) (((volatile uint8_t *) b)[((r)^3)])
#define SCS(b,r) (((volatile uint16_t *) b)[((r^2)/2)])
#define SCW(b,r) (((volatile uint32_t *) b)[((r)/4)])
-#else
-#define CB(b,r) (((volatile uint8_t *) cfgbase)[(r)^3])
-#define CS(b,r) (((volatile uint16_t *) cfgbase)[((r)/2)^1])
-#define CW(b,r) (((volatile uint32_t *) cfgbase)[(r)/4])
-#endif
/*
* Return a config space address for given slot / func / offset. Note the
@@ -84,8 +76,7 @@ pcibr_func_config_addr(bridge_t *bridge, pciio_bus_t bus, pciio_slot_t slot,
/*
* Type 0 config space
*/
- if (is_pic(bridge))
- slot++;
+ slot++;
return &bridge->b_type0_cfg_dev[slot].f[func].l[offset];
}
@@ -109,7 +100,7 @@ pcibr_slot_config_get(bridge_t *bridge, pciio_slot_t slot, int offset)
cfg_p cfg_base;
cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
- return (do_pcibr_config_get(is_pic(bridge), cfg_base, offset, sizeof(unsigned)));
+ return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
}
/*
@@ -122,7 +113,7 @@ pcibr_func_config_get(bridge_t *bridge, pciio_slot_t slot,
cfg_p cfg_base;
cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
- return (do_pcibr_config_get(is_pic(bridge), cfg_base, offset, sizeof(unsigned)));
+ return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
}
/*
@@ -135,7 +126,7 @@ pcibr_slot_config_set(bridge_t *bridge, pciio_slot_t slot,
cfg_p cfg_base;
cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
- do_pcibr_config_set(is_pic(bridge), cfg_base, offset, sizeof(unsigned), val);
+ do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
}
/*
@@ -148,13 +139,13 @@ pcibr_func_config_set(bridge_t *bridge, pciio_slot_t slot,
cfg_p cfg_base;
cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
- do_pcibr_config_set(is_pic(bridge), cfg_base, offset, sizeof(unsigned), val);
+ do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
}
int pcibr_config_debug = 0;
cfg_p
-pcibr_config_addr(devfs_handle_t conn,
+pcibr_config_addr(vertex_hdl_t conn,
unsigned reg)
{
pcibr_info_t pcibr_info;
@@ -183,19 +174,6 @@ pcibr_config_addr(devfs_handle_t conn,
pciio_func = PCI_TYPE1_FUNC(reg);
ASSERT(pciio_bus != 0);
-#if 0
- } else if (conn != pciio_info_hostdev_get(pciio_info)) {
- /*
- * Conn is on a subordinate bus, so get bus/slot/func directly from
- * its pciio_info_t structure.
- */
- pciio_bus = pciio_info->c_bus;
- pciio_slot = pciio_info->c_slot;
- pciio_func = pciio_info->c_func;
- if (pciio_func == PCIIO_FUNC_NONE) {
- pciio_func = 0;
- }
-#endif
} else {
/*
* Conn is directly connected to the host bus. PCI bus number is
@@ -224,44 +202,23 @@ pcibr_config_addr(devfs_handle_t conn,
return cfgbase;
}
-extern unsigned char Is_pic_on_this_nasid[];
uint64_t
-pcibr_config_get(devfs_handle_t conn,
+pcibr_config_get(vertex_hdl_t conn,
unsigned reg,
unsigned size)
{
- if ( !Is_pic_on_this_nasid[ NASID_GET((pcibr_config_addr(conn, reg)))] )
- return do_pcibr_config_get(0, pcibr_config_addr(conn, reg),
- PCI_TYPE1_REG(reg), size);
- else
- return do_pcibr_config_get(1, pcibr_config_addr(conn, reg),
+ return do_pcibr_config_get(pcibr_config_addr(conn, reg),
PCI_TYPE1_REG(reg), size);
}
uint64_t
-do_pcibr_config_get(
- int pic,
- cfg_p cfgbase,
+do_pcibr_config_get(cfg_p cfgbase,
unsigned reg,
unsigned size)
{
unsigned value;
- if ( pic ) {
- value = CWP(cfgbase, reg);
- }
- else {
- if ( io_get_sh_swapper(NASID_GET(cfgbase)) ) {
- /*
- * Shub Swapper on - 0 returns PCI Offset 0 but byte swapped!
- * Do not swizzle address and byte swap the result.
- */
- value = SCW(cfgbase, reg);
- value = __swab32(value);
- } else {
- value = CW(cfgbase, reg);
- }
- }
+ value = CWP(cfgbase, reg);
if (reg & 3)
value >>= 8 * (reg & 3);
if (size < 4)
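
do_pcibr_config_get() now always reads through the PIC-style CWP() accessor: it fetches the aligned 32-bit word containing the register, shifts the value down by the register's byte offset and, for sub-word reads, masks it to the requested size. A standalone sketch of that extraction against an ordinary little-endian byte buffer (the final mask step is the usual pattern and is assumed here):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Read 'size' bytes (1, 2 or 4) at byte offset 'reg' of a config-space
 * image by loading the enclosing aligned 32-bit word, then shifting and
 * masking -- the same pattern as do_pcibr_config_get(). */
static uint32_t config_get(const uint8_t *cfg, unsigned int reg, unsigned int size)
{
        uint32_t value;

        memcpy(&value, cfg + (reg & ~3u), sizeof(value));  /* aligned word load */
        if (reg & 3)
                value >>= 8 * (reg & 3);
        if (size < 4)
                value &= (1u << (8 * size)) - 1;           /* keep only the low 'size' bytes */
        return value;
}

int main(void)
{
        uint8_t cfg[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

        printf("%#x\n", config_get(cfg, 2, 2));  /* 0x4433 on a little-endian host */
        return 0;
}
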
@@ -270,108 +227,43 @@ do_pcibr_config_get(
}
void
-pcibr_config_set(devfs_handle_t conn,
+pcibr_config_set(vertex_hdl_t conn,
unsigned reg,
unsigned size,
uint64_t value)
{
- if ( Is_pic_on_this_nasid[ NASID_GET((pcibr_config_addr(conn, reg)))] )
- do_pcibr_config_set(1, pcibr_config_addr(conn, reg),
- PCI_TYPE1_REG(reg), size, value);
- else
- swap_do_pcibr_config_set(pcibr_config_addr(conn, reg),
+ do_pcibr_config_set(pcibr_config_addr(conn, reg),
PCI_TYPE1_REG(reg), size, value);
}
void
-do_pcibr_config_set(int pic,
- cfg_p cfgbase,
+do_pcibr_config_set(cfg_p cfgbase,
unsigned reg,
unsigned size,
uint64_t value)
{
- if ( pic ) {
- switch (size) {
- case 1:
+ switch (size) {
+ case 1:
+ CBP(cfgbase, reg) = value;
+ break;
+ case 2:
+ if (reg & 1) {
CBP(cfgbase, reg) = value;
- break;
- case 2:
- if (reg & 1) {
- CBP(cfgbase, reg) = value;
- CBP(cfgbase, reg + 1) = value >> 8;
- } else
- CSP(cfgbase, reg) = value;
- break;
- case 3:
- if (reg & 1) {
- CBP(cfgbase, reg) = value;
- CSP(cfgbase, (reg + 1)) = value >> 8;
- } else {
- CSP(cfgbase, reg) = value;
- CBP(cfgbase, reg + 2) = value >> 16;
- }
- break;
- case 4:
- CWP(cfgbase, reg) = value;
- break;
- }
- }
- else {
- switch (size) {
- case 1:
- CB(cfgbase, reg) = value;
- break;
- case 2:
- if (reg & 1) {
- CB(cfgbase, reg) = value;
- CB(cfgbase, reg + 1) = value >> 8;
- } else
- CS(cfgbase, reg) = value;
- break;
- case 3:
- if (reg & 1) {
- CB(cfgbase, reg) = value;
- CS(cfgbase, (reg + 1)) = value >> 8;
- } else {
- CS(cfgbase, reg) = value;
- CB(cfgbase, reg + 2) = value >> 16;
- }
- break;
- case 4:
- CW(cfgbase, reg) = value;
- break;
- }
- }
-}
-
-void
-swap_do_pcibr_config_set(cfg_p cfgbase,
- unsigned reg,
- unsigned size,
- uint64_t value)
-{
-
- uint64_t temp_value = 0;
-
- switch (size) {
- case 1:
- SCB(cfgbase, reg) = value;
- break;
- case 2:
- temp_value = __swab16(value);
- if (reg & 1) {
- SCB(cfgbase, reg) = temp_value;
- SCB(cfgbase, reg + 1) = temp_value >> 8;
- } else
- SCS(cfgbase, reg) = temp_value;
- break;
- case 3:
- BUG();
- break;
-
- case 4:
- temp_value = __swab32(value);
- SCW(cfgbase, reg) = temp_value;
- break;
- }
+ CBP(cfgbase, reg + 1) = value >> 8;
+ } else
+ CSP(cfgbase, reg) = value;
+ break;
+ case 3:
+ if (reg & 1) {
+ CBP(cfgbase, reg) = value;
+ CSP(cfgbase, (reg + 1)) = value >> 8;
+ } else {
+ CSP(cfgbase, reg) = value;
+ CBP(cfgbase, reg + 2) = value >> 16;
+ }
+ break;
+ case 4:
+ CWP(cfgbase, reg) = value;
+ break;
+ }
}
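
The rewritten do_pcibr_config_set() keeps only the PIC path: a 1- to 4-byte write is decomposed into byte (CBP), halfword (CSP) and word (CWP) stores depending on size and alignment, so an odd-offset 2- or 3-byte write is done with byte and halfword stores rather than one misaligned access. A standalone sketch of that case analysis against a plain byte buffer, assuming little-endian layout for the wider stores:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Store 'size' bytes (1-4) of 'value' at byte offset 'reg', splitting the
 * access the same way do_pcibr_config_set() does. */
static void config_set(uint8_t *cfg, unsigned int reg, unsigned int size, uint32_t value)
{
        uint16_t h;

        switch (size) {
        case 1:
                cfg[reg] = value;
                break;
        case 2:
                if (reg & 1) {                  /* misaligned: two byte stores */
                        cfg[reg] = value;
                        cfg[reg + 1] = value >> 8;
                } else {
                        h = value;
                        memcpy(cfg + reg, &h, 2);
                }
                break;
        case 3:
                if (reg & 1) {                  /* byte then halfword */
                        cfg[reg] = value;
                        h = value >> 8;
                        memcpy(cfg + reg + 1, &h, 2);
                } else {                        /* halfword then byte */
                        h = value;
                        memcpy(cfg + reg, &h, 2);
                        cfg[reg + 2] = value >> 16;
                }
                break;
        case 4:
                memcpy(cfg + reg, &value, 4);
                break;
        }
}

int main(void)
{
        uint8_t cfg[8] = { 0 };

        config_set(cfg, 1, 3, 0xaabbcc);        /* bytes 1..3 become cc bb aa */
        printf("%02x %02x %02x %02x\n", cfg[0], cfg[1], cfg[2], cfg[3]);
        return 0;
}
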
diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c
index 9b2ce991d5f1f1..d5308e67667fc6 100644
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c
@@ -4,13 +4,16 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
#include <asm/sn/sgi.h>
+#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
@@ -18,6 +21,7 @@
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
+#include <asm/sn/klconfig.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/pci/pciio.h>
@@ -27,7 +31,6 @@
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
@@ -76,30 +79,6 @@ int pcibr_debug_slot = -1; /* '-1' for all slots */
int pcibr_devflag = D_MP;
-/*
- * This is the file operation table for the pcibr driver.
- * As each of the functions are implemented, put the
- * appropriate function name below.
- */
-struct file_operations pcibr_fops = {
- owner: THIS_MODULE,
- llseek: NULL,
- read: NULL,
- write: NULL,
- readdir: NULL,
- poll: NULL,
- ioctl: NULL,
- mmap: NULL,
- open: NULL,
- flush: NULL,
- release: NULL,
- fsync: NULL,
- fasync: NULL,
- lock: NULL,
- readv: NULL,
- writev: NULL
-};
-
/* kbrick widgetnum-to-bus layout */
int p_busnum[MAX_PORT_NUM] = { /* widget# */
0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
@@ -116,17 +95,16 @@ int p_busnum[MAX_PORT_NUM] = { /* widget# */
pcibr_list_p pcibr_list = 0;
#endif
-extern int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
-extern int hub_device_flags_set(devfs_handle_t widget_dev, hub_widget_flags_t flags);
+extern int hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen);
extern long atoi(register char *p);
-extern cnodeid_t nodevertex_to_cnodeid(devfs_handle_t vhdl);
-extern char *dev_to_name(devfs_handle_t dev, char *buf, uint buflen);
+extern cnodeid_t nodevertex_to_cnodeid(vertex_hdl_t vhdl);
+extern char *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
extern struct map *atemapalloc(uint64_t);
extern void atefree(struct map *, size_t, uint64_t);
extern void atemapfree(struct map *);
-extern pciio_dmamap_t get_free_pciio_dmamap(devfs_handle_t);
+extern pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
extern void free_pciio_dmamap(pcibr_dmamap_t);
-extern void xwidget_error_register(devfs_handle_t, error_handler_f *, error_handler_arg_t);
+extern void xwidget_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
#define ATE_WRITE() ate_write(pcibr_soft, ate_ptr, ate_count, ate)
#if PCIBR_FREEZE_TIME
@@ -153,9 +131,9 @@ extern void xwidget_error_register(devfs_handle_t, error_handler_f *, error_han
extern int do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
extern void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
-extern int pcibr_wrb_flush(devfs_handle_t);
-extern int pcibr_rrb_alloc(devfs_handle_t, int *, int *);
-extern void pcibr_rrb_flush(devfs_handle_t);
+extern int pcibr_wrb_flush(vertex_hdl_t);
+extern int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
+extern void pcibr_rrb_flush(vertex_hdl_t);
static int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
void pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
@@ -166,21 +144,19 @@ extern void pcibr_clearwidint(bridge_t *);
extern iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
pciio_space_t, int, int, int);
-void pcibr_init(void);
-int pcibr_attach(devfs_handle_t);
-int pcibr_attach2(devfs_handle_t, bridge_t *, devfs_handle_t,
+int pcibr_attach(vertex_hdl_t);
+int pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t,
int, pcibr_soft_t *);
-int pcibr_detach(devfs_handle_t);
-int pcibr_open(devfs_handle_t *, int, int, cred_t *);
-int pcibr_close(devfs_handle_t, int, int, cred_t *);
-int pcibr_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
-int pcibr_unmap(devfs_handle_t, vhandl_t *);
-int pcibr_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
+int pcibr_detach(vertex_hdl_t);
+int pcibr_close(vertex_hdl_t, int, int, cred_t *);
+int pcibr_map(vertex_hdl_t, vhandl_t *, off_t, size_t, uint);
+int pcibr_unmap(vertex_hdl_t, vhandl_t *);
+int pcibr_ioctl(vertex_hdl_t, int, void *, int, struct cred *, int *);
int pcibr_pcix_rbars_calc(pcibr_soft_t);
extern int pcibr_init_ext_ate_ram(bridge_t *);
extern int pcibr_ate_alloc(pcibr_soft_t, int);
extern void pcibr_ate_free(pcibr_soft_t, int, int);
-extern int pcibr_widget_to_bus(devfs_handle_t pcibr_vhdl);
+extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
extern unsigned ate_freeze(pcibr_dmamap_t pcibr_dmamap,
#if PCIBR_FREEZE_TIME
@@ -197,45 +173,43 @@ extern void ate_thaw(pcibr_dmamap_t pcibr_dmamap, int ate_index,
unsigned *cmd_regs,
unsigned s);
-pcibr_info_t pcibr_info_get(devfs_handle_t);
+pcibr_info_t pcibr_info_get(vertex_hdl_t);
-static iopaddr_t pcibr_addr_pci_to_xio(devfs_handle_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
+static iopaddr_t pcibr_addr_pci_to_xio(vertex_hdl_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-pcibr_piomap_t pcibr_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
+pcibr_piomap_t pcibr_piomap_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
void pcibr_piomap_free(pcibr_piomap_t);
caddr_t pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
void pcibr_piomap_done(pcibr_piomap_t);
-caddr_t pcibr_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-iopaddr_t pcibr_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
-void pcibr_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
+caddr_t pcibr_piotrans_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
+iopaddr_t pcibr_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, size_t, size_t);
+void pcibr_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
static iopaddr_t pcibr_flags_to_d64(unsigned, pcibr_soft_t);
extern bridge_ate_t pcibr_flags_to_ate(unsigned);
-pcibr_dmamap_t pcibr_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
+pcibr_dmamap_t pcibr_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
void pcibr_dmamap_free(pcibr_dmamap_t);
extern bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
static iopaddr_t pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
iopaddr_t pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
-alenlist_t pcibr_dmamap_list(pcibr_dmamap_t, alenlist_t, unsigned);
void pcibr_dmamap_done(pcibr_dmamap_t);
-cnodeid_t pcibr_get_dmatrans_node(devfs_handle_t);
-iopaddr_t pcibr_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t pcibr_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
+cnodeid_t pcibr_get_dmatrans_node(vertex_hdl_t);
+iopaddr_t pcibr_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
void pcibr_dmamap_drain(pcibr_dmamap_t);
-void pcibr_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
-void pcibr_dmalist_drain(devfs_handle_t, alenlist_t);
+void pcibr_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
+void pcibr_dmalist_drain(vertex_hdl_t, alenlist_t);
iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
extern unsigned pcibr_intr_bits(pciio_info_t info,
pciio_intr_line_t lines, int nslots);
-extern pcibr_intr_t pcibr_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
+extern pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
extern void pcibr_intr_free(pcibr_intr_t);
extern void pcibr_setpciint(xtalk_intr_t);
extern int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
extern void pcibr_intr_disconnect(pcibr_intr_t);
-extern devfs_handle_t pcibr_intr_cpu_get(pcibr_intr_t);
+extern vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
extern void pcibr_intr_func(intr_arg_t);
extern void print_bridge_errcmd(uint32_t, char *);
@@ -253,51 +227,76 @@ extern int pcibr_dmard_error(pcibr_soft_t, int, ioerror_mode_t, ioe
extern int pcibr_dmawr_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
extern int pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
extern int pcibr_error_handler_wrapper(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
-void pcibr_provider_startup(devfs_handle_t);
-void pcibr_provider_shutdown(devfs_handle_t);
+void pcibr_provider_startup(vertex_hdl_t);
+void pcibr_provider_shutdown(vertex_hdl_t);
-int pcibr_reset(devfs_handle_t);
-pciio_endian_t pcibr_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
+int pcibr_reset(vertex_hdl_t);
+pciio_endian_t pcibr_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
int pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
-pciio_priority_t pcibr_priority_set(devfs_handle_t, pciio_priority_t);
-int pcibr_device_flags_set(devfs_handle_t, pcibr_device_flags_t);
-
-extern cfg_p pcibr_config_addr(devfs_handle_t, unsigned);
-extern uint64_t pcibr_config_get(devfs_handle_t, unsigned, unsigned);
-extern void pcibr_config_set(devfs_handle_t, unsigned, unsigned, uint64_t);
-
-extern pcibr_hints_t pcibr_hints_get(devfs_handle_t, int);
-extern void pcibr_hints_fix_rrbs(devfs_handle_t);
-extern void pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
-extern void pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
-extern void pcibr_set_rrb_callback(devfs_handle_t, rrb_alloc_funct_t);
-extern void pcibr_hints_handsoff(devfs_handle_t);
-extern void pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, uint64_t);
-
-extern int pcibr_slot_reset(devfs_handle_t,pciio_slot_t);
-extern int pcibr_slot_info_init(devfs_handle_t,pciio_slot_t);
-extern int pcibr_slot_info_free(devfs_handle_t,pciio_slot_t);
+pciio_priority_t pcibr_priority_set(vertex_hdl_t, pciio_priority_t);
+int pcibr_device_flags_set(vertex_hdl_t, pcibr_device_flags_t);
+
+extern cfg_p pcibr_config_addr(vertex_hdl_t, unsigned);
+extern uint64_t pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
+extern void pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
+
+extern pcibr_hints_t pcibr_hints_get(vertex_hdl_t, int);
+extern void pcibr_hints_fix_rrbs(vertex_hdl_t);
+extern void pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
+extern void pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
+extern void pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
+extern void pcibr_hints_handsoff(vertex_hdl_t);
+extern void pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
+
+extern int pcibr_slot_info_init(vertex_hdl_t,pciio_slot_t);
+extern int pcibr_slot_info_free(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
pcibr_slot_info_resp_t);
extern void pcibr_slot_func_info_return(pcibr_info_h, int,
pcibr_slot_func_info_resp_t);
-extern int pcibr_slot_addr_space_init(devfs_handle_t,pciio_slot_t);
+extern int pcibr_slot_addr_space_init(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
-extern int pcibr_slot_device_init(devfs_handle_t, pciio_slot_t);
-extern int pcibr_slot_guest_info_init(devfs_handle_t,pciio_slot_t);
-extern int pcibr_slot_call_device_attach(devfs_handle_t,
+extern int pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
+extern int pcibr_slot_guest_info_init(vertex_hdl_t,pciio_slot_t);
+extern int pcibr_slot_call_device_attach(vertex_hdl_t,
pciio_slot_t, int);
-extern int pcibr_slot_call_device_detach(devfs_handle_t,
+extern int pcibr_slot_call_device_detach(vertex_hdl_t,
pciio_slot_t, int);
-extern int pcibr_slot_attach(devfs_handle_t, pciio_slot_t, int,
+extern int pcibr_slot_attach(vertex_hdl_t, pciio_slot_t, int,
char *, int *);
-extern int pcibr_slot_detach(devfs_handle_t, pciio_slot_t, int,
+extern int pcibr_slot_detach(vertex_hdl_t, pciio_slot_t, int,
char *, int *);
-extern int pcibr_is_slot_sys_critical(devfs_handle_t, pciio_slot_t);
-extern int pcibr_slot_initial_rrb_alloc(devfs_handle_t, pciio_slot_t);
-extern int pcibr_initial_rrb(devfs_handle_t, pciio_slot_t, pciio_slot_t);
+extern int pcibr_slot_initial_rrb_alloc(vertex_hdl_t, pciio_slot_t);
+extern int pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
+/*
+ * This is the file operation table for the pcibr driver.
+ * As each of the functions is implemented, fill in the
+ * appropriate function name below.
+ */
+static int pcibr_mmap(struct file * file, struct vm_area_struct * vma);
+static int pcibr_open(struct inode *, struct file *);
+struct file_operations pcibr_fops = {
+ owner: THIS_MODULE,
+ llseek: NULL,
+ read: NULL,
+ write: NULL,
+ readdir: NULL,
+ poll: NULL,
+ ioctl: NULL,
+ mmap: pcibr_mmap,
+ open: pcibr_open,
+ flush: NULL,
+ release: NULL,
+ fsync: NULL,
+ fasync: NULL,
+ lock: NULL,
+ readv: NULL,
+ writev: NULL,
+ sendpage: NULL,
+ get_unmapped_area: NULL
+};
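
The table above uses the old gcc labeled-element initializer syntax (owner: THIS_MODULE). A minimal equivalent in C99 designated-initializer form, which later kernels standardize on; members that are not named default to NULL for a file-scope object, so all of the explicit NULL slots can simply be dropped (editorial illustration only, not part of the patch):

    #include <linux/module.h>
    #include <linux/fs.h>

    struct file_operations pcibr_fops = {
            .owner = THIS_MODULE,   /* pin the module while the node is open */
            .mmap  = pcibr_mmap,    /* map bridge registers into a process */
            .open  = pcibr_open,    /* currently a no-op, see below */
    };
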
/* =====================================================================
* Device(x) register management
@@ -624,30 +623,6 @@ pcibr_device_write_gather_flush(pcibr_soft_t pcibr_soft,
/*
- * pcibr_init: called once during system startup or
- * when a loadable driver is loaded.
- *
- * The driver_register function should normally
- * be in _reg, not _init. But the pcibr driver is
- * required by devinit before the _reg routines
- * are called, so this is an exception.
- */
-void
-pcibr_init(void)
-{
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INIT, NULL, "pcibr_init()\n"));
-
- xwidget_driver_register(XBRIDGE_WIDGET_PART_NUM,
- XBRIDGE_WIDGET_MFGR_NUM,
- "pcibr_",
- 0);
- xwidget_driver_register(BRIDGE_WIDGET_PART_NUM,
- BRIDGE_WIDGET_MFGR_NUM,
- "pcibr_",
- 0);
-}
-
-/*
* open/close mmap/munmap interface would be used by processes
* that plan to map the PCI bridge, and muck around with the
* registers. This is dangerous to do, and will be allowed
@@ -659,25 +634,50 @@ pcibr_init(void)
*/
/* ARGSUSED */
int
-pcibr_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
+pcibr_open(struct inode *x, struct file *y)
{
return 0;
}
/*ARGSUSED */
int
-pcibr_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
+pcibr_close(vertex_hdl_t dev, int oflag, int otyp, cred_t *crp)
{
return 0;
}
+static int
+pcibr_mmap(struct file * file, struct vm_area_struct * vma)
+{
+ vertex_hdl_t pcibr_vhdl;
+ pcibr_soft_t pcibr_soft;
+ bridge_t *bridge;
+ unsigned long phys_addr;
+ int error = 0;
+
+#ifdef CONFIG_HWGFS_FS
+ pcibr_vhdl = (vertex_hdl_t) file->f_dentry->d_fsdata;
+#else
+ pcibr_vhdl = (vertex_hdl_t) file->private_data;
+#endif
+ pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+ bridge = pcibr_soft->bs_base;
+ phys_addr = (unsigned long)bridge & ~0xc000000000000000; /* Mask out the Uncache bits */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+ error = io_remap_page_range(vma, phys_addr, vma->vm_start,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ return(error);
+}
+
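
pcibr_mmap() recovers the bridge vertex from file->f_dentry->d_fsdata (or file->private_data when hwgfs is not configured), which is why the hwgraph_register() call further down now passes pcibr_vhdl as the device info. From user space the controller node can then be mapped to inspect bridge registers directly. A hedged sketch of such a consumer; the hwgfs path is hypothetical and reading offset 0 (the widget ID register) is only for illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* hypothetical hwgfs path to one bridge's controller node */
            const char *dev = "/hw/module/001c01/Ibrick/xtalk/15/pci/controller";
            int fd = open(dev, O_RDWR);
            volatile uint32_t *regs;

            if (fd < 0)
                    return 1;

            /* one page of bridge registers, mapped uncached by pcibr_mmap() */
            regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (regs == MAP_FAILED) {
                    close(fd);
                    return 1;
            }

            printf("widget id register: 0x%08x\n", (unsigned) regs[0]);

            munmap((void *) regs, 4096);
            close(fd);
            return 0;
    }
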
/*ARGSUSED */
int
-pcibr_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
+pcibr_map(vertex_hdl_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
{
int error;
- devfs_handle_t vhdl = dev_to_vhdl(dev);
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get(vhdl);
+ vertex_hdl_t vhdl = dev_to_vhdl(dev);
+ vertex_hdl_t pcibr_vhdl = hwgraph_connectpt_get(vhdl);
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
bridge_t *bridge = pcibr_soft->bs_base;
@@ -721,9 +721,9 @@ pcibr_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
/*ARGSUSED */
int
-pcibr_unmap(devfs_handle_t dev, vhandl_t *vt)
+pcibr_unmap(vertex_hdl_t dev, vhandl_t *vt)
{
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get((devfs_handle_t) dev);
+ vertex_hdl_t pcibr_vhdl = hwgraph_connectpt_get((vertex_hdl_t) dev);
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
bridge_t *bridge = pcibr_soft->bs_base;
@@ -785,10 +785,10 @@ pcibr_unmap(devfs_handle_t dev, vhandl_t *vt)
* be sufficient.
*/
pciio_slot_t
-pcibr_device_slot_get(devfs_handle_t dev_vhdl)
+pcibr_device_slot_get(vertex_hdl_t dev_vhdl)
{
char devname[MAXDEVNAME];
- devfs_handle_t tdev;
+ vertex_hdl_t tdev;
pciio_info_t pciio_info;
pciio_slot_t slot = PCIIO_SLOT_NONE;
@@ -814,7 +814,7 @@ pcibr_device_slot_get(devfs_handle_t dev_vhdl)
/*ARGSUSED */
int
-pcibr_ioctl(devfs_handle_t dev,
+pcibr_ioctl(vertex_hdl_t dev,
int cmd,
void *arg,
int flag,
@@ -825,7 +825,7 @@ pcibr_ioctl(devfs_handle_t dev,
}
pcibr_info_t
-pcibr_info_get(devfs_handle_t vhdl)
+pcibr_info_get(vertex_hdl_t vhdl)
{
return (pcibr_info_t) pciio_info_get(vhdl);
}
@@ -902,10 +902,10 @@ pcibr_device_info_new(
* This is usually used at the time of shutting down of the PCI card.
*/
int
-pcibr_device_unregister(devfs_handle_t pconn_vhdl)
+pcibr_device_unregister(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
pciio_slot_t slot;
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
@@ -982,12 +982,12 @@ pcibr_device_unregister(devfs_handle_t pconn_vhdl)
* slot's device status to be set.
*/
void
-pcibr_driver_reg_callback(devfs_handle_t pconn_vhdl,
+pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
int key1, int key2, int error)
{
pciio_info_t pciio_info;
pcibr_info_t pcibr_info;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
pciio_slot_t slot;
pcibr_soft_t pcibr_soft;
@@ -1033,12 +1033,12 @@ pcibr_driver_reg_callback(devfs_handle_t pconn_vhdl,
* slot's device status to be set.
*/
void
-pcibr_driver_unreg_callback(devfs_handle_t pconn_vhdl,
+pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
int key1, int key2, int error)
{
pciio_info_t pciio_info;
pcibr_info_t pcibr_info;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
pciio_slot_t slot;
pcibr_soft_t pcibr_soft;
@@ -1084,14 +1084,14 @@ pcibr_driver_unreg_callback(devfs_handle_t pconn_vhdl,
* depends on hwgraph separator == '/'
*/
int
-pcibr_bus_cnvlink(devfs_handle_t f_c)
+pcibr_bus_cnvlink(vertex_hdl_t f_c)
{
char dst[MAXDEVNAME];
char *dp = dst;
char *cp, *xp;
int widgetnum;
char pcibus[8];
- devfs_handle_t nvtx, svtx;
+ vertex_hdl_t nvtx, svtx;
int rv;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, f_c, "pcibr_bus_cnvlink\n"));
@@ -1145,11 +1145,11 @@ pcibr_bus_cnvlink(devfs_handle_t f_c)
*/
/*ARGSUSED */
int
-pcibr_attach(devfs_handle_t xconn_vhdl)
+pcibr_attach(vertex_hdl_t xconn_vhdl)
{
/* REFERENCED */
graph_error_t rc;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
bridge_t *bridge;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, xconn_vhdl, "pcibr_attach\n"));
@@ -1180,11 +1180,11 @@ pcibr_attach(devfs_handle_t xconn_vhdl)
/*ARGSUSED */
int
-pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
- devfs_handle_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
+pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge,
+ vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
{
/* REFERENCED */
- devfs_handle_t ctlr_vhdl;
+ vertex_hdl_t ctlr_vhdl;
bridgereg_t id;
int rev;
pcibr_soft_t pcibr_soft;
@@ -1193,7 +1193,7 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
xtalk_intr_t xtalk_intr;
int slot;
int ibit;
- devfs_handle_t noslot_conn;
+ vertex_hdl_t noslot_conn;
char devnm[MAXDEVNAME], *s;
pcibr_hints_t pcibr_hints;
uint64_t int_enable;
@@ -1209,23 +1209,15 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
nasid_t nasid;
int iobrick_type_get_nasid(nasid_t nasid);
int iobrick_module_get_nasid(nasid_t nasid);
- extern unsigned char Is_pic_on_this_nasid[512];
-
-
- async_attach_t aa = NULL;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pcibr_attach2: bridge=0x%p, busnum=%d\n", bridge, busnum));
- aa = async_attach_get_info(xconn_vhdl);
-
ctlr_vhdl = NULL;
- ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &pcibr_fops, NULL);
-
+ ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
+ DEVFS_FL_AUTO_DEVNUM, 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+ (struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
ASSERT(ctlr_vhdl != NULL);
/*
@@ -1261,13 +1253,7 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
pcibr_soft->bs_min_slot = 0; /* lowest possible slot# */
pcibr_soft->bs_max_slot = 7; /* highest possible slot# */
pcibr_soft->bs_busnum = busnum;
- if (is_xbridge(bridge)) {
- pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_XBRIDGE;
- } else if (is_pic(bridge)) {
- pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
- } else {
- pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_BRIDGE;
- }
+ pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
switch(pcibr_soft->bs_bridge_type) {
case PCIBR_BRIDGETYPE_BRIDGE:
pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
@@ -1367,10 +1353,6 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
nasid = NASID_GET(bridge);
- /* set whether it is a PIC or not */
- Is_pic_on_this_nasid[nasid] = (IS_PIC_SOFT(pcibr_soft)) ? 1 : 0;
-
-
if ((pcibr_soft->bs_bricktype = iobrick_type_get_nasid(nasid)) < 0)
printk(KERN_WARNING "0x%p: Unknown bricktype : 0x%x\n", (void *)xconn_vhdl,
(unsigned int)pcibr_soft->bs_bricktype);
@@ -1380,11 +1362,27 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
if (pcibr_soft->bs_bricktype > 0) {
switch (pcibr_soft->bs_bricktype) {
case MODULE_PXBRICK:
+ case MODULE_IXBRICK:
pcibr_soft->bs_first_slot = 0;
pcibr_soft->bs_last_slot = 1;
pcibr_soft->bs_last_reset = 1;
+
+ /* If Bus 1 has an IO9 then there are 4 devices on that bus.  Note
+ * that we figure this out from klconfig, since the kernel has not
+ * yet probed the bus.
+ */
+ if (pcibr_widget_to_bus(pcibr_vhdl) == 1) {
+ lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
+
+ while (brd) {
+ if (brd->brd_flags & LOCAL_MASTER_IO6) {
+ pcibr_soft->bs_last_slot = 3;
+ pcibr_soft->bs_last_reset = 3;
+ }
+ brd = KLCF_NEXT(brd);
+ }
+ }
break;
- case MODULE_PEBRICK:
case MODULE_PBRICK:
pcibr_soft->bs_first_slot = 1;
pcibr_soft->bs_last_slot = 2;
@@ -1527,7 +1525,7 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
/* enable parity checking on PICs internal RAM */
pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
- /* PIC BRINGUP WAR (PV# 862253): don't enable write request
+ /* PIC BRINGUP WAR (PV# 862253): dont enable write request
* parity checking.
*/
if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
@@ -1559,11 +1557,6 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
int entry;
cnodeid_t cnodeid;
nasid_t nasid;
-#ifdef PIC_LATER
- char *node_val;
- devfs_handle_t node_vhdl;
- char vname[MAXDEVNAME];
-#endif
/* Set the Bridge's 32-bit PCI to XTalk
* Direct Map register to the most useful
@@ -1582,30 +1575,6 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
*/
cnodeid = 0; /* default node id */
- /*
- * Determine the base address node id to be used for all 32-bit
- * Direct Mapping I/O. The default is node 0, but this can be changed
- * via a DEVICE_ADMIN directive and the PCIBUS_DMATRANS_NODE
- * attribute in the irix.sm config file. A device driver can obtain
- * this node value via a call to pcibr_get_dmatrans_node().
- */
-#ifdef PIC_LATER
-// This probably needs to be addressed - pfg
- node_val = device_admin_info_get(pcibr_vhdl, ADMIN_LBL_DMATRANS_NODE);
- if (node_val != NULL) {
- node_vhdl = hwgraph_path_to_vertex(node_val);
- if (node_vhdl != GRAPH_VERTEX_NONE) {
- cnodeid = nodevertex_to_cnodeid(node_vhdl);
- }
- if ((node_vhdl == GRAPH_VERTEX_NONE) || (cnodeid == CNODEID_NONE)) {
- cnodeid = 0;
- vertex_to_name(pcibr_vhdl, vname, sizeof(vname));
- printk(KERN_WARNING "Invalid hwgraph node path specified:\n"
- " DEVICE_ADMIN: %s %s=%s\n",
- vname, ADMIN_LBL_DMATRANS_NODE, node_val);
- }
- }
-#endif /* PIC_LATER */
nasid = COMPACT_TO_NASID_NODEID(cnodeid);
paddr = NODE_OFFSET(nasid) + 0;
@@ -1763,6 +1732,13 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
*/
xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
+ {
+ int irq = ((hub_intr_t)xtalk_intr)->i_bit;
+ int cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;
+
+ intr_unreserve_level(cpu, irq);
+ ((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
+ }
ASSERT(xtalk_intr != NULL);
pcibr_soft->bsi_err_intr = xtalk_intr;
@@ -1778,12 +1754,8 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
xtalk_intr_connect(xtalk_intr, (intr_func_t) pcibr_error_intr_handler,
(intr_arg_t) pcibr_soft, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);
-#ifdef BUS_INT_WAR_NOT_YET
- request_irq(CPU_VECTOR_TO_IRQ(((hub_intr_t)xtalk_intr)->i_cpuid,
- ((hub_intr_t)xtalk_intr)->i_bit),
- (intr_func_t)pcibr_error_intr_handler, 0, "PCIBR error",
+ request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, "PCIBR error",
(intr_arg_t) pcibr_soft);
-#endif
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
"pcibr_setwidint: b_wid_int_upper=0x%x, b_wid_int_lower=0x%x\n",
@@ -1801,18 +1773,16 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
if (IS_PIC_SOFT(pcibr_soft)) {
int_enable_64 = bridge->p_int_enable_64 | BRIDGE_ISR_ERRORS;
int_enable = (uint64_t)int_enable_64;
+#ifdef PFG_TEST
+ int_enable = (uint64_t)0x7ffffeff7ffffeff;
+#endif
} else {
int_enable_32 = bridge->b_int_enable | (BRIDGE_ISR_ERRORS & 0xffffffff);
int_enable = ((uint64_t)int_enable_32 & 0xffffffff);
- }
-#ifdef BUS_INT_WAR_NOT_YET
- {
- extern void sn_add_polled_interrupt(int irq, int interval);
-
- sn_add_polled_interrupt(CPU_VECTOR_TO_IRQ(((hub_intr_t)xtalk_intr)->i_cpuid,
- ((hub_intr_t)xtalk_intr)->i_bit), 20000);
- }
+#ifdef PFG_TEST
+ int_enable = (uint64_t)0x7ffffeff;
#endif
+ }
#if BRIDGE_ERROR_INTR_WAR
@@ -1849,24 +1819,6 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
}
#endif
-#ifdef BRIDGE_B_DATACORR_WAR
-
- /* WAR panic for Rev B silent data corruption.
- * PIOERR turned off here because there is a problem
- * with not re-arming it in pcibr_error_intr_handler.
- * We don't get LLP error interrupts if we don't
- * re-arm PIOERR interrupts! Just disable them here
- */
-
- if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_B) {
- int_enable |= BRIDGE_IMR_LLP_REC_CBERR;
- int_enable &= ~BRIDGE_ISR_PCIBUS_PIOERR;
-
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
- "Turning on LLP_REC_CBERR for Rev B Bridge.\n"));
- }
-#endif
-
/* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
* locked out to be freed up sooner (by timing out) so that the
* read tnums are never completely used up.
@@ -1918,16 +1870,12 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
else if (pcibr_soft->bs_rev_num <
- (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_prefetch_enable_rev))
+ (BRIDGE_WIDGET_PART_NUM << 4))
pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
- /* WRITE_GATHER:
- * Disabled up to but not including the
- * rev number in pcibr_wg_enable_rev. There
- * is no "WAR range" as with prefetch.
- */
+ /* WRITE_GATHER: Disabled */
if (pcibr_soft->bs_rev_num <
- (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_wg_enable_rev))
+ (BRIDGE_WIDGET_PART_NUM << 4))
pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
/* PIC only supports 64-bit direct mapping in PCI-X mode. Since
@@ -2064,7 +2012,23 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
*/
if (pcibr_soft->bs_bricktype > 0) {
switch (pcibr_soft->bs_bricktype) {
+ case MODULE_PBRICK:
+ do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
+ do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
+ break;
+ case MODULE_IBRICK:
+ /* port 0xe on the Ibrick only has slots 1 and 2 */
+ if (pcibr_soft->bs_xid == 0xe) {
+ do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
+ do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
+ }
+ else {
+ /* allocate one RRB for the serial port */
+ do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
+ }
+ break;
case MODULE_PXBRICK:
+ case MODULE_IXBRICK:
/*
* If the IO9 is in the PXBrick (bus1, slot1) allocate
* RRBs to all the devices
@@ -2080,23 +2044,6 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
}
-
- break;
- case MODULE_PEBRICK:
- case MODULE_PBRICK:
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
- do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
- break;
- case MODULE_IBRICK:
- /* port 0xe on the Ibrick only has slots 1 and 2 */
- if (pcibr_soft->bs_xid == 0xe) {
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
- do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
- }
- else {
- /* allocate one RRB for the serial port */
- do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
- }
break;
} /* switch */
}
@@ -2113,78 +2060,8 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
/* Call the device attach */
(void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
-#ifdef PIC_LATER
-#if (defined(USS302_TIMEOUT_WAR))
- /*
- * If this bridge holds a Lucent USS-302 or USS-312 pci/usb controller,
- * increase the Bridge PCI retry backoff interval. This part seems
- * to go away for long periods of time if a DAC appears on the bus during
- * a read command that is being retried.
- */
-
-{
- ii_ixtt_u_t ixtt;
-
- for (slot = pcibr_soft->bs_min_slot;
- slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
- if (pcibr_soft->bs_slot[slot].bss_vendor_id ==
- LUCENT_USBHC_VENDOR_ID_NUM &&
- (pcibr_soft->bs_slot[slot].bss_device_id ==
- LUCENT_USBHC302_DEVICE_ID_NUM ||
- pcibr_soft->bs_slot[slot].bss_device_id ==
- LUCENT_USBHC312_DEVICE_ID_NUM)) {
- printk(KERN_NOTICE
- "pcibr_attach: %x Bus holds a usb part - setting"
- "bridge PCI_RETRY_HLD to %d\n",
- pcibr_vhdl, USS302_BRIDGE_TIMEOUT_HLD);
-
- bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_HLD_MASK;
- bridge->b_bus_timeout |=
- BRIDGE_BUS_PCI_RETRY_HLD(USS302_BRIDGE_TIMEOUT_HLD);
-
- /*
- * Have to consider the read response timer in the hub II as well
- */
-
- hubii_ixtt_get(xconn_vhdl, &ixtt);
-
- /*
- * bump rrsp_ps to allow at least 1ms for read
- * responses from this widget
- */
-
- ixtt.ii_ixtt_fld_s.i_rrsp_ps = 20000;
- hubii_ixtt_set(xconn_vhdl, &ixtt);
-
- /*
- * print the current setting
- */
-
- hubii_ixtt_get(xconn_vhdl, &ixtt);
- printk( "Setting hub ixtt.rrsp_ps field to 0x%x\n",
- ixtt.ii_ixtt_fld_s.i_rrsp_ps);
-
- break; /* only need to do it once */
- }
- }
-}
-#endif /* (defined(USS302_TIMEOUT_WAR)) */
-#else
- FIXME("pcibr_attach: Call do_pcibr_rrb_autoalloc nicinfo\n");
-#endif /* PIC_LATER */
-
- if (aa)
- async_attach_add_info(noslot_conn, aa);
-
pciio_device_attach(noslot_conn, (int)0);
- /*
- * Tear down pointer to async attach info -- async threads for
- * bridge's descendants may be running but the bridge's work is done.
- */
- if (aa)
- async_attach_del_info(xconn_vhdl);
-
return 0;
}
@@ -2195,10 +2072,10 @@ pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
*/
int
-pcibr_detach(devfs_handle_t xconn)
+pcibr_detach(vertex_hdl_t xconn)
{
pciio_slot_t slot;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
unsigned s;
@@ -2265,9 +2142,9 @@ pcibr_detach(devfs_handle_t xconn)
}
int
-pcibr_asic_rev(devfs_handle_t pconn_vhdl)
+pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
{
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
int tmp_vhdl;
arbitrary_info_t ainfo;
@@ -2294,7 +2171,7 @@ pcibr_asic_rev(devfs_handle_t pconn_vhdl)
}
int
-pcibr_write_gather_flush(devfs_handle_t pconn_vhdl)
+pcibr_write_gather_flush(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
@@ -2309,7 +2186,7 @@ pcibr_write_gather_flush(devfs_handle_t pconn_vhdl)
*/
static iopaddr_t
-pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
+pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
pciio_slot_t slot,
pciio_space_t space,
iopaddr_t pci_addr,
@@ -2323,6 +2200,8 @@ pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
unsigned bar; /* which BASE reg on device is decoding */
iopaddr_t xio_addr = XIO_NOWHERE;
+ iopaddr_t base; /* base of devio(x) mapped area on PCI */
+ iopaddr_t limit; /* limit of devio(x) mapped area on PCI */
pciio_space_t wspace; /* which space device is decoding */
iopaddr_t wbase; /* base of device decode on PCI */
@@ -2533,8 +2412,6 @@ pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pconn_vhdl,
"pcibr_addr_pci_to_xio: Device(%d): %x\n",
win, devreg, device_bits));
-#else
- printk("pcibr_addr_pci_to_xio: Device(%d): %x\n", win, devreg);
#endif
}
pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
@@ -2620,18 +2497,46 @@ pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
*/
case PCIIO_SPACE_MEM: /* "mem space" */
case PCIIO_SPACE_MEM32: /* "mem, use 32-bit-wide bus" */
- if ((pci_addr + BRIDGE_PCI_MEM32_BASE + req_size - 1) <=
- BRIDGE_PCI_MEM32_LIMIT)
- xio_addr = pci_addr + BRIDGE_PCI_MEM32_BASE;
+ if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) { /* PIC bus 0 */
+ base = PICBRIDGE0_PCI_MEM32_BASE;
+ limit = PICBRIDGE0_PCI_MEM32_LIMIT;
+ } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
+ base = PICBRIDGE1_PCI_MEM32_BASE;
+ limit = PICBRIDGE1_PCI_MEM32_LIMIT;
+ } else { /* Bridge/Xbridge */
+ base = BRIDGE_PCI_MEM32_BASE;
+ limit = BRIDGE_PCI_MEM32_LIMIT;
+ }
+
+ if ((pci_addr + base + req_size - 1) <= limit)
+ xio_addr = pci_addr + base;
break;
case PCIIO_SPACE_MEM64: /* "mem, use 64-bit-wide bus" */
- if ((pci_addr + BRIDGE_PCI_MEM64_BASE + req_size - 1) <=
- BRIDGE_PCI_MEM64_LIMIT)
- xio_addr = pci_addr + BRIDGE_PCI_MEM64_BASE;
+ if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) { /* PIC bus 0 */
+ base = PICBRIDGE0_PCI_MEM64_BASE;
+ limit = PICBRIDGE0_PCI_MEM64_LIMIT;
+ } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
+ base = PICBRIDGE1_PCI_MEM64_BASE;
+ limit = PICBRIDGE1_PCI_MEM64_LIMIT;
+ } else { /* Bridge/Xbridge */
+ base = BRIDGE_PCI_MEM64_BASE;
+ limit = BRIDGE_PCI_MEM64_LIMIT;
+ }
+
+ if ((pci_addr + base + req_size - 1) <= limit)
+ xio_addr = pci_addr + base;
break;
case PCIIO_SPACE_IO: /* "i/o space" */
+ /*
+ * PIC bridges do not support big-window aliases into PCI I/O space
+ */
+ if (IS_PIC_SOFT(pcibr_soft)) {
+ xio_addr = XIO_NOWHERE;
+ break;
+ }
+
/* Bridge Hardware Bug WAR #482741:
* The 4G area that maps directly from
* XIO space to PCI I/O space is busted
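
The MEM32 and MEM64 cases above repeat the same three-way PIC-bus-0 / PIC-bus-1 / Bridge ladder. A hedged sketch of how the selection could be factored into one helper; pcibr_mem_window() is hypothetical, but every macro it uses appears in the hunk above:

    static void
    pcibr_mem_window(pcibr_soft_t soft, int is64, iopaddr_t *base, iopaddr_t *limit)
    {
            if (IS_PIC_BUSNUM_SOFT(soft, 0)) {              /* PIC bus 0 */
                    *base  = is64 ? PICBRIDGE0_PCI_MEM64_BASE  : PICBRIDGE0_PCI_MEM32_BASE;
                    *limit = is64 ? PICBRIDGE0_PCI_MEM64_LIMIT : PICBRIDGE0_PCI_MEM32_LIMIT;
            } else if (IS_PIC_BUSNUM_SOFT(soft, 1)) {       /* PIC bus 1 */
                    *base  = is64 ? PICBRIDGE1_PCI_MEM64_BASE  : PICBRIDGE1_PCI_MEM32_BASE;
                    *limit = is64 ? PICBRIDGE1_PCI_MEM64_LIMIT : PICBRIDGE1_PCI_MEM32_LIMIT;
            } else {                                        /* Bridge/Xbridge */
                    *base  = is64 ? BRIDGE_PCI_MEM64_BASE  : BRIDGE_PCI_MEM32_BASE;
                    *limit = is64 ? BRIDGE_PCI_MEM64_LIMIT : BRIDGE_PCI_MEM32_LIMIT;
            }
    }
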
@@ -2725,7 +2630,7 @@ pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
/*ARGSUSED6 */
pcibr_piomap_t
-pcibr_piomap_alloc(devfs_handle_t pconn_vhdl,
+pcibr_piomap_alloc(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
pciio_space_t space,
iopaddr_t pci_addr,
@@ -2737,7 +2642,7 @@ pcibr_piomap_alloc(devfs_handle_t pconn_vhdl,
pciio_info_t pciio_info = &pcibr_info->f_c;
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
pcibr_piomap_t *mapptr;
pcibr_piomap_t maplist;
@@ -2867,7 +2772,7 @@ pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
/*ARGSUSED */
caddr_t
-pcibr_piotrans_addr(devfs_handle_t pconn_vhdl,
+pcibr_piotrans_addr(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
pciio_space_t space,
iopaddr_t pci_addr,
@@ -2877,7 +2782,7 @@ pcibr_piotrans_addr(devfs_handle_t pconn_vhdl,
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
iopaddr_t xio_addr;
caddr_t addr;
@@ -2908,7 +2813,7 @@ pcibr_piotrans_addr(devfs_handle_t pconn_vhdl,
/*ARGSUSED */
iopaddr_t
-pcibr_piospace_alloc(devfs_handle_t pconn_vhdl,
+pcibr_piospace_alloc(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
pciio_space_t space,
size_t req_size,
@@ -3010,7 +2915,7 @@ pcibr_piospace_alloc(devfs_handle_t pconn_vhdl,
/*ARGSUSED */
void
-pcibr_piospace_free(devfs_handle_t pconn_vhdl,
+pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
pciio_space_t space,
iopaddr_t pciaddr,
size_t req_size)
@@ -3161,14 +3066,14 @@ pcibr_flags_to_d64(unsigned flags, pcibr_soft_t pcibr_soft)
/*ARGSUSED */
pcibr_dmamap_t
-pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl,
+pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
size_t req_size_max,
unsigned flags)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
pciio_slot_t slot;
xwidgetnum_t xio_port;
@@ -3454,6 +3359,29 @@ pcibr_addr_xio_to_pci(pcibr_soft_t soft,
iopaddr_t pci_addr;
pciio_slot_t slot;
+ if (IS_PIC_BUSNUM_SOFT(soft, 0)) {
+ if ((xio_addr >= PICBRIDGE0_PCI_MEM32_BASE) &&
+ (xio_lim <= PICBRIDGE0_PCI_MEM32_LIMIT)) {
+ pci_addr = xio_addr - PICBRIDGE0_PCI_MEM32_BASE;
+ return pci_addr;
+ }
+ if ((xio_addr >= PICBRIDGE0_PCI_MEM64_BASE) &&
+ (xio_lim <= PICBRIDGE0_PCI_MEM64_LIMIT)) {
+ pci_addr = xio_addr - PICBRIDGE0_PCI_MEM64_BASE;
+ return pci_addr;
+ }
+ } else if (IS_PIC_BUSNUM_SOFT(soft, 1)) {
+ if ((xio_addr >= PICBRIDGE1_PCI_MEM32_BASE) &&
+ (xio_lim <= PICBRIDGE1_PCI_MEM32_LIMIT)) {
+ pci_addr = xio_addr - PICBRIDGE1_PCI_MEM32_BASE;
+ return pci_addr;
+ }
+ if ((xio_addr >= PICBRIDGE1_PCI_MEM64_BASE) &&
+ (xio_lim <= PICBRIDGE1_PCI_MEM64_LIMIT)) {
+ pci_addr = xio_addr - PICBRIDGE1_PCI_MEM64_BASE;
+ return pci_addr;
+ }
+ } else {
if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
(xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
@@ -3464,6 +3392,7 @@ pcibr_addr_xio_to_pci(pcibr_soft_t soft,
pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
return pci_addr;
}
+ }
for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
(xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
@@ -3644,243 +3573,6 @@ pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
}
/*ARGSUSED */
-alenlist_t
-pcibr_dmamap_list(pcibr_dmamap_t pcibr_dmamap,
- alenlist_t palenlist,
- unsigned flags)
-{
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge=NULL;
-
- unsigned al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
- int inplace = flags & PCIIO_INPLACE;
-
- alenlist_t pciio_alenlist = 0;
- alenlist_t xtalk_alenlist;
- size_t length;
- iopaddr_t offset;
- unsigned direct64;
- int ate_index = 0;
- int ate_count = 0;
- int ate_total = 0;
- bridge_ate_p ate_ptr = (bridge_ate_p)0;
- bridge_ate_t ate_proto = (bridge_ate_t)0;
- bridge_ate_t ate_prev;
- bridge_ate_t ate;
- alenaddr_t xio_addr;
- xwidgetnum_t xio_port;
- iopaddr_t pci_addr;
- alenaddr_t new_addr;
- unsigned cmd_regs[8];
- unsigned s = 0;
-
-#if PCIBR_FREEZE_TIME
- unsigned freeze_time;
-#endif
- int ate_freeze_done = 0; /* To pair ATE_THAW
- * with an ATE_FREEZE
- */
-
- pcibr_soft = pcibr_dmamap->bd_soft;
-
- xtalk_alenlist = xtalk_dmamap_list(pcibr_dmamap->bd_xtalk, palenlist,
- flags & DMAMAP_FLAGS);
- if (!xtalk_alenlist) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: xtalk_dmamap_list() failed, "
- "pcibr_dmamap=0x%x\n", pcibr_dmamap));
- goto fail;
- }
- alenlist_cursor_init(xtalk_alenlist, 0, NULL);
-
- if (inplace) {
- pciio_alenlist = xtalk_alenlist;
- } else {
- pciio_alenlist = alenlist_create(al_flags);
- if (!pciio_alenlist) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: alenlist_create() failed, "
- "pcibr_dmamap=0x%lx\n", (unsigned long)pcibr_dmamap));
- goto fail;
- }
- }
-
- direct64 = pcibr_dmamap->bd_flags & PCIIO_DMA_A64;
- if (!direct64) {
- bridge = pcibr_soft->bs_base;
- ate_ptr = pcibr_dmamap->bd_ate_ptr;
- ate_index = pcibr_dmamap->bd_ate_index;
- ate_proto = pcibr_dmamap->bd_ate_proto;
- ATE_FREEZE();
- ate_freeze_done = 1; /* Remember that we need to do an ATE_THAW */
- }
- pci_addr = pcibr_dmamap->bd_pci_addr;
-
- ate_prev = 0; /* matches no valid ATEs */
- while (ALENLIST_SUCCESS ==
- alenlist_get(xtalk_alenlist, NULL, 0,
- &xio_addr, &length, al_flags)) {
- if (XIO_PACKED(xio_addr)) {
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_dmamap->bd_xio_port;
-
- if (xio_port == pcibr_soft->bs_xid) {
- new_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, length);
- if (new_addr == PCI_NOWHERE) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: pcibr_addr_xio_to_pci failed, "
- "pcibr_dmamap=0x%x\n", pcibr_dmamap));
- goto fail;
- }
- } else if (direct64) {
- new_addr = pci_addr | xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
-
- /* Bridge Hardware WAR #482836:
- * If the transfer is not cache aligned
- * and the Bridge Rev is <= B, force
- * prefetch to be off.
- */
- if (flags & PCIBR_NOPREFETCH)
- new_addr &= ~PCI64_ATTR_PREF;
-
- } else {
- /* calculate the ate value for
- * the first address. If it
- * matches the previous
- * ATE written (ie. we had
- * multiple blocks in the
- * same IOPG), then back up
- * and reuse that ATE.
- *
- * We are NOT going to
- * aggressively try to
- * reuse any other ATEs.
- */
- offset = IOPGOFF(xio_addr);
- ate = ate_proto
- | (xio_port << ATE_TIDSHIFT)
- | (xio_addr - offset);
- if (ate == ate_prev) {
- PCIBR_DEBUG((PCIBR_DEBUG_ATE, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: ATE share\n"));
- ate_ptr--;
- ate_index--;
- pci_addr -= IOPGSIZE;
- }
- new_addr = pci_addr + offset;
-
- /* Fill in the hardware ATEs
- * that contain this block.
- */
- ate_count = IOPG(offset + length - 1) + 1;
- ate_total += ate_count;
-
- /* Ensure that this map contains enough ATE's */
- if (ate_total > pcibr_dmamap->bd_ate_count) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list :\n"
- "\twanted xio_addr [0x%x..0x%x]\n"
- "\tate_total 0x%x bd_ate_count 0x%x\n"
- "\tATE's required > number allocated\n",
- xio_addr, xio_addr + length - 1,
- ate_total, pcibr_dmamap->bd_ate_count));
- goto fail;
- }
-
- ATE_WRITE();
-
- ate_index += ate_count;
- ate_ptr += ate_count;
-
- ate_count <<= IOPFNSHIFT;
- ate += ate_count;
- pci_addr += ate_count;
- }
-
- /* write the PCI DMA address
- * out to the scatter-gather list.
- */
- if (inplace) {
- if (ALENLIST_SUCCESS !=
- alenlist_replace(pciio_alenlist, NULL,
- &new_addr, &length, al_flags)) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: alenlist_replace() failed, "
- "pcibr_dmamap=0x%x\n", pcibr_dmamap));
-
- goto fail;
- }
- } else {
- if (ALENLIST_SUCCESS !=
- alenlist_append(pciio_alenlist,
- new_addr, length, al_flags)) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: alenlist_append() failed, "
- "pcibr_dmamap=0x%x\n", pcibr_dmamap));
- goto fail;
- }
- }
- }
- if (!inplace)
- alenlist_done(xtalk_alenlist);
-
- /* Reset the internal cursor of the alenlist to be returned back
- * to the caller.
- */
- alenlist_cursor_init(pciio_alenlist, 0, NULL);
-
-
- /* In case an ATE_FREEZE was done do the ATE_THAW to unroll all the
- * changes that ATE_FREEZE has done to implement the external SSRAM
- * bug workaround.
- */
- if (ate_freeze_done) {
- ATE_THAW();
- if ( IS_PIC_SOFT(pcibr_soft) ) {
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_GET32((&bridge->b_wid_tflush));
- } else {
- bridge->b_wid_tflush;
- }
- }
- }
- PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: pcibr_dmamap=0x%x, pciio_alenlist=0x%x\n",
- pcibr_dmamap, pciio_alenlist));
-
- return pciio_alenlist;
-
- fail:
- /* There are various points of failure after doing an ATE_FREEZE
- * We need to do an ATE_THAW. Otherwise the ATEs are locked forever.
- * The decision to do an ATE_THAW needs to be based on whether a
- * an ATE_FREEZE was done before.
- */
- if (ate_freeze_done) {
- ATE_THAW();
- if ( IS_PIC_SOFT(pcibr_soft) ) {
- bridge->b_wid_tflush;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_GET32((&bridge->b_wid_tflush));
- } else {
- bridge->b_wid_tflush;
- }
- }
- }
- if (pciio_alenlist && !inplace)
- alenlist_destroy(pciio_alenlist);
- return 0;
-}
-
-/*ARGSUSED */
void
pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
{
@@ -3917,7 +3609,7 @@ pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
/*ARGSUSED */
cnodeid_t
-pcibr_get_dmatrans_node(devfs_handle_t pconn_vhdl)
+pcibr_get_dmatrans_node(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
@@ -3928,7 +3620,7 @@ pcibr_get_dmatrans_node(devfs_handle_t pconn_vhdl)
/*ARGSUSED */
iopaddr_t
-pcibr_dmatrans_addr(devfs_handle_t pconn_vhdl,
+pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
paddr_t paddr,
size_t req_size,
@@ -3936,7 +3628,7 @@ pcibr_dmatrans_addr(devfs_handle_t pconn_vhdl,
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
@@ -4149,213 +3841,6 @@ pcibr_dmatrans_addr(devfs_handle_t pconn_vhdl,
return 0;
}
-/*ARGSUSED */
-alenlist_t
-pcibr_dmatrans_list(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- alenlist_t palenlist,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
- pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
- xwidgetnum_t xio_port;
-
- alenlist_t pciio_alenlist = 0;
- alenlist_t xtalk_alenlist = 0;
-
- int inplace;
- unsigned direct64;
- unsigned al_flags;
-
- iopaddr_t xio_base;
- alenaddr_t xio_addr;
- size_t xio_size;
-
- size_t map_size;
- iopaddr_t pci_base;
- alenaddr_t pci_addr;
-
- unsigned relbits = 0;
-
- /* merge in forced flags */
- flags |= pcibr_soft->bs_dma_flags;
-
- inplace = flags & PCIIO_INPLACE;
- direct64 = flags & PCIIO_DMA_A64;
- al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
-
- if (direct64) {
- map_size = 1ull << 48;
- xio_base = 0;
- pci_base = slotp->bss_d64_base;
- if ((pci_base != PCIBR_D64_BASE_UNSET) &&
- (flags == slotp->bss_d64_flags)) {
- /* reuse previous base info */
- } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS) < 0) {
- /* DMA configuration conflict */
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: DMA configuration conflict "
- "for direct64, flags=0x%x\n", flags));
- goto fail;
- } else {
- relbits = BRIDGE_DEV_D64_BITS;
- pci_base =
- pcibr_flags_to_d64(flags, pcibr_soft);
- }
- } else {
- xio_base = pcibr_soft->bs_dir_xbase;
- map_size = 1ull << 31;
- pci_base = slotp->bss_d32_base;
- if ((pci_base != PCIBR_D32_BASE_UNSET) &&
- (flags == slotp->bss_d32_flags)) {
- /* reuse previous base info */
- } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS) < 0) {
- /* DMA configuration conflict */
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: DMA configuration conflict "
- "for direct32, flags=0x%x\n", flags));
- goto fail;
- } else {
- relbits = BRIDGE_DEV_D32_BITS;
- pci_base = PCI32_DIRECT_BASE;
- }
- }
-
- xtalk_alenlist = xtalk_dmatrans_list(xconn_vhdl, 0, palenlist,
- flags & DMAMAP_FLAGS);
- if (!xtalk_alenlist) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: xtalk_dmatrans_list failed "
- "xtalk_alenlist=0x%x\n", xtalk_alenlist));
- goto fail;
- }
-
- alenlist_cursor_init(xtalk_alenlist, 0, NULL);
-
- if (inplace) {
- pciio_alenlist = xtalk_alenlist;
- } else {
- pciio_alenlist = alenlist_create(al_flags);
- if (!pciio_alenlist) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: alenlist_create failed with "
- " 0x%x\n", pciio_alenlist));
- goto fail;
- }
- }
-
- while (ALENLIST_SUCCESS ==
- alenlist_get(xtalk_alenlist, NULL, 0,
- &xio_addr, &xio_size, al_flags)) {
-
- /*
- * find which XIO port this goes to.
- */
- if (XIO_PACKED(xio_addr)) {
- if (xio_addr == XIO_NOWHERE) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: xio_addr == XIO_NOWHERE\n"));
- return 0;
- }
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_soft->bs_mxid;
-
- /*
- * If this DMA comes back to us,
- * return the PCI MEM address on
- * which it would land, or NULL
- * if the target is something
- * on bridge other than PCI MEM.
- */
- if (xio_port == pcibr_soft->bs_xid) {
- pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, xio_size);
- if (pci_addr == (alenaddr_t)NULL) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: pcibr_addr_xio_to_pci failed "
- "xio_addr=0x%x, xio_size=0x%x\n", xio_addr, xio_size));
- goto fail;
- }
- } else if (direct64) {
- ASSERT(xio_port != 0);
- pci_addr = pci_base | xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
- } else {
- iopaddr_t offset = xio_addr - xio_base;
- iopaddr_t endoff = xio_size + offset;
-
- if ((xio_size > map_size) ||
- (xio_addr < xio_base) ||
- (xio_port != pcibr_soft->bs_dir_xport) ||
- (endoff > map_size)) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: xio_size > map_size fail\n"
- "xio_addr=0x%x, xio_size=0x%x. map_size=0x%x, "
- "xio_port=0x%x, endoff=0x%x\n",
- xio_addr, xio_size, map_size, xio_port, endoff));
- goto fail;
- }
-
- pci_addr = pci_base + (xio_addr - xio_base);
- }
-
- /* write the PCI DMA address
- * out to the scatter-gather list.
- */
- if (inplace) {
- if (ALENLIST_SUCCESS !=
- alenlist_replace(pciio_alenlist, NULL,
- &pci_addr, &xio_size, al_flags)) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: alenlist_replace failed\n"));
- goto fail;
- }
- } else {
- if (ALENLIST_SUCCESS !=
- alenlist_append(pciio_alenlist,
- pci_addr, xio_size, al_flags)) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: alenlist_append failed\n"));
- goto fail;
- }
- }
- }
-
- if (relbits) {
- if (direct64) {
- slotp->bss_d64_flags = flags;
- slotp->bss_d64_base = pci_base;
- } else {
- slotp->bss_d32_flags = flags;
- slotp->bss_d32_base = pci_base;
- }
- }
- if (!inplace)
- alenlist_done(xtalk_alenlist);
-
- /* Reset the internal cursor of the alenlist to be returned back
- * to the caller.
- */
- alenlist_cursor_init(pciio_alenlist, 0, NULL);
-
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: pciio_alenlist=0x%x\n",
- pciio_alenlist));
-
- return pciio_alenlist;
-
- fail:
- if (relbits)
- pcibr_release_device(pcibr_soft, pciio_slot, relbits);
- if (pciio_alenlist && !inplace)
- alenlist_destroy(pciio_alenlist);
- return 0;
-}
-
void
pcibr_dmamap_drain(pcibr_dmamap_t map)
{
@@ -4363,24 +3848,24 @@ pcibr_dmamap_drain(pcibr_dmamap_t map)
}
void
-pcibr_dmaaddr_drain(devfs_handle_t pconn_vhdl,
+pcibr_dmaaddr_drain(vertex_hdl_t pconn_vhdl,
paddr_t paddr,
size_t bytes)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
xtalk_dmaaddr_drain(xconn_vhdl, paddr, bytes);
}
void
-pcibr_dmalist_drain(devfs_handle_t pconn_vhdl,
+pcibr_dmalist_drain(vertex_hdl_t pconn_vhdl,
alenlist_t list)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
xtalk_dmalist_drain(xconn_vhdl, list);
}
@@ -4402,18 +3887,18 @@ pcibr_dmamap_pciaddr_get(pcibr_dmamap_t pcibr_dmamap)
*/
/*ARGSUSED */
void
-pcibr_provider_startup(devfs_handle_t pcibr)
+pcibr_provider_startup(vertex_hdl_t pcibr)
{
}
/*ARGSUSED */
void
-pcibr_provider_shutdown(devfs_handle_t pcibr)
+pcibr_provider_shutdown(vertex_hdl_t pcibr)
{
}
int
-pcibr_reset(devfs_handle_t conn)
+pcibr_reset(vertex_hdl_t conn)
{
#ifdef PIC_LATER
pciio_info_t pciio_info = pciio_info_get(conn);
@@ -4484,7 +3969,7 @@ pcibr_reset(devfs_handle_t conn)
}
pciio_endian_t
-pcibr_endian_set(devfs_handle_t pconn_vhdl,
+pcibr_endian_set(vertex_hdl_t pconn_vhdl,
pciio_endian_t device_end,
pciio_endian_t desired_end)
{
@@ -4629,7 +4114,7 @@ pcibr_priority_bits_set(pcibr_soft_t pcibr_soft,
}
pciio_priority_t
-pcibr_priority_set(devfs_handle_t pconn_vhdl,
+pcibr_priority_set(vertex_hdl_t pconn_vhdl,
pciio_priority_t device_prio)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
@@ -4653,7 +4138,7 @@ pcibr_priority_set(devfs_handle_t pconn_vhdl,
* Returns 0 on failure, 1 on success
*/
int
-pcibr_device_flags_set(devfs_handle_t pconn_vhdl,
+pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
pcibr_device_flags_t flags)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
@@ -4792,10 +4277,8 @@ pciio_provider_t pcibr_provider =
(pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
(pciio_dmamap_free_f *) pcibr_dmamap_free,
(pciio_dmamap_addr_f *) pcibr_dmamap_addr,
- (pciio_dmamap_list_f *) pcibr_dmamap_list,
(pciio_dmamap_done_f *) pcibr_dmamap_done,
(pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
- (pciio_dmatrans_list_f *) pcibr_dmatrans_list,
(pciio_dmamap_drain_f *) pcibr_dmamap_drain,
(pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
(pciio_dmalist_drain_f *) pcibr_dmalist_drain,
@@ -4814,23 +4297,16 @@ pciio_provider_t pcibr_provider =
(pciio_priority_set_f *) pcibr_priority_set,
(pciio_config_get_f *) pcibr_config_get,
(pciio_config_set_f *) pcibr_config_set,
-#ifdef PIC_LATER
- (pciio_error_devenable_f *) pcibr_error_devenable,
- (pciio_error_extract_f *) pcibr_error_extract,
- (pciio_driver_reg_callback_f *) pcibr_driver_reg_callback,
- (pciio_driver_unreg_callback_f *) pcibr_driver_unreg_callback,
-#else
(pciio_error_devenable_f *) 0,
(pciio_error_extract_f *) 0,
(pciio_driver_reg_callback_f *) 0,
(pciio_driver_unreg_callback_f *) 0,
-#endif /* PIC_LATER */
(pciio_device_unregister_f *) pcibr_device_unregister,
(pciio_dma_enabled_f *) pcibr_dma_enabled,
};
int
-pcibr_dma_enabled(devfs_handle_t pconn_vhdl)
+pcibr_dma_enabled(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
@@ -4857,7 +4333,7 @@ pcibr_dma_enabled(devfs_handle_t pconn_vhdl)
* parameter 'format' is sent to the console.
*/
void
-pcibr_debug(uint32_t type, devfs_handle_t vhdl, char *format, ...)
+pcibr_debug(uint32_t type, vertex_hdl_t vhdl, char *format, ...)
{
char hwpath[MAXDEVNAME] = "\0";
char copy_of_hwpath[MAXDEVNAME];
@@ -4865,7 +4341,6 @@ pcibr_debug(uint32_t type, devfs_handle_t vhdl, char *format, ...)
short widget = -1;
short slot = -1;
va_list ap;
- char *strtok_r(char *string, const char *sepset, char **lasts);
if (pcibr_debug_mask & type) {
if (vhdl) {
@@ -4873,13 +4348,12 @@ pcibr_debug(uint32_t type, devfs_handle_t vhdl, char *format, ...)
char *cp;
if (strcmp(module, pcibr_debug_module)) {
- /* strtok_r() wipes out string, use a copy */
+ /* strsep() modifies the string, so use a copy */
(void)strcpy(copy_of_hwpath, hwpath);
cp = strstr(copy_of_hwpath, "/module/");
if (cp) {
- char *last = NULL;
cp += strlen("/module");
- module = strtok_r(cp, "/", &last);
+ module = strsep(&cp, "/");
}
}
if (pcibr_debug_widget != -1) {
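
One behavioral difference worth noting in the strtok_r() to strsep() switch above: strsep() does not skip leading separators, so with cp left pointing at a string that begins with '/', the first token it hands back is the empty field in front of that slash. A small, runnable user-space illustration of the semantics (the path below is an arbitrary example):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[] = "/001c01/node/xtalk/15/pci"; /* strsep() writes NULs into this */
            char *cp = buf;
            char *tok;

            while ((tok = strsep(&cp, "/")) != NULL) {
                    if (*tok == '\0')
                            printf("(empty field)\n"); /* token before the leading '/' */
                    else
                            printf("token: %s\n", tok);
            }
            return 0;
    }
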
@@ -4918,3 +4392,26 @@ pcibr_debug(uint32_t type, devfs_handle_t vhdl, char *format, ...)
}
}
}
+
+int
+isIO9(nasid_t nasid) {
+ lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
+
+ while (brd) {
+ if (brd->brd_flags & LOCAL_MASTER_IO6) {
+ return 1;
+ }
+ brd = KLCF_NEXT(brd);
+ }
+ /* if it's dual ported, check the peer also */
+ nasid = NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->xbow_peer;
+ if (nasid < 0) return 0;
+ brd = (lboard_t *)KL_CONFIG_INFO(nasid);
+ while (brd) {
+ if (brd->brd_flags & LOCAL_MASTER_IO6) {
+ return 1;
+ }
+ brd = KLCF_NEXT(brd);
+ }
+ return 0;
+}
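
isIO9() walks the local klconfig board list looking for a LOCAL_MASTER_IO6 board and, for dual-ported bricks, repeats the walk on the xbow peer's list. A hedged sketch of a plausible caller (not the patch's actual call site), using only names that already appear above:

    nasid_t nasid = NASID_GET(bridge);

    /* only hand the extra RRBs to bus 1 when an IO9 is actually present */
    if (pcibr_widget_to_bus(pcibr_vhdl) == 1 && isIO9(nasid)) {
            do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
            do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
    }
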
diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_error.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_error.c
index 4295a33e916df9..91ee03e14b352d 100644
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_error.c
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_error.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -27,26 +27,11 @@
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
-#ifdef __ia64
-#define rmallocmap atemapalloc
-#define rmfreemap atemapfree
-#define rmfree atefree
-#define rmalloc atealloc
-#endif
-
extern int hubii_check_widget_disabled(nasid_t, int);
-#ifdef BRIDGE_B_DATACORR_WAR
-extern int ql_bridge_rev_b_war(devfs_handle_t);
-extern int bridge_rev_b_data_check_disable;
-char *rev_b_datacorr_warning =
-"***************************** WARNING! ******************************\n";
-char *rev_b_datacorr_mesg =
-"UNRECOVERABLE IO LINK ERROR. CONTACT SERVICE PROVIDER\n";
-#endif
+
/* =====================================================================
* ERROR HANDLING
@@ -76,13 +61,9 @@ uint64_t bridge_errors_to_dump = BRIDGE_ISR_ERROR_FATAL |
BRIDGE_ISR_PCIBUS_PIOERR;
#endif
-#if defined (PCIBR_LLP_CONTROL_WAR)
-int pcibr_llp_control_war_cnt;
-#endif /* PCIBR_LLP_CONTROL_WAR */
+int pcibr_llp_control_war_cnt; /* PCIBR_LLP_CONTROL_WAR */
-/* FIXME: can these arrays be local ? */
-
-struct reg_values xio_cmd_pactyp[] =
+static struct reg_values xio_cmd_pactyp[] =
{
{0x0, "RdReq"},
{0x1, "RdResp"},
@@ -103,7 +84,7 @@ struct reg_values xio_cmd_pactyp[] =
{0}
};
-struct reg_desc xio_cmd_bits[] =
+static struct reg_desc xio_cmd_bits[] =
{
{WIDGET_DIDN, -28, "DIDN", "%x"},
{WIDGET_SIDN, -24, "SIDN", "%x"},
@@ -120,58 +101,7 @@ struct reg_desc xio_cmd_bits[] =
#define F(s,n) { 1l<<(s),-(s), n }
-struct reg_desc bridge_int_status_desc[] =
-{
- F(45, "PCI_X_SPLIT_MES_PE"),/* PIC ONLY */
- F(44, "PCI_X_SPLIT_EMES"), /* PIC ONLY */
- F(43, "PCI_X_SPLIT_TO"), /* PIC ONLY */
- F(42, "PCI_X_UNEX_COMP"), /* PIC ONLY */
- F(41, "INT_RAM_PERR"), /* PIC ONLY */
- F(40, "PCI_X_ARB_ERR"), /* PIC ONLY */
- F(39, "PCI_X_REQ_TOUT"), /* PIC ONLY */
- F(38, "PCI_X_TABORT"), /* PIC ONLY */
- F(37, "PCI_X_PERR"), /* PIC ONLY */
- F(36, "PCI_X_SERR"), /* PIC ONLY */
- F(35, "PCI_X_MRETRY"), /* PIC ONLY */
- F(34, "PCI_X_MTOUT"), /* PIC ONLY */
- F(33, "PCI_X_DA_PARITY"), /* PIC ONLY */
- F(32, "PCI_X_AD_PARITY"), /* PIC ONLY */
- F(31, "MULTI_ERR"), /* BRIDGE ONLY */
- F(30, "PMU_ESIZE_EFAULT"),
- F(29, "UNEXPECTED_RESP"),
- F(28, "BAD_XRESP_PACKET"),
- F(27, "BAD_XREQ_PACKET"),
- F(26, "RESP_XTALK_ERROR"),
- F(25, "REQ_XTALK_ERROR"),
- F(24, "INVALID_ADDRESS"),
- F(23, "UNSUPPORTED_XOP"),
- F(22, "XREQ_FIFO_OFLOW"),
- F(21, "LLP_REC_SNERROR"),
- F(20, "LLP_REC_CBERROR"),
- F(19, "LLP_RCTY"),
- F(18, "LLP_TX_RETRY"),
- F(17, "LLP_TCTY"),
- F(16, "SSRAM_PERR"), /* BRIDGE ONLY */
- F(15, "PCI_ABORT"),
- F(14, "PCI_PARITY"),
- F(13, "PCI_SERR"),
- F(12, "PCI_PERR"),
- F(11, "PCI_MASTER_TOUT"),
- F(10, "PCI_RETRY_CNT"),
- F(9, "XREAD_REQ_TOUT"),
- F(8, "GIO_BENABLE_ERR"), /* BRIDGE ONLY */
- F(7, "INT7"),
- F(6, "INT6"),
- F(5, "INT5"),
- F(4, "INT4"),
- F(3, "INT3"),
- F(2, "INT2"),
- F(1, "INT1"),
- F(0, "INT0"),
- {0}
-};
-
-struct reg_values space_v[] =
+static struct reg_values space_v[] =
{
{PCIIO_SPACE_NONE, "none"},
{PCIIO_SPACE_ROM, "ROM"},
@@ -189,13 +119,13 @@ struct reg_values space_v[] =
{PCIIO_SPACE_BAD, "BAD"},
{0}
};
-struct reg_desc space_desc[] =
+static struct reg_desc space_desc[] =
{
{0xFF, 0, "space", 0, space_v},
{0}
};
#define device_desc device_bits
-struct reg_desc device_bits[] =
+static struct reg_desc device_bits[] =
{
{BRIDGE_DEV_ERR_LOCK_EN, 0, "ERR_LOCK_EN"},
{BRIDGE_DEV_PAGE_CHK_DIS, 0, "PAGE_CHK_DIS"},
@@ -218,14 +148,14 @@ struct reg_desc device_bits[] =
{0}
};
-void
+static void
print_bridge_errcmd(uint32_t cmdword, char *errtype)
{
printk("\t Bridge %s Error Command Word Register ", errtype);
print_register(cmdword, xio_cmd_bits);
}
-char *pcibr_isr_errs[] =
+static char *pcibr_isr_errs[] =
{
"", "", "", "", "", "", "", "",
"08: GIO non-contiguous byte enable in crosstalk packet", /* BRIDGE ONLY */
@@ -279,7 +209,7 @@ char *pcibr_isr_errs[] =
/*
* display memory directory state
*/
-void
+static void
pcibr_show_dir_state(paddr_t paddr, char *prefix)
{
#ifdef LATER
@@ -428,7 +358,6 @@ pcibr_error_dump(pcibr_soft_t pcibr_soft)
break;
case BRIDGE_ISR_PAGE_FAULT: /* bit30 PMU_PAGE_FAULT */
-/* case BRIDGE_ISR_PMU_ESIZE_FAULT: bit30 PMU_ESIZE_FAULT */
if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
reg_desc = "Map Fault Address";
else
@@ -592,31 +521,9 @@ pcibr_error_dump(pcibr_soft_t pcibr_soft)
printk( "\t%s\n", pcibr_isr_errs[i]);
}
}
-
-#if BRIDGE_ERROR_INTR_WAR
- if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) { /* known bridge bug */
- /*
- * Should never receive interrupts for these reasons on Rev 1 bridge
- * as they are not enabled. Assert for it.
- */
- ASSERT((int_status & (BRIDGE_IMR_PCI_MST_TIMEOUT |
- BRIDGE_ISR_RESP_XTLK_ERR |
- BRIDGE_ISR_LLP_TX_RETRY)) == 0);
- }
- if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_C) { /* known bridge bug */
- /*
- * This interrupt is turned off at init time. So, should never
- * see this interrupt.
- */
- ASSERT((int_status & BRIDGE_ISR_BAD_XRESP_PKT) == 0);
- }
-#endif
}
-#define PCIBR_ERRINTR_GROUP(error) \
- (( error & (BRIDGE_IRR_PCI_GRP|BRIDGE_IRR_GIO_GRP)
-
-uint32_t
+static uint32_t
pcibr_errintr_group(uint32_t error)
{
uint32_t group = BRIDGE_IRR_MULTI_CLR;
@@ -741,15 +648,7 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
picreg_t int_status_64;
int number_bits;
int i;
-
- /* REFERENCED */
uint64_t disable_errintr_mask = 0;
-#ifdef EHE_ENABLE
- int rv;
- int error_code = IOECODE_DMA | IOECODE_READ;
- ioerror_mode_t mode = MODE_DEVERROR;
- ioerror_t ioe;
-#endif /* EHE_ENABLE */
nasid_t nasid;
@@ -806,10 +705,6 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
pcibr_soft->bs_errinfo.bserr_toutcnt++;
/* Let's go recursive */
return(pcibr_error_intr_handler(irq, arg, ep));
-#ifdef LATER
- timeout(pcibr_error_intr_handler, pcibr_soft, BRIDGE_PIOERR_TIMEOUT);
-#endif
- return;
}
/* We read the INT_STATUS register as a 64bit picreg_t for PIC and a
@@ -847,24 +742,6 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
pcibr_pioerr_check(pcibr_soft);
}
-#ifdef BRIDGE_B_DATACORR_WAR
- if ((pcibr_soft->bs_rev_num == BRIDGE_PART_REV_B) &&
- (err_status & BRIDGE_IMR_LLP_REC_CBERR)) {
- if (bridge_rev_b_data_check_disable)
- printk(KERN_WARNING "\n%s%s: %s%s\n", rev_b_datacorr_warning,
- pcibr_soft->bs_name, rev_b_datacorr_mesg,
- rev_b_datacorr_warning);
- else {
- ql_bridge_rev_b_war(pcibr_soft->bs_vhdl);
- PRINT_PANIC( "\n%s%s: %s%s\n", rev_b_datacorr_warning,
- pcibr_soft->bs_name, rev_b_datacorr_mesg,
- rev_b_datacorr_warning);
- }
-
- err_status &= ~BRIDGE_IMR_LLP_REC_CBERR;
- }
-#endif /* BRIDGE_B_DATACORR_WAR */
-
if (err_status) {
struct bs_errintr_stat_s *bs_estat = pcibr_soft->bs_errintr_stat;
@@ -1024,9 +901,8 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
(0x00402000 == (0x00F07F00 & bridge->b_wid_err_cmdword))) {
err_status &= ~BRIDGE_ISR_INVLD_ADDR;
}
-#if defined (PCIBR_LLP_CONTROL_WAR)
/*
- * The bridge bug, where the llp_config or control registers
+ * The bridge bug (PCIBR_LLP_CONTROL_WAR), where the llp_config or control registers
* need to be read back after being written, affects an MP
* system since there could be small windows between writing
* the register and reading it back on one cpu while another
@@ -1039,40 +915,9 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
if ((err_status & BRIDGE_ISR_INVLD_ADDR) &&
((((uint64_t) bridge->b_wid_err_upper << 32) | (bridge->b_wid_err_lower))
== (BRIDGE_INT_RST_STAT & 0xff0))) {
-#if 0
- if (kdebug)
- printk(KERN_NOTICE "%s bridge: ignoring llp/control address interrupt",
- pcibr_soft->bs_name);
-#endif
pcibr_llp_control_war_cnt++;
err_status &= ~BRIDGE_ISR_INVLD_ADDR;
}
-#endif /* PCIBR_LLP_CONTROL_WAR */
-
-#ifdef EHE_ENABLE
- /* Check if this is the RESP_XTALK_ERROR interrupt.
- * This can happen due to a failed DMA READ operation.
- */
- if (err_status & BRIDGE_ISR_RESP_XTLK_ERR) {
- /* Phase 1 : Look at the error state in the bridge and further
- * down in the device layers.
- */
- (void)error_state_set(pcibr_soft->bs_conn, ERROR_STATE_LOOKUP);
- IOERROR_SETVALUE(&ioe, widgetnum, pcibr_soft->bs_xid);
- (void)pcibr_error_handler((error_handler_arg_t)pcibr_soft,
- error_code,
- mode,
- &ioe);
- /* Phase 2 : Perform the action agreed upon in phase 1.
- */
- (void)error_state_set(pcibr_soft->bs_conn, ERROR_STATE_ACTION);
- rv = pcibr_error_handler((error_handler_arg_t)pcibr_soft,
- error_code,
- mode,
- &ioe);
- }
- if (rv != IOERROR_HANDLED) {
-#endif /* EHE_ENABLE */
bridge_errors_to_dump |= BRIDGE_ISR_PCIBUS_PIOERR;
@@ -1089,25 +934,16 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
*/
if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV867308, pcibr_soft) &&
(err_status & (BRIDGE_ISR_LLP_REC_SNERR | BRIDGE_ISR_LLP_REC_CBERR))) {
- printk("BRIDGE ERR_STATUS 0x%x\n", err_status);
+ printk("BRIDGE ERR_STATUS 0x%lx\n", err_status);
pcibr_error_dump(pcibr_soft);
-#ifdef LATER
- machine_error_dump("");
-#endif
PRINT_PANIC("PCI Bridge Error interrupt killed the system");
}
if (err_status & BRIDGE_ISR_ERROR_FATAL) {
-#ifdef LATER
- machine_error_dump("");
-#endif
PRINT_PANIC("PCI Bridge Error interrupt killed the system");
/*NOTREACHED */
}
-#ifdef EHE_ENABLE
- }
-#endif
/*
* We can't return without re-enabling the interrupt, since
@@ -1137,136 +973,6 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
pcibr_soft->bs_errinfo.bserr_intstat = 0;
}
-/*
- * pcibr_addr_toslot
- * Given the 'pciaddr' find out which slot this address is
- * allocated to, and return the slot number.
- * While we have the info handy, construct the
- * function number, space code and offset as well.
- *
- * NOTE: if this routine is called, we don't know whether
- * the address is in CFG, MEM, or I/O space. We have to guess.
- * This will be the case on PIO stores, where the only way
- * we have of getting the address is to check the Bridge, which
- * stores the PCI address but not the space and not the xtalk
- * address (from which we could get it).
- */
-int
-pcibr_addr_toslot(pcibr_soft_t pcibr_soft,
- iopaddr_t pciaddr,
- pciio_space_t *spacep,
- iopaddr_t *offsetp,
- pciio_function_t *funcp)
-{
- int s, f = 0, w;
- iopaddr_t base;
- size_t size;
- pciio_piospace_t piosp;
-
- /*
- * Check if the address is in config space
- */
-
- if ((pciaddr >= BRIDGE_CONFIG_BASE) && (pciaddr < BRIDGE_CONFIG_END)) {
-
- if (pciaddr >= BRIDGE_CONFIG1_BASE)
- pciaddr -= BRIDGE_CONFIG1_BASE;
- else
- pciaddr -= BRIDGE_CONFIG_BASE;
-
- s = pciaddr / BRIDGE_CONFIG_SLOT_SIZE;
- pciaddr %= BRIDGE_CONFIG_SLOT_SIZE;
-
- if (funcp) {
- f = pciaddr / 0x100;
- pciaddr %= 0x100;
- }
- if (spacep)
- *spacep = PCIIO_SPACE_CFG;
- if (offsetp)
- *offsetp = pciaddr;
- if (funcp)
- *funcp = f;
-
- return s;
- }
- for (s = pcibr_soft->bs_min_slot; s < PCIBR_NUM_SLOTS(pcibr_soft); ++s) {
- int nf = pcibr_soft->bs_slot[s].bss_ninfo;
- pcibr_info_h pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
-
- for (f = 0; f < nf; f++) {
- pcibr_info_t pcibr_info = pcibr_infoh[f];
-
- if (!pcibr_info)
- continue;
- for (w = 0; w < 6; w++) {
- if (pcibr_info->f_window[w].w_space == PCIIO_SPACE_NONE) {
- continue;
- }
- base = pcibr_info->f_window[w].w_base;
- size = pcibr_info->f_window[w].w_size;
-
- if ((pciaddr >= base) && (pciaddr < (base + size))) {
- if (spacep)
- *spacep = PCIIO_SPACE_WIN(w);
- if (offsetp)
- *offsetp = pciaddr - base;
- if (funcp)
- *funcp = f;
- return s;
- } /* endif match */
- } /* next window */
- } /* next func */
- } /* next slot */
-
- /*
- * Check if the address was allocated as part of the
- * pcibr_piospace_alloc calls.
- */
- for (s = pcibr_soft->bs_min_slot; s < PCIBR_NUM_SLOTS(pcibr_soft); ++s) {
- int nf = pcibr_soft->bs_slot[s].bss_ninfo;
- pcibr_info_h pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
-
- for (f = 0; f < nf; f++) {
- pcibr_info_t pcibr_info = pcibr_infoh[f];
-
- if (!pcibr_info)
- continue;
- piosp = pcibr_info->f_piospace;
- while (piosp) {
- if ((piosp->start <= pciaddr) &&
- ((piosp->count + piosp->start) > pciaddr)) {
- if (spacep)
- *spacep = piosp->space;
- if (offsetp)
- *offsetp = pciaddr - piosp->start;
- return s;
- } /* endif match */
- piosp = piosp->next;
- } /* next piosp */
- } /* next func */
- } /* next slot */
-
- /*
- * Some other random address on the PCI bus ...
- * we have no way of knowing whether this was
- * a MEM or I/O access; so, for now, we just
- * assume that the low 1G is MEM, the next
- * 3G is I/O, and anything above the 4G limit
- * is obviously MEM.
- */
-
- if (spacep)
- *spacep = ((pciaddr < (1ul << 30)) ? PCIIO_SPACE_MEM :
- (pciaddr < (4ul << 30)) ? PCIIO_SPACE_IO :
- PCIIO_SPACE_MEM);
- if (offsetp)
- *offsetp = pciaddr;
-
- return PCIIO_SLOT_NONE;
-
-}
-
void
pcibr_error_cleanup(pcibr_soft_t pcibr_soft, int error_code)
{
@@ -1286,59 +992,6 @@ pcibr_error_cleanup(pcibr_soft_t pcibr_soft, int error_code)
(void) bridge->b_wid_tflush; /* flushbus */
}
-/*
- * pcibr_error_extract
- * Given the 'pcibr vertex handle' find out which slot
- * the bridge status error address (from pcibr_soft info
- * hanging off the vertex)
- * allocated to, and return the slot number.
- * While we have the info handy, construct the
- * space code and offset as well.
- *
- * NOTE: if this routine is called, we don't know whether
- * the address is in CFG, MEM, or I/O space. We have to guess.
- * This will be the case on PIO stores, where the only way
- * we have of getting the address is to check the Bridge, which
- * stores the PCI address but not the space and not the xtalk
- * address (from which we could get it).
- *
- * XXX- this interface has no way to return the function
- * number on a multifunction card, even though that data
- * is available.
- */
-
-pciio_slot_t
-pcibr_error_extract(devfs_handle_t pcibr_vhdl,
- pciio_space_t *spacep,
- iopaddr_t *offsetp)
-{
- pcibr_soft_t pcibr_soft = 0;
- iopaddr_t bserr_addr;
- bridge_t *bridge;
- pciio_slot_t slot = PCIIO_SLOT_NONE;
- arbitrary_info_t rev;
-
- /* Do a sanity check as to whether we really got a
- * bridge vertex handle.
- */
- if (hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &rev) !=
- GRAPH_SUCCESS)
- return(slot);
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (pcibr_soft) {
- bridge = pcibr_soft->bs_base;
- bserr_addr =
- bridge->b_pci_err_lower |
- ((uint64_t) (bridge->b_pci_err_upper &
- BRIDGE_ERRUPPR_ADDRMASK) << 32);
-
- slot = pcibr_addr_toslot(pcibr_soft, bserr_addr,
- spacep, offsetp, NULL);
- }
- return slot;
-}
-
/*ARGSUSED */
void
pcibr_device_disable(pcibr_soft_t pcibr_soft, int devnum)
@@ -1426,7 +1079,7 @@ pcibr_pioerror(
{
int retval = IOERROR_HANDLED;
- devfs_handle_t pcibr_vhdl = pcibr_soft->bs_vhdl;
+ vertex_hdl_t pcibr_vhdl = pcibr_soft->bs_vhdl;
bridge_t *bridge = pcibr_soft->bs_base;
iopaddr_t bad_xaddr;
@@ -1837,7 +1490,7 @@ pcibr_dmard_error(
ioerror_mode_t mode,
ioerror_t *ioe)
{
- devfs_handle_t pcibr_vhdl = pcibr_soft->bs_vhdl;
+ vertex_hdl_t pcibr_vhdl = pcibr_soft->bs_vhdl;
bridge_t *bridge = pcibr_soft->bs_base;
bridgereg_t bus_lowaddr, bus_uppraddr;
int retval = 0;
@@ -1946,7 +1599,7 @@ pcibr_dmawr_error(
ioerror_mode_t mode,
ioerror_t *ioe)
{
- devfs_handle_t pcibr_vhdl = pcibr_soft->bs_vhdl;
+ vertex_hdl_t pcibr_vhdl = pcibr_soft->bs_vhdl;
int retval;
retval = pciio_error_handler(pcibr_vhdl, error_code, mode, ioe);
@@ -1982,34 +1635,12 @@ pcibr_error_handler(
pcibr_soft_t pcibr_soft;
int retval = IOERROR_BADERRORCODE;
-#ifdef EHE_ENABLE
- devfs_handle_t xconn_vhdl,pcibr_vhdl;
- error_state_t e_state;
-#endif /* EHE_ENABLE */
-
pcibr_soft = (pcibr_soft_t) einfo;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ERROR_HDLR, pcibr_soft->bs_conn,
"pcibr_error_handler: pcibr_soft=0x%x, error_code=0x%x\n",
pcibr_soft, error_code));
-#ifdef EHE_ENABLE
- xconn_vhdl = pcibr_soft->bs_conn;
- pcibr_vhdl = pcibr_soft->bs_vhdl;
-
- e_state = error_state_get(xconn_vhdl);
-
- if (error_state_set(pcibr_vhdl, e_state) ==
- ERROR_RETURN_CODE_CANNOT_SET_STATE)
- return(IOERROR_UNHANDLED);
-
- /* If we are in the action handling phase clean out the error state
- * on the xswitch.
- */
- if (e_state == ERROR_STATE_ACTION)
- (void)error_state_set(xconn_vhdl, ERROR_STATE_NONE);
-#endif /* EHE_ENABLE */
-
#if DEBUG && ERROR_DEBUG
printk( "%s: pcibr_error_handler\n", pcibr_soft->bs_name);
#endif
@@ -2086,11 +1717,6 @@ pcibr_error_handler_wrapper(
* the error from the PIO address.
*/
-#if 0
- if (mode == MODE_DEVPROBE)
- pio_retval = IOERROR_HANDLED;
- else {
-#endif
if (error_code & IOECODE_PIO) {
iopaddr_t bad_xaddr;
/*
@@ -2123,9 +1749,6 @@ pcibr_error_handler_wrapper(
pio_retval = IOERROR_UNHANDLED;
}
}
-#if 0
- } /* MODE_DEVPROBE */
-#endif
/*
* If the error was a result of a DMA Write, we tell what bus on the PIC
@@ -2201,37 +1824,3 @@ pcibr_error_handler_wrapper(
return IOERROR_HANDLED;
}
}
-
-
-/*
- * Reenable a device after handling the error.
- * This is called by the lower layers when they wish to be reenabled
- * after an error.
- * Note that each layer would be calling the previous layer to reenable
- * first, before going ahead with their own re-enabling.
- */
-
-int
-pcibr_error_devenable(devfs_handle_t pconn_vhdl, int error_code)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
- ASSERT(error_code & IOECODE_PIO);
-
- /* If the error is not known to be a write,
- * we have to call devenable.
- * write errors are isolated to the bridge.
- */
- if (!(error_code & IOECODE_WRITE)) {
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- int rc;
-
- rc = xtalk_error_devenable(xconn_vhdl, pciio_slot, error_code);
- if (rc != IOERROR_HANDLED)
- return rc;
- }
- pcibr_error_cleanup(pcibr_soft, error_code);
- return IOERROR_HANDLED;
-}
diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_hints.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_hints.c
index 657e2f855d3d5c..3b9344a36f5441 100644
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_hints.c
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_hints.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -27,20 +27,19 @@
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
-pcibr_hints_t pcibr_hints_get(devfs_handle_t, int);
-void pcibr_hints_fix_rrbs(devfs_handle_t);
-void pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
-void pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
-void pcibr_set_rrb_callback(devfs_handle_t, rrb_alloc_funct_t);
-void pcibr_hints_handsoff(devfs_handle_t);
-void pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, uint64_t);
+pcibr_hints_t pcibr_hints_get(vertex_hdl_t, int);
+void pcibr_hints_fix_rrbs(vertex_hdl_t);
+void pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
+void pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
+void pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
+void pcibr_hints_handsoff(vertex_hdl_t);
+void pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
pcibr_hints_t
-pcibr_hints_get(devfs_handle_t xconn_vhdl, int alloc)
+pcibr_hints_get(vertex_hdl_t xconn_vhdl, int alloc)
{
arbitrary_info_t ainfo = 0;
graph_error_t rv;
@@ -79,7 +78,7 @@ abnormal_exit:
}
void
-pcibr_hints_fix_some_rrbs(devfs_handle_t xconn_vhdl, unsigned mask)
+pcibr_hints_fix_some_rrbs(vertex_hdl_t xconn_vhdl, unsigned mask)
{
pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
@@ -91,13 +90,13 @@ pcibr_hints_fix_some_rrbs(devfs_handle_t xconn_vhdl, unsigned mask)
}
void
-pcibr_hints_fix_rrbs(devfs_handle_t xconn_vhdl)
+pcibr_hints_fix_rrbs(vertex_hdl_t xconn_vhdl)
{
pcibr_hints_fix_some_rrbs(xconn_vhdl, 0xFF);
}
void
-pcibr_hints_dualslot(devfs_handle_t xconn_vhdl,
+pcibr_hints_dualslot(vertex_hdl_t xconn_vhdl,
pciio_slot_t host,
pciio_slot_t guest)
{
@@ -111,7 +110,7 @@ pcibr_hints_dualslot(devfs_handle_t xconn_vhdl,
}
void
-pcibr_hints_intr_bits(devfs_handle_t xconn_vhdl,
+pcibr_hints_intr_bits(vertex_hdl_t xconn_vhdl,
pcibr_intr_bits_f *xxx_intr_bits)
{
pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
@@ -124,7 +123,7 @@ pcibr_hints_intr_bits(devfs_handle_t xconn_vhdl,
}
void
-pcibr_set_rrb_callback(devfs_handle_t xconn_vhdl, rrb_alloc_funct_t rrb_alloc_funct)
+pcibr_set_rrb_callback(vertex_hdl_t xconn_vhdl, rrb_alloc_funct_t rrb_alloc_funct)
{
pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
@@ -133,7 +132,7 @@ pcibr_set_rrb_callback(devfs_handle_t xconn_vhdl, rrb_alloc_funct_t rrb_alloc_fu
}
void
-pcibr_hints_handsoff(devfs_handle_t xconn_vhdl)
+pcibr_hints_handsoff(vertex_hdl_t xconn_vhdl)
{
pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
@@ -145,13 +144,13 @@ pcibr_hints_handsoff(devfs_handle_t xconn_vhdl)
}
void
-pcibr_hints_subdevs(devfs_handle_t xconn_vhdl,
+pcibr_hints_subdevs(vertex_hdl_t xconn_vhdl,
pciio_slot_t slot,
uint64_t subdevs)
{
arbitrary_info_t ainfo = 0;
char sdname[16];
- devfs_handle_t pconn_vhdl = GRAPH_VERTEX_NONE;
+ vertex_hdl_t pconn_vhdl = GRAPH_VERTEX_NONE;
sprintf(sdname, "%s/%d", EDGE_LBL_PCI, slot);
(void) hwgraph_path_add(xconn_vhdl, sdname, &pconn_vhdl);
diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_idbg.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_idbg.c
deleted file mode 100644
index 93c52e35a7c768..00000000000000
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_idbg.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
-
-#ifdef LATER
-
-char *pci_space[] = {"NONE",
- "ROM",
- "IO",
- "",
- "MEM",
- "MEM32",
- "MEM64",
- "CFG",
- "WIN0",
- "WIN1",
- "WIN2",
- "WIN3",
- "WIN4",
- "WIN5",
- "",
- "BAD"};
-
-void
-idbg_pss_func(pcibr_info_h pcibr_infoh, int func)
-{
- pcibr_info_t pcibr_info = pcibr_infoh[func];
- char name[MAXDEVNAME];
- int win;
-
- if (!pcibr_info)
- return;
- qprintf("Per-slot Function Info\n");
- sprintf(name, "%v", pcibr_info->f_vertex);
- qprintf("\tSlot Name : %s\n",name);
- qprintf("\tPCI Bus : %d ",pcibr_info->f_bus);
- qprintf("Slot : %d ", pcibr_info->f_slot);
- qprintf("Function : %d ", pcibr_info->f_func);
- qprintf("VendorId : 0x%x " , pcibr_info->f_vendor);
- qprintf("DeviceId : 0x%x\n", pcibr_info->f_device);
- sprintf(name, "%v", pcibr_info->f_master);
- qprintf("\tBus provider : %s\n",name);
- qprintf("\tProvider Fns : 0x%x ", pcibr_info->f_pops);
- qprintf("Error Handler : 0x%x Arg 0x%x\n",
- pcibr_info->f_efunc,pcibr_info->f_einfo);
- for(win = 0 ; win < 6 ; win++)
- qprintf("\tBase Reg #%d space %s base 0x%x size 0x%x\n",
- win,pci_space[pcibr_info->f_window[win].w_space],
- pcibr_info->f_window[win].w_base,
- pcibr_info->f_window[win].w_size);
-
- qprintf("\tRom base 0x%x size 0x%x\n",
- pcibr_info->f_rbase,pcibr_info->f_rsize);
-
- qprintf("\tInterrupt Bit Map\n");
- qprintf("\t\tPCI Int#\tBridge Pin#\n");
- for (win = 0 ; win < 4; win++)
- qprintf("\t\tINT%c\t\t%d\n",win+'A',pcibr_info->f_ibit[win]);
- qprintf("\n");
-}
-
-
-void
-idbg_pss_info(pcibr_soft_t pcibr_soft, pciio_slot_t slot)
-{
- pcibr_soft_slot_t pss;
- char slot_conn_name[MAXDEVNAME];
- int func;
-
- pss = &pcibr_soft->bs_slot[slot];
- qprintf("PCI INFRASTRUCTURAL INFO FOR SLOT %d\n", slot);
- qprintf("\tHost Present ? %s ", pss->has_host ? "yes" : "no");
- qprintf("\tHost Slot : %d\n",pss->host_slot);
- sprintf(slot_conn_name, "%v", pss->slot_conn);
- qprintf("\tSlot Conn : %s\n",slot_conn_name);
- qprintf("\t#Functions : %d\n",pss->bss_ninfo);
- for (func = 0; func < pss->bss_ninfo; func++)
- idbg_pss_func(pss->bss_infos,func);
- qprintf("\tSpace : %s ",pci_space[pss->bss_devio.bssd_space]);
- qprintf("\tBase : 0x%x ", pss->bss_devio.bssd_base);
- qprintf("\tShadow Devreg : 0x%x\n", pss->bss_device);
- qprintf("\tUsage counts : pmu %d d32 %d d64 %d\n",
- pss->bss_pmu_uctr,pss->bss_d32_uctr,pss->bss_d64_uctr);
-
- qprintf("\tDirect Trans Info : d64_base 0x%x d64_flags 0x%x"
- "d32_base 0x%x d32_flags 0x%x\n",
- pss->bss_d64_base, pss->bss_d64_flags,
- pss->bss_d32_base, pss->bss_d32_flags);
-
- qprintf("\tExt ATEs active ? %s",
- pss->bss_ext_ates_active ? "yes" : "no");
- qprintf(" Command register : 0x%x ", pss->bss_cmd_pointer);
- qprintf(" Shadow command val : 0x%x\n", pss->bss_cmd_shadow);
-
- qprintf("\tRRB Info : Valid %d+%d Reserved %d\n",
- pcibr_soft->bs_rrb_valid[slot],
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
-
-}
-
-int ips = 0;
-
-void
-idbg_pss(pcibr_soft_t pcibr_soft)
-{
- pciio_slot_t slot;
-
-
- if (ips >= 0 && ips < 8)
- idbg_pss_info(pcibr_soft,ips);
- else if (ips < 0)
- for (slot = 0; slot < 8; slot++)
- idbg_pss_info(pcibr_soft,slot);
- else
- qprintf("Invalid ips %d\n",ips);
-}
-#endif /* LATER */
diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c
index 22b679e9d8ab13..211aec200f4b59 100644
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -27,7 +27,6 @@
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
@@ -36,20 +35,32 @@
#define rmfreemap atemapfree
#define rmfree atefree
#define rmalloc atealloc
+
+inline int
+compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
+{
+ FIXME("compare_and_swap_ptr : NOT ATOMIC");
+ if (*location == old_ptr) {
+ *location = new_ptr;
+ return(1);
+ }
+ else
+ return(0);
+}
#endif
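[Editor's note: illustrative sketch, not part of the patch.] The
compare_and_swap_ptr() helper added above is explicitly flagged as not
atomic. A minimal sketch of how the same interface could be made atomic,
assuming the cmpxchg() macro in this tree handles pointer-sized operands
(the *_atomic name and the standalone form are invented for illustration):

#include <asm/system.h>		/* assumed location of cmpxchg() */

static inline int
compare_and_swap_ptr_atomic(void **location, void *old_ptr, void *new_ptr)
{
	/* cmpxchg() returns the value previously stored at *location;
	 * the swap took effect only if that value was old_ptr. */
	return cmpxchg(location, old_ptr, new_ptr) == old_ptr;
}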
unsigned pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines, int nslots);
-pcibr_intr_t pcibr_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
+pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
void pcibr_intr_free(pcibr_intr_t);
void pcibr_setpciint(xtalk_intr_t);
int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
void pcibr_intr_disconnect(pcibr_intr_t);
-devfs_handle_t pcibr_intr_cpu_get(pcibr_intr_t);
+vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
void pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
void pcibr_intr_func(intr_arg_t);
-extern pcibr_info_t pcibr_info_get(devfs_handle_t);
+extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
/* =====================================================================
* INTERRUPT MANAGEMENT
@@ -132,6 +143,102 @@ pcibr_wrap_put(pcibr_intr_wrap_t wrap, pcibr_intr_cbuf_t cbuf)
}
/*
+ * On SN systems there is a race condition between a PIO read response
+ * and DMA's. In rare cases, the read response may beat the DMA, causing
+ * the driver to think that data in memory is complete and meaningful.
+ * This code eliminates that race.
+ * This routine is called by the PIO read routines after doing the read.
+ * This routine then forces a fake interrupt on another line, which
+ * is logically associated with the slot that the PIO is addressed to.
+ * (see sn_dma_flush_init() )
+ * It then spins while watching the memory location that the interrupt
+ * is targetted to. When the interrupt response arrives, we are sure
+ * that the DMA has landed in memory and it is safe for the driver
+ * to proceed.
+ */
+
+extern struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
+
+void
+sn_dma_flush(unsigned long addr) {
+ nasid_t nasid;
+ int wid_num;
+ volatile struct sn_flush_device_list *p;
+ int i,j;
+ int bwin;
+ unsigned long flags;
+
+ nasid = NASID_GET(addr);
+ wid_num = SWIN_WIDGETNUM(addr);
+ bwin = BWIN_WINDOWNUM(addr);
+
+ if (flush_nasid_list[nasid].widget_p == NULL) return;
+ if (bwin > 0) {
+ bwin--;
+ switch (bwin) {
+ case 0:
+ wid_num = ((flush_nasid_list[nasid].iio_itte1) >> 8) & 0xf;
+ break;
+ case 1:
+ wid_num = ((flush_nasid_list[nasid].iio_itte2) >> 8) & 0xf;
+ break;
+ case 2:
+ wid_num = ((flush_nasid_list[nasid].iio_itte3) >> 8) & 0xf;
+ break;
+ case 3:
+ wid_num = ((flush_nasid_list[nasid].iio_itte4) >> 8) & 0xf;
+ break;
+ case 4:
+ wid_num = ((flush_nasid_list[nasid].iio_itte5) >> 8) & 0xf;
+ break;
+ case 5:
+ wid_num = ((flush_nasid_list[nasid].iio_itte6) >> 8) & 0xf;
+ break;
+ case 6:
+ wid_num = ((flush_nasid_list[nasid].iio_itte7) >> 8) & 0xf;
+ break;
+ }
+ }
+ if (flush_nasid_list[nasid].widget_p == NULL) return;
+ if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) return;
+ p = &flush_nasid_list[nasid].widget_p[wid_num][0];
+
+ // find a matching BAR
+
+ for (i=0; i<DEV_PER_WIDGET;i++) {
+ for (j=0; j<PCI_ROM_RESOURCE;j++) {
+ if (p->bar_list[j].start == 0) break;
+ if (addr >= p->bar_list[j].start && addr <= p->bar_list[j].end) break;
+ }
+ if (j < PCI_ROM_RESOURCE && p->bar_list[j].start != 0) break;
+ p++;
+ }
+
+ // if no matching BAR, return without doing anything.
+
+ if (i == DEV_PER_WIDGET) return;
+
+ spin_lock_irqsave(&p->flush_lock, flags);
+
+ p->flush_addr = 0;
+
+ // force an interrupt.
+
+ *(bridgereg_t *)(p->force_int_addr) = 1;
+
+ // wait for the interrupt to come back.
+
+ while (p->flush_addr != 0x10f);
+
+ // okay, everything is synched up.
+ spin_unlock_irqrestore(&p->flush_lock, flags);
+
+ return;
+}
+
+EXPORT_SYMBOL(sn_dma_flush);
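[Editor's note: usage sketch, not part of the patch.] The comment block
above describes when sn_dma_flush() has to be called; the fragment below
shows the intended calling pattern from a driver's PIO read path. The
device structure, register field and DMA-done bit are hypothetical:

/* Hypothetical caller: once a PIO read of a status register reports the
 * DMA as complete, flush before looking at the DMA buffer so the data
 * has really landed in memory. */
struct my_dev {
	unsigned long	status_reg;	/* PIO-mapped device register */
	void		*dma_buf;	/* buffer the device DMAs into */
};

static int my_dev_dma_done(struct my_dev *dev)
{
	u32 status = readl(dev->status_reg);	/* PIO read response */

	if (!(status & 0x1))			/* hypothetical DMA-done bit */
		return 0;
	sn_dma_flush(dev->status_reg);		/* wait out any in-flight DMA */
	return 1;				/* dev->dma_buf is now valid */
}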
+
+/*
* There are end cases where a deadlock can occur if interrupt
* processing completes and the Bridge b_int_status bit is still set.
*
@@ -164,51 +271,42 @@ pcibr_wrap_put(pcibr_intr_wrap_t wrap, pcibr_intr_cbuf_t cbuf)
* to check if a specific Bridge b_int_status bit is set, and if so,
* cause the setting of the corresponding interrupt bit.
*
- * On a XBridge (SN1), we do this by writing the appropriate Bridge Force
- * Interrupt register. On SN0, or SN1 with an older Bridge, the Bridge
- * Force Interrupt register does not exist, so we write the Hub
- * INT_PEND_MOD register directly. Likewise for Octane, where we write the
- * Heart Set Interrupt Status register directly.
+ * On an XBridge (SN1) and PIC (SN2), we do this by writing the appropriate Bridge Force
+ * Interrupt register.
*/
void
-pcibr_force_interrupt(pcibr_intr_wrap_t wrap)
+pcibr_force_interrupt(pcibr_intr_t intr)
{
-#ifdef PIC_LATER
unsigned bit;
- pcibr_soft_t pcibr_soft = wrap->iw_soft;
+ unsigned bits;
+ pcibr_soft_t pcibr_soft = intr->bi_soft;
bridge_t *bridge = pcibr_soft->bs_base;
- bit = wrap->iw_ibit;
+ bits = intr->bi_ibits;
+ for (bit = 0; bit < 8; bit++) {
+ if (bits & (1 << bit)) {
- PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
- "pcibr_force_interrupt: bit=0x%x\n", bit));
+ PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
+ "pcibr_force_interrupt: bit=0x%x\n", bit));
- if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
- bridge->b_force_pin[bit].intr = 1;
- } else if ((1 << bit) & *wrap->iw_stat) {
- cpuid_t cpu;
- unsigned intr_bit;
- xtalk_intr_t xtalk_intr =
- pcibr_soft->bs_intr[bit].bsi_xtalk_intr;
-
- intr_bit = (short) xtalk_intr_vector_get(xtalk_intr);
- cpu = xtalk_intr_cpuid_get(xtalk_intr);
- REMOTE_CPU_SEND_INTR(cpu, intr_bit);
+ if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
+ bridge->b_force_pin[bit].intr = 1;
+ }
+ }
}
-#endif /* PIC_LATER */
}
/*ARGSUSED */
pcibr_intr_t
-pcibr_intr_alloc(devfs_handle_t pconn_vhdl,
+pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
pciio_intr_line_t lines,
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{
pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
bridge_t *bridge = pcibr_soft->bs_base;
int is_threaded = 0;
@@ -498,25 +596,18 @@ pcibr_setpciint(xtalk_intr_t xtalk_intr)
{
iopaddr_t addr;
xtalk_intr_vector_t vect;
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
bridge_t *bridge;
+ picreg_t *int_addr;
addr = xtalk_intr_addr_get(xtalk_intr);
vect = xtalk_intr_vector_get(xtalk_intr);
vhdl = xtalk_intr_dev_get(xtalk_intr);
bridge = (bridge_t *)xtalk_piotrans_addr(vhdl, 0, 0, sizeof(bridge_t), 0);
- if (is_pic(bridge)) {
- picreg_t *int_addr;
- int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
- *int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
+ int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
+ *int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
(PIC_INT_ADDR_HOST & addr));
- } else {
- bridgereg_t *int_addr;
- int_addr = (bridgereg_t *)xtalk_intr_sfarg_get(xtalk_intr);
- *int_addr = ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
- (BRIDGE_INT_ADDR_FLD & vect));
- }
}
/*ARGSUSED */
@@ -582,8 +673,7 @@ pcibr_intr_connect(pcibr_intr_t pcibr_intr, intr_func_t intr_func, intr_arg_t in
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
"pcibr_setpciint: int_addr=0x%x, *int_addr=0x%x, "
"pcibr_int_bit=0x%x\n", int_addr,
- (is_pic(bridge) ?
- *(picreg_t *)int_addr : *(bridgereg_t *)int_addr),
+ *(picreg_t *)int_addr,
pcibr_int_bit));
}
@@ -699,7 +789,7 @@ pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
pcibr_intr_func, (intr_arg_t) intr_wrap,
(xtalk_intr_setfunc_t)pcibr_setpciint,
- (void *)pcibr_int_bit);
+ (void *)(long)pcibr_int_bit);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
"pcibr_intr_disconnect: now-sharing int_bits=0x%x\n",
pcibr_int_bit));
@@ -707,7 +797,7 @@ pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
}
/*ARGSUSED */
-devfs_handle_t
+vertex_hdl_t
pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
{
pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
@@ -780,9 +870,6 @@ pcibr_setwidint(xtalk_intr_t intr)
bridge->b_wid_int_lower = NEW_b_wid_int_lower;
bridge->b_int_host_err = vect;
-printk("pcibr_setwidint: b_wid_int_upper 0x%x b_wid_int_lower 0x%x b_int_host_err 0x%x\n",
- NEW_b_wid_int_upper, NEW_b_wid_int_lower, vect);
-
}
/*
@@ -957,7 +1044,7 @@ pcibr_intr_func(intr_arg_t arg)
* interrupt to avoid a potential deadlock situation.
*/
if (wrap->iw_hdlrcnt == 0) {
- pcibr_force_interrupt(wrap);
+ pcibr_force_interrupt((pcibr_intr_t) wrap);
}
}
diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c
index 3febeecaa10220..bafa7d7d303305 100644
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -27,7 +27,6 @@
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
@@ -41,11 +40,11 @@ void do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
-int pcibr_wrb_flush(devfs_handle_t);
-int pcibr_rrb_alloc(devfs_handle_t, int *, int *);
-int pcibr_rrb_check(devfs_handle_t, int *, int *, int *, int *);
-void pcibr_rrb_flush(devfs_handle_t);
-int pcibr_slot_initial_rrb_alloc(devfs_handle_t,pciio_slot_t);
+int pcibr_wrb_flush(vertex_hdl_t);
+int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
+int pcibr_rrb_check(vertex_hdl_t, int *, int *, int *, int *);
+void pcibr_rrb_flush(vertex_hdl_t);
+int pcibr_slot_initial_rrb_alloc(vertex_hdl_t,pciio_slot_t);
void pcibr_rrb_debug(char *, pcibr_soft_t);
@@ -70,17 +69,15 @@ void pcibr_rrb_debug(char *, pcibr_soft_t);
#define RRB_SIZE (4) /* sizeof rrb within reg (bits) */
#define RRB_ENABLE_BIT(bridge) (0x8) /* [BRIDGE | PIC]_RRB_EN */
-#define NUM_PDEV_BITS(bridge) (is_pic((bridge)) ? 1 : 2)
-#define NUM_VDEV_BITS(bridge) (is_pic((bridge)) ? 2 : 1)
-#define NUMBER_VCHANNELS(bridge) (is_pic((bridge)) ? 4 : 2)
+#define NUM_PDEV_BITS(bridge) (1)
+#define NUM_VDEV_BITS(bridge) (2)
+#define NUMBER_VCHANNELS(bridge) (4)
#define SLOT_2_PDEV(bridge, slot) ((slot) >> 1)
#define SLOT_2_RRB_REG(bridge, slot) ((slot) & 0x1)
/* validate that the slot and virtual channel are valid for a given bridge */
#define VALIDATE_SLOT_n_VCHAN(bridge, s, v) \
- (is_pic((bridge)) ? \
- (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)3)) && (((v) >= 0) && ((v) <= 3))) ? 1 : 0) : \
- (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)7)) && (((v) >= 0) && ((v) <= 1))) ? 1 : 0))
+ (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)3)) && (((v) >= 0) && ((v) <= 3))) ? 1 : 0)
/*
* Count how many RRBs are marked valid for the specified PCI slot
@@ -105,16 +102,7 @@ do_pcibr_rrb_count_valid(bridge_t *bridge,
pdev_bits = SLOT_2_PDEV(bridge, slot);
rrb_bits = enable_bit | vchan_bits | pdev_bits;
- if ( is_pic(bridge) ) {
- tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
- } else {
- tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- }
+ tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
if ((tmp & RRB_MASK) == rrb_bits)
@@ -144,16 +132,7 @@ do_pcibr_rrb_count_avail(bridge_t *bridge,
enable_bit = RRB_ENABLE_BIT(bridge);
- if ( is_pic(bridge) ) {
- tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
- } else {
- tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- }
+ tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
if ((tmp & enable_bit) != enable_bit)
@@ -192,17 +171,8 @@ do_pcibr_rrb_alloc(bridge_t *bridge,
pdev_bits = SLOT_2_PDEV(bridge, slot);
rrb_bits = enable_bit | vchan_bits | pdev_bits;
- if ( is_pic(bridge) ) {
- reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- reg = tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
- } else {
- reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- }
-
+ reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
+
for (rrb_index = 0; ((rrb_index < 8) && (more > 0)); rrb_index++) {
if ((tmp & enable_bit) != enable_bit) {
/* clear the rrb and OR in the new rrb into 'reg' */
@@ -213,16 +183,7 @@ do_pcibr_rrb_alloc(bridge_t *bridge,
tmp = (tmp >> RRB_SIZE);
}
- if ( is_pic(bridge) ) {
- bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg)) = reg;
- } else {
- bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
- }
- }
+ bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
return (more ? -1 : 0);
}
@@ -255,17 +216,8 @@ do_pcibr_rrb_free(bridge_t *bridge,
pdev_bits = SLOT_2_PDEV(bridge, slot);
rrb_bits = enable_bit | vchan_bits | pdev_bits;
- if ( is_pic(bridge) ) {
- reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- reg = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
- } else {
- reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- }
-
+ reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
+
for (rrb_index = 0; ((rrb_index < 8) && (less > 0)); rrb_index++) {
if ((tmp & RRB_MASK) == rrb_bits) {
/*
@@ -281,16 +233,7 @@ do_pcibr_rrb_free(bridge_t *bridge,
tmp = (tmp >> RRB_SIZE);
}
- if ( is_pic(bridge) ) {
- bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg)) = reg;
- } else {
- bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
- }
- }
+ bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
/* call do_pcibr_rrb_clear() for all the rrbs we've freed */
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
@@ -337,50 +280,18 @@ do_pcibr_rrb_clear(bridge_t *bridge, int rrb)
* this RRB must be disabled.
*/
- if ( is_pic(bridge) ) {
- /* wait until RRB has no outstanduing XIO packets. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
+ /* wait until RRB has no outstanding XIO packets. */
+ while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
+ ; /* XXX- beats on bridge. bad idea? */
+ }
- /* if the RRB has data, drain it. */
- if (status & BRIDGE_RRB_VALID(rrb)) {
- bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
+ /* if the RRB has data, drain it. */
+ if (status & BRIDGE_RRB_VALID(rrb)) {
+ bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
- /* wait until RRB is no longer valid. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
- }
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- while ((status = BRIDGE_REG_GET32((&bridge->b_resp_status))) & BRIDGE_RRB_INUSE(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
-
- /* if the RRB has data, drain it. */
- if (status & BRIDGE_RRB_VALID(rrb)) {
- BRIDGE_REG_SET32((&bridge->b_resp_clear)) = __swab32(BRIDGE_RRB_CLEAR(rrb));
-
- /* wait until RRB is no longer valid. */
- while ((status = BRIDGE_REG_GET32((&bridge->b_resp_status))) & BRIDGE_RRB_VALID(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
- }
- } else { /* io_get_sh_swapper(NASID_GET(bridge)) */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
-
- /* if the RRB has data, drain it. */
- if (status & BRIDGE_RRB_VALID(rrb)) {
- bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
- /* wait until RRB is no longer valid. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
- }
+ /* wait until RRB is no longer valid. */
+ while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
+ ; /* XXX- beats on bridge. bad idea? */
}
}
}
@@ -399,43 +310,16 @@ do_pcibr_rrb_flush(bridge_t *bridge, int rrbn)
int shft = (RRB_SIZE * (rrbn >> 1));
unsigned long ebit = RRB_ENABLE_BIT(bridge) << shft;
- if ( is_pic(bridge) ) {
- rrbv = *rrbp;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- rrbv = BRIDGE_REG_GET32((&rrbp));
- } else {
- rrbv = *rrbp;
- }
- }
+ rrbv = *rrbp;
if (rrbv & ebit) {
- if ( is_pic(bridge) ) {
- *rrbp = rrbv & ~ebit;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&rrbp)) = __swab32((rrbv & ~ebit));
- } else {
- *rrbp = rrbv & ~ebit;
- }
- }
+ *rrbp = rrbv & ~ebit;
}
do_pcibr_rrb_clear(bridge, rrbn);
if (rrbv & ebit) {
- if ( is_pic(bridge) ) {
- *rrbp = rrbv;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&rrbp)) = __swab32(rrbv);
- } else {
- *rrbp = rrbv;
- }
- }
+ *rrbp = rrbv;
}
}
@@ -475,7 +359,7 @@ do_pcibr_rrb_autoalloc(pcibr_soft_t pcibr_soft,
* Flush all the rrb's assigned to the specified connection point.
*/
void
-pcibr_rrb_flush(devfs_handle_t pconn_vhdl)
+pcibr_rrb_flush(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t)pciio_info_mfast_get(pciio_info);
@@ -510,7 +394,7 @@ pcibr_rrb_flush(devfs_handle_t pconn_vhdl)
* device hanging off the bridge.
*/
int
-pcibr_wrb_flush(devfs_handle_t pconn_vhdl)
+pcibr_wrb_flush(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
@@ -546,7 +430,7 @@ pcibr_wrb_flush(devfs_handle_t pconn_vhdl)
* as best we can and return 0.
*/
int
-pcibr_rrb_alloc(devfs_handle_t pconn_vhdl,
+pcibr_rrb_alloc(vertex_hdl_t pconn_vhdl,
int *count_vchan0,
int *count_vchan1)
{
@@ -753,7 +637,7 @@ pcibr_rrb_alloc(devfs_handle_t pconn_vhdl,
*/
int
-pcibr_rrb_check(devfs_handle_t pconn_vhdl,
+pcibr_rrb_check(vertex_hdl_t pconn_vhdl,
int *count_vchan0,
int *count_vchan1,
int *count_reserved,
@@ -802,7 +686,7 @@ pcibr_rrb_check(devfs_handle_t pconn_vhdl,
*/
int
-pcibr_slot_initial_rrb_alloc(devfs_handle_t pcibr_vhdl,
+pcibr_slot_initial_rrb_alloc(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
@@ -889,7 +773,7 @@ rrb_reserved_free(pcibr_soft_t pcibr_soft, int slot)
*/
int
-pcibr_initial_rrb(devfs_handle_t pcibr_vhdl,
+pcibr_initial_rrb(vertex_hdl_t pcibr_vhdl,
pciio_slot_t first, pciio_slot_t last)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c
index 3d3fda15c7b3ac..de8d9a19dd63f9 100644
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -28,7 +28,6 @@
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/ate_utils.h>
@@ -41,42 +40,41 @@
#endif
-extern pcibr_info_t pcibr_info_get(devfs_handle_t);
-extern int pcibr_widget_to_bus(devfs_handle_t pcibr_vhdl);
+extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
+extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
extern pcibr_info_t pcibr_device_info_new(pcibr_soft_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-extern int pcibr_slot_initial_rrb_alloc(devfs_handle_t,pciio_slot_t);
+extern int pcibr_slot_initial_rrb_alloc(vertex_hdl_t,pciio_slot_t);
extern int pcibr_pcix_rbars_calc(pcibr_soft_t);
-int pcibr_slot_info_init(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
-int pcibr_slot_info_free(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
-int pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_addr_space_init(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
int pcibr_slot_pcix_rbar_init(pcibr_soft_t pcibr_soft, pciio_slot_t slot);
-int pcibr_slot_device_init(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
-int pcibr_slot_guest_info_init(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
-int pcibr_slot_call_device_attach(devfs_handle_t pcibr_vhdl,
+int pcibr_slot_device_init(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_guest_info_init(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_call_device_attach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot, int drv_flags);
-int pcibr_slot_call_device_detach(devfs_handle_t pcibr_vhdl,
+int pcibr_slot_call_device_detach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot, int drv_flags);
-int pcibr_slot_detach(devfs_handle_t pcibr_vhdl, pciio_slot_t slot,
+int pcibr_slot_detach(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot,
int drv_flags, char *l1_msg, int *sub_errorp);
-int pcibr_is_slot_sys_critical(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
static int pcibr_probe_slot(bridge_t *, cfg_p, unsigned int *);
-void pcibr_device_info_free(devfs_handle_t, pciio_slot_t);
+void pcibr_device_info_free(vertex_hdl_t, pciio_slot_t);
iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
pciio_space_t, int, int, int);
void pciibr_bus_addr_free(pcibr_soft_t, pciio_win_info_t);
cfg_p pcibr_find_capability(cfg_p, unsigned);
-extern uint64_t do_pcibr_config_get(int, cfg_p, unsigned, unsigned);
-void do_pcibr_config_set(int, cfg_p, unsigned, unsigned, uint64_t);
+extern uint64_t do_pcibr_config_get(cfg_p, unsigned, unsigned);
+void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
-int pcibr_slot_attach(devfs_handle_t pcibr_vhdl, pciio_slot_t slot,
+int pcibr_slot_attach(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot,
int drv_flags, char *l1_msg, int *sub_errorp);
int pcibr_slot_info_return(pcibr_soft_t pcibr_soft, pciio_slot_t slot,
pcibr_slot_info_resp_t respp);
-extern devfs_handle_t baseio_pci_vhdl;
-int scsi_ctlr_nums_add(devfs_handle_t, devfs_handle_t);
+extern vertex_hdl_t baseio_pci_vhdl;
+int scsi_ctlr_nums_add(vertex_hdl_t, vertex_hdl_t);
/* For now .... */
@@ -111,7 +109,7 @@ int max_readcount_to_bufsize[MAX_READCNT_TABLE] = {512, 1024, 2048, 4096 };
#ifdef PIC_LATER
int
-pcibr_slot_startup(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
+pcibr_slot_startup(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t reqp)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
pciio_slot_t slot;
@@ -127,11 +125,6 @@ pcibr_slot_startup(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
/* req_slot is the 'external' slot number, convert for internal use */
slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft, reqp->req_slot);
- /* Do not allow start-up of a slot in a shoehorn */
- if(nic_vertex_info_match(pcibr_soft->bs_conn, XTALK_PCI_PART_NUM)) {
- return(PCI_SLOT_IN_SHOEHORN);
- }
-
/* Check for the valid slot */
if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
return(PCI_NOT_A_SLOT);
@@ -170,7 +163,7 @@ pcibr_slot_startup(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
* Software shut-down the PCI slot
*/
int
-pcibr_slot_shutdown(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
+pcibr_slot_shutdown(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t reqp)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
bridge_t *bridge;
@@ -194,11 +187,6 @@ pcibr_slot_shutdown(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
return(PCI_NOT_A_SLOT);
- /* Do not allow shut-down of a slot in a shoehorn */
- if(nic_vertex_info_match(pcibr_soft->bs_conn, XTALK_PCI_PART_NUM)) {
- return(PCI_SLOT_IN_SHOEHORN);
- }
-
#ifdef PIC_LATER
/* Acquire update access to the bus */
mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
@@ -284,7 +272,6 @@ pcibr_slot_func_info_return(pcibr_info_h pcibr_infoh,
{
pcibr_info_t pcibr_info = pcibr_infoh[func];
int win;
- boolean_t is_sys_critical_vertex(devfs_handle_t);
funcp->resp_f_status = 0;
@@ -296,9 +283,6 @@ pcibr_slot_func_info_return(pcibr_info_h pcibr_infoh,
#if defined(SUPPORT_PRINTING_V_FORMAT)
sprintf(funcp->resp_f_slot_name, "%v", pcibr_info->f_vertex);
#endif
- if(is_sys_critical_vertex(pcibr_info->f_vertex)) {
- funcp->resp_f_status |= FUNC_IS_SYS_CRITICAL;
- }
funcp->resp_f_bus = pcibr_info->f_bus;
funcp->resp_f_slot = PCIBR_INFO_SLOT_GET_EXT(pcibr_info);
@@ -345,7 +329,6 @@ pcibr_slot_info_return(pcibr_soft_t pcibr_soft,
reg_p b_respp;
pcibr_slot_info_resp_t slotp;
pcibr_slot_func_info_resp_t funcp;
- boolean_t is_sys_critical_vertex(devfs_handle_t);
extern void snia_kmem_free(void *, int);
slotp = snia_kmem_zalloc(sizeof(*slotp), 0);
@@ -368,11 +351,6 @@ pcibr_slot_info_return(pcibr_soft_t pcibr_soft,
slotp->resp_slot_status = pss->slot_status;
slotp->resp_l1_bus_num = pcibr_widget_to_bus(pcibr_soft->bs_vhdl);
-
- if (is_sys_critical_vertex(pss->slot_conn)) {
- slotp->resp_slot_status |= SLOT_IS_SYS_CRITICAL;
- }
-
slotp->resp_bss_ninfo = pss->bss_ninfo;
for (func = 0; func < pss->bss_ninfo; func++) {
@@ -455,7 +433,7 @@ pcibr_slot_info_return(pcibr_soft_t pcibr_soft,
* External SSRAM workaround info
*/
int
-pcibr_slot_query(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
+pcibr_slot_query(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t reqp)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
pciio_slot_t slot;
@@ -481,11 +459,6 @@ pcibr_slot_query(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
return(PCI_NOT_A_SLOT);
}
- /* Do not allow a query of a slot in a shoehorn */
- if(nic_vertex_info_match(pcibr_soft->bs_conn, XTALK_PCI_PART_NUM)) {
- return(PCI_SLOT_IN_SHOEHORN);
- }
-
/* Return information for the requested PCI slot */
if (slot != PCIIO_SLOT_NONE) {
if (size < sizeof(*respp)) {
@@ -534,88 +507,6 @@ pcibr_slot_query(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
return(error);
}
-#if 0
-/*
- * pcibr_slot_reset
- * Reset the PCI device in the particular slot.
- *
- * The Xbridge does not comply with the PCI Specification
- * when resetting an indiviaudl slot. An individual slot is
- * is reset by toggling the slot's bit in the Xbridge Control
- * Register. The Xbridge will assert the target slot's
- * (non-bussed) RST signal, but does not assert the (bussed)
- * REQ64 signal as required by the specification. As
- * designed, the Xbridge cannot assert the REQ64 signal
- * becuase it may interfere with a bus transaction in progress.
- * The practical effects of this Xbridge implementation is
- * device dependent; it probably will not adversely effect
- * 32-bit cards, but may disable 64-bit data transfers by those
- * cards that normally support 64-bit data transfers.
- *
- * The Xbridge will assert REQ64 when all four slots are reset
- * by simultaneously toggling all four slot reset bits in the
- * Xbridge Control Register. This is basically a PCI bus reset
- * and asserting the (bussed) REQ64 signal will not interfere
- * with any bus transactions in progress.
- *
- * The Xbridge (and the SN0 Bridge) support resetting only
- * four PCI bus slots via the (X)bridge Control Register.
- *
- * To reset an individual slot for the PCI Hot-Plug feature
- * use the L1 console commands to power-down and then
- * power-up the slot, or use the kernel infrastructure
- * functions to power-down/up the slot when they are
- * implemented for SN1.
- */
-int
-pcibr_slot_reset(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge_t *bridge;
- bridgereg_t ctrlreg,tmp;
- volatile bridgereg_t *wrb_flush;
-
- if (!pcibr_soft)
- return(EINVAL);
-
- if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
- return(EINVAL);
-
- /* Enable the DMA operations from this device of the xtalk widget
- * (PCI host bridge in this case).
- */
- xtalk_widgetdev_enable(pcibr_soft->bs_conn, slot);
-
- /* Set the reset slot bit in the bridge's wid control register
- * to reset the PCI slot
- */
- bridge = pcibr_soft->bs_base;
-
- /* Read the bridge widget control and clear out the reset pin
- * bit for the corresponding slot.
- */
- tmp = ctrlreg = bridge->b_wid_control;
-
- tmp &= ~BRIDGE_CTRL_RST_PIN(slot);
-
- bridge->b_wid_control = tmp;
- tmp = bridge->b_wid_control;
-
- /* Restore the old control register back.
- * NOTE : PCI card gets reset when the reset pin bit
- * changes from 0 (set above) to 1 (going to be set now).
- */
-
- bridge->b_wid_control = ctrlreg;
-
- /* Flush the write buffers if any !! */
- wrb_flush = &(bridge->b_wr_req_buf[slot].reg);
- while (*wrb_flush);
-
- return(0);
-}
-#endif
-
#define PROBE_LOCK 0 /* FIXME: we're attempting to lock around accesses
* to b_int_enable. This hangs pcibr_probe_slot()
*/
@@ -627,7 +518,7 @@ pcibr_slot_reset(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
* information associated with this particular PCI device.
*/
int
-pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
+pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
@@ -650,7 +541,7 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
int nfunc;
pciio_function_t rfunc;
int func;
- devfs_handle_t conn_vhdl;
+ vertex_hdl_t conn_vhdl;
pcibr_soft_slot_t slotp;
/* Get the basic software information required to proceed */
@@ -669,10 +560,6 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
return(0);
}
- /* Check for a slot with any system critical functions */
- if (pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
- return(EPERM);
-
/* Try to read the device-id/vendor-id from the config space */
cfgw = pcibr_slot_config_addr(bridge, slot, 0);
@@ -701,7 +588,7 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
if (vendor == 0xFFFF)
return(ENODEV);
- htype = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_HEADER_TYPE, 1);
+ htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
nfunc = 1;
rfunc = PCIIO_FUNC_NONE;
pfail = 0;
@@ -750,7 +637,7 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
device = 0xFFFF & (idword >> 16);
- htype = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_HEADER_TYPE, 1);
+ htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
rfunc = func;
}
htype &= 0x7f;
@@ -810,16 +697,10 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
* Timer for these devices
*/
- lt_time = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_LATENCY_TIMER, 1);
+ lt_time = do_pcibr_config_get(cfgw, PCI_CFG_LATENCY_TIMER, 1);
if ((lt_time == 0) && !(bridge->b_device[slot].reg & BRIDGE_DEV_RT) &&
- !((vendor == IOC3_VENDOR_ID_NUM) &&
- (
-#ifdef PIC_LATER
- (device == IOC3_DEVICE_ID_NUM) ||
- (device == LINC_DEVICE_ID_NUM) ||
-#endif
- (device == 0x5 /* RAD_DEV */)))) {
+ (device == 0x5 /* RAD_DEV */)) {
unsigned min_gnt;
unsigned min_gnt_mult;
@@ -827,7 +708,7 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
* needs in increments of 250ns. But latency timer is in
* PCI clock cycles, so a conversion is needed.
*/
- min_gnt = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_MIN_GNT, 1);
+ min_gnt = do_pcibr_config_get(cfgw, PCI_MIN_GNT, 1);
if (IS_133MHZ(pcibr_soft))
min_gnt_mult = 32; /* 250ns @ 133MHz in clocks */
@@ -843,7 +724,7 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
else
lt_time = 4 * min_gnt_mult; /* 1 micro second */
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_LATENCY_TIMER, 1, lt_time);
+ do_pcibr_config_set(cfgw, PCI_CFG_LATENCY_TIMER, 1, lt_time);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_CONFIG, pcibr_vhdl,
"pcibr_slot_info_init: set Latency Timer for slot=%d, "
@@ -851,12 +732,27 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func, lt_time));
}
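[Editor's note: worked example of the conversion above.] One PCI-X clock
at 133 MHz is roughly 7.5 ns, so one 250 ns MIN_GNT unit is about 33
clocks; the code approximates that with min_gnt_mult = 32. The fallback
of 4 * min_gnt_mult therefore programs a latency timer of 128 clocks,
which at 133 MHz is just under the one microsecond noted in the comment.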
- /* Get the PCI-X capability if running in PCI-X mode. If the func
- * doesn't have a pcix capability, allocate a PCIIO_VENDOR_ID_NONE
- * pcibr_info struct so the device driver for that function is not
- * called.
+
+ /* In our architecture the setting of the cacheline size isn't
+ * beneficial for cards in PCI mode, but in PCI-X mode devices
+ * can optionally use the cacheline size value for internal
+ * device optimizations (See 7.1.5 of the PCI-X v1.0 spec).
+ * NOTE: cacheline size is in doubleword increments
*/
if (IS_PCIX(pcibr_soft)) {
+ if (!do_pcibr_config_get(cfgw, PCI_CFG_CACHE_LINE, 1)) {
+ do_pcibr_config_set(cfgw, PCI_CFG_CACHE_LINE, 1, 0x20);
+ PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_CONFIG, pcibr_vhdl,
+ "pcibr_slot_info_init: set CacheLine for slot=%d, "
+ "func=%d, to 0x20\n",
+ PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func));
+ }
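[Editor's note.] The PCI Cache Line Size register counts 32-bit
doublewords, so the 0x20 written above corresponds to 0x20 * 4 = 128
bytes; that is the line size advertised to PCI-X devices that choose to
use the hint, as the comment above explains.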
+
+ /* Get the PCI-X capability if running in PCI-X mode. If the func
+ * doesn't have a pcix capability, allocate a PCIIO_VENDOR_ID_NONE
+ * pcibr_info struct so the device driver for that function is not
+ * called.
+ */
if (!(pcix_cap = pcibr_find_capability(cfgw, PCI_CAP_PCIX))) {
printk(KERN_WARNING
#if defined(SUPPORT_PRINTING_V_FORMAT)
@@ -898,7 +794,7 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
if (func == 0)
slotp->slot_conn = conn_vhdl;
- cmd_reg = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4);
+ cmd_reg = do_pcibr_config_get(cfgw, PCI_CFG_COMMAND, 4);
wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
@@ -949,7 +845,7 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
* this could be pushed up into pciio, when we
* start supporting more PCI providers.
*/
- base = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4);
+ base = do_pcibr_config_get(wptr, (win * 4), 4);
if (base & PCI_BA_IO_SPACE) {
/* BASE is in I/O space. */
@@ -975,7 +871,7 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
} else if (base & 0xC0000000) {
base = 0; /* outside permissible range */
} else if ((code == PCI_BA_MEM_64BIT) &&
- (do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, ((win + 1)*4), 4) != 0)) {
+ (do_pcibr_config_get(wptr, ((win + 1)*4), 4) != 0)) {
base = 0; /* outside permissible range */
}
}
@@ -983,8 +879,8 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
if (base != 0) { /* estimate size */
size = base & -base;
} else { /* calculate size */
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4, ~0); /* write 1's */
- size = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4); /* read back */
+ do_pcibr_config_set(wptr, (win * 4), 4, ~0); /* write 1's */
+ size = do_pcibr_config_get(wptr, (win * 4), 4); /* read back */
size &= mask; /* keep addr */
size &= -size; /* keep lsbit */
if (size == 0)
@@ -995,45 +891,9 @@ pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
pcibr_info->f_window[win].w_base = base;
pcibr_info->f_window[win].w_size = size;
-#if defined(IOC3_VENDOR_ID_NUM) && defined(IOC3_DEVICE_ID_NUM)
- /*
- * IOC3 BASE_ADDR* BUG WORKAROUND
- *
-
- * If we write to BASE1 on the IOC3, the
- * data in BASE0 is replaced. The
- * original workaround was to remember
- * the value of BASE0 and restore it
- * when we ran off the end of the BASE
- * registers; however, a later
- * workaround was added (I think it was
- * rev 1.44) to avoid setting up
- * anything but BASE0, with the comment
- * that writing all ones to BASE1 set
- * the enable-parity-error test feature
- * in IOC3's SCR bit 14.
- *
- * So, unless we defer doing any PCI
- * space allocation until drivers
- * attach, and set up a way for drivers
- * (the IOC3 in paricular) to tell us
- * generically to keep our hands off
- * BASE registers, we gotta "know" about
- * the IOC3 here.
- *
- * Too bad the PCI folks didn't reserve the
- * all-zero value for 'no BASE here' (it is a
- * valid code for an uninitialized BASE in
- * 32-bit PCI memory space).
- */
-
- if ((vendor == IOC3_VENDOR_ID_NUM) &&
- (device == IOC3_DEVICE_ID_NUM))
- break;
-#endif
if (code == PCI_BA_MEM_64BIT) {
win++; /* skip upper half */
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4, 0); /* must be zero */
+ do_pcibr_config_set(wptr, (win * 4), 4, 0); /* must be zero */
}
} /* next win */
} /* next func */
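The "calculate size" branch in the loop above is the classic PCI BAR sizing trick: write all-ones to the register, read it back, mask off the type bits, and the lowest set address bit gives the window size (the "estimate size" branch just reuses the alignment of a firmware-assigned base). A minimal stand-alone sketch of the same idea; cfg_read32()/cfg_write32() are hypothetical stand-ins for do_pcibr_config_get()/do_pcibr_config_set(), and 64-bit memory BARs would additionally need the upper dword handled, as the loop above does with win++:

#include <stdint.h>

/* Hypothetical 32-bit config-space accessors for this sketch. */
extern uint32_t cfg_read32(unsigned off);
extern void     cfg_write32(unsigned off, uint32_t val);

#define PCI_BA_IO_SPACE  0x1U
#define PCI_BA_IO_MASK   (~0x3U)   /* I/O BAR: bits 1:0 encode the type    */
#define PCI_BA_MEM_MASK  (~0xfU)   /* memory BAR: bits 3:0 encode the type */

/* Return the decoded size of the BAR at 'off', or 0 if unimplemented. */
static uint32_t bar_size(unsigned off)
{
	uint32_t orig = cfg_read32(off);
	uint32_t mask = (orig & PCI_BA_IO_SPACE) ? PCI_BA_IO_MASK : PCI_BA_MEM_MASK;
	uint32_t probe;

	cfg_write32(off, ~0U);            /* write 1's                 */
	probe = cfg_read32(off) & mask;   /* read back, keep addr bits */
	cfg_write32(off, orig);           /* restore the original BAR  */

	return probe & -probe;            /* lowest set bit == size    */
}

Unlike the driver, the sketch restores the original BAR contents; the code above only sizes BARs that read back as zero, so there is nothing to restore.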
@@ -1056,7 +916,7 @@ pcibr_find_capability(cfg_p cfgw,
int defend_against_circular_linkedlist = 0;
/* Check to see if there is a capabilities pointer in the cfg header */
- if (!(do_pcibr_config_get(1, cfgw, PCI_CFG_STATUS, 2) & PCI_STAT_CAP_LIST)) {
+ if (!(do_pcibr_config_get(cfgw, PCI_CFG_STATUS, 2) & PCI_STAT_CAP_LIST)) {
return (NULL);
}
@@ -1067,14 +927,14 @@ pcibr_find_capability(cfg_p cfgw,
* significant bits of the next pointer must be ignored, so we mask
* with 0xfc).
*/
- cap_nxt = (do_pcibr_config_get(1, cfgw, PCI_CAPABILITIES_PTR, 1) & 0xfc);
+ cap_nxt = (do_pcibr_config_get(cfgw, PCI_CAPABILITIES_PTR, 1) & 0xfc);
while (cap_nxt && (defend_against_circular_linkedlist <= 48)) {
- cap_id = do_pcibr_config_get(1, cfgw, cap_nxt, 1);
+ cap_id = do_pcibr_config_get(cfgw, cap_nxt, 1);
if (cap_id == capability) {
return ((cfg_p)((char *)cfgw + cap_nxt));
}
- cap_nxt = (do_pcibr_config_get(1, cfgw, cap_nxt+1, 1) & 0xfc);
+ cap_nxt = (do_pcibr_config_get(cfgw, cap_nxt+1, 1) & 0xfc);
defend_against_circular_linkedlist++;
}
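pcibr_find_capability() walks the standard PCI capability list: check the capability-list bit in the status register, follow the pointer at offset 0x34, and bound the walk so a corrupt list cannot loop forever (48 hops here). A generic sketch of the same walk using byte offsets and hypothetical cfg_read8()/cfg_read16() helpers; the register offsets and status bit are the standard PCI ones:

#include <stdint.h>

extern uint8_t  cfg_read8(unsigned off);
extern uint16_t cfg_read16(unsigned off);

#define PCI_CFG_STATUS        0x06
#define PCI_STAT_CAP_LIST     0x0010
#define PCI_CAPABILITIES_PTR  0x34

/* Return the config-space offset of capability 'id', or 0 if absent. */
static unsigned find_capability(uint8_t id)
{
	unsigned ptr, loops = 0;

	if (!(cfg_read16(PCI_CFG_STATUS) & PCI_STAT_CAP_LIST))
		return 0;

	ptr = cfg_read8(PCI_CAPABILITIES_PTR) & 0xfc;   /* low 2 bits reserved */
	while (ptr && loops++ < 48) {                   /* bound the walk      */
		if (cfg_read8(ptr) == id)
			return ptr;
		ptr = cfg_read8(ptr + 1) & 0xfc;        /* next pointer        */
	}
	return 0;
}

For example, the PCI-X capability (standard ID 0x07) would be located with find_capability(0x07).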
@@ -1087,7 +947,7 @@ pcibr_find_capability(cfg_p cfgw,
* with a particular PCI device.
*/
int
-pcibr_slot_info_free(devfs_handle_t pcibr_vhdl,
+pcibr_slot_info_free(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
@@ -1223,20 +1083,21 @@ int as_debug = 0;
* the base registers in the card.
*/
int
-pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
+pcibr_slot_addr_space_init(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
bridge_t *bridge;
- size_t align_slot;
iopaddr_t mask;
int nbars;
int nfunc;
int func;
int win;
int rc = 0;
+ int align;
+ int align_slot;
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
@@ -1275,7 +1136,8 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
* the entire "lo" area is only a
* megabyte, total ...
*/
- align_slot = (slot < 2) ? 0x200000 : 0x100000;
+ align_slot = 0x100000;
+ align = align_slot;
for (func = 0; func < nfunc; ++func) {
cfg_p cfgw;
@@ -1300,7 +1162,7 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
- if ((do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_HEADER_TYPE, 1) & 0x7f) != 0)
+ if ((do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1) & 0x7f) != 0)
nbars = 2;
else
nbars = PCI_CFG_BASE_ADDRS;
@@ -1333,23 +1195,24 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
continue; /* already allocated */
}
+ align = (win) ? size : align_slot;
+
+ if (align < _PAGESZ)
+ align = _PAGESZ; /* ie. 0x00004000 */
+
switch (space) {
case PCIIO_SPACE_IO:
base = pcibr_bus_addr_alloc(pcibr_soft,
&pcibr_info->f_window[win],
PCIIO_SPACE_IO,
- 0, size, align_slot);
+ 0, size, align);
if (!base)
rc = ENOSPC;
break;
case PCIIO_SPACE_MEM:
- if ((do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4) &
+ if ((do_pcibr_config_get(wptr, (win * 4), 4) &
PCI_BA_MEM_LOCATION) == PCI_BA_MEM_1MEG) {
- int align = size; /* ie. 0x00001000 */
-
- if (align < _PAGESZ)
- align = _PAGESZ; /* ie. 0x00004000 */
/* allocate from 20-bit PCI space */
base = pcibr_bus_addr_alloc(pcibr_soft,
@@ -1363,7 +1226,7 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
base = pcibr_bus_addr_alloc(pcibr_soft,
&pcibr_info->f_window[win],
PCIIO_SPACE_MEM32,
- 0, size, align_slot);
+ 0, size, align);
if (!base)
rc = ENOSPC;
}
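The new alignment rule in this hunk replaces the old per-slot-only alignment: BAR 0 of a slot keeps the 1 MB slot alignment (align_slot), every later BAR is naturally aligned to its own size, and nothing drops below the 16 KB page size. A one-function restatement of that rule, as a hedged sketch with the constants hard-coded for illustration:

/* Alignment passed to pcibr_bus_addr_alloc() for window 'win' of size 'size'. */
static unsigned long bar_align(int win, unsigned long size)
{
	unsigned long align = win ? size : 0x100000UL;  /* BAR0: 1 MB slot alignment */

	if (align < 0x4000UL)                           /* never below _PAGESZ (16 KB) */
		align = 0x4000UL;
	return align;
}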
@@ -1377,7 +1240,7 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
PCIBR_DEVICE_TO_SLOT(pcibr_soft,slot), win, space));
}
pcibr_info->f_window[win].w_base = base;
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4, base);
+ do_pcibr_config_set(wptr, (win * 4), 4, base);
#if defined(SUPPORT_PRINTING_R_FORMAT)
if (pcibr_debug_mask & PCIBR_DEBUG_BAR) {
@@ -1405,26 +1268,22 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
/*
* Allocate space for the EXPANSION ROM
- * NOTE: DO NOT DO THIS ON AN IOC3,
- * as it blows the system away.
*/
base = size = 0;
- if ((pcibr_soft->bs_slot[slot].bss_vendor_id != IOC3_VENDOR_ID_NUM) ||
- (pcibr_soft->bs_slot[slot].bss_device_id != IOC3_DEVICE_ID_NUM)) {
-
+ {
wptr = cfgw + PCI_EXPANSION_ROM / 4;
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, 0, 4, 0xFFFFF000);
- mask = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, 0, 4);
+ do_pcibr_config_set(wptr, 0, 4, 0xFFFFF000);
+ mask = do_pcibr_config_get(wptr, 0, 4);
if (mask & 0xFFFFF000) {
size = mask & -mask;
base = pcibr_bus_addr_alloc(pcibr_soft,
&pcibr_info->f_rwindow,
PCIIO_SPACE_MEM32,
- 0, size, align_slot);
+ 0, size, align);
if (!base)
rc = ENOSPC;
else {
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, 0, 4, base);
+ do_pcibr_config_set(wptr, 0, 4, base);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_BAR, pcibr_vhdl,
"pcibr_slot_addr_space_init: slot=%d, func=%d, "
"ROM in [0x%X..0x%X], allocated by pcibr\n",
@@ -1435,7 +1294,7 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
}
pcibr_info->f_rbase = base;
pcibr_info->f_rsize = size;
-
+
/*
* if necessary, update the board's
* command register to enable decoding
@@ -1463,7 +1322,7 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
pci_cfg_cmd_reg_add |= PCI_CMD_BUS_MASTER;
- pci_cfg_cmd_reg = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4);
+ pci_cfg_cmd_reg = do_pcibr_config_get(cfgw, PCI_CFG_COMMAND, 4);
#if PCI_FBBE /* XXX- check here to see if dev can do fast-back-to-back */
if (!((pci_cfg_cmd_reg >> 16) & PCI_STAT_F_BK_BK_CAP))
@@ -1471,7 +1330,7 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
#endif
pci_cfg_cmd_reg &= 0xFFFF;
if (pci_cfg_cmd_reg_add & ~pci_cfg_cmd_reg)
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4,
+ do_pcibr_config_set(cfgw, PCI_CFG_COMMAND, 4,
pci_cfg_cmd_reg | pci_cfg_cmd_reg_add);
} /* next func */
return(rc);
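The command-register update just above is a read-modify-write that only touches the device when a wanted decode bit is missing. A compact sketch of the pattern, using the standard PCI command bits and the same hypothetical cfg_read32()/cfg_write32() accessors as the earlier BAR-sizing sketch:

#include <stdint.h>

extern uint32_t cfg_read32(unsigned off);
extern void     cfg_write32(unsigned off, uint32_t val);

#define PCI_CFG_COMMAND    0x04
#define PCI_CMD_IO_SPACE   0x0001
#define PCI_CMD_MEM_SPACE  0x0002
#define PCI_CMD_BUS_MASTER 0x0004

/* Turn on decode/bus-master bits only if they are not already set. */
static void enable_decode(uint32_t want)
{
	uint32_t cmd = cfg_read32(PCI_CFG_COMMAND) & 0xffff;  /* low half is COMMAND */

	if (want & ~cmd)                                      /* a wanted bit is missing */
		cfg_write32(PCI_CFG_COMMAND, cmd | want);
}

A typical call would be enable_decode(PCI_CMD_MEM_SPACE | PCI_CMD_IO_SPACE | PCI_CMD_BUS_MASTER), mirroring how pci_cfg_cmd_reg_add is built up above.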
@@ -1483,7 +1342,7 @@ pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
*/
int
-pcibr_slot_device_init(devfs_handle_t pcibr_vhdl,
+pcibr_slot_device_init(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
@@ -1525,8 +1384,6 @@ pcibr_slot_device_init(devfs_handle_t pcibr_vhdl,
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pcibr_vhdl,
"pcibr_slot_device_init: Device(%d): %R\n",
slot, devreg, device_bits));
-#else
- printk("pcibr_slot_device_init: Device(%d) 0x%x\n", slot, devreg);
#endif
return(0);
}
@@ -1536,7 +1393,7 @@ pcibr_slot_device_init(devfs_handle_t pcibr_vhdl,
* Setup the host/guest relations for a PCI slot.
*/
int
-pcibr_slot_guest_info_init(devfs_handle_t pcibr_vhdl,
+pcibr_slot_guest_info_init(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
@@ -1605,18 +1462,17 @@ pcibr_slot_guest_info_init(devfs_handle_t pcibr_vhdl,
* card in this slot.
*/
int
-pcibr_slot_call_device_attach(devfs_handle_t pcibr_vhdl,
+pcibr_slot_call_device_attach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot,
int drv_flags)
{
pcibr_soft_t pcibr_soft;
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
- async_attach_t aa = NULL;
int func;
- devfs_handle_t xconn_vhdl, conn_vhdl;
+ vertex_hdl_t xconn_vhdl, conn_vhdl;
#ifdef PIC_LATER
- devfs_handle_t scsi_vhdl;
+ vertex_hdl_t scsi_vhdl;
#endif
int nfunc;
int error_func;
@@ -1639,7 +1495,6 @@ pcibr_slot_call_device_attach(devfs_handle_t pcibr_vhdl,
}
xconn_vhdl = pcibr_soft->bs_conn;
- aa = async_attach_get_info(xconn_vhdl);
nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
@@ -1656,13 +1511,6 @@ pcibr_slot_call_device_attach(devfs_handle_t pcibr_vhdl,
conn_vhdl = pcibr_info->f_vertex;
-#ifdef LATER
- /*
- * Activate if and when we support cdl.
- */
- if (aa)
- async_attach_add_info(conn_vhdl, aa);
-#endif /* LATER */
error_func = pciio_device_attach(conn_vhdl, drv_flags);
@@ -1728,7 +1576,7 @@ pcibr_slot_call_device_attach(devfs_handle_t pcibr_vhdl,
* card in this slot.
*/
int
-pcibr_slot_call_device_detach(devfs_handle_t pcibr_vhdl,
+pcibr_slot_call_device_detach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot,
int drv_flags)
{
@@ -1736,7 +1584,7 @@ pcibr_slot_call_device_detach(devfs_handle_t pcibr_vhdl,
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
int func;
- devfs_handle_t conn_vhdl = GRAPH_VERTEX_NONE;
+ vertex_hdl_t conn_vhdl = GRAPH_VERTEX_NONE;
int nfunc;
int error_func;
int error_slot = 0;
@@ -1811,7 +1659,7 @@ pcibr_slot_call_device_detach(devfs_handle_t pcibr_vhdl,
* PCI card on the bus.
*/
int
-pcibr_slot_attach(devfs_handle_t pcibr_vhdl,
+pcibr_slot_attach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot,
int drv_flags,
char *l1_msg,
@@ -1850,7 +1698,7 @@ pcibr_slot_attach(devfs_handle_t pcibr_vhdl,
* slot-specific freeing that needs to be done.
*/
int
-pcibr_slot_detach(devfs_handle_t pcibr_vhdl,
+pcibr_slot_detach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot,
int drv_flags,
char *l1_msg,
@@ -1859,10 +1707,6 @@ pcibr_slot_detach(devfs_handle_t pcibr_vhdl,
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
int error;
- /* Make sure that we do not detach a system critical function vertex */
- if(pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
- return(PCI_IS_SYS_CRITICAL);
-
/* Call the device detach function */
error = (pcibr_slot_call_device_detach(pcibr_vhdl, slot, drv_flags));
if (error) {
@@ -1892,61 +1736,6 @@ pcibr_slot_detach(devfs_handle_t pcibr_vhdl,
}
/*
- * pcibr_is_slot_sys_critical
- * Check slot for any functions that are system critical.
- * Return 1 if any are system critical or 0 otherwise.
- *
- * This function will always return 0 when called by
- * pcibr_attach() because the system critical vertices
- * have not yet been set in the hwgraph.
- */
-int
-pcibr_is_slot_sys_critical(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- devfs_handle_t conn_vhdl = GRAPH_VERTEX_NONE;
- int nfunc;
- int func;
- boolean_t is_sys_critical_vertex(devfs_handle_t);
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (!pcibr_soft)
- return(EINVAL);
-
- if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
- return(EINVAL);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
-
- for (func = 0; func < nfunc; ++func) {
-
- pcibr_info = pcibr_infoh[func];
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- conn_vhdl = pcibr_info->f_vertex;
- if (is_sys_critical_vertex(conn_vhdl)) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "%v is a system critical device vertex\n", conn_vhdl);
-#else
- printk(KERN_WARNING "%p is a system critical device vertex\n", (void *)conn_vhdl);
-#endif
- return(1);
- }
-
- }
-
- return(0);
-}
-
-/*
* pcibr_probe_slot_pic: read a config space word
* while trapping any errors; return zero if
* all went OK, or nonzero if there was an error.
@@ -1984,57 +1773,6 @@ pcibr_probe_slot_pic(bridge_t *bridge,
}
/*
- * pcibr_probe_slot_non_pic: read a config space word
- * while trapping any errors; return zero if
- * all went OK, or nonzero if there was an error.
- * The value read, if any, is passed back
- * through the valp parameter.
- */
-static int
-pcibr_probe_slot_non_pic(bridge_t *bridge,
- cfg_p cfg,
- unsigned *valp)
-{
- int rv;
- bridgereg_t b_old_enable = (bridgereg_t)0, b_new_enable = (bridgereg_t)0;
- extern int badaddr_val(volatile void *, int, volatile void *);
-
- b_old_enable = bridge->b_int_enable;
- b_new_enable = b_old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- bridge->b_int_enable = b_new_enable;
-
- /*
- * The xbridge doesn't clear b_err_int_view unless
- * multi-err is cleared...
- */
- if (is_xbridge(bridge)) {
- if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT)
- bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
- }
-
- if (bridge->b_int_status & BRIDGE_IRR_PCI_GRP) {
- bridge->b_int_rst_stat = BRIDGE_IRR_PCI_GRP_CLR;
- (void) bridge->b_wid_tflush; /* flushbus */
- }
- rv = badaddr_val((void *) (((uint64_t)cfg) ^ 4), 4, valp);
- /*
- * The xbridge doesn't set master timeout in b_int_status
- * here. Fortunately it's in error_interrupt_view.
- */
- if (is_xbridge(bridge)) {
- if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT) {
- bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
- rv = 1; /* unoccupied slot */
- }
- }
- bridge->b_int_enable = b_old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
- return(rv);
-}
-
-
-/*
* pcibr_probe_slot: read a config space word
* while trapping any errors; return zero if
* all went OK, or nonzero if there was an error.
@@ -2046,15 +1784,12 @@ pcibr_probe_slot(bridge_t *bridge,
cfg_p cfg,
unsigned *valp)
{
- if ( is_pic(bridge) )
- return(pcibr_probe_slot_pic(bridge, cfg, valp));
- else
- return(pcibr_probe_slot_non_pic(bridge, cfg, valp));
+ return(pcibr_probe_slot_pic(bridge, cfg, valp));
}
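With the non-PIC variant removed, pcibr_probe_slot() is now a thin wrapper around the PIC path, but the probing technique itself is unchanged and is visible in the deleted code above: mask the bridge's PCI master-timeout interrupt, do the read through badaddr_val() so a bus error is trapped rather than fatal, then restore the interrupt mask. A stripped-down sketch of that pattern, with the register plumbing and xbridge quirks omitted:

#include <stdint.h>

/* Trapped-probe pattern: nonzero return means nothing answered the read. */
static int probe_word(volatile uint64_t *int_enable, uint64_t timeout_bit,
		      volatile void *cfg, unsigned *valp)
{
	extern int badaddr_val(volatile void *, int, volatile void *);
	uint64_t old = *int_enable;
	int rv;

	*int_enable = old & ~timeout_bit;  /* mask PCI master-timeout interrupts */
	rv = badaddr_val(cfg, 4, valp);    /* trapped 4-byte config read         */
	*int_enable = old;                 /* restore the interrupt mask         */
	return rv;                         /* nonzero => empty slot              */
}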
void
-pcibr_device_info_free(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
+pcibr_device_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
pcibr_info_t pcibr_info;
@@ -2079,9 +1814,9 @@ pcibr_device_info_free(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
/* Disable memory and I/O BARs */
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
- cmd_reg = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4);
+ cmd_reg = do_pcibr_config_get(cfgw, PCI_CFG_COMMAND, 4);
cmd_reg &= (PCI_CMD_MEM_SPACE | PCI_CMD_IO_SPACE);
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4, cmd_reg);
+ do_pcibr_config_set(cfgw, PCI_CFG_COMMAND, 4, cmd_reg);
for (bar = 0; bar < PCI_CFG_BASE_ADDRS; bar++) {
if (pcibr_info->f_window[bar].w_space == PCIIO_SPACE_NONE)
@@ -2181,7 +1916,7 @@ pciibr_bus_addr_free(pcibr_soft_t pcibr_soft, pciio_win_info_t win_info_p)
* io_brick_tab[] array defined in ml/SN/iograph.c
*/
int
-pcibr_widget_to_bus(devfs_handle_t pcibr_vhdl)
+pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
xwidgetnum_t widget = pcibr_soft->bs_xid;
diff --git a/arch/ia64/sn/io/sn2/pciio.c b/arch/ia64/sn/io/sn2/pciio.c
index 5af418be3464f1..e0a147a57dc5dd 100644
--- a/arch/ia64/sn/io/sn2/pciio.c
+++ b/arch/ia64/sn/io/sn2/pciio.c
@@ -7,8 +7,6 @@
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
-#define USRPCI 0
-
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
@@ -44,13 +42,8 @@
#undef DEBUG_PCIIO /* turn this on for yet more console output */
-#define GET_NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
-#define DO_DEL(ptr) (kfree(ptr))
-
char pciio_info_fingerprint[] = "pciio_info";
-cdl_p pciio_registry = NULL;
-
int
badaddr_val(volatile void *addr, int len, volatile void *ptr)
{
@@ -97,8 +90,6 @@ get_master_baseio_nasid(void)
extern char master_baseio_wid;
if (master_baseio_nasid < 0) {
- nasid_t tmp;
-
master_baseio_nasid = ia64_sn_get_master_baseio_nasid();
if ( master_baseio_nasid >= 0 ) {
@@ -109,13 +100,13 @@ get_master_baseio_nasid(void)
}
int
-hub_dma_enabled(devfs_handle_t xconn_vhdl)
+hub_dma_enabled(vertex_hdl_t xconn_vhdl)
{
return(0);
}
int
-hub_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
+hub_error_devenable(vertex_hdl_t xconn_vhdl, int devnum, int error_code)
{
return(0);
}
@@ -153,66 +144,64 @@ ioerror_dump(char *name, int error_code, int error_mode, ioerror_t *ioerror)
*/
#if !defined(DEV_FUNC)
-static pciio_provider_t *pciio_to_provider_fns(devfs_handle_t dev);
+static pciio_provider_t *pciio_to_provider_fns(vertex_hdl_t dev);
#endif
-pciio_piomap_t pciio_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
+pciio_piomap_t pciio_piomap_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
void pciio_piomap_free(pciio_piomap_t);
caddr_t pciio_piomap_addr(pciio_piomap_t, iopaddr_t, size_t);
void pciio_piomap_done(pciio_piomap_t);
-caddr_t pciio_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-caddr_t pciio_pio_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, pciio_piomap_t *, unsigned);
+caddr_t pciio_piotrans_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
+caddr_t pciio_pio_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, pciio_piomap_t *, unsigned);
-iopaddr_t pciio_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
-void pciio_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
+iopaddr_t pciio_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, size_t, size_t);
+void pciio_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
-pciio_dmamap_t pciio_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
+pciio_dmamap_t pciio_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
void pciio_dmamap_free(pciio_dmamap_t);
iopaddr_t pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
-alenlist_t pciio_dmamap_list(pciio_dmamap_t, alenlist_t, unsigned);
void pciio_dmamap_done(pciio_dmamap_t);
-iopaddr_t pciio_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t pciio_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
+iopaddr_t pciio_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
void pciio_dmamap_drain(pciio_dmamap_t);
-void pciio_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
-void pciio_dmalist_drain(devfs_handle_t, alenlist_t);
-iopaddr_t pciio_dma_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, pciio_dmamap_t *, unsigned);
+void pciio_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
+void pciio_dmalist_drain(vertex_hdl_t, alenlist_t);
+iopaddr_t pciio_dma_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, pciio_dmamap_t *, unsigned);
-pciio_intr_t pciio_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
+pciio_intr_t pciio_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
void pciio_intr_free(pciio_intr_t);
int pciio_intr_connect(pciio_intr_t, intr_func_t, intr_arg_t);
void pciio_intr_disconnect(pciio_intr_t);
-devfs_handle_t pciio_intr_cpu_get(pciio_intr_t);
+vertex_hdl_t pciio_intr_cpu_get(pciio_intr_t);
void pciio_slot_func_to_name(char *, pciio_slot_t, pciio_function_t);
-void pciio_provider_startup(devfs_handle_t);
-void pciio_provider_shutdown(devfs_handle_t);
+void pciio_provider_startup(vertex_hdl_t);
+void pciio_provider_shutdown(vertex_hdl_t);
-pciio_endian_t pciio_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
-pciio_priority_t pciio_priority_set(devfs_handle_t, pciio_priority_t);
-devfs_handle_t pciio_intr_dev_get(pciio_intr_t);
+pciio_endian_t pciio_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
+pciio_priority_t pciio_priority_set(vertex_hdl_t, pciio_priority_t);
+vertex_hdl_t pciio_intr_dev_get(pciio_intr_t);
-devfs_handle_t pciio_pio_dev_get(pciio_piomap_t);
+vertex_hdl_t pciio_pio_dev_get(pciio_piomap_t);
pciio_slot_t pciio_pio_slot_get(pciio_piomap_t);
pciio_space_t pciio_pio_space_get(pciio_piomap_t);
iopaddr_t pciio_pio_pciaddr_get(pciio_piomap_t);
ulong pciio_pio_mapsz_get(pciio_piomap_t);
caddr_t pciio_pio_kvaddr_get(pciio_piomap_t);
-devfs_handle_t pciio_dma_dev_get(pciio_dmamap_t);
+vertex_hdl_t pciio_dma_dev_get(pciio_dmamap_t);
pciio_slot_t pciio_dma_slot_get(pciio_dmamap_t);
-pciio_info_t pciio_info_chk(devfs_handle_t);
-pciio_info_t pciio_info_get(devfs_handle_t);
-void pciio_info_set(devfs_handle_t, pciio_info_t);
-devfs_handle_t pciio_info_dev_get(pciio_info_t);
+pciio_info_t pciio_info_chk(vertex_hdl_t);
+pciio_info_t pciio_info_get(vertex_hdl_t);
+void pciio_info_set(vertex_hdl_t, pciio_info_t);
+vertex_hdl_t pciio_info_dev_get(pciio_info_t);
pciio_slot_t pciio_info_slot_get(pciio_info_t);
pciio_function_t pciio_info_function_get(pciio_info_t);
pciio_vendor_id_t pciio_info_vendor_id_get(pciio_info_t);
pciio_device_id_t pciio_info_device_id_get(pciio_info_t);
-devfs_handle_t pciio_info_master_get(pciio_info_t);
+vertex_hdl_t pciio_info_master_get(pciio_info_t);
arbitrary_info_t pciio_info_mfast_get(pciio_info_t);
pciio_provider_t *pciio_info_pops_get(pciio_info_t);
error_handler_f *pciio_info_efunc_get(pciio_info_t);
@@ -223,30 +212,28 @@ size_t pciio_info_bar_size_get(pciio_info_t, int);
iopaddr_t pciio_info_rom_base_get(pciio_info_t);
size_t pciio_info_rom_size_get(pciio_info_t);
-void pciio_init(void);
-int pciio_attach(devfs_handle_t);
+int pciio_attach(vertex_hdl_t);
-void pciio_provider_register(devfs_handle_t, pciio_provider_t *pciio_fns);
-void pciio_provider_unregister(devfs_handle_t);
-pciio_provider_t *pciio_provider_fns_get(devfs_handle_t);
+void pciio_provider_register(vertex_hdl_t, pciio_provider_t *pciio_fns);
+void pciio_provider_unregister(vertex_hdl_t);
+pciio_provider_t *pciio_provider_fns_get(vertex_hdl_t);
int pciio_driver_register(pciio_vendor_id_t, pciio_device_id_t, char *driver_prefix, unsigned);
-void pciio_driver_unregister(char *driver_prefix);
-devfs_handle_t pciio_device_register(devfs_handle_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
+vertex_hdl_t pciio_device_register(vertex_hdl_t, vertex_hdl_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-void pciio_device_unregister(devfs_handle_t);
-pciio_info_t pciio_device_info_new(pciio_info_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
+void pciio_device_unregister(vertex_hdl_t);
+pciio_info_t pciio_device_info_new(pciio_info_t, vertex_hdl_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
void pciio_device_info_free(pciio_info_t);
-devfs_handle_t pciio_device_info_register(devfs_handle_t, pciio_info_t);
-void pciio_device_info_unregister(devfs_handle_t, pciio_info_t);
-int pciio_device_attach(devfs_handle_t, int);
-int pciio_device_detach(devfs_handle_t, int);
-void pciio_error_register(devfs_handle_t, error_handler_f *, error_handler_arg_t);
+vertex_hdl_t pciio_device_info_register(vertex_hdl_t, pciio_info_t);
+void pciio_device_info_unregister(vertex_hdl_t, pciio_info_t);
+int pciio_device_attach(vertex_hdl_t, int);
+int pciio_device_detach(vertex_hdl_t, int);
+void pciio_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
-int pciio_reset(devfs_handle_t);
-int pciio_write_gather_flush(devfs_handle_t);
-int pciio_slot_inuse(devfs_handle_t);
+int pciio_reset(vertex_hdl_t);
+int pciio_write_gather_flush(vertex_hdl_t);
+int pciio_slot_inuse(vertex_hdl_t);
/* =====================================================================
* Provider Function Location
@@ -261,7 +248,7 @@ int pciio_slot_inuse(devfs_handle_t);
#if !defined(DEV_FUNC)
static pciio_provider_t *
-pciio_to_provider_fns(devfs_handle_t dev)
+pciio_to_provider_fns(vertex_hdl_t dev)
{
pciio_info_t card_info;
pciio_provider_t *provider_fns;
@@ -316,7 +303,7 @@ pciio_to_provider_fns(devfs_handle_t dev)
*/
pciio_piomap_t
-pciio_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
+pciio_piomap_alloc(vertex_hdl_t dev, /* set up mapping for this device */
device_desc_t dev_desc, /* device descriptor */
pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
iopaddr_t addr, /* lowest address (or offset in window) */
@@ -354,7 +341,7 @@ pciio_piomap_done(pciio_piomap_t pciio_piomap)
}
caddr_t
-pciio_piotrans_addr(devfs_handle_t dev, /* translate for this device */
+pciio_piotrans_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
iopaddr_t addr, /* starting address (or offset in window) */
@@ -366,7 +353,7 @@ pciio_piotrans_addr(devfs_handle_t dev, /* translate for this device */
}
caddr_t
-pciio_pio_addr(devfs_handle_t dev, /* translate for this device */
+pciio_pio_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
iopaddr_t addr, /* starting address (or offset in window) */
@@ -410,7 +397,7 @@ pciio_pio_addr(devfs_handle_t dev, /* translate for this device */
}
iopaddr_t
-pciio_piospace_alloc(devfs_handle_t dev, /* Device requiring space */
+pciio_piospace_alloc(vertex_hdl_t dev, /* Device requiring space */
device_desc_t dev_desc, /* Device descriptor */
pciio_space_t space, /* MEM32/MEM64/IO */
size_t byte_count, /* Size of mapping */
@@ -423,7 +410,7 @@ pciio_piospace_alloc(devfs_handle_t dev, /* Device requiring space */
}
void
-pciio_piospace_free(devfs_handle_t dev, /* Device freeing space */
+pciio_piospace_free(vertex_hdl_t dev, /* Device freeing space */
pciio_space_t space, /* Type of space */
iopaddr_t pciaddr, /* starting address */
size_t byte_count)
@@ -440,7 +427,7 @@ pciio_piospace_free(devfs_handle_t dev, /* Device freeing space */
*/
pciio_dmamap_t
-pciio_dmamap_alloc(devfs_handle_t dev, /* set up mappings for this device */
+pciio_dmamap_alloc(vertex_hdl_t dev, /* set up mappings for this device */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags)
@@ -465,15 +452,6 @@ pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap, /* use these mapping resources */
(CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
}
-alenlist_t
-pciio_dmamap_list(pciio_dmamap_t pciio_dmamap, /* use these mapping resources */
- alenlist_t alenlist, /* map this Address/Length List */
- unsigned flags)
-{
- return DMAMAP_FUNC(pciio_dmamap, dmamap_list)
- (CAST_DMAMAP(pciio_dmamap), alenlist, flags);
-}
-
void
pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
{
@@ -482,7 +460,7 @@ pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
}
iopaddr_t
-pciio_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
+pciio_dmatrans_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
@@ -492,18 +470,8 @@ pciio_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
(dev, dev_desc, paddr, byte_count, flags);
}
-alenlist_t
-pciio_dmatrans_list(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- alenlist_t palenlist, /* system address/length list */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_list)
- (dev, dev_desc, palenlist, flags);
-}
-
iopaddr_t
-pciio_dma_addr(devfs_handle_t dev, /* translate for this device */
+pciio_dma_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
@@ -553,14 +521,14 @@ pciio_dmamap_drain(pciio_dmamap_t map)
}
void
-pciio_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
+pciio_dmaaddr_drain(vertex_hdl_t dev, paddr_t addr, size_t size)
{
DEV_FUNC(dev, dmaaddr_drain)
(dev, addr, size);
}
void
-pciio_dmalist_drain(devfs_handle_t dev, alenlist_t list)
+pciio_dmalist_drain(vertex_hdl_t dev, alenlist_t list)
{
DEV_FUNC(dev, dmalist_drain)
(dev, list);
@@ -577,10 +545,10 @@ pciio_dmalist_drain(devfs_handle_t dev, alenlist_t list)
* Return resource handle in intr_hdl.
*/
pciio_intr_t
-pciio_intr_alloc(devfs_handle_t dev, /* which Crosstalk device */
+pciio_intr_alloc(vertex_hdl_t dev, /* which Crosstalk device */
device_desc_t dev_desc, /* device descriptor */
pciio_intr_line_t lines, /* INTR line(s) to attach */
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{ /* owner of this interrupt */
return (pciio_intr_t) DEV_FUNC(dev, intr_alloc)
(dev, dev_desc, lines, owner_dev);
@@ -624,7 +592,7 @@ pciio_intr_disconnect(pciio_intr_t intr_hdl)
* Return a hwgraph vertex that represents the CPU currently
* targeted by an interrupt.
*/
-devfs_handle_t
+vertex_hdl_t
pciio_intr_cpu_get(pciio_intr_t intr_hdl)
{
return INTR_FUNC(intr_hdl, intr_cpu_get)
@@ -663,12 +631,12 @@ pciio_slot_func_to_name(char *name,
*/
static pciio_info_t
pciio_cardinfo_get(
- devfs_handle_t pciio_vhdl,
+ vertex_hdl_t pciio_vhdl,
pciio_slot_t pci_slot)
{
char namebuf[16];
pciio_info_t info = 0;
- devfs_handle_t conn;
+ vertex_hdl_t conn;
pciio_slot_func_to_name(namebuf, pci_slot, PCIIO_FUNC_NONE);
if (GRAPH_SUCCESS ==
@@ -699,22 +667,16 @@ pciio_cardinfo_get(
/*ARGSUSED */
int
pciio_error_handler(
- devfs_handle_t pciio_vhdl,
+ vertex_hdl_t pciio_vhdl,
int error_code,
ioerror_mode_t mode,
ioerror_t *ioerror)
{
pciio_info_t pciio_info;
- devfs_handle_t pconn_vhdl;
-#if USRPCI
- devfs_handle_t usrpci_v;
-#endif
+ vertex_hdl_t pconn_vhdl;
pciio_slot_t slot;
int retval;
-#ifdef EHE_ENABLE
- error_state_t e_state;
-#endif /* EHE_ENABLE */
#if DEBUG && ERROR_DEBUG
printk("%v: pciio_error_handler\n", pciio_vhdl);
@@ -733,16 +695,6 @@ pciio_error_handler(
if (pciio_info && pciio_info->c_efunc) {
pconn_vhdl = pciio_info_dev_get(pciio_info);
-#ifdef EHE_ENABLE
- e_state = error_state_get(pciio_vhdl);
-
- if (e_state == ERROR_STATE_ACTION)
- (void)error_state_set(pciio_vhdl, ERROR_STATE_NONE);
-
- if (error_state_set(pconn_vhdl,e_state) == ERROR_RETURN_CODE_CANNOT_SET_STATE)
- return(IOERROR_UNHANDLED);
-#endif
-
retval = pciio_info->c_efunc
(pciio_info->c_einfo, error_code, mode, ioerror);
if (retval != IOERROR_UNHANDLED)
@@ -770,49 +722,11 @@ pciio_error_handler(
pconn_vhdl = pciio_info_dev_get(pciio_info);
-#ifdef EHE_ENABLE
- e_state = error_state_get(pciio_vhdl);
-
- if (e_state == ERROR_STATE_ACTION)
- (void)error_state_set(pciio_vhdl, ERROR_STATE_NONE);
-
- if (error_state_set(pconn_vhdl,e_state) ==
- ERROR_RETURN_CODE_CANNOT_SET_STATE)
- return(IOERROR_UNHANDLED);
-#endif /* EHE_ENABLE */
-
retval = pciio_info->c_efunc
(pciio_info->c_einfo, error_code, mode, ioerror);
if (retval != IOERROR_UNHANDLED)
return retval;
}
-
-#if USRPCI
- /* If the USRPCI driver is available and
- * knows about this connection point,
- * deliver the error to it.
- *
- * OK to use pconn_vhdl here, even though we
- * have already UNREF'd it, since we know that
- * it is not going away.
- */
- pconn_vhdl = pciio_info_dev_get(pciio_info);
- if (GRAPH_SUCCESS == hwgraph_traverse(pconn_vhdl, EDGE_LBL_USRPCI, &usrpci_v)) {
- iopaddr_t busaddr;
- IOERROR_GETVALUE(busaddr, ioerror, busaddr);
- retval = usrpci_error_handler (usrpci_v, error_code, busaddr);
- hwgraph_vertex_unref(usrpci_v);
- if (retval != IOERROR_UNHANDLED) {
- /*
- * This unref is not needed. If this code is called often enough,
- * the system will crash, due to vertex reference count reaching 0,
- * causing vertex to be unallocated. -jeremy
- * hwgraph_vertex_unref(pconn_vhdl);
- */
- return retval;
- }
- }
-#endif
}
}
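What remains of pciio_error_handler() after the EHE_ENABLE and USRPCI removals is the per-device dispatch pattern: a driver registers an (efunc, einfo) pair via pciio_error_register(), the bus error path calls it, and IOERROR_UNHANDLED means "keep looking". A hedged sketch of that pattern; the IOERROR_* values here are placeholders, not the kernel's definitions:

enum { IOERROR_HANDLED = 0, IOERROR_UNHANDLED = -1 };   /* placeholder values */

struct err_reg {
	int  (*efunc)(void *einfo, int code, int mode, void *ioerror);
	void  *einfo;
};

/* Forward an error to the registered handler, if any. */
static int dispatch_error(struct err_reg *r, int code, int mode, void *ioe)
{
	if (r && r->efunc) {
		int rv = r->efunc(r->einfo, code, mode, ioe);
		if (rv != IOERROR_UNHANDLED)
			return rv;          /* handled by the device driver      */
	}
	return IOERROR_UNHANDLED;           /* caller falls back to generic path */
}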
@@ -829,7 +743,7 @@ pciio_error_handler(
* Startup a crosstalk provider
*/
void
-pciio_provider_startup(devfs_handle_t pciio_provider)
+pciio_provider_startup(vertex_hdl_t pciio_provider)
{
DEV_FUNC(pciio_provider, provider_startup)
(pciio_provider);
@@ -839,7 +753,7 @@ pciio_provider_startup(devfs_handle_t pciio_provider)
* Shutdown a crosstalk provider
*/
void
-pciio_provider_shutdown(devfs_handle_t pciio_provider)
+pciio_provider_shutdown(vertex_hdl_t pciio_provider)
{
DEV_FUNC(pciio_provider, provider_shutdown)
(pciio_provider);
@@ -851,7 +765,7 @@ pciio_provider_shutdown(devfs_handle_t pciio_provider)
* how things will actually appear in memory.
*/
pciio_endian_t
-pciio_endian_set(devfs_handle_t dev,
+pciio_endian_set(vertex_hdl_t dev,
pciio_endian_t device_end,
pciio_endian_t desired_end)
{
@@ -880,7 +794,7 @@ pciio_endian_set(devfs_handle_t dev,
* Specify PCI arbitration priority.
*/
pciio_priority_t
-pciio_priority_set(devfs_handle_t dev,
+pciio_priority_set(vertex_hdl_t dev,
pciio_priority_t device_prio)
{
ASSERT((device_prio == PCI_PRIO_HIGH) || (device_prio == PCI_PRIO_LOW));
@@ -893,7 +807,7 @@ pciio_priority_set(devfs_handle_t dev,
* Read value of configuration register
*/
uint64_t
-pciio_config_get(devfs_handle_t dev,
+pciio_config_get(vertex_hdl_t dev,
unsigned reg,
unsigned size)
{
@@ -923,7 +837,7 @@ pciio_config_get(devfs_handle_t dev,
* Change value of configuration register
*/
void
-pciio_config_set(devfs_handle_t dev,
+pciio_config_set(vertex_hdl_t dev,
unsigned reg,
unsigned size,
uint64_t value)
@@ -953,7 +867,7 @@ pciio_config_set(devfs_handle_t dev,
* Issue a hardware reset to a card.
*/
int
-pciio_reset(devfs_handle_t dev)
+pciio_reset(vertex_hdl_t dev)
{
return DEV_FUNC(dev, reset) (dev);
}
@@ -962,19 +876,19 @@ pciio_reset(devfs_handle_t dev)
* flush write gather buffers
*/
int
-pciio_write_gather_flush(devfs_handle_t dev)
+pciio_write_gather_flush(vertex_hdl_t dev)
{
return DEV_FUNC(dev, write_gather_flush) (dev);
}
-devfs_handle_t
+vertex_hdl_t
pciio_intr_dev_get(pciio_intr_t pciio_intr)
{
return (pciio_intr->pi_dev);
}
/****** Generic crosstalk pio interfaces ******/
-devfs_handle_t
+vertex_hdl_t
pciio_pio_dev_get(pciio_piomap_t pciio_piomap)
{
return (pciio_piomap->pp_dev);
@@ -1011,7 +925,7 @@ pciio_pio_kvaddr_get(pciio_piomap_t pciio_piomap)
}
/****** Generic crosstalk dma interfaces ******/
-devfs_handle_t
+vertex_hdl_t
pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap)
{
return (pciio_dmamap->pd_dev);
@@ -1026,7 +940,7 @@ pciio_dma_slot_get(pciio_dmamap_t pciio_dmamap)
/****** Generic pci slot information interfaces ******/
pciio_info_t
-pciio_info_chk(devfs_handle_t pciio)
+pciio_info_chk(vertex_hdl_t pciio)
{
arbitrary_info_t ainfo = 0;
@@ -1035,7 +949,7 @@ pciio_info_chk(devfs_handle_t pciio)
}
pciio_info_t
-pciio_info_get(devfs_handle_t pciio)
+pciio_info_get(vertex_hdl_t pciio)
{
pciio_info_t pciio_info;
@@ -1051,18 +965,17 @@ pciio_info_get(devfs_handle_t pciio)
#endif /* DEBUG_PCIIO */
if ((pciio_info != NULL) &&
- (pciio_info->c_fingerprint != pciio_info_fingerprint)
- && (pciio_info->c_fingerprint != NULL)) {
+ (pciio_info->c_fingerprint != pciio_info_fingerprint)
+ && (pciio_info->c_fingerprint != NULL)) {
- return((pciio_info_t)-1); /* Should panic .. */
+ return((pciio_info_t)-1); /* Should panic .. */
}
-
return pciio_info;
}
void
-pciio_info_set(devfs_handle_t pciio, pciio_info_t pciio_info)
+pciio_info_set(vertex_hdl_t pciio, pciio_info_t pciio_info)
{
if (pciio_info != NULL)
pciio_info->c_fingerprint = pciio_info_fingerprint;
@@ -1076,7 +989,7 @@ pciio_info_set(devfs_handle_t pciio, pciio_info_t pciio_info)
(arbitrary_info_t) pciio_info);
}
-devfs_handle_t
+vertex_hdl_t
pciio_info_dev_get(pciio_info_t pciio_info)
{
return (pciio_info->c_vertex);
@@ -1106,7 +1019,7 @@ pciio_info_device_id_get(pciio_info_t pciio_info)
return (pciio_info->c_device);
}
-devfs_handle_t
+vertex_hdl_t
pciio_info_master_get(pciio_info_t pciio_info)
{
return (pciio_info->c_master);
@@ -1172,47 +1085,12 @@ pciio_info_rom_size_get(pciio_info_t info)
*/
/*
- * pciioinit: called once during device driver
- * initializtion if this driver is configured into
- * the system.
- */
-void
-pciio_init(void)
-{
- cdl_p cp;
-
-#if DEBUG && ATTACH_DEBUG
- printf("pciio_init\n");
-#endif
- /* Allocate the registry.
- * We might already have one.
- * If we don't, go get one.
- * MPness: someone might have
- * set one up for us while we
- * were not looking; use an atomic
- * compare-and-swap to commit to
- * using the new registry if and
- * only if nobody else did first.
- * If someone did get there first,
- * toss the one we allocated back
- * into the pool.
- */
- if (pciio_registry == NULL) {
- cp = cdl_new(EDGE_LBL_PCI, "vendor", "device");
- if (!compare_and_swap_ptr((void **) &pciio_registry, NULL, (void *) cp)) {
- cdl_del(cp);
- }
- }
- ASSERT(pciio_registry != NULL);
-}
-
-/*
* pciioattach: called for each vertex in the graph
* that is a PCI provider.
*/
/*ARGSUSED */
int
-pciio_attach(devfs_handle_t pciio)
+pciio_attach(vertex_hdl_t pciio)
{
#if DEBUG && ATTACH_DEBUG
#if defined(SUPPORT_PRINTING_V_FORMAT)
@@ -1228,7 +1106,7 @@ pciio_attach(devfs_handle_t pciio)
* Associate a set of pciio_provider functions with a vertex.
*/
void
-pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns)
+pciio_provider_register(vertex_hdl_t provider, pciio_provider_t *pciio_fns)
{
hwgraph_info_add_LBL(provider, INFO_LBL_PFUNCS, (arbitrary_info_t) pciio_fns);
}
@@ -1237,7 +1115,7 @@ pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns)
* Disassociate a set of pciio_provider functions with a vertex.
*/
void
-pciio_provider_unregister(devfs_handle_t provider)
+pciio_provider_unregister(vertex_hdl_t provider)
{
arbitrary_info_t ainfo;
@@ -1249,7 +1127,7 @@ pciio_provider_unregister(devfs_handle_t provider)
* provider.
*/
pciio_provider_t *
-pciio_provider_fns_get(devfs_handle_t provider)
+pciio_provider_fns_get(vertex_hdl_t provider)
{
arbitrary_info_t ainfo = 0;
@@ -1265,86 +1143,13 @@ pciio_driver_register(
char *driver_prefix,
unsigned flags)
{
- /* a driver's init routine might call
- * pciio_driver_register before the
- * system calls pciio_init; so we
- * make the init call ourselves here.
- */
- if (pciio_registry == NULL)
- pciio_init();
-
- return cdl_add_driver(pciio_registry,
- vendor_id, device_id,
- driver_prefix, flags, NULL);
-}
-
-/*
- * Remove an initialization function.
- */
-void
-pciio_driver_unregister(
- char *driver_prefix)
-{
- /* before a driver calls unregister,
- * it must have called register; so
- * we can assume we have a registry here.
- */
- ASSERT(pciio_registry != NULL);
-
- cdl_del_driver(pciio_registry, driver_prefix, NULL);
-}
-
-/*
- * Set the slot status for a device supported by the
- * driver being registered.
- */
-void
-pciio_driver_reg_callback(
- devfs_handle_t pconn_vhdl,
- int key1,
- int key2,
- int error)
-{
-}
-
-/*
- * Set the slot status for a device supported by the
- * driver being unregistered.
- */
-void
-pciio_driver_unreg_callback(
- devfs_handle_t pconn_vhdl,
- int key1,
- int key2,
- int error)
-{
-}
-
-/*
- * Call some function with each vertex that
- * might be one of this driver's attach points.
- */
-void
-pciio_iterate(char *driver_prefix,
- pciio_iter_f * func)
-{
- /* a driver's init routine might call
- * pciio_iterate before the
- * system calls pciio_init; so we
- * make the init call ourselves here.
- */
- if (pciio_registry == NULL)
- pciio_init();
-
- ASSERT(pciio_registry != NULL);
-
- cdl_iterate(pciio_registry, driver_prefix, (cdl_iter_f *) func);
+ return(0);
}
-devfs_handle_t
+vertex_hdl_t
pciio_device_register(
- devfs_handle_t connectpt, /* vertex for /hw/.../pciio/%d */
- devfs_handle_t master, /* card's master ASIC (PCI provider) */
+ vertex_hdl_t connectpt, /* vertex for /hw/.../pciio/%d */
+ vertex_hdl_t master, /* card's master ASIC (PCI provider) */
pciio_slot_t slot, /* card's slot */
pciio_function_t func, /* card's func */
pciio_vendor_id_t vendor_id,
@@ -1356,7 +1161,7 @@ pciio_device_register(
}
void
-pciio_device_unregister(devfs_handle_t pconn)
+pciio_device_unregister(vertex_hdl_t pconn)
{
DEV_FUNC(pconn,device_unregister)(pconn);
}
@@ -1364,14 +1169,14 @@ pciio_device_unregister(devfs_handle_t pconn)
pciio_info_t
pciio_device_info_new(
pciio_info_t pciio_info,
- devfs_handle_t master,
+ vertex_hdl_t master,
pciio_slot_t slot,
pciio_function_t func,
pciio_vendor_id_t vendor_id,
pciio_device_id_t device_id)
{
if (!pciio_info)
- GET_NEW(pciio_info);
+ NEW(pciio_info);
ASSERT(pciio_info != NULL);
pciio_info->c_slot = slot;
@@ -1396,14 +1201,14 @@ pciio_device_info_free(pciio_info_t pciio_info)
BZERO((char *)pciio_info,sizeof(pciio_info));
}
-devfs_handle_t
+vertex_hdl_t
pciio_device_info_register(
- devfs_handle_t connectpt, /* vertex at center of bus */
+ vertex_hdl_t connectpt, /* vertex at center of bus */
pciio_info_t pciio_info) /* details about the connectpt */
{
char name[32];
- devfs_handle_t pconn;
- int device_master_set(devfs_handle_t, devfs_handle_t);
+ vertex_hdl_t pconn;
+ int device_master_set(vertex_hdl_t, vertex_hdl_t);
pciio_slot_func_to_name(name,
pciio_info->c_slot,
@@ -1429,25 +1234,15 @@ pciio_device_info_register(
*/
device_master_set(pconn, pciio_info->c_master);
-
-#if USRPCI
- /*
- * Call into usrpci provider to let it initialize for
- * the given slot.
- */
- if (pciio_info->c_slot != PCIIO_SLOT_NONE)
- usrpci_device_register(pconn, pciio_info->c_master, pciio_info->c_slot);
-#endif
-
return pconn;
}
void
-pciio_device_info_unregister(devfs_handle_t connectpt,
+pciio_device_info_unregister(vertex_hdl_t connectpt,
pciio_info_t pciio_info)
{
char name[32];
- devfs_handle_t pconn;
+ vertex_hdl_t pconn;
if (!pciio_info)
return;
@@ -1470,7 +1265,7 @@ pciio_device_info_unregister(devfs_handle_t connectpt,
/* Add the pci card inventory information to the hwgraph
*/
static void
-pciio_device_inventory_add(devfs_handle_t pconn_vhdl)
+pciio_device_inventory_add(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
@@ -1488,7 +1283,7 @@ pciio_device_inventory_add(devfs_handle_t pconn_vhdl)
/*ARGSUSED */
int
-pciio_device_attach(devfs_handle_t pconn,
+pciio_device_attach(vertex_hdl_t pconn,
int drv_flags)
{
pciio_info_t pciio_info;
@@ -1507,34 +1302,15 @@ pciio_device_attach(devfs_handle_t pconn,
* pciio_init) have been called; so we
* can assume here that we have a registry.
*/
- ASSERT(pciio_registry != NULL);
- return(cdl_add_connpt(pciio_registry, vendor_id, device_id, pconn, drv_flags));
+ return(cdl_add_connpt(vendor_id, device_id, pconn, drv_flags));
}
int
-pciio_device_detach(devfs_handle_t pconn,
+pciio_device_detach(vertex_hdl_t pconn,
int drv_flags)
{
- pciio_info_t pciio_info;
- pciio_vendor_id_t vendor_id;
- pciio_device_id_t device_id;
-
- pciio_info = pciio_info_get(pconn);
-
- vendor_id = pciio_info->c_vendor;
- device_id = pciio_info->c_device;
-
- /* we don't start attaching things until
- * all the driver init routines (including
- * pciio_init) have been called; so we
- * can assume here that we have a registry.
- */
- ASSERT(pciio_registry != NULL);
-
- return(cdl_del_connpt(pciio_registry, vendor_id, device_id,
- pconn, drv_flags));
-
+ return(0);
}
/* SN2 */
@@ -1728,7 +1504,7 @@ pciio_device_win_free(pciio_win_alloc_t win_alloc)
* cooperating drivers, well, cooperate ...
*/
void
-pciio_error_register(devfs_handle_t pconn,
+pciio_error_register(vertex_hdl_t pconn,
error_handler_f *efunc,
error_handler_arg_t einfo)
{
@@ -1746,7 +1522,7 @@ pciio_error_register(devfs_handle_t pconn,
* vhdl is the vertex for the slot
*/
int
-pciio_slot_inuse(devfs_handle_t pconn_vhdl)
+pciio_slot_inuse(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
@@ -1763,7 +1539,7 @@ pciio_slot_inuse(devfs_handle_t pconn_vhdl)
}
int
-pciio_dma_enabled(devfs_handle_t pconn_vhdl)
+pciio_dma_enabled(vertex_hdl_t pconn_vhdl)
{
return DEV_FUNC(pconn_vhdl, dma_enabled)(pconn_vhdl);
}
@@ -1777,7 +1553,7 @@ pciio_info_type1_get(pciio_info_t pci_info)
/*
* These are complementary Linux interfaces that take in a pci_dev * as the
- * first arguement instead of devfs_handle_t.
+ * first argument instead of vertex_hdl_t.
*/
iopaddr_t snia_pciio_dmatrans_addr(struct pci_dev *, device_desc_t, paddr_t, size_t, unsigned);
pciio_dmamap_t snia_pciio_dmamap_alloc(struct pci_dev *, device_desc_t, size_t, unsigned);
@@ -1800,7 +1576,7 @@ snia_pcibr_rrb_alloc(struct pci_dev *pci_dev,
int *count_vchan0,
int *count_vchan1)
{
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
+ vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
return pcibr_rrb_alloc(dev, count_vchan0, count_vchan1);
}
@@ -1811,7 +1587,7 @@ snia_pciio_endian_set(struct pci_dev *pci_dev,
pciio_endian_t device_end,
pciio_endian_t desired_end)
{
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
+ vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
return DEV_FUNC(dev, endian_set)
(dev, device_end, desired_end);
@@ -1825,7 +1601,7 @@ snia_pciio_dmatrans_addr(struct pci_dev *pci_dev, /* translate for this device *
unsigned flags)
{ /* defined in dma.h */
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
+ vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
/*
* If the device is not a PIC, we always want the PCIIO_BYTE_STREAM to be
@@ -1842,7 +1618,7 @@ snia_pciio_dmamap_alloc(struct pci_dev *pci_dev, /* set up mappings for this de
unsigned flags)
{ /* defined in dma.h */
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
+ vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
/*
* If the device is not a PIC, we always want the PCIIO_BYTE_STREAM to be
diff --git a/arch/ia64/sn/io/sn2/pic.c b/arch/ia64/sn/io/sn2/pic.c
index 4dba132ca4caa6..ffa174762e74b1 100644
--- a/arch/ia64/sn/io/sn2/pic.c
+++ b/arch/ia64/sn/io/sn2/pic.c
@@ -27,7 +27,6 @@
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
@@ -38,27 +37,16 @@ extern char *bcopy(const char * src, char * dest, int count);
int pic_devflag = D_MP;
-extern int pcibr_attach2(devfs_handle_t, bridge_t *, devfs_handle_t, int, pcibr_soft_t *);
-extern void pcibr_driver_reg_callback(devfs_handle_t, int, int, int);
-extern void pcibr_driver_unreg_callback(devfs_handle_t, int, int, int);
+extern int pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t, int, pcibr_soft_t *);
+extern void pcibr_driver_reg_callback(vertex_hdl_t, int, int, int);
+extern void pcibr_driver_unreg_callback(vertex_hdl_t, int, int, int);
-void
-pic_init(void)
-{
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INIT, NULL, "pic_init()\n"));
-
- xwidget_driver_register(PIC_WIDGET_PART_NUM_BUS0,
- PIC_WIDGET_MFGR_NUM,
- "pic_",
- 0);
-}
-
/*
* copy inventory_t from conn_v to peer_conn_v
*/
int
-pic_bus1_inventory_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v)
+pic_bus1_inventory_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v)
{
inventory_t *pinv, *peer_pinv;
@@ -66,7 +54,7 @@ pic_bus1_inventory_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v)
(arbitrary_info_t *)&pinv) == GRAPH_SUCCESS)
{
NEW(peer_pinv);
- bcopy(pinv, peer_pinv, sizeof(inventory_t));
+ bcopy((const char *)pinv, (char *)peer_pinv, sizeof(inventory_t));
if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_INVENT,
(arbitrary_info_t)peer_pinv) != GRAPH_SUCCESS) {
DEL(peer_pinv);
@@ -75,8 +63,7 @@ pic_bus1_inventory_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v)
return 1;
}
- printk("pic_bus1_inventory_dup: cannot get INFO_LBL_INVENT from 0x%lx\n ",
- conn_v);
+ printk("pic_bus1_inventory_dup: cannot get INFO_LBL_INVENT from 0x%lx\n ", (uint64_t)conn_v);
return 0;
}
@@ -84,13 +71,12 @@ pic_bus1_inventory_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v)
* copy xwidget_info_t from conn_v to peer_conn_v
*/
int
-pic_bus1_widget_info_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v,
+pic_bus1_widget_info_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v,
cnodeid_t xbow_peer)
{
xwidget_info_t widget_info, peer_widget_info;
char peer_path[256];
- char *p;
- devfs_handle_t peer_hubv;
+ vertex_hdl_t peer_hubv;
hubinfo_t peer_hub_info;
/* get the peer hub's widgetid */
@@ -126,7 +112,7 @@ pic_bus1_widget_info_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v,
}
printk("pic_bus1_widget_info_dup: "
- "cannot get INFO_LBL_XWIDGET from 0x%lx\n", conn_v);
+ "cannot get INFO_LBL_XWIDGET from 0x%lx\n", (uint64_t)conn_v);
return 0;
}
@@ -138,15 +124,15 @@ pic_bus1_widget_info_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v,
* If not successful, return zero and both buses will attach to the
* vertex passed into pic_attach().
*/
-devfs_handle_t
-pic_bus1_redist(nasid_t nasid, devfs_handle_t conn_v)
+vertex_hdl_t
+pic_bus1_redist(nasid_t nasid, vertex_hdl_t conn_v)
{
cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
cnodeid_t xbow_peer = -1;
char pathname[256], peer_path[256], tmpbuf[256];
char *p;
int rc;
- devfs_handle_t peer_conn_v;
+ vertex_hdl_t peer_conn_v;
int pos;
slabid_t slab;
@@ -155,7 +141,7 @@ pic_bus1_redist(nasid_t nasid, devfs_handle_t conn_v)
/* pcibr widget hw/module/001c11/slab/0/Pbrick/xtalk/12 */
/* sprintf(pathname, "%v", conn_v); */
xbow_peer = NASID_TO_COMPACT_NODEID(NODEPDA(cnode)->xbow_peer);
- pos = devfs_generate_path(conn_v, tmpbuf, 256);
+ pos = hwgfs_generate_path(conn_v, tmpbuf, 256);
strcpy(pathname, &tmpbuf[pos]);
p = pathname + strlen("hw/module/001c01/slab/0/");
@@ -170,7 +156,7 @@ pic_bus1_redist(nasid_t nasid, devfs_handle_t conn_v)
rc = hwgraph_traverse(hwgraph_root, peer_path, &peer_conn_v);
if (GRAPH_SUCCESS == rc)
printk("pic_attach: found unexpected vertex: 0x%lx\n",
- peer_conn_v);
+ (uint64_t)peer_conn_v);
else if (GRAPH_NOT_FOUND != rc) {
printk("pic_attach: hwgraph_traverse unexpectedly"
" returned 0x%x\n", rc);
@@ -208,13 +194,13 @@ pic_bus1_redist(nasid_t nasid, devfs_handle_t conn_v)
int
-pic_attach(devfs_handle_t conn_v)
+pic_attach(vertex_hdl_t conn_v)
{
int rc;
bridge_t *bridge0, *bridge1 = (bridge_t *)0;
- devfs_handle_t pcibr_vhdl0, pcibr_vhdl1 = (devfs_handle_t)0;
+ vertex_hdl_t pcibr_vhdl0, pcibr_vhdl1 = (vertex_hdl_t)0;
pcibr_soft_t bus0_soft, bus1_soft = (pcibr_soft_t)0;
- devfs_handle_t conn_v0, conn_v1, peer_conn_v;
+ vertex_hdl_t conn_v0, conn_v1, peer_conn_v;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v, "pic_attach()\n"));
@@ -229,11 +215,11 @@ pic_attach(devfs_handle_t conn_v)
conn_v0 = conn_v1 = conn_v;
/* If dual-ported then split the two PIC buses across both Cbricks */
- if (peer_conn_v = pic_bus1_redist(NASID_GET(bridge0), conn_v))
+ if ((peer_conn_v = (pic_bus1_redist(NASID_GET(bridge0), conn_v))))
conn_v1 = peer_conn_v;
/*
- * Create the vertex for the PCI buses, which week
+ * Create the vertex for the PCI buses, which we
* will also use to hold the pcibr_soft and
* which will be the "master" vertex for all the
* pciio connection points we will hang off it.
@@ -266,7 +252,6 @@ pic_attach(devfs_handle_t conn_v)
/* save a pointer to the PIC's other bus's soft struct */
bus0_soft->bs_peers_soft = bus1_soft;
bus1_soft->bs_peers_soft = bus0_soft;
- bus0_soft->bs_peers_soft = (pcibr_soft_t)0;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
"pic_attach: bus0_soft=0x%x, bus1_soft=0x%x\n",
@@ -294,10 +279,8 @@ pciio_provider_t pci_pic_provider =
(pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
(pciio_dmamap_free_f *) pcibr_dmamap_free,
(pciio_dmamap_addr_f *) pcibr_dmamap_addr,
- (pciio_dmamap_list_f *) pcibr_dmamap_list,
(pciio_dmamap_done_f *) pcibr_dmamap_done,
(pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
- (pciio_dmatrans_list_f *) pcibr_dmatrans_list,
(pciio_dmamap_drain_f *) pcibr_dmamap_drain,
(pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
(pciio_dmalist_drain_f *) pcibr_dmalist_drain,
diff --git a/arch/ia64/sn/io/sn2/sgi_io_init.c b/arch/ia64/sn/io/sn2/sgi_io_init.c
deleted file mode 100644
index ed1417ed456747..00000000000000
--- a/arch/ia64/sn/io/sn2/sgi_io_init.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/pci/pciba.h>
-#include <linux/smp.h>
-
-extern void mlreset(void);
-extern int init_hcl(void);
-extern void klgraph_hack_init(void);
-extern void hubspc_init(void);
-extern void pciio_init(void);
-extern void pcibr_init(void);
-extern void xtalk_init(void);
-extern void xbow_init(void);
-extern void xbmon_init(void);
-extern void pciiox_init(void);
-extern void pic_init(void);
-extern void usrpci_init(void);
-extern void ioc3_init(void);
-extern void initialize_io(void);
-extern void klhwg_add_all_modules(devfs_handle_t);
-extern void klhwg_add_all_nodes(devfs_handle_t);
-
-void sn_mp_setup(void);
-extern devfs_handle_t hwgraph_root;
-extern void io_module_init(void);
-extern void pci_bus_cvlink_init(void);
-extern void temp_hack(void);
-
-extern int pci_bus_to_hcl_cvlink(void);
-
-/* #define DEBUG_IO_INIT 1 */
-#ifdef DEBUG_IO_INIT
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif /* DEBUG_IO_INIT */
-
-/*
- * per_hub_init
- *
- * This code is executed once for each Hub chip.
- */
-static void
-per_hub_init(cnodeid_t cnode)
-{
- nasid_t nasid;
- nodepda_t *npdap;
- ii_icmr_u_t ii_icmr;
- ii_ibcr_u_t ii_ibcr;
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- ASSERT(nasid != INVALID_NASID);
- ASSERT(NASID_TO_COMPACT_NODEID(nasid) == cnode);
-
- npdap = NODEPDA(cnode);
-
- REMOTE_HUB_S(nasid, IIO_IWEIM, 0x8000);
-
- /*
- * Set the total number of CRBs that can be used.
- */
- ii_icmr.ii_icmr_regval= 0x0;
- ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xf;
- REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);
-
- /*
- * Set the number of CRBs that both of the BTEs combined
- * can use minus 1.
- */
- ii_ibcr.ii_ibcr_regval= 0x0;
- ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
- REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);
-
- /*
- * Set CRB timeout to be 10ms.
- */
-#ifdef BRINGUP2
- REMOTE_HUB_S(nasid, IIO_ICTP, 0xffffff );
- REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
- //REMOTE_HUB_S(nasid, IIO_IWI, 0x00FF00FF00FFFFFF);
-#endif
-
- /* Initialize error interrupts for this hub. */
- hub_error_init(cnode);
-}
-
-/*
- * This routine is responsible for the setup of all the IRIX hwgraph style
- * stuff that's been pulled into linux. It's called by sn_pci_find_bios which
- * is called just before the generic Linux PCI layer does its probing (by
- * platform_pci_fixup aka sn_pci_fixup).
- *
- * It is very IMPORTANT that this call is only made by the Master CPU!
- *
- */
-
-void
-sgi_master_io_infr_init(void)
-{
- int cnode;
- extern void kdba_io_init();
-
- /*
- * Do any early init stuff .. einit_tbl[] etc.
- */
- init_hcl(); /* Sets up the hwgraph compatibility layer with devfs */
-
- /*
- * initialize the Linux PCI to xwidget vertexes ..
- */
- pci_bus_cvlink_init();
-
- kdba_io_init();
-
-#ifdef BRINGUP
- /*
- * Hack to provide statically initialzed klgraph entries.
- */
- DBG("--> sgi_master_io_infr_init: calling klgraph_hack_init()\n");
- klgraph_hack_init();
-#endif /* BRINGUP */
-
- /*
- * This is the Master CPU. Emulate mlsetup and main.c in Irix.
- */
- mlreset();
-
- /*
- * allowboot() is called by kern/os/main.c in main()
- * Emulate allowboot() ...
- * per_cpu_init() - only need per_hub_init()
- * cpu_io_setup() - Nothing to do.
- *
- */
- sn_mp_setup();
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- per_hub_init(cnode);
- }
-
- /* We can do headless hub cnodes here .. */
-
- /*
- * io_init[] stuff.
- *
- * Get SGI IO Infrastructure drivers to init and register with
- * each other etc.
- */
-
- hubspc_init();
- pciio_init();
- pcibr_init();
- pic_init();
- xtalk_init();
- xbow_init();
- xbmon_init();
- pciiox_init();
- usrpci_init();
- ioc3_init();
-
- /*
- *
- * Our IO Infrastructure drivers are in place ..
- * Initialize the whole IO Infrastructure .. xwidget/device probes.
- *
- */
- initialize_io();
- pci_bus_to_hcl_cvlink();
-
-#ifdef CONFIG_PCIBA
- DBG("--> sgi_master_io_infr_init: calling pciba_init()\n");
-#ifndef BRINGUP2
- pciba_init();
-#endif
-#endif
-}
-
-/*
- * One-time setup for MP SN.
- * Allocate per-node data, slurp prom klconfig information and
- * convert it to hwgraph information.
- */
-void
-sn_mp_setup(void)
-{
- cpuid_t cpu;
-
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- /* Skip holes in CPU space */
- if (cpu_enabled(cpu)) {
- init_platform_pda(cpu);
- }
- }
-
- /*
- * Initialize platform-dependent vertices in the hwgraph:
- * module
- * node
- * cpu
- * memory
- * slot
- * hub
- * router
- * xbow
- */
-
- io_module_init(); /* Use to be called module_init() .. */
- klhwg_add_all_modules(hwgraph_root);
- klhwg_add_all_nodes(hwgraph_root);
-}
diff --git a/arch/ia64/sn/io/sn2/shub.c b/arch/ia64/sn/io/sn2/shub.c
index ee48cd1eb34e7b..9709c01cc27183 100644
--- a/arch/ia64/sn/io/sn2/shub.c
+++ b/arch/ia64/sn/io/sn2/shub.c
@@ -97,6 +97,14 @@ shub_mmr_write(cnodeid_t cnode, shubreg_t reg, uint64_t val)
}
static inline void
+shub_mmr_write_iospace(cnodeid_t cnode, shubreg_t reg, uint64_t val)
+{
+ int nasid = cnodeid_to_nasid(cnode);
+
+ REMOTE_HUB_S(nasid, reg, val);
+}
+
+static inline void
shub_mmr_write32(cnodeid_t cnode, shubreg_t reg, uint32_t val)
{
int nasid = cnodeid_to_nasid(cnode);
@@ -118,6 +126,14 @@ shub_mmr_read(cnodeid_t cnode, shubreg_t reg)
return val;
}
+static inline uint64_t
+shub_mmr_read_iospace(cnodeid_t cnode, shubreg_t reg)
+{
+ int nasid = cnodeid_to_nasid(cnode);
+
+ return REMOTE_HUB_L(nasid, reg);
+}
+
static inline uint32_t
shub_mmr_read32(cnodeid_t cnode, shubreg_t reg)
{
@@ -182,11 +198,9 @@ shubstats_ioctl(struct inode *inode, struct file *file,
{
cnodeid_t cnode;
uint64_t longarg;
- devfs_handle_t d;
+ vertex_hdl_t d;
int nasid;
- if ((d = devfs_get_handle_from_inode(inode)) == NULL)
- return -ENODEV;
cnode = (cnodeid_t)hwgraph_fastinfo_get(d);
switch (cmd) {
@@ -231,3 +245,252 @@ shubstats_ioctl(struct inode *inode, struct file *file,
struct file_operations shub_mon_fops = {
ioctl: shubstats_ioctl,
};
+
+/*
+ * "linkstatd" kernel thread to export SGI Numalink
+ * stats via /proc/sgi_sn/linkstats
+ */
+static struct s_linkstats {
+ uint64_t hs_ni_sn_errors[2];
+ uint64_t hs_ni_cb_errors[2];
+ uint64_t hs_ni_retry_errors[2];
+ int hs_ii_up;
+ uint64_t hs_ii_sn_errors;
+ uint64_t hs_ii_cb_errors;
+ uint64_t hs_ii_retry_errors;
+} *sn_linkstats;
+
+static spinlock_t sn_linkstats_lock;
+static unsigned long sn_linkstats_starttime;
+static unsigned long sn_linkstats_samples;
+static unsigned long sn_linkstats_overflows;
+static unsigned long sn_linkstats_update_msecs;
+
+void
+sn_linkstats_reset(unsigned long msecs)
+{
+ int cnode;
+ uint64_t iio_wstat;
+ uint64_t llp_csr_reg;
+
+ spin_lock(&sn_linkstats_lock);
+ memset(sn_linkstats, 0, numnodes * sizeof(struct s_linkstats));
+ for (cnode=0; cnode < numnodes; cnode++) {
+ shub_mmr_write(cnode, SH_NI0_LLP_ERR, 0L);
+ shub_mmr_write(cnode, SH_NI1_LLP_ERR, 0L);
+ shub_mmr_write_iospace(cnode, IIO_LLP_LOG, 0L);
+
+ /* zero the II retry counter */
+ iio_wstat = shub_mmr_read_iospace(cnode, IIO_WSTAT);
+ iio_wstat &= 0xffffffffff00ffff; /* bits 23:16 */
+ shub_mmr_write_iospace(cnode, IIO_WSTAT, iio_wstat);
+
+ /* Check if the II xtalk link is working */
+ llp_csr_reg = shub_mmr_read_iospace(cnode, IIO_LLP_CSR);
+ if (llp_csr_reg & IIO_LLP_CSR_IS_UP)
+ sn_linkstats[cnode].hs_ii_up = 1;
+ }
+
+ sn_linkstats_update_msecs = msecs;
+ sn_linkstats_samples = 0;
+ sn_linkstats_overflows = 0;
+ sn_linkstats_starttime = jiffies;
+ spin_unlock(&sn_linkstats_lock);
+}
+
+int
+linkstatd_thread(void *unused)
+{
+ int cnode;
+ int overflows;
+ uint64_t reg[2];
+ uint64_t iio_wstat = 0L;
+ ii_illr_u_t illr;
+ struct s_linkstats *lsp;
+ struct task_struct *tsk = current;
+
+ daemonize("linkstatd");
+ set_user_nice(tsk, 19);
+ sigfillset(&tsk->blocked);
+ strcpy(tsk->comm, "linkstatd");
+
+ while(1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(sn_linkstats_update_msecs * HZ / 1000);
+
+ spin_lock(&sn_linkstats_lock);
+
+ overflows = 0;
+ for (lsp=sn_linkstats, cnode=0; cnode < numnodes; cnode++, lsp++) {
+ reg[0] = shub_mmr_read(cnode, SH_NI0_LLP_ERR);
+ reg[1] = shub_mmr_read(cnode, SH_NI1_LLP_ERR);
+ if (lsp->hs_ii_up) {
+ illr = (ii_illr_u_t)shub_mmr_read_iospace(cnode, IIO_LLP_LOG);
+ iio_wstat = shub_mmr_read_iospace(cnode, IIO_WSTAT);
+ }
+
+ if (!overflows && (
+ (reg[0] & SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_MASK) ==
+ SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_MASK ||
+ (reg[0] & SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_MASK) ==
+ SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_MASK ||
+ (reg[1] & SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_MASK) ==
+ SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_MASK ||
+ (reg[1] & SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_MASK) ==
+ SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_MASK ||
+ (lsp->hs_ii_up && illr.ii_illr_fld_s.i_sn_cnt == IIO_LLP_SN_MAX) ||
+ (lsp->hs_ii_up && illr.ii_illr_fld_s.i_cb_cnt == IIO_LLP_CB_MAX))) {
+ overflows = 1;
+ }
+
+#define LINKSTAT_UPDATE(reg, cnt, mask, shift) cnt += (reg & mask) >> shift
+
+ LINKSTAT_UPDATE(reg[0], lsp->hs_ni_sn_errors[0],
+ SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_MASK,
+ SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[1], lsp->hs_ni_sn_errors[1],
+ SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_MASK,
+ SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[0], lsp->hs_ni_cb_errors[0],
+ SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_MASK,
+ SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[1], lsp->hs_ni_cb_errors[1],
+ SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_MASK,
+ SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[0], lsp->hs_ni_retry_errors[0],
+ SH_NI0_LLP_ERR_RETRY_COUNT_MASK,
+ SH_NI0_LLP_ERR_RETRY_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[1], lsp->hs_ni_retry_errors[1],
+ SH_NI1_LLP_ERR_RETRY_COUNT_MASK,
+ SH_NI1_LLP_ERR_RETRY_COUNT_SHFT);
+
+ if (lsp->hs_ii_up) {
+ /* II sn and cb errors */
+ lsp->hs_ii_sn_errors += illr.ii_illr_fld_s.i_sn_cnt;
+ lsp->hs_ii_cb_errors += illr.ii_illr_fld_s.i_cb_cnt;
+ lsp->hs_ii_retry_errors += (iio_wstat & 0x0000000000ff0000) >> 16;
+
+ shub_mmr_write(cnode, SH_NI0_LLP_ERR, 0L);
+ shub_mmr_write(cnode, SH_NI1_LLP_ERR, 0L);
+ shub_mmr_write_iospace(cnode, IIO_LLP_LOG, 0L);
+
+ /* zero the II retry counter */
+ iio_wstat = shub_mmr_read_iospace(cnode, IIO_WSTAT);
+ iio_wstat &= 0xffffffffff00ffff; /* bits 23:16 */
+ shub_mmr_write_iospace(cnode, IIO_WSTAT, iio_wstat);
+ }
+ }
+
+ sn_linkstats_samples++;
+ if (overflows)
+ sn_linkstats_overflows++;
+
+ spin_unlock(&sn_linkstats_lock);
+ }
+}
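LINKSTAT_UPDATE is the usual mask-and-shift accumulation of a hardware counter field, and the overflow test above leans on the fact that a counter pegged at all-ones makes (reg & MASK) == MASK true. A minimal version of that bookkeeping is sketched below, reusing the bits 23:16 span the II retry counter occupies; the macro and function names (and the mask constant) are illustrative only.

#include <stdint.h>

#define ERR_COUNT_MASK  0x0000000000ff0000ULL   /* counter field in bits 23:16 */
#define ERR_COUNT_SHFT  16

/* Fold one sample into a running total; report whether the field was pegged,
 * i.e. whether events may have been lost since the last read. */
static int accumulate_errors(uint64_t reg, uint64_t *total)
{
        *total += (reg & ERR_COUNT_MASK) >> ERR_COUNT_SHFT;
        return (reg & ERR_COUNT_MASK) == ERR_COUNT_MASK;
}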
+
+static char *
+rate_per_minute(uint64_t val, uint64_t secs)
+{
+ static char buf[16];
+ uint64_t a=0, b=0, c=0, d=0;
+
+ if (secs) {
+ a = 60 * val / secs;
+ b = 60 * 10 * val / secs - (10 * a);
+ c = 60 * 100 * val / secs - (100 * a) - (10 * b);
+ d = 60 * 1000 * val / secs - (1000 * a) - (100 * b) - (10 * c);
+ }
+ sprintf(buf, "%4lu.%lu%lu%lu", a, b, c, d);
+
+ return buf;
+}
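rate_per_minute() builds a three-decimal-place per-minute rate out of pure integer arithmetic (no floating point in the kernel), peeling off one digit per scaling step. The same digits can be had from a single scaled division; the helper below is a sketch of that, with the caller expected to zero-pad the fraction when printing (e.g. "%4lu.%03lu").

/* Events per minute, scaled by 1000 and split into whole/fractional parts. */
static void rate_per_minute_parts(unsigned long events, unsigned long secs,
                                  unsigned long *whole, unsigned long *milli)
{
        unsigned long scaled = secs ? (60UL * 1000UL * events) / secs : 0;

        *whole = scaled / 1000;         /* integer part */
        *milli = scaled % 1000;         /* three fractional digits */
}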
+
+int
+sn_linkstats_get(char *page)
+{
+ int n = 0;
+ int cnode;
+ int nlport;
+ struct s_linkstats *lsp;
+ nodepda_t *npda;
+ uint64_t snsum = 0;
+ uint64_t cbsum = 0;
+ uint64_t retrysum = 0;
+ uint64_t snsum_ii = 0;
+ uint64_t cbsum_ii = 0;
+ uint64_t retrysum_ii = 0;
+ uint64_t secs;
+
+ spin_lock(&sn_linkstats_lock);
+ secs = (jiffies - sn_linkstats_starttime) / HZ;
+
+ n += sprintf(page, "# SGI Numalink stats v1 : %lu samples, %lu o/flows, update %lu msecs\n",
+ sn_linkstats_samples, sn_linkstats_overflows, sn_linkstats_update_msecs);
+
+ n += sprintf(page+n, "%-37s %8s %8s %8s %8s\n",
+ "# Numalink", "sn errs", "cb errs", "cb/min", "retries");
+
+ for (lsp=sn_linkstats, cnode=0; cnode < numnodes; cnode++, lsp++) {
+ npda = NODEPDA(cnode);
+
+ /* two NL links on each SHub */
+ for (nlport=0; nlport < 2; nlport++) {
+ cbsum += lsp->hs_ni_cb_errors[nlport];
+ snsum += lsp->hs_ni_sn_errors[nlport];
+ retrysum += lsp->hs_ni_retry_errors[nlport];
+
+ /* avoid buffer overrun (should be using seq_read API) */
+ if (numnodes > 64)
+ continue;
+
+ n += sprintf(page + n, "/%s/link/%d %8lu %8lu %8s %8lu\n",
+ npda->hwg_node_name, nlport+1, lsp->hs_ni_sn_errors[nlport],
+ lsp->hs_ni_cb_errors[nlport],
+ rate_per_minute(lsp->hs_ni_cb_errors[nlport], secs),
+ lsp->hs_ni_retry_errors[nlport]);
+ }
+
+ /* one II port on each SHub (may not be connected) */
+ if (lsp->hs_ii_up) {
+ n += sprintf(page + n, "/%s/xtalk %8lu %8lu %8s %8lu\n",
+ npda->hwg_node_name, lsp->hs_ii_sn_errors,
+ lsp->hs_ii_cb_errors, rate_per_minute(lsp->hs_ii_cb_errors, secs),
+ lsp->hs_ii_retry_errors);
+
+ snsum_ii += lsp->hs_ii_sn_errors;
+ cbsum_ii += lsp->hs_ii_cb_errors;
+ retrysum_ii += lsp->hs_ii_retry_errors;
+ }
+ }
+
+ n += sprintf(page + n, "%-37s %8lu %8lu %8s %8lu\n",
+ "System wide NL totals", snsum, cbsum,
+ rate_per_minute(cbsum, secs), retrysum);
+
+ n += sprintf(page + n, "%-37s %8lu %8lu %8s %8lu\n",
+ "System wide II totals", snsum_ii, cbsum_ii,
+ rate_per_minute(cbsum_ii, secs), retrysum_ii);
+
+ spin_unlock(&sn_linkstats_lock);
+
+ return n;
+}
+
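The numnodes > 64 guard and the "should be using seq_read API" note above both stem from sprintf'ing into a single /proc page. A conversion along the following lines would let the seq_file core size and paginate the output instead; this is a sketch only, the show/open/fops names are invented, and the same includes as shub.c are assumed.

#include <linux/seq_file.h>
#include <linux/fs.h>

static int linkstats_seq_show(struct seq_file *s, void *unused)
{
        int cnode;

        spin_lock(&sn_linkstats_lock);
        for (cnode = 0; cnode < numnodes; cnode++)
                seq_printf(s, "/%s/xtalk %8lu %8lu\n",
                           NODEPDA(cnode)->hwg_node_name,
                           sn_linkstats[cnode].hs_ii_sn_errors,
                           sn_linkstats[cnode].hs_ii_cb_errors);
        spin_unlock(&sn_linkstats_lock);
        return 0;
}

static int linkstats_seq_open(struct inode *inode, struct file *file)
{
        return single_open(file, linkstats_seq_show, NULL);
}

static struct file_operations linkstats_seq_fops = {
        .open    = linkstats_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};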
+static int __init
+linkstatd_init(void)
+{
+ spin_lock_init(&sn_linkstats_lock);
+ sn_linkstats = kmalloc(numnodes * sizeof(struct s_linkstats), GFP_KERNEL);
+ sn_linkstats_reset(60000UL); /* default 60 second update interval */
+ kernel_thread(linkstatd_thread, NULL, CLONE_FS | CLONE_FILES);
+
+ return 0;
+}
+
+__initcall(linkstatd_init);
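One hardening note on linkstatd_init(): the kmalloc() result is never checked, and sn_linkstats_reset() immediately memset()s it, so an allocation failure at boot would oops. A defensive variant, shown only as a sketch with the same calls plus one extra check, might look like:

static int __init linkstatd_init(void)
{
        spin_lock_init(&sn_linkstats_lock);
        sn_linkstats = kmalloc(numnodes * sizeof(struct s_linkstats), GFP_KERNEL);
        if (!sn_linkstats)
                return -ENOMEM;         /* don't reset or start the thread without the buffer */

        sn_linkstats_reset(60000UL);    /* default 60 second update interval */
        kernel_thread(linkstatd_thread, NULL, CLONE_FS | CLONE_FILES);
        return 0;
}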
diff --git a/arch/ia64/sn/io/sn2/shub_intr.c b/arch/ia64/sn/io/sn2/shub_intr.c
index d64022e1cc1179..f081c260f39a3c 100644
--- a/arch/ia64/sn/io/sn2/shub_intr.c
+++ b/arch/ia64/sn/io/sn2/shub_intr.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
@@ -30,7 +30,7 @@
/* ARGSUSED */
void
-hub_intr_init(devfs_handle_t hubv)
+hub_intr_init(vertex_hdl_t hubv)
{
}
@@ -45,9 +45,9 @@ hub_widget_id(nasid_t nasid)
}
static hub_intr_t
-do_hub_intr_alloc(devfs_handle_t dev,
+do_hub_intr_alloc(vertex_hdl_t dev,
device_desc_t dev_desc,
- devfs_handle_t owner_dev,
+ vertex_hdl_t owner_dev,
int uncond_nothread)
{
cpuid_t cpu = 0;
@@ -71,7 +71,7 @@ do_hub_intr_alloc(devfs_handle_t dev,
cpuphys = cpu_physical_id(cpu);
slice = cpu_physical_id_to_slice(cpuphys);
nasid = cpu_physical_id_to_nasid(cpuphys);
- cnode = cpu_to_node_map[cpu];
+ cnode = cpuid_to_cnodeid(cpu);
if (slice) {
xtalk_addr = SH_II_INT1 | ((unsigned long)nasid << 36) | (1UL << 47);
@@ -101,17 +101,17 @@ do_hub_intr_alloc(devfs_handle_t dev,
}
hub_intr_t
-hub_intr_alloc(devfs_handle_t dev,
+hub_intr_alloc(vertex_hdl_t dev,
device_desc_t dev_desc,
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{
return(do_hub_intr_alloc(dev, dev_desc, owner_dev, 0));
}
hub_intr_t
-hub_intr_alloc_nothd(devfs_handle_t dev,
+hub_intr_alloc_nothd(vertex_hdl_t dev,
device_desc_t dev_desc,
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{
return(do_hub_intr_alloc(dev, dev_desc, owner_dev, 1));
}
@@ -188,18 +188,3 @@ hub_intr_disconnect(hub_intr_t intr_hdl)
ASSERT(rv == 0);
intr_hdl->i_flags &= ~HUB_INTR_IS_CONNECTED;
}
-
-
-/*
- * Return a hwgraph vertex that represents the CPU currently
- * targeted by an interrupt.
- */
-devfs_handle_t
-hub_intr_cpu_get(hub_intr_t intr_hdl)
-{
- cpuid_t cpuid = intr_hdl->i_cpuid;
-
- ASSERT(cpuid != CPU_NONE);
-
- return(cpuid_to_vertex(cpuid));
-}
diff --git a/arch/ia64/sn/io/sn2/shuberror.c b/arch/ia64/sn/io/sn2/shuberror.c
index 0861a3b0831679..2af0ae94f8adaa 100644
--- a/arch/ia64/sn/io/sn2/shuberror.c
+++ b/arch/ia64/sn/io/sn2/shuberror.c
@@ -4,13 +4,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000,2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000,2002-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/irq.h>
+#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/sn/sgi.h>
@@ -27,6 +28,7 @@
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/intr.h>
+#include <asm/sn/ioerror_handling.h>
#include <asm/sn/ioerror.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/sn/bte.h>
@@ -34,21 +36,18 @@
extern void hubni_eint_init(cnodeid_t cnode);
extern void hubii_eint_init(cnodeid_t cnode);
extern void hubii_eint_handler (int irq, void *arg, struct pt_regs *ep);
-int hubiio_crb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo);
-int hubiio_prb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo);
-extern void bte_crb_error_handler(devfs_handle_t hub_v, int btenum, int crbnum, ioerror_t *ioe, int bteop);
+int hubiio_crb_error_handler(vertex_hdl_t hub_v, hubinfo_t hinfo);
+int hubiio_prb_error_handler(vertex_hdl_t hub_v, hubinfo_t hinfo);
+extern void bte_crb_error_handler(vertex_hdl_t hub_v, int btenum, int crbnum, ioerror_t *ioe, int bteop);
+void print_crb_fields(int crb_num, ii_icrb0_a_u_t icrba,
+ ii_icrb0_b_u_t icrbb, ii_icrb0_c_u_t icrbc,
+ ii_icrb0_d_u_t icrbd, ii_icrb0_e_u_t icrbe);
extern int maxcpus;
+extern error_return_code_t error_state_set(vertex_hdl_t v,error_state_t new_state);
#define HUB_ERROR_PERIOD (120 * HZ) /* 2 minutes */
-#ifdef BUS_INT_WAR
-void sn_add_polled_interrupt(int irq, int interval);
-void sn_delete_polled_interrupt(int irq);
-extern int bus_int_war_ide_irq;
-#endif
-
-
void
hub_error_clear(nasid_t nasid)
{
@@ -74,9 +73,7 @@ hub_error_clear(nasid_t nasid)
REMOTE_HUB_S(nasid, IIO_IOPRB_0 + (i * sizeof(hubreg_t)), prb.iprb_regval);
}
- REMOTE_HUB_S(nasid, IIO_IO_ERR_CLR, -1);
- idsr = REMOTE_HUB_L(nasid, IIO_IIDSR);
- REMOTE_HUB_S(nasid, IIO_IIDSR, (idsr & ~(IIO_IIDSR_SENT_MASK)));
+ REMOTE_HUB_S(nasid, IIO_IECLR, -1);
}
@@ -117,7 +114,6 @@ hub_error_init(cnodeid_t cnode)
* Returns : None.
*/
-
void
hubii_eint_init(cnodeid_t cnode)
{
@@ -125,33 +121,41 @@ hubii_eint_init(cnodeid_t cnode)
ii_iidsr_u_t hubio_eint;
hubinfo_t hinfo;
cpuid_t intr_cpu;
- devfs_handle_t hub_v;
+ vertex_hdl_t hub_v;
int bit_pos_to_irq(int bit);
+ ii_ilcsr_u_t ilcsr;
- hub_v = (devfs_handle_t)cnodeid_to_vertex(cnode);
+ hub_v = (vertex_hdl_t)cnodeid_to_vertex(cnode);
ASSERT_ALWAYS(hub_v);
hubinfo_get(hub_v, &hinfo);
ASSERT(hinfo);
ASSERT(hinfo->h_cnodeid == cnode);
+ ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(hinfo->h_nasid, IIO_ILCSR);
+ if ((ilcsr.ii_ilcsr_fld_s.i_llp_stat & 0x2) == 0) {
+ /*
+ * HUB II link is not up. Disable LLP. Clear old errors.
+ * Enable interrupts to handle BTE errors.
+ */
+ ilcsr.ii_ilcsr_fld_s.i_llp_en = 0;
+ REMOTE_HUB_S(hinfo->h_nasid, IIO_ILCSR, ilcsr.ii_ilcsr_regval);
+ }
+
/* Select a possible interrupt target where there is a free interrupt
* bit and also reserve the interrupt bit for this IO error interrupt
*/
- intr_cpu = intr_heuristic(hub_v,0,-1,0,hub_v,
+ intr_cpu = intr_heuristic(hub_v,0,SGI_II_ERROR,0,hub_v,
"HUB IO error interrupt",&bit);
if (intr_cpu == CPU_NONE) {
printk("hubii_eint_init: intr_reserve_level failed, cnode %d", cnode);
return;
}
- rv = intr_connect_level(intr_cpu, bit, 0, NULL);
- request_irq(bit + (intr_cpu << 8), hubii_eint_handler, 0, "SN_hub_error", (void *)hub_v);
- irq_desc(bit + (intr_cpu << 8))->status |= SN2_IRQ_PER_HUB;
-#ifdef BUS_INT_WAR
- sn_add_polled_interrupt(bit + (intr_cpu << 8), (0.01 * HZ));
-#endif
+ rv = intr_connect_level(intr_cpu, SGI_II_ERROR, 0, NULL);
+ request_irq(SGI_II_ERROR, hubii_eint_handler, SA_SHIRQ, "SN_hub_error", (void *)hub_v);
+ irq_desc(bit)->status |= SN2_IRQ_PER_HUB;
ASSERT_ALWAYS(rv >= 0);
hubio_eint.ii_iidsr_regval = 0;
hubio_eint.ii_iidsr_fld_s.i_enable = 1;
@@ -167,18 +171,29 @@ hubii_eint_init(cnodeid_t cnode)
void
hubii_eint_handler (int irq, void *arg, struct pt_regs *ep)
{
- devfs_handle_t hub_v;
+ vertex_hdl_t hub_v;
hubinfo_t hinfo;
ii_wstat_u_t wstat;
hubreg_t idsr;
+ ii_ilcsr_u_t ilcsr;
/* two levels of casting avoids compiler warning.!! */
- hub_v = (devfs_handle_t)(long)(arg);
+ hub_v = (vertex_hdl_t)(long)(arg);
ASSERT(hub_v);
hubinfo_get(hub_v, &hinfo);
+ idsr = REMOTE_HUB_L(hinfo->h_nasid, IIO_ICMR);
+#if 0
+ if (idsr & 0x1) {
+ /* ICMR bit is set .. we are getting into the "Spurious Interrupts" condition. */
+ printk("Cnode %d II has seen the ICMR condition\n", hinfo->h_cnodeid);
+ printk("***** Please file PV with the above messages *****\n");
+ /* panic("We have to panic to prevent further unknown states ..\n"); */
+ }
+#endif
+
/*
* Identify the reason for error.
*/
@@ -218,10 +233,26 @@ hubii_eint_handler (int irq, void *arg, struct pt_regs *ep)
* Note: we may never be able to print this, if the II talking
* to Xbow which hosts the console is dead.
*/
- printk("Hub %d to Xtalk Link failed (II_ECRAZY) Reason: %s",
- hinfo->h_cnodeid, reason);
+ ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(hinfo->h_nasid, IIO_ILCSR);
+ if (ilcsr.ii_ilcsr_fld_s.i_llp_en == 1) { /* Link is enabled */
+ printk("Hub %d, cnode %d to Xtalk Link failed (II_ECRAZY) Reason: %s",
+ hinfo->h_nasid, hinfo->h_cnodeid, reason);
+ }
}
+
+ /*
+ * Before processing any interrupt related information, clear all
+ * error indication and reenable interrupts. This will prevent
+ * lost interrupts due to the interrupt handler scanning past a PRB/CRB
+ * which has not errored yet and then the PRB/CRB goes into error.
+ * Note, PRB errors are cleared individually.
+ */
+ REMOTE_HUB_S(hinfo->h_nasid, IIO_IECLR, 0xff0000);
+ idsr = REMOTE_HUB_L(hinfo->h_nasid, IIO_IIDSR) & ~IIO_IIDSR_SENT_MASK;
+ REMOTE_HUB_S(hinfo->h_nasid, IIO_IIDSR, idsr);
+
+
/*
* It's a toss as to which one among PRB/CRB to check first.
* Current decision is based on the severity of the errors.
@@ -232,14 +263,6 @@ hubii_eint_handler (int irq, void *arg, struct pt_regs *ep)
*/
(void)hubiio_crb_error_handler(hub_v, hinfo);
(void)hubiio_prb_error_handler(hub_v, hinfo);
- /*
- * If we reach here, it indicates crb/prb handlers successfully
- * handled the error. So, re-enable II to send more interrupt
- * and return.
- */
- REMOTE_HUB_S(hinfo->h_nasid, IIO_IECLR, 0xffffff);
- idsr = REMOTE_HUB_L(hinfo->h_nasid, IIO_IIDSR) & ~IIO_IIDSR_SENT_MASK;
- REMOTE_HUB_S(hinfo->h_nasid, IIO_IIDSR, idsr);
}
/*
@@ -295,6 +318,105 @@ char *hubiio_crb_errors[] = {
"Xtalk Error Packet"
};
+void
+print_crb_fields(int crb_num, ii_icrb0_a_u_t icrba,
+ ii_icrb0_b_u_t icrbb, ii_icrb0_c_u_t icrbc,
+ ii_icrb0_d_u_t icrbd, ii_icrb0_e_u_t icrbe)
+{
+ printk("CRB %d regA\n\t"
+ "a_iow 0x%x\n\t"
+ "valid 0x%x\n\t"
+ "Address 0x%lx\n\t"
+ "a_tnum 0x%x\n\t"
+ "a_sidn 0x%x\n",
+ crb_num,
+ icrba.a_iow,
+ icrba.a_valid,
+ icrba.a_addr,
+ icrba.a_tnum,
+ icrba.a_sidn);
+ printk("CRB %d regB\n\t"
+ "b_imsgtype 0x%x\n\t"
+ "b_imsg 0x%x\n"
+ "\tb_use_old 0x%x\n\t"
+ "b_initiator 0x%x\n\t"
+ "b_exc 0x%x\n"
+ "\tb_ackcnt 0x%x\n\t"
+ "b_resp 0x%x\n\t"
+ "b_ack 0x%x\n"
+ "\tb_hold 0x%x\n\t"
+ "b_wb 0x%x\n\t"
+ "b_intvn 0x%x\n"
+ "\tb_stall_ib 0x%x\n\t"
+ "b_stall_int 0x%x\n"
+ "\tb_stall_bte_0 0x%x\n\t"
+ "b_stall_bte_1 0x%x\n"
+ "\tb_error 0x%x\n\t"
+ "b_lnetuce 0x%x\n\t"
+ "b_mark 0x%x\n\t"
+ "b_xerr 0x%x\n",
+ crb_num,
+ icrbb.b_imsgtype,
+ icrbb.b_imsg,
+ icrbb.b_use_old,
+ icrbb.b_initiator,
+ icrbb.b_exc,
+ icrbb.b_ackcnt,
+ icrbb.b_resp,
+ icrbb.b_ack,
+ icrbb.b_hold,
+ icrbb.b_wb,
+ icrbb.b_intvn,
+ icrbb.b_stall_ib,
+ icrbb.b_stall_int,
+ icrbb.b_stall_bte_0,
+ icrbb.b_stall_bte_1,
+ icrbb.b_error,
+ icrbb.b_lnetuce,
+ icrbb.b_mark,
+ icrbb.b_xerr);
+ printk("CRB %d regC\n\t"
+ "c_source 0x%x\n\t"
+ "c_xtsize 0x%x\n\t"
+ "c_cohtrans 0x%x\n\t"
+ "c_btenum 0x%x\n\t"
+ "c_gbr 0x%x\n\t"
+ "c_doresp 0x%x\n\t"
+ "c_barrop 0x%x\n\t"
+ "c_suppl 0x%x\n",
+ crb_num,
+ icrbc.c_source,
+ icrbc.c_xtsize,
+ icrbc.c_cohtrans,
+ icrbc.c_btenum,
+ icrbc.c_gbr,
+ icrbc.c_doresp,
+ icrbc.c_barrop,
+ icrbc.c_suppl);
+ printk("CRB %d regD\n\t"
+ "d_bteaddr 0x%lx\n\t"
+ "d_bteop 0x%x\n\t"
+ "d_pripsc 0x%x\n\t"
+ "d_pricnt 0x%x\n\t"
+ "d_sleep 0x%x\n\t",
+ crb_num,
+ icrbd.d_bteaddr,
+ icrbd.d_bteop,
+ icrbd.d_pripsc,
+ icrbd.d_pricnt,
+ icrbd.d_sleep);
+ printk("CRB %d regE\n\t"
+ "icrbe_timeout 0x%x\n\t"
+ "icrbe_context 0x%x\n\t"
+ "icrbe_toutvld 0x%x\n\t"
+ "icrbe_ctxtvld 0x%x\n\t",
+ crb_num,
+ icrbe.icrbe_timeout,
+ icrbe.icrbe_context,
+ icrbe.icrbe_toutvld,
+ icrbe.icrbe_ctxtvld);
+}
+
/*
* hubiio_crb_error_handler
*
@@ -317,7 +439,7 @@ char *hubiio_crb_errors[] = {
*/
int
-hubiio_crb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo)
+hubiio_crb_error_handler(vertex_hdl_t hub_v, hubinfo_t hinfo)
{
cnodeid_t cnode;
nasid_t nasid;
@@ -335,6 +457,9 @@ hubiio_crb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo)
cnode = NASID_TO_COMPACT_NODEID(nasid);
/*
+ * XXX - Add locking for any recovery actions
+ */
+ /*
* Scan through all CRBs in the Hub, and handle the errors
* in any of the CRBs marked.
*/
@@ -373,16 +498,11 @@ hubiio_crb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo)
else /* b_initiator bit 2 gives BTE number */
bte_num = (icrbb.b_initiator & 0x4) >> 2;
- /* >>> bte_crb_error_handler needs to be
- * broken into two parts. The first should
- * cleanup the CRB. The second should wait
- * until all bte related CRB's are complete
- * and then do the error reset.
- */
+ hubiio_crb_free(hinfo, i);
+
bte_crb_error_handler(hub_v, bte_num,
i, &ioerror,
icrbd.d_bteop);
- hubiio_crb_free(hinfo, i);
num_errors++;
continue;
}
@@ -430,6 +550,86 @@ hubiio_crb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo)
IOERROR_SETVALUE(&ioerror, tnum, icrba.a_tnum);
}
+ if (icrbb.b_error) {
+ /*
+ * CRB 'i' has some error. Identify the type of error,
+ * and try to handle it.
+ *
+ */
+ switch(icrbb.b_ecode) {
+ case IIO_ICRB_ECODE_PERR:
+ case IIO_ICRB_ECODE_WERR:
+ case IIO_ICRB_ECODE_AERR:
+ case IIO_ICRB_ECODE_PWERR:
+ case IIO_ICRB_ECODE_TOUT:
+ case IIO_ICRB_ECODE_XTERR:
+ printk("Shub II CRB %d: error %s on hub cnodeid: %d",
+ i, hubiio_crb_errors[icrbb.b_ecode], cnode);
+ /*
+ * Any sort of write error is mostly due to
+ * bad programming (Note it's not a timeout.)
+ * So, invoke hub_iio_error_handler with
+ * appropriate information.
+ */
+ IOERROR_SETVALUE(&ioerror,errortype,icrbb.b_ecode);
+
+ /* Go through the error bit lookup phase */
+ if (error_state_set(hub_v, ERROR_STATE_LOOKUP) ==
+ ERROR_RETURN_CODE_CANNOT_SET_STATE)
+ return(IOERROR_UNHANDLED);
+ rc = hub_ioerror_handler(
+ hub_v,
+ DMA_WRITE_ERROR,
+ MODE_DEVERROR,
+ &ioerror);
+ if (rc == IOERROR_HANDLED) {
+ rc = hub_ioerror_handler(
+ hub_v,
+ DMA_WRITE_ERROR,
+ MODE_DEVREENABLE,
+ &ioerror);
+ }else {
+ printk("Unable to handle %s on hub %d",
+ hubiio_crb_errors[icrbb.b_ecode],
+ cnode);
+ /* panic; */
+ }
+ /* Go to Next error */
+ print_crb_fields(i, icrba, icrbb, icrbc,
+ icrbd, icrbe);
+ hubiio_crb_free(hinfo, i);
+ continue;
+ case IIO_ICRB_ECODE_PRERR:
+ case IIO_ICRB_ECODE_DERR:
+ printk("Shub II CRB %d: error %s on hub : %d",
+ i, hubiio_crb_errors[icrbb.b_ecode], cnode);
+ /* panic */
+ default:
+ printk("Shub II CRB error (code : %d) on hub : %d",
+ icrbb.b_ecode, cnode);
+ /* panic */
+ }
+ }
+ /*
+ * Error is not indicated via the errcode field
+ * Check other error indications in this register.
+ */
+ if (icrbb.b_xerr) {
+ printk("Shub II CRB %d: Xtalk Packet with error bit set to hub %d",
+ i, cnode);
+ /* panic */
+ }
+ if (icrbb.b_lnetuce) {
+ printk("Shub II CRB %d: Uncorrectable data error detected on data "
+ " from NUMAlink to node %d",
+ i, cnode);
+ /* panic */
+ }
+ print_crb_fields(i, icrba, icrbb, icrbc, icrbd, icrbe);
+
+
+
+
if (icrbb.b_error) {
/*
@@ -488,7 +688,7 @@ hubiio_crb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo)
default:
panic("Fatal error (code : %d) on hub : %d",
- cnode);
+ icrbb.b_ecode, cnode);
/*NOTREACHED*/
}
@@ -568,7 +768,7 @@ hubii_check_widget_disabled(nasid_t nasid, int wnum)
* Cleanup involes freeing the PRB register
*/
static void
-hubii_prb_handler(devfs_handle_t hub_v, hubinfo_t hinfo, int wnum)
+hubii_prb_handler(vertex_hdl_t hub_v, hubinfo_t hinfo, int wnum)
{
nasid_t nasid;
@@ -576,13 +776,13 @@ hubii_prb_handler(devfs_handle_t hub_v, hubinfo_t hinfo, int wnum)
/*
* Clear error bit by writing to IECLR register.
*/
- REMOTE_HUB_S(nasid, IIO_IO_ERR_CLR, (1 << wnum));
+ REMOTE_HUB_S(nasid, IIO_IECLR, (1 << wnum));
/*
* PIO Write to Widget 'i' got into an error.
* Invoke hubiio_error_handler with this information.
*/
- printk( "Hub nasid %d got a PIO Write error from widget %d, cleaning up and continuing",
- nasid, wnum);
+ printk( "Hub nasid %d got a PIO Write error from widget %d, "
+ "cleaning up and continuing", nasid, wnum);
/*
* XXX
* It may be necessary to adjust IO PRB counter
@@ -591,7 +791,7 @@ hubii_prb_handler(devfs_handle_t hub_v, hubinfo_t hinfo, int wnum)
}
int
-hubiio_prb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo)
+hubiio_prb_error_handler(vertex_hdl_t hub_v, hubinfo_t hinfo)
{
int wnum;
nasid_t nasid;
diff --git a/arch/ia64/sn/io/sn2/shubio.c b/arch/ia64/sn/io/sn2/shubio.c
index 90294319bf09dc..676498ec0a095b 100644
--- a/arch/ia64/sn/io/sn2/shubio.c
+++ b/arch/ia64/sn/io/sn2/shubio.c
@@ -30,8 +30,8 @@
#include <asm/sn/sn2/shubio.h>
-error_state_t error_state_get(devfs_handle_t v);
-error_return_code_t error_state_set(devfs_handle_t v,error_state_t new_state);
+error_state_t error_state_get(vertex_hdl_t v);
+error_return_code_t error_state_set(vertex_hdl_t v,error_state_t new_state);
/*
@@ -42,7 +42,7 @@ error_return_code_t error_state_set(devfs_handle_t v,error_state_t new_state);
/*ARGSUSED*/
int
hub_xp_error_handler(
- devfs_handle_t hub_v,
+ vertex_hdl_t hub_v,
nasid_t nasid,
int error_code,
ioerror_mode_t mode,
@@ -50,7 +50,7 @@ hub_xp_error_handler(
{
/*REFERENCED*/
hubreg_t iio_imem;
- devfs_handle_t xswitch;
+ vertex_hdl_t xswitch;
error_state_t e_state;
cnodeid_t cnode;
@@ -148,7 +148,7 @@ is_widget_pio_enabled(ioerror_t *ioerror)
*/
int
hub_ioerror_handler(
- devfs_handle_t hub_v,
+ vertex_hdl_t hub_v,
int error_code,
int mode,
struct io_error_s *ioerror)
@@ -158,6 +158,7 @@ hub_ioerror_handler(
int retval = 0;
/*REFERENCED*/
iopaddr_t p;
+ caddr_t cp;
IOERROR_DUMP("hub_ioerror_handler", error_code, mode, ioerror);
@@ -193,14 +194,14 @@ hub_ioerror_handler(
* This is typically true for user mode bus errors while
* accessing I/O space.
*/
- IOERROR_GETVALUE(p,ioerror,vaddr);
- if (p){
+ IOERROR_GETVALUE(cp,ioerror,vaddr);
+ if (cp){
/*
* If neither in small window nor in large window range,
* outright reject it.
*/
- IOERROR_GETVALUE(p,ioerror,vaddr);
- if (NODE_SWIN_ADDR(nasid, (paddr_t)p)){
+ IOERROR_GETVALUE(cp,ioerror,vaddr);
+ if (NODE_SWIN_ADDR(nasid, (paddr_t)cp)){
iopaddr_t hubaddr;
xwidgetnum_t widgetnum;
iopaddr_t xtalkaddr;
@@ -216,7 +217,7 @@ hub_ioerror_handler(
IOERROR_SETVALUE(ioerror,xtalkaddr,xtalkaddr);
- } else if (NODE_BWIN_ADDR(nasid, (paddr_t)p)){
+ } else if (NODE_BWIN_ADDR(nasid, (paddr_t)cp)){
/*
* Address corresponds to large window space.
* Convert it to xtalk address.
@@ -428,11 +429,6 @@ end:
return retval;
}
-#define L_BITSMINOR 18
-#define L_MAXMAJ 0x1ff
-#define emajor(x) (int )(((unsigned )(x)>>L_BITSMINOR) & L_MAXMAJ)
-#define dev_is_vertex(dev) (emajor((dev_t)(dev)) == 0)
-
#define INFO_LBL_ERROR_STATE "error_state"
#define v_error_state_get(v,s) \
@@ -454,12 +450,12 @@ hwgraph_info_add_LBL(v,INFO_LBL_ERROR_STATE, (arbitrary_info_t)s))
* current state otherwise
*/
error_state_t
-error_state_get(devfs_handle_t v)
+error_state_get(vertex_hdl_t v)
{
error_state_t s;
/* Check if we have a valid hwgraph vertex */
- if (!dev_is_vertex(v))
+ if ( v == (vertex_hdl_t)0 )
return(ERROR_STATE_NONE);
/* Get the labelled info hanging off the vertex which corresponds
@@ -479,13 +475,13 @@ error_state_get(devfs_handle_t v)
* ERROR_RETURN_CODE_SUCCESS otherwise
*/
error_return_code_t
-error_state_set(devfs_handle_t v,error_state_t new_state)
+error_state_set(vertex_hdl_t v,error_state_t new_state)
{
error_state_t old_state;
boolean_t replace = B_TRUE;
/* Check if we have a valid hwgraph vertex */
- if (!dev_is_vertex(v))
+ if ( v == (vertex_hdl_t)0 )
return(ERROR_RETURN_CODE_GENERAL_FAILURE);
diff --git a/arch/ia64/sn/io/sn2/xbow.c b/arch/ia64/sn/io/sn2/xbow.c
index dd28e1158d180e..6b229ba28222c2 100644
--- a/arch/ia64/sn/io/sn2/xbow.c
+++ b/arch/ia64/sn/io/sn2/xbow.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/interrupt.h>
#include <asm/sn/sgi.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn2/sn_private.h>
@@ -19,7 +20,6 @@
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
-#include <asm/sn/hack.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/xtalk/xtalk_private.h>
#include <asm/sn/simulator.h>
@@ -60,9 +60,9 @@ int xbow_devflag = D_MP;
typedef struct xbow_soft_s *xbow_soft_t;
struct xbow_soft_s {
- devfs_handle_t conn; /* our connection point */
- devfs_handle_t vhdl; /* xbow's private vertex */
- devfs_handle_t busv; /* the xswitch vertex */
+ vertex_hdl_t conn; /* our connection point */
+ vertex_hdl_t vhdl; /* xbow's private vertex */
+ vertex_hdl_t busv; /* the xswitch vertex */
xbow_t *base; /* PIO pointer to crossbow chip */
char *name; /* hwgraph name */
@@ -90,36 +90,35 @@ struct xbow_soft_s {
*/
void xbow_mlreset(xbow_t *);
-void xbow_init(void);
-int xbow_attach(devfs_handle_t);
+int xbow_attach(vertex_hdl_t);
-int xbow_open(devfs_handle_t *, int, int, cred_t *);
-int xbow_close(devfs_handle_t, int, int, cred_t *);
+static int xbow_open(struct inode *, struct file *);
-int xbow_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
-int xbow_unmap(devfs_handle_t, vhandl_t *);
-int xbow_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
+int xbow_close(vertex_hdl_t, int, int, cred_t *);
+
+int xbow_map(vertex_hdl_t, vhandl_t *, off_t, size_t, uint);
+int xbow_unmap(vertex_hdl_t, vhandl_t *);
+int xbow_ioctl(vertex_hdl_t, int, void *, int, struct cred *, int *);
int xbow_widget_present(xbow_t *, int);
static int xbow_link_alive(xbow_t *, int);
-devfs_handle_t xbow_widget_lookup(devfs_handle_t, int);
+vertex_hdl_t xbow_widget_lookup(vertex_hdl_t, int);
void xbow_intr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
-void xbow_update_perf_counters(devfs_handle_t);
-xbow_perf_link_t *xbow_get_perf_counters(devfs_handle_t);
-int xbow_enable_perf_counter(devfs_handle_t, int, int, int);
-xbow_link_status_t *xbow_get_llp_status(devfs_handle_t);
-void xbow_update_llp_status(devfs_handle_t);
+void xbow_update_perf_counters(vertex_hdl_t);
+xbow_perf_link_t *xbow_get_perf_counters(vertex_hdl_t);
+int xbow_enable_perf_counter(vertex_hdl_t, int, int, int);
+xbow_link_status_t *xbow_get_llp_status(vertex_hdl_t);
+void xbow_update_llp_status(vertex_hdl_t);
-int xbow_disable_llp_monitor(devfs_handle_t);
-int xbow_enable_llp_monitor(devfs_handle_t);
-int xbow_prio_bw_alloc(devfs_handle_t, xwidgetnum_t, xwidgetnum_t,
+int xbow_disable_llp_monitor(vertex_hdl_t);
+int xbow_enable_llp_monitor(vertex_hdl_t);
+int xbow_prio_bw_alloc(vertex_hdl_t, xwidgetnum_t, xwidgetnum_t,
unsigned long long, unsigned long long);
static void xbow_setwidint(xtalk_intr_t);
-void idbg_xbowregs(int64_t);
xswitch_reset_link_f xbow_reset_link;
@@ -164,8 +163,8 @@ xbow_mmap(struct file * file, struct vm_area_struct * vma)
phys_addr = (unsigned long)file->private_data & ~0xc000000000000000; /* Mask out the Uncache bits */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_RESERVED | VM_IO;
- error = io_remap_page_range(vma, vma->vm_start, phys_addr,
- vma->vm_end - vma->vm_start,
+ error = io_remap_page_range(vma, phys_addr, vma->vm_start,
+ vma->vm_end-vma->vm_start,
vma->vm_page_prot);
return(error);
}
@@ -188,39 +187,6 @@ xbow_mlreset(xbow_t * xbow)
{
}
-/*
- * xbow_init: called with the rest of the device
- * driver XXX_init routines. This platform *might*
- * have a Crossbow chip, or even several, but it
- * might have none. Register with the crosstalk
- * generic provider so when we encounter the chip
- * the right magic happens.
- */
-void
-xbow_init(void)
-{
-
-#if DEBUG && ATTACH_DEBUG
- printk("xbow_init\n");
-#endif
-
- xwidget_driver_register(PXBOW_WIDGET_PART_NUM,
- 0, /* XXBOW_WIDGET_MFGR_NUM, */
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-
-
- xwidget_driver_register(XXBOW_WIDGET_PART_NUM,
- 0, /* XXBOW_WIDGET_MFGR_NUM, */
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-
- xwidget_driver_register(XBOW_WIDGET_PART_NUM,
- XBOW_WIDGET_MFGR_NUM,
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-}
-
#ifdef XBRIDGE_REGS_SIM
/* xbow_set_simulated_regs: sets xbow regs as needed
* for powering through the boot
@@ -257,11 +223,11 @@ xbow_set_simulated_regs(xbow_t *xbow, int port)
/*ARGSUSED */
int
-xbow_attach(devfs_handle_t conn)
+xbow_attach(vertex_hdl_t conn)
{
/*REFERENCED */
- devfs_handle_t vhdl;
- devfs_handle_t busv;
+ vertex_hdl_t vhdl;
+ vertex_hdl_t busv;
xbow_t *xbow;
xbow_soft_t soft;
int port;
@@ -322,10 +288,10 @@ xbow_attach(devfs_handle_t conn)
* file ops.
*/
vhdl = NULL;
- vhdl = devfs_register(conn, EDGE_LBL_XBOW,
- DEVFS_FL_AUTO_DEVNUM, 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
- &xbow_fops, (void *)xbow);
+ vhdl = hwgraph_register(conn, EDGE_LBL_XBOW, 0,
+ DEVFS_FL_AUTO_DEVNUM, 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+ (struct file_operations *)&xbow_fops, (void *)xbow);
if (!vhdl) {
printk(KERN_WARNING "xbow_attach: Unable to create char device for xbow conn %p\n",
(void *)conn);
@@ -393,6 +359,14 @@ xbow_attach(devfs_handle_t conn)
*/
intr_hdl = xtalk_intr_alloc(conn, (device_desc_t)0, vhdl);
ASSERT(intr_hdl != NULL);
+
+ {
+ int irq = ((hub_intr_t)intr_hdl)->i_bit;
+ int cpu = ((hub_intr_t)intr_hdl)->i_cpuid;
+
+ intr_unreserve_level(cpu, irq);
+ ((hub_intr_t)intr_hdl)->i_bit = SGI_XBOW_ERROR;
+ }
xtalk_intr_connect(intr_hdl,
(intr_func_t) xbow_errintr_handler,
@@ -400,19 +374,9 @@ xbow_attach(devfs_handle_t conn)
(xtalk_intr_setfunc_t) xbow_setwidint,
(void *) xbow);
- request_irq(CPU_VECTOR_TO_IRQ(((hub_intr_t)intr_hdl)->i_cpuid,
- ((hub_intr_t)intr_hdl)->i_bit),
- (intr_func_t)xbow_errintr_handler, 0, "XBOW error",
+ request_irq(SGI_XBOW_ERROR, (void *)xbow_errintr_handler, SA_SHIRQ, "XBOW error",
(intr_arg_t) soft);
-#ifdef BUS_INT_WAR_NOT_YET
- {
- void sn_add_polled_interrupt(int, int);
- sn_add_polled_interrupt(CPU_VECTOR_TO_IRQ(((hub_intr_t)intr_hdl)->i_cpuid,
- ((hub_intr_t)intr_hdl)->i_bit), 5000);
- }
-#endif
-
/*
* Enable xbow error interrupts
@@ -483,24 +447,24 @@ xbow_attach(devfs_handle_t conn)
}
/*ARGSUSED */
-int
-xbow_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
+static int
+xbow_open(struct inode *xx, struct file *yy)
{
return 0;
}
/*ARGSUSED */
int
-xbow_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
+xbow_close(vertex_hdl_t dev, int oflag, int otyp, cred_t *crp)
{
return 0;
}
/*ARGSUSED */
int
-xbow_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
+xbow_map(vertex_hdl_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
{
- devfs_handle_t vhdl = dev_to_vhdl(dev);
+ vertex_hdl_t vhdl = dev_to_vhdl(dev);
xbow_soft_t soft = xbow_soft_get(vhdl);
int error;
@@ -513,7 +477,7 @@ xbow_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
/*ARGSUSED */
int
-xbow_unmap(devfs_handle_t dev, vhandl_t *vt)
+xbow_unmap(vertex_hdl_t dev, vhandl_t *vt)
{
return 0;
}
@@ -523,9 +487,9 @@ xbow_unmap(devfs_handle_t dev, vhandl_t *vt)
* be good enough.
*/
xwidgetnum_t
-xbow_widget_num_get(devfs_handle_t dev)
+xbow_widget_num_get(vertex_hdl_t dev)
{
- devfs_handle_t tdev;
+ vertex_hdl_t tdev;
char devname[MAXDEVNAME];
xwidget_info_t xwidget_info;
int i;
@@ -556,19 +520,19 @@ xbow_widget_num_get(devfs_handle_t dev)
}
int
-xbow_ioctl(devfs_handle_t dev,
+xbow_ioctl(vertex_hdl_t dev,
int cmd,
void *arg,
int flag,
struct cred *cr,
int *rvalp)
{
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
int error = 0;
#if defined (DEBUG)
int rc;
- devfs_handle_t conn;
+ vertex_hdl_t conn;
struct xwidget_info_s *xwidget_info;
xbow_soft_t xbow_soft;
#endif
@@ -648,12 +612,12 @@ xbow_link_alive(xbow_t * xbow, int port)
* specified.
* If not found, return 0.
*/
-devfs_handle_t
-xbow_widget_lookup(devfs_handle_t vhdl,
+vertex_hdl_t
+xbow_widget_lookup(vertex_hdl_t vhdl,
int widgetnum)
{
xswitch_info_t xswitch_info;
- devfs_handle_t conn;
+ vertex_hdl_t conn;
xswitch_info = xswitch_info_get(vhdl);
conn = xswitch_info_vhdl_get(xswitch_info, widgetnum);
@@ -713,48 +677,14 @@ xbow_intr_preset(void *which_widget,
XEM_ADD_NVAR("ioe." #n, p); \
}
-#ifdef LATER
-static void
-xem_add_ioe(ioerror_t *ioe)
-{
- union tmp {
- ushort stmp;
- unsigned long long lltmp;
- cpuid_t cputmp;
- cnodeid_t cntmp;
- iopaddr_t iotmp;
- caddr_t catmp;
- paddr_t patmp;
- } tmp;
-
- XEM_ADD_IOEF(tmp.stmp, errortype);
- XEM_ADD_IOEF(tmp.stmp, widgetnum);
- XEM_ADD_IOEF(tmp.stmp, widgetdev);
- XEM_ADD_IOEF(tmp.cputmp, srccpu);
- XEM_ADD_IOEF(tmp.cntmp, srcnode);
- XEM_ADD_IOEF(tmp.cntmp, errnode);
- XEM_ADD_IOEF(tmp.iotmp, sysioaddr);
- XEM_ADD_IOEF(tmp.iotmp, xtalkaddr);
- XEM_ADD_IOEF(tmp.iotmp, busspace);
- XEM_ADD_IOEF(tmp.iotmp, busaddr);
- XEM_ADD_IOEF(tmp.catmp, vaddr);
- XEM_ADD_IOEF(tmp.patmp, memaddr);
- XEM_ADD_IOEF(tmp.catmp, epc);
- XEM_ADD_IOEF(tmp.catmp, ef);
- XEM_ADD_IOEF(tmp.stmp, tnum);
-}
-
-#define XEM_ADD_IOE() (xem_add_ioe(ioe))
-#endif /* LATER */
-
-int xbow_xmit_retry_errors = 0;
+int xbow_xmit_retry_errors;
int
xbow_xmit_retry_error(xbow_soft_t soft,
int port)
{
xswitch_info_t info;
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
widget_cfg_t *wid;
widgetreg_t id;
int part;
@@ -904,7 +834,7 @@ xbow_errintr_handler(int irq, void *arg, struct pt_regs *ep)
link_pend &= ~XB_STAT_XMT_RTRY_ERR;
}
if (link_pend) {
- devfs_handle_t xwidget_vhdl;
+ vertex_hdl_t xwidget_vhdl;
char *xwidget_name;
/* Get the widget name corresponding to the current
@@ -956,12 +886,6 @@ xbow_errintr_handler(int irq, void *arg, struct pt_regs *ep)
XEM_ADD_VAR(link_status);
XEM_ADD_VAR(link_aux_status);
-#ifdef LATER
- if (dump_ioe) {
- XEM_ADD_IOE();
- dump_ioe = 0;
- }
-#endif
#if !DEBUG
}
#endif
@@ -1026,8 +950,8 @@ xbow_error_handler(
xbow_soft_t soft = (xbow_soft_t) einfo;
int port;
- devfs_handle_t conn;
- devfs_handle_t busv;
+ vertex_hdl_t conn;
+ vertex_hdl_t busv;
xbow_t *xbow = soft->base;
xbowreg_t wid_stat;
@@ -1279,7 +1203,7 @@ xbow_error_handler(
}
void
-xbow_update_perf_counters(devfs_handle_t vhdl)
+xbow_update_perf_counters(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
@@ -1307,7 +1231,7 @@ xbow_update_perf_counters(devfs_handle_t vhdl)
}
xbow_perf_link_t *
-xbow_get_perf_counters(devfs_handle_t vhdl)
+xbow_get_perf_counters(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_perf_link_t *xbow_perf_link = xbow_soft->xbow_perflink;
@@ -1316,7 +1240,7 @@ xbow_get_perf_counters(devfs_handle_t vhdl)
}
int
-xbow_enable_perf_counter(devfs_handle_t vhdl, int link, int mode, int counter)
+xbow_enable_perf_counter(vertex_hdl_t vhdl, int link, int mode, int counter)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
@@ -1370,7 +1294,7 @@ xbow_enable_perf_counter(devfs_handle_t vhdl, int link, int mode, int counter)
}
xbow_link_status_t *
-xbow_get_llp_status(devfs_handle_t vhdl)
+xbow_get_llp_status(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
@@ -1379,7 +1303,7 @@ xbow_get_llp_status(devfs_handle_t vhdl)
}
void
-xbow_update_llp_status(devfs_handle_t vhdl)
+xbow_update_llp_status(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
@@ -1387,7 +1311,7 @@ xbow_update_llp_status(devfs_handle_t vhdl)
xbwX_stat_t lnk_sts;
xbow_aux_link_status_t aux_sts;
int link;
- devfs_handle_t xwidget_vhdl;
+ vertex_hdl_t xwidget_vhdl;
char *xwidget_name;
xbow = (xbow_t *) xbow_soft->base;
@@ -1421,7 +1345,7 @@ xbow_update_llp_status(devfs_handle_t vhdl)
}
int
-xbow_disable_llp_monitor(devfs_handle_t vhdl)
+xbow_disable_llp_monitor(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
int port;
@@ -1436,7 +1360,7 @@ xbow_disable_llp_monitor(devfs_handle_t vhdl)
}
int
-xbow_enable_llp_monitor(devfs_handle_t vhdl)
+xbow_enable_llp_monitor(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
@@ -1446,7 +1370,7 @@ xbow_enable_llp_monitor(devfs_handle_t vhdl)
int
-xbow_reset_link(devfs_handle_t xconn_vhdl)
+xbow_reset_link(vertex_hdl_t xconn_vhdl)
{
xwidget_info_t widget_info;
xwidgetnum_t port;
@@ -1469,7 +1393,7 @@ xbow_reset_link(devfs_handle_t xconn_vhdl)
xbow = XBOW_K1PTR;
#else
{
- devfs_handle_t xbow_vhdl;
+ vertex_hdl_t xbow_vhdl;
xbow_soft_t xbow_soft;
hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl);
@@ -1502,46 +1426,6 @@ xbow_reset_link(devfs_handle_t xconn_vhdl)
return 0;
}
-/*
- * Dump xbow registers.
- * input parameter is either a pointer to
- * the xbow chip or the vertex handle for
- * an xbow vertex.
- */
-void
-idbg_xbowregs(int64_t regs)
-{
- xbow_t *xbow;
- int i;
- xb_linkregs_t *link;
-
- xbow = (xbow_t *) regs;
-
-#ifdef LATER
- qprintf("Printing xbow registers starting at 0x%x\n", xbow);
- qprintf("wid %x status %x erruppr %x errlower %x control %x timeout %x\n",
- xbow->xb_wid_id, xbow->xb_wid_stat, xbow->xb_wid_err_upper,
- xbow->xb_wid_err_lower, xbow->xb_wid_control,
- xbow->xb_wid_req_timeout);
- qprintf("intr uppr %x lower %x errcmd %x llp ctrl %x arb_reload %x\n",
- xbow->xb_wid_int_upper, xbow->xb_wid_int_lower,
- xbow->xb_wid_err_cmdword, xbow->xb_wid_llp,
- xbow->xb_wid_arb_reload);
-#endif
-
- for (i = 8; i <= 0xf; i++) {
- link = &xbow->xb_link(i);
-#ifdef LATER
- qprintf("Link %d registers\n", i);
- qprintf("\tctrl %x stat %x arbuppr %x arblowr %x auxstat %x\n",
- link->link_control, link->link_status,
- link->link_arb_upper, link->link_arb_lower,
- link->link_aux_status);
-#endif
- }
-}
-
-
#define XBOW_ARB_RELOAD_TICKS 25
/* granularity: 4 MB/s, max: 124 MB/s */
#define GRANULARITY ((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)
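Per the comment, the arbiter's granularity is 100 MB/s spread over 25 reload ticks, so GRANULARITY works out to 4,000,000 bytes/sec per tick. Turning a requested bandwidth into a tick count is then a rounded-up integer division; the helper below is purely illustrative (the name is invented and it is not the kernel's own conversion routine).

#define XBOW_ARB_RELOAD_TICKS   25
#define GRANULARITY             ((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)

/* Round a bytes/sec request up to whole arbitration ticks. */
static int bw_to_ticks(unsigned long long bytes_per_sec)
{
        return (int)((bytes_per_sec + GRANULARITY - 1) / GRANULARITY);
}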
@@ -1601,7 +1485,7 @@ xbow_gbr_to_bytes(int gbr)
* If bandwidth allocation is successful, return success else return failure.
*/
int
-xbow_prio_bw_alloc(devfs_handle_t vhdl,
+xbow_prio_bw_alloc(vertex_hdl_t vhdl,
xwidgetnum_t src_wid,
xwidgetnum_t dest_wid,
unsigned long long old_alloc_bw,
diff --git a/arch/ia64/sn/io/sn2/xtalk.c b/arch/ia64/sn/io/sn2/xtalk.c
index ac9988596bf333..fbcec3e395e0bf 100644
--- a/arch/ia64/sn/io/sn2/xtalk.c
+++ b/arch/ia64/sn/io/sn2/xtalk.c
@@ -37,8 +37,6 @@
char widget_info_fingerprint[] = "widget_info";
-cdl_p xtalk_registry = NULL;
-
#define DEV_FUNC(dev,func) hub_##func
#define CAST_PIOMAP(x) ((hub_piomap_t)(x))
#define CAST_DMAMAP(x) ((hub_dmamap_t)(x))
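DEV_FUNC above is the single-provider shortcut: because the hub is the only crosstalk provider in this configuration, DEV_FUNC(dev, intr_alloc) token-pastes straight to hub_intr_alloc at compile time, while the "#if !defined(DEV_FUNC)" comment further down keeps the door open for run-time dispatch through a per-vertex ops table. A toy but complete illustration of the two shapes follows; the stub functions and struct are stand-ins, not the real hub_* entry points.

#include <stdio.h>

struct provider_ops {
        void (*startup)(const char *name);
};

static void hub_startup(const char *name)  { printf("hub startup: %s\n", name); }
static void xbow_startup(const char *name) { printf("xbow startup: %s\n", name); }

/* Single-provider build: the preprocessor binds every call to hub_*. */
#define DEV_FUNC(dev, func)     hub_##func

int main(void)
{
        /* Compile-time dispatch: expands to hub_startup("widget0"). */
        DEV_FUNC(ignored, startup)("widget0");

        /* Run-time dispatch: the multi-provider case indexes an ops table. */
        struct provider_ops xbow = { .startup = xbow_startup };
        xbow.startup("widget8");
        return 0;
}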
@@ -47,71 +45,70 @@ cdl_p xtalk_registry = NULL;
/* =====================================================================
* Function Table of Contents
*/
-xtalk_piomap_t xtalk_piomap_alloc(devfs_handle_t, device_desc_t, iopaddr_t, size_t, size_t, unsigned);
+xtalk_piomap_t xtalk_piomap_alloc(vertex_hdl_t, device_desc_t, iopaddr_t, size_t, size_t, unsigned);
void xtalk_piomap_free(xtalk_piomap_t);
caddr_t xtalk_piomap_addr(xtalk_piomap_t, iopaddr_t, size_t);
void xtalk_piomap_done(xtalk_piomap_t);
-caddr_t xtalk_piotrans_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, unsigned);
-caddr_t xtalk_pio_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, xtalk_piomap_t *, unsigned);
+caddr_t xtalk_piotrans_addr(vertex_hdl_t, device_desc_t, iopaddr_t, size_t, unsigned);
+caddr_t xtalk_pio_addr(vertex_hdl_t, device_desc_t, iopaddr_t, size_t, xtalk_piomap_t *, unsigned);
void xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *);
caddr_t xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
static caddr_t null_xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
-xtalk_dmamap_t xtalk_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
+xtalk_dmamap_t xtalk_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
void xtalk_dmamap_free(xtalk_dmamap_t);
iopaddr_t xtalk_dmamap_addr(xtalk_dmamap_t, paddr_t, size_t);
alenlist_t xtalk_dmamap_list(xtalk_dmamap_t, alenlist_t, unsigned);
void xtalk_dmamap_done(xtalk_dmamap_t);
-iopaddr_t xtalk_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t xtalk_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
+iopaddr_t xtalk_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
+alenlist_t xtalk_dmatrans_list(vertex_hdl_t, device_desc_t, alenlist_t, unsigned);
void xtalk_dmamap_drain(xtalk_dmamap_t);
-void xtalk_dmaaddr_drain(devfs_handle_t, iopaddr_t, size_t);
-void xtalk_dmalist_drain(devfs_handle_t, alenlist_t);
-xtalk_intr_t xtalk_intr_alloc(devfs_handle_t, device_desc_t, devfs_handle_t);
-xtalk_intr_t xtalk_intr_alloc_nothd(devfs_handle_t, device_desc_t, devfs_handle_t);
+void xtalk_dmaaddr_drain(vertex_hdl_t, iopaddr_t, size_t);
+void xtalk_dmalist_drain(vertex_hdl_t, alenlist_t);
+xtalk_intr_t xtalk_intr_alloc(vertex_hdl_t, device_desc_t, vertex_hdl_t);
+xtalk_intr_t xtalk_intr_alloc_nothd(vertex_hdl_t, device_desc_t, vertex_hdl_t);
void xtalk_intr_free(xtalk_intr_t);
int xtalk_intr_connect(xtalk_intr_t, intr_func_t, intr_arg_t, xtalk_intr_setfunc_t, void *);
void xtalk_intr_disconnect(xtalk_intr_t);
-devfs_handle_t xtalk_intr_cpu_get(xtalk_intr_t);
-int xtalk_error_handler(devfs_handle_t, int, ioerror_mode_t, ioerror_t *);
-int xtalk_error_devenable(devfs_handle_t, int, int);
-void xtalk_provider_startup(devfs_handle_t);
-void xtalk_provider_shutdown(devfs_handle_t);
-devfs_handle_t xtalk_intr_dev_get(xtalk_intr_t);
+vertex_hdl_t xtalk_intr_cpu_get(xtalk_intr_t);
+int xtalk_error_handler(vertex_hdl_t, int, ioerror_mode_t, ioerror_t *);
+int xtalk_error_devenable(vertex_hdl_t, int, int);
+void xtalk_provider_startup(vertex_hdl_t);
+void xtalk_provider_shutdown(vertex_hdl_t);
+vertex_hdl_t xtalk_intr_dev_get(xtalk_intr_t);
xwidgetnum_t xtalk_intr_target_get(xtalk_intr_t);
xtalk_intr_vector_t xtalk_intr_vector_get(xtalk_intr_t);
iopaddr_t xtalk_intr_addr_get(struct xtalk_intr_s *);
void *xtalk_intr_sfarg_get(xtalk_intr_t);
-devfs_handle_t xtalk_pio_dev_get(xtalk_piomap_t);
+vertex_hdl_t xtalk_pio_dev_get(xtalk_piomap_t);
xwidgetnum_t xtalk_pio_target_get(xtalk_piomap_t);
iopaddr_t xtalk_pio_xtalk_addr_get(xtalk_piomap_t);
ulong xtalk_pio_mapsz_get(xtalk_piomap_t);
caddr_t xtalk_pio_kvaddr_get(xtalk_piomap_t);
-devfs_handle_t xtalk_dma_dev_get(xtalk_dmamap_t);
+vertex_hdl_t xtalk_dma_dev_get(xtalk_dmamap_t);
xwidgetnum_t xtalk_dma_target_get(xtalk_dmamap_t);
-xwidget_info_t xwidget_info_chk(devfs_handle_t);
-xwidget_info_t xwidget_info_get(devfs_handle_t);
-void xwidget_info_set(devfs_handle_t, xwidget_info_t);
-devfs_handle_t xwidget_info_dev_get(xwidget_info_t);
+xwidget_info_t xwidget_info_chk(vertex_hdl_t);
+xwidget_info_t xwidget_info_get(vertex_hdl_t);
+void xwidget_info_set(vertex_hdl_t, xwidget_info_t);
+vertex_hdl_t xwidget_info_dev_get(xwidget_info_t);
xwidgetnum_t xwidget_info_id_get(xwidget_info_t);
-devfs_handle_t xwidget_info_master_get(xwidget_info_t);
+vertex_hdl_t xwidget_info_master_get(xwidget_info_t);
xwidgetnum_t xwidget_info_masterid_get(xwidget_info_t);
xwidget_part_num_t xwidget_info_part_num_get(xwidget_info_t);
xwidget_mfg_num_t xwidget_info_mfg_num_get(xwidget_info_t);
char *xwidget_info_name_get(xwidget_info_t);
-void xtalk_init(void);
-void xtalk_provider_register(devfs_handle_t, xtalk_provider_t *);
-void xtalk_provider_unregister(devfs_handle_t);
-xtalk_provider_t *xtalk_provider_fns_get(devfs_handle_t);
+void xtalk_provider_register(vertex_hdl_t, xtalk_provider_t *);
+void xtalk_provider_unregister(vertex_hdl_t);
+xtalk_provider_t *xtalk_provider_fns_get(vertex_hdl_t);
int xwidget_driver_register(xwidget_part_num_t,
xwidget_mfg_num_t,
char *, unsigned);
void xwidget_driver_unregister(char *);
-int xwidget_register(xwidget_hwid_t, devfs_handle_t,
- xwidgetnum_t, devfs_handle_t,
- xwidgetnum_t, async_attach_t);
-int xwidget_unregister(devfs_handle_t);
-void xwidget_reset(devfs_handle_t);
-char *xwidget_name_get(devfs_handle_t);
+int xwidget_register(xwidget_hwid_t, vertex_hdl_t,
+ xwidgetnum_t, vertex_hdl_t,
+ xwidgetnum_t);
+int xwidget_unregister(vertex_hdl_t);
+void xwidget_reset(vertex_hdl_t);
+char *xwidget_name_get(vertex_hdl_t);
#if !defined(DEV_FUNC)
/*
* There is more than one possible provider
@@ -126,7 +123,7 @@ char *xwidget_name_get(devfs_handle_t);
#define CAST_INTR(x) ((xtalk_intr_t)(x))
static xtalk_provider_t *
-xwidget_to_provider_fns(devfs_handle_t xconn)
+xwidget_to_provider_fns(vertex_hdl_t xconn)
{
xwidget_info_t widget_info;
xtalk_provider_t *provider_fns;
@@ -159,7 +156,7 @@ xwidget_to_provider_fns(devfs_handle_t xconn)
*/
xtalk_piomap_t
-xtalk_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
+xtalk_piomap_alloc(vertex_hdl_t dev, /* set up mapping for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* map for this xtalk_addr range */
size_t byte_count,
@@ -198,7 +195,7 @@ xtalk_piomap_done(xtalk_piomap_t xtalk_piomap)
caddr_t
-xtalk_piotrans_addr(devfs_handle_t dev, /* translate for this device */
+xtalk_piotrans_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* Crosstalk address */
size_t byte_count, /* map this many bytes */
@@ -209,7 +206,7 @@ xtalk_piotrans_addr(devfs_handle_t dev, /* translate for this device */
}
caddr_t
-xtalk_pio_addr(devfs_handle_t dev, /* translate for this device */
+xtalk_pio_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t addr, /* starting address (or offset in window) */
size_t byte_count, /* map this many bytes */
@@ -326,7 +323,7 @@ null_xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
*/
xtalk_dmamap_t
-xtalk_dmamap_alloc(devfs_handle_t dev, /* set up mappings for this device */
+xtalk_dmamap_alloc(vertex_hdl_t dev, /* set up mappings for this device */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags)
@@ -373,7 +370,7 @@ xtalk_dmamap_done(xtalk_dmamap_t xtalk_dmamap)
iopaddr_t
-xtalk_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
+xtalk_dmatrans_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
@@ -385,7 +382,7 @@ xtalk_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
alenlist_t
-xtalk_dmatrans_list(devfs_handle_t dev, /* translate for this device */
+xtalk_dmatrans_list(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
alenlist_t palenlist, /* system address/length list */
unsigned flags)
@@ -402,14 +399,14 @@ xtalk_dmamap_drain(xtalk_dmamap_t map)
}
void
-xtalk_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
+xtalk_dmaaddr_drain(vertex_hdl_t dev, paddr_t addr, size_t size)
{
DEV_FUNC(dev, dmaaddr_drain)
(dev, addr, size);
}
void
-xtalk_dmalist_drain(devfs_handle_t dev, alenlist_t list)
+xtalk_dmalist_drain(vertex_hdl_t dev, alenlist_t list)
{
DEV_FUNC(dev, dmalist_drain)
(dev, list);
@@ -426,9 +423,9 @@ xtalk_dmalist_drain(devfs_handle_t dev, alenlist_t list)
* Return resource handle in intr_hdl.
*/
xtalk_intr_t
-xtalk_intr_alloc(devfs_handle_t dev, /* which Crosstalk device */
+xtalk_intr_alloc(vertex_hdl_t dev, /* which Crosstalk device */
device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{ /* owner of this interrupt */
return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc)
(dev, dev_desc, owner_dev);
@@ -440,9 +437,9 @@ xtalk_intr_alloc(devfs_handle_t dev, /* which Crosstalk device */
* Return resource handle in intr_hdl.
*/
xtalk_intr_t
-xtalk_intr_alloc_nothd(devfs_handle_t dev, /* which Crosstalk device */
+xtalk_intr_alloc_nothd(vertex_hdl_t dev, /* which Crosstalk device */
device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev) /* owner of this interrupt */
+ vertex_hdl_t owner_dev) /* owner of this interrupt */
{
return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc_nothd)
(dev, dev_desc, owner_dev);
@@ -492,11 +489,10 @@ xtalk_intr_disconnect(xtalk_intr_t intr_hdl)
* Return a hwgraph vertex that represents the CPU currently
* targeted by an interrupt.
*/
-devfs_handle_t
+vertex_hdl_t
xtalk_intr_cpu_get(xtalk_intr_t intr_hdl)
{
- return INTR_FUNC(intr_hdl, intr_cpu_get)
- (CAST_INTR(intr_hdl));
+ return (vertex_hdl_t)0;
}
@@ -526,7 +522,7 @@ xtalk_intr_cpu_get(xtalk_intr_t intr_hdl)
*/
int
xtalk_error_handler(
- devfs_handle_t xconn,
+ vertex_hdl_t xconn,
int error_code,
ioerror_mode_t mode,
ioerror_t *ioerror)
@@ -555,7 +551,7 @@ xtalk_error_handler(
#if defined(SUPPORT_PRINTING_V_FORMAT)
printk(KERN_WARNING "Xbow at %v encountered Fatal error", xconn);
#else
- printk(KERN_WARNING "Xbow at 0x%p encountered Fatal error", xconn);
+ printk(KERN_WARNING "Xbow at 0x%p encountered Fatal error", (void *)xconn);
#endif
ioerror_dump("xtalk", error_code, mode, ioerror);
@@ -563,7 +559,7 @@ xtalk_error_handler(
}
int
-xtalk_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
+xtalk_error_devenable(vertex_hdl_t xconn_vhdl, int devnum, int error_code)
{
return DEV_FUNC(xconn_vhdl, error_devenable) (xconn_vhdl, devnum, error_code);
}
@@ -577,7 +573,7 @@ xtalk_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
* Startup a crosstalk provider
*/
void
-xtalk_provider_startup(devfs_handle_t xtalk_provider)
+xtalk_provider_startup(vertex_hdl_t xtalk_provider)
{
DEV_FUNC(xtalk_provider, provider_startup)
(xtalk_provider);
@@ -588,7 +584,7 @@ xtalk_provider_startup(devfs_handle_t xtalk_provider)
* Shutdown a crosstalk provider
*/
void
-xtalk_provider_shutdown(devfs_handle_t xtalk_provider)
+xtalk_provider_shutdown(vertex_hdl_t xtalk_provider)
{
DEV_FUNC(xtalk_provider, provider_shutdown)
(xtalk_provider);
@@ -598,22 +594,22 @@ xtalk_provider_shutdown(devfs_handle_t xtalk_provider)
* Enable a device on a xtalk widget
*/
void
-xtalk_widgetdev_enable(devfs_handle_t xconn_vhdl, int devnum)
+xtalk_widgetdev_enable(vertex_hdl_t xconn_vhdl, int devnum)
{
- DEV_FUNC(xconn_vhdl, widgetdev_enable) (xconn_vhdl, devnum);
+ return;
}
/*
* Shutdown a device on a xtalk widget
*/
void
-xtalk_widgetdev_shutdown(devfs_handle_t xconn_vhdl, int devnum)
+xtalk_widgetdev_shutdown(vertex_hdl_t xconn_vhdl, int devnum)
{
- DEV_FUNC(xconn_vhdl, widgetdev_shutdown) (xconn_vhdl, devnum);
+ return;
}
int
-xtalk_dma_enabled(devfs_handle_t xconn_vhdl)
+xtalk_dma_enabled(vertex_hdl_t xconn_vhdl)
{
return DEV_FUNC(xconn_vhdl, dma_enabled) (xconn_vhdl);
}
@@ -623,7 +619,7 @@ xtalk_dma_enabled(devfs_handle_t xconn_vhdl)
*/
/****** Generic crosstalk interrupt interfaces ******/
-devfs_handle_t
+vertex_hdl_t
xtalk_intr_dev_get(xtalk_intr_t xtalk_intr)
{
return (xtalk_intr->xi_dev);
@@ -654,7 +650,7 @@ xtalk_intr_sfarg_get(xtalk_intr_t xtalk_intr)
}
/****** Generic crosstalk pio interfaces ******/
-devfs_handle_t
+vertex_hdl_t
xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap)
{
return (xtalk_piomap->xp_dev);
@@ -686,7 +682,7 @@ xtalk_pio_kvaddr_get(xtalk_piomap_t xtalk_piomap)
/****** Generic crosstalk dma interfaces ******/
-devfs_handle_t
+vertex_hdl_t
xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap)
{
return (xtalk_dmamap->xd_dev);
@@ -707,7 +703,7 @@ xtalk_dma_target_get(xtalk_dmamap_t xtalk_dmamap)
* if not, return NULL.
*/
xwidget_info_t
-xwidget_info_chk(devfs_handle_t xwidget)
+xwidget_info_chk(vertex_hdl_t xwidget)
{
arbitrary_info_t ainfo = 0;
@@ -717,28 +713,18 @@ xwidget_info_chk(devfs_handle_t xwidget)
xwidget_info_t
-xwidget_info_get(devfs_handle_t xwidget)
+xwidget_info_get(vertex_hdl_t xwidget)
{
xwidget_info_t widget_info;
widget_info = (xwidget_info_t)
hwgraph_fastinfo_get(xwidget);
-#ifdef LATER
- if ((widget_info != NULL) &&
- (widget_info->w_fingerprint != widget_info_fingerprint))
-#ifdef SUPPORT_PRINTING_V_FORMAT
- PRINT_PANIC("%v bad xwidget_info", xwidget);
-#else
- PRINT_PANIC("%x bad xwidget_info", xwidget);
-#endif
-#endif /* LATER */
-
return (widget_info);
}
void
-xwidget_info_set(devfs_handle_t xwidget, xwidget_info_t widget_info)
+xwidget_info_set(vertex_hdl_t xwidget, xwidget_info_t widget_info)
{
if (widget_info != NULL)
widget_info->w_fingerprint = widget_info_fingerprint;
@@ -753,11 +739,11 @@ xwidget_info_set(devfs_handle_t xwidget, xwidget_info_t widget_info)
(arbitrary_info_t) widget_info);
}
-devfs_handle_t
+vertex_hdl_t
xwidget_info_dev_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_dev_get: null xwidget_info");
return (xwidget_info->w_vertex);
}
@@ -765,16 +751,16 @@ xwidgetnum_t
xwidget_info_id_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_id_get: null xwidget_info");
return (xwidget_info->w_id);
}
-devfs_handle_t
+vertex_hdl_t
xwidget_info_master_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_master_get: null xwidget_info");
return (xwidget_info->w_master);
}
@@ -782,7 +768,7 @@ xwidgetnum_t
xwidget_info_masterid_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_masterid_get: null xwidget_info");
return (xwidget_info->w_masterid);
}
@@ -790,7 +776,7 @@ xwidget_part_num_t
xwidget_info_part_num_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_part_num_get: null xwidget_info");
return (xwidget_info->w_hwid.part_num);
}
@@ -798,7 +784,7 @@ xwidget_mfg_num_t
xwidget_info_mfg_num_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_mfg_num_get: null xwidget_info");
return (xwidget_info->w_hwid.mfg_num);
}
/* Extract the widget name from the widget information
@@ -808,49 +794,16 @@ char *
xwidget_info_name_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget info");
+ panic("xwidget_info_name_get: null xwidget_info");
return(xwidget_info->w_name);
}
/****** Generic crosstalk initialization interfaces ******/
/*
- * One-time initialization needed for systems that support crosstalk.
- */
-void
-xtalk_init(void)
-{
- cdl_p cp;
-
-#if DEBUG && ATTACH_DEBUG
- printf("xtalk_init\n");
-#endif
- /* Allocate the registry.
- * We might already have one.
- * If we don't, go get one.
- * MPness: someone might have
- * set one up for us while we
- * were not looking; use an atomic
- * compare-and-swap to commit to
- * using the new registry if and
- * only if nobody else did first.
- * If someone did get there first,
- * toss the one we allocated back
- * into the pool.
- */
- if (xtalk_registry == NULL) {
- cp = cdl_new(EDGE_LBL_XIO, "part", "mfgr");
- if (!compare_and_swap_ptr((void **) &xtalk_registry, NULL, (void *) cp)) {
- cdl_del(cp);
- }
- }
- ASSERT(xtalk_registry != NULL);
-}
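/*
 * A minimal, self-contained sketch of the publish-with-compare-and-swap
 * idiom the removed xtalk_init() comment describes: allocate a candidate
 * registry, try to install it, and throw ours away if another CPU won the
 * race.  C11 atomics and calloc() stand in for the SN-specific cdl_new(),
 * cdl_del() and compare_and_swap_ptr() helpers.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct registry { int nentries; };		/* stand-in for cdl_p */

static _Atomic(struct registry *) the_registry;

static struct registry *registry_get(void)
{
	struct registry *mine, *expected = NULL;

	mine = atomic_load(&the_registry);
	if (mine)
		return mine;			/* already published */

	mine = calloc(1, sizeof(*mine));	/* our candidate registry */
	if (!mine)
		return NULL;
	if (!atomic_compare_exchange_strong(&the_registry, &expected, mine)) {
		free(mine);			/* somebody beat us to it */
		mine = expected;		/* use the winner's registry */
	}
	return mine;
}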
-
-/*
* Associate a set of xtalk_provider functions with a vertex.
*/
void
-xtalk_provider_register(devfs_handle_t provider, xtalk_provider_t *xtalk_fns)
+xtalk_provider_register(vertex_hdl_t provider, xtalk_provider_t *xtalk_fns)
{
hwgraph_fastinfo_set(provider, (arbitrary_info_t) xtalk_fns);
}
@@ -859,7 +812,7 @@ xtalk_provider_register(devfs_handle_t provider, xtalk_provider_t *xtalk_fns)
* Disassociate a set of xtalk_provider functions with a vertex.
*/
void
-xtalk_provider_unregister(devfs_handle_t provider)
+xtalk_provider_unregister(vertex_hdl_t provider)
{
hwgraph_fastinfo_set(provider, (arbitrary_info_t)NULL);
}
@@ -869,50 +822,19 @@ xtalk_provider_unregister(devfs_handle_t provider)
* provider.
*/
xtalk_provider_t *
-xtalk_provider_fns_get(devfs_handle_t provider)
+xtalk_provider_fns_get(vertex_hdl_t provider)
{
return ((xtalk_provider_t *) hwgraph_fastinfo_get(provider));
}
/*
- * Announce a driver for a particular crosstalk part.
- * Returns 0 on success or -1 on failure. Failure occurs if the
- * specified hardware already has a driver.
- */
-/*ARGSUSED4 */
-int
-xwidget_driver_register(xwidget_part_num_t part_num,
- xwidget_mfg_num_t mfg_num,
- char *driver_prefix,
- unsigned flags)
-{
- /* a driver's init routine could call
- * xwidget_driver_register before the
- * system calls xtalk_init; so, we
- * make the call here.
- */
- if (xtalk_registry == NULL)
- xtalk_init();
-
- return cdl_add_driver(xtalk_registry,
- part_num, mfg_num,
- driver_prefix, flags, NULL);
-}
-
-/*
* Inform xtalk infrastructure that a driver is no longer available for
* handling any widgets.
*/
void
xwidget_driver_unregister(char *driver_prefix)
{
- /* before a driver calls unregister,
-	 * it must have called register; so we
- * can assume we have a registry here.
- */
- ASSERT(xtalk_registry != NULL);
-
- cdl_del_driver(xtalk_registry, driver_prefix, NULL);
+ return;
}
/*
@@ -923,9 +845,6 @@ void
xtalk_iterate(char *driver_prefix,
xtalk_iter_f *func)
{
- ASSERT(xtalk_registry != NULL);
-
- cdl_iterate(xtalk_registry, driver_prefix, (cdl_iter_f *)func);
}
/*
@@ -939,11 +858,10 @@ xtalk_iterate(char *driver_prefix,
*/
int
xwidget_register(xwidget_hwid_t hwid, /* widget's hardware ID */
- devfs_handle_t widget, /* widget to initialize */
+ vertex_hdl_t widget, /* widget to initialize */
xwidgetnum_t id, /* widget's target id (0..f) */
- devfs_handle_t master, /* widget's master vertex */
- xwidgetnum_t targetid, /* master's target id (9/a) */
- async_attach_t aa)
+ vertex_hdl_t master, /* widget's master vertex */
+ xwidgetnum_t targetid) /* master's target id (9/a) */
{
xwidget_info_t widget_info;
char *s,devnm[MAXDEVNAME];
@@ -972,21 +890,11 @@ xwidget_register(xwidget_hwid_t hwid, /* widget's hardware ID */
device_master_set(widget, master);
- /* All the driver init routines (including
- * xtalk_init) are called before we get into
- * attaching devices, so we can assume we
- * have a registry here.
- */
- ASSERT(xtalk_registry != NULL);
-
/*
* Add pointer to async attach info -- tear down will be done when
* the particular descendant is done with the info.
*/
- if (aa)
- async_attach_add_info(widget, aa);
-
- return cdl_add_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
+ return cdl_add_connpt(hwid->part_num, hwid->mfg_num,
widget, 0);
}
@@ -995,7 +903,7 @@ xwidget_register(xwidget_hwid_t hwid, /* widget's hardware ID */
* Unregister the xtalk device and detach all its hwgraph namespace.
*/
int
-xwidget_unregister(devfs_handle_t widget)
+xwidget_unregister(vertex_hdl_t widget)
{
xwidget_info_t widget_info;
xwidget_hwid_t hwid;
@@ -1011,9 +919,6 @@ xwidget_unregister(devfs_handle_t widget)
hwid = &(widget_info->w_hwid);
- cdl_del_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
- widget, 0);
-
/* Clean out the xwidget information */
(void)kfree(widget_info->w_name);
BZERO((void *)widget_info, sizeof(widget_info));
@@ -1023,7 +928,7 @@ xwidget_unregister(devfs_handle_t widget)
}
void
-xwidget_error_register(devfs_handle_t xwidget,
+xwidget_error_register(vertex_hdl_t xwidget,
error_handler_f *efunc,
error_handler_arg_t einfo)
{
@@ -1039,37 +944,23 @@ xwidget_error_register(devfs_handle_t xwidget,
* Issue a link reset to a widget.
*/
void
-xwidget_reset(devfs_handle_t xwidget)
+xwidget_reset(vertex_hdl_t xwidget)
{
xswitch_reset_link(xwidget);
-
}
void
-xwidget_gfx_reset(devfs_handle_t xwidget)
+xwidget_gfx_reset(vertex_hdl_t xwidget)
{
- xwidget_info_t info;
-
- xswitch_reset_link(xwidget);
- info = xwidget_info_get(xwidget);
-#ifdef LATER
- ASSERT_ALWAYS(info != NULL);
-#endif
-
- /*
- * Enable this for other architectures once we add widget_reset to the
- * xtalk provider interface.
- */
- DEV_FUNC(xtalk_provider, widget_reset)
- (xwidget_info_master_get(info), xwidget_info_id_get(info));
+ return;
}
#define ANON_XWIDGET_NAME "No Name" /* Default Widget Name */
/* Get the canonical hwgraph name of xtalk widget */
char *
-xwidget_name_get(devfs_handle_t xwidget_vhdl)
+xwidget_name_get(vertex_hdl_t xwidget_vhdl)
{
xwidget_info_t info;
diff --git a/arch/ia64/sn/io/stubs.c b/arch/ia64/sn/io/stubs.c
deleted file mode 100644
index 0acbdb83df1faf..00000000000000
--- a/arch/ia64/sn/io/stubs.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/mmzone.h>
-#include <linux/slab.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/ioerror_handling.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/slotnum.h>
-#include <asm/sn/vector.h>
-#include <asm/sn/nic.h>
-
-/******
- ****** hack defines ......
- ******/
-
-int pcibr_prefetch_enable_rev, pcibr_wg_enable_rev;
-int default_intr_pri;
-int force_fire_and_forget = 1;
-int ignore_conveyor_override = 0;
-
-devfs_handle_t dummy_vrtx; /* Needed for cpuid_to_vertex() in hack.h */
-
-
-/* ARGSUSED */
-void hub_widgetdev_enable(devfs_handle_t xconn_vhdl, int devnum)
- {FIXME("hub_widgetdev_enable");}
-
-/* ARGSUSED */
-void hub_widgetdev_shutdown(devfs_handle_t xconn_vhdl, int devnum)
- {FIXME("hub_widgetdev_shutdown");}
-
-/* ARGSUSED */
-void hub_widget_reset(devfs_handle_t hubv, xwidgetnum_t widget)
- {FIXME("hub_widget_reset");}
-
-boolean_t
-is_sys_critical_vertex(devfs_handle_t x)
-{
- FIXME("is_sys_critical_vertex : returns 0");
- return(0);
-}
-
-void *
-snia_kmem_zone_alloc(register struct zone *zone, int flags)
-{
- FIXME("snia_kmem_zone_alloc : return null");
- return((void *)0);
-}
-
-void
-snia_kmem_zone_free(register struct zone *zone, void *ptr)
-{
- FIXME("snia_kmem_zone_free : no-op");
-}
-
-struct zone *
-snia_kmem_zone_init(register int size, char *zone_name)
-{
-	FIXME("snia_kmem_zone_init : returns NULL");
- return((struct zone *)0);
-}
-
-int
-compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
-{
- FIXME("compare_and_swap_ptr : NOT ATOMIC");
- if (*location == old_ptr) {
- *location = new_ptr;
- return(1);
- }
- else
- return(0);
-}
-
-/* For ml/SN/SN1/slots.c */
-/* ARGSUSED */
-slotid_t get_widget_slotnum(int xbow, int widget)
- {FIXME("get_widget_slotnum"); return (unsigned char)NULL;}
-
-/* For router */
-int
-router_init(cnodeid_t cnode,int writeid, void *npda_rip)
- {FIXME("router_init"); return(0);}
-
-/* From io/ioerror_handling.c */
-error_return_code_t
-sys_critical_graph_vertex_add(devfs_handle_t parent, devfs_handle_t child)
- {FIXME("sys_critical_graph_vertex_add"); return(0);}
-
-/* From io/ioc3.c */
-devfs_handle_t
-ioc3_console_vhdl_get(void)
- {FIXME("ioc3_console_vhdl_get"); return( (devfs_handle_t)-1);}
-
-void
-nic_vmc_check(devfs_handle_t vhdl, char *nicinfo)
-{
-
- FIXME("nic_vmc_check\n");
-
-}
-
-char *
-nic_vertex_info_get(devfs_handle_t v)
-{
- FIXME("nic_vertex_info_get\n");
- return(NULL);
-}
-
-int
-vector_read_node(net_vec_t dest, nasid_t nasid,
- int write_id, int address,
- uint64_t *value)
-{
- FIXME("vector_read_node\n");
- return(0);
-}
-
-int
-vector_write_node(net_vec_t dest, nasid_t nasid,
- int write_id, int address,
- uint64_t value)
-{
- FIXME("vector_write_node\n");
- return(0);
-}
diff --git a/arch/ia64/sn/io/xbow.c b/arch/ia64/sn/io/xbow.c
deleted file mode 100644
index 00983e0def8001..00000000000000
--- a/arch/ia64/sn/io/xbow.c
+++ /dev/null
@@ -1,1325 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/hack.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/xtalk/xtalk_private.h>
-#include <asm/sn/simulator.h>
-
-/* #define DEBUG 1 */
-/* #define XBOW_DEBUG 1 */
-
-
-/*
- * Files needed to get the device driver entry points
- */
-
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/xtalk/xwidget.h>
-
-#include <asm/sn/prio.h>
-#include <asm/sn/hcl_util.h>
-
-
-#define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
-#define DEL(ptr) (kfree(ptr))
-
-int xbow_devflag = D_MP;
-
-/*
- * This file supports the Xbow chip. Main functions: initialization,
- * error handling, and GBR.
- */
-
-/*
- * each vertex corresponding to an xbow chip
- * has a "fastinfo" pointer pointing at one
- * of these things.
- */
-typedef struct xbow_soft_s *xbow_soft_t;
-
-struct xbow_soft_s {
- devfs_handle_t conn; /* our connection point */
- devfs_handle_t vhdl; /* xbow's private vertex */
- devfs_handle_t busv; /* the xswitch vertex */
- xbow_t *base; /* PIO pointer to crossbow chip */
- char *name; /* hwgraph name */
-
- xbow_perf_t xbow_perfcnt[XBOW_PERF_COUNTERS];
- xbow_perf_link_t xbow_perflink[MAX_XBOW_PORTS];
- xbow_link_status_t xbow_link_status[MAX_XBOW_PORTS];
- spinlock_t xbow_perf_lock;
- int link_monitor;
- widget_cfg_t *wpio[MAX_XBOW_PORTS]; /* cached PIO pointer */
-
- /* Bandwidth allocation state. Bandwidth values are for the
- * destination port since contention happens there.
- * Implicit mapping from xbow ports (8..f) -> (0..7) array indices.
- */
- spinlock_t xbow_bw_alloc_lock; /* bw allocation lock */
- unsigned long long bw_hiwm[MAX_XBOW_PORTS]; /* hiwater mark values */
- unsigned long long bw_cur_used[MAX_XBOW_PORTS]; /* bw used currently */
-};
-
-#define xbow_soft_set(v,i) hwgraph_fastinfo_set((v), (arbitrary_info_t)(i))
-#define xbow_soft_get(v) ((xbow_soft_t)hwgraph_fastinfo_get((v)))
-
-/*
- * Function Table of Contents
- */
-
-void xbow_mlreset(xbow_t *);
-void xbow_init(void);
-int xbow_attach(devfs_handle_t);
-
-int xbow_open(devfs_handle_t *, int, int, cred_t *);
-int xbow_close(devfs_handle_t, int, int, cred_t *);
-
-int xbow_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
-int xbow_unmap(devfs_handle_t, vhandl_t *);
-int xbow_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
-
-int xbow_widget_present(xbow_t *, int);
-static int xbow_link_alive(xbow_t *, int);
-devfs_handle_t xbow_widget_lookup(devfs_handle_t, int);
-
-#ifdef LATER
-static void xbow_setwidint(xtalk_intr_t);
-static void xbow_errintr_handler(intr_arg_t);
-#endif
-void xbow_intr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
-
-
-
-void xbow_update_perf_counters(devfs_handle_t);
-xbow_perf_link_t *xbow_get_perf_counters(devfs_handle_t);
-int xbow_enable_perf_counter(devfs_handle_t, int, int, int);
-xbow_link_status_t *xbow_get_llp_status(devfs_handle_t);
-void xbow_update_llp_status(devfs_handle_t);
-
-int xbow_disable_llp_monitor(devfs_handle_t);
-int xbow_enable_llp_monitor(devfs_handle_t);
-int xbow_prio_bw_alloc(devfs_handle_t, xwidgetnum_t, xwidgetnum_t,
- unsigned long long, unsigned long long);
-
-xswitch_reset_link_f xbow_reset_link;
-
-void idbg_xbowregs(int64_t);
-
-xswitch_provider_t xbow_provider =
-{
- xbow_reset_link,
-};
-
-/*
- * xbow_mlreset: called at mlreset time if the
- * platform specific code determines that there is
- * a crossbow in a critical path that must be
- * functional before the driver would normally get
- * the device properly set up.
- *
- * what do we need to do, that the boot prom can
- * not be counted on to have already done, that is
- * generic across all platforms using crossbows?
- */
-/*ARGSUSED */
-void
-xbow_mlreset(xbow_t * xbow)
-{
-}
-
-/*
- * xbow_init: called with the rest of the device
- * driver XXX_init routines. This platform *might*
- * have a Crossbow chip, or even several, but it
- * might have none. Register with the crosstalk
- * generic provider so when we encounter the chip
- * the right magic happens.
- */
-void
-xbow_init(void)
-{
-
-#if DEBUG && ATTACH_DEBUG
- printf("xbow_init\n");
-#endif
-
- xwidget_driver_register(XXBOW_WIDGET_PART_NUM,
- 0, /* XXBOW_WIDGET_MFGR_NUM, */
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-
- xwidget_driver_register(XBOW_WIDGET_PART_NUM,
- XBOW_WIDGET_MFGR_NUM,
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-}
-
-#ifdef XBRIDGE_REGS_SIM
-/* xbow_set_simulated_regs: sets xbow regs as needed
- * for powering through the boot
- */
-void
-xbow_set_simulated_regs(xbow_t *xbow, int port)
-{
- /*
- * turn on link
- */
- xbow->xb_link(port).link_status = (1<<31);
- /*
- * and give it a live widget too
- */
- xbow->xb_link(port).link_aux_status = XB_AUX_STAT_PRESENT;
- /*
- * zero the link control reg
- */
- xbow->xb_link(port).link_control = 0x0;
-}
-#endif /* XBRIDGE_REGS_SIM */
-
-/*
- * xbow_attach: the crosstalk provider has
- * determined that there is a crossbow widget
- * present, and has handed us the connection
- * point for that vertex.
- *
- * We not only add our own vertex, but add
- * some "xtalk switch" data to the switch
- * vertex (at the connect point's parent) if
- * it does not have any.
- */
-
-/*ARGSUSED */
-int
-xbow_attach(devfs_handle_t conn)
-{
- /*REFERENCED */
- devfs_handle_t vhdl;
- devfs_handle_t busv;
- xbow_t *xbow;
- xbow_soft_t soft;
- int port;
- xswitch_info_t info;
-#ifdef LATER
- xtalk_intr_t intr_hdl;
- device_desc_t dev_desc;
-#endif
- char devnm[MAXDEVNAME], *s;
- xbowreg_t id;
- int rev;
- int i;
- int xbow_num;
-
-#if DEBUG && ATTACH_DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk("%v: xbow_attach\n", conn);
-#else
- printk("0x%x: xbow_attach\n", conn);
-#endif
-#endif
-
- /*
- * Get a PIO pointer to the base of the crossbow
- * chip.
- */
-#ifdef XBRIDGE_REGS_SIM
- printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: allocating %ld bytes for xbow_s\n", sizeof(xbow_t));
- xbow = (xbow_t *) kmalloc(sizeof(xbow_t), GFP_KERNEL);
- /*
- * turn on ports e and f like in a real live ibrick
- */
- xbow_set_simulated_regs(xbow, 0xe);
- xbow_set_simulated_regs(xbow, 0xf);
-#else
- xbow = (xbow_t *) xtalk_piotrans_addr(conn, 0, 0, sizeof(xbow_t), 0);
-#endif /* XBRIDGE_REGS_SIM */
-
- /*
- * Locate the "switch" vertex: it is the parent
- * of our connection point.
- */
- busv = hwgraph_connectpt_get(conn);
-#if DEBUG && ATTACH_DEBUG
- printk("xbow_attach: Bus Vertex 0x%p, conn 0x%p, xbow register 0x%p wid= 0x%x\n", busv, conn, xbow, *(volatile u32 *)xbow);
-#endif
-
- ASSERT(busv != GRAPH_VERTEX_NONE);
-
- /*
- * Create our private vertex, and connect our
- * driver information to it. This makes it possible
- * for diagnostic drivers to open the crossbow
- * vertex for access to registers.
- */
-
- /*
- * We need to teach xbow drivers to provide the right set of
- * file ops.
- */
- vhdl = NULL;
- vhdl = hwgraph_register(conn, EDGE_LBL_XBOW,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- /* &hcl_fops */ (void *)&vhdl, NULL);
- if (!vhdl) {
- printk(KERN_WARNING "xbow_attach: Unable to create char device for xbow conn %p\n",
- (void *)conn);
- }
-
- /*
- * Allocate the soft state structure and attach
- * it to the xbow's vertex
- */
- NEW(soft);
- soft->conn = conn;
- soft->vhdl = vhdl;
- soft->busv = busv;
- soft->base = xbow;
- /* does the universe really need another macro? */
- /* xbow_soft_set(vhdl, (arbitrary_info_t) soft); */
- hwgraph_fastinfo_set(vhdl, (arbitrary_info_t) soft);
-
-#define XBOW_NUM_SUFFIX_FORMAT "[xbow# %d]"
-
- /* Add xbow number as a suffix to the hwgraph name of the xbow.
- * This is helpful while looking at the error/warning messages.
- */
- xbow_num = 0;
-
- /*
- * get the name of this xbow vertex and keep the info.
- * This is needed during errors and interrupts, but as
- * long as we have it, we can use it elsewhere.
- */
- s = dev_to_name(vhdl, devnm, MAXDEVNAME);
- soft->name = kmalloc(strlen(s) + strlen(XBOW_NUM_SUFFIX_FORMAT) + 1,
- GFP_KERNEL);
- sprintf(soft->name,"%s"XBOW_NUM_SUFFIX_FORMAT, s,xbow_num);
-
-#ifdef XBRIDGE_REGS_SIM
- /* my o200/ibrick has id=0x2d002049, but XXBOW_WIDGET_PART_NUM is defined
- * as 0xd000, so I'm using that for the partnum bitfield.
- */
- printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: need xb_wid_id value!!\n");
- id = 0x2d000049;
-#else
- id = xbow->xb_wid_id;
-#endif /* XBRIDGE_REGS_SIM */
- rev = XWIDGET_PART_REV_NUM(id);
-
- /*
- * Print the revision if DEBUG, or SHOW_REVS and kdebug,
- * or the xbow is downrev.
- *
- * If xbow is downrev, make it a WARNING that the
- * Crossbow is DOWNREV: these chips are not good
- * to have around, and the operator should be told.
- */
-#ifdef LATER
-#if !DEBUG
- if (
-#if SHOW_REVS
- (kdebug) ||
-#endif /* SHOW_REVS */
- (rev < XBOW_REV_1_1))
-#endif /* !DEBUG */
- printk("%sCrossbow ASIC: rev %s (code=%d) at %s%s",
- (rev < XBOW_REV_1_1) ? "DOWNREV " : "",
- (rev == XBOW_REV_1_0) ? "1.0" :
- (rev == XBOW_REV_1_1) ? "1.1" :
- (rev == XBOW_REV_1_2) ? "1.2" :
- (rev == XBOW_REV_1_3) ? "1.3" :
- (rev == XBOW_REV_2_0) ? "2.0" :
- (rev == XXBOW_PART_REV_1_0) ? "Xbridge 1.0" :
- (rev == XXBOW_PART_REV_2_0) ? "Xbridge 2.0" :
- "unknown",
- rev, soft->name,
- (rev < XBOW_REV_1_1) ? "" : "\n");
-#endif /* LATER */
- mutex_spinlock_init(&soft->xbow_perf_lock);
- soft->xbow_perfcnt[0].xp_perf_reg = &xbow->xb_perf_ctr_a;
- soft->xbow_perfcnt[1].xp_perf_reg = &xbow->xb_perf_ctr_b;
-
- /* Initialization for GBR bw allocation */
- mutex_spinlock_init(&soft->xbow_bw_alloc_lock);
-
-#define XBOW_8_BIT_PORT_BW_MAX (400 * 1000 * 1000) /* 400 MB/s */
-#define XBOW_16_BIT_PORT_BW_MAX (800 * 1000 * 1000) /* 800 MB/s */
-
- /* Set bandwidth hiwatermark and current values */
- for (i = 0; i < MAX_XBOW_PORTS; i++) {
- soft->bw_hiwm[i] = XBOW_16_BIT_PORT_BW_MAX; /* for now */
- soft->bw_cur_used[i] = 0;
- }
-
- /*
- * Enable xbow error interrupts
- */
- xbow->xb_wid_control = (XB_WID_CTRL_REG_ACC_IE | XB_WID_CTRL_XTALK_IE);
-
- /*
- * take a census of the widgets present,
- * leaving notes at the switch vertex.
- */
- info = xswitch_info_new(busv);
-
- for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
- port < MAX_PORT_NUM; ++port) {
- if (!xbow_link_alive(xbow, port)) {
-#if DEBUG && XBOW_DEBUG
- printk(KERN_INFO "0x%p link %d is not alive\n",
- busv, port);
-#endif
- continue;
- }
- if (!xbow_widget_present(xbow, port)) {
-#if DEBUG && XBOW_DEBUG
- printk(KERN_INFO "0x%p link %d is alive but no widget is present\n", busv, port);
-#endif
- continue;
- }
-#if DEBUG && XBOW_DEBUG
- printk(KERN_INFO "0x%p link %d has a widget\n",
- busv, port);
-#endif
-
- xswitch_info_link_is_ok(info, port);
- /*
- * Turn some error interrupts on
- * and turn others off. The PROM has
- * some things turned on we don't
- * want to see (bandwidth allocation
- * errors for instance); so if it
- * is not listed here, it is not on.
- */
- xbow->xb_link(port).link_control =
- ( (xbow->xb_link(port).link_control
- /*
- * Turn off these bits; they are non-fatal,
- * but we might want to save some statistics
- * on the frequency of these errors.
- * XXX FIXME XXX
- */
- & ~XB_CTRL_RCV_CNT_OFLOW_IE
- & ~XB_CTRL_XMT_CNT_OFLOW_IE
- & ~XB_CTRL_BNDWDTH_ALLOC_IE
- & ~XB_CTRL_RCV_IE)
- /*
- * These are the ones we want to turn on.
- */
- | (XB_CTRL_ILLEGAL_DST_IE
- | XB_CTRL_OALLOC_IBUF_IE
- | XB_CTRL_XMT_MAX_RTRY_IE
- | XB_CTRL_MAXREQ_TOUT_IE
- | XB_CTRL_XMT_RTRY_IE
- | XB_CTRL_SRC_TOUT_IE) );
- }
-
- xswitch_provider_register(busv, &xbow_provider);
-
- return 0; /* attach successful */
-}
-
-/*ARGSUSED */
-int
-xbow_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
-{
- return 0;
-
-}
-
-/*ARGSUSED */
-int
-xbow_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-xbow_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- devfs_handle_t vhdl = dev_to_vhdl(dev);
- xbow_soft_t soft = xbow_soft_get(vhdl);
- int error;
-
- ASSERT(soft);
- len = ctob(btoc(len));
- /* XXX- this ignores the offset!!! */
- error = v_mapphys(vt, (void *) soft->base, len);
- return error;
-}
-
-/*ARGSUSED */
-int
-xbow_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- return 0;
-}
-
-/* This contains special-case code for grio. There are plans to make
- * this general sometime in the future, but till then this should
- * be good enough.
- */
-xwidgetnum_t
-xbow_widget_num_get(devfs_handle_t dev)
-{
- devfs_handle_t tdev;
- char devname[MAXDEVNAME];
- xwidget_info_t xwidget_info;
- int i;
-
- vertex_to_name(dev, devname, MAXDEVNAME);
-
- /* If this is a pci controller vertex, traverse up using
- * the ".." links to get to the widget.
- */
- if (strstr(devname, EDGE_LBL_PCI) &&
- strstr(devname, EDGE_LBL_CONTROLLER)) {
- tdev = dev;
- for (i=0; i< 2; i++) {
- if (hwgraph_edge_get(tdev,
- HWGRAPH_EDGELBL_DOTDOT, &tdev) !=
- GRAPH_SUCCESS)
- return XWIDGET_NONE;
- }
-
- if ((xwidget_info = xwidget_info_chk(tdev)) != NULL) {
- return (xwidget_info_id_get(xwidget_info));
- } else {
- return XWIDGET_NONE;
- }
- }
-
- return XWIDGET_NONE;
-}
-
-int
-xbow_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int flag,
- struct cred *cr,
- int *rvalp)
-{
- devfs_handle_t vhdl;
- int error = 0;
-
-#if defined (DEBUG)
- int rc;
- devfs_handle_t conn;
- struct xwidget_info_s *xwidget_info;
- xbow_soft_t xbow_soft;
-#endif
- *rvalp = 0;
-
- vhdl = dev_to_vhdl(dev);
-#if defined (DEBUG)
- xbow_soft = xbow_soft_get(vhdl);
- conn = xbow_soft->conn;
-
- xwidget_info = xwidget_info_get(conn);
- ASSERT_ALWAYS(xwidget_info != NULL);
-
- rc = xwidget_hwid_is_xswitch(&xwidget_info->w_hwid);
- ASSERT_ALWAYS(rc != 0);
-#endif
- switch (cmd) {
-#ifdef LATER
- case XBOWIOC_PERF_ENABLE:
- case XBOWIOC_PERF_DISABLE:
- {
- struct xbow_perfarg_t xbow_perf_en;
-
- if (!_CAP_CRABLE(cr, CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
- if ((flag & FWRITE) == 0) {
- error = EBADF;
- break;
- }
- if (COPYIN(arg, &xbow_perf_en, sizeof(xbow_perf_en))) {
- error = EFAULT;
- break;
- }
- if (error = xbow_enable_perf_counter(vhdl,
- xbow_perf_en.link,
- (cmd == XBOWIOC_PERF_DISABLE) ? 0 : xbow_perf_en.mode,
- xbow_perf_en.counter)) {
- error = EINVAL;
- break;
- }
- break;
- }
-#endif
-
-#ifdef LATER
- case XBOWIOC_PERF_GET:
- {
- xbow_perf_link_t *xbow_perf_cnt;
-
- if ((flag & FREAD) == 0) {
- error = EBADF;
- break;
- }
- xbow_perf_cnt = xbow_get_perf_counters(vhdl);
- ASSERT_ALWAYS(xbow_perf_cnt != NULL);
-
- if (COPYOUT((void *) xbow_perf_cnt, (void *) arg,
- MAX_XBOW_PORTS * sizeof(xbow_perf_link_t))) {
- error = EFAULT;
- break;
- }
- break;
- }
-#endif
-
- case XBOWIOC_LLP_ERROR_ENABLE:
- if ((error = xbow_enable_llp_monitor(vhdl)) != 0)
- error = EINVAL;
-
- break;
-
- case XBOWIOC_LLP_ERROR_DISABLE:
-
- if ((error = xbow_disable_llp_monitor(vhdl)) != 0)
- error = EINVAL;
-
- break;
-
-#ifdef LATER
- case XBOWIOC_LLP_ERROR_GET:
- {
- xbow_link_status_t *xbow_llp_status;
-
- if ((flag & FREAD) == 0) {
- error = EBADF;
- break;
- }
- xbow_llp_status = xbow_get_llp_status(vhdl);
- ASSERT_ALWAYS(xbow_llp_status != NULL);
-
- if (COPYOUT((void *) xbow_llp_status, (void *) arg,
- MAX_XBOW_PORTS * sizeof(xbow_link_status_t))) {
- error = EFAULT;
- break;
- }
- break;
- }
-#endif
-
-#ifdef LATER
- case GIOCSETBW:
- {
- grio_ioctl_info_t info;
- xwidgetnum_t src_widgetnum, dest_widgetnum;
-
- if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
- error = EFAULT;
- break;
- }
-#ifdef GRIO_DEBUG
- printf("xbow:: prev_vhdl: %d next_vhdl: %d reqbw: %lld\n",
- info.prev_vhdl, info.next_vhdl, info.reqbw);
-#endif /* GRIO_DEBUG */
-
- src_widgetnum = xbow_widget_num_get(info.prev_vhdl);
- dest_widgetnum = xbow_widget_num_get(info.next_vhdl);
-
- /* Bandwidth allocation is bi-directional. Since bandwidth
- * reservations have already been done at an earlier stage,
- * we cannot fail here for lack of bandwidth.
- */
- xbow_prio_bw_alloc(dev, src_widgetnum, dest_widgetnum,
- 0, info.reqbw);
- xbow_prio_bw_alloc(dev, dest_widgetnum, src_widgetnum,
- 0, info.reqbw);
-
- break;
- }
-
- case GIOCRELEASEBW:
- {
- grio_ioctl_info_t info;
- xwidgetnum_t src_widgetnum, dest_widgetnum;
-
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
-
- if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
- error = EFAULT;
- break;
- }
-#ifdef GRIO_DEBUG
- printf("xbow:: prev_vhdl: %d next_vhdl: %d reqbw: %lld\n",
- info.prev_vhdl, info.next_vhdl, info.reqbw);
-#endif /* GRIO_DEBUG */
-
- src_widgetnum = xbow_widget_num_get(info.prev_vhdl);
- dest_widgetnum = xbow_widget_num_get(info.next_vhdl);
-
- /* Bandwidth reservation is bi-directional. Hence, remove
- * bandwidth reservations for both directions.
- */
- xbow_prio_bw_alloc(dev, src_widgetnum, dest_widgetnum,
- info.reqbw, (-1 * info.reqbw));
- xbow_prio_bw_alloc(dev, dest_widgetnum, src_widgetnum,
- info.reqbw, (-1 * info.reqbw));
-
- break;
- }
-#endif
-
- default:
- break;
-
- }
- return error;
-}
-
-/*
- * xbow_widget_present: See if a device is present
- * on the specified port of this crossbow.
- */
-int
-xbow_widget_present(xbow_t * xbow, int port)
-{
- if ( IS_RUNNING_ON_SIMULATOR() ) {
- if ( (port == 14) || (port == 15) ) {
- return 1;
- }
- else {
- return 0;
- }
- }
- else {
- return xbow->xb_link(port).link_aux_status & XB_AUX_STAT_PRESENT;
- }
-}
-
-static int
-xbow_link_alive(xbow_t * xbow, int port)
-{
- xbwX_stat_t xbow_linkstat;
-
- xbow_linkstat.linkstatus = xbow->xb_link(port).link_status;
- return (xbow_linkstat.link_alive);
-}
-
-/*
- * xbow_widget_lookup
- * Lookup the edges connected to the xbow specified, and
- * retrieve the handle corresponding to the widgetnum
- * specified.
- * If not found, return 0.
- */
-devfs_handle_t
-xbow_widget_lookup(devfs_handle_t vhdl,
- int widgetnum)
-{
- xswitch_info_t xswitch_info;
- devfs_handle_t conn;
-
- xswitch_info = xswitch_info_get(vhdl);
- conn = xswitch_info_vhdl_get(xswitch_info, widgetnum);
- return conn;
-}
-
-/*
- * xbow_setwidint: called when xtalk
- * is establishing or migrating our
- * interrupt service.
- */
-#ifdef LATER
-static void
-xbow_setwidint(xtalk_intr_t intr)
-{
- xwidgetnum_t targ = xtalk_intr_target_get(intr);
- iopaddr_t addr = xtalk_intr_addr_get(intr);
- xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
- xbow_t *xbow = (xbow_t *) xtalk_intr_sfarg_get(intr);
-
- xbow_intr_preset((void *) xbow, 0, targ, addr, vect);
-}
-#endif /* LATER */
-
-/*
- * xbow_intr_preset: called during mlreset time
- * if the platform specific code needs to route
- * an xbow interrupt before the xtalk infrastructure
- * is available for use.
- *
- * Also called from xbow_setwidint, so we don't
- * replicate the guts of the routine.
- *
- * XXX- probably should be renamed xbow_wid_intr_set or
- * something to reduce confusion.
- */
-/*ARGSUSED3 */
-void
-xbow_intr_preset(void *which_widget,
- int which_widget_intr,
- xwidgetnum_t targ,
- iopaddr_t addr,
- xtalk_intr_vector_t vect)
-{
- xbow_t *xbow = (xbow_t *) which_widget;
-
- xbow->xb_wid_int_upper = ((0xFF000000 & (vect << 24)) |
- (0x000F0000 & (targ << 16)) |
- XTALK_ADDR_TO_UPPER(addr));
- xbow->xb_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
-}
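/*
 * Worked example of the xb_wid_int_upper packing done in
 * xbow_intr_preset() above: the interrupt vector occupies bits 31..24
 * and the target widget number bits 19..16.  XTALK_ADDR_TO_UPPER() is
 * not reproduced here, so the address contribution is omitted; the
 * vector and target values below are made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t vect = 0x55;			/* hypothetical vector */
	uint32_t targ = 0xa;			/* hypothetical target widget */
	uint32_t upper = (0xFF000000u & (vect << 24)) |
			 (0x000F0000u & (targ << 16));

	printf("vect=0x%02x targ=0x%x -> int_upper=0x%08x\n",
	       vect, targ, upper);		/* 0x550a0000 */
	return 0;
}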
-
-#define XEM_ADD_STR(s) printk("%s", (s))
-#define XEM_ADD_NVAR(n,v) printk("\t%20s: 0x%x\n", (n), (v))
-#define XEM_ADD_VAR(v) XEM_ADD_NVAR(#v,(v))
-#define XEM_ADD_IOEF(n) if (IOERROR_FIELDVALID(ioe,n)) \
- XEM_ADD_NVAR("ioe." #n, \
- IOERROR_GETVALUE(ioe,n))
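/*
 * XEM_ADD_VAR() above leans on the preprocessor's '#' stringize
 * operator so each value is printed next to its own source name.  A
 * stand-alone illustration of the same trick (names here are
 * illustrative, not part of the original macros):
 */
#include <stdio.h>

#define ADD_NVAR(n, v)	printf("\t%20s: 0x%x\n", (n), (v))
#define ADD_VAR(v)	ADD_NVAR(#v, (v))

int main(void)
{
	int widgetnum = 0x9;			/* made-up sample value */

	ADD_VAR(widgetnum);			/* prints "widgetnum: 0x9" */
	return 0;
}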
-
-#ifdef LATER
-static void
-xem_add_ioe(ioerror_t *ioe)
-{
- XEM_ADD_IOEF(errortype);
- XEM_ADD_IOEF(widgetnum);
- XEM_ADD_IOEF(widgetdev);
- XEM_ADD_IOEF(srccpu);
- XEM_ADD_IOEF(srcnode);
- XEM_ADD_IOEF(errnode);
- XEM_ADD_IOEF(sysioaddr);
- XEM_ADD_IOEF(xtalkaddr);
- XEM_ADD_IOEF(busspace);
- XEM_ADD_IOEF(busaddr);
- XEM_ADD_IOEF(vaddr);
- XEM_ADD_IOEF(memaddr);
- XEM_ADD_IOEF(epc);
- XEM_ADD_IOEF(ef);
-}
-
-#define XEM_ADD_IOE() (xem_add_ioe(ioe))
-#endif /* LATER */
-
-int xbow_xmit_retry_errors = 0;
-
-int
-xbow_xmit_retry_error(xbow_soft_t soft,
- int port)
-{
- xswitch_info_t info;
- devfs_handle_t vhdl;
- widget_cfg_t *wid;
- widgetreg_t id;
- int part;
- int mfgr;
-
- wid = soft->wpio[port - BASE_XBOW_PORT];
- if (wid == NULL) {
- /* If we can't track down a PIO
- * pointer to our widget yet,
- * leave our caller knowing that
- * we are interested in this
- * interrupt if it occurs in
- * the future.
- */
- info = xswitch_info_get(soft->busv);
- if (!info)
- return 1;
- vhdl = xswitch_info_vhdl_get(info, port);
- if (vhdl == GRAPH_VERTEX_NONE)
- return 1;
- wid = (widget_cfg_t *) xtalk_piotrans_addr
- (vhdl, 0, 0, sizeof *wid, 0);
- if (!wid)
- return 1;
- soft->wpio[port - BASE_XBOW_PORT] = wid;
- }
- id = wid->w_id;
- part = XWIDGET_PART_NUM(id);
- mfgr = XWIDGET_MFG_NUM(id);
-
- /* If this thing is not a Bridge,
- * do not activate the WAR, and
- * tell our caller we do not need
- * to be called again.
- */
- if ((part != BRIDGE_WIDGET_PART_NUM) ||
- (mfgr != BRIDGE_WIDGET_MFGR_NUM)) {
- /* FIXME: add Xbridge to the WAR.
- * Shouldn't hurt anything. Later need to
- * check if we can remove this.
- */
- if ((part != XBRIDGE_WIDGET_PART_NUM) ||
- (mfgr != XBRIDGE_WIDGET_MFGR_NUM))
- return 0;
- }
-
- /* count how many times we
- * have picked up after
- * LLP Transmit problems.
- */
- xbow_xmit_retry_errors++;
-
- /* rewrite the control register
- * to fix things up.
- */
- wid->w_control = wid->w_control;
- wid->w_control;
-
- return 1;
-}
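/*
 * Sketch of the write-then-read-back idiom used on w_control just
 * above: rewriting a memory-mapped register and then reading it back
 * forces the posted PIO write out to the device before the CPU moves
 * on.  The register layout here is hypothetical; only the volatile
 * access pattern is the point.
 */
#include <stdint.h>

struct fake_widget_regs {
	uint32_t w_control;
};

static inline void control_rewrite_and_flush(volatile struct fake_widget_regs *regs)
{
	uint32_t ctrl = regs->w_control;	/* read current value */

	regs->w_control = ctrl;			/* rewrite it ...             */
	(void)regs->w_control;			/* ... and read back to flush */
}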
-
-void
-xbow_update_perf_counters(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
- xbow_perf_link_t *xbow_plink = xbow_soft->xbow_perflink;
- xbow_perfcount_t perf_reg;
- unsigned long s;
- int link, i;
-
- for (i = 0; i < XBOW_PERF_COUNTERS; i++, xbow_perf++) {
- if (xbow_perf->xp_mode == XBOW_MONITOR_NONE)
- continue;
-
- s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
-
- perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
-
- link = perf_reg.xb_perf.link_select;
-
- (xbow_plink + link)->xlp_cumulative[xbow_perf->xp_curmode] +=
- ((perf_reg.xb_perf.count - xbow_perf->xp_current) & XBOW_COUNTER_MASK);
- xbow_perf->xp_current = perf_reg.xb_perf.count;
-
- mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
- }
-	/* Do port/mode multiplexing here */
-
-#ifdef LATER
- (void) timeout(xbow_update_perf_counters,
- (void *) (__psunsigned_t) vhdl, XBOW_PERF_TIMEOUT);
-#endif
-
-}
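/*
 * The accumulation in xbow_update_perf_counters() masks the difference
 * between the new and the previously sampled count so that hardware
 * counter wrap-around is still credited correctly.  Minimal
 * illustration assuming a 20-bit counter (XBOW_COUNTER_MASK itself is
 * defined elsewhere):
 */
#include <stdio.h>
#include <stdint.h>

#define COUNTER_MASK	0xfffffu		/* assume a 20-bit counter */

int main(void)
{
	uint32_t prev  = 0xffff0;		/* sampled just before wrap */
	uint32_t now   = 0x00010;		/* sampled just after wrap  */
	uint32_t delta = (now - prev) & COUNTER_MASK;

	printf("delta=0x%x (%u events)\n", delta, delta);	/* 0x20 = 32 */
	return 0;
}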
-
-xbow_perf_link_t *
-xbow_get_perf_counters(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_perf_link_t *xbow_perf_link = xbow_soft->xbow_perflink;
-
- return xbow_perf_link;
-}
-
-int
-xbow_enable_perf_counter(devfs_handle_t vhdl, int link, int mode, int counter)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
- xbow_linkctrl_t xbow_link_ctrl;
- xbow_t *xbow = xbow_soft->base;
- xbow_perfcount_t perf_reg;
- unsigned long s;
- int i;
-
- link -= BASE_XBOW_PORT;
- if ((link < 0) || (link >= MAX_XBOW_PORTS))
- return -1;
-
- if ((mode < XBOW_MONITOR_NONE) || (mode > XBOW_MONITOR_DEST_LINK))
- return -1;
-
- if ((counter < 0) || (counter >= XBOW_PERF_COUNTERS))
- return -1;
-
- s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
-
- if ((xbow_perf + counter)->xp_mode && mode) {
- mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
- return -1;
- }
- for (i = 0; i < XBOW_PERF_COUNTERS; i++) {
- if (i == counter)
- continue;
- if (((xbow_perf + i)->xp_link == link) &&
- ((xbow_perf + i)->xp_mode)) {
- mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
- return -1;
- }
- }
- xbow_perf += counter;
-
- xbow_perf->xp_curlink = xbow_perf->xp_link = link;
- xbow_perf->xp_curmode = xbow_perf->xp_mode = mode;
-
- xbow_link_ctrl.xbl_ctrlword = xbow->xb_link_raw[link].link_control;
- xbow_link_ctrl.xb_linkcontrol.perf_mode = mode;
- xbow->xb_link_raw[link].link_control = xbow_link_ctrl.xbl_ctrlword;
-
- perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
- perf_reg.xb_perf.link_select = link;
- *(xbowreg_t *) xbow_perf->xp_perf_reg = perf_reg.xb_counter_val;
- xbow_perf->xp_current = perf_reg.xb_perf.count;
-
-#ifdef LATER
- (void) timeout(xbow_update_perf_counters,
- (void *) (__psunsigned_t) vhdl, XBOW_PERF_TIMEOUT);
-#endif
-
- mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
-
- return 0;
-}
-
-xbow_link_status_t *
-xbow_get_llp_status(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
-
- return xbow_llp_status;
-}
-
-void
-xbow_update_llp_status(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
- xbow_t *xbow;
- xbwX_stat_t lnk_sts;
- xbow_aux_link_status_t aux_sts;
- int link;
- devfs_handle_t xwidget_vhdl;
- char *xwidget_name;
-
- xbow = (xbow_t *) xbow_soft->base;
- for (link = 0; link < MAX_XBOW_PORTS; link++, xbow_llp_status++) {
- /* Get the widget name corresponding the current link.
- * Note : 0 <= link < MAX_XBOW_PORTS(8).
- * BASE_XBOW_PORT(0x8) <= xwidget number < MAX_PORT_NUM (0x10)
- */
- xwidget_vhdl = xbow_widget_lookup(xbow_soft->busv,link+BASE_XBOW_PORT);
- xwidget_name = xwidget_name_get(xwidget_vhdl);
- aux_sts.aux_linkstatus
- = xbow->xb_link_raw[link].link_aux_status;
- lnk_sts.linkstatus = xbow->xb_link_raw[link].link_status_clr;
-
- if (lnk_sts.link_alive == 0)
- continue;
-
- xbow_llp_status->rx_err_count +=
- aux_sts.xb_aux_linkstatus.rx_err_cnt;
-
- xbow_llp_status->tx_retry_count +=
- aux_sts.xb_aux_linkstatus.tx_retry_cnt;
-
- if (lnk_sts.linkstatus & ~(XB_STAT_RCV_ERR | XB_STAT_XMT_RTRY_ERR | XB_STAT_LINKALIVE)) {
-#ifdef LATER
- printk(KERN_WARNING "link %d[%s]: bad status 0x%x\n",
- link, xwidget_name, lnk_sts.linkstatus);
-#endif
- }
- }
-#ifdef LATER
- if (xbow_soft->link_monitor)
- (void) timeout(xbow_update_llp_status,
- (void *) (__psunsigned_t) vhdl, XBOW_STATS_TIMEOUT);
-#endif
-}
-
-int
-xbow_disable_llp_monitor(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- int port;
-
- for (port = 0; port < MAX_XBOW_PORTS; port++) {
- xbow_soft->xbow_link_status[port].rx_err_count = 0;
- xbow_soft->xbow_link_status[port].tx_retry_count = 0;
- }
-
- xbow_soft->link_monitor = 0;
- return 0;
-}
-
-int
-xbow_enable_llp_monitor(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
-
-#ifdef LATER
- (void) timeout(xbow_update_llp_status,
- (void *) (__psunsigned_t) vhdl, XBOW_STATS_TIMEOUT);
-#endif
- xbow_soft->link_monitor = 1;
- return 0;
-}
-
-
-int
-xbow_reset_link(devfs_handle_t xconn_vhdl)
-{
- xwidget_info_t widget_info;
- xwidgetnum_t port;
- xbow_t *xbow;
- xbowreg_t ctrl;
- xbwX_stat_t stat;
- unsigned itick;
- unsigned dtick;
- static int ticks_per_ms = 0;
-
- if (!ticks_per_ms) {
- itick = get_timestamp();
- us_delay(1000);
- ticks_per_ms = get_timestamp() - itick;
- }
- widget_info = xwidget_info_get(xconn_vhdl);
- port = xwidget_info_id_get(widget_info);
-
-#ifdef XBOW_K1PTR /* defined if we only have one xbow ... */
- xbow = XBOW_K1PTR;
-#else
- {
- devfs_handle_t xbow_vhdl;
- xbow_soft_t xbow_soft;
-
- hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl);
- xbow_soft = xbow_soft_get(xbow_vhdl);
- xbow = xbow_soft->base;
- }
-#endif
-
- /*
- * This requires three PIOs (reset the link, check for the
- * reset, restore the control register for the link) plus
- * 10us to wait for the reset. We allow up to 1ms for the
- * widget to come out of reset before giving up and
- * returning a failure.
- */
- ctrl = xbow->xb_link(port).link_control;
- xbow->xb_link(port).link_reset = 0;
- itick = get_timestamp();
- while (1) {
- stat.linkstatus = xbow->xb_link(port).link_status;
- if (stat.link_alive)
- break;
- dtick = get_timestamp() - itick;
- if (dtick > ticks_per_ms) {
- return -1; /* never came out of reset */
- }
- DELAY(2); /* don't beat on link_status */
- }
- xbow->xb_link(port).link_control = ctrl;
- return 0;
-}
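/*
 * xbow_reset_link() above calibrates ticks_per_ms once and then polls
 * link_status for at most 1 ms before giving up.  A generic sketch of
 * the same poll-with-deadline shape using standard POSIX timing; the
 * ready() callback is a hypothetical stand-in for the link_status
 * read, and the 2 us pause mirrors the DELAY(2) between polls.
 */
#include <stdbool.h>
#include <time.h>

int wait_for_reset(bool (*ready)(void))
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (ready())
			return 0;		/* came out of reset */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000000L +
		    (now.tv_nsec - start.tv_nsec) > 1000000L)
			return -1;		/* never came out of reset */
		nanosleep(&(struct timespec){ .tv_nsec = 2000 }, NULL);
	}
}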
-
-/*
- * Dump xbow registers.
- * input parameter is either a pointer to
- * the xbow chip or the vertex handle for
- * an xbow vertex.
- */
-void
-idbg_xbowregs(int64_t regs)
-{
- xbow_t *xbow;
- int i;
- xb_linkregs_t *link;
-
-#ifdef LATER
- if (dev_is_vertex((devfs_handle_t) regs)) {
- devfs_handle_t vhdl = (devfs_handle_t) regs;
- xbow_soft_t soft = xbow_soft_get(vhdl);
-
- xbow = soft->base;
- } else
-#endif
- {
- xbow = (xbow_t *) regs;
- }
-
-#ifdef LATER
- qprintf("Printing xbow registers starting at 0x%x\n", xbow);
- qprintf("wid %x status %x erruppr %x errlower %x control %x timeout %x\n",
- xbow->xb_wid_id, xbow->xb_wid_stat, xbow->xb_wid_err_upper,
- xbow->xb_wid_err_lower, xbow->xb_wid_control,
- xbow->xb_wid_req_timeout);
- qprintf("intr uppr %x lower %x errcmd %x llp ctrl %x arb_reload %x\n",
- xbow->xb_wid_int_upper, xbow->xb_wid_int_lower,
- xbow->xb_wid_err_cmdword, xbow->xb_wid_llp,
- xbow->xb_wid_arb_reload);
-#endif
-
- for (i = 8; i <= 0xf; i++) {
- link = &xbow->xb_link(i);
-#ifdef LATER
- qprintf("Link %d registers\n", i);
- qprintf("\tctrl %x stat %x arbuppr %x arblowr %x auxstat %x\n",
- link->link_control, link->link_status,
- link->link_arb_upper, link->link_arb_lower,
- link->link_aux_status);
-#endif
- }
-}
-
-
-#define XBOW_ARB_RELOAD_TICKS 25
- /* granularity: 4 MB/s, max: 124 MB/s */
-#define GRANULARITY ((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)
-
-#define XBOW_BYTES_TO_GBR(BYTES_per_s) (int) (BYTES_per_s / GRANULARITY)
-
-#define XBOW_GBR_TO_BYTES(cnt) (bandwidth_t) ((cnt) * GRANULARITY)
-
-#define CEILING_BYTES_TO_GBR(gbr, bytes_per_sec) \
- ((XBOW_GBR_TO_BYTES(gbr) < bytes_per_sec) ? gbr+1 : gbr)
-
-#define XBOW_ARB_GBR_MAX 31
-
-#define ABS(x)			(((x) > 0) ? (x) : (-(x)))
- /* absolute value */
-
-int
-xbow_bytes_to_gbr(bandwidth_t old_bytes_per_sec, bandwidth_t bytes_per_sec)
-{
- int gbr_granted;
- int new_total_gbr;
- int change_gbr;
- bandwidth_t new_total_bw;
-
-#ifdef GRIO_DEBUG
- printf("xbow_bytes_to_gbr: old_bytes_per_sec %lld bytes_per_sec %lld\n",
- old_bytes_per_sec, bytes_per_sec);
-#endif /* GRIO_DEBUG */
-
- gbr_granted = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(old_bytes_per_sec)),
- old_bytes_per_sec);
- new_total_bw = old_bytes_per_sec + bytes_per_sec;
- new_total_gbr = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(new_total_bw)),
- new_total_bw);
-
- change_gbr = new_total_gbr - gbr_granted;
-
-#ifdef GRIO_DEBUG
- printf("xbow_bytes_to_gbr: gbr_granted %d new_total_gbr %d change_gbr %d\n",
- gbr_granted, new_total_gbr, change_gbr);
-#endif /* GRIO_DEBUG */
-
- return (change_gbr);
-}
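/*
 * Worked example of the conversion macros above: one GBR tick is
 * GRANULARITY = (100 * 1000000) / 25 = 4,000,000 bytes/s (4 MB/s), and
 * XBOW_ARB_GBR_MAX = 31 ticks gives the 124 MB/s quoted in the
 * comment.  The macros are copied from this file; long long stands in
 * for bandwidth_t, which is typedef'd elsewhere.
 */
#include <stdio.h>

typedef long long bandwidth_t;

#define XBOW_ARB_RELOAD_TICKS 25
#define GRANULARITY ((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)
#define XBOW_BYTES_TO_GBR(BYTES_per_s) (int) (BYTES_per_s / GRANULARITY)
#define XBOW_GBR_TO_BYTES(cnt) (bandwidth_t) ((cnt) * GRANULARITY)
#define CEILING_BYTES_TO_GBR(gbr, bytes_per_sec) \
	((XBOW_GBR_TO_BYTES(gbr) < bytes_per_sec) ? gbr+1 : gbr)

int main(void)
{
	bandwidth_t req = 10 * 1000000;		/* request 10 MB/s */
	int gbr = XBOW_BYTES_TO_GBR(req);	/* 10 MB / 4 MB = 2 ticks */

	gbr = CEILING_BYTES_TO_GBR(gbr, req);	/* round up to 3 ticks */
	printf("10 MB/s -> %d GBR ticks (%lld bytes/s granted)\n",
	       gbr, XBOW_GBR_TO_BYTES(gbr));	/* 3 ticks = 12 MB/s */
	return 0;
}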
-
-/* Conversion from GBR to bytes */
-bandwidth_t
-xbow_gbr_to_bytes(int gbr)
-{
- return (XBOW_GBR_TO_BYTES(gbr));
-}
-
-/* Given the vhdl for the desired xbow, the src and dest. widget ids
- * and the req_bw value, this xbow driver entry point accesses the
- * xbow registers and allocates the desired bandwidth if available.
- *
- * If bandwidth allocation is successful, return success else return failure.
- */
-int
-xbow_prio_bw_alloc(devfs_handle_t vhdl,
- xwidgetnum_t src_wid,
- xwidgetnum_t dest_wid,
- unsigned long long old_alloc_bw,
- unsigned long long req_bw)
-{
- xbow_soft_t soft = xbow_soft_get(vhdl);
- volatile xbowreg_t *xreg;
- xbowreg_t mask;
- unsigned long s;
- int error = 0;
- bandwidth_t old_bw_BYTES, req_bw_BYTES;
- xbowreg_t old_xreg;
- int old_bw_GBR, req_bw_GBR, new_bw_GBR;
-
-#ifdef GRIO_DEBUG
- printf("xbow_prio_bw_alloc: vhdl %d src_wid %d dest_wid %d req_bw %lld\n",
- (int) vhdl, (int) src_wid, (int) dest_wid, req_bw);
-#endif
-
- ASSERT(XBOW_WIDGET_IS_VALID(src_wid));
- ASSERT(XBOW_WIDGET_IS_VALID(dest_wid));
-
- s = mutex_spinlock(&soft->xbow_bw_alloc_lock);
-
- /* Get pointer to the correct register */
- xreg = XBOW_PRIO_ARBREG_PTR(soft->base, dest_wid, src_wid);
-
- /* Get mask for GBR count value */
- mask = XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(src_wid);
-
- req_bw_GBR = xbow_bytes_to_gbr(old_alloc_bw, req_bw);
- req_bw_BYTES = (req_bw_GBR < 0) ? (-1 * xbow_gbr_to_bytes(ABS(req_bw_GBR)))
- : xbow_gbr_to_bytes(req_bw_GBR);
-
-#ifdef GRIO_DEBUG
- printf("req_bw %lld req_bw_BYTES %lld req_bw_GBR %d\n",
- req_bw, req_bw_BYTES, req_bw_GBR);
-#endif /* GRIO_DEBUG */
-
- old_bw_BYTES = soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS];
- old_xreg = *xreg;
- old_bw_GBR = (((*xreg) & mask) >> XB_ARB_GBR_SHFT(src_wid));
-
-#ifdef GRIO_DEBUG
- ASSERT(XBOW_BYTES_TO_GBR(old_bw_BYTES) == old_bw_GBR);
-
- printf("old_bw_BYTES %lld old_bw_GBR %d\n", old_bw_BYTES, old_bw_GBR);
-
- printf("req_bw_BYTES %lld old_bw_BYTES %lld soft->bw_hiwm %lld\n",
- req_bw_BYTES, old_bw_BYTES,
- soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]);
-
-#endif /* GRIO_DEBUG */
-
- /* Accept the request only if we don't exceed the destination
- * port HIWATER_MARK *AND* the max. link GBR arbitration count
- */
- if (((old_bw_BYTES + req_bw_BYTES) <=
- soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]) &&
- (req_bw_GBR + old_bw_GBR <= XBOW_ARB_GBR_MAX)) {
-
- new_bw_GBR = (old_bw_GBR + req_bw_GBR);
-
- /* Set this in the xbow link register */
- *xreg = (old_xreg & ~mask) | \
- (new_bw_GBR << XB_ARB_GBR_SHFT(src_wid) & mask);
-
- soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS] =
- xbow_gbr_to_bytes(new_bw_GBR);
- } else {
- error = 1;
- }
-
- mutex_spinunlock(&soft->xbow_bw_alloc_lock, s);
-
- return (error);
-}
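/*
 * The register update in xbow_prio_bw_alloc() changes only one source
 * widget's GBR field inside a shared arbitration register: clear the
 * field with ~mask, then OR in the new count.  Minimal illustration
 * with a made-up layout of four 5-bit fields per 32-bit register
 * (XB_ARB_GBR_MSK and XB_ARB_GBR_SHFT() are defined elsewhere):
 */
#include <stdio.h>
#include <stdint.h>

#define GBR_MSK		0x1fu			/* assume 5-bit GBR counts   */
#define GBR_SHFT(src)	(((src) & 0x3) * 8)	/* assume one field per byte */

int main(void)
{
	uint32_t reg = 0x08070605u;		/* made-up register contents */
	unsigned src = 2;			/* update the field for source 2 */
	uint32_t mask = GBR_MSK << GBR_SHFT(src);
	unsigned old_gbr = (reg & mask) >> GBR_SHFT(src);
	unsigned new_gbr = old_gbr + 3;		/* grant three more ticks */

	reg = (reg & ~mask) | ((new_gbr << GBR_SHFT(src)) & mask);
	printf("old=%u new=%u reg=0x%08x\n", old_gbr, new_gbr, reg);
						/* old=7 new=10 reg=0x080a0605 */
	return 0;
}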
diff --git a/arch/ia64/sn/io/xswitch.c b/arch/ia64/sn/io/xswitch.c
index 9d0ffa896da129..687a2c9d8d9fec 100644
--- a/arch/ia64/sn/io/xswitch.c
+++ b/arch/ia64/sn/io/xswitch.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997,2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -45,9 +45,9 @@ int xswitch_devflag = D_MP;
#define DEV_FUNC(dev,func) xwidget_to_provider_fns(dev)->func
static xswitch_provider_t *
-xwidget_to_provider_fns(devfs_handle_t xconn)
+xwidget_to_provider_fns(vertex_hdl_t xconn)
{
- devfs_handle_t busv;
+ vertex_hdl_t busv;
xswitch_info_t xswitch_info;
xswitch_provider_t provider_fns;
@@ -75,27 +75,18 @@ static char xswitch_info_fingerprint[] = "xswitch_info";
struct xswitch_info_s {
char *fingerprint;
unsigned census;
- devfs_handle_t vhdl[XSWITCH_CENSUS_PORTS];
- devfs_handle_t master_vhdl[XSWITCH_CENSUS_PORTS];
+ vertex_hdl_t vhdl[XSWITCH_CENSUS_PORTS];
+ vertex_hdl_t master_vhdl[XSWITCH_CENSUS_PORTS];
xswitch_provider_t *xswitch_fns;
};
xswitch_info_t
-xswitch_info_get(devfs_handle_t xwidget)
+xswitch_info_get(vertex_hdl_t xwidget)
{
xswitch_info_t xswitch_info;
xswitch_info = (xswitch_info_t)
hwgraph_fastinfo_get(xwidget);
-#ifdef LATER
- if ((xswitch_info != NULL) &&
- (xswitch_info->fingerprint != xswitch_info_fingerprint))
-#ifdef SUPPORT_PRINTING_V_FORMAT
- PRINT_PANIC("%v xswitch_info_get bad fingerprint", xwidget);
-#else
- PRINT_PANIC("%x xswitch_info_get bad fingerprint", xwidget);
-#endif
-#endif /* LATER */
return (xswitch_info);
}
@@ -103,7 +94,7 @@ xswitch_info_get(devfs_handle_t xwidget)
void
xswitch_info_vhdl_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
- devfs_handle_t xwidget)
+ vertex_hdl_t xwidget)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
@@ -115,15 +106,10 @@ xswitch_info_vhdl_set(xswitch_info_t xswitch_info,
xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN] = xwidget;
}
-devfs_handle_t
+vertex_hdl_t
xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
xwidgetnum_t port)
{
-#ifdef LATER
- if (xswitch_info == NULL)
- PRINT_PANIC("xswitch_info_vhdl_get: null xswitch_info");
-#endif
-
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return GRAPH_VERTEX_NONE;
@@ -142,7 +128,7 @@ xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
void
xswitch_info_master_assignment_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
- devfs_handle_t master_vhdl)
+ vertex_hdl_t master_vhdl)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
@@ -154,7 +140,7 @@ xswitch_info_master_assignment_set(xswitch_info_t xswitch_info,
xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN] = master_vhdl;
}
-devfs_handle_t
+vertex_hdl_t
xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
xwidgetnum_t port)
{
@@ -169,14 +155,14 @@ xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
}
void
-xswitch_info_set(devfs_handle_t xwidget, xswitch_info_t xswitch_info)
+xswitch_info_set(vertex_hdl_t xwidget, xswitch_info_t xswitch_info)
{
xswitch_info->fingerprint = xswitch_info_fingerprint;
hwgraph_fastinfo_set(xwidget, (arbitrary_info_t) xswitch_info);
}
xswitch_info_t
-xswitch_info_new(devfs_handle_t xwidget)
+xswitch_info_new(vertex_hdl_t xwidget)
{
xswitch_info_t xswitch_info;
@@ -202,7 +188,7 @@ xswitch_info_new(devfs_handle_t xwidget)
}
void
-xswitch_provider_register(devfs_handle_t busv,
+xswitch_provider_register(vertex_hdl_t busv,
xswitch_provider_t * xswitch_fns)
{
xswitch_info_t xswitch_info = xswitch_info_get(busv);
@@ -232,35 +218,8 @@ xswitch_info_link_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
}
int
-xswitch_reset_link(devfs_handle_t xconn_vhdl)
+xswitch_reset_link(vertex_hdl_t xconn_vhdl)
{
return DEV_FUNC(xconn_vhdl, reset_link)
(xconn_vhdl);
}
-
-/* Given a vertex handle to the xswitch get its logical
- * id.
- */
-int
-xswitch_id_get(devfs_handle_t xconn_vhdl)
-{
- arbitrary_info_t xbow_num;
- graph_error_t rv;
-
- rv = hwgraph_info_get_LBL(xconn_vhdl,INFO_LBL_XSWITCH_ID,&xbow_num);
- ASSERT(rv == GRAPH_SUCCESS);
- return(xbow_num);
-}
-
-/* Given a vertex handle to the xswitch set its logical
- * id.
- */
-void
-xswitch_id_set(devfs_handle_t xconn_vhdl,int xbow_num)
-{
- graph_error_t rv;
-
- rv = hwgraph_info_add_LBL(xconn_vhdl,INFO_LBL_XSWITCH_ID,
- (arbitrary_info_t)xbow_num);
- ASSERT(rv == GRAPH_SUCCESS);
-}
diff --git a/arch/ia64/sn/io/xtalk.c b/arch/ia64/sn/io/xtalk.c
deleted file mode 100644
index fdb0671500ba13..00000000000000
--- a/arch/ia64/sn/io/xtalk.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/xtalk/xtalk_private.h>
-
-/*
- * Implement crosstalk provider operations. The xtalk* layer provides a
- * platform-independent interface for crosstalk devices. This layer
- * switches among the possible implementations of a crosstalk adapter.
- *
- * On platforms with only one possible xtalk provider, macros can be
- * set up at the top that cause the table lookups and indirections to
- * completely disappear.
- */
-
-#define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
-#define DEL(ptr) (kfree(ptr))
-
-char widget_info_fingerprint[] = "widget_info";
-
-cdl_p xtalk_registry = NULL;
-
-#define DEV_FUNC(dev,func) hub_##func
-#define CAST_PIOMAP(x) ((hub_piomap_t)(x))
-#define CAST_DMAMAP(x) ((hub_dmamap_t)(x))
-#define CAST_INTR(x) ((hub_intr_t)(x))
-
-/* =====================================================================
- * Function Table of Contents
- */
-xtalk_piomap_t xtalk_piomap_alloc(devfs_handle_t, device_desc_t, iopaddr_t, size_t, size_t, unsigned);
-void xtalk_piomap_free(xtalk_piomap_t);
-caddr_t xtalk_piomap_addr(xtalk_piomap_t, iopaddr_t, size_t);
-void xtalk_piomap_done(xtalk_piomap_t);
-caddr_t xtalk_piotrans_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, unsigned);
-caddr_t xtalk_pio_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, xtalk_piomap_t *, unsigned);
-void xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *);
-caddr_t xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
-static caddr_t null_xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
-xtalk_dmamap_t xtalk_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
-void xtalk_dmamap_free(xtalk_dmamap_t);
-iopaddr_t xtalk_dmamap_addr(xtalk_dmamap_t, paddr_t, size_t);
-alenlist_t xtalk_dmamap_list(xtalk_dmamap_t, alenlist_t, unsigned);
-void xtalk_dmamap_done(xtalk_dmamap_t);
-iopaddr_t xtalk_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t xtalk_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
-void xtalk_dmamap_drain(xtalk_dmamap_t);
-void xtalk_dmaaddr_drain(devfs_handle_t, iopaddr_t, size_t);
-void xtalk_dmalist_drain(devfs_handle_t, alenlist_t);
-xtalk_intr_t xtalk_intr_alloc(devfs_handle_t, device_desc_t, devfs_handle_t);
-xtalk_intr_t xtalk_intr_alloc_nothd(devfs_handle_t, device_desc_t, devfs_handle_t);
-void xtalk_intr_free(xtalk_intr_t);
-int xtalk_intr_connect(xtalk_intr_t, xtalk_intr_setfunc_t, void *);
-void xtalk_intr_disconnect(xtalk_intr_t);
-devfs_handle_t xtalk_intr_cpu_get(xtalk_intr_t);
-int xtalk_error_handler(devfs_handle_t, int, ioerror_mode_t, ioerror_t *);
-int xtalk_error_devenable(devfs_handle_t, int, int);
-void xtalk_provider_startup(devfs_handle_t);
-void xtalk_provider_shutdown(devfs_handle_t);
-devfs_handle_t xtalk_intr_dev_get(xtalk_intr_t);
-xwidgetnum_t xtalk_intr_target_get(xtalk_intr_t);
-xtalk_intr_vector_t xtalk_intr_vector_get(xtalk_intr_t);
-iopaddr_t xtalk_intr_addr_get(struct xtalk_intr_s *);
-void *xtalk_intr_sfarg_get(xtalk_intr_t);
-devfs_handle_t xtalk_pio_dev_get(xtalk_piomap_t);
-xwidgetnum_t xtalk_pio_target_get(xtalk_piomap_t);
-iopaddr_t xtalk_pio_xtalk_addr_get(xtalk_piomap_t);
-ulong xtalk_pio_mapsz_get(xtalk_piomap_t);
-caddr_t xtalk_pio_kvaddr_get(xtalk_piomap_t);
-devfs_handle_t xtalk_dma_dev_get(xtalk_dmamap_t);
-xwidgetnum_t xtalk_dma_target_get(xtalk_dmamap_t);
-xwidget_info_t xwidget_info_chk(devfs_handle_t);
-xwidget_info_t xwidget_info_get(devfs_handle_t);
-void xwidget_info_set(devfs_handle_t, xwidget_info_t);
-devfs_handle_t xwidget_info_dev_get(xwidget_info_t);
-xwidgetnum_t xwidget_info_id_get(xwidget_info_t);
-devfs_handle_t xwidget_info_master_get(xwidget_info_t);
-xwidgetnum_t xwidget_info_masterid_get(xwidget_info_t);
-xwidget_part_num_t xwidget_info_part_num_get(xwidget_info_t);
-xwidget_mfg_num_t xwidget_info_mfg_num_get(xwidget_info_t);
-char *xwidget_info_name_get(xwidget_info_t);
-void xtalk_init(void);
-void xtalk_provider_register(devfs_handle_t, xtalk_provider_t *);
-void xtalk_provider_unregister(devfs_handle_t);
-xtalk_provider_t *xtalk_provider_fns_get(devfs_handle_t);
-int xwidget_driver_register(xwidget_part_num_t,
- xwidget_mfg_num_t,
- char *, unsigned);
-void xwidget_driver_unregister(char *);
-int xwidget_register(xwidget_hwid_t, devfs_handle_t,
- xwidgetnum_t, devfs_handle_t,
- xwidgetnum_t, async_attach_t);
-int xwidget_unregister(devfs_handle_t);
-void xwidget_reset(devfs_handle_t);
-char *xwidget_name_get(devfs_handle_t);
-#if !defined(DEV_FUNC)
-/*
- * There is more than one possible provider
- * for this platform. We need to examine the
- * master vertex of the current vertex for
- * a provider function structure, and indirect
- * through the appropriately named member.
- */
-#define DEV_FUNC(dev,func) xwidget_to_provider_fns(dev)->func
-#define CAST_PIOMAP(x) ((xtalk_piomap_t)(x))
-#define CAST_DMAMAP(x) ((xtalk_dmamap_t)(x))
-#define CAST_INTR(x) ((xtalk_intr_t)(x))
-
-static xtalk_provider_t *
-xwidget_to_provider_fns(devfs_handle_t xconn)
-{
- xwidget_info_t widget_info;
- xtalk_provider_t *provider_fns;
-
- widget_info = xwidget_info_get(xconn);
- ASSERT(widget_info != NULL);
-
- provider_fns = xwidget_info_pops_get(widget_info);
- ASSERT(provider_fns != NULL);
-
- return (provider_fns);
-}
-#endif
-
-/*
- * Many functions are not passed their vertex
- * information directly; rather, they must
- * dive through a resource map. These macros
- * are available to coordinate this detail.
- */
-#define PIOMAP_FUNC(map,func) DEV_FUNC(map->xp_dev,func)
-#define DMAMAP_FUNC(map,func) DEV_FUNC(map->xd_dev,func)
-#define INTR_FUNC(intr,func) DEV_FUNC(intr_hdl->xi_dev,func)
-
-/* =====================================================================
- * PIO MANAGEMENT
- *
- * For mapping system virtual address space to
- * xtalk space on a specified widget
- */
-
-xtalk_piomap_t
-xtalk_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
- device_desc_t dev_desc, /* device descriptor */
- iopaddr_t xtalk_addr, /* map for this xtalk_addr range */
- size_t byte_count,
- size_t byte_count_max, /* maximum size of a mapping */
- unsigned flags)
-{ /* defined in sys/pio.h */
- return (xtalk_piomap_t) DEV_FUNC(dev, piomap_alloc)
- (dev, dev_desc, xtalk_addr, byte_count, byte_count_max, flags);
-}
-
-
-void
-xtalk_piomap_free(xtalk_piomap_t xtalk_piomap)
-{
- PIOMAP_FUNC(xtalk_piomap, piomap_free)
- (CAST_PIOMAP(xtalk_piomap));
-}
-
-
-caddr_t
-xtalk_piomap_addr(xtalk_piomap_t xtalk_piomap, /* mapping resources */
- iopaddr_t xtalk_addr, /* map for this xtalk address */
- size_t byte_count)
-{ /* map this many bytes */
- return PIOMAP_FUNC(xtalk_piomap, piomap_addr)
- (CAST_PIOMAP(xtalk_piomap), xtalk_addr, byte_count);
-}
-
-
-void
-xtalk_piomap_done(xtalk_piomap_t xtalk_piomap)
-{
- PIOMAP_FUNC(xtalk_piomap, piomap_done)
- (CAST_PIOMAP(xtalk_piomap));
-}
-
-
-caddr_t
-xtalk_piotrans_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- iopaddr_t xtalk_addr, /* Crosstalk address */
- size_t byte_count, /* map this many bytes */
- unsigned flags)
-{ /* (currently unused) */
- return DEV_FUNC(dev, piotrans_addr)
- (dev, dev_desc, xtalk_addr, byte_count, flags);
-}
-
-caddr_t
-xtalk_pio_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- iopaddr_t addr, /* starting address (or offset in window) */
- size_t byte_count, /* map this many bytes */
- xtalk_piomap_t *mapp, /* where to return the map pointer */
- unsigned flags)
-{ /* PIO flags */
- xtalk_piomap_t map = 0;
- caddr_t res;
-
- if (mapp)
- *mapp = 0; /* record "no map used" */
-
- res = xtalk_piotrans_addr
- (dev, dev_desc, addr, byte_count, flags);
- if (res)
- return res; /* xtalk_piotrans worked */
-
- map = xtalk_piomap_alloc
- (dev, dev_desc, addr, byte_count, byte_count, flags);
- if (!map)
- return res; /* xtalk_piomap_alloc failed */
-
- res = xtalk_piomap_addr
- (map, addr, byte_count);
- if (!res) {
- xtalk_piomap_free(map);
- return res; /* xtalk_piomap_addr failed */
- }
- if (mapp)
- *mapp = map; /* pass back map used */
-
- return res; /* xtalk_piomap_addr succeeded */
-}
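-
-/*
- * Illustrative usage sketch, not from the original file: a driver
- * holding a widget connection vertex "xconn_vhdl" might map a small
- * register window and release it again roughly as follows, where
- * MY_REG_BASE and MY_REG_SIZE are hypothetical constants and the
- * device descriptor and flags are left at 0:
- *
- *     xtalk_piomap_t map;
- *     volatile uint32_t *regs;
- *
- *     regs = (volatile uint32_t *) xtalk_pio_addr(xconn_vhdl, 0,
- *                 MY_REG_BASE, MY_REG_SIZE, &map, 0);
- *     if (regs == NULL)
- *         return -1;
- *     ... PIO to the registers ...
- *     if (map) {
- *         xtalk_piomap_done(map);
- *         xtalk_piomap_free(map);
- *     }
- */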
-
-/* =====================================================================
- * EARLY PIOTRANS SUPPORT
- *
- * There are places where drivers (mgras, for instance)
- * need to get PIO translations before the infrastructure
- * is extended to them (setting up textports, for
- * instance). These drivers should call
- * xtalk_early_piotrans_addr with their xtalk ID
- * information, a sequence number (so we can use the second
- * mgras for instance), and the usual piotrans parameters.
- *
- * Machine specific code should provide an implementation
- * of early_piotrans_addr, and present a pointer to this
- * function to xtalk_set_early_piotrans_addr so it can be
- * used by clients without the clients having to know what
- * platform or what xtalk provider is in use.
- */
-
-static xtalk_early_piotrans_addr_f null_xtalk_early_piotrans_addr;
-
-xtalk_early_piotrans_addr_f *impl_early_piotrans_addr = null_xtalk_early_piotrans_addr;
-
-/* xtalk_set_early_piotrans_addr:
- * specify the early_piotrans_addr implementation function.
- */
-void
-xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *impl)
-{
- impl_early_piotrans_addr = impl;
-}
-
-/* xtalk_early_piotrans_addr:
- * figure out a PIO address for the "nth" crosstalk widget that
- * matches the specified part and mfgr number. Returns NULL if
- * there is no such widget, or if the requested mapping can not
- * be constructed.
- * Limitations on which crosstalk slots (and busses) are
- * checked, and definitions of the ordering of the search across
- * the crosstalk slots, are defined by the platform.
- */
-caddr_t
-xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
- xwidget_mfg_num_t mfg_num,
- int which,
- iopaddr_t xtalk_addr,
- size_t byte_count,
- unsigned flags)
-{
- return impl_early_piotrans_addr
- (part_num, mfg_num, which, xtalk_addr, byte_count, flags);
-}
-
-/* null_xtalk_early_piotrans_addr:
- * used as the early_piotrans_addr implementation until and
- * unless a real implementation is provided. In DEBUG kernels,
- * we want to know who is calling before the implementation is
- * registered; in non-DEBUG kernels, return NULL representing
- * lack of mapping support.
- */
-/*ARGSUSED */
-static caddr_t
-null_xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
- xwidget_mfg_num_t mfg_num,
- int which,
- iopaddr_t xtalk_addr,
- size_t byte_count,
- unsigned flags)
-{
-#if DEBUG
- PRINT_PANIC("null_xtalk_early_piotrans_addr");
-#endif
- return NULL;
-}
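-
-/*
- * Illustrative sketch, not from the original file: platform setup code
- * registers its implementation once, after which an early driver can
- * ask for a PIO address by part and mfg number before its attach point
- * exists.  my_early_piotrans_addr, MY_PART_NUM and MY_MFG_NUM are
- * hypothetical names.
- *
- *     xtalk_set_early_piotrans_addr(my_early_piotrans_addr);
- *
- *     base = xtalk_early_piotrans_addr(MY_PART_NUM, MY_MFG_NUM,
- *                 0, 0, 0x1000, 0);
- *
- * The second call asks for a 4KB window at xtalk address 0 of the
- * first ("which" == 0) matching widget, and returns NULL if no such
- * widget or mapping exists.
- */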
-
-/* =====================================================================
- * DMA MANAGEMENT
- *
- * For mapping from crosstalk space to system
- * physical space.
- */
-
-xtalk_dmamap_t
-xtalk_dmamap_alloc(devfs_handle_t dev, /* set up mappings for this device */
- device_desc_t dev_desc, /* device descriptor */
- size_t byte_count_max, /* max size of a mapping */
- unsigned flags)
-{ /* defined in dma.h */
- return (xtalk_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
- (dev, dev_desc, byte_count_max, flags);
-}
-
-
-void
-xtalk_dmamap_free(xtalk_dmamap_t xtalk_dmamap)
-{
- DMAMAP_FUNC(xtalk_dmamap, dmamap_free)
- (CAST_DMAMAP(xtalk_dmamap));
-}
-
-
-iopaddr_t
-xtalk_dmamap_addr(xtalk_dmamap_t xtalk_dmamap, /* use these mapping resources */
- paddr_t paddr, /* map for this address */
- size_t byte_count)
-{ /* map this many bytes */
- return DMAMAP_FUNC(xtalk_dmamap, dmamap_addr)
- (CAST_DMAMAP(xtalk_dmamap), paddr, byte_count);
-}
-
-
-alenlist_t
-xtalk_dmamap_list(xtalk_dmamap_t xtalk_dmamap, /* use these mapping resources */
- alenlist_t alenlist, /* map this Address/Length List */
- unsigned flags)
-{
- return DMAMAP_FUNC(xtalk_dmamap, dmamap_list)
- (CAST_DMAMAP(xtalk_dmamap), alenlist, flags);
-}
-
-
-void
-xtalk_dmamap_done(xtalk_dmamap_t xtalk_dmamap)
-{
- DMAMAP_FUNC(xtalk_dmamap, dmamap_done)
- (CAST_DMAMAP(xtalk_dmamap));
-}
-
-
-iopaddr_t
-xtalk_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- paddr_t paddr, /* system physical address */
- size_t byte_count, /* length */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_addr)
- (dev, dev_desc, paddr, byte_count, flags);
-}
-
-
-alenlist_t
-xtalk_dmatrans_list(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- alenlist_t palenlist, /* system address/length list */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_list)
- (dev, dev_desc, palenlist, flags);
-}
-
-void
-xtalk_dmamap_drain(xtalk_dmamap_t map)
-{
- DMAMAP_FUNC(map, dmamap_drain)
- (CAST_DMAMAP(map));
-}
-
-void
-xtalk_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
-{
- DEV_FUNC(dev, dmaaddr_drain)
- (dev, addr, size);
-}
-
-void
-xtalk_dmalist_drain(devfs_handle_t dev, alenlist_t list)
-{
- DEV_FUNC(dev, dmalist_drain)
- (dev, list);
-}
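-
-/*
- * Illustrative DMA usage sketch, not from the original file: a driver
- * typically allocates a map once, then establishes and retires a
- * mapping around each transfer.  "xconn_vhdl", "buf_paddr" and
- * "buf_len" are hypothetical.
- *
- *     xtalk_dmamap_t dmamap;
- *     iopaddr_t xio_addr;
- *
- *     dmamap = xtalk_dmamap_alloc(xconn_vhdl, 0, buf_len, 0);
- *     if (dmamap == NULL)
- *         return -1;
- *     xio_addr = xtalk_dmamap_addr(dmamap, buf_paddr, buf_len);
- *     ... program the widget with xio_addr and run the transfer ...
- *     xtalk_dmamap_done(dmamap);
- *     xtalk_dmamap_free(dmamap);
- *
- * Drivers that can use a direct translation skip the map and call
- * xtalk_dmatrans_addr instead.
- */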
-
-/* =====================================================================
- * INTERRUPT MANAGEMENT
- *
- * Allow crosstalk devices to establish interrupts
- */
-
-/*
- * Allocate resources required for an interrupt as specified in dev_desc.
- * Return the interrupt resource handle.
- */
-xtalk_intr_t
-xtalk_intr_alloc(devfs_handle_t dev, /* which Crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev)
-{ /* owner of this interrupt */
- return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc)
- (dev, dev_desc, owner_dev);
-}
-
-/*
- * Allocate resources required for an interrupt as specified in dev_desc.
- * Unconditionally set up the resources to be non-threaded.
- * Return the interrupt resource handle.
- */
-xtalk_intr_t
-xtalk_intr_alloc_nothd(devfs_handle_t dev, /* which Crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev) /* owner of this interrupt */
-{
- return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc_nothd)
- (dev, dev_desc, owner_dev);
-}
-
-/*
- * Free resources consumed by intr_alloc.
- */
-void
-xtalk_intr_free(xtalk_intr_t intr_hdl)
-{
- INTR_FUNC(intr_hdl, intr_free)
- (CAST_INTR(intr_hdl));
-}
-
-
-/*
- * Associate the resources allocated by a previous xtalk_intr_alloc call
- * with the given setfunc and its argument.
- *
- * Returns 0 on success, returns <0 on failure.
- */
-int
-xtalk_intr_connect(xtalk_intr_t intr_hdl, /* xtalk intr resource handle */
- xtalk_intr_setfunc_t setfunc, /* func to set intr hw */
- void *setfunc_arg) /* arg to setfunc */
-{
- return INTR_FUNC(intr_hdl, intr_connect)
- (CAST_INTR(intr_hdl), setfunc, setfunc_arg);
-}
-
-
-/*
- * Disassociate the handler from the specified interrupt.
- */
-void
-xtalk_intr_disconnect(xtalk_intr_t intr_hdl)
-{
- INTR_FUNC(intr_hdl, intr_disconnect)
- (CAST_INTR(intr_hdl));
-}
-
-
-/*
- * Return a hwgraph vertex that represents the CPU currently
- * targeted by an interrupt.
- */
-devfs_handle_t
-xtalk_intr_cpu_get(xtalk_intr_t intr_hdl)
-{
- return INTR_FUNC(intr_hdl, intr_cpu_get)
- (CAST_INTR(intr_hdl));
-}
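-
-/*
- * Illustrative interrupt setup sketch, not from the original file:
- * a driver allocates an interrupt resource and then connects it,
- * supplying a setfunc that the provider calls to program the widget's
- * interrupt hardware.  "xconn_vhdl", "owner_vhdl", my_intr_setfunc and
- * my_soft are hypothetical.
- *
- *     xtalk_intr_t intr;
- *
- *     intr = xtalk_intr_alloc(xconn_vhdl, 0, owner_vhdl);
- *     if (intr == NULL)
- *         return -1;
- *     if (xtalk_intr_connect(intr, my_intr_setfunc, my_soft) < 0) {
- *         xtalk_intr_free(intr);
- *         return -1;
- *     }
- *
- * Teardown is the reverse: xtalk_intr_disconnect followed by
- * xtalk_intr_free.
- */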
-
-
-/* =====================================================================
- * CONFIGURATION MANAGEMENT
- */
-
-/*
- * Startup a crosstalk provider
- */
-void
-xtalk_provider_startup(devfs_handle_t xtalk_provider)
-{
- DEV_FUNC(xtalk_provider, provider_startup)
- (xtalk_provider);
-}
-
-
-/*
- * Shutdown a crosstalk provider
- */
-void
-xtalk_provider_shutdown(devfs_handle_t xtalk_provider)
-{
- DEV_FUNC(xtalk_provider, provider_shutdown)
- (xtalk_provider);
-}
-
-/*
- * Enable a device on a xtalk widget
- */
-void
-xtalk_widgetdev_enable(devfs_handle_t xconn_vhdl, int devnum)
-{
- DEV_FUNC(xconn_vhdl, widgetdev_enable) (xconn_vhdl, devnum);
-}
-
-/*
- * Shutdown a device on a xtalk widget
- */
-void
-xtalk_widgetdev_shutdown(devfs_handle_t xconn_vhdl, int devnum)
-{
- DEV_FUNC(xconn_vhdl, widgetdev_shutdown) (xconn_vhdl, devnum);
-}
-
-int
-xtalk_dma_enabled(devfs_handle_t xconn_vhdl)
-{
- return DEV_FUNC(xconn_vhdl, dma_enabled) (xconn_vhdl);
-}
-/*
- * Generic crosstalk functions, for use with all crosstalk providers
- * and all crosstalk devices.
- */
-
-/****** Generic crosstalk interrupt interfaces ******/
-devfs_handle_t
-xtalk_intr_dev_get(xtalk_intr_t xtalk_intr)
-{
- return (xtalk_intr->xi_dev);
-}
-
-xwidgetnum_t
-xtalk_intr_target_get(xtalk_intr_t xtalk_intr)
-{
- return (xtalk_intr->xi_target);
-}
-
-xtalk_intr_vector_t
-xtalk_intr_vector_get(xtalk_intr_t xtalk_intr)
-{
- return (xtalk_intr->xi_vector);
-}
-
-iopaddr_t
-xtalk_intr_addr_get(struct xtalk_intr_s *xtalk_intr)
-{
- return (xtalk_intr->xi_addr);
-}
-
-void *
-xtalk_intr_sfarg_get(xtalk_intr_t xtalk_intr)
-{
- return (xtalk_intr->xi_sfarg);
-}
-
-/****** Generic crosstalk pio interfaces ******/
-devfs_handle_t
-xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_dev);
-}
-
-xwidgetnum_t
-xtalk_pio_target_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_target);
-}
-
-iopaddr_t
-xtalk_pio_xtalk_addr_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_xtalk_addr);
-}
-
-ulong
-xtalk_pio_mapsz_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_mapsz);
-}
-
-caddr_t
-xtalk_pio_kvaddr_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_kvaddr);
-}
-
-
-/****** Generic crosstalk dma interfaces ******/
-devfs_handle_t
-xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap)
-{
- return (xtalk_dmamap->xd_dev);
-}
-
-xwidgetnum_t
-xtalk_dma_target_get(xtalk_dmamap_t xtalk_dmamap)
-{
- return (xtalk_dmamap->xd_target);
-}
-
-
-/****** Generic crosstalk widget information interfaces ******/
-
-/* xwidget_info_chk:
- * check to see if this vertex is a widget;
- * if so, return its widget_info (if any).
- * if not, return NULL.
- */
-xwidget_info_t
-xwidget_info_chk(devfs_handle_t xwidget)
-{
- arbitrary_info_t ainfo = 0;
-
- hwgraph_info_get_LBL(xwidget, INFO_LBL_XWIDGET, &ainfo);
- return (xwidget_info_t) ainfo;
-}
-
-
-xwidget_info_t
-xwidget_info_get(devfs_handle_t xwidget)
-{
- xwidget_info_t widget_info;
-
- widget_info = (xwidget_info_t)
- hwgraph_fastinfo_get(xwidget);
-
-#ifdef LATER
- if ((widget_info != NULL) &&
- (widget_info->w_fingerprint != widget_info_fingerprint))
-#ifdef SUPPORT_PRINTING_V_FORMAT
- PRINT_PANIC("%v bad xwidget_info", xwidget);
-#else
- PRINT_PANIC("%x bad xwidget_info", xwidget);
-#endif
-#endif /* LATER */
-
- return (widget_info);
-}
-
-void
-xwidget_info_set(devfs_handle_t xwidget, xwidget_info_t widget_info)
-{
- if (widget_info != NULL)
- widget_info->w_fingerprint = widget_info_fingerprint;
-
- hwgraph_fastinfo_set(xwidget, (arbitrary_info_t) widget_info);
-
- /* Also, mark this vertex as an xwidget,
- * and use the widget_info, so xwidget_info_chk
- * can work (and be fairly efficient).
- */
- hwgraph_info_add_LBL(xwidget, INFO_LBL_XWIDGET,
- (arbitrary_info_t) widget_info);
-}
-
-devfs_handle_t
-xwidget_info_dev_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_vertex);
-}
-
-xwidgetnum_t
-xwidget_info_id_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_id);
-}
-
-
-devfs_handle_t
-xwidget_info_master_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_master);
-}
-
-xwidgetnum_t
-xwidget_info_masterid_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_masterid);
-}
-
-xwidget_part_num_t
-xwidget_info_part_num_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_hwid.part_num);
-}
-
-xwidget_mfg_num_t
-xwidget_info_mfg_num_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_hwid.mfg_num);
-}
-/* Extract the widget name from the widget information
- * for the xtalk widget.
- */
-char *
-xwidget_info_name_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget info");
- return(xwidget_info->w_name);
-}
-/****** Generic crosstalk initialization interfaces ******/
-
-/*
- * One-time initialization needed for systems that support crosstalk.
- */
-void
-xtalk_init(void)
-{
- cdl_p cp;
-
-#if DEBUG && ATTACH_DEBUG
- printf("xtalk_init\n");
-#endif
- /* Allocate the registry.
- * We might already have one.
- * If we don't, go get one.
- * MPness: someone might have
- * set one up for us while we
- * were not looking; use an atomic
- * compare-and-swap to commit to
- * using the new registry if and
- * only if nobody else did first.
- * If someone did get there first,
- * toss the one we allocated back
- * into the pool.
- */
- if (xtalk_registry == NULL) {
- cp = cdl_new(EDGE_LBL_XIO, "part", "mfgr");
- if (!compare_and_swap_ptr((void **) &xtalk_registry, NULL, (void *) cp)) {
- cdl_del(cp);
- }
- }
- ASSERT(xtalk_registry != NULL);
-}
-
-/*
- * Associate a set of xtalk_provider functions with a vertex.
- */
-void
-xtalk_provider_register(devfs_handle_t provider, xtalk_provider_t *xtalk_fns)
-{
- hwgraph_fastinfo_set(provider, (arbitrary_info_t) xtalk_fns);
-}
-
-/*
- * Disassociate a set of xtalk_provider functions with a vertex.
- */
-void
-xtalk_provider_unregister(devfs_handle_t provider)
-{
- hwgraph_fastinfo_set(provider, (arbitrary_info_t)NULL);
-}
-
-/*
- * Obtain a pointer to the xtalk_provider functions for a specified Crosstalk
- * provider.
- */
-xtalk_provider_t *
-xtalk_provider_fns_get(devfs_handle_t provider)
-{
- return ((xtalk_provider_t *) hwgraph_fastinfo_get(provider));
-}
-
-/*
- * Announce a driver for a particular crosstalk part.
- * Returns 0 on success or -1 on failure. Failure occurs if the
- * specified hardware already has a driver.
- */
-/*ARGSUSED4 */
-int
-xwidget_driver_register(xwidget_part_num_t part_num,
- xwidget_mfg_num_t mfg_num,
- char *driver_prefix,
- unsigned flags)
-{
- /* a driver's init routine could call
- * xwidget_driver_register before the
- * system calls xtalk_init; so, we
- * make the call here.
- */
- if (xtalk_registry == NULL)
- xtalk_init();
-
- return cdl_add_driver(xtalk_registry,
- part_num, mfg_num,
- driver_prefix, flags, NULL);
-}
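-
-/*
- * Illustrative sketch, not from the original file: a widget driver's
- * init path would typically register itself by part and mfg number and
- * then iterate over any connection points already present.
- * MY_PART_NUM, MY_MFG_NUM, "mydrv" and mydrv_attach (an xtalk_iter_f)
- * are hypothetical.
- *
- *     if (xwidget_driver_register(MY_PART_NUM, MY_MFG_NUM,
- *                 "mydrv", 0) < 0)
- *         return -1;
- *     xtalk_iterate("mydrv", mydrv_attach);
- */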
-
-/*
- * Inform xtalk infrastructure that a driver is no longer available for
- * handling any widgets.
- */
-void
-xwidget_driver_unregister(char *driver_prefix)
-{
- /* before a driver calls unregister,
- * it must have called register; so we
- * can assume we have a registry here.
- */
- ASSERT(xtalk_registry != NULL);
-
- cdl_del_driver(xtalk_registry, driver_prefix, NULL);
-}
-
-/*
- * Call some function with each vertex that
- * might be one of this driver's attach points.
- */
-void
-xtalk_iterate(char *driver_prefix,
- xtalk_iter_f *func)
-{
- ASSERT(xtalk_registry != NULL);
-
- cdl_iterate(xtalk_registry, driver_prefix, (cdl_iter_f *)func);
-}
-
-/*
- * xwidget_register:
- * Register a xtalk device (xwidget) by doing the following.
- * -allocate and initialize xwidget_info data
- * -allocate a hwgraph vertex with name based on widget number (id)
- * -look up the widget's initialization function and call it,
- * or remember the vertex for later initialization.
- *
- */
-int
-xwidget_register(xwidget_hwid_t hwid, /* widget's hardware ID */
- devfs_handle_t widget, /* widget to initialize */
- xwidgetnum_t id, /* widget's target id (0..f) */
- devfs_handle_t master, /* widget's master vertex */
- xwidgetnum_t targetid, /* master's target id (9/a) */
- async_attach_t aa)
-{
- xwidget_info_t widget_info;
- char *s,devnm[MAXDEVNAME];
-
- /* Allocate widget_info and associate it with widget vertex */
- NEW(widget_info);
-
- /* Initialize widget_info */
- widget_info->w_vertex = widget;
- widget_info->w_id = id;
- widget_info->w_master = master;
- widget_info->w_masterid = targetid;
- widget_info->w_hwid = *hwid; /* structure copy */
- widget_info->w_efunc = 0;
- widget_info->w_einfo = 0;
- /*
- * get the name of this xwidget vertex and keep the info.
- * This is needed during errors and interrupts, but as
- * long as we have it, we can use it elsewhere.
- */
- s = dev_to_name(widget,devnm,MAXDEVNAME);
- widget_info->w_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
- if (widget_info->w_name == NULL) {
- DEL(widget_info);
- return -ENOMEM;
- }
- strcpy(widget_info->w_name,s);
-
- xwidget_info_set(widget, widget_info);
-
- device_master_set(widget, master);
-
- /* All the driver init routines (including
- * xtalk_init) are called before we get into
- * attaching devices, so we can assume we
- * have a registry here.
- */
- ASSERT(xtalk_registry != NULL);
-
- /*
- * Add pointer to async attach info -- tear down will be done when
- * the particular descendant is done with the info.
- */
- if (aa)
- async_attach_add_info(widget, aa);
-
- return cdl_add_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
- widget, 0);
-}
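-
-/*
- * Illustrative sketch, not from the original file: platform discovery
- * code, having probed a widget and filled in its hardware ID, would
- * register the connection point roughly like this; "hwid",
- * "xconn_vhdl", "widgetnum", "hub_vhdl" and "hub_widgetid" are
- * hypothetical locals.
- *
- *     xwidget_register(&hwid, xconn_vhdl, widgetnum,
- *                 hub_vhdl, hub_widgetid, NULL);
- */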
-
-/*
- * xwidget_unregister:
- * Unregister the xtalk device and tear down its associated hwgraph
- * namespace and software state.
- */
-int
-xwidget_unregister(devfs_handle_t widget)
-{
- xwidget_info_t widget_info;
- xwidget_hwid_t hwid;
-
- /* Make sure that we have valid widget information initialized */
- if (!(widget_info = xwidget_info_get(widget)))
- return(1);
-
- /* Remove the inventory information associated
- * with the widget.
- */
- hwgraph_inventory_remove(widget, -1, -1, -1, -1, -1);
-
- hwid = &(widget_info->w_hwid);
-
- cdl_del_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
- widget, 0);
-
- /* Clean out the xwidget information */
- (void)kfree(widget_info->w_name);
- BZERO((void *)widget_info, sizeof(*widget_info));
- DEL(widget_info);
-
- return(0);
-}
-
-/*
- * Issue a link reset to a widget.
- */
-void
-xwidget_reset(devfs_handle_t xwidget)
-{
- xswitch_reset_link(xwidget);
-}
-
-
-void
-xwidget_gfx_reset(devfs_handle_t xwidget)
-{
- xwidget_info_t info;
-
- xswitch_reset_link(xwidget);
- info = xwidget_info_get(xwidget);
-#ifdef LATER
- ASSERT_ALWAYS(info != NULL);
-#endif
-
- /*
- * Enable this for other architectures once we add widget_reset to the
- * xtalk provider interface.
- */
- DEV_FUNC(xtalk_provider, widget_reset)
- (xwidget_info_master_get(info), xwidget_info_id_get(info));
-}
-
-#define ANON_XWIDGET_NAME "No Name" /* Default Widget Name */
-
-/* Get the canonical hwgraph name of xtalk widget */
-char *
-xwidget_name_get(devfs_handle_t xwidget_vhdl)
-{
- xwidget_info_t info;
-
- /* If we have a bogus widget handle then return
- * a default anonymous widget name.
- */
- if (xwidget_vhdl == GRAPH_VERTEX_NONE)
- return(ANON_XWIDGET_NAME);
- /* Read the widget name stored in the widget info
- * for the widget setup during widget initialization.
- */
- info = xwidget_info_get(xwidget_vhdl);
- ASSERT(info != NULL);
- return(xwidget_info_name_get(info));
-}
-
-/*
- * xtalk_device_shutdown
- * Disable the specified xtalk widget and clean out all the software
- * state associated with it.
- */
-int
-xtalk_device_shutdown(devfs_handle_t xbus_vhdl, xwidgetnum_t widget)
-{
- devfs_handle_t widget_vhdl;
- char edge_name[8];
-
- sprintf(edge_name, "%d", widget);
- if (hwgraph_traverse(xbus_vhdl, edge_name, &widget_vhdl)
- != GRAPH_SUCCESS)
- return(1);
-
- xwidget_unregister(widget_vhdl);
-
- return(0);
-}