blob: f9aa614ddb17086c0aa2dd446f6ee430ee39d2ed (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
|
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_NODEPDA_H
#define _ASM_IA64_SN_NODEPDA_H
#include <linux/config.h>
#include <asm/irq.h>
#include <asm/sn/intr.h>
#include <asm/sn/router.h>
#if defined(CONFIG_IA64_SGI_SN1)
#include <asm/sn/sn1/synergy.h>
#endif
#include <asm/sn/pda.h>
#include <asm/sn/module.h>
#include <asm/sn/bte.h>
#if defined(CONFIG_IA64_SGI_SN1)
#include <asm/sn/sn1/hubstat.h>
#endif
/*
* NUMA Node-Specific Data structures are defined in this file.
* In particular, this is the location of the node PDA.
* A pointer to the right node PDA is saved in each CPU PDA.
*/
/*
 * Subnode PDA structures.  Each node needs a few data structures that
 * correspond to the PIs on the HUB chip that supports the node.
 * (SN1 only: each hub PI gets its own interrupt dispatch state.)
 */
#if defined(CONFIG_IA64_SGI_SN1)
struct subnodepda_s {
	intr_vecblk_t	intr_dispatch0;	/* interrupt vector block for PI 0 */
	intr_vecblk_t	intr_dispatch1;	/* interrupt vector block for PI 1 */
};

typedef struct subnodepda_s subnode_pda_t;

/* Opaque here; full definition lives with the synergy perf code. */
struct synergy_perf_s;
#endif
/*
 * Node-specific data structure.
 *
 * One of these structures is allocated on each node of a NUMA system.
 *
 * This structure provides a convenient way of keeping together
 * all per-node data structures.  A pointer to the right nodepda is
 * saved in each CPU PDA, and pernode_pdaindr[] (below) lets any node
 * reach any other node's nodepda by compact node id.
 */
struct nodepda_s {
	cpuid_t		node_first_cpu;	/* Starting cpu number for node */
					/* WARNING: no guarantee that */
					/* the second cpu on a node is */
					/* node_first_cpu+1. */

	devfs_handle_t	xbow_vhdl;	/* devfs/hwgraph handle for the xbow */
	nasid_t		xbow_peer;	/* NASID of our peer hub on xbow */
	struct semaphore xbow_sema;	/* Sema for xbow synchronization */
	slotid_t	slotdesc;	/* Slot this node occupies */
#ifdef CONFIG_IA64_SGI_SN2
	geoid_t		geoid;		/* SN2 geographic id (replaces module_id) */
#else
	moduleid_t	module_id;	/* Module ID (redundant local copy) */
#endif
	module_t	*module;	/* Pointer to containing module */
	xwidgetnum_t	basew_id;	/* base xtalk widget number -- TODO confirm semantics */
	devfs_handle_t	basew_xc;	/* handle for base widget -- TODO confirm */
	int		hubticks;	/* NOTE(review): units/meaning unverified here */
	int		num_routers;	/* XXX not setup! Total routers in the system */

	char		*hwg_node_name;	/* hwgraph node name */
	devfs_handle_t	node_vertex;	/* Hwgraph vertex for this node */

	void		*pdinfo;	/* Platform-dependent per-node info */

	nodepda_router_info_t	*npda_rip_first;	/* head of router-info list */
	nodepda_router_info_t	**npda_rip_last;	/* addr of last 'next' ptr -- presumably for appends */

	/*
	 * The BTEs on this node are shared by the local cpus
	 */
	bteinfo_t	bte_if[BTES_PER_NODE];	/* Virtual Interface */
	char		bte_cleanup[5 * L1_CACHE_BYTES] ____cacheline_aligned;
						/* 5 cachelines of BTE scratch,
						 * cacheline aligned -- exact
						 * usage defined by bte code */

#if defined(CONFIG_IA64_SGI_SN1)
	subnode_pda_t	snpda[NUM_SUBNODES];	/* one subnode PDA per hub PI */
	/*
	 * New extended memory reference counters
	 */
	void		*migr_refcnt_counterbase;
	void		*migr_refcnt_counterbuffer;
	size_t		migr_refcnt_cbsize;
	int		migr_refcnt_numsets;
	hubstat_t	hubstats;		/* per-hub statistics (SN1) */
	int		synergy_perf_enabled;	/* nonzero when collection is on -- TODO confirm */
	int		synergy_perf_freq;	/* collection frequency -- units unverified */
	spinlock_t	synergy_perf_lock;	/* protects the synergy perf state */
	uint64_t	synergy_inactive_intervals;
	uint64_t	synergy_active_intervals;
	struct synergy_perf_s	*synergy_perf_data;
	struct synergy_perf_s	*synergy_perf_first;	/* reporting consistency .. */
#endif /* CONFIG_IA64_SGI_SN1 */

	/*
	 * Array of pointers to the nodepdas for each node.
	 */
	struct nodepda_s	*pernode_pdaindr[MAX_COMPACT_NODES];
};

typedef struct nodepda_s nodepda_t;
#ifdef CONFIG_IA64_SGI_SN2
/* SN2 per-cpu irq bookkeeping. */
#define NR_IVECS	256	/* number of interrupt vectors tracked */
struct irqpda_s {
	int	num_irq_used;		/* count of irq vectors in use */
	char	irq_flags[NR_IVECS];	/* per-vector flag byte -- semantics set by sn irq code */
};

typedef struct irqpda_s irqpda_t;
#endif /* CONFIG_IA64_SGI_SN2 */
/*
 * Access Functions for node PDA.
 * Since there is one nodepda for each node, we need a convenient mechanism
 * to access these nodepdas without cluttering code with #ifdefs.
 * The next set of definitions provides this.
 * Routines are expected to use
 *
 *	nodepda			-> to access node PDA for the node on which code is running
 *	subnodepda		-> to access subnode PDA for the subnode on which code is running
 *
 *	NODEPDA(cnode)		-> to access node PDA for cnodeid
 *	SUBNODEPDA(cnode,sn)	-> to access subnode PDA for cnodeid/subnode
 */

#define	nodepda		pda->p_nodepda		/* Ptr to this node's PDA */
#define	NODEPDA(cnode)		(nodepda->pernode_pdaindr[cnode])

#if defined(CONFIG_IA64_SGI_SN1)
/*
 * FIX: this previously read "pda.p_subnodepda", accessing 'pda' with '.'
 * while 'nodepda' above accesses the same symbol with '->'.  Both cannot
 * be valid for one definition of 'pda'; 'nodepda' is used unconditionally
 * (SN1 and SN2), so '->' is the correct form and the SN1-only definition
 * had bitrotted.
 */
#define	subnodepda	pda->p_subnodepda	/* Ptr to this node's subnode PDA */
#define	SUBNODEPDA(cnode,sn)	(&(NODEPDA(cnode)->snpda[sn]))
#define SNPDA(npda,sn)		(&(npda)->snpda[sn])
#endif
/*
 * Macros to access data structures inside nodepda
 */
#ifdef CONFIG_IA64_SGI_SN2
/* On SN2 the module id is derived from the geographic id. */
#define NODE_MODULEID(cnode)	geo_module((NODEPDA(cnode)->geoid))
#else
/* On SN1 the module id is cached directly in the nodepda. */
#define NODE_MODULEID(cnode)	(NODEPDA(cnode)->module_id)
#endif
#define NODE_SLOTID(cnode)	(NODEPDA(cnode)->slotdesc)
/*
 * Quickly convert a compact node ID into a hwgraph vertex
 * (just reads the node_vertex handle cached in that node's nodepda).
 */
#define cnodeid_to_vertex(cnodeid) (NODEPDA(cnodeid)->node_vertex)
/*
 * Check if given a compact node id the corresponding node has all the
 * cpus disabled.
 *
 * NOTE(review): currently stubbed out to always evaluate to 0 ("never
 * headless"); the intended implementation is preserved in the trailing
 * comment below.
 */
#define is_headless_node(cnode)		0 /*((cnode == CNODEID_NONE) || \
					(node_data(cnode)->active_cpu_count == 0)) */
/*
 * Check if given a node vertex handle the corresponding node has all the
 * cpus disabled.  Delegates to is_headless_node(), which is currently a
 * stub that always yields 0, so this always yields 0 as well.
 */
#define is_headless_node_vertex(_nodevhdl) \
			is_headless_node(nodevertex_to_cnodeid(_nodevhdl))
#endif /* _ASM_IA64_SN_NODEPDA_H */
|