diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/filesystems/proc.txt current/Documentation/filesystems/proc.txt --- reference/Documentation/filesystems/proc.txt 2004-03-11 14:33:29.000000000 -0800 +++ current/Documentation/filesystems/proc.txt 2004-04-09 21:41:41.000000000 -0700 @@ -38,6 +38,7 @@ Table of Contents 2.8 /proc/sys/net/ipv4 - IPV4 settings 2.9 Appletalk 2.10 IPX + 2.11 /proc/sys/sched - scheduler tunables ------------------------------------------------------------------------------ Preface @@ -1814,6 +1815,92 @@ The /proc/net/ipx_route table holds a gives the destination network, the router node (or Directly) and the network address of the router (or Connected) for internal networks. +2.11 /proc/sys/sched - scheduler tunables +----------------------------------------- + +Useful knobs for tuning the scheduler live in /proc/sys/sched. + +child_penalty +------------- + +Percentage of the parent's sleep_avg that children inherit. sleep_avg is +a running average of the time a process spends sleeping. Tasks with high +sleep_avg values are considered interactive and given a higher dynamic +priority and a larger timeslice. You typically want this some value just +under 100. + +exit_weight +----------- + +When a CPU hog task exits, its parent's sleep_avg is reduced by a factor of +exit_weight against the exiting task's sleep_avg. + +interactive_delta +----------------- + +If a task is "interactive" it is reinserted into the active array after it +has expired its timeslice, instead of being inserted into the expired array. +How "interactive" a task must be in order to be deemed interactive is a +function of its nice value. This interactive limit is scaled linearly by nice +value and is offset by the interactive_delta. + +max_sleep_avg +------------- + +max_sleep_avg is the largest value (in ms) stored for a task's running sleep +average. 
The larger this value, the longer a task needs to sleep to be +considered interactive (maximum interactive bonus is a function of +max_sleep_avg). + +max_timeslice +------------- + +Maximum timeslice, in milliseconds. This is the value given to tasks of the +highest dynamic priority. + +min_timeslice +------------- + +Minimum timeslice, in milliseconds. This is the value given to tasks of the +lowest dynamic priority. Every task gets at least this slice of the processor +per array switch. + +parent_penalty +-------------- + +Percentage of the parent's sleep_avg that it retains across a fork(). +sleep_avg is a running average of the time a process spends sleeping. Tasks +with high sleep_avg values are considered interactive and given a higher +dynamic priority and a larger timeslice. Normally, this value is 100 and thus +task's retain their sleep_avg on fork. If you want to punish interactive +tasks for forking, set this below 100. + +prio_bonus_ratio +---------------- + +Middle percentage of the priority range that tasks can receive as a dynamic +priority. The default value of 25% ensures that nice values at the +extremes are still enforced. For example, nice +19 interactive tasks will +never be able to preempt a nice 0 CPU hog. Setting this higher will increase +the size of the priority range the tasks can receive as a bonus. Setting +this lower will decrease this range, making the interactivity bonus less +apparent and user nice values more applicable. + +starvation_limit +---------------- + +Sufficiently interactive tasks are reinserted into the active array when they +run out of timeslice. Normally, tasks are inserted into the expired array. +Reinserting interactive tasks into the active array allows them to remain +runnable, which is important to interactive performance. This could starve +expired tasks, however, since the interactive task could prevent the array +switch. To prevent starving the tasks on the expired array for too long. 
the +starvation_limit is the longest (in ms) we will let the expired array starve +at the expense of reinserting interactive tasks back into active. Higher +values here give more preferance to running interactive tasks, at the expense +of expired tasks. Lower values provide more fair scheduling behavior, at the +expense of interactivity. The units are in milliseconds. + ------------------------------------------------------------------------------ Summary ------------------------------------------------------------------------------ diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/i386/kgdb/andthen current/Documentation/i386/kgdb/andthen --- reference/Documentation/i386/kgdb/andthen 1969-12-31 16:00:00.000000000 -0800 +++ current/Documentation/i386/kgdb/andthen 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,100 @@ + +define set_andthen + set var $thp=0 + set var $thp=(struct kgdb_and_then_struct *)&kgdb_data[0] + set var $at_size = (sizeof kgdb_data)/(sizeof *$thp) + set var $at_oc=kgdb_and_then_count + set var $at_cc=$at_oc +end + +define andthen_next + set var $at_cc=$arg0 +end + +define andthen + andthen_set_edge + if ($at_cc >= $at_oc) + printf "Outside window. Window size is %d\n",($at_oc-$at_low) + else + printf "%d: ",$at_cc + output *($thp+($at_cc++ % $at_size )) + printf "\n" + end +end +define andthen_set_edge + set var $at_oc=kgdb_and_then_count + set var $at_low = $at_oc - $at_size + if ($at_low < 0 ) + set var $at_low = 0 + end + if (( $at_cc > $at_oc) || ($at_cc < $at_low)) + printf "Count outside of window, setting count to " + if ($at_cc >= $at_oc) + set var $at_cc = $at_oc + else + set var $at_cc = $at_low + end + printf "%d\n",$at_cc + end +end + +define beforethat + andthen_set_edge + if ($at_cc <= $at_low) + printf "Outside window. Window size is %d\n",($at_oc-$at_low) + else + printf "%d: ",$at_cc-1 + output *($thp+(--$at_cc % $at_size )) + printf "\n" + end +end + +document andthen_next + andthen_next + . 
sets the number of the event to display next. If this event + . is not in the event pool, either andthen or beforethat will + . correct it to the nearest event pool edge. The event pool + . ends at the last event recorded and begins + . prior to that. If beforethat is used next, it will display + . event -1. +. + andthen commands are: set_andthen, andthen_next, andthen and beforethat +end + + +document andthen + andthen +. displays the next event in the list. sets up to display +. the oldest saved event first. +. (optional) count of the event to display. +. note the number of events saved is specified at configure time. +. if events are saved between calls to andthen the index will change +. but the displayed event will be the next one (unless the event buffer +. is overrun). +. +. andthen commands are: set_andthen, andthen_next, andthen and beforethat +end + +document set_andthen + set_andthen +. sets up to use the and commands. +. if you have defined your own struct, use the above and +. then enter the following: +. p $thp=(struct kgdb_and_then_structX *)&kgdb_data[0] +. where is the name of your structure. +. +. andthen commands are: set_andthen, andthen_next, andthen and beforethat +end + +document beforethat + beforethat +. displays the next prior event in the list. sets up to +. display the last occuring event first. +. +. note the number of events saved is specified at configure time. +. if events are saved between calls to beforethat the index will change +. but the displayed event will be the next one (unless the event buffer +. is overrun). +. +. 
andthen commands are: set_andthen, andthen_next, andthen and beforethat +end diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/i386/kgdb/debug-nmi.txt current/Documentation/i386/kgdb/debug-nmi.txt --- reference/Documentation/i386/kgdb/debug-nmi.txt 1969-12-31 16:00:00.000000000 -0800 +++ current/Documentation/i386/kgdb/debug-nmi.txt 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,37 @@ +Subject: Debugging with NMI +Date: Mon, 12 Jul 1999 11:28:31 -0500 +From: David Grothe +Organization: Gcom, Inc +To: David Grothe + +Kernel hackers: + +Maybe this is old hat, but it is new to me -- + +On an ISA bus machine, if you short out the A1 and B1 pins of an ISA +slot you will generate an NMI to the CPU. This interrupts even a +machine that is hung in a loop with interrupts disabled. Used in +conjunction with kgdb < +ftp://ftp.gcom.com/pub/linux/src/kgdb-2.3.35/kgdb-2.3.35.tgz > you can +gain debugger control of a machine that is hung in the kernel! Even +without kgdb the kernel will print a stack trace so you can find out +where it was hung. + +The A1/B1 pins are directly opposite one another and the farthest pins +towards the bracket end of the ISA bus socket. You can stick a paper +clip or multi-meter probe between them to short them out. + +I had a spare ISA bus to PC104 bus adapter around. The PC104 end of the +board consists of two rows of wire wrap pins. So I wired a push button +between the A1/B1 pins and now have an ISA board that I can stick into +any ISA bus slot for debugger entry. + +Microsoft has a circuit diagram of a PCI card at +http://www.microsoft.com/hwdev/DEBUGGING/DMPSW.HTM. If you want to +build one you will have to mail them and ask for the PAL equations. +Nobody makes one comercially. + +[THIS TIP COMES WITH NO WARRANTY WHATSOEVER. It works for me, but if +your machine catches fire, it is your problem, not mine.] 
+ +-- Dave (the kgdb guy) diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/i386/kgdb/gdb-globals.txt current/Documentation/i386/kgdb/gdb-globals.txt --- reference/Documentation/i386/kgdb/gdb-globals.txt 1969-12-31 16:00:00.000000000 -0800 +++ current/Documentation/i386/kgdb/gdb-globals.txt 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,71 @@ +Sender: akale@veritas.com +Date: Fri, 23 Jun 2000 19:26:35 +0530 +From: "Amit S. Kale" +Organization: Veritas Software (India) +To: Dave Grothe , linux-kernel@vger.rutgers.edu +CC: David Milburn , + "Edouard G. Parmelan" , + ezannoni@cygnus.com, Keith Owens +Subject: Re: Module debugging using kgdb + +Dave Grothe wrote: +> +> Amit: +> +> There is a 2.4.0 version of kgdb on our ftp site: +> ftp://ftp.gcom.com/pub/linux/src/kgdb. I mirrored your version of gdb +> and loadmodule.sh there. +> +> Have a look at the README file and see if I go it right. If not, send +> me some corrections and I will update it. +> +> Does your version of gdb solve the global variable problem? + +Yes. +Thanks to Elena Zanoni, gdb (developement version) can now calculate +correctly addresses of dynamically loaded object files. I have not been +following gdb developement for sometime and am not sure when symbol +address calculation fix is going to appear in a gdb stable version. + +Elena, any idea when the fix will make it to a prebuilt gdb from a +redhat release? + +For the time being I have built a gdb developement version. It can be +used for module debugging with loadmodule.sh script. + +The problem with calculating of module addresses with previous versions +of gdb was as follows: +gdb did not use base address of a section while calculating address of +a symbol in the section in an object file loaded via 'add-symbol-file'. +It used address of .text segment instead. Due to this addresses of +symbols in .data, .bss etc. (e.g. global variables) were calculated incorrectly. 
+ +Above mentioned fix allow gdb to use base address of a segment while +calculating address of a symbol in it. It adds a parameter '-s' to +'add-symbol-file' command for specifying base address of a segment. + +loadmodule.sh script works as follows. + +1. Copy a module file to target machine. +2. Load the module on the target machine using insmod with -m parameter. +insmod produces a module load map which contains base addresses of all +sections in the module and addresses of symbols in the module file. +3. Find all sections and their base addresses in the module from +the module map. +4. Generate a script that loads the module file. The script uses +'add-symbol-file' and specifies address of text segment followed by +addresses of all segments in the module. + +Here is an example gdb script produced by loadmodule.sh script. + +add-symbol-file foo 0xd082c060 -s .text.lock 0xd08cbfb5 +-s .fixup 0xd08cfbdf -s .rodata 0xd08cfde0 -s __ex_table 0xd08e3b38 +-s .data 0xd08e3d00 -s .bss 0xd08ec8c0 -s __ksymtab 0xd08ee838 + +With this command gdb can calculate addresses of symbols in ANY segment +in a module file. + +Regards. 
+-- +Amit Kale +Veritas Software ( http://www.veritas.com ) diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/i386/kgdb/gdbinit current/Documentation/i386/kgdb/gdbinit --- reference/Documentation/i386/kgdb/gdbinit 1969-12-31 16:00:00.000000000 -0800 +++ current/Documentation/i386/kgdb/gdbinit 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,14 @@ +shell echo -e "\003" >/dev/ttyS0 +set remotebaud 38400 +target remote /dev/ttyS0 +define si +stepi +printf "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n", $eax, $ebx, $ecx, $edx +printf "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n", $esi, $edi, $ebp, $esp +x/i $eip +end +define ni +nexti +printf "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n", $eax, $ebx, $ecx, $edx +printf "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n", $esi, $edi, $ebp, $esp +x/i $eip diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/i386/kgdb/gdbinit-modules current/Documentation/i386/kgdb/gdbinit-modules --- reference/Documentation/i386/kgdb/gdbinit-modules 1969-12-31 16:00:00.000000000 -0800 +++ current/Documentation/i386/kgdb/gdbinit-modules 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,146 @@ +# +# Usefull GDB user-command to debug Linux Kernel Modules with gdbstub. +# +# This don't work for Linux-2.0 or older. +# +# Author Edouard G. Parmelan +# +# +# Fri Apr 30 20:33:29 CEST 1999 +# First public release. +# +# Major cleanup after experiment Linux-2.0 kernel without success. +# Symbols of a module are not in the correct order, I can't explain +# why :( +# +# Fri Mar 19 15:41:40 CET 1999 +# Initial version. +# +# Thu Jan 6 16:29:03 CST 2000 +# A little fixing by Dave Grothe +# +# Mon Jun 19 09:33:13 CDT 2000 +# Alignment changes from Edouard Parmelan +# +# The basic idea is to find where insmod load the module and inform +# GDB to load the symbol table of the module with the GDB command +# ``add-symbol-file
''. +# +# The Linux kernel holds the list of all loaded modules in module_list, +# this list end with &kernel_module (exactly with module->next == NULL, +# but the last module is not a real module). +# +# Insmod allocates the struct module before the object file. Since +# Linux-2.1, this structure contain his size. The real address of +# the object file is then (char*)module + module->size_of_struct. +# +# You can use three user functions ``mod-list'', ``mod-print-symbols'' +# and ``add-module-symbols''. +# +# mod-list list all loaded modules with the format: +# +# +# As soon as you have found the address of your module, you can +# print its exported symbols (mod-print-symbols) or inform GDB to add +# symbols from your module file (mod-add-symbols). +# +# The argument that you give to mod-print-symbols or mod-add-symbols +# is the from the mod-list command. +# +# When using the mod-add-symbols command you must also give the full +# pathname of the modules object code file. +# +# The command mod-add-lis is an example of how to make this easier. +# You can edit this macro to contain the path name of your own +# favorite module and then use it as a shorthand to load it. You +# still need the module-address, however. +# +# The internal function ``mod-validate'' set the GDB variable $mod +# as a ``struct module*'' if the kernel known the module otherwise +# $mod is set to NULL. This ensure to not add symbols for a wrong +# address. +# +# Have a nice hacking day ! +# +# +define mod-list + set $mod = (struct module*)module_list + # the last module is the kernel, ignore it + while $mod != &kernel_module + printf "%p\t%s\n", (long)$mod, ($mod)->name + set $mod = $mod->next + end +end +document mod-list +List all modules in the form: +Use the as the argument for the other +mod-commands: mod-print-symbols, mod-add-symbols. 
+end + +define mod-validate + set $mod = (struct module*)module_list + while ($mod != $arg0) && ($mod != &kernel_module) + set $mod = $mod->next + end + if $mod == &kernel_module + set $mod = 0 + printf "%p is not a module\n", $arg0 + end +end +document mod-validate +mod-validate +Internal user-command used to validate the module parameter. +If is a real loaded module, set $mod to it otherwise set $mod to 0. +end + + +define mod-print-symbols + mod-validate $arg0 + if $mod != 0 + set $i = 0 + while $i < $mod->nsyms + set $sym = $mod->syms[$i] + printf "%p\t%s\n", $sym->value, $sym->name + set $i = $i + 1 + end + end +end +document mod-print-symbols +mod-print-symbols +Print all exported symbols of the module. see mod-list +end + + +define mod-add-symbols-align + mod-validate $arg0 + if $mod != 0 + set $mod_base = ($mod->size_of_struct + (long)$mod) + if ($arg2 != 0) && (($mod_base & ($arg2 - 1)) != 0) + set $mod_base = ($mod_base | ($arg2 - 1)) + 1 + end + add-symbol-file $arg1 $mod_base + end +end +document mod-add-symbols-align +mod-add-symbols-align +Load the symbols table of the module from the object file where +first section aligment is . +To retreive alignment, use `objdump -h '. +end + +define mod-add-symbols + mod-add-symbols-align $arg0 $arg1 sizeof(long) +end +document mod-add-symbols +mod-add-symbols +Load the symbols table of the module from the object file. +Default alignment is 4. See mod-add-symbols-align. +end + +define mod-add-lis + mod-add-symbols-align $arg0 /usr/src/LiS/streams.o 16 +end +document mod-add-lis +mod-add-lis +Does mod-add-symbols /usr/src/LiS/streams.o +end diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/i386/kgdb/gdbinit.hw current/Documentation/i386/kgdb/gdbinit.hw --- reference/Documentation/i386/kgdb/gdbinit.hw 1969-12-31 16:00:00.000000000 -0800 +++ current/Documentation/i386/kgdb/gdbinit.hw 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,117 @@ + +#Using ia-32 hardware breakpoints. 
+# +#4 hardware breakpoints are available in ia-32 processors. These breakpoints +#do not need code modification. They are set using debug registers. +# +#Each hardware breakpoint can be of one of the +#three types: execution, write, access. +#1. An Execution breakpoint is triggered when code at the breakpoint address is +#executed. +#2. A write breakpoint ( aka watchpoints ) is triggered when memory location +#at the breakpoint address is written. +#3. An access breakpoint is triggered when memory location at the breakpoint +#address is either read or written. +# +#As hardware breakpoints are available in limited number, use software +#breakpoints ( br command in gdb ) instead of execution hardware breakpoints. +# +#Length of an access or a write breakpoint defines length of the datatype to +#be watched. Length is 1 for char, 2 short , 3 int. +# +#For placing execution, write and access breakpoints, use commands +#hwebrk, hwwbrk, hwabrk +#To remove a breakpoint use hwrmbrk command. +# +#These commands take following types of arguments. For arguments associated +#with each command, use help command. +#1. breakpointno: 0 to 3 +#2. length: 1 to 3 +#3. address: Memory location in hex ( without 0x ) e.g c015e9bc +# +#Use the command exinfo to find which hardware breakpoint occured. + +#hwebrk breakpointno address +define hwebrk + maintenance packet Y$arg0,0,0,$arg1 +end +document hwebrk + hwebrk
+ Places a hardware execution breakpoint + <breakpointno> = 0 - 3 +
<address> = Hex digits without leading "0x". +end + +#hwwbrk breakpointno length address +define hwwbrk + maintenance packet Y$arg0,1,$arg1,$arg2 +end +document hwwbrk + hwwbrk <breakpointno> <length> <address>
+ Places a hardware write breakpoint + <breakpointno> = 0 - 3 + <length> = 1 (1 byte), 2 (2 byte), 3 (4 byte) +
<address> = Hex digits without leading "0x". +end + +#hwabrk breakpointno length address +define hwabrk + maintenance packet Y$arg0,1,$arg1,$arg2 +end +document hwabrk + hwabrk <breakpointno> <length> <address>
+ Places a hardware access breakpoint + <breakpointno> = 0 - 3 + <length> = 1 (1 byte), 2 (2 byte), 3 (4 byte) +
= Hex digits without leading "0x". +end + +#hwrmbrk breakpointno +define hwrmbrk + maintenance packet y$arg0 +end +document hwrmbrk + hwrmbrk + = 0 - 3 + Removes a hardware breakpoint +end + +define reboot + maintenance packet r +end +#exinfo +define exinfo + maintenance packet qE +end +document exinfo + exinfo + Gives information about a breakpoint. +end +define get_th + p $th=(struct thread_info *)((int)$esp & ~8191) +end +document get_th + get_tu + Gets and prints the current thread_info pointer, Defines th to be it. +end +define get_cu + p $cu=((struct thread_info *)((int)$esp & ~8191))->task +end +document get_cu + get_cu + Gets and print the "current" value. Defines $cu to be it. +end +define int_off + set var $flags=$eflags + set $eflags=$eflags&~0x200 + end +define int_on + set var $eflags|=$flags&0x200 + end +document int_off + saves the current interrupt state and clears the processor interrupt + flag. Use int_on to restore the saved flag. +end +document int_on + Restores the interrupt flag saved by int_off. +end diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/i386/kgdb/kgdb.txt current/Documentation/i386/kgdb/kgdb.txt --- reference/Documentation/i386/kgdb/kgdb.txt 1969-12-31 16:00:00.000000000 -0800 +++ current/Documentation/i386/kgdb/kgdb.txt 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,775 @@ +Last edit: <20030806.1637.12> +This file has information specific to the i386 kgdb option. Other +platforms with the kgdb option may behave in a similar fashion. + +New features: +============ +20030806.1557.37 +This version was made against the 2.6.0-test2 kernel. We have made the +following changes: + +- The getthread() code in the stub calls find_task_by_pid(). It fails + if we are early in the bring up such that the pid arrays have yet to + be allocated. We have added a line to kernel/pid.c to make + "kgdb_pid_init_done" true once the arrays are allocated. This way the + getthread() code knows not to call. 
This is only used by the thread + debugging stuff and threads will not yet exist at this point in the + boot. + +- For some reason, gdb was not asking for a new thread list when the + "info thread" command was given. We changed to the newer version of + the thread info command and gdb now seems to ask when needed. Result, + we now get all threads in the thread list. + +- We now respond to the ThreadExtraInfo request from gdb with the thread + name from task_struct .comm. This then appears in the thread list. + Thoughts on additional options for this are welcome. Things such as + "has BKL" and "Preempted" come to mind. I think we could have a flag + word that could enable different bits of info here. + +- We now honor, sort of, the C and S commands. These are continue and + single set after delivering a signal. We ignore the signal and do the + requested action. This only happens when we told gdb that a signal + was the reason for entry, which is only done on memory faults. The + result is that you can now continue into the Oops. + +- We changed the -g to -gdwarf-2. This seems to be the same as -ggdb, + but it is more exact on what language to use. + +- We added two dwarf2 include files and a bit of code at the end of + entry.S. This does not yet work, so it is disabled. Still we want to + keep track of the code and "maybe" someone out there can fix it. + +- Randy Dunlap sent some fix ups for this file which are now merged. + +- Hugh Dickins sent a fix to a bit of code in traps.c that prevents a + compiler warning if CONFIG_KGDB is off (now who would do that :). + +- Andrew Morton sent a fix for the serial driver which is now merged. + +- Andrew also sent a change to the stub around the cpu managment code + which is also merged. + +- Andrew also sent a patch to make "f" as well as "g" work as SysRq + commands to enter kgdb, merged. + +- If CONFIG_KGDB and CONFIG_DEBUG_SPINLOCKS are both set we added a + "who" field to the spinlock data struct. 
This is filled with + "current" when ever the spinlock suceeds. Useful if you want to know + who has the lock. + +_ And last, but not least, we fixed the "get_cu" macro to properly get + the current value of "current". + +New features: +============ +20030505.1827.27 +We are starting to align with the sourceforge version, at least in +commands. To this end, the boot command string to start kgdb at +boot time has been changed from "kgdb" to "gdb". + +Andrew Morton sent a couple of patches which are now included as follows: +1.) We now return a flag to the interrupt handler. +2.) We no longer use smp_num_cpus (a conflict with the lock meter). +3.) And from William Lee Irwin III code to make + sure high-mem is set up before we attempt to register our interrupt + handler. +We now include asm/kgdb.h from config.h so you will most likely never +have to include it. It also 'NULLS' the kgdb macros you might have in +your code when CONFIG_KGDB is not defined. This allows you to just +turn off CONFIG_KGDB to turn off all the kgdb_ts() calls and such. +This include is conditioned on the machine being an x86 so as to not +mess with other archs. + +20020801.1129.03 +This is currently the version for the 2.4.18 (and beyond?) kernel. + +We have several new "features" beginning with this version: + +1.) Kgdb now syncs the "other" CPUs with a cross-CPU NMI. No more + waiting and it will pull that guy out of an IRQ off spin lock :) + +2.) We doctored up the code that tells where a task is waiting and + included it so that the "info thread" command will show a bit more + than "schedule()". Try it... + +3.) Added the ability to call a function from gdb. All the standard gdb + issues apply, i.e. if you hit a breakpoint in the function, you are + not allowed to call another (gdb limitation, not kgdb). To help + this capability we added a memory allocation function. 
Gdb does not + return this memory (it is used for strings that you pass to that function + you are calling from gdb) so we fixed up a way to allow you to + manually return the memory (see below). + +4.) Kgdb time stamps (kgdb_ts()) are enhanced to expand what was the + interrupt flag to now also include the preemption count and the + "in_interrupt" info. The flag is now called "with_pif" to indicate + the order, preempt_count, in_interrupt, flag. The preempt_count is + shifted left by 4 bits so you can read the count in hex by dropping + the low order digit. In_interrupt is in bit 1, and the flag is in + bit 0. + +5.) The command: "p kgdb_info" is now expanded and prints something + like: +(gdb) p kgdb_info +$2 = {used_malloc = 0, called_from = 0xc0107506, entry_tsc = 67468627259, + errcode = 0, vector = 3, print_debug_info = 0, hold_on_sstep = 1, + cpus_waiting = {{task = 0xc027a000, pid = 32768, hold = 0, + regs = 0xc027bf84}, {task = 0x0, pid = 0, hold = 0, regs = 0x0}}} + + Things to note here: a.) used_malloc is the amount of memory that + has been malloc'ed to do calls from gdb. You can reclaim this + memory like this: "p kgdb_info.used_malloc=0" Cool, huh? b.) + cpus_waiting is now "sized" by the number of CPUs you enter at + configure time in the kgdb configure section. This is NOT used + anywhere else in the system, but it is "nice" here. c.) The task's + "pid" is now in the structure. This is the pid you will need to use + to decode to the thread id to get gdb to look at that thread. + Remember that the "info thread" command prints a list of threads + wherein it numbers each thread with its reference number followed + by the thread's pid. Note that the per-CPU idle threads actually + have pids of 0 (yes, there is more than one pid 0 in an SMP system). + To avoid confusion, kgdb numbers these threads with numbers beyond + the MAX_PID. That is why you see 32768 and above. + +6.) 
A subtle change, we now provide the complete register set for tasks + that are active on the other CPUs. This allows better trace back on + those tasks. + + And, let's mention what we could not fix. Back-trace from all but the + thread that we trapped will, most likely, have a bogus entry in it. + The problem is that gdb does not recognize the entry code for + functions that use "current" near (at all?) the entry. The compiler + is putting the "current" decode as the first two instructions of the + function where gdb expects to find %ebp changing code. Back trace + also has trouble with interrupt frames. I am talking with Daniel + Jacobowitz about some way to fix this, but don't hold your breath. + +20011220.0050.35 +Major enhancement with this version is the ability to hold one or more +CPUs in an SMP system while allowing the others to continue. Also, by +default only the current CPU is enabled on single-step commands (please +note that gdb issues single-step commands at times other than when you +use the si command). + +Another change is to collect some useful information in +a global structure called "kgdb_info". You should be able to just: + +p kgdb_info + +although I have seen cases where the first time this is done gdb just +prints the first member but prints the whole structure if you then enter +CR (carriage return or enter). This also works: + +p *&kgdb_info + +Here is a sample: +(gdb) p kgdb_info +$4 = {called_from = 0xc010732c, entry_tsc = 32804123790856, errcode = 0, + vector = 3, print_debug_info = 0} + +"Called_from" is the return address from the current entry into kgdb. +Sometimes it is useful to know why you are in kgdb, for example, was +it an NMI or a real breakpoint? The simple way to interrogate this +return address is: + +l *0xc010732c + +which will print the surrounding few lines of source code. + +"Entry_tsc" is the CPU TSC on entry to kgdb (useful to compare to the +kgdb_ts entries). 
+ +"errcode" and "vector" are other entry parameters which may be helpful on +some traps. + +"print_debug_info" is the internal debugging kgdb print enable flag. Yes, +you can modify it. + +In SMP systems kgdb_info also includes the "cpus_waiting" structure and +"hold_on_step": + +(gdb) p kgdb_info +$7 = {called_from = 0xc0112739, entry_tsc = 1034936624074, errcode = 0, + vector = 2, print_debug_info = 0, hold_on_sstep = 1, cpus_waiting = {{ + task = 0x0, hold = 0, regs = 0x0}, {task = 0xc71b8000, hold = 0, + regs = 0xc71b9f70}, {task = 0x0, hold = 0, regs = 0x0}, {task = 0x0, + hold = 0, regs = 0x0}, {task = 0x0, hold = 0, regs = 0x0}, {task = 0x0, + hold = 0, regs = 0x0}, {task = 0x0, hold = 0, regs = 0x0}, {task = 0x0, + hold = 0, regs = 0x0}}} + +"Cpus_waiting" has an entry for each CPU other than the current one that +has been stopped. Each entry contains the task_struct address for that +CPU, the address of the regs for that task and a hold flag. All these +have the proper typing so that, for example: + +p *kgdb_info.cpus_waiting[1].regs + +will print the registers for CPU 1. + +"Hold_on_sstep" is a new feature with this version and comes up set or +true. What this means is that whenever kgdb is asked to single-step all +other CPUs are held (i.e. not allowed to execute). The flag applies to +all but the current CPU and, again, can be changed: + +p kgdb_info.hold_on_sstep=0 + +restores the old behavior of letting all CPUs run during single-stepping. + +Likewise, each CPU has a "hold" flag, which if set, locks that CPU out +of execution. Note that this has some risk in cases where the CPUs need +to communicate with each other. If kgdb finds no CPU available on exit, +it will push a message thru gdb and stay in kgdb. Note that it is legal +to hold the current CPU as long as at least one CPU can execute. + +20010621.1117.09 +This version implements an event queue. Events are signaled by calling +a function in the kgdb stub and may be examined from gdb. 
See EVENTS +below for details. This version also tightens up the interrupt and SMP +handling to not allow interrupts on the way to kgdb from a breakpoint +trap. It is fine to allow these interrupts for user code, but not +system debugging. + +Version +======= + +This version of the kgdb package was developed and tested on +kernel version 2.4.16. It will not install on any earlier kernels. +It is possible that it will continue to work on later versions +of 2.4 and then versions of 2.5 (I hope). + + +Debugging Setup +=============== + +Designate one machine as the "development" machine. This is the +machine on which you run your compiles and which has your source +code for the kernel. Designate a second machine as the "target" +machine. This is the machine that will run your experimental +kernel. + +The two machines will be connected together via a serial line out +one or the other of the COM ports of the PC. You will need the +appropriate modem eliminator (null modem) cable(s) for this. + +Decide on which tty port you want the machines to communicate, then +connect them up back-to-back using the null modem cable. COM1 is +/dev/ttyS0 and COM2 is /dev/ttyS1. You should test this connection +with the two machines prior to trying to debug a kernel. Once you +have it working, on the TARGET machine, enter: + +setserial /dev/ttyS0 (or what ever tty you are using) + +and record the port address and the IRQ number. + +On the DEVELOPMENT machine you need to apply the patch for the kgdb +hooks. You have probably already done that if you are reading this +file. + +On your DEVELOPMENT machine, go to your kernel source directory and do +"make Xconfig" where X is one of "x", "menu", or "". If you are +configuring in the standard serial driver, it must not be a module. +Either yes or no is ok, but making the serial driver a module means it +will initialize after kgdb has set up the UART interrupt code and may +cause a failure of the control-C option discussed below. 
The configure +question for the serial driver is under the "Character devices" heading +and is: + +"Standard/generic (8250/16550 and compatible UARTs) serial support" + +Go down to the kernel debugging menu item and open it up. Enable the +kernel kgdb stub code by selecting that item. You can also choose to +turn on the "-ggdb -O1" compile options. The -ggdb causes the compiler +to put more debug info (like local symbols) in the object file. On the +i386 -g and -ggdb are the same so this option just reduces to "O1". The +-O1 reduces the optimization level. This may be helpful in some cases, +be aware, however, that this may also mask the problem you are looking +for. + +The baud rate. Default is 115200. What ever you choose be sure that +the host machine is set to the same speed. I recommend the default. + +The port. This is the I/O address of the serial UART that you should +have gotten using setserial as described above. The standard COM1 port +(3f8) using IRQ 4 is default. COM2 is 2f8 which by convention uses IRQ +3. + +The port IRQ (see above). + +Stack overflow test. This option makes a minor change in the trap, +system call and interrupt code to detect stack overflow and transfer +control to kgdb if it happens. (Some platforms have this in the +baseline code, but the i386 does not.) + +You can also configure the system to recognize the boot option +"console=kgdb" which if given will cause all console output during +booting to be put thru gdb as well as other consoles. This option +requires that gdb and kgdb be connected prior to sending console output +so, if they are not, a breakpoint is executed to force the connection. +This will happen before any kernel output (it is going thru gdb, right), +and will stall the boot until the connection is made. + +You can also configure in a patch to SysRq to enable the kGdb SysRq. +This request generates a breakpoint. 
Since the serial port IRQ line is +set up after any serial drivers, it is possible that this command will +work when the control-C will not. + +Save and exit the Xconfig program. Then do "make clean" , "make dep" +and "make bzImage" (or whatever target you want to make). This gets the +kernel compiled with the "-g" option set -- necessary for debugging. + +You have just built the kernel on your DEVELOPMENT machine that you +intend to run on your TARGET machine. + +To install this new kernel, use the following installation procedure. +Remember, you are on the DEVELOPMENT machine patching the kernel source +for the kernel that you intend to run on the TARGET machine. + +Copy this kernel to your target machine using your usual procedures. I +usually arrange to copy development: +/usr/src/linux/arch/i386/boot/bzImage to /vmlinuz on the TARGET machine +via a LAN based NFS access. That is, I run the cp command on the target +and copy from the development machine via the LAN. Run Lilo (see "man +lilo" for details on how to set this up) on the new kernel on the target +machine so that it will boot! Then boot the kernel on the target +machine. + +On the DEVELOPMENT machine, create a file called .gdbinit in the +directory /usr/src/linux. An example .gdbinit file looks like this: + +shell echo -e "\003" >/dev/ttyS0 +set remotebaud 38400 (or what ever speed you have chosen) +target remote /dev/ttyS0 + + +Change the "echo" and "target" definition so that it specifies the tty +port that you intend to use. Change the "remotebaud" definition to +match the data rate that you are going to use for the com line. + +You are now ready to try it out. + +Boot your target machine with "kgdb" in the boot command i.e. something +like: + +lilo> test kgdb + +or if you also want console output thru gdb: + +lilo> test kgdb console=kgdb + +You should see the lilo message saying it has loaded the kernel and then +all output stops. The kgdb stub is trying to connect with gdb. 
Start
+gdb something like this:
+
+
+On your DEVELOPMENT machine, cd /usr/src/linux and enter "gdb vmlinux".
+When gdb gets the symbols loaded it will read your .gdbinit file and, if
+everything is working correctly, you should see gdb print out a few
+lines indicating that a breakpoint has been taken. It will actually
+show a line of code in the target kernel inside the kgdb activation
+code.
+
+The gdb interaction should look something like this:
+
+ linux-dev:/usr/src/linux# gdb vmlinux
+ GDB is free software and you are welcome to distribute copies of it
+ under certain conditions; type "show copying" to see the conditions.
+ There is absolutely no warranty for GDB; type "show warranty" for details.
+ GDB 4.15.1 (i486-slackware-linux),
+ Copyright 1995 Free Software Foundation, Inc...
+ breakpoint () at i386-stub.c:750
+ 750 }
+ (gdb)
+
+You can now use whatever gdb commands you like to set breakpoints.
+Enter "continue" to start your target machine executing again. At this
+point the target system will run at full speed until it encounters
+your breakpoint or gets a segment violation in the kernel, or whatever.
+
+If you have the kgdb console enabled when you continue, gdb will print
+out all the console messages.
+
+The above example caused a breakpoint relatively early in the boot
+process. For the i386 kgdb it is possible to code a break instruction
+as the first C-language point in init/main.c, i.e. as the first instruction
+in start_kernel(). This could be done as follows:
+
+#include <asm/kgdb.h>
+ breakpoint();
+
+This breakpoint() is really a function that sets up the breakpoint and
+single-step hardware trap cells and then executes a breakpoint. Any
+early hard coded breakpoint will need to use this function. Once the
+trap cells are set up they need not be set again, but doing it again
+does not hurt anything, so you don't need to be concerned about which
+breakpoint is hit first. 
Once the trap cells are set up (and the kernel
+sets them up in due course even if breakpoint() is never called) the
+macro:
+
+BREAKPOINT;
+
+will generate an inline breakpoint. This may be more useful as it stops
+the processor at the instruction instead of in a function a step removed
+from the location of interest. In either case <asm/kgdb.h> must be
+included to define both breakpoint() and BREAKPOINT.
+
+Triggering kgdbstub at other times
+==================================
+
+Often you don't need to enter the debugger until much later in the boot
+or even after the machine has been running for some time. Once the
+kernel is booted and interrupts are on, you can force the system to
+enter the debugger by sending a control-C to the debug port. This is
+what the first line of the recommended .gdbinit file does. This allows
+you to start gdb any time after the system is up as well as when the
+system is already at a breakpoint. (In the case where the system is
+already at a breakpoint the control-C is not needed, however, it will
+be ignored by the target so no harm is done. Also note that the echo
+command assumes that the port speed is already set. This will be true
+once gdb has connected, but it is best to set the port speed before you
+run gdb.)
+
+Another simple way to do this is to put the following file in your ~/bin
+directory:
+
+#!/bin/bash
+echo -e "\003" > /dev/ttyS0
+
+Here, the ttyS0 should be replaced with what ever port you are using.
+The "\003" is control-C. Once you are connected with gdb, you can enter
+control-C at the command prompt.
+
+An alternative way to get control to the debugger is to enable the kGdb
+SysRq command. Then you would enter Alt-SysRq-g (all three keys at the
+same time, but push them down in the order given). To refresh your
+memory of the available SysRq commands try Alt-SysRq-=. Actually any
+undefined command could replace the "=", but I like to KNOW that what I
+am pushing will never be defined. 
+ +Debugging hints +=============== + +You can break into the target machine at any time from the development +machine by typing ^C (see above paragraph). If the target machine has +interrupts enabled this will stop it in the kernel and enter the +debugger. + +There is unfortunately no way of breaking into the kernel if it is +in a loop with interrupts disabled, so if this happens to you then +you need to place exploratory breakpoints or printk's into the kernel +to find out where it is looping. The exploratory breakpoints can be +entered either thru gdb or hard coded into the source. This is very +handy if you do something like: + +if () BREAKPOINT; + + +There is a copy of an e-mail in the Documentation/i386/kgdb/ directory +(debug-nmi.txt) which describes how to create an NMI on an ISA bus +machine using a paper clip. I have a sophisticated version of this made +by wiring a push button switch into a PC104/ISA bus adapter card. The +adapter card nicely furnishes wire wrap pins for all the ISA bus +signals. + +When you are done debugging the kernel on the target machine it is a +good idea to leave it in a running state. This makes reboots faster, +bypassing the fsck. So do a gdb "continue" as the last gdb command if +this is possible. To terminate gdb itself on the development machine +and leave the target machine running, first clear all breakpoints and +continue, then type ^Z to suspend gdb and then kill it with "kill %1" or +something similar. + +If gdbstub Does Not Work +======================== + +If it doesn't work, you will have to troubleshoot it. Do the easy +things first like double checking your cabling and data rates. You +might try some non-kernel based programs to see if the back-to-back +connection works properly. Just something simple like cat /etc/hosts +>/dev/ttyS0 on one machine and cat /dev/ttyS0 on the other will tell you +if you can send data from one machine to the other. Make sure it works +in both directions. 
There is no point in tearing out your hair in the
+kernel if the line doesn't work.
+
+All of the real action takes place in the file
+/usr/src/linux/arch/i386/kernel/kgdb_stub.c. That is the code on the target
+machine that interacts with gdb on the development machine. In gdb you can
+turn on a debug switch with the following command:
+
+ set remotedebug
+
+This will print out the protocol messages that gdb is exchanging with
+the target machine.
+
+Another place to look is /usr/src/arch/i386/lib/kgdb_serial.c. This is
+the code that talks to the serial port on the target side. There might
+be a problem there. In particular there is a section of this code that
+tests the UART which will tell you what UART you have if you define
+"PRNT" (just remove "_off" from the #define PRNT_off). To view this
+report you will need to boot the system without any breakpoints. This
+allows the kernel to run to the point where it calls kgdb to set up
+interrupts. At this time kgdb will test the UART and print out the type
+it finds. (You need to wait so that the printks are actually being
+printed. Early in the boot they are cached, waiting for the console to
+be enabled. Also, if kgdb is entered thru a breakpoint it is possible
+to cause a deadlock by calling printk when the console is locked. The
+stub thus avoids doing printks from breakpoints, especially in the
+serial code.) At this time, if the UART fails to do the expected thing,
+kgdb will print out (using printk) information on what failed. (These
+messages will be buried in all the other boot up messages. Look for
+lines that start with "gdb_hook_interrupt:". You may want to use dmesg
+once the system is up to view the log.) If this fails or if you still
+don't connect, review your answers for the port address. Use:
+
+setserial /dev/ttyS0
+
+to get the current port and IRQ information. This command will also
+tell you what the system found for the UART type. 
The stub recognizes +the following UART types: + +16450, 16550, and 16550A + +If you are really desperate you can use printk debugging in the +kgdbstub code in the target kernel until you get it working. In particular, +there is a global variable in /usr/src/linux/arch/i386/kernel/kgdb_stub.c +named "remote_debug". Compile your kernel with this set to 1, rather +than 0 and the debug stub will print out lots of stuff as it does +what it does. Likewise there are debug printks in the kgdb_serial.c +code that can be turned on with simple changes in the macro defines. + + +Debugging Loadable Modules +========================== + +This technique comes courtesy of Edouard Parmelan + + +When you run gdb, enter the command + +source gdbinit-modules + +This will read in a file of gdb macros that was installed in your +kernel source directory when kgdb was installed. This file implements +the following commands: + +mod-list + Lists the loaded modules in the form + +mod-print-symbols + Prints all the symbols in the indicated module. + +mod-add-symbols + Loads the symbols from the object file and associates them + with the indicated module. + +After you have loaded the module that you want to debug, use the command +mod-list to find the of your module. Then use that +address in the mod-add-symbols command to load your module's symbols. +From that point onward you can debug your module as if it were a part +of the kernel. + +The file gdbinit-modules also contains a command named mod-add-lis as +an example of how to construct a command of your own to load your +favorite module. The idea is to "can" the pathname of the module +in the command so you don't have to type so much. + +Threads +======= + +Each process in a target machine is seen as a gdb thread. gdb thread +related commands (info threads, thread n) can be used. 
+
+ia-32 hardware breakpoints
+==========================
+
+kgdb stub contains support for hardware breakpoints using debugging features
+of ia-32(x86) processors. These breakpoints do not need code modification.
+They use debugging registers. 4 hardware breakpoints are available in ia-32
+processors.
+
+Each hardware breakpoint can be of one of the following three types.
+
+1. Execution breakpoint - An Execution breakpoint is triggered when code
+ at the breakpoint address is executed.
+
+ As limited number of hardware breakpoints are available, it is
+ advisable to use software breakpoints ( break command ) instead
+ of execution hardware breakpoints, unless modification of code
+ is to be avoided.
+
+2. Write breakpoint - A write breakpoint is triggered when memory
+ location at the breakpoint address is written.
+
+ A write breakpoint can be placed for data of variable length. Length of
+ a write breakpoint indicates length of the datatype to be
+ watched. Length is 1 for 1 byte data, 2 for 2 byte data, 3 for
+ 4 byte data.
+
+3. Access breakpoint - An access breakpoint is triggered when memory
+ location at the breakpoint address is either read or written.
+
+ Access breakpoints also have lengths similar to write breakpoints.
+
+IO breakpoints in ia-32 are not supported.
+
+Since gdb stub at present does not use the protocol used by gdb for hardware
+breakpoints, hardware breakpoints are accessed through gdb macros. gdb macros
+for hardware breakpoints are described below.
+
+hwebrk - Places an execution breakpoint
+ hwebrk breakpointno address
+hwwbrk - Places a write breakpoint
+ hwwbrk breakpointno length address
+hwabrk - Places an access breakpoint
+ hwabrk breakpointno length address
+hwrmbrk - Removes a breakpoint
+ hwrmbrk breakpointno
+exinfo - Tells whether a software or hardware breakpoint has occurred.
+ Prints number of the hardware breakpoint if a hardware breakpoint has
+ occurred. 
+ +Arguments required by these commands are as follows +breakpointno - 0 to 3 +length - 1 to 3 +address - Memory location in hex digits ( without 0x ) e.g c015e9bc + +SMP support +========== + +When a breakpoint occurs or user issues a break ( Ctrl + C ) to gdb +client, all the processors are forced to enter the debugger. Current +thread corresponds to the thread running on the processor where +breakpoint occurred. Threads running on other processor(s) appear +similar to other non-running threads in the 'info threads' output. +Within the kgdb stub there is a structure "waiting_cpus" in which kgdb +records the values of "current" and "regs" for each CPU other than the +one that hit the breakpoint. "current" is a pointer to the task +structure for the task that CPU is running, while "regs" points to the +saved registers for the task. This structure can be examined with the +gdb "p" command. + +ia-32 hardware debugging registers on all processors are set to same +values. Hence any hardware breakpoints may occur on any processor. + +gdb troubleshooting +=================== + +1. gdb hangs +Kill it. restart gdb. Connect to target machine. + +2. gdb cannot connect to target machine (after killing a gdb and +restarting another) If the target machine was not inside debugger when +you killed gdb, gdb cannot connect because the target machine won't +respond. In this case echo "Ctrl+C"(ASCII 3) to the serial line. +e.g. echo -e "\003" > /dev/ttyS1 +This forces that target machine into the debugger, after which you +can connect. + +3. gdb cannot connect even after echoing Ctrl+C into serial line +Try changing serial line settings min to 1 and time to 0 +e.g. stty min 1 time 0 < /dev/ttyS1 +Try echoing again + +Check serial line speed and set it to correct value if required +e.g. stty ispeed 115200 ospeed 115200 < /dev/ttyS1 + +EVENTS +====== + +Ever want to know the order of things happening? Which CPU did what and +when? How did the spinlock get the way it is? 
Then events are for +you. Events are defined by calls to an event collection interface and +saved for later examination. In this case, kgdb events are saved by a +very fast bit of code in kgdb which is fully SMP and interrupt protected +and they are examined by using gdb to display them. Kgdb keeps only +the last N events, where N must be a power of two and is defined at +configure time. + + +Events are signaled to kgdb by calling: + +kgdb_ts(data0,data1) + +For each call kgdb records each call in an array along with other info. +Here is the array definition: + +struct kgdb_and_then_struct { +#ifdef CONFIG_SMP + int on_cpu; +#endif + long long at_time; + int from_ln; + char * in_src; + void *from; + int with_if; + int data0; + int data1; +}; + +For SMP machines the CPU is recorded, for all machines the TSC is +recorded (gets a time stamp) as well as the line number and source file +the call was made from. The address of the (from), the "if" (interrupt +flag) and the two data items are also recorded. The macro kgdb_ts casts +the types to int, so you can put any 32-bit values here. There is a +configure option to select the number of events you want to keep. A +nice number might be 128, but you can keep up to 1024 if you want. The +number must be a power of two. An "andthen" macro library is provided +for gdb to help you look at these events. It is also possible to define +a different structure for the event storage and cast the data to this +structure. For example the following structure is defined in kgdb: + +struct kgdb_and_then_struct2 { +#ifdef CONFIG_SMP + int on_cpu; +#endif + long long at_time; + int from_ln; + char * in_src; + void *from; + int with_if; + struct task_struct *t1; + struct task_struct *t2; +}; + +If you use this for display, the data elements will be displayed as +pointers to task_struct entries. You may want to define your own +structure to use in casting. 
You should only change the last two items +and you must keep the structure size the same. Kgdb will handle these +as 32-bit ints, but within that constraint you can define a structure to +cast to any 32-bit quantity. This need only be available to gdb and is +only used for casting in the display code. + +Final Items +=========== + +I picked up this code from Amit S. Kale and enhanced it. + +If you make some really cool modification to this stuff, or if you +fix a bug, please let me know. + +George Anzinger + + +Amit S. Kale + + +(First kgdb by David Grothe ) + +(modified by Tigran Aivazian ) + Putting gdbstub into the kernel config menu. + +(modified by Scott Foehner ) + Hooks for entering gdbstub at boot time. + +(modified by Amit S. Kale ) + Threads, ia-32 hw debugging, mp support, console support, + nmi watchdog handling. + +(modified by George Anzinger ) + Extended threads to include the idle threads. + Enhancements to allow breakpoint() at first C code. + Use of module_init() and __setup() to automate the configure. + Enhanced the cpu "collection" code to work in early bring-up. + Added ability to call functions from gdb + Print info thread stuff without going back to schedule() + Now collect the "other" cpus with an IPI/ NMI. diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/i386/kgdb/kgdbeth.txt current/Documentation/i386/kgdb/kgdbeth.txt --- reference/Documentation/i386/kgdb/kgdbeth.txt 1969-12-31 16:00:00.000000000 -0800 +++ current/Documentation/i386/kgdb/kgdbeth.txt 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,92 @@ +KGDB over ethernet +================== + +Authors +------- + +Robert Walsh (2.6 port) +wangdi (2.6 port) +Matt Mackall (netpoll api) +San Mehat (original 2.4 code) + + +Introduction +------------ + +KGDB supports debugging over ethernet (kgdboe) via polling of a given +network interface. Most cards should be supported automatically. 
+Debugging facilities are available as soon as the network driver and +kgdboe have initialized. Unfortunately, this is too late in the boot +process for debugging some issues, but works quite well for many +others. This should not interfere with normal network usage and +doesn't require a dedicated NIC. + +Terminology +----------- + +This document uses the following terms: + + TARGET: the machine being debugged. + HOST: the machine running gdb. + + +Usage +----- + +You need to use the following command-line option on the TARGET kernel: + + kgdboe=[tgt-port]@/[dev],[host-port]@/[host-macaddr] + + where + tgt-port source for UDP packets (defaults to 6443) + tgt-ip source IP to use (interface address) + dev network interface (eth0) + host-port HOST UDP port (6442) (not really used) + host-ip IP address for HOST machine + host-macaddr ethernet MAC address for HOST (ff:ff:ff:ff:ff:ff) + + examples: + + kgdboe=7000@192.168.0.1/eth1,7001@192.168.0.2/00:05:3C:04:47:5D + this machine is 192.168.0.1 on eth1 + remote machine is 192.168.0.2 with MAC address 00:05:3C:04:47:5D + listen for gdb packets on port 7000 + send unsolicited gdb packets to port 7001 + + kgdboe=@192.168.0.1/,@192.168.0.2/ + this machine is 192.168.0.1 on default interface eth0 + remote machine is 192.168.0.2, use default broadcast MAC address + listen for gdb packets on default port 6443 + send unsolicited gdb packets to port 6442 + +Only packets originating from the configured HOST IP address will be +accepted by the debugger. + +On the HOST side, run gdb as normal and use a remote UDP host as the +target: + + % gdb ./vmlinux + GNU gdb Red Hat Linux (5.3post-0.20021129.18rh) + Copyright 2003 Free Software Foundation, Inc. + GDB is free software, covered by the GNU General Public License, and you are + welcome to change it and/or distribute copies of it under certain conditions. + Type "show copying" to see the conditions. + There is absolutely no warranty for GDB. Type "show warranty" for details. 
+ This GDB was configured as "i386-redhat-linux-gnu"...
+ (gdb) target remote udp:HOSTNAME:6443
+
+You can now continue as if you were debugging over a serial line.
+
+Limitations
+-----------
+
+The current release of this code is exclusive of using kgdb on a
+serial interface, so you must boot without the kgdboe option to use
+serial debugging. Trying to debug the network driver while using it
+will prove interesting.
+
+Bug reports
+-----------
+
+Send bug reports to Robert Walsh and Matt
+Mackall .
diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/i386/kgdb/loadmodule.sh current/Documentation/i386/kgdb/loadmodule.sh
--- reference/Documentation/i386/kgdb/loadmodule.sh 1969-12-31 16:00:00.000000000 -0800
+++ current/Documentation/i386/kgdb/loadmodule.sh 2004-04-08 15:10:20.000000000 -0700
@@ -0,0 +1,78 @@
+#!/bin/sh
+# This script loads a module on a target machine and generates a gdb script.
+# source generated gdb script to load the module file at appropriate addresses
+# in gdb.
+#
+# Usage:
+# Loading the module on target machine and generating gdb script)
+# [foo]$ loadmodule.sh
+#
+# Loading the module file into gdb
+# (gdb) source
+#
+# Modify following variables according to your setup.
+# TESTMACHINE - Name of the target machine
+# GDBSCRIPTS - The directory where a gdb script will be generated
+#
+# Author: Amit S. Kale (akale@veritas.com).
+#
+# If you run into problems, please check files pointed to by following
+# variables.
+# ERRFILE - /tmp/.errs contains stderr output of insmod
+# MAPFILE - /tmp/.map contains stdout output of insmod
+# GDBSCRIPT - $GDBSCRIPTS/load gdb script. 
+ +TESTMACHINE=foo +GDBSCRIPTS=/home/bar + +if [ $# -lt 1 ] ; then { + echo Usage: $0 modulefile + exit +} ; fi + +MODULEFILE=$1 +MODULEFILEBASENAME=`basename $1` + +if [ $MODULEFILE = $MODULEFILEBASENAME ] ; then { + MODULEFILE=`pwd`/$MODULEFILE +} fi + +ERRFILE=/tmp/$MODULEFILEBASENAME.errs +MAPFILE=/tmp/$MODULEFILEBASENAME.map +GDBSCRIPT=$GDBSCRIPTS/load$MODULEFILEBASENAME + +function findaddr() { + local ADDR=0x$(echo "$SEGMENTS" | \ + grep "$1" | sed 's/^[^ ]*[ ]*[^ ]*[ ]*//' | \ + sed 's/[ ]*[^ ]*$//') + echo $ADDR +} + +function checkerrs() { + if [ "`cat $ERRFILE`" != "" ] ; then { + cat $ERRFILE + exit + } fi +} + +#load the module +echo Copying $MODULEFILE to $TESTMACHINE +rcp $MODULEFILE root@${TESTMACHINE}: + +echo Loading module $MODULEFILE +rsh -l root $TESTMACHINE /sbin/insmod -m ./`basename $MODULEFILE` \ + > $MAPFILE 2> $ERRFILE +checkerrs + +SEGMENTS=`head -n 11 $MAPFILE | tail -n 10` +TEXTADDR=$(findaddr "\\.text[^.]") +LOADSTRING="add-symbol-file $MODULEFILE $TEXTADDR" +SEGADDRS=`echo "$SEGMENTS" | awk '//{ + if ($1 != ".text" && $1 != ".this" && + $1 != ".kstrtab" && $1 != ".kmodtab") { + print " -s " $1 " 0x" $3 " " + } +}'` +LOADSTRING="$LOADSTRING $SEGADDRS" +echo Generating script $GDBSCRIPT +echo $LOADSTRING > $GDBSCRIPT diff -purN -X /home/mbligh/.diff.exclude reference/Documentation/sched-domains.txt current/Documentation/sched-domains.txt --- reference/Documentation/sched-domains.txt 1969-12-31 16:00:00.000000000 -0800 +++ current/Documentation/sched-domains.txt 2004-04-08 15:10:22.000000000 -0700 @@ -0,0 +1,55 @@ +Each CPU has a "base" scheduling domain (struct sched_domain). These are +accessed via cpu_sched_domain(i) and this_sched_domain() macros. The domain +hierarchy is built from these base domains via the ->parent pointer. ->parent +MUST be NULL terminated, and domain structures should be per-CPU as they +are locklessly updated. + +Each scheduling domain spans a number of CPUs (stored in the ->span field). 
+A domain's span MUST be a superset of its child's span, and a base domain
+for CPU i MUST span at least i. The top domain for each CPU will generally
+span all CPUs in the system although strictly it doesn't have to, but this
+could lead to a case where some CPUs will never be given tasks to run unless
+the CPUs allowed mask is explicitly set. A sched domain's span means "balance
+process load among these CPUs".
+
+Each scheduling domain must have one or more CPU groups (struct sched_group)
+which are organised as a circular one way linked list from the ->groups
+pointer. The union of cpumasks of these groups MUST be the same as the
+domain's span. The intersection of cpumasks from any two of these groups
+MUST be the empty set. The group pointed to by the ->groups pointer MUST
+contain the CPU to which the domain belongs. Groups may be shared among
+CPUs as they contain read only data after they have been set up.
+
+Balancing within a sched domain occurs between groups. That is, each group
+is treated as one entity. The load of a group is defined as the sum of the
+load of each of its member CPUs, and only when the load of a group becomes
+out of balance are tasks moved between groups.
+
+In kernel/sched.c, rebalance_tick is run periodically on each CPU. This
+function takes its CPU's base sched domain and checks to see if it has reached
+its rebalance interval. If so, then it will run load_balance on that domain.
+rebalance_tick then checks the parent sched_domain (if it exists), and the
+parent of the parent and so forth.
+
+*** Implementing sched domains ***
+The "base" domain will "span" the first level of the hierarchy. In the case
+of SMT, you'll span all siblings of the physical CPU, with each group being
+a single virtual CPU.
+
+In SMP, the parent of the base domain will span all physical CPUs in the
+node. Each group being a single physical CPU. 
Then with NUMA, the parent +of the SMP domain will span the entire machine, with each group having the +cpumask of a node. Or, you could do multi-level NUMA or Opteron, for example, +might have just one domain covering its one NUMA level. + +The implementor should read comments in include/linux/sched.h: +struct sched_domain fields, SD_FLAG_*, SD_*_INIT to get an idea of +the specifics and what to tune. + +Implementors should change the line +#undef SCHED_DOMAIN_DEBUG +to +#define SCHED_DOMAIN_DEBUG +in kernel/sched.c as this enables an error checking parse of the sched domains +which should catch most possible errors (described above). It also prints out +the domain structure in a visual format. diff -purN -X /home/mbligh/.diff.exclude reference/MAINTAINERS current/MAINTAINERS --- reference/MAINTAINERS 2004-04-07 14:53:52.000000000 -0700 +++ current/MAINTAINERS 2004-04-09 13:23:20.000000000 -0700 @@ -1186,6 +1186,12 @@ W: http://sf.net/projects/kernel-janitor W: http://developer.osdl.org/rddunlap/kj-patches/ S: Maintained +KGDB FOR I386 PLATFORM +P: George Anzinger +M: george@mvista.com +L: linux-net@vger.kernel.org +S: Supported + KERNEL NFSD P: Neil Brown M: neilb@cse.unsw.edu.au @@ -1194,6 +1200,15 @@ W: http://nfs.sourceforge.net/ W: http://www.cse.unsw.edu.au/~neilb/patches/linux-devel/ S: Maintained +KEXEC +P: Eric Biederman +M: ebiederm@xmission.com +M: ebiederman@lnxi.com +W: http://www.xmission.com/~ebiederm/files/kexec/ +L: linux-kernel@vger.kernel.org +L: fastboot@osdl.org +S: Maintained + LANMEDIA WAN CARD DRIVER P: Andrew Stanley-Jones M: asj@lanmedia.com diff -purN -X /home/mbligh/.diff.exclude reference/Makefile current/Makefile --- reference/Makefile 2004-04-07 14:53:52.000000000 -0700 +++ current/Makefile 2004-04-09 11:53:01.000000000 -0700 @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 5 -EXTRAVERSION = +EXTRAVERSION = -mjb1 NAME=Zonked Quokka # *DOCUMENTATION* @@ -457,6 +457,10 @@ ifndef CONFIG_FRAME_POINTER CFLAGS += -fomit-frame-pointer 
endif +ifeq ($(CONFIG_MCOUNT),y) +CFLAGS += -pg +endif + ifdef CONFIG_DEBUG_INFO CFLAGS += -g endif diff -purN -X /home/mbligh/.diff.exclude reference/arch/alpha/Kconfig current/arch/alpha/Kconfig --- reference/arch/alpha/Kconfig 2004-04-07 14:53:52.000000000 -0700 +++ current/arch/alpha/Kconfig 2004-04-09 21:46:02.000000000 -0700 @@ -519,6 +519,14 @@ config NUMA Access). This option is for configuring high-end multiprocessor server machines. If in doubt, say N. +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. + # LARGE_VMALLOC is racy, if you *really* need it then fix it first config ALPHA_LARGE_VMALLOC bool diff -purN -X /home/mbligh/.diff.exclude reference/arch/arm/mm/fault-armv.c current/arch/arm/mm/fault-armv.c --- reference/arch/arm/mm/fault-armv.c 2003-10-01 11:47:31.000000000 -0700 +++ current/arch/arm/mm/fault-armv.c 2004-04-08 15:10:25.000000000 -0700 @@ -191,7 +191,7 @@ void __flush_dcache_page(struct page *pa __cpuc_flush_dcache_page(page_address(page)); - if (!page->mapping) + if (!page_mapping(page)) return; /* @@ -292,7 +292,7 @@ void update_mmu_cache(struct vm_area_str if (!pfn_valid(pfn)) return; page = pfn_to_page(pfn); - if (page->mapping) { + if (page_mapping(page)) { int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); if (dirty) diff -purN -X /home/mbligh/.diff.exclude reference/arch/arm/mm/mm-armv.c current/arch/arm/mm/mm-armv.c --- reference/arch/arm/mm/mm-armv.c 2004-03-11 14:33:33.000000000 -0800 +++ current/arch/arm/mm/mm-armv.c 2004-04-08 15:10:26.000000000 -0700 @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -232,7 +231,7 @@ void free_pgd_slow(pgd_t *pgd) pte = pmd_page(*pmd); pmd_clear(pmd); - pgtable_remove_rmap(pte); + dec_page_state(nr_page_table_pages); pte_free(pte); pmd_free(pmd); free: diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/Kconfig current/arch/i386/Kconfig 
--- reference/arch/i386/Kconfig 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/Kconfig 2004-04-09 21:46:02.000000000 -0700 @@ -418,6 +418,54 @@ config X86_OOSTORE depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR default y +config X86_4G + bool "4 GB kernel-space and 4 GB user-space virtual memory support" + help + This option is only useful for systems that have more than 1 GB + of RAM. + + The default kernel VM layout leaves 1 GB of virtual memory for + kernel-space mappings, and 3 GB of VM for user-space applications. + This option ups both the kernel-space VM and the user-space VM to + 4 GB. + + The cost of this option is additional TLB flushes done at + system-entry points that transition from user-mode into kernel-mode. + I.e. system calls and page faults, and IRQs that interrupt user-mode + code. There's also additional overhead to kernel operations that copy + memory to/from user-space. The overhead from this is hard to tell and + depends on the workload - it can be anything from no visible overhead + to 20-30% overhead. A good rule of thumb is to count with a runtime + overhead of 20%. + + The upside is the much increased kernel-space VM, which more than + quadruples the maximum amount of RAM supported. Kernels compiled with + this option boot on 64GB of RAM and still have more than 3.1 GB of + 'lowmem' left. Another bonus is that highmem IO bouncing decreases, + if used with drivers that still use bounce-buffers. + + There's also a 33% increase in user-space VM size - database + applications might see a boost from this. + + But the cost of the TLB flushes and the runtime overhead has to be + weighed against the bonuses offered by the larger VM spaces. The + dividing line depends on the actual workload - there might be 4 GB + systems that benefit from this option. Systems with less than 4 GB + of RAM will rarely see a benefit from this option - but it's not + out of question, the exact circumstances have to be considered. 
+ +config X86_SWITCH_PAGETABLES + def_bool X86_4G + +config X86_4G_VM_LAYOUT + def_bool X86_4G + +config X86_UACCESS_INDIRECT + def_bool X86_4G + +config X86_HIGH_ENTRY + def_bool X86_4G + config HPET_TIMER bool "HPET Timer Support" help @@ -432,6 +480,10 @@ config HPET_TIMER config HPET_EMULATE_RTC def_bool HPET_TIMER && RTC=y +config VSYSCALL_GTOD + depends on EXPERIMENTAL + bool "VSYSCALL gettimeofday() interface" + config SMP bool "Symmetric multi-processing support" ---help--- @@ -475,17 +527,27 @@ config NR_CPUS This is purely to save memory - each supported CPU adds approximately eight kilobytes to the kernel image. -config PREEMPT - bool "Preemptible Kernel" +config SCHED_SMT + bool "SMT (Hyperthreading) scheduler support" + depends on SMP + default off help - This option reduces the latency of the kernel when reacting to - real-time or interactive events by allowing a low priority process to - be preempted even if it is in kernel mode executing a system call. - This allows applications to run more reliably even when the system is - under load. - - Say Y here if you are building a kernel for a desktop, embedded - or real-time system. Say N if you are unsure. + SMT scheduler support improves the CPU scheduler's decision making + when dealing with Intel Pentium 4 chips with HyperThreading at a + cost of slightly increased overhead in some places. If unsure say + N here. + +# config PREEMPT +# bool "Preemptible Kernel" +# help +# This option reduces the latency of the kernel when reacting to +# real-time or interactive events by allowing a low priority process to +# be preempted even if it is in kernel mode executing a system call. +# This allows applications to run more reliably even when the system is +# under load. +# +# Say Y here if you are building a kernel for a desktop, embedded +# or real-time system. Say N if you are unsure. 
config X86_UP_APIC bool "Local APIC support on uniprocessors" if !SMP @@ -705,11 +767,19 @@ config X86_PAE # Common NUMA Features config NUMA - bool "Numa Memory Allocation Support" + bool "Numa Memory Allocation and Scheduler Support" depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI)) default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT) +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. + # Need comments to help the hapless user trying to turn on NUMA support comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support" depends on X86_NUMAQ && (!HIGHMEM64G || !SMP) @@ -822,6 +892,25 @@ config IRQBALANCE The default yes will allow the kernel to do irq load balancing. Saying no will keep the kernel from doing irq load balancing. +choice + help + This is unrelated to your processor's speed. This variable alters + how often the system is asked to generate timer interrupts. A larger + value can lead to a more responsive system, but also causes extra + overhead from the increased number of context switches. + + If in doubt, leave it at the default of 1000. + + prompt "Kernel HZ" + default 1000HZ + +config 100HZ + bool "100 Hz" + +config 1000HZ + bool "1000 Hz" +endchoice + config HAVE_DEC_LOCK bool depends on (SMP || PREEMPT) && X86_CMPXCHG @@ -847,6 +936,23 @@ config REGPARM generate incorrect output with certain kernel constructs when -mregparm=3 is used. +config KEXEC + bool "kexec system call (EXPERIMENTAL)" + depends on EXPERIMENTAL + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. + + The name comes from the similarity to the exec system call.
+ + It is an ongoing process to be certain the hardware in a machine + is properly shutdown, so do not be surprised if this code does not + initially work for you. It may help to enable device hotplugging + support. As of this writing the exact hardware interface is + strongly in flux, so no good recommendation can be made. + endmenu @@ -1260,6 +1366,15 @@ config DEBUG_PAGEALLOC This results in a large slowdown, but helps to find certain types of memory corruptions. +config SPINLINE + bool "Spinlock inlining" + depends on DEBUG_KERNEL + help + This will change spinlocks from out of line to inline, making them + account cost to the callers in readprofile, rather than the lock + itself (as ".text.lock.filename"). This can be helpful for finding + the callers of locks. + config DEBUG_HIGHMEM bool "Highmem debugging" depends on DEBUG_KERNEL && HIGHMEM @@ -1276,20 +1391,233 @@ config DEBUG_INFO Say Y here only if you plan to use gdb to debug the kernel. If you don't debug the kernel, you can say N. +config LOCKMETER + bool "Kernel lock metering" + depends on SMP + help + Say Y to enable kernel lock metering, which adds overhead to SMP locks, + but allows you to see various statistics using the lockstat command. + config DEBUG_SPINLOCK_SLEEP bool "Sleep-inside-spinlock checking" help If you say Y here, various routines which may sleep will become very noisy if they are called with a spinlock held. +config KGDB + bool "Include kgdb kernel debugger" + depends on DEBUG_KERNEL + help + If you say Y here, the system will be compiled with the debug + option (-g) and a debugging stub will be included in the + kernel. This stub communicates with gdb on another (host) + computer via a serial port. The host computer should have + access to the kernel binary file (vmlinux) and a serial port + that is connected to the target machine. Gdb can be made to + configure the serial port or you can use stty and setserial to + do this. See the 'target' command in gdb. 
This option also + configures in the ability to request a breakpoint early in the + boot process. To request the breakpoint just include 'kgdb' + as a boot option when booting the target machine. The system + will then break as soon as it looks at the boot options. This + option also installs a breakpoint in panic and sends any + kernel faults to the debugger. For more information see the + Documentation/i386/kgdb/kgdb.txt file. + +choice + depends on KGDB + prompt "Debug serial port BAUD" + default KGDB_115200BAUD + help + Gdb and the kernel stub need to agree on the baud rate to be + used. Some systems (x86 family at this writing) allow this to + be configured. + +config KGDB_9600BAUD + bool "9600" + +config KGDB_19200BAUD + bool "19200" + +config KGDB_38400BAUD + bool "38400" + +config KGDB_57600BAUD + bool "57600" + +config KGDB_115200BAUD + bool "115200" +endchoice + +config KGDB_PORT + hex "hex I/O port address of the debug serial port" + depends on KGDB + default 3f8 + help + Some systems (x86 family at this writing) allow the port + address to be configured. The number entered is assumed to be + hex, don't put 0x in front of it. The standard address are: + COM1 3f8 , irq 4 and COM2 2f8 irq 3. Setserial /dev/ttySx + will tell you what you have. It is good to test the serial + connection with a live system before trying to debug. + +config KGDB_IRQ + int "IRQ of the debug serial port" + depends on KGDB + default 4 + help + This is the irq for the debug port. If everything is working + correctly and the kernel has interrupts on a control C to the + port should cause a break into the kernel debug stub. + +config DEBUG_INFO + bool + depends on KGDB + default y + +config KGDB_MORE + bool "Add any additional compile options" + depends on KGDB + default n + help + Saying yes here turns on the ability to enter additional + compile options. 
+ + +config KGDB_OPTIONS + depends on KGDB_MORE + string "Additional compile arguments" + default "-O1" + help + This option allows you to enter additional compile options for + the whole kernel compile. Each platform will have a default + that seems right for it. For example on PPC "-ggdb -O1", and + for i386 "-O1". Note that by configuring KGDB "-g" is already + turned on. In addition, on i386 platforms + "-fomit-frame-pointer" is deleted from the standard compile + options. + +config NO_KGDB_CPUS + int "Number of CPUs" + depends on KGDB && SMP + default NR_CPUS + help + + This option sets the number of cpus for kgdb ONLY. It is used + to prune some internal structures so they look "nice" when + displayed with gdb. This is to overcome possibly larger + numbers that may have been entered above. Enter the real + number to get nice clean kgdb_info displays. + +config KGDB_TS + bool "Enable kgdb time stamp macros?" + depends on KGDB + default n + help + Kgdb event macros allow you to instrument your code with calls + to the kgdb event recording function. The event log may be + examined with gdb at a break point. Turning on this + capability also allows you to choose how many events to + keep. Kgdb always keeps the latest events. + +choice + depends on KGDB_TS + prompt "Max number of time stamps to save?" + default KGDB_TS_128 + +config KGDB_TS_64 + bool "64" + +config KGDB_TS_128 + bool "128" + +config KGDB_TS_256 + bool "256" + +config KGDB_TS_512 + bool "512" + +config KGDB_TS_1024 + bool "1024" + +endchoice + +config STACK_OVERFLOW_TEST + bool "Turn on kernel stack overflow testing?" + depends on KGDB + default n + help + This option enables code in the front line interrupt handlers + to check for kernel stack overflow on interrupts and system + calls. This is part of the kgdb code on x86 systems. + +config KGDB_CONSOLE + bool "Enable serial console thru kgdb port" + depends on KGDB + default n + help + This option enables the command line "console=kgdb" option.
+ When the system is booted with this option in the command line + all kernel printk output is sent to gdb (as well as to other + consoles). For this to work gdb must be connected. For this + reason, this command line option will generate a breakpoint if + gdb has not yet connected. After the gdb continue command is + given all pent up console output will be printed by gdb on the + host machine. Neither this option, nor KGDB require the + serial driver to be configured. + +config KGDB_SYSRQ + bool "Turn on SysRq 'G' command to do a break?" + depends on KGDB + default y + help + This option includes an option in the SysRq code that allows + you to enter SysRq G which generates a breakpoint to the KGDB + stub. This will work if the keyboard is alive and can + interrupt the system. Because of constraints on when the + serial port interrupt can be enabled, this code may allow you + to interrupt the system before the serial port control C is + available. Just say yes here. + +config MMAP_TOPDOWN + bool "Top-down vma allocation" + help + Say Y here to have the kernel change its vma allocation policy + to allocate vma's from the top of the address space down, and + to shove the stack low so as to conserve virtualspace. This is + risky because various apps, including a number of versions of + ld.so, depend on the kernel's bottom-up behavior. + config FRAME_POINTER bool "Compile the kernel with frame pointers" + default KGDB help If you say Y here the resulting kernel image will be slightly larger and slower, but it will give very useful debugging information. If you don't debug the kernel, you can say N, but we may not be able to solve problems without frame pointers. 
+config MAGIC_SYSRQ + bool + depends on KGDB_SYSRQ + default y + +config 4KSTACKS + def_bool y + +config SCHEDSTATS + bool "Collect scheduler statistics" + depends on PROC_FS + default y + help + If you say Y here, additional code will be inserted into the + scheduler and related routines to collect statistics about + scheduler behavior and provide them in /proc/schedstat. These + stats may be useful for both tuning and debugging the scheduler. + If you aren't debugging the scheduler or trying to tune a specific + application, you can say N to avoid the very slight overhead + this adds. + config X86_FIND_SMP_CONFIG bool depends on X86_LOCAL_APIC || X86_VOYAGER @@ -1300,6 +1628,14 @@ config X86_MPPARSE depends on X86_LOCAL_APIC && !X86_VISWS default y +config MCOUNT + bool "Generate function call graph" + depends on FRAME_POINTER + help + This option instruments the kernel to generate a deterministic + function call graph. Answering Y here will make your kernel run + ???% slower. + endmenu source "security/Kconfig" diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/Makefile current/arch/i386/Makefile --- reference/arch/i386/Makefile 2004-03-11 14:33:34.000000000 -0800 +++ current/arch/i386/Makefile 2004-04-08 15:10:23.000000000 -0700 @@ -56,9 +56,9 @@ cflags-$(CONFIG_X86_ELAN) += -march=i486 GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC)) cflags-$(CONFIG_REGPARM) += $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;) -# Enable unit-at-a-time mode when possible. It shrinks the -# kernel considerably. -CFLAGS += $(call check_gcc,-funit-at-a-time,) +# Disable unit-at-a-time mode, it makes gcc use a lot more stack +# due to the lack of sharing of stacklots.
+CFLAGS += $(call check_gcc,-fno-unit-at-a-time,) CFLAGS += $(cflags-y) @@ -97,6 +97,9 @@ mcore-$(CONFIG_X86_ES7000) := mach-es700 # default subarch .h files mflags-y += -Iinclude/asm-i386/mach-default +mflags-$(CONFIG_KGDB) += -gdwarf-2 +mflags-$(CONFIG_KGDB_MORE) += $(shell echo $(CONFIG_KGDB_OPTIONS) | sed -e 's/"//g') + head-y := arch/i386/kernel/head.o arch/i386/kernel/init_task.o libs-y += arch/i386/lib/ diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/boot/compressed/Makefile current/arch/i386/boot/compressed/Makefile --- reference/arch/i386/boot/compressed/Makefile 2003-03-20 11:25:38.000000000 -0800 +++ current/arch/i386/boot/compressed/Makefile 2004-04-09 11:53:01.000000000 -0700 @@ -9,6 +9,17 @@ EXTRA_AFLAGS := -traditional LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup_32 +ifeq ($(CONFIG_MCOUNT),y) +quiet_cmd_nopg = CC $@ + cmd_nopg = $(CC) $(subst -pg,,$(CFLAGS)) -c $(src)/$(*F).c -o $@ + +$(obj)/misc.o: alwayscc + $(call cmd,nopg) + +alwayscc: + $(Q)rm -f $(obj)/misc.o +endif + $(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE $(call if_changed,ld) @: diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/boot/setup.S current/arch/i386/boot/setup.S --- reference/arch/i386/boot/setup.S 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/boot/setup.S 2004-04-09 11:52:59.000000000 -0700 @@ -156,7 +156,7 @@ cmd_line_ptr: .long 0 # (Header versio # can be located anywhere in # low memory 0x10000 or higher. 
-ramdisk_max: .long MAXMEM-1 # (Header version 0x0203 or later) +ramdisk_max: .long __MAXMEM-1 # (Header version 0x0203 or later) # The highest safe address for # the contents of an initrd diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/defconfig current/arch/i386/defconfig --- reference/arch/i386/defconfig 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/defconfig 2004-04-09 13:23:20.000000000 -0700 @@ -89,6 +89,7 @@ CONFIG_X86_USE_PPRO_CHECKSUM=y CONFIG_SMP=y CONFIG_NR_CPUS=8 CONFIG_PREEMPT=y +CONFIG_KEXEC=y CONFIG_X86_LOCAL_APIC=y CONFIG_X86_IO_APIC=y CONFIG_X86_TSC=y diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/Makefile current/arch/i386/kernel/Makefile --- reference/arch/i386/kernel/Makefile 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/Makefile 2004-04-09 21:41:40.000000000 -0700 @@ -7,13 +7,14 @@ extra-y := head.o init_task.o vmlinux.ld obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o vm86.o \ ptrace.o i8259.o ioport.o ldt.o setup.o time.o sys_i386.o \ pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \ - doublefault.o + doublefault.o entry_trampoline.o obj-y += cpu/ obj-y += timers/ obj-$(CONFIG_ACPI_BOOT) += acpi/ obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o obj-$(CONFIG_MCA) += mca.o +obj-$(CONFIG_KGDB) += kgdb_stub.o obj-$(CONFIG_X86_MSR) += msr.o obj-$(CONFIG_X86_CPUID) += cpuid.o obj-$(CONFIG_MICROCODE) += microcode.o @@ -23,6 +24,7 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoli obj-$(CONFIG_X86_MPPARSE) += mpparse.o obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o obj-$(CONFIG_X86_IO_APIC) += io_apic.o +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_X86_NUMAQ) += numaq.o obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o obj-$(CONFIG_MODULES) += module.o @@ -31,6 +33,7 @@ obj-$(CONFIG_ACPI_SRAT) += srat.o obj-$(CONFIG_HPET_TIMER) += time_hpet.o obj-$(CONFIG_EFI) += efi.o efi_stub.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_VSYSCALL_GTOD) += 
vsyscall-gtod.o EXTRA_AFLAGS := -traditional diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/acpi/boot.c current/arch/i386/kernel/acpi/boot.c --- reference/arch/i386/kernel/acpi/boot.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/acpi/boot.c 2004-04-09 11:52:59.000000000 -0700 @@ -438,7 +438,7 @@ acpi_scan_rsdp ( * RSDP signature. */ for (offset = 0; offset < length; offset += 16) { - if (strncmp((char *) (start + offset), "RSD PTR ", sig_len)) + if (strncmp((char *) __va(start + offset), "RSD PTR ", sig_len)) continue; return (start + offset); } diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/acpi/sleep.c current/arch/i386/kernel/acpi/sleep.c --- reference/arch/i386/kernel/acpi/sleep.c 2003-10-01 11:34:28.000000000 -0700 +++ current/arch/i386/kernel/acpi/sleep.c 2004-04-09 11:52:59.000000000 -0700 @@ -19,13 +19,29 @@ extern void zap_low_mappings(void); extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long)); -static void init_low_mapping(pgd_t *pgd, int pgd_limit) +static void map_low(pgd_t *pgd_base, unsigned long start, unsigned long end) { - int pgd_ofs = 0; - - while ((pgd_ofs < pgd_limit) && (pgd_ofs + USER_PTRS_PER_PGD < PTRS_PER_PGD)) { - set_pgd(pgd, *(pgd+USER_PTRS_PER_PGD)); - pgd_ofs++, pgd++; + unsigned long vaddr; + pmd_t *pmd; + pgd_t *pgd; + int i, j; + + pgd = pgd_base; + + for (i = 0; i < PTRS_PER_PGD; pgd++, i++) { + vaddr = i*PGDIR_SIZE; + if (end && (vaddr >= end)) + break; + pmd = pmd_offset(pgd, 0); + for (j = 0; j < PTRS_PER_PMD; pmd++, j++) { + vaddr = i*PGDIR_SIZE + j*PMD_SIZE; + if (end && (vaddr >= end)) + break; + if (vaddr < start) + continue; + set_pmd(pmd, __pmd(_KERNPG_TABLE + _PAGE_PSE + + vaddr - start)); + } } } @@ -39,7 +55,9 @@ int acpi_save_state_mem (void) { if (!acpi_wakeup_address) return 1; - init_low_mapping(swapper_pg_dir, USER_PTRS_PER_PGD); + if (!cpu_has_pse) + return 1; + map_low(swapper_pg_dir, 0, LOW_MAPPINGS_SIZE); memcpy((void *) 
acpi_wakeup_address, &wakeup_start, &wakeup_end - &wakeup_start); acpi_copy_wakeup_routine(acpi_wakeup_address); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/apic.c current/arch/i386/kernel/apic.c --- reference/arch/i386/kernel/apic.c 2004-03-11 14:33:35.000000000 -0800 +++ current/arch/i386/kernel/apic.c 2004-04-09 13:23:20.000000000 -0700 @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -183,6 +184,39 @@ void disconnect_bsp_APIC(void) outb(0x70, 0x22); outb(0x00, 0x23); } +#ifdef CONFIG_KEXEC + else { + /* Go back to Virtual Wire compatibility mode */ + unsigned long value; + + /* For the spurious interrupt use vector F, and enable it */ + value = apic_read(APIC_SPIV); + value &= ~APIC_VECTOR_MASK; + value |= APIC_SPIV_APIC_ENABLED; + value |= 0xf; + apic_write_around(APIC_SPIV, value); + + /* For LVT0 make it edge triggered, active high, external and enabled */ + value = apic_read(APIC_LVT0); + value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | + APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | + APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED ); + value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; + value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXINT); + apic_write_around(APIC_LVT0, value); + + /* For LVT1 make it edge triggered, active high, nmi and enabled */ + value = apic_read(APIC_LVT1); + value &= ~( + APIC_MODE_MASK | APIC_SEND_PENDING | + APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | + APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); + value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; + value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); + apic_write_around(APIC_LVT1, value); + } +#endif /* CONFIG_KEXEC */ + } void disable_local_APIC(void) @@ -1024,7 +1058,7 @@ int setup_profiling_timer(unsigned int m * multiplier is 1 and it can be changed by writing the new multiplier * value into /proc/profile. 
*/ - +extern void calc_load_cpu(int cpu); inline void smp_local_timer_interrupt(struct pt_regs * regs) { int cpu = smp_processor_id(); @@ -1052,6 +1086,7 @@ inline void smp_local_timer_interrupt(st #ifdef CONFIG_SMP update_process_times(user_mode(regs)); + calc_load_cpu(cpu); #endif } @@ -1154,6 +1189,28 @@ asmlinkage void smp_error_interrupt(void irq_exit(); } +void stop_apics(void) +{ + /* By resetting the APIC's we disable the nmi watchdog */ +#if CONFIG_SMP + /* + * Stop all CPUs and turn off local APICs and the IO-APIC, so + * other OSs see a clean IRQ state. + */ + smp_send_stop(); +#else + if (cpu_has_apic) { + local_irq_disable(); + disable_local_APIC(); + local_irq_enable(); + } +#endif +#if defined(CONFIG_X86_IO_APIC) + disable_IO_APIC(); +#endif + disconnect_bsp_APIC(); +} + /* * This initializes the IO-APIC and APIC hardware if this is * a UP kernel. diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/asm-offsets.c current/arch/i386/kernel/asm-offsets.c --- reference/arch/i386/kernel/asm-offsets.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/asm-offsets.c 2004-04-09 11:53:00.000000000 -0700 @@ -31,5 +31,19 @@ void foo(void) DEFINE(RT_SIGFRAME_sigcontext, offsetof (struct rt_sigframe, uc.uc_mcontext)); + DEFINE(TI_task, offsetof (struct thread_info, task)); + DEFINE(TI_exec_domain, offsetof (struct thread_info, exec_domain)); + DEFINE(TI_flags, offsetof (struct thread_info, flags)); + DEFINE(TI_preempt_count, offsetof (struct thread_info, preempt_count)); + DEFINE(TI_addr_limit, offsetof (struct thread_info, addr_limit)); + DEFINE(TI_real_stack, offsetof (struct thread_info, real_stack)); + DEFINE(TI_virtual_stack, offsetof (struct thread_info, virtual_stack)); + DEFINE(TI_user_pgd, offsetof (struct thread_info, user_pgd)); + + DEFINE(FIX_ENTRY_TRAMPOLINE_0_addr, + __fix_to_virt(FIX_ENTRY_TRAMPOLINE_0)); + DEFINE(FIX_VSYSCALL_addr, __fix_to_virt(FIX_VSYSCALL)); DEFINE(PAGE_SIZE_asm, PAGE_SIZE); + 
DEFINE(task_thread_db7, + offsetof (struct task_struct, thread.debugreg[7])); } diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/cpu/common.c current/arch/i386/kernel/cpu/common.c --- reference/arch/i386/kernel/cpu/common.c 2004-01-15 10:41:00.000000000 -0800 +++ current/arch/i386/kernel/cpu/common.c 2004-04-09 11:53:00.000000000 -0700 @@ -514,12 +514,16 @@ void __init cpu_init (void) set_tss_desc(cpu,t); cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff; load_TR_desc(); - load_LDT(&init_mm.context); + if (cpu) + load_LDT(&init_mm.context); /* Set up doublefault TSS pointer in the GDT */ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); cpu_gdt_table[cpu][GDT_ENTRY_DOUBLEFAULT_TSS].b &= 0xfffffdff; + if (cpu) + trap_init_virtual_GDT(); + /* Clear %fs and %gs. */ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs"); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c current/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c --- reference/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c 2004-02-18 14:56:46.000000000 -0800 +++ current/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c 2004-04-08 15:10:22.000000000 -0700 @@ -57,8 +57,7 @@ static int cpufreq_p4_setdc(unsigned int u32 l, h; cpumask_t cpus_allowed, affected_cpu_map; struct cpufreq_freqs freqs; - int hyperthreading = 0; - int sibling = 0; + int j; if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV)) @@ -68,13 +67,10 @@ static int cpufreq_p4_setdc(unsigned int cpus_allowed = current->cpus_allowed; /* only run on CPU to be set, or on its sibling */ - affected_cpu_map = cpumask_of_cpu(cpu); -#ifdef CONFIG_X86_HT - hyperthreading = ((cpu_has_ht) && (smp_num_siblings == 2)); - if (hyperthreading) { - sibling = cpu_sibling_map[cpu]; - cpu_set(sibling, affected_cpu_map); - } +#ifdef CONFIG_SMP + affected_cpu_map = cpu_sibling_map[cpu]; +#else + affected_cpu_map = cpumask_of_cpu(cpu); #endif set_cpus_allowed(current, 
affected_cpu_map); BUG_ON(!cpu_isset(smp_processor_id(), affected_cpu_map)); @@ -97,11 +93,11 @@ static int cpufreq_p4_setdc(unsigned int /* notifiers */ freqs.old = stock_freq * l / 8; freqs.new = stock_freq * newstate / 8; - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - if (hyperthreading) { - freqs.cpu = sibling; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + for_each_cpu(j) { + if (cpu_isset(j, affected_cpu_map)) { + freqs.cpu = j; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } } rdmsr(MSR_IA32_THERM_STATUS, l, h); @@ -132,10 +128,11 @@ static int cpufreq_p4_setdc(unsigned int set_cpus_allowed(current, cpus_allowed); /* notifiers */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - if (hyperthreading) { - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + for_each_cpu(j) { + if (cpu_isset(j, affected_cpu_map)) { + freqs.cpu = j; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } } return 0; diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/cpu/intel.c current/arch/i386/kernel/cpu/intel.c --- reference/arch/i386/kernel/cpu/intel.c 2004-03-11 14:33:36.000000000 -0800 +++ current/arch/i386/kernel/cpu/intel.c 2004-04-09 11:53:00.000000000 -0700 @@ -10,6 +10,7 @@ #include #include #include +#include #include "cpu.h" @@ -19,8 +20,6 @@ #include #endif -extern int trap_init_f00f_bug(void); - #ifdef CONFIG_X86_INTEL_USERCOPY /* * Alignment at which movsl is preferred for bulk memory copies. 
@@ -165,7 +164,7 @@ static void __init init_intel(struct cpu c->f00f_bug = 1; if ( !f00f_workaround_enabled ) { - trap_init_f00f_bug(); + trap_init_virtual_IDT(); printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); f00f_workaround_enabled = 1; } @@ -250,6 +249,12 @@ static void __init init_intel(struct cpu /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) clear_bit(X86_FEATURE_SEP, c->x86_capability); + /* + * FIXME: SEP is disabled for 4G/4G for now: + */ +#ifdef CONFIG_X86_HIGH_ENTRY + clear_bit(X86_FEATURE_SEP, c->x86_capability); +#endif /* Names for the Pentium II/Celeron processors detectable only by also checking the cache size. diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/dmi_scan.c current/arch/i386/kernel/dmi_scan.c --- reference/arch/i386/kernel/dmi_scan.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/dmi_scan.c 2004-04-09 13:23:20.000000000 -0700 @@ -216,31 +216,6 @@ static __init int set_bios_reboot(struct return 0; } -/* - * Some machines require the "reboot=s" commandline option, this quirk makes that automatic. - */ -static __init int set_smp_reboot(struct dmi_blacklist *d) -{ -#ifdef CONFIG_SMP - extern int reboot_smp; - if (reboot_smp == 0) - { - reboot_smp = 1; - printk(KERN_INFO "%s series board detected. Selecting SMP-method for reboots.\n", d->ident); - } -#endif - return 0; -} - -/* - * Some machines require the "reboot=b,s" commandline option, this quirk makes that automatic. 
- */ -static __init int set_smp_bios_reboot(struct dmi_blacklist *d) -{ - set_smp_reboot(d); - set_bios_reboot(d); - return 0; -} /* * Some bioses have a broken protected mode poweroff and need to use realmode @@ -616,7 +591,7 @@ static __initdata struct dmi_blacklist d MATCH(DMI_BIOS_VERSION, "4.60 PGMA"), MATCH(DMI_BIOS_DATE, "134526184"), NO_MATCH } }, - { set_smp_bios_reboot, "Dell PowerEdge 1300", { /* Handle problems with rebooting on Dell 1300's */ + { set_bios_reboot, "Dell PowerEdge 1300", { /* Handle problems with rebooting on Dell 1300's */ MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"), NO_MATCH, NO_MATCH diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/doublefault.c current/arch/i386/kernel/doublefault.c --- reference/arch/i386/kernel/doublefault.c 2003-10-01 11:40:40.000000000 -0700 +++ current/arch/i386/kernel/doublefault.c 2004-04-09 11:53:00.000000000 -0700 @@ -7,12 +7,13 @@ #include #include #include +#include #define DOUBLEFAULT_STACKSIZE (1024) static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; #define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) -#define ptr_ok(x) ((x) > 0xc0000000 && (x) < 0xc1000000) +#define ptr_ok(x) (((x) > __PAGE_OFFSET && (x) < (__PAGE_OFFSET + 0x01000000)) || ((x) >= FIXADDR_START)) static void doublefault_fn(void) { @@ -38,8 +39,8 @@ static void doublefault_fn(void) printk("eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n", t->eax, t->ebx, t->ecx, t->edx); - printk("esi = %08lx, edi = %08lx\n", - t->esi, t->edi); + printk("esi = %08lx, edi = %08lx, ebp = %08lx\n", + t->esi, t->edi, t->ebp); } } diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/entry.S current/arch/i386/kernel/entry.S --- reference/arch/i386/kernel/entry.S 2004-03-11 14:33:36.000000000 -0800 +++ current/arch/i386/kernel/entry.S 2004-04-09 13:23:20.000000000 -0700 @@ -43,11 +43,25 @@ #include #include #include +#include #include 
#include +#include #include #include #include "irq_vectors.h" + /* We do not recover from a stack overflow, but at least + * we know it happened and should be able to track it down. + */ +#ifdef CONFIG_STACK_OVERFLOW_TEST +#define STACK_OVERFLOW_TEST \ + testl $7680,%esp; \ + jnz 10f; \ + call stack_overflow; \ +10: +#else +#define STACK_OVERFLOW_TEST +#endif #define nr_syscalls ((syscall_table_size)/4) @@ -87,7 +101,102 @@ TSS_ESP0_OFFSET = (4 - 0x200) #define resume_kernel restore_all #endif -#define SAVE_ALL \ +#ifdef CONFIG_X86_HIGH_ENTRY + +#ifdef CONFIG_X86_SWITCH_PAGETABLES + +#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) +/* + * If task is preempted in __SWITCH_KERNELSPACE, and moved to another cpu, + * __switch_to repoints %esp to the appropriate virtual stack; but %ebp is + * left stale, so we must check whether to repeat the real stack calculation. + */ +#define repeat_if_esp_changed \ + xorl %esp, %ebp; \ + testl $-THREAD_SIZE, %ebp; \ + jnz 0b +#else +#define repeat_if_esp_changed +#endif + +/* clobbers ebx, edx and ebp */ + +#define __SWITCH_KERNELSPACE \ + cmpl $0xff000000, %esp; \ + jb 1f; \ + \ + /* \ + * switch pagetables and load the real stack, \ + * keep the stack offset: \ + */ \ + \ + movl $swapper_pg_dir-__PAGE_OFFSET, %edx; \ + \ + /* GET_THREAD_INFO(%ebp) intermixed */ \ +0: \ + movl %esp, %ebp; \ + movl %esp, %ebx; \ + andl $(-THREAD_SIZE), %ebp; \ + andl $(THREAD_SIZE-1), %ebx; \ + orl TI_real_stack(%ebp), %ebx; \ + repeat_if_esp_changed; \ + \ + movl %edx, %cr3; \ + movl %ebx, %esp; \ +1: + +#endif + + +#define __SWITCH_USERSPACE \ + /* interrupted any of the user return paths? 
*/ \ + \ + movl EIP(%esp), %eax; \ + \ + cmpl $int80_ret_start_marker, %eax; \ + jb 33f; /* nope - continue with sysexit check */\ + cmpl $int80_ret_end_marker, %eax; \ + jb 22f; /* yes - switch to virtual stack */ \ +33: \ + cmpl $sysexit_ret_start_marker, %eax; \ + jb 44f; /* nope - continue with user check */ \ + cmpl $sysexit_ret_end_marker, %eax; \ + jb 22f; /* yes - switch to virtual stack */ \ + /* return to userspace? */ \ +44: \ + movl EFLAGS(%esp),%ecx; \ + movb CS(%esp),%cl; \ + testl $(VM_MASK | 3),%ecx; \ + jz 2f; \ +22: \ + /* \ + * switch to the virtual stack, then switch to \ + * the userspace pagetables. \ + */ \ + \ + GET_THREAD_INFO(%ebp); \ + movl TI_virtual_stack(%ebp), %edx; \ + movl TI_user_pgd(%ebp), %ecx; \ + \ + movl %esp, %ebx; \ + andl $(THREAD_SIZE-1), %ebx; \ + orl %ebx, %edx; \ +int80_ret_start_marker: \ + movl %edx, %esp; \ + movl %ecx, %cr3; \ + \ + __RESTORE_ALL; \ +int80_ret_end_marker: \ +2: + +#else /* !CONFIG_X86_HIGH_ENTRY */ + +#define __SWITCH_KERNELSPACE +#define __SWITCH_USERSPACE + +#endif + +#define __SAVE_ALL \ cld; \ pushl %es; \ pushl %ds; \ @@ -102,7 +211,7 @@ TSS_ESP0_OFFSET = (4 - 0x200) movl %edx, %ds; \ movl %edx, %es; -#define RESTORE_INT_REGS \ +#define __RESTORE_INT_REGS \ popl %ebx; \ popl %ecx; \ popl %edx; \ @@ -111,29 +220,28 @@ TSS_ESP0_OFFSET = (4 - 0x200) popl %ebp; \ popl %eax -#define RESTORE_REGS \ - RESTORE_INT_REGS; \ -1: popl %ds; \ -2: popl %es; \ +#define __RESTORE_REGS \ + __RESTORE_INT_REGS; \ +111: popl %ds; \ +222: popl %es; \ .section .fixup,"ax"; \ -3: movl $0,(%esp); \ - jmp 1b; \ -4: movl $0,(%esp); \ - jmp 2b; \ +444: movl $0,(%esp); \ + jmp 111b; \ +555: movl $0,(%esp); \ + jmp 222b; \ .previous; \ .section __ex_table,"a";\ .align 4; \ - .long 1b,3b; \ - .long 2b,4b; \ + .long 111b,444b;\ + .long 222b,555b;\ .previous - -#define RESTORE_ALL \ - RESTORE_REGS \ +#define __RESTORE_ALL \ + __RESTORE_REGS \ addl $4, %esp; \ -1: iret; \ +333: iret; \ .section .fixup,"ax"; \ -2: sti; \ +666: 
sti; \ movl $(__USER_DS), %edx; \ movl %edx, %ds; \ movl %edx, %es; \ @@ -142,10 +250,19 @@ TSS_ESP0_OFFSET = (4 - 0x200) .previous; \ .section __ex_table,"a";\ .align 4; \ - .long 1b,2b; \ + .long 333b,666b;\ .previous +#define SAVE_ALL \ + __SAVE_ALL; \ + __SWITCH_KERNELSPACE; \ + STACK_OVERFLOW_TEST; + +#define RESTORE_ALL \ + __SWITCH_USERSPACE; \ + __RESTORE_ALL; +.section .entry.text,"ax" ENTRY(lcall7) pushfl # We get a different stack layout with call @@ -163,7 +280,7 @@ do_lcall: movl %edx,EIP(%ebp) # Now we move them to their "normal" places movl %ecx,CS(%ebp) # GET_THREAD_INFO_WITH_ESP(%ebp) # GET_THREAD_INFO - movl TI_EXEC_DOMAIN(%ebp), %edx # Get the execution domain + movl TI_exec_domain(%ebp), %edx # Get the execution domain call *4(%edx) # Call the lcall7 handler for the domain addl $4, %esp popl %eax @@ -208,7 +325,7 @@ ENTRY(resume_userspace) cli # make sure we don't miss an interrupt # setting need_resched or sigpending # between sampling and the iret - movl TI_FLAGS(%ebp), %ecx + movl TI_flags(%ebp), %ecx andl $_TIF_WORK_MASK, %ecx # is there any work to be done on # int/exception return? jne work_pending @@ -216,18 +333,18 @@ ENTRY(resume_userspace) #ifdef CONFIG_PREEMPT ENTRY(resume_kernel) - cmpl $0,TI_PRE_COUNT(%ebp) # non-zero preempt_count ? + cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? jnz restore_all need_resched: - movl TI_FLAGS(%ebp), %ecx # need_resched set ? + movl TI_flags(%ebp), %ecx # need_resched set ? testb $_TIF_NEED_RESCHED, %cl jz restore_all testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ? jz restore_all - movl $PREEMPT_ACTIVE,TI_PRE_COUNT(%ebp) + movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp) sti call schedule - movl $0,TI_PRE_COUNT(%ebp) + movl $0,TI_preempt_count(%ebp) cli jmp need_resched #endif @@ -246,37 +363,50 @@ sysenter_past_esp: pushl $(__USER_CS) pushl $SYSENTER_RETURN -/* - * Load the potential sixth argument from user stack. - * Careful about security. 
- */ - cmpl $__PAGE_OFFSET-3,%ebp - jae syscall_fault -1: movl (%ebp),%ebp -.section __ex_table,"a" - .align 4 - .long 1b,syscall_fault -.previous - pushl %eax SAVE_ALL GET_THREAD_INFO(%ebp) cmpl $(nr_syscalls), %eax jae syscall_badsys - testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebp) + testb $_TIF_SYSCALL_TRACE,TI_flags(%ebp) jnz syscall_trace_entry call *sys_call_table(,%eax,4) movl %eax,EAX(%esp) cli - movl TI_FLAGS(%ebp), %ecx + movl TI_flags(%ebp), %ecx testw $_TIF_ALLWORK_MASK, %cx jne syscall_exit_work + +#ifdef CONFIG_X86_SWITCH_PAGETABLES + + GET_THREAD_INFO(%ebp) + movl TI_virtual_stack(%ebp), %edx + movl TI_user_pgd(%ebp), %ecx + movl %esp, %ebx + andl $0x1fff, %ebx + orl %ebx, %edx +sysexit_ret_start_marker: + movl %edx, %esp + movl %ecx, %cr3 +#endif + /* + * only ebx is not restored by the userspace sysenter vsyscall + * code, it assumes it to be callee-saved. + */ + movl EBX(%esp), %ebx + /* if something modifies registers it must also disable sysexit */ + movl EIP(%esp), %edx movl OLDESP(%esp), %ecx + sti sysexit +#ifdef CONFIG_X86_SWITCH_PAGETABLES +sysexit_ret_end_marker: + nop +#endif # system call handler stub @@ -287,7 +417,7 @@ ENTRY(system_call) cmpl $(nr_syscalls), %eax jae syscall_badsys # system call tracing in operation - testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebp) + testb $_TIF_SYSCALL_TRACE,TI_flags(%ebp) jnz syscall_trace_entry syscall_call: call *sys_call_table(,%eax,4) @@ -296,10 +426,23 @@ syscall_exit: cli # make sure we don't miss an interrupt # setting need_resched or sigpending # between sampling and the iret - movl TI_FLAGS(%ebp), %ecx + movl TI_flags(%ebp), %ecx testw $_TIF_ALLWORK_MASK, %cx # current->work jne syscall_exit_work restore_all: +#ifdef CONFIG_TRAP_BAD_SYSCALL_EXITS + movl EFLAGS(%esp), %eax # mix EFLAGS and CS + movb CS(%esp), %al + testl $(VM_MASK | 3), %eax + jz resume_kernelX # returning to kernel or vm86-space + + cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? 
+ jz resume_kernelX + + int $3 + +resume_kernelX: +#endif RESTORE_ALL # perform work that needs to be done immediately before resumption @@ -312,7 +455,7 @@ work_resched: cli # make sure we don't miss an interrupt # setting need_resched or sigpending # between sampling and the iret - movl TI_FLAGS(%ebp), %ecx + movl TI_flags(%ebp), %ecx andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? jz restore_all @@ -327,6 +470,22 @@ work_notifysig: # deal with pending s # vm86-space xorl %edx, %edx call do_notify_resume + +#if CONFIG_X86_HIGH_ENTRY + /* + * Reload db7 if necessary: + */ + movl TI_flags(%ebp), %ecx + testb $_TIF_DB7, %cl + jnz work_db7 + + jmp restore_all + +work_db7: + movl TI_task(%ebp), %edx; + movl task_thread_db7(%edx), %edx; + movl %edx, %db7; +#endif jmp restore_all ALIGN @@ -382,7 +541,7 @@ syscall_badsys: */ .data ENTRY(interrupt) -.text +.previous vector=0 ENTRY(irq_entries_start) @@ -392,7 +551,7 @@ ENTRY(irq_entries_start) jmp common_interrupt .data .long 1b -.text +.previous vector=vector+1 .endr @@ -433,12 +592,17 @@ error_code: movl ES(%esp), %edi # get the function address movl %eax, ORIG_EAX(%esp) movl %ecx, ES(%esp) - movl %esp, %edx pushl %esi # push the error code - pushl %edx # push the pt_regs pointer movl $(__USER_DS), %edx movl %edx, %ds movl %edx, %es + +/* clobbers edx, ebx and ebp */ + __SWITCH_KERNELSPACE + + leal 4(%esp), %edx # prepare pt_regs + pushl %edx # push pt_regs + call *%edi addl $8, %esp jmp ret_from_exception @@ -529,7 +693,7 @@ nmi_stack_correct: pushl %edx call do_nmi addl $8, %esp - RESTORE_ALL + jmp restore_all nmi_stack_fixup: FIX_STACK(12,nmi_stack_correct, 1) @@ -606,6 +770,8 @@ ENTRY(spurious_interrupt_bug) pushl $do_spurious_interrupt_bug jmp error_code +.previous + .data ENTRY(sys_call_table) .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ @@ -882,5 +1048,6 @@ ENTRY(sys_call_table) .long sys_utimes .long sys_fadvise64_64 .long 
sys_ni_syscall /* sys_vserver */ + .long sys_kexec_load syscall_table_size=(.-sys_call_table) diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/entry_trampoline.c current/arch/i386/kernel/entry_trampoline.c --- reference/arch/i386/kernel/entry_trampoline.c 1969-12-31 16:00:00.000000000 -0800 +++ current/arch/i386/kernel/entry_trampoline.c 2004-04-09 11:53:01.000000000 -0700 @@ -0,0 +1,75 @@ +/* + * linux/arch/i386/kernel/entry_trampoline.c + * + * (C) Copyright 2003 Ingo Molnar + * + * This file contains the needed support code for 4GB userspace + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern char __entry_tramp_start, __entry_tramp_end, __start___entry_text; + +void __init init_entry_mappings(void) +{ +#ifdef CONFIG_X86_HIGH_ENTRY + + void *tramp; + int p; + + /* + * We need a high IDT and GDT for the 4G/4G split: + */ + trap_init_virtual_IDT(); + + __set_fixmap(FIX_ENTRY_TRAMPOLINE_0, __pa((unsigned long)&__entry_tramp_start), PAGE_KERNEL); + __set_fixmap(FIX_ENTRY_TRAMPOLINE_1, __pa((unsigned long)&__entry_tramp_start) + PAGE_SIZE, PAGE_KERNEL); + tramp = (void *)fix_to_virt(FIX_ENTRY_TRAMPOLINE_0); + + printk("mapped 4G/4G trampoline to %p.\n", tramp); + BUG_ON((void *)&__start___entry_text != tramp); + /* + * Virtual kernel stack: + */ + BUG_ON(__kmap_atomic_vaddr(KM_VSTACK_TOP) & (THREAD_SIZE-1)); + BUG_ON(sizeof(struct desc_struct)*NR_CPUS*GDT_ENTRIES > 2*PAGE_SIZE); + BUG_ON((unsigned int)&__entry_tramp_end - (unsigned int)&__entry_tramp_start > 2*PAGE_SIZE); + + /* + * set up the initial thread's virtual stack related + * fields: + */ + for (p = 0; p < ARRAY_SIZE(current->thread.stack_page); p++) + current->thread.stack_page[p] = virt_to_page((char *)current->thread_info + (p*PAGE_SIZE)); + + current->thread_info->virtual_stack = (void *)__kmap_atomic_vaddr(KM_VSTACK_TOP); + + for (p = 0; p < ARRAY_SIZE(current->thread.stack_page); p++) { + __kunmap_atomic_type(KM_VSTACK_TOP-p); + 
__kmap_atomic(current->thread.stack_page[p], KM_VSTACK_TOP-p); + } +#endif + current->thread_info->real_stack = (void *)current->thread_info; + current->thread_info->user_pgd = NULL; + current->thread.esp0 = (unsigned long)current->thread_info->real_stack + THREAD_SIZE; +} + + + +void __init entry_trampoline_setup(void) +{ + /* + * old IRQ entries set up by the boot code will still hang + * around - they are a sign of hw trouble anyway, now they'll + * produce a double fault message. + */ + trap_init_virtual_GDT(); +} diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/i386_ksyms.c current/arch/i386/kernel/i386_ksyms.c --- reference/arch/i386/kernel/i386_ksyms.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/i386_ksyms.c 2004-04-09 11:53:01.000000000 -0700 @@ -93,7 +93,6 @@ EXPORT_SYMBOL_NOVERS(__down_failed_inter EXPORT_SYMBOL_NOVERS(__down_failed_trylock); EXPORT_SYMBOL_NOVERS(__up_wakeup); /* Networking helper routines. */ -EXPORT_SYMBOL(csum_partial_copy_generic); /* Delay loops */ EXPORT_SYMBOL(__ndelay); EXPORT_SYMBOL(__udelay); @@ -107,13 +106,17 @@ EXPORT_SYMBOL_NOVERS(__get_user_4); EXPORT_SYMBOL(strpbrk); EXPORT_SYMBOL(strstr); +#if !defined(CONFIG_X86_UACCESS_INDIRECT) EXPORT_SYMBOL(strncpy_from_user); -EXPORT_SYMBOL(__strncpy_from_user); +EXPORT_SYMBOL(__direct_strncpy_from_user); EXPORT_SYMBOL(clear_user); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__copy_from_user_ll); EXPORT_SYMBOL(__copy_to_user_ll); EXPORT_SYMBOL(strnlen_user); +#else /* CONFIG_X86_UACCESS_INDIRECT */ +EXPORT_SYMBOL(direct_csum_partial_copy_generic); +#endif EXPORT_SYMBOL(dma_alloc_coherent); EXPORT_SYMBOL(dma_free_coherent); @@ -184,6 +187,11 @@ EXPORT_SYMBOL_NOVERS(memcpy); EXPORT_SYMBOL_NOVERS(memset); EXPORT_SYMBOL_NOVERS(memcmp); +#ifdef CONFIG_MCOUNT +extern void mcount(void); +EXPORT_SYMBOL_NOVERS(mcount); +#endif + #ifdef CONFIG_HAVE_DEC_LOCK EXPORT_SYMBOL(atomic_dec_and_lock); #endif diff -purN -X /home/mbligh/.diff.exclude 
reference/arch/i386/kernel/i387.c current/arch/i386/kernel/i387.c --- reference/arch/i386/kernel/i387.c 2004-02-18 14:56:47.000000000 -0800 +++ current/arch/i386/kernel/i387.c 2004-04-09 11:53:00.000000000 -0700 @@ -218,6 +218,7 @@ void set_fpu_mxcsr( struct task_struct * static int convert_fxsr_to_user( struct _fpstate __user *buf, struct i387_fxsave_struct *fxsave ) { + struct _fpreg tmp[8]; /* 80 bytes scratch area */ unsigned long env[7]; struct _fpreg __user *to; struct _fpxreg *from; @@ -234,23 +235,25 @@ static int convert_fxsr_to_user( struct if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) ) return 1; - to = &buf->_st[0]; + to = tmp; from = (struct _fpxreg *) &fxsave->st_space[0]; for ( i = 0 ; i < 8 ; i++, to++, from++ ) { unsigned long *t = (unsigned long *)to; unsigned long *f = (unsigned long *)from; - if (__put_user(*f, t) || - __put_user(*(f + 1), t + 1) || - __put_user(from->exponent, &to->exponent)) - return 1; + *t = *f; + *(t + 1) = *(f+1); + to->exponent = from->exponent; } + if (copy_to_user(buf->_st, tmp, sizeof(struct _fpreg [8]))) + return 1; return 0; } static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave, struct _fpstate __user *buf ) { + struct _fpreg tmp[8]; /* 80 bytes scratch area */ unsigned long env[7]; struct _fpxreg *to; struct _fpreg __user *from; @@ -258,6 +261,8 @@ static int convert_fxsr_from_user( struc if ( __copy_from_user( env, buf, 7 * sizeof(long) ) ) return 1; + if (copy_from_user(tmp, buf->_st, sizeof(struct _fpreg [8]))) + return 1; fxsave->cwd = (unsigned short)(env[0] & 0xffff); fxsave->swd = (unsigned short)(env[1] & 0xffff); @@ -269,15 +274,14 @@ static int convert_fxsr_from_user( struc fxsave->fos = env[6]; to = (struct _fpxreg *) &fxsave->st_space[0]; - from = &buf->_st[0]; + from = tmp; for ( i = 0 ; i < 8 ; i++, to++, from++ ) { unsigned long *t = (unsigned long *)to; unsigned long *f = (unsigned long *)from; - if (__get_user(*t, f) || - __get_user(*(t + 1), f + 1) || - 
__get_user(to->exponent, &from->exponent)) - return 1; + *t = *f; + *(t + 1) = *(f + 1); + to->exponent = from->exponent; } return 0; } diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/i8259.c current/arch/i386/kernel/i8259.c --- reference/arch/i386/kernel/i8259.c 2004-03-11 14:33:36.000000000 -0800 +++ current/arch/i386/kernel/i8259.c 2004-04-09 13:23:20.000000000 -0700 @@ -244,9 +244,21 @@ static int i8259A_resume(struct sys_devi return 0; } +static int i8259A_shutdown(struct sys_device *dev) +{ + /* Put the i8259A into a quiescent state that + * the kernel initialization code can get it + * out of. + */ + outb(0xff, 0x21); /* mask all of 8259A-1 */ + outb(0xff, 0xA1); /* mask all of 8259A-1 */ + return 0; +} + static struct sysdev_class i8259_sysdev_class = { set_kset_name("i8259"), .resume = i8259A_resume, + .shutdown = i8259A_shutdown, }; static struct sys_device device_i8259A = { @@ -444,4 +456,7 @@ void __init init_IRQ(void) */ if (boot_cpu_data.hard_math && !cpu_has_fpu) setup_irq(FPU_IRQ, &fpu_irq); + + current_thread_info()->cpu = 0; + irq_ctx_init(0); } diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/init_task.c current/arch/i386/kernel/init_task.c --- reference/arch/i386/kernel/init_task.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/init_task.c 2004-04-09 11:53:00.000000000 -0700 @@ -26,7 +26,7 @@ EXPORT_SYMBOL(init_mm); */ union thread_union init_thread_union __attribute__((__section__(".data.init_task"))) = - { INIT_THREAD_INFO(init_task) }; + { INIT_THREAD_INFO(init_task, init_thread_union) }; /* * Initial task structure. @@ -44,5 +44,5 @@ EXPORT_SYMBOL(init_task); * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ -struct tss_struct init_tss[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = INIT_TSS }; +struct tss_struct init_tss[NR_CPUS] __attribute__((__section__(".data.tss"))) = { [0 ... 
NR_CPUS-1] = INIT_TSS }; diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/io_apic.c current/arch/i386/kernel/io_apic.c --- reference/arch/i386/kernel/io_apic.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/io_apic.c 2004-04-09 13:23:20.000000000 -0700 @@ -317,8 +317,7 @@ struct irq_cpu_info { #define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask) -#define CPU_TO_PACKAGEINDEX(i) \ - ((physical_balance && i > cpu_sibling_map[i]) ? cpu_sibling_map[i] : i) +#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i])) #define MAX_BALANCED_IRQ_INTERVAL (5*HZ) #define MIN_BALANCED_IRQ_INTERVAL (HZ/2) @@ -401,7 +400,8 @@ static void do_irq_balance(void) unsigned long max_cpu_irq = 0, min_cpu_irq = (~0); unsigned long move_this_load = 0; int max_loaded = 0, min_loaded = 0; - unsigned long useful_load_threshold = balanced_irq_interval + 10; + int load; + unsigned long useful_load_threshold = balanced_irq_interval / 10; int selected_irq; int tmp_loaded, first_attempt = 1; unsigned long tmp_cpu_irq; @@ -452,7 +452,7 @@ static void do_irq_balance(void) for (i = 0; i < NR_CPUS; i++) { if (!cpu_online(i)) continue; - if (physical_balance && i > cpu_sibling_map[i]) + if (i != CPU_TO_PACKAGEINDEX(i)) continue; if (min_cpu_irq > CPU_IRQ(i)) { min_cpu_irq = CPU_IRQ(i); @@ -471,7 +471,7 @@ tryanothercpu: for (i = 0; i < NR_CPUS; i++) { if (!cpu_online(i)) continue; - if (physical_balance && i > cpu_sibling_map[i]) + if (i != CPU_TO_PACKAGEINDEX(i)) continue; if (max_cpu_irq <= CPU_IRQ(i)) continue; @@ -551,9 +551,14 @@ tryanotherirq: * We seek the least loaded sibling by making the comparison * (A+B)/2 vs B */ - if (physical_balance && (CPU_IRQ(min_loaded) >> 1) > - CPU_IRQ(cpu_sibling_map[min_loaded])) - min_loaded = cpu_sibling_map[min_loaded]; + load = CPU_IRQ(min_loaded) >> 1; + for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) { + if (load > CPU_IRQ(j)) { + /* This won't change cpu_sibling_map[min_loaded] */ + load = 
CPU_IRQ(j); + min_loaded = j; + } + } cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]); target_cpu_mask = cpumask_of_cpu(min_loaded); @@ -1631,8 +1636,6 @@ void disable_IO_APIC(void) * Clear the IO-APIC before rebooting: */ clear_IO_APIC(); - - disconnect_bsp_APIC(); } /* diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/irq.c current/arch/i386/kernel/irq.c --- reference/arch/i386/kernel/irq.c 2004-03-11 14:33:36.000000000 -0800 +++ current/arch/i386/kernel/irq.c 2004-04-09 11:53:01.000000000 -0700 @@ -75,6 +75,14 @@ irq_desc_t irq_desc[NR_IRQS] __cacheline static void register_irq_proc (unsigned int irq); /* + * per-CPU IRQ handling stacks + */ +#ifdef CONFIG_4KSTACKS +union irq_ctx *hardirq_ctx[NR_CPUS]; +union irq_ctx *softirq_ctx[NR_CPUS]; +#endif + +/* * Special irq handlers. */ @@ -213,7 +221,7 @@ inline void synchronize_irq(unsigned int * waste of time and is not what some drivers would * prefer. */ -int handle_IRQ_event(unsigned int irq, +asmlinkage int handle_IRQ_event(unsigned int irq, struct pt_regs *regs, struct irqaction *action) { int status = 1; /* Force the "do bottom halves" bit */ @@ -436,7 +444,7 @@ asmlinkage unsigned int do_IRQ(struct pt __asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (THREAD_SIZE - 1)); - if (unlikely(esp < (sizeof(struct thread_info) + 1024))) { + if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) { printk("do_IRQ: stack overflow: %ld\n", esp - sizeof(struct thread_info)); dump_stack(); @@ -484,11 +492,70 @@ asmlinkage unsigned int do_IRQ(struct pt * useful for irq hardware that does not mask cleanly in an * SMP environment. */ +#ifdef CONFIG_4KSTACKS + + for (;;) { + irqreturn_t action_ret; + u32 *isp; + union irq_ctx * curctx; + union irq_ctx * irqctx; + + curctx = (union irq_ctx *) current_thread_info(); + irqctx = hardirq_ctx[smp_processor_id()]; + + spin_unlock(&desc->lock); + + /* + * this is where we switch to the IRQ stack. 
However, if we are already using + * the IRQ stack (because we interrupted a hardirq handler) we can't do that + * and just have to keep using the current stack (which is the irq stack already + * after all) + */ + + if (curctx == irqctx) + action_ret = handle_IRQ_event(irq, ®s, action); + else { + /* build the stack frame on the IRQ stack */ + isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); + irqctx->tinfo.task = curctx->tinfo.task; + irqctx->tinfo.real_stack = curctx->tinfo.real_stack; + irqctx->tinfo.virtual_stack = curctx->tinfo.virtual_stack; + irqctx->tinfo.previous_esp = current_stack_pointer(); + + *--isp = (u32) action; + *--isp = (u32) ®s; + *--isp = (u32) irq; + + asm volatile( + " xchgl %%ebx,%%esp \n" + " call handle_IRQ_event \n" + " xchgl %%ebx,%%esp \n" + : "=a"(action_ret) + : "b"(isp) + : "memory", "cc", "edx", "ecx" + ); + + + } + spin_lock(&desc->lock); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + if (curctx != irqctx) + irqctx->tinfo.task = NULL; + if (likely(!(desc->status & IRQ_PENDING))) + break; + desc->status &= ~IRQ_PENDING; + } + +#else + for (;;) { irqreturn_t action_ret; spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, ®s, action); + spin_lock(&desc->lock); if (!noirqdebug) note_interrupt(irq, desc, action_ret); @@ -496,6 +563,7 @@ asmlinkage unsigned int do_IRQ(struct pt break; desc->status &= ~IRQ_PENDING; } +#endif desc->status &= ~IRQ_INPROGRESS; out: @@ -508,6 +576,8 @@ out: irq_exit(); + kgdb_process_breakpoint(); + return 1; } @@ -1053,3 +1123,81 @@ void init_irq_proc (void) register_irq_proc(i); } + +#ifdef CONFIG_4KSTACKS +static char softirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE))); +static char hardirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE))); + +/* + * allocate per-cpu stacks for hardirq and for softirq processing + */ +void irq_ctx_init(int cpu) +{ + union irq_ctx *irqctx; + + if (hardirq_ctx[cpu]) + return; + + irqctx = (union 
irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; + irqctx->tinfo.task = NULL; + irqctx->tinfo.exec_domain = NULL; + irqctx->tinfo.cpu = cpu; + irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; + irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); + + hardirq_ctx[cpu] = irqctx; + + irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE]; + irqctx->tinfo.task = NULL; + irqctx->tinfo.exec_domain = NULL; + irqctx->tinfo.cpu = cpu; + irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET; + irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); + + softirq_ctx[cpu] = irqctx; + + printk("CPU %u irqstacks, hard=%p soft=%p\n", + cpu,hardirq_ctx[cpu],softirq_ctx[cpu]); +} + +extern asmlinkage void __do_softirq(void); + +asmlinkage void do_softirq(void) +{ + unsigned long flags; + struct thread_info *curctx; + union irq_ctx *irqctx; + u32 *isp; + + if (in_interrupt()) + return; + + local_irq_save(flags); + + if (local_softirq_pending()) { + curctx = current_thread_info(); + irqctx = softirq_ctx[smp_processor_id()]; + irqctx->tinfo.task = curctx->task; + irqctx->tinfo.real_stack = curctx->real_stack; + irqctx->tinfo.virtual_stack = curctx->virtual_stack; + irqctx->tinfo.previous_esp = current_stack_pointer(); + + /* build the stack frame on the softirq stack */ + isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); + + + asm volatile( + " xchgl %%ebx,%%esp \n" + " call __do_softirq \n" + " movl %%ebx,%%esp \n" + : "=b"(isp) + : "0"(isp) + : "memory", "cc", "edx", "ecx", "eax" + ); + } + + local_irq_restore(flags); +} + +EXPORT_SYMBOL(do_softirq); +#endif diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/kgdb_stub.c current/arch/i386/kernel/kgdb_stub.c --- reference/arch/i386/kernel/kgdb_stub.c 1969-12-31 16:00:00.000000000 -0800 +++ current/arch/i386/kernel/kgdb_stub.c 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,2457 @@ +/* + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software 
Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + */ + +/* + * Copyright (c) 2000 VERITAS Software Corporation. + * + */ +/**************************************************************************** + * Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $ + * + * Module name: remcom.c $ + * Revision: 1.34 $ + * Date: 91/03/09 12:29:49 $ + * Contributor: Lake Stevens Instrument Division$ + * + * Description: low level support for gdb debugger. $ + * + * Considerations: only works on target hardware $ + * + * Written by: Glenn Engel $ + * Updated by: David Grothe + * Updated by: Robert Walsh + * Updated by: wangdi + * ModuleState: Experimental $ + * + * NOTES: See Below $ + * + * Modified for 386 by Jim Kingdon, Cygnus Support. + * Compatibility with 2.1.xx kernel by David Grothe + * + * Changes to allow auto initialization. All that is needed is that it + * be linked with the kernel and a break point (int 3) be executed. + * The header file defines BREAKPOINT to allow one to do + * this. It should also be possible, once the interrupt system is up, to + * call putDebugChar("+"). Once this is done, the remote debugger should + * get our attention by sending a ^C in a packet. George Anzinger + * + * Integrated into 2.2.5 kernel by Tigran Aivazian + * Added thread support, support for multiple processors, + * support for ia-32(x86) hardware debugging. + * Amit S. Kale ( akale@veritas.com ) + * + * Modified to support debugging over ethernet by Robert Walsh + * and wangdi , based on + * code by San Mehat. + * + * + * To enable debugger support, two things need to happen. 
One, a + * call to set_debug_traps() is necessary in order to allow any breakpoints + * or error conditions to be properly intercepted and reported to gdb. + * Two, a breakpoint needs to be generated to begin communication. This + * is most easily accomplished by a call to breakpoint(). Breakpoint() + * simulates a breakpoint by executing an int 3. + * + ************* + * + * The following gdb commands are supported: + * + * command function Return value + * + * g return the value of the CPU registers hex data or ENN + * G set the value of the CPU registers OK or ENN + * + * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN + * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN + * + * c Resume at current address SNN ( signal NN) + * cAA..AA Continue at address AA..AA SNN + * + * s Step one instruction SNN + * sAA..AA Step one instruction from AA..AA SNN + * + * k kill + * + * ? What was the last sigval ? SNN (signal NN) + * + * All commands and responses are sent with a packet which includes a + * checksum. A packet consists of + * + * $#. + * + * where + * :: + * :: < two hex digits computed as modulo 256 sum of > + * + * When a packet is received, it is first acknowledged with either '+' or '-'. + * '+' indicates a successful transfer. '-' indicates a failed transfer. 
+ * + * Example: + * + * Host: Reply: + * $m0,10#2a +$00010203040506070809101112131415#42 + * + ****************************************************************************/ +#define KGDB_VERSION "<20030915.1651.33>" +#include +#include +#include /* for strcpy */ +#include +#include +#include +#include +#include /* for linux pt_regs struct */ +#include +#include +#include +#include +#include +#include +#include +#include + +/************************************************************************ + * + * external low-level support routines + */ +typedef void (*Function) (void); /* pointer to a function */ + +/* Thread reference */ +typedef unsigned char threadref[8]; + +extern int tty_putDebugChar(int); /* write a single character */ +extern int tty_getDebugChar(void); /* read and return a single char */ +extern void tty_flushDebugChar(void); /* flush pending characters */ +extern int eth_putDebugChar(int); /* write a single character */ +extern int eth_getDebugChar(void); /* read and return a single char */ +extern void eth_flushDebugChar(void); /* flush pending characters */ + +/************************************************************************/ +/* BUFMAX defines the maximum number of characters in inbound/outbound buffers*/ +/* at least NUMREGBYTES*2 are needed for register packets */ +/* Longer buffer is needed to list all threads */ +#define BUFMAX 400 + +char *kgdb_version = KGDB_VERSION; + +/* debug > 0 prints ill-formed commands in valid packets & checksum errors */ +int debug_regs = 0; /* set to non-zero to print registers */ + +/* filled in by an external module */ +char *gdb_module_offsets; + +static const char hexchars[] = "0123456789abcdef"; + +/* Number of bytes of registers. */ +#define NUMREGBYTES 64 +/* + * Note that this register image is in a different order than + * the register image that Linux produces at interrupt time. + * + * Linux's register image is defined by struct pt_regs in ptrace.h. 
+ * Just why GDB uses a different order is a historical mystery. + */ +enum regnames { _EAX, /* 0 */ + _ECX, /* 1 */ + _EDX, /* 2 */ + _EBX, /* 3 */ + _ESP, /* 4 */ + _EBP, /* 5 */ + _ESI, /* 6 */ + _EDI, /* 7 */ + _PC /* 8 also known as eip */ , + _PS /* 9 also known as eflags */ , + _CS, /* 10 */ + _SS, /* 11 */ + _DS, /* 12 */ + _ES, /* 13 */ + _FS, /* 14 */ + _GS /* 15 */ +}; + +/*************************** ASSEMBLY CODE MACROS *************************/ +/* + * Put the error code here just in case the user cares. + * Likewise, the vector number here (since GDB only gets the signal + * number through the usual means, and that's not very specific). + * The called_from is the return address so he can tell how we entered kgdb. + * This will allow him to seperate out the various possible entries. + */ +#define REMOTE_DEBUG 0 /* set != to turn on printing (also available in info) */ + +#define PID_MAX PID_MAX_DEFAULT + +#ifdef CONFIG_SMP +void smp_send_nmi_allbutself(void); +#define IF_SMP(x) x +#undef MAX_NO_CPUS +#ifndef CONFIG_NO_KGDB_CPUS +#define CONFIG_NO_KGDB_CPUS 2 +#endif +#if CONFIG_NO_KGDB_CPUS > NR_CPUS +#define MAX_NO_CPUS NR_CPUS +#else +#define MAX_NO_CPUS CONFIG_NO_KGDB_CPUS +#endif +#define hold_init hold_on_sstep: 1, +#define MAX_CPU_MASK (unsigned long)((1LL << MAX_NO_CPUS) - 1LL) +#define NUM_CPUS num_online_cpus() +#else +#define IF_SMP(x) +#define hold_init +#undef MAX_NO_CPUS +#define MAX_NO_CPUS 1 +#define NUM_CPUS 1 +#endif +#define NOCPU (struct task_struct *)0xbad1fbad +/* *INDENT-OFF* */ +struct kgdb_info { + int used_malloc; + void *called_from; + long long entry_tsc; + int errcode; + int vector; + int print_debug_info; +#ifdef CONFIG_SMP + int hold_on_sstep; + struct { + volatile struct task_struct *task; + int pid; + int hold; + struct pt_regs *regs; + } cpus_waiting[MAX_NO_CPUS]; +#endif +} kgdb_info = {hold_init print_debug_info:REMOTE_DEBUG, vector:-1}; + +/* *INDENT-ON* */ + +#define used_m kgdb_info.used_malloc +/* + * This is 
little area we set aside to contain the stack we + * need to build to allow gdb to call functions. We use one + * per cpu to avoid locking issues. We will do all this work + * with interrupts off so that should take care of the protection + * issues. + */ +#define LOOKASIDE_SIZE 200 /* should be more than enough */ +#define MALLOC_MAX 200 /* Max malloc size */ +struct { + unsigned int esp; + int array[LOOKASIDE_SIZE]; +} fn_call_lookaside[MAX_NO_CPUS]; + +static int trap_cpu; +static unsigned int OLD_esp; + +#define END_OF_LOOKASIDE &fn_call_lookaside[trap_cpu].array[LOOKASIDE_SIZE] +#define IF_BIT 0x200 +#define TF_BIT 0x100 + +#define MALLOC_ROUND 8-1 + +static char malloc_array[MALLOC_MAX]; +IF_SMP(static void to_gdb(const char *mess)); +void * +malloc(int size) +{ + + if (size <= (MALLOC_MAX - used_m)) { + int old_used = used_m; + used_m += ((size + MALLOC_ROUND) & (~MALLOC_ROUND)); + return &malloc_array[old_used]; + } else { + return NULL; + } +} + +/* + * I/O dispatch functions... + * Based upon kgdboe, either call the ethernet + * handler or the serial one.. + */ +void +putDebugChar(int c) +{ + if (!kgdboe) { + tty_putDebugChar(c); + } else { + eth_putDebugChar(c); + } +} + +int +getDebugChar(void) +{ + if (!kgdboe) { + return tty_getDebugChar(); + } else { + return eth_getDebugChar(); + } +} + +void +flushDebugChar(void) +{ + if (!kgdboe) { + tty_flushDebugChar(); + } else { + eth_flushDebugChar(); + } +} + +/* + * Gdb calls functions by pushing agruments, including a return address + * on the stack and the adjusting EIP to point to the function. The + * whole assumption in GDB is that we are on a different stack than the + * one the "user" i.e. code that hit the break point, is on. This, of + * course is not true in the kernel. Thus various dodges are needed to + * do the call without directly messing with EIP (which we can not change + * as it is just a location and not a register. 
To adjust it would then + * require that we move every thing below EIP up or down as needed. This + * will not work as we may well have stack relative pointer on the stack + * (such as the pointer to regs, for example). + + * So here is what we do: + * We detect gdb attempting to store into the stack area and instead, store + * into the fn_call_lookaside.array at the same relative location as if it + * were the area ESP pointed at. We also trap ESP modifications + * and uses these to adjust fn_call_lookaside.esp. On entry + * fn_call_lookaside.esp will be set to point at the last entry in + * fn_call_lookaside.array. This allows us to check if it has changed, and + * if so, on exit, we add the registers we will use to do the move and a + * trap/ interrupt return exit sequence. We then adjust the eflags in the + * regs array (remember we now have a copy in the fn_call_lookaside.array) to + * kill the interrupt bit, AND we change EIP to point at our set up stub. + * As part of the register set up we preset the registers to point at the + * begining and end of the fn_call_lookaside.array, so all the stub needs to + * do is move words from the array to the stack until ESP= the desired value + * then do the rti. This will then transfer to the desired function with + * all the correct registers. Nifty huh? 
+ */ +extern asmlinkage void fn_call_stub(void); +extern asmlinkage void fn_rtn_stub(void); +/* *INDENT-OFF* */ +__asm__("fn_rtn_stub:\n\t" + "movl %eax,%esp\n\t" + "fn_call_stub:\n\t" + "1:\n\t" + "addl $-4,%ebx\n\t" + "movl (%ebx), %eax\n\t" + "pushl %eax\n\t" + "cmpl %esp,%ecx\n\t" + "jne 1b\n\t" + "popl %eax\n\t" + "popl %ebx\n\t" + "popl %ecx\n\t" + "iret \n\t"); +/* *INDENT-ON* */ +#define gdb_i386vector kgdb_info.vector +#define gdb_i386errcode kgdb_info.errcode +#define waiting_cpus kgdb_info.cpus_waiting +#define remote_debug kgdb_info.print_debug_info +#define hold_cpu(cpu) kgdb_info.cpus_waiting[cpu].hold +/* gdb locks */ + +#ifdef CONFIG_SMP +static int in_kgdb_called; +static spinlock_t waitlocks[MAX_NO_CPUS] = + {[0 ... MAX_NO_CPUS - 1] = SPIN_LOCK_UNLOCKED }; +/* + * The following array has the thread pointer of each of the "other" + * cpus. We make it global so it can be seen by gdb. + */ +volatile int in_kgdb_entry_log[MAX_NO_CPUS]; +volatile struct pt_regs *in_kgdb_here_log[MAX_NO_CPUS]; +/* +static spinlock_t continuelocks[MAX_NO_CPUS]; +*/ +spinlock_t kgdb_spinlock = SPIN_LOCK_UNLOCKED; +/* waiters on our spinlock plus us */ +static atomic_t spinlock_waiters = ATOMIC_INIT(1); +static int spinlock_count = 0; +static int spinlock_cpu = 0; +/* + * Note we use nested spin locks to account for the case where a break + * point is encountered when calling a function by user direction from + * kgdb. Also there is the memory exception recursion to account for. + * Well, yes, but this lets other cpus thru too. Lets add a + * cpu id to the lock. + */ +#define KGDB_SPIN_LOCK(x) if( spinlock_count == 0 || \ + spinlock_cpu != smp_processor_id()){\ + atomic_inc(&spinlock_waiters); \ + while (! 
spin_trylock(x)) {\ + in_kgdb(®s);\ + }\ + atomic_dec(&spinlock_waiters); \ + spinlock_count = 1; \ + spinlock_cpu = smp_processor_id(); \ + }else{ \ + spinlock_count++; \ + } +#define KGDB_SPIN_UNLOCK(x) if( --spinlock_count == 0) spin_unlock(x) +#else +unsigned kgdb_spinlock = 0; +#define KGDB_SPIN_LOCK(x) --*x +#define KGDB_SPIN_UNLOCK(x) ++*x +#endif + +int +hex(char ch) +{ + if ((ch >= 'a') && (ch <= 'f')) + return (ch - 'a' + 10); + if ((ch >= '0') && (ch <= '9')) + return (ch - '0'); + if ((ch >= 'A') && (ch <= 'F')) + return (ch - 'A' + 10); + return (-1); +} + +/* scan for the sequence $# */ +void +getpacket(char *buffer) +{ + unsigned char checksum; + unsigned char xmitcsum; + int i; + int count; + char ch; + + do { + /* wait around for the start character, ignore all other characters */ + while ((ch = (getDebugChar() & 0x7f)) != '$') ; + checksum = 0; + xmitcsum = -1; + + count = 0; + + /* now, read until a # or end of buffer is found */ + while (count < BUFMAX) { + ch = getDebugChar() & 0x7f; + if (ch == '#') + break; + checksum = checksum + ch; + buffer[count] = ch; + count = count + 1; + } + buffer[count] = 0; + + if (ch == '#') { + xmitcsum = hex(getDebugChar() & 0x7f) << 4; + xmitcsum += hex(getDebugChar() & 0x7f); + if ((remote_debug) && (checksum != xmitcsum)) { + printk + ("bad checksum. My count = 0x%x, sent=0x%x. buf=%s\n", + checksum, xmitcsum, buffer); + } + + if (checksum != xmitcsum) + putDebugChar('-'); /* failed checksum */ + else { + putDebugChar('+'); /* successful transfer */ + /* if a sequence char is present, reply the sequence ID */ + if (buffer[2] == ':') { + putDebugChar(buffer[0]); + putDebugChar(buffer[1]); + /* remove sequence chars from buffer */ + count = strlen(buffer); + for (i = 3; i <= count; i++) + buffer[i - 3] = buffer[i]; + } + } + } + } while (checksum != xmitcsum); + + if (remote_debug) + printk("R:%s\n", buffer); + flushDebugChar(); +} + +/* send the packet in buffer. 
*/ + +void +putpacket(char *buffer) +{ + unsigned char checksum; + int count; + char ch; + + /* $#. */ + + if (!kgdboe) { + do { + if (remote_debug) + printk("T:%s\n", buffer); + putDebugChar('$'); + checksum = 0; + count = 0; + + while ((ch = buffer[count])) { + putDebugChar(ch); + checksum += ch; + count += 1; + } + + putDebugChar('#'); + putDebugChar(hexchars[checksum >> 4]); + putDebugChar(hexchars[checksum % 16]); + flushDebugChar(); + + } while ((getDebugChar() & 0x7f) != '+'); + } else { + /* + * For udp, we can not transfer too much bytes once. + * We only transfer MAX_SEND_COUNT size bytes each time + */ + +#define MAX_SEND_COUNT 30 + + int send_count = 0, i = 0; + char send_buf[MAX_SEND_COUNT]; + + do { + if (remote_debug) + printk("T:%s\n", buffer); + putDebugChar('$'); + checksum = 0; + count = 0; + send_count = 0; + while ((ch = buffer[count])) { + if (send_count >= MAX_SEND_COUNT) { + for(i = 0; i < MAX_SEND_COUNT; i++) { + putDebugChar(send_buf[i]); + } + flushDebugChar(); + send_count = 0; + } else { + send_buf[send_count] = ch; + checksum += ch; + count ++; + send_count++; + } + } + for(i = 0; i < send_count; i++) + putDebugChar(send_buf[i]); + putDebugChar('#'); + putDebugChar(hexchars[checksum >> 4]); + putDebugChar(hexchars[checksum % 16]); + flushDebugChar(); + } while ((getDebugChar() & 0x7f) != '+'); + } +} + +static char remcomInBuffer[BUFMAX]; +static char remcomOutBuffer[BUFMAX]; +static short error; + +void +debug_error(char *format, char *parm) +{ + if (remote_debug) + printk(format, parm); +} + +static void +print_regs(struct pt_regs *regs) +{ + printk("EAX=%08lx ", regs->eax); + printk("EBX=%08lx ", regs->ebx); + printk("ECX=%08lx ", regs->ecx); + printk("EDX=%08lx ", regs->edx); + printk("\n"); + printk("ESI=%08lx ", regs->esi); + printk("EDI=%08lx ", regs->edi); + printk("EBP=%08lx ", regs->ebp); + printk("ESP=%08lx ", (long) ®s->esp); + printk("\n"); + printk(" DS=%08x ", regs->xds); + printk(" ES=%08x ", regs->xes); + printk(" 
SS=%08x ", __KERNEL_DS); + printk(" FL=%08lx ", regs->eflags); + printk("\n"); + printk(" CS=%08x ", regs->xcs); + printk(" IP=%08lx ", regs->eip); +#if 0 + printk(" FS=%08x ", regs->fs); + printk(" GS=%08x ", regs->gs); +#endif + printk("\n"); + +} /* print_regs */ + +#define NEW_esp fn_call_lookaside[trap_cpu].esp + +static void +regs_to_gdb_regs(int *gdb_regs, struct pt_regs *regs) +{ + gdb_regs[_EAX] = regs->eax; + gdb_regs[_EBX] = regs->ebx; + gdb_regs[_ECX] = regs->ecx; + gdb_regs[_EDX] = regs->edx; + gdb_regs[_ESI] = regs->esi; + gdb_regs[_EDI] = regs->edi; + gdb_regs[_EBP] = regs->ebp; + gdb_regs[_DS] = regs->xds; + gdb_regs[_ES] = regs->xes; + gdb_regs[_PS] = regs->eflags; + gdb_regs[_CS] = regs->xcs; + gdb_regs[_PC] = regs->eip; + /* Note, as we are a debugging the kernel, we will always + * trap in kernel code, this means no priviledge change, + * and so the pt_regs structure is not completely valid. In a non + * privilege change trap, only EFLAGS, CS and EIP are put on the stack, + * SS and ESP are not stacked, this means that the last 2 elements of + * pt_regs is not valid (they would normally refer to the user stack) + * also, using regs+1 is no good because you end up will a value that is + * 2 longs (8) too high. This used to cause stepping over functions + * to fail, so my fix is to use the address of regs->esp, which + * should point at the end of the stack frame. Note I have ignored + * completely exceptions that cause an error code to be stacked, such + * as double fault. Stuart Hughes, Zentropix. + * original code: gdb_regs[_ESP] = (int) (regs + 1) ; + + * this is now done on entry and moved to OLD_esp (as well as NEW_esp). 
+ */ + gdb_regs[_ESP] = NEW_esp; + gdb_regs[_SS] = __KERNEL_DS; + gdb_regs[_FS] = 0xFFFF; + gdb_regs[_GS] = 0xFFFF; +} /* regs_to_gdb_regs */ + +static void +gdb_regs_to_regs(int *gdb_regs, struct pt_regs *regs) +{ + regs->eax = gdb_regs[_EAX]; + regs->ebx = gdb_regs[_EBX]; + regs->ecx = gdb_regs[_ECX]; + regs->edx = gdb_regs[_EDX]; + regs->esi = gdb_regs[_ESI]; + regs->edi = gdb_regs[_EDI]; + regs->ebp = gdb_regs[_EBP]; + regs->xds = gdb_regs[_DS]; + regs->xes = gdb_regs[_ES]; + regs->eflags = gdb_regs[_PS]; + regs->xcs = gdb_regs[_CS]; + regs->eip = gdb_regs[_PC]; + NEW_esp = gdb_regs[_ESP]; /* keep the value */ +#if 0 /* can't change these */ + regs->esp = gdb_regs[_ESP]; + regs->xss = gdb_regs[_SS]; + regs->fs = gdb_regs[_FS]; + regs->gs = gdb_regs[_GS]; +#endif + +} /* gdb_regs_to_regs */ +extern void scheduling_functions_start_here(void); +extern void scheduling_functions_end_here(void); +#define first_sched ((unsigned long) scheduling_functions_start_here) +#define last_sched ((unsigned long) scheduling_functions_end_here) + +int thread_list = 0; + +void +get_gdb_regs(struct task_struct *p, struct pt_regs *regs, int *gdb_regs) +{ + unsigned long stack_page; + int count = 0; + IF_SMP(int i); + if (!p || p == current) { + regs_to_gdb_regs(gdb_regs, regs); + return; + } +#ifdef CONFIG_SMP + for (i = 0; i < MAX_NO_CPUS; i++) { + if (p == kgdb_info.cpus_waiting[i].task) { + regs_to_gdb_regs(gdb_regs, + kgdb_info.cpus_waiting[i].regs); + gdb_regs[_ESP] = + (int) &kgdb_info.cpus_waiting[i].regs->esp; + + return; + } + } +#endif + memset(gdb_regs, 0, NUMREGBYTES); + gdb_regs[_ESP] = p->thread.esp; + gdb_regs[_PC] = p->thread.eip; + gdb_regs[_EBP] = *(int *) gdb_regs[_ESP]; + gdb_regs[_EDI] = *(int *) (gdb_regs[_ESP] + 4); + gdb_regs[_ESI] = *(int *) (gdb_regs[_ESP] + 8); + +/* + * This code is to give a more informative notion of where a process + * is waiting. It is used only when the user asks for a thread info + * list. 
If he then switches to the thread, s/he will find the task + * is in schedule, but a back trace should show the same info we come + * up with. This code was shamelessly purloined from process.c. It was + * then enhanced to provide more registers than simply the program + * counter. + */ + + if (!thread_list) { + return; + } + + if (p->state == TASK_RUNNING) + return; + stack_page = (unsigned long) p->thread_info; + if (gdb_regs[_ESP] < stack_page || gdb_regs[_ESP] > 8188 + stack_page) + return; + /* include/asm-i386/system.h:switch_to() pushes ebp last. */ + do { + if (gdb_regs[_EBP] < stack_page || + gdb_regs[_EBP] > 8184 + stack_page) + return; + gdb_regs[_PC] = *(unsigned long *) (gdb_regs[_EBP] + 4); + gdb_regs[_ESP] = gdb_regs[_EBP] + 8; + gdb_regs[_EBP] = *(unsigned long *) gdb_regs[_EBP]; + if (gdb_regs[_PC] < first_sched || gdb_regs[_PC] >= last_sched) + return; + } while (count++ < 16); + return; +} + +/* Indicate to caller of mem2hex or hex2mem that there has been an + error. */ +static volatile int mem_err = 0; +static volatile int mem_err_expected = 0; +static volatile int mem_err_cnt = 0; +static int garbage_loc = -1; + +int +get_char(char *addr) +{ + return *addr; +} + +void +set_char(char *addr, int val, int may_fault) +{ + /* + * This code traps references to the area mapped to the kernel + * stack as given by the regs and, instead, stores to the + * fn_call_lookaside[cpu].array + */ + if (may_fault && + (unsigned int) addr < OLD_esp && + ((unsigned int) addr > (OLD_esp - (unsigned int) LOOKASIDE_SIZE))) { + addr = (char *) END_OF_LOOKASIDE - ((char *) OLD_esp - addr); + } + *addr = val; +} + +/* convert the memory pointed to by mem into hex, placing result in buf */ +/* return a pointer to the last char put in buf (null) */ +/* If MAY_FAULT is non-zero, then we should set mem_err in response to + a fault; if zero treat a fault like any other fault in the stub. 
*/ +char * +mem2hex(char *mem, char *buf, int count, int may_fault) +{ + int i; + unsigned char ch; + + if (may_fault) { + mem_err_expected = 1; + mem_err = 0; + } + for (i = 0; i < count; i++) { + /* printk("%lx = ", mem) ; */ + + ch = get_char(mem++); + + /* printk("%02x\n", ch & 0xFF) ; */ + if (may_fault && mem_err) { + if (remote_debug) + printk("Mem fault fetching from addr %lx\n", + (long) (mem - 1)); + *buf = 0; /* truncate buffer */ + return (buf); + } + *buf++ = hexchars[ch >> 4]; + *buf++ = hexchars[ch % 16]; + } + *buf = 0; + if (may_fault) + mem_err_expected = 0; + return (buf); +} + +/* convert the hex array pointed to by buf into binary to be placed in mem */ +/* return a pointer to the character AFTER the last byte written */ +/* NOTE: We use the may fault flag to also indicate if the write is to + * the registers (0) or "other" memory (!=0) + */ +char * +hex2mem(char *buf, char *mem, int count, int may_fault) +{ + int i; + unsigned char ch; + + if (may_fault) { + mem_err_expected = 1; + mem_err = 0; + } + for (i = 0; i < count; i++) { + ch = hex(*buf++) << 4; + ch = ch + hex(*buf++); + set_char(mem++, ch, may_fault); + + if (may_fault && mem_err) { + if (remote_debug) + printk("Mem fault storing to addr %lx\n", + (long) (mem - 1)); + return (mem); + } + } + if (may_fault) + mem_err_expected = 0; + return (mem); +} + +/**********************************************/ +/* WHILE WE FIND NICE HEX CHARS, BUILD AN INT */ +/* RETURN NUMBER OF CHARS PROCESSED */ +/**********************************************/ +int +hexToInt(char **ptr, int *intValue) +{ + int numChars = 0; + int hexValue; + + *intValue = 0; + + while (**ptr) { + hexValue = hex(**ptr); + if (hexValue >= 0) { + *intValue = (*intValue << 4) | hexValue; + numChars++; + } else + break; + + (*ptr)++; + } + + return (numChars); +} + +#define stubhex(h) hex(h) +#ifdef old_thread_list + +static int +stub_unpack_int(char *buff, int fieldlength) +{ + int nibble; + int retval = 0; + + while 
(fieldlength) { + nibble = stubhex(*buff++); + retval |= nibble; + fieldlength--; + if (fieldlength) + retval = retval << 4; + } + return retval; +} +#endif +static char * +pack_hex_byte(char *pkt, int byte) +{ + *pkt++ = hexchars[(byte >> 4) & 0xf]; + *pkt++ = hexchars[(byte & 0xf)]; + return pkt; +} + +#define BUF_THREAD_ID_SIZE 16 + +static char * +pack_threadid(char *pkt, threadref * id) +{ + char *limit; + unsigned char *altid; + + altid = (unsigned char *) id; + limit = pkt + BUF_THREAD_ID_SIZE; + while (pkt < limit) + pkt = pack_hex_byte(pkt, *altid++); + return pkt; +} + +#ifdef old_thread_list +static char * +unpack_byte(char *buf, int *value) +{ + *value = stub_unpack_int(buf, 2); + return buf + 2; +} + +static char * +unpack_threadid(char *inbuf, threadref * id) +{ + char *altref; + char *limit = inbuf + BUF_THREAD_ID_SIZE; + int x, y; + + altref = (char *) id; + + while (inbuf < limit) { + x = stubhex(*inbuf++); + y = stubhex(*inbuf++); + *altref++ = (x << 4) | y; + } + return inbuf; +} +#endif +void +int_to_threadref(threadref * id, int value) +{ + unsigned char *scan; + + scan = (unsigned char *) id; + { + int i = 4; + while (i--) + *scan++ = 0; + } + *scan++ = (value >> 24) & 0xff; + *scan++ = (value >> 16) & 0xff; + *scan++ = (value >> 8) & 0xff; + *scan++ = (value & 0xff); +} +int +int_to_hex_v(unsigned char * id, int value) +{ + unsigned char *start = id; + int shift; + int ch; + + for (shift = 28; shift >= 0; shift -= 4) { + if ((ch = (value >> shift) & 0xf) || (id != start)) { + *id = hexchars[ch]; + id++; + } + } + if (id == start) + *id++ = '0'; + return id - start; +} +#ifdef old_thread_list + +static int +threadref_to_int(threadref * ref) +{ + int i, value = 0; + unsigned char *scan; + + scan = (char *) ref; + scan += 4; + i = 4; + while (i-- > 0) + value = (value << 8) | ((*scan++) & 0xff); + return value; +} +#endif +static int +cmp_str(char *s1, char *s2, int count) +{ + while (count--) { + if (*s1++ != *s2++) + return 0; + } + return 1; 
+} + +#if 1 /* this is a hold over from 2.4 where O(1) was "sometimes" */ +extern struct task_struct *kgdb_get_idle(int cpu); +#define idle_task(cpu) kgdb_get_idle(cpu) +#else +#define idle_task(cpu) init_tasks[cpu] +#endif + +extern int kgdb_pid_init_done; + +struct task_struct * +getthread(int pid) +{ + struct task_struct *thread; + if (pid >= PID_MAX && pid <= (PID_MAX + MAX_NO_CPUS)) { + + return idle_task(pid - PID_MAX); + } else { + /* + * find_task_by_pid is relatively safe all the time + * Other pid functions require lock downs which imply + * that we may be interrupting them (as we get here + * in the middle of most any lock down). + * Still we don't want to call until the table exists! + */ + if (kgdb_pid_init_done){ + thread = find_task_by_pid(pid); + if (thread) { + return thread; + } + } + } + return NULL; +} +/* *INDENT-OFF* */ +struct hw_breakpoint { + unsigned enabled; + unsigned type; + unsigned len; + unsigned addr; +} breakinfo[4] = { {enabled:0}, + {enabled:0}, + {enabled:0}, + {enabled:0}}; +/* *INDENT-ON* */ +unsigned hw_breakpoint_status; +void +correct_hw_break(void) +{ + int breakno; + int correctit; + int breakbit; + unsigned dr7; + + asm volatile ("movl %%db7, %0\n":"=r" (dr7) + :); + /* *INDENT-OFF* */ + do { + unsigned addr0, addr1, addr2, addr3; + asm volatile ("movl %%db0, %0\n" + "movl %%db1, %1\n" + "movl %%db2, %2\n" + "movl %%db3, %3\n" + :"=r" (addr0), "=r"(addr1), + "=r"(addr2), "=r"(addr3) + :); + } while (0); + /* *INDENT-ON* */ + correctit = 0; + for (breakno = 0; breakno < 3; breakno++) { + breakbit = 2 << (breakno << 1); + if (!(dr7 & breakbit) && breakinfo[breakno].enabled) { + correctit = 1; + dr7 |= breakbit; + dr7 &= ~(0xf0000 << (breakno << 2)); + dr7 |= (((breakinfo[breakno].len << 2) | + breakinfo[breakno].type) << 16) << + (breakno << 2); + switch (breakno) { + case 0: + asm volatile ("movl %0, %%dr0\n"::"r" + (breakinfo[breakno].addr)); + break; + + case 1: + asm volatile ("movl %0, %%dr1\n"::"r" + 
(breakinfo[breakno].addr)); + break; + + case 2: + asm volatile ("movl %0, %%dr2\n"::"r" + (breakinfo[breakno].addr)); + break; + + case 3: + asm volatile ("movl %0, %%dr3\n"::"r" + (breakinfo[breakno].addr)); + break; + } + } else if ((dr7 & breakbit) && !breakinfo[breakno].enabled) { + correctit = 1; + dr7 &= ~breakbit; + dr7 &= ~(0xf0000 << (breakno << 2)); + } + } + if (correctit) { + asm volatile ("movl %0, %%db7\n"::"r" (dr7)); + } +} + +int +remove_hw_break(unsigned breakno) +{ + if (!breakinfo[breakno].enabled) { + return -1; + } + breakinfo[breakno].enabled = 0; + return 0; +} + +int +set_hw_break(unsigned breakno, unsigned type, unsigned len, unsigned addr) +{ + if (breakinfo[breakno].enabled) { + return -1; + } + breakinfo[breakno].enabled = 1; + breakinfo[breakno].type = type; + breakinfo[breakno].len = len; + breakinfo[breakno].addr = addr; + return 0; +} + +#ifdef CONFIG_SMP +static int in_kgdb_console = 0; + +int +in_kgdb(struct pt_regs *regs) +{ + unsigned flags; + int cpu = smp_processor_id(); + in_kgdb_called = 1; + if (!spin_is_locked(&kgdb_spinlock)) { + if (in_kgdb_here_log[cpu] || /* we are holding this cpu */ + in_kgdb_console) { /* or we are doing slow i/o */ + return 1; + } + return 0; + } + + /* As I see it the only reason not to let all cpus spin on + * the same spin_lock is to allow selected ones to proceed. + * This would be a good thing, so we leave it this way. + * Maybe someday.... Done ! + + * in_kgdb() is called from an NMI so we don't pretend + * to have any resources, like printk() for example. + */ + + kgdb_local_irq_save(flags); /* only local here, to avoid hanging */ + /* + * log arival of this cpu + * The NMI keeps on ticking. 
Protect against recurring more + * than once, and ignor the cpu that has the kgdb lock + */ + in_kgdb_entry_log[cpu]++; + in_kgdb_here_log[cpu] = regs; + if (cpu == spinlock_cpu || waiting_cpus[cpu].task) + goto exit_in_kgdb; + + /* + * For protection of the initilization of the spin locks by kgdb + * it locks the kgdb spinlock before it gets the wait locks set + * up. We wait here for the wait lock to be taken. If the + * kgdb lock goes away first?? Well, it could be a slow exit + * sequence where the wait lock is removed prior to the kgdb lock + * so if kgdb gets unlocked, we just exit. + */ + + while (spin_is_locked(&kgdb_spinlock) && + !spin_is_locked(waitlocks + cpu)) ; + if (!spin_is_locked(&kgdb_spinlock)) + goto exit_in_kgdb; + + waiting_cpus[cpu].task = current; + waiting_cpus[cpu].pid = (current->pid) ? : (PID_MAX + cpu); + waiting_cpus[cpu].regs = regs; + + spin_unlock_wait(waitlocks + cpu); + + /* + * log departure of this cpu + */ + waiting_cpus[cpu].task = 0; + waiting_cpus[cpu].pid = 0; + waiting_cpus[cpu].regs = 0; + correct_hw_break(); + exit_in_kgdb: + in_kgdb_here_log[cpu] = 0; + kgdb_local_irq_restore(flags); + return 1; + /* + spin_unlock(continuelocks + smp_processor_id()); + */ +} + +void +smp__in_kgdb(struct pt_regs regs) +{ + ack_APIC_irq(); + in_kgdb(®s); +} +#else +int +in_kgdb(struct pt_regs *regs) +{ + return (kgdb_spinlock); +} +#endif + +void +printexceptioninfo(int exceptionNo, int errorcode, char *buffer) +{ + unsigned dr6; + int i; + switch (exceptionNo) { + case 1: /* debug exception */ + break; + case 3: /* breakpoint */ + sprintf(buffer, "Software breakpoint"); + return; + default: + sprintf(buffer, "Details not available"); + return; + } + asm volatile ("movl %%db6, %0\n":"=r" (dr6) + :); + if (dr6 & 0x4000) { + sprintf(buffer, "Single step"); + return; + } + for (i = 0; i < 4; ++i) { + if (dr6 & (1 << i)) { + sprintf(buffer, "Hardware breakpoint %d", i); + return; + } + } + sprintf(buffer, "Unknown trap"); + return; +} + +/* + 
* This function does all command procesing for interfacing to gdb. + * + * NOTE: The INT nn instruction leaves the state of the interrupt + * enable flag UNCHANGED. That means that when this routine + * is entered via a breakpoint (INT 3) instruction from code + * that has interrupts enabled, then interrupts will STILL BE + * enabled when this routine is entered. The first thing that + * we do here is disable interrupts so as to prevent recursive + * entries and bothersome serial interrupts while we are + * trying to run the serial port in polled mode. + * + * For kernel version 2.1.xx the kgdb_cli() actually gets a spin lock so + * it is always necessary to do a restore_flags before returning + * so as to let go of that lock. + */ +int +kgdb_handle_exception(int exceptionVector, + int signo, int err_code, struct pt_regs *linux_regs) +{ + struct task_struct *usethread = NULL; + struct task_struct *thread_list_start = 0, *thread = NULL; + int addr, length; + int breakno, breaktype; + char *ptr; + int newPC; + threadref thref; + int threadid; + int thread_min = PID_MAX + MAX_NO_CPUS; +#ifdef old_thread_list + int maxthreads; +#endif + int nothreads; + unsigned long flags; + int gdb_regs[NUMREGBYTES / 4]; + int dr6; + IF_SMP(int entry_state = 0); /* 0, ok, 1, no nmi, 2 sync failed */ +#define NO_NMI 1 +#define NO_SYNC 2 +#define regs (*linux_regs) +#define NUMREGS NUMREGBYTES/4 + /* + * If the entry is not from the kernel then return to the Linux + * trap handler and let it process the interrupt normally. + */ + if ((linux_regs->eflags & VM_MASK) || (3 & linux_regs->xcs)) { + printk("ignoring non-kernel exception\n"); + print_regs(®s); + return (0); + } + /* + * If we're using eth mode, set the 'mode' in the netdevice. 
+ */ + + if (kgdboe) + netpoll_set_trap(1); + + kgdb_local_irq_save(flags); + + /* Get kgdb spinlock */ + + KGDB_SPIN_LOCK(&kgdb_spinlock); + rdtscll(kgdb_info.entry_tsc); + /* + * We depend on this spinlock and the NMI watch dog to control the + * other cpus. They will arrive at "in_kgdb()" as a result of the + * NMI and will wait there for the following spin locks to be + * released. + */ +#ifdef CONFIG_SMP + +#if 0 + if (cpu_callout_map & ~MAX_CPU_MASK) { + printk("kgdb : too many cpus, possibly not mapped" + " in contiguous space, change MAX_NO_CPUS" + " in kgdb_stub and make new kernel.\n" + " cpu_callout_map is %lx\n", cpu_callout_map); + goto exit_just_unlock; + } +#endif + if (spinlock_count == 1) { + int time = 0, end_time, dum = 0; + int i; + int cpu_logged_in[MAX_NO_CPUS] = {[0 ... MAX_NO_CPUS - 1] = (0) + }; + if (remote_debug) { + printk("kgdb : cpu %d entry, syncing others\n", + smp_processor_id()); + } + for (i = 0; i < MAX_NO_CPUS; i++) { + /* + * Use trylock as we may already hold the lock if + * we are holding the cpu. Net result is all + * locked. + */ + spin_trylock(&waitlocks[i]); + } + for (i = 0; i < MAX_NO_CPUS; i++) + cpu_logged_in[i] = 0; + /* + * Wait for their arrival. We know the watch dog is active if + * in_kgdb() has ever been called, as it is always called on a + * watchdog tick. + */ + rdtsc(dum, time); + end_time = time + 2; /* Note: we use the High order bits! 
*/ + i = 1; + if (num_online_cpus() > 1) { + int me_in_kgdb = in_kgdb_entry_log[smp_processor_id()]; + smp_send_nmi_allbutself(); + + while (i < num_online_cpus() && time != end_time) { + int j; + for (j = 0; j < MAX_NO_CPUS; j++) { + if (waiting_cpus[j].task && + waiting_cpus[j].task != NOCPU && + !cpu_logged_in[j]) { + i++; + cpu_logged_in[j] = 1; + if (remote_debug) { + printk + ("kgdb : cpu %d arrived at kgdb\n", + j); + } + break; + } else if (!waiting_cpus[j].task && + !cpu_online(j)) { + waiting_cpus[j].task = NOCPU; + cpu_logged_in[j] = 1; + waiting_cpus[j].hold = 1; + break; + } + if (!waiting_cpus[j].task && + in_kgdb_here_log[j]) { + + int wait = 100000; + while (wait--) ; + if (!waiting_cpus[j].task && + in_kgdb_here_log[j]) { + printk + ("kgdb : cpu %d stall" + " in in_kgdb\n", + j); + i++; + cpu_logged_in[j] = 1; + waiting_cpus[j].task = + (struct task_struct + *) 1; + } + } + } + + if (in_kgdb_entry_log[smp_processor_id()] > + (me_in_kgdb + 10)) { + break; + } + + rdtsc(dum, time); + } + if (i < num_online_cpus()) { + printk + ("kgdb : time out, proceeding without sync\n"); +#if 0 + printk("kgdb : Waiting_cpus: 0 = %d, 1 = %d\n", + waiting_cpus[0].task != 0, + waiting_cpus[1].task != 0); + printk("kgdb : Cpu_logged in: 0 = %d, 1 = %d\n", + cpu_logged_in[0], cpu_logged_in[1]); + printk + ("kgdb : in_kgdb_here_log in: 0 = %d, 1 = %d\n", + in_kgdb_here_log[0] != 0, + in_kgdb_here_log[1] != 0); +#endif + entry_state = NO_SYNC; + } else { +#if 0 + int ent = + in_kgdb_entry_log[smp_processor_id()] - + me_in_kgdb; + printk("kgdb : sync after %d entries\n", ent); +#endif + } + } else { + if (remote_debug) { + printk + ("kgdb : %d cpus, but watchdog not active\n" + "proceeding without locking down other cpus\n", + num_online_cpus()); + entry_state = NO_NMI; + } + } + } +#endif + + if (remote_debug) { + unsigned long *lp = (unsigned long *) &linux_regs; + + printk("handle_exception(exceptionVector=%d, " + "signo=%d, err_code=%d, linux_regs=%p)\n", + 
exceptionVector, signo, err_code, linux_regs); + if (debug_regs) { + print_regs(®s); + printk("Stk: %8lx %8lx %8lx %8lx" + " %8lx %8lx %8lx %8lx\n", + lp[0], lp[1], lp[2], lp[3], + lp[4], lp[5], lp[6], lp[7]); + printk(" %8lx %8lx %8lx %8lx" + " %8lx %8lx %8lx %8lx\n", + lp[8], lp[9], lp[10], lp[11], + lp[12], lp[13], lp[14], lp[15]); + printk(" %8lx %8lx %8lx %8lx " + "%8lx %8lx %8lx %8lx\n", + lp[16], lp[17], lp[18], lp[19], + lp[20], lp[21], lp[22], lp[23]); + printk(" %8lx %8lx %8lx %8lx " + "%8lx %8lx %8lx %8lx\n", + lp[24], lp[25], lp[26], lp[27], + lp[28], lp[29], lp[30], lp[31]); + } + } + + /* Disable hardware debugging while we are in kgdb */ + /* Get the debug register status register */ +/* *INDENT-OFF* */ + __asm__("movl %0,%%db7" + : /* no output */ + :"r"(0)); + + asm volatile ("movl %%db6, %0\n" + :"=r" (hw_breakpoint_status) + :); + +/* *INDENT-ON* */ + switch (exceptionVector) { + case 0: /* divide error */ + case 1: /* debug exception */ + case 2: /* NMI */ + case 3: /* breakpoint */ + case 4: /* overflow */ + case 5: /* bounds check */ + case 6: /* invalid opcode */ + case 7: /* device not available */ + case 8: /* double fault (errcode) */ + case 10: /* invalid TSS (errcode) */ + case 12: /* stack fault (errcode) */ + case 16: /* floating point error */ + case 17: /* alignment check (errcode) */ + default: /* any undocumented */ + break; + case 11: /* segment not present (errcode) */ + case 13: /* general protection (errcode) */ + case 14: /* page fault (special errcode) */ + case 19: /* cache flush denied */ + if (mem_err_expected) { + /* + * This fault occured because of the + * get_char or set_char routines. These + * two routines use either eax of edx to + * indirectly reference the location in + * memory that they are working with. + * For a page fault, when we return the + * instruction will be retried, so we + * have to make sure that these + * registers point to valid memory. 
+ */ + mem_err = 1; /* set mem error flag */ + mem_err_expected = 0; + mem_err_cnt++; /* helps in debugging */ + /* make valid address */ + regs.eax = (long) &garbage_loc; + /* make valid address */ + regs.edx = (long) &garbage_loc; + if (remote_debug) + printk("Return after memory error: " + "mem_err_cnt=%d\n", mem_err_cnt); + if (debug_regs) + print_regs(®s); + goto exit_kgdb; + } + break; + } + if (remote_debug) + printk("kgdb : entered kgdb on cpu %d\n", smp_processor_id()); + + gdb_i386vector = exceptionVector; + gdb_i386errcode = err_code; + kgdb_info.called_from = __builtin_return_address(0); +#ifdef CONFIG_SMP + /* + * OK, we can now communicate, lets tell gdb about the sync. + * but only if we had a problem. + */ + switch (entry_state) { + case NO_NMI: + to_gdb("NMI not active, other cpus not stopped\n"); + break; + case NO_SYNC: + to_gdb("Some cpus not stopped, see 'kgdb_info' for details\n"); + default:; + } + +#endif +/* + * Set up the gdb function call area. + */ + trap_cpu = smp_processor_id(); + OLD_esp = NEW_esp = (int) (&linux_regs->esp); + + IF_SMP(once_again:) + /* reply to host that an exception has occurred */ + remcomOutBuffer[0] = 'S'; + remcomOutBuffer[1] = hexchars[signo >> 4]; + remcomOutBuffer[2] = hexchars[signo % 16]; + remcomOutBuffer[3] = 0; + + putpacket(remcomOutBuffer); + + while (1 == 1) { + error = 0; + remcomOutBuffer[0] = 0; + getpacket(remcomInBuffer); + switch (remcomInBuffer[0]) { + case '?': + remcomOutBuffer[0] = 'S'; + remcomOutBuffer[1] = hexchars[signo >> 4]; + remcomOutBuffer[2] = hexchars[signo % 16]; + remcomOutBuffer[3] = 0; + break; + case 'd': + remote_debug = !(remote_debug); /* toggle debug flag */ + printk("Remote debug %s\n", + remote_debug ? 
"on" : "off"); + break; + case 'g': /* return the value of the CPU registers */ + get_gdb_regs(usethread, ®s, gdb_regs); + mem2hex((char *) gdb_regs, + remcomOutBuffer, NUMREGBYTES, 0); + break; + case 'G': /* set the value of the CPU registers - return OK */ + hex2mem(&remcomInBuffer[1], + (char *) gdb_regs, NUMREGBYTES, 0); + if (!usethread || usethread == current) { + gdb_regs_to_regs(gdb_regs, ®s); + strcpy(remcomOutBuffer, "OK"); + } else { + strcpy(remcomOutBuffer, "E00"); + } + break; + + case 'P':{ /* set the value of a single CPU register - + return OK */ + /* + * For some reason, gdb wants to talk about psudo + * registers (greater than 15). These may have + * meaning for ptrace, but for us it is safe to + * ignor them. We do this by dumping them into + * _GS which we also ignor, but do have memory for. + */ + int regno; + + ptr = &remcomInBuffer[1]; + regs_to_gdb_regs(gdb_regs, ®s); + if ((!usethread || usethread == current) && + hexToInt(&ptr, ®no) && + *ptr++ == '=' && (regno >= 0)) { + regno = + (regno >= NUMREGS ? _GS : regno); + hex2mem(ptr, (char *) &gdb_regs[regno], + 4, 0); + gdb_regs_to_regs(gdb_regs, ®s); + strcpy(remcomOutBuffer, "OK"); + break; + } + strcpy(remcomOutBuffer, "E01"); + break; + } + + /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */ + case 'm': + /* TRY TO READ %x,%x. IF SUCCEED, SET PTR = 0 */ + ptr = &remcomInBuffer[1]; + if (hexToInt(&ptr, &addr) && + (*(ptr++) == ',') && (hexToInt(&ptr, &length))) { + ptr = 0; + /* + * hex doubles the byte count + */ + if (length > (BUFMAX / 2)) + length = BUFMAX / 2; + mem2hex((char *) addr, + remcomOutBuffer, length, 1); + if (mem_err) { + strcpy(remcomOutBuffer, "E03"); + debug_error("memory fault\n", NULL); + } + } + + if (ptr) { + strcpy(remcomOutBuffer, "E01"); + debug_error + ("malformed read memory command: %s\n", + remcomInBuffer); + } + break; + + /* MAA..AA,LLLL: + Write LLLL bytes at address AA.AA return OK */ + case 'M': + /* TRY TO READ '%x,%x:'. 
IF SUCCEED, SET PTR = 0 */ + ptr = &remcomInBuffer[1]; + if (hexToInt(&ptr, &addr) && + (*(ptr++) == ',') && + (hexToInt(&ptr, &length)) && (*(ptr++) == ':')) { + hex2mem(ptr, (char *) addr, length, 1); + + if (mem_err) { + strcpy(remcomOutBuffer, "E03"); + debug_error("memory fault\n", NULL); + } else { + strcpy(remcomOutBuffer, "OK"); + } + + ptr = 0; + } + if (ptr) { + strcpy(remcomOutBuffer, "E02"); + debug_error + ("malformed write memory command: %s\n", + remcomInBuffer); + } + break; + case 'S': + remcomInBuffer[0] = 's'; + case 'C': + /* Csig;AA..AA where ;AA..AA is optional + * continue with signal + * Since signals are meaning less to us, delete that + * part and then fall into the 'c' code. + */ + ptr = &remcomInBuffer[1]; + length = 2; + while (*ptr && *ptr != ';') { + length++; + ptr++; + } + if (*ptr) { + do { + ptr++; + *(ptr - length++) = *ptr; + } while (*ptr); + } else { + remcomInBuffer[1] = 0; + } + + /* cAA..AA Continue at address AA..AA(optional) */ + /* sAA..AA Step one instruction from AA..AA(optional) */ + /* D detach, reply OK and then continue */ + case 'c': + case 's': + case 'D': + + /* try to read optional parameter, + pc unchanged if no parm */ + ptr = &remcomInBuffer[1]; + if (hexToInt(&ptr, &addr)) { + if (remote_debug) + printk("Changing EIP to 0x%x\n", addr); + + regs.eip = addr; + } + + newPC = regs.eip; + + /* clear the trace bit */ + regs.eflags &= 0xfffffeff; + + /* set the trace bit if we're stepping */ + if (remcomInBuffer[0] == 's') + regs.eflags |= 0x100; + + /* detach is a friendly version of continue. 
Note that + debugging is still enabled (e.g hit control C) + */ + if (remcomInBuffer[0] == 'D') { + strcpy(remcomOutBuffer, "OK"); + putpacket(remcomOutBuffer); + } + + if (remote_debug) { + printk("Resuming execution\n"); + print_regs(®s); + } + asm volatile ("movl %%db6, %0\n":"=r" (dr6) + :); + if (!(dr6 & 0x4000)) { + for (breakno = 0; breakno < 4; ++breakno) { + if (dr6 & (1 << breakno) && + (breakinfo[breakno].type == 0)) { + /* Set restore flag */ + regs.eflags |= 0x10000; + break; + } + } + } + + if (kgdboe) + netpoll_set_trap(0); + + correct_hw_break(); + asm volatile ("movl %0, %%db6\n"::"r" (0)); + goto exit_kgdb; + + /* kill the program */ + case 'k': /* do nothing */ + break; + + /* query */ + case 'q': + nothreads = 0; + switch (remcomInBuffer[1]) { + case 'f': + threadid = 1; + thread_list = 2; + thread_list_start = (usethread ? : current); + case 's': + if (!cmp_str(&remcomInBuffer[2], + "ThreadInfo", 10)) + break; + + remcomOutBuffer[nothreads++] = 'm'; + for (; threadid < PID_MAX + MAX_NO_CPUS; + threadid++) { + thread = getthread(threadid); + if (thread) { + nothreads += int_to_hex_v( + &remcomOutBuffer[ + nothreads], + threadid); + if (thread_min > threadid) + thread_min = threadid; + remcomOutBuffer[ + nothreads] = ','; + nothreads++; + if (nothreads > BUFMAX - 10) + break; + } + } + if (remcomOutBuffer[nothreads - 1] == 'm') { + remcomOutBuffer[nothreads - 1] = 'l'; + } else { + nothreads--; + } + remcomOutBuffer[nothreads] = 0; + break; + +#ifdef old_thread_list /* Old thread info request */ + case 'L': + /* List threads */ + thread_list = 2; + thread_list_start = (usethread ? 
: current); + unpack_byte(remcomInBuffer + 3, &maxthreads); + unpack_threadid(remcomInBuffer + 5, &thref); + do { + int buf_thread_limit = + (BUFMAX - 22) / BUF_THREAD_ID_SIZE; + if (maxthreads > buf_thread_limit) { + maxthreads = buf_thread_limit; + } + } while (0); + remcomOutBuffer[0] = 'q'; + remcomOutBuffer[1] = 'M'; + remcomOutBuffer[4] = '0'; + pack_threadid(remcomOutBuffer + 5, &thref); + + threadid = threadref_to_int(&thref); + for (nothreads = 0; + nothreads < maxthreads && + threadid < PID_MAX + MAX_NO_CPUS; + threadid++) { + thread = getthread(threadid); + if (thread) { + int_to_threadref(&thref, + threadid); + pack_threadid(remcomOutBuffer + + 21 + + nothreads * 16, + &thref); + nothreads++; + if (thread_min > threadid) + thread_min = threadid; + } + } + + if (threadid == PID_MAX + MAX_NO_CPUS) { + remcomOutBuffer[4] = '1'; + } + pack_hex_byte(remcomOutBuffer + 2, nothreads); + remcomOutBuffer[21 + nothreads * 16] = '\0'; + break; +#endif + case 'C': + /* Current thread id */ + remcomOutBuffer[0] = 'Q'; + remcomOutBuffer[1] = 'C'; + threadid = current->pid; + if (!threadid) { + /* + * idle thread + */ + for (threadid = PID_MAX; + threadid < PID_MAX + MAX_NO_CPUS; + threadid++) { + if (current == + idle_task(threadid - + PID_MAX)) + break; + } + } + int_to_threadref(&thref, threadid); + pack_threadid(remcomOutBuffer + 2, &thref); + remcomOutBuffer[18] = '\0'; + break; + + case 'E': + /* Print exception info */ + printexceptioninfo(exceptionVector, + err_code, remcomOutBuffer); + break; + case 'T':{ + char * nptr; + /* Thread extra info */ + if (!cmp_str(&remcomInBuffer[2], + "hreadExtraInfo,", 15)) { + break; + } + ptr = &remcomInBuffer[17]; + hexToInt(&ptr, &threadid); + thread = getthread(threadid); + nptr = &thread->comm[0]; + length = 0; + ptr = &remcomOutBuffer[0]; + do { + length++; + ptr = pack_hex_byte(ptr, *nptr++); + } while (*nptr && length < 16); + /* + * would like that 16 to be the size of + * task_struct.comm but don't know the + * 
syntax.. + */ + *ptr = 0; + } + } + break; + + /* task related */ + case 'H': + switch (remcomInBuffer[1]) { + case 'g': + ptr = &remcomInBuffer[2]; + hexToInt(&ptr, &threadid); + thread = getthread(threadid); + if (!thread) { + remcomOutBuffer[0] = 'E'; + remcomOutBuffer[1] = '\0'; + break; + } + /* + * Just in case I forget what this is all about, + * the "thread info" command to gdb causes it + * to ask for a thread list. It then switches + * to each thread and asks for the registers. + * For this (and only this) usage, we want to + * fudge the registers of tasks not on the run + * list (i.e. waiting) to show the routine that + * called schedule. Also, gdb, is a minimalist + * in that if the current thread is the last + * it will not re-read the info when done. + * This means that in this case we must show + * the real registers. So here is how we do it: + * Each entry we keep track of the min + * thread in the list (the last that gdb will) + * get info for. We also keep track of the + * starting thread. + * "thread_list" is cleared when switching back + * to the min thread if it is was current, or + * if it was not current, thread_list is set + * to 1. When the switch to current comes, + * if thread_list is 1, clear it, else do + * nothing. 
+ */ + usethread = thread; + if ((thread_list == 1) && + (thread == thread_list_start)) { + thread_list = 0; + } + if (thread_list && (threadid == thread_min)) { + if (thread == thread_list_start) { + thread_list = 0; + } else { + thread_list = 1; + } + } + /* follow through */ + case 'c': + remcomOutBuffer[0] = 'O'; + remcomOutBuffer[1] = 'K'; + remcomOutBuffer[2] = '\0'; + break; + } + break; + + /* Query thread status */ + case 'T': + ptr = &remcomInBuffer[1]; + hexToInt(&ptr, &threadid); + thread = getthread(threadid); + if (thread) { + remcomOutBuffer[0] = 'O'; + remcomOutBuffer[1] = 'K'; + remcomOutBuffer[2] = '\0'; + if (thread_min > threadid) + thread_min = threadid; + } else { + remcomOutBuffer[0] = 'E'; + remcomOutBuffer[1] = '\0'; + } + break; + + case 'Y': /* set up a hardware breakpoint */ + ptr = &remcomInBuffer[1]; + hexToInt(&ptr, &breakno); + ptr++; + hexToInt(&ptr, &breaktype); + ptr++; + hexToInt(&ptr, &length); + ptr++; + hexToInt(&ptr, &addr); + if (set_hw_break(breakno & 0x3, + breaktype & 0x3, + length & 0x3, addr) == 0) { + strcpy(remcomOutBuffer, "OK"); + } else { + strcpy(remcomOutBuffer, "ERROR"); + } + break; + + /* Remove hardware breakpoint */ + case 'y': + ptr = &remcomInBuffer[1]; + hexToInt(&ptr, &breakno); + if (remove_hw_break(breakno & 0x3) == 0) { + strcpy(remcomOutBuffer, "OK"); + } else { + strcpy(remcomOutBuffer, "ERROR"); + } + break; + + case 'r': /* reboot */ + strcpy(remcomOutBuffer, "OK"); + putpacket(remcomOutBuffer); + /*to_gdb("Rebooting\n"); */ + /* triplefault no return from here */ + { + static long no_idt[2]; + __asm__ __volatile__("lidt %0"::"m"(no_idt[0])); + BREAKPOINT; + } + + } /* switch */ + + /* reply to the request */ + putpacket(remcomOutBuffer); + } /* while(1==1) */ + /* + * reached by goto only. + */ + exit_kgdb: + /* + * Here is where we set up to trap a gdb function call. NEW_esp + * will be changed if we are trying to do this. 
We handle both + * adding and subtracting, thus allowing gdb to put grung on + * the stack which it removes later. + */ + if (NEW_esp != OLD_esp) { + int *ptr = END_OF_LOOKASIDE; + if (NEW_esp < OLD_esp) + ptr -= (OLD_esp - NEW_esp) / sizeof (int); + *--ptr = linux_regs->eflags; + *--ptr = linux_regs->xcs; + *--ptr = linux_regs->eip; + *--ptr = linux_regs->ecx; + *--ptr = linux_regs->ebx; + *--ptr = linux_regs->eax; + linux_regs->ecx = NEW_esp - (sizeof (int) * 6); + linux_regs->ebx = (unsigned int) END_OF_LOOKASIDE; + if (NEW_esp < OLD_esp) { + linux_regs->eip = (unsigned int) fn_call_stub; + } else { + linux_regs->eip = (unsigned int) fn_rtn_stub; + linux_regs->eax = NEW_esp; + } + linux_regs->eflags &= ~(IF_BIT | TF_BIT); + } +#ifdef CONFIG_SMP + /* + * Release gdb wait locks + * Sanity check time. Must have at least one cpu to run. Also single + * step must not be done if the current cpu is on hold. + */ + if (spinlock_count == 1) { + int ss_hold = (regs.eflags & 0x100) && kgdb_info.hold_on_sstep; + int cpu_avail = 0; + int i; + + for (i = 0; i < MAX_NO_CPUS; i++) { + if (!cpu_online(i)) + break; + if (!hold_cpu(i)) { + cpu_avail = 1; + } + } + /* + * Early in the bring up there will be NO cpus on line... + */ + if (!cpu_avail && !cpus_empty(cpu_online_map)) { + to_gdb("No cpus unblocked, see 'kgdb_info.hold_cpu'\n"); + goto once_again; + } + if (hold_cpu(smp_processor_id()) && (regs.eflags & 0x100)) { + to_gdb + ("Current cpu must be unblocked to single step\n"); + goto once_again; + } + if (!(ss_hold)) { + int i; + for (i = 0; i < MAX_NO_CPUS; i++) { + if (!hold_cpu(i)) { + spin_unlock(&waitlocks[i]); + } + } + } else { + spin_unlock(&waitlocks[smp_processor_id()]); + } + /* Release kgdb spinlock */ + KGDB_SPIN_UNLOCK(&kgdb_spinlock); + /* + * If this cpu is on hold, this is where we + * do it. Note, the NMI will pull us out of here, + * but will return as the above lock is not held. + * We will stay here till another cpu releases the lock for us. 
+ */ + spin_unlock_wait(waitlocks + smp_processor_id()); + kgdb_local_irq_restore(flags); + return (0); + } +#if 0 +exit_just_unlock: +#endif +#endif + /* Release kgdb spinlock */ + KGDB_SPIN_UNLOCK(&kgdb_spinlock); + kgdb_local_irq_restore(flags); + return (0); +} + +/* this function is used to set up exception handlers for tracing and + * breakpoints. + * This function is not needed as the above line does all that is needed. + * We leave it for backward compatitability... + */ +void +set_debug_traps(void) +{ + /* + * linux_debug_hook is defined in traps.c. We store a pointer + * to our own exception handler into it. + + * But really folks, every hear of labeled common, an old Fortran + * concept. Lots of folks can reference it and it is define if + * anyone does. Only one can initialize it at link time. We do + * this with the hook. See the statement above. No need for any + * executable code and it is ready as soon as the kernel is + * loaded. Very desirable in kernel debugging. + + linux_debug_hook = handle_exception ; + */ + + /* In case GDB is started before us, ack any packets (presumably + "$?#xx") sitting there. + putDebugChar ('+'); + + initialized = 1; + */ +} + +/* This function will generate a breakpoint exception. It is used at the + beginning of a program to sync up with a debugger and can be used + otherwise as a quick means to stop program execution and "break" into + the debugger. */ +/* But really, just use the BREAKPOINT macro. We will handle the int stuff + */ + +#ifdef later +/* + * possibly we should not go thru the traps.c code at all? Someday. 
+ */ +void +do_kgdb_int3(struct pt_regs *regs, long error_code) +{ + kgdb_handle_exception(3, 5, error_code, regs); + return; +} +#endif +#undef regs +#ifdef CONFIG_TRAP_BAD_SYSCALL_EXITS +asmlinkage void +bad_sys_call_exit(int stuff) +{ + struct pt_regs *regs = (struct pt_regs *) &stuff; + printk("Sys call %d return with %x preempt_count\n", + (int) regs->orig_eax, preempt_count()); +} +#endif +#ifdef CONFIG_STACK_OVERFLOW_TEST +#include +asmlinkage void +stack_overflow(void) +{ +#ifdef BREAKPOINT + BREAKPOINT; +#else + printk("Kernel stack overflow, looping forever\n"); +#endif + while (1) { + } +} +#endif + +#if defined(CONFIG_SMP) || defined(CONFIG_KGDB_CONSOLE) +char gdbconbuf[BUFMAX]; + +static void +kgdb_gdb_message(const char *s, unsigned count) +{ + int i; + int wcount; + char *bufptr; + /* + * This takes care of NMI while spining out chars to gdb + */ + IF_SMP(in_kgdb_console = 1); + gdbconbuf[0] = 'O'; + bufptr = gdbconbuf + 1; + while (count > 0) { + if ((count << 1) > (BUFMAX - 2)) { + wcount = (BUFMAX - 2) >> 1; + } else { + wcount = count; + } + count -= wcount; + for (i = 0; i < wcount; i++) { + bufptr = pack_hex_byte(bufptr, s[i]); + } + *bufptr = '\0'; + s += wcount; + + putpacket(gdbconbuf); + + } + IF_SMP(in_kgdb_console = 0); +} +#endif +#ifdef CONFIG_SMP +static void +to_gdb(const char *s) +{ + int count = 0; + while (s[count] && (count++ < BUFMAX)) ; + kgdb_gdb_message(s, count); +} +#endif +#ifdef CONFIG_KGDB_CONSOLE +#include +#include +#include +#include +#include + +void +kgdb_console_write(struct console *co, const char *s, unsigned count) +{ + + if (gdb_i386vector == -1) { + /* + * We have not yet talked to gdb. What to do... + * lets break, on continue we can do the write. + * But first tell him whats up. Uh, well no can do, + * as this IS the console. Oh well... + * We do need to wait or the messages will be lost. 
+ * Other option would be to tell the above code to + * ignore this breakpoint and do an auto return, + * but that might confuse gdb. Also this happens + * early enough in boot up that we don't have the traps + * set up yet, so... + */ + breakpoint(); + } + kgdb_gdb_message(s, count); +} + +/* + * ------------------------------------------------------------ + * Serial KGDB driver + * ------------------------------------------------------------ + */ + +static struct console kgdbcons = { + name:"kgdb", + write:kgdb_console_write, +#ifdef CONFIG_KGDB_USER_CONSOLE + device:kgdb_console_device, +#endif + flags:CON_PRINTBUFFER | CON_ENABLED, + index:-1, +}; + +/* + * The trick here is that this file gets linked before printk.o + * That means we get to peer at the console info in the command + * line before it does. If we are up, we register, otherwise, + * do nothing. By returning 0, we allow printk to look also. + */ +static int kgdb_console_enabled; + +int __init +kgdb_console_init(char *str) +{ + if ((strncmp(str, "kgdb", 4) == 0) || (strncmp(str, "gdb", 3) == 0)) { + register_console(&kgdbcons); + kgdb_console_enabled = 1; + } + return 0; /* let others look at the string */ +} + +__setup("console=", kgdb_console_init); + +#ifdef CONFIG_KGDB_USER_CONSOLE +static kdev_t kgdb_console_device(struct console *c); +/* This stuff sort of works, but it knocks out telnet devices + * we are leaving it here in case we (or you) find time to figure it out + * better.. + */ + +/* + * We need a real char device as well for when the console is opened for user + * space activities. + */ + +static int +kgdb_consdev_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static ssize_t +kgdb_consdev_write(struct file *file, const char *buf, + size_t count, loff_t * ppos) +{ + int size, ret = 0; + static char kbuf[128]; + static DECLARE_MUTEX(sem); + + /* We are not reentrant... 
*/ + if (down_interruptible(&sem)) + return -ERESTARTSYS; + + while (count > 0) { + /* need to copy the data from user space */ + size = count; + if (size > sizeof (kbuf)) + size = sizeof (kbuf); + if (copy_from_user(kbuf, buf, size)) { + ret = -EFAULT; + break;; + } + kgdb_console_write(&kgdbcons, kbuf, size); + count -= size; + ret += size; + buf += size; + } + + up(&sem); + + return ret; +} + +struct file_operations kgdb_consdev_fops = { + open:kgdb_consdev_open, + write:kgdb_consdev_write +}; +static kdev_t +kgdb_console_device(struct console *c) +{ + return MKDEV(TTYAUX_MAJOR, 1); +} + +/* + * This routine gets called from the serial stub in the i386/lib + * This is so it is done late in bring up (just before the console open). + */ +void +kgdb_console_finit(void) +{ + if (kgdb_console_enabled) { + char *cptr = cdevname(MKDEV(TTYAUX_MAJOR, 1)); + char *cp = cptr; + while (*cptr && *cptr != '(') + cptr++; + *cptr = 0; + unregister_chrdev(TTYAUX_MAJOR, cp); + register_chrdev(TTYAUX_MAJOR, "kgdb", &kgdb_consdev_fops); + } +} +#endif +#endif +#ifdef CONFIG_KGDB_TS +#include /* time stamp code */ +#include /* in_interrupt */ +#ifdef CONFIG_KGDB_TS_64 +#define DATA_POINTS 64 +#endif +#ifdef CONFIG_KGDB_TS_128 +#define DATA_POINTS 128 +#endif +#ifdef CONFIG_KGDB_TS_256 +#define DATA_POINTS 256 +#endif +#ifdef CONFIG_KGDB_TS_512 +#define DATA_POINTS 512 +#endif +#ifdef CONFIG_KGDB_TS_1024 +#define DATA_POINTS 1024 +#endif +#ifndef DATA_POINTS +#define DATA_POINTS 128 /* must be a power of two */ +#endif +#define INDEX_MASK (DATA_POINTS - 1) +#if (INDEX_MASK & DATA_POINTS) +#error "CONFIG_KGDB_TS_COUNT must be a power of 2" +#endif +struct kgdb_and_then_struct { +#ifdef CONFIG_SMP + int on_cpu; +#endif + struct task_struct *task; + long long at_time; + int from_ln; + char *in_src; + void *from; + int *with_shpf; + int data0; + int data1; +}; +struct kgdb_and_then_struct2 { +#ifdef CONFIG_SMP + int on_cpu; +#endif + struct task_struct *task; + long long at_time; + int 
from_ln; + char *in_src; + void *from; + int *with_shpf; + struct task_struct *t1; + struct task_struct *t2; +}; +struct kgdb_and_then_struct kgdb_data[DATA_POINTS]; + +struct kgdb_and_then_struct *kgdb_and_then = &kgdb_data[0]; +int kgdb_and_then_count; + +void +kgdb_tstamp(int line, char *source, int data0, int data1) +{ + static spinlock_t ts_spin = SPIN_LOCK_UNLOCKED; + int flags; + kgdb_local_irq_save(flags); + spin_lock(&ts_spin); + rdtscll(kgdb_and_then->at_time); +#ifdef CONFIG_SMP + kgdb_and_then->on_cpu = smp_processor_id(); +#endif + kgdb_and_then->task = current; + kgdb_and_then->from_ln = line; + kgdb_and_then->in_src = source; + kgdb_and_then->from = __builtin_return_address(0); + kgdb_and_then->with_shpf = (int *) (((flags & IF_BIT) >> 9) | + (preempt_count() << 8)); + kgdb_and_then->data0 = data0; + kgdb_and_then->data1 = data1; + kgdb_and_then = &kgdb_data[++kgdb_and_then_count & INDEX_MASK]; + spin_unlock(&ts_spin); + kgdb_local_irq_restore(flags); +#ifdef CONFIG_PREEMPT + +#endif + return; +} +#endif +typedef int gdb_debug_hook(int exceptionVector, + int signo, int err_code, struct pt_regs *linux_regs); +gdb_debug_hook *linux_debug_hook = &kgdb_handle_exception; /* histerical reasons... 
*/ + +static int kgdb_need_breakpoint[NR_CPUS]; + +void kgdb_schedule_breakpoint(void) +{ + kgdb_need_breakpoint[smp_processor_id()] = 1; +} + +void kgdb_process_breakpoint(void) +{ + /* + * Handle a breakpoint queued from inside network driver code + * to avoid reentrancy issues + */ + if (kgdb_need_breakpoint[smp_processor_id()]) { + kgdb_need_breakpoint[smp_processor_id()] = 0; + BREAKPOINT; + } +} + diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/ldt.c current/arch/i386/kernel/ldt.c --- reference/arch/i386/kernel/ldt.c 2003-10-01 11:40:40.000000000 -0700 +++ current/arch/i386/kernel/ldt.c 2004-04-09 11:53:00.000000000 -0700 @@ -2,7 +2,7 @@ * linux/kernel/ldt.c * * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds - * Copyright (C) 1999 Ingo Molnar + * Copyright (C) 1999, 2003 Ingo Molnar */ #include @@ -18,6 +18,8 @@ #include #include #include +#include +#include #ifdef CONFIG_SMP /* avoids "defined but not used" warnig */ static void flush_ldt(void *null) @@ -29,34 +31,31 @@ static void flush_ldt(void *null) static int alloc_ldt(mm_context_t *pc, int mincount, int reload) { - void *oldldt; - void *newldt; - int oldsize; + int oldsize, newsize, i; if (mincount <= pc->size) return 0; + /* + * LDT got larger - reallocate if necessary. 
+ */ oldsize = pc->size; mincount = (mincount+511)&(~511); - if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE) - newldt = vmalloc(mincount*LDT_ENTRY_SIZE); - else - newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL); - - if (!newldt) - return -ENOMEM; - - if (oldsize) - memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE); - oldldt = pc->ldt; - memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE); - pc->ldt = newldt; - wmb(); + newsize = mincount*LDT_ENTRY_SIZE; + for (i = 0; i < newsize; i += PAGE_SIZE) { + int nr = i/PAGE_SIZE; + BUG_ON(i >= 64*1024); + if (!pc->ldt_pages[nr]) { + pc->ldt_pages[nr] = alloc_page(GFP_HIGHUSER); + if (!pc->ldt_pages[nr]) + return -ENOMEM; + clear_highpage(pc->ldt_pages[nr]); + } + } pc->size = mincount; - wmb(); - if (reload) { #ifdef CONFIG_SMP cpumask_t mask; + preempt_disable(); load_LDT(pc); mask = cpumask_of_cpu(smp_processor_id()); @@ -67,21 +66,20 @@ static int alloc_ldt(mm_context_t *pc, i load_LDT(pc); #endif } - if (oldsize) { - if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(oldldt); - else - kfree(oldldt); - } return 0; } static inline int copy_ldt(mm_context_t *new, mm_context_t *old) { - int err = alloc_ldt(new, old->size, 0); - if (err < 0) + int i, err, size = old->size, nr_pages = (size*LDT_ENTRY_SIZE + PAGE_SIZE-1)/PAGE_SIZE; + + err = alloc_ldt(new, size, 0); + if (err < 0) { + new->size = 0; return err; - memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE); + } + for (i = 0; i < nr_pages; i++) + copy_user_highpage(new->ldt_pages[i], old->ldt_pages[i], 0); return 0; } @@ -96,6 +94,7 @@ int init_new_context(struct task_struct init_MUTEX(&mm->context.sem); mm->context.size = 0; + memset(mm->context.ldt_pages, 0, sizeof(struct page *) * MAX_LDT_PAGES); old_mm = current->mm; if (old_mm && old_mm->context.size > 0) { down(&old_mm->context.sem); @@ -107,23 +106,21 @@ int init_new_context(struct task_struct /* * No need to lock the MM as we are the last user + * Do not touch the ldt register, we are already 
+ * in the next thread. */ void destroy_context(struct mm_struct *mm) { - if (mm->context.size) { - if (mm == current->active_mm) - clear_LDT(); - if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(mm->context.ldt); - else - kfree(mm->context.ldt); - mm->context.size = 0; - } + int i, nr_pages = (mm->context.size*LDT_ENTRY_SIZE + PAGE_SIZE-1) / PAGE_SIZE; + + for (i = 0; i < nr_pages; i++) + __free_page(mm->context.ldt_pages[i]); + mm->context.size = 0; } static int read_ldt(void __user * ptr, unsigned long bytecount) { - int err; + int err, i; unsigned long size; struct mm_struct * mm = current->mm; @@ -138,8 +135,25 @@ static int read_ldt(void __user * ptr, u size = bytecount; err = 0; - if (copy_to_user(ptr, mm->context.ldt, size)) - err = -EFAULT; + /* + * This is necessary just in case we got here straight from a + * context-switch where the ptes were set but no tlb flush + * was done yet. We rather avoid doing a TLB flush in the + * context-switch path and do it here instead. + */ + __flush_tlb_global(); + + for (i = 0; i < size; i += PAGE_SIZE) { + int nr = i / PAGE_SIZE, bytes; + char *kaddr = kmap(mm->context.ldt_pages[nr]); + + bytes = size - i; + if (bytes > PAGE_SIZE) + bytes = PAGE_SIZE; + if (copy_to_user(ptr + i, kaddr, size - i)) + err = -EFAULT; + kunmap(mm->context.ldt_pages[nr]); + } up(&mm->context.sem); if (err < 0) return err; @@ -158,7 +172,7 @@ static int read_default_ldt(void __user err = 0; address = &default_ldt[0]; - size = 5*sizeof(struct desc_struct); + size = 5*LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; @@ -200,7 +214,15 @@ static int write_ldt(void __user * ptr, goto out_unlock; } - lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt); + /* + * No rescheduling allowed from this point to the install. + * + * We do a TLB flush for the same reason as in the read_ldt() path. 
+ */ + preempt_disable(); + __flush_tlb_global(); + lp = (__u32 *) ((ldt_info.entry_number << 3) + + (char *) __kmap_atomic_vaddr(KM_LDT_PAGE0)); /* Allow LDTs to be cleared by the user. */ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { @@ -221,6 +243,7 @@ install: *lp = entry_1; *(lp+1) = entry_2; error = 0; + preempt_enable(); out_unlock: up(&mm->context.sem); @@ -248,3 +271,26 @@ asmlinkage int sys_modify_ldt(int func, } return ret; } + +/* + * load one particular LDT into the current CPU + */ +void load_LDT_nolock(mm_context_t *pc, int cpu) +{ + struct page **pages = pc->ldt_pages; + int count = pc->size; + int nr_pages, i; + + if (likely(!count)) { + pages = &default_ldt_page; + count = 5; + } + nr_pages = (count*LDT_ENTRY_SIZE + PAGE_SIZE-1) / PAGE_SIZE; + + for (i = 0; i < nr_pages; i++) { + __kunmap_atomic_type(KM_LDT_PAGE0 - i); + __kmap_atomic(pages[i], KM_LDT_PAGE0 - i); + } + set_ldt_desc(cpu, (void *)__kmap_atomic_vaddr(KM_LDT_PAGE0), count); + load_LDT_desc(); +} diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/machine_kexec.c current/arch/i386/kernel/machine_kexec.c --- reference/arch/i386/kernel/machine_kexec.c 1969-12-31 16:00:00.000000000 -0800 +++ current/arch/i386/kernel/machine_kexec.c 2004-04-09 13:23:20.000000000 -0700 @@ -0,0 +1,122 @@ +/* + * machine_kexec.c - handle transition of Linux booting another kernel + * Copyright (C) 2002-2003 Eric Biederman + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static void set_idt(void *newidt, __u16 limit) +{ + unsigned char curidt[6]; + + /* ia32 supports unaliged loads & stores */ + (*(__u16 *)(curidt)) = limit; + (*(__u32 *)(curidt +2)) = (unsigned long)(newidt); + + __asm__ __volatile__ ( + "lidt %0\n" + : "=m" (curidt) + ); +}; + + +static void set_gdt(void *newgdt, __u16 limit) +{ + unsigned char curgdt[6]; + + /* ia32 supports unaligned loads & stores */ + (*(__u16 *)(curgdt)) = limit; + (*(__u32 *)(curgdt +2)) = (unsigned long)(newgdt); + + __asm__ __volatile__ ( + "lgdt %0\n" + : "=m" (curgdt) + ); +}; + +static void load_segments(void) +{ +#define __STR(X) #X +#define STR(X) __STR(X) + + __asm__ __volatile__ ( + "\tljmp $"STR(__KERNEL_CS)",$1f\n" + "\t1:\n" + "\tmovl $"STR(__KERNEL_DS)",%eax\n" + "\tmovl %eax,%ds\n" + "\tmovl %eax,%es\n" + "\tmovl %eax,%fs\n" + "\tmovl %eax,%gs\n" + "\tmovl %eax,%ss\n" + ); +#undef STR +#undef __STR +} + +typedef void (*relocate_new_kernel_t)( + unsigned long indirection_page, unsigned long reboot_code_buffer, + unsigned long start_address, unsigned int has_pae); + +const extern unsigned char relocate_new_kernel[]; +extern void relocate_new_kernel_end(void); +const extern unsigned int relocate_new_kernel_size; +extern void use_mm(struct mm_struct *mm); + +/* + * Do not allocate memory (or fail in any way) in machine_kexec(). + * We are past the point of no return, committed to rebooting now. 
+ */ +void machine_kexec(struct kimage *image) +{ + unsigned long indirection_page; + unsigned long reboot_code_buffer; + relocate_new_kernel_t rnk; + + /* switch to an mm where the reboot_code_buffer is identity mapped */ + use_mm(&init_mm); + stop_apics(); + + /* Interrupts aren't acceptable while we reboot */ + local_irq_disable(); + reboot_code_buffer = page_to_pfn(image->reboot_code_pages) << PAGE_SHIFT; + indirection_page = image->head & PAGE_MASK; + + /* copy it out */ + memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); + + /* The segment registers are funny things, they are + * automatically loaded from a table, in memory wherever you + * set them to a specific selector, but this table is never + * accessed again you set the segment to a different selector. + * + * The more common model is are caches where the behide + * the scenes work is done, but is also dropped at arbitrary + * times. + * + * I take advantage of this here by force loading the + * segments, before I zap the gdt with an invalid value. + */ + load_segments(); + /* The gdt & idt are now invalid. + * If you want to load them you must set up your own idt & gdt. + */ + set_gdt(phys_to_virt(0),0); + set_idt(phys_to_virt(0),0); + + /* now call it */ + rnk = (relocate_new_kernel_t) reboot_code_buffer; + (*rnk)(indirection_page, reboot_code_buffer, image->start, cpu_has_pae); +} diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/mpparse.c current/arch/i386/kernel/mpparse.c --- reference/arch/i386/kernel/mpparse.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/mpparse.c 2004-04-09 11:53:00.000000000 -0700 @@ -675,7 +675,7 @@ void __init get_smp_config (void) * Read the physical hardware table. Anything here will * override the defaults. 
*/ - if (!smp_read_mpc((void *)mpf->mpf_physptr)) { + if (!smp_read_mpc((void *)phys_to_virt(mpf->mpf_physptr))) { smp_found_config = 0; printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/nmi.c current/arch/i386/kernel/nmi.c --- reference/arch/i386/kernel/nmi.c 2004-03-11 14:33:36.000000000 -0800 +++ current/arch/i386/kernel/nmi.c 2004-04-08 15:10:20.000000000 -0700 @@ -31,7 +31,16 @@ #include #include +#ifdef CONFIG_KGDB +#include +#ifdef CONFIG_SMP +unsigned int nmi_watchdog = NMI_IO_APIC; +#else +unsigned int nmi_watchdog = NMI_LOCAL_APIC; +#endif +#else unsigned int nmi_watchdog = NMI_NONE; +#endif static unsigned int nmi_hz = HZ; unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */ extern void show_registers(struct pt_regs *regs); @@ -408,6 +417,9 @@ void touch_nmi_watchdog (void) for (i = 0; i < NR_CPUS; i++) alert_counter[i] = 0; } +#ifdef CONFIG_KGDB +int tune_watchdog = 5*HZ; +#endif void nmi_watchdog_tick (struct pt_regs * regs) { @@ -421,12 +433,24 @@ void nmi_watchdog_tick (struct pt_regs * sum = irq_stat[cpu].apic_timer_irqs; +#ifdef CONFIG_KGDB + if (! in_kgdb(regs) && last_irq_sums[cpu] == sum ) { + +#else if (last_irq_sums[cpu] == sum) { +#endif /* * Ayiee, looks like this CPU is stuck ... * wait a few IRQs (5 seconds) before doing the oops ... 
*/ alert_counter[cpu]++; +#ifdef CONFIG_KGDB + if (alert_counter[cpu] == tune_watchdog) { + kgdb_handle_exception(2, SIGPWR, 0, regs); + last_irq_sums[cpu] = sum; + alert_counter[cpu] = 0; + } +#endif if (alert_counter[cpu] == 5*nmi_hz) { spin_lock(&nmi_print_lock); /* diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/numaq.c current/arch/i386/kernel/numaq.c --- reference/arch/i386/kernel/numaq.c 2003-10-01 11:47:33.000000000 -0700 +++ current/arch/i386/kernel/numaq.c 2004-04-09 11:53:02.000000000 -0700 @@ -42,6 +42,10 @@ extern long node_start_pfn[], node_end_p * function also increments numnodes with the number of nodes (quads) * present. */ +extern unsigned long max_pages_per_node; +extern int limit_mem_per_node; + +#define node_size_pages(n) (node_end_pfn[n] - node_start_pfn[n]) static void __init smp_dump_qct(void) { int node; @@ -60,6 +64,8 @@ static void __init smp_dump_qct(void) eq->hi_shrd_mem_start - eq->priv_mem_size); node_end_pfn[node] = MB_TO_PAGES( eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); + if (node_size_pages(node) > max_pages_per_node) + node_end_pfn[node] = node_start_pfn[node] + max_pages_per_node; } } } diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/process.c current/arch/i386/kernel/process.c --- reference/arch/i386/kernel/process.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/process.c 2004-04-09 11:53:01.000000000 -0700 @@ -46,6 +46,7 @@ #include #include #include +#include #ifdef CONFIG_MATH_EMULATION #include #endif @@ -303,6 +304,9 @@ void flush_thread(void) struct task_struct *tsk = current; memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8); +#ifdef CONFIG_X86_HIGH_ENTRY + clear_thread_flag(TIF_DB7); +#endif memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); /* * Forget coprocessor state.. 
@@ -316,9 +320,8 @@ void release_thread(struct task_struct * if (dead_task->mm) { // temporary debugging check if (dead_task->mm->context.size) { - printk("WARNING: dead process %8s still has LDT? <%p/%d>\n", + printk("WARNING: dead process %8s still has LDT? <%d>\n", dead_task->comm, - dead_task->mm->context.ldt, dead_task->mm->context.size); BUG(); } @@ -342,7 +345,7 @@ int copy_thread(int nr, unsigned long cl { struct pt_regs * childregs; struct task_struct *tsk; - int err; + int err, i; childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; struct_cpy(childregs, regs); @@ -353,7 +356,18 @@ int copy_thread(int nr, unsigned long cl p->thread.esp = (unsigned long) childregs; p->thread.esp0 = (unsigned long) (childregs+1); + /* + * get the two stack pages, for the virtual stack. + * + * IMPORTANT: this code relies on the fact that the task + * structure is an THREAD_SIZE aligned piece of physical memory. + */ + for (i = 0; i < ARRAY_SIZE(p->thread.stack_page); i++) + p->thread.stack_page[i] = + virt_to_page((unsigned long)p->thread_info + (i*PAGE_SIZE)); + p->thread.eip = (unsigned long) ret_from_fork; + p->thread_info->real_stack = p->thread_info; savesegment(fs,p->thread.fs); savesegment(gs,p->thread.gs); @@ -505,10 +519,42 @@ struct task_struct fastcall * __switch_t __unlazy_fpu(prev_p); +#ifdef CONFIG_X86_HIGH_ENTRY +{ + int i; + /* + * Set the ptes of the virtual stack. (NOTE: a one-page TLB flush is + * needed because otherwise NMIs could interrupt the + * user-return code with a virtual stack and stale TLBs.) 
+ */ + for (i = 0; i < ARRAY_SIZE(next->stack_page); i++) { + __kunmap_atomic_type(KM_VSTACK_TOP-i); + __kmap_atomic(next->stack_page[i], KM_VSTACK_TOP-i); + } + /* + * NOTE: here we rely on the task being the stack as well + */ + next_p->thread_info->virtual_stack = + (void *)__kmap_atomic_vaddr(KM_VSTACK_TOP); +} +#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) + /* + * If next was preempted on entry from userspace to kernel, + * and now it's on a different cpu, we need to adjust %esp. + * This assumes that entry.S does not copy %esp while on the + * virtual stack (with interrupts enabled): which is so, + * except within __SWITCH_KERNELSPACE itself. + */ + if (unlikely(next->esp >= TASK_SIZE)) { + next->esp &= THREAD_SIZE - 1; + next->esp |= (unsigned long) next_p->thread_info->virtual_stack; + } +#endif +#endif /* * Reload esp0, LDT and the page table pointer: */ - load_esp0(tss, next); + load_virtual_esp0(tss, next_p); /* * Load the per-thread Thread-Local Storage descriptor. diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/reboot.c current/arch/i386/kernel/reboot.c --- reference/arch/i386/kernel/reboot.c 2004-01-15 10:41:00.000000000 -0800 +++ current/arch/i386/kernel/reboot.c 2004-04-09 13:23:20.000000000 -0700 @@ -22,8 +22,7 @@ static int reboot_mode; int reboot_thru_bios; #ifdef CONFIG_SMP -int reboot_smp = 0; -static int reboot_cpu = -1; +int reboot_cpu = -1; /* specifies the internal linux cpu id, not the apicid */ /* shamelessly grabbed from lib/vsprintf.c for readability */ #define is_digit(c) ((c) >= '0' && (c) <= '9') #endif @@ -45,7 +44,6 @@ static int __init reboot_setup(char *str break; #ifdef CONFIG_SMP case 's': /* "smp" reboot by executing reset on BSP or other CPU*/ - reboot_smp = 1; if (is_digit(*(str+1))) { reboot_cpu = (int) (*(str+1) - '0'); if (is_digit(*(str+2))) @@ -155,12 +153,11 @@ void machine_real_restart(unsigned char CMOS_WRITE(0x00, 0x8f); spin_unlock_irqrestore(&rtc_lock, flags); - /* Remap the kernel at 
virtual address zero, as well as offset zero - from the kernel segment. This assumes the kernel segment starts at - virtual address PAGE_OFFSET. */ - - memcpy (swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, - sizeof (swapper_pg_dir [0]) * KERNEL_PGD_PTRS); + /* + * Remap the first 16 MB of RAM (which includes the kernel image) + * at virtual address zero: + */ + setup_identity_mappings(swapper_pg_dir, 0, LOW_MAPPINGS_SIZE); /* * Use `swapper_pg_dir' as our page directory. @@ -219,50 +216,7 @@ void machine_real_restart(unsigned char void machine_restart(char * __unused) { -#ifdef CONFIG_SMP - int cpuid; - - cpuid = GET_APIC_ID(apic_read(APIC_ID)); - - if (reboot_smp) { - - /* check to see if reboot_cpu is valid - if its not, default to the BSP */ - if ((reboot_cpu == -1) || - (reboot_cpu > (NR_CPUS -1)) || - !physid_isset(cpuid, phys_cpu_present_map)) - reboot_cpu = boot_cpu_physical_apicid; - - reboot_smp = 0; /* use this as a flag to only go through this once*/ - /* re-run this function on the other CPUs - it will fall though this section since we have - cleared reboot_smp, and do the reboot if it is the - correct CPU, otherwise it halts. */ - if (reboot_cpu != cpuid) - smp_call_function((void *)machine_restart , NULL, 1, 0); - } - - /* if reboot_cpu is still -1, then we want a tradional reboot, - and if we are not running on the reboot_cpu,, halt */ - if ((reboot_cpu != -1) && (cpuid != reboot_cpu)) { - for (;;) - __asm__ __volatile__ ("hlt"); - } - /* - * Stop all CPUs and turn off local APICs and the IO-APIC, so - * other OSs see a clean IRQ state. 
- */ - smp_send_stop(); -#elif defined(CONFIG_X86_LOCAL_APIC) - if (cpu_has_apic) { - local_irq_disable(); - disable_local_APIC(); - local_irq_enable(); - } -#endif -#ifdef CONFIG_X86_IO_APIC - disable_IO_APIC(); -#endif + stop_apics(); if (!reboot_thru_bios) { if (efi_enabled) { @@ -289,12 +243,14 @@ EXPORT_SYMBOL(machine_restart); void machine_halt(void) { + stop_apics(); } EXPORT_SYMBOL(machine_halt); void machine_power_off(void) { + stop_apics(); if (efi_enabled) efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, 0); if (pm_power_off) diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/relocate_kernel.S current/arch/i386/kernel/relocate_kernel.S --- reference/arch/i386/kernel/relocate_kernel.S 1969-12-31 16:00:00.000000000 -0800 +++ current/arch/i386/kernel/relocate_kernel.S 2004-04-09 13:23:20.000000000 -0700 @@ -0,0 +1,118 @@ +/* + * relocate_kernel.S - put the kernel image in place to boot + * Copyright (C) 2002-2003 Eric Biederman + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include + + /* + * Must be relocatable PIC code callable as a C function, that once + * it starts can not use the previous processes stack. + */ + .globl relocate_new_kernel +relocate_new_kernel: + /* read the arguments and say goodbye to the stack */ + movl 4(%esp), %ebx /* indirection_page */ + movl 8(%esp), %ebp /* reboot_code_buffer */ + movl 12(%esp), %edx /* start address */ + movl 16(%esp), %ecx /* cpu_has_pae */ + + /* zero out flags, and disable interrupts */ + pushl $0 + popfl + + /* set a new stack at the bottom of our page... */ + lea 4096(%ebp), %esp + + /* store the parameters back on the stack */ + pushl %edx /* store the start address */ + + /* Set cr0 to a known state: + * 31 0 == Paging disabled + * 18 0 == Alignment check disabled + * 16 0 == Write protect disabled + * 3 0 == No task switch + * 2 0 == Don't do FP software emulation. 
+ * 0 1 == Proctected mode enabled + */ + movl %cr0, %eax + andl $~((1<<31)|(1<<18)|(1<<16)|(1<<3)|(1<<2)), %eax + orl $(1<<0), %eax + movl %eax, %cr0 + + /* clear cr4 if applicable */ + testl %ecx, %ecx + jz 1f + /* Set cr4 to a known state: + * Setting everything to zero seems safe. + */ + movl %cr4, %eax + andl $0, %eax + movl %eax, %cr4 + + jmp 1f +1: + + /* Flush the TLB (needed?) */ + xorl %eax, %eax + movl %eax, %cr3 + + /* Do the copies */ + cld +0: /* top, read another word for the indirection page */ + movl %ebx, %ecx + movl (%ebx), %ecx + addl $4, %ebx + testl $0x1, %ecx /* is it a destination page */ + jz 1f + movl %ecx, %edi + andl $0xfffff000, %edi + jmp 0b +1: + testl $0x2, %ecx /* is it an indirection page */ + jz 1f + movl %ecx, %ebx + andl $0xfffff000, %ebx + jmp 0b +1: + testl $0x4, %ecx /* is it the done indicator */ + jz 1f + jmp 2f +1: + testl $0x8, %ecx /* is it the source indicator */ + jz 0b /* Ignore it otherwise */ + movl %ecx, %esi /* For every source page do a copy */ + andl $0xfffff000, %esi + + movl $1024, %ecx + rep ; movsl + jmp 0b + +2: + + /* To be certain of avoiding problems with self-modifying code + * I need to execute a serializing instruction here. + * So I flush the TLB, it's handy, and not processor dependent. 
+ */ + xorl %eax, %eax + movl %eax, %cr3 + + /* set all of the registers to known values */ + /* leave %esp alone */ + + xorl %eax, %eax + xorl %ebx, %ebx + xorl %ecx, %ecx + xorl %edx, %edx + xorl %esi, %esi + xorl %edi, %edi + xorl %ebp, %ebp + ret +relocate_new_kernel_end: + + .globl relocate_new_kernel_size +relocate_new_kernel_size: + .long relocate_new_kernel_end - relocate_new_kernel diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/setup.c current/arch/i386/kernel/setup.c --- reference/arch/i386/kernel/setup.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/setup.c 2004-04-09 21:41:40.000000000 -0700 @@ -47,6 +47,7 @@ #include #include #include +#include #include "setup_arch_pre.h" #include "mach_resources.h" @@ -148,7 +149,7 @@ static void __init probe_roms(void) probe_extension_roms(roms); } -static void __init limit_regions(unsigned long long size) +void __init limit_regions(unsigned long long size) { unsigned long long current_addr = 0; int i; @@ -484,6 +485,7 @@ static void __init setup_memory_region(v print_memory_map(who); } /* setup_memory_region */ +unsigned long max_pages_per_node = 0xFFFFFFFF; static void __init parse_cmdline_early (char ** cmdline_p) { @@ -526,6 +528,14 @@ static void __init parse_cmdline_early ( userdef=1; } } + + if (c == ' ' && !memcmp(from, "memnode=", 8)) { + unsigned long long node_size_bytes; + if (to != command_line) + to--; + node_size_bytes = memparse(from+8, &from); + max_pages_per_node = node_size_bytes >> PAGE_SHIFT; + } if (c == ' ' && !memcmp(from, "memmap=", 7)) { if (to != command_line) @@ -1200,6 +1210,7 @@ void __init setup_arch(char **cmdline_p) conswitchp = &dummy_con; #endif #endif + vsyscall_init(); } #include "setup_arch_post.h" diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/signal.c current/arch/i386/kernel/signal.c --- reference/arch/i386/kernel/signal.c 2004-03-11 14:33:36.000000000 -0800 +++ current/arch/i386/kernel/signal.c 2004-04-09 
11:53:00.000000000 -0700 @@ -128,28 +128,29 @@ sys_sigaltstack(const stack_t __user *us */ static int -restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax) +restore_sigcontext(struct pt_regs *regs, + struct sigcontext __user *__sc, int *peax) { - unsigned int err = 0; + struct sigcontext scratch; /* 88 bytes of scratch area */ /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; -#define COPY(x) err |= __get_user(regs->x, &sc->x) + if (copy_from_user(&scratch, __sc, sizeof(scratch))) + return -EFAULT; + +#define COPY(x) regs->x = scratch.x #define COPY_SEG(seg) \ - { unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ + { unsigned short tmp = scratch.seg; \ regs->x##seg = tmp; } #define COPY_SEG_STRICT(seg) \ - { unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ + { unsigned short tmp = scratch.seg; \ regs->x##seg = tmp|3; } #define GET_SEG(seg) \ - { unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ + { unsigned short tmp = scratch.seg; \ loadsegment(seg,tmp); } GET_SEG(gs); @@ -168,27 +169,23 @@ restore_sigcontext(struct pt_regs *regs, COPY_SEG_STRICT(ss); { - unsigned int tmpflags; - err |= __get_user(tmpflags, &sc->eflags); + unsigned int tmpflags = scratch.eflags; regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); regs->orig_eax = -1; /* disable syscall checks */ } { - struct _fpstate __user * buf; - err |= __get_user(buf, &sc->fpstate); + struct _fpstate * buf = scratch.fpstate; if (buf) { if (verify_area(VERIFY_READ, buf, sizeof(*buf))) - goto badframe; - err |= restore_i387(buf); + return -EFAULT; + if (restore_i387(buf)) + return -EFAULT; } } - err |= __get_user(*peax, &sc->eax); - return err; - -badframe: - return 1; + *peax = scratch.eax; + return 0; } asmlinkage int sys_sigreturn(unsigned long __unused) @@ -266,46 +263,47 @@ badframe: */ static int -setup_sigcontext(struct sigcontext __user *sc, struct 
_fpstate __user *fpstate, +setup_sigcontext(struct sigcontext __user *__sc, struct _fpstate __user *fpstate, struct pt_regs *regs, unsigned long mask) { - int tmp, err = 0; + struct sigcontext sc; /* 88 bytes of scratch area */ + int tmp; tmp = 0; __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp)); - err |= __put_user(tmp, (unsigned int *)&sc->gs); + *(unsigned int *)&sc.gs = tmp; __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp)); - err |= __put_user(tmp, (unsigned int *)&sc->fs); - - err |= __put_user(regs->xes, (unsigned int *)&sc->es); - err |= __put_user(regs->xds, (unsigned int *)&sc->ds); - err |= __put_user(regs->edi, &sc->edi); - err |= __put_user(regs->esi, &sc->esi); - err |= __put_user(regs->ebp, &sc->ebp); - err |= __put_user(regs->esp, &sc->esp); - err |= __put_user(regs->ebx, &sc->ebx); - err |= __put_user(regs->edx, &sc->edx); - err |= __put_user(regs->ecx, &sc->ecx); - err |= __put_user(regs->eax, &sc->eax); - err |= __put_user(current->thread.trap_no, &sc->trapno); - err |= __put_user(current->thread.error_code, &sc->err); - err |= __put_user(regs->eip, &sc->eip); - err |= __put_user(regs->xcs, (unsigned int *)&sc->cs); - err |= __put_user(regs->eflags, &sc->eflags); - err |= __put_user(regs->esp, &sc->esp_at_signal); - err |= __put_user(regs->xss, (unsigned int *)&sc->ss); + *(unsigned int *)&sc.fs = tmp; + *(unsigned int *)&sc.es = regs->xes; + *(unsigned int *)&sc.ds = regs->xds; + sc.edi = regs->edi; + sc.esi = regs->esi; + sc.ebp = regs->ebp; + sc.esp = regs->esp; + sc.ebx = regs->ebx; + sc.edx = regs->edx; + sc.ecx = regs->ecx; + sc.eax = regs->eax; + sc.trapno = current->thread.trap_no; + sc.err = current->thread.error_code; + sc.eip = regs->eip; + *(unsigned int *)&sc.cs = regs->xcs; + sc.eflags = regs->eflags; + sc.esp_at_signal = regs->esp; + *(unsigned int *)&sc.ss = regs->xss; tmp = save_i387(fpstate); if (tmp < 0) - err = 1; - else - err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); + return 1; + sc.fpstate = tmp ? 
fpstate : NULL; /* non-iBCS2 extensions.. */ - err |= __put_user(mask, &sc->oldmask); - err |= __put_user(current->thread.cr2, &sc->cr2); + sc.oldmask = mask; + sc.cr2 = current->thread.cr2; - return err; + if (copy_to_user(__sc, &sc, sizeof(sc))) + return 1; + return 0; } /* @@ -443,7 +441,7 @@ static void setup_rt_frame(int sig, stru /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + err |= __put_user(current->sas_ss_sp, (unsigned long *)&frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->esp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/smp.c current/arch/i386/kernel/smp.c --- reference/arch/i386/kernel/smp.c 2004-03-11 14:33:36.000000000 -0800 +++ current/arch/i386/kernel/smp.c 2004-04-09 13:23:20.000000000 -0700 @@ -327,10 +327,12 @@ asmlinkage void smp_invalidate_interrupt if (flush_mm == cpu_tlbstate[cpu].active_mm) { if (cpu_tlbstate[cpu].state == TLBSTATE_OK) { +#ifndef CONFIG_X86_SWITCH_PAGETABLES if (flush_va == FLUSH_ALL) local_flush_tlb(); else __flush_tlb_one(flush_va); +#endif } else leave_mm(cpu); } @@ -396,21 +398,6 @@ static void flush_tlb_others(cpumask_t c spin_unlock(&tlbstate_lock); } -void flush_tlb_current_task(void) -{ - struct mm_struct *mm = current->mm; - cpumask_t cpu_mask; - - preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); - - local_flush_tlb(); - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, FLUSH_ALL); - preempt_enable(); -} - void flush_tlb_mm (struct mm_struct * mm) { cpumask_t cpu_mask; @@ -442,7 +429,10 @@ void flush_tlb_page(struct vm_area_struc if (current->active_mm == mm) { if(current->mm) - __flush_tlb_one(va); +#ifndef CONFIG_X86_SWITCH_PAGETABLES + __flush_tlb_one(va) +#endif + ; else 
leave_mm(smp_processor_id()); } @@ -466,7 +456,17 @@ void flush_tlb_all(void) { on_each_cpu(do_flush_tlb_all, 0, 1, 1); } - +#ifdef CONFIG_KGDB +/* + * By using the NMI code instead of a vector we just sneak thru the + * word generator coming out with just what we want. AND it does + * not matter if clustered_apic_mode is set or not. + */ +void smp_send_nmi_allbutself(void) +{ + send_IPI_allbutself(APIC_DM_NMI); +} +#endif /* * this function sends a 'reschedule' IPI to another CPU. * it goes straight through and wastes no time serializing @@ -564,6 +564,30 @@ static void stop_this_cpu (void * dummy) void smp_send_stop(void) { + extern int reboot_cpu; + int reboot_cpu_id; + + /* The boot cpu is always logical cpu 0 */ + reboot_cpu_id = 0; + + /* See if there has been give a command line override. + */ + if ((reboot_cpu != -1) && !(reboot_cpu >= NR_CPUS) && + test_bit(reboot_cpu, &cpu_online_map)) { + reboot_cpu_id = reboot_cpu; + } + + /* Make certain the the cpu I'm rebooting on is online */ + if (!test_bit(reboot_cpu_id, &cpu_online_map)) { + reboot_cpu_id = smp_processor_id(); + } + + /* Make certain I only run on the appropriate processor */ + set_cpus_allowed(current, 1 << reboot_cpu_id); + + /* O.K. Now that I'm on the appropriate processor, stop + * all of the others. + */ smp_call_function(stop_this_cpu, NULL, 1, 0); local_irq_disable(); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/smpboot.c current/arch/i386/kernel/smpboot.c --- reference/arch/i386/kernel/smpboot.c 2004-03-11 14:33:36.000000000 -0800 +++ current/arch/i386/kernel/smpboot.c 2004-04-08 15:10:24.000000000 -0700 @@ -39,6 +39,7 @@ #include #include +#include #include #include #include @@ -815,6 +816,8 @@ static int __init do_boot_cpu(int apicid /* Stack for startup_32 can be just as for start_secondary onwards */ stack_start.esp = (void *) idle->thread.esp; + irq_ctx_init(cpu); + /* * This grunge runs the startup process for * the targeted processor. 
@@ -934,7 +937,7 @@ static int boot_cpu_logical_apicid; /* Where the IO area was mapped on multiquad, always 0 otherwise */ void *xquad_portio; -int cpu_sibling_map[NR_CPUS] __cacheline_aligned; +cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; static void __init smp_boot_cpus(unsigned int max_cpus) { @@ -953,6 +956,8 @@ static void __init smp_boot_cpus(unsigne current_thread_info()->cpu = 0; smp_tune_scheduling(); + cpus_clear(cpu_sibling_map[0]); + cpu_set(0, cpu_sibling_map[0]); /* * If we couldn't find an SMP configuration at boot time, @@ -1079,32 +1084,34 @@ static void __init smp_boot_cpus(unsigne Dprintk("Boot done.\n"); /* - * If Hyper-Threading is avaialble, construct cpu_sibling_map[], so - * that we can tell the sibling CPU efficiently. + * construct cpu_sibling_map[], so that we can tell sibling CPUs + * efficiently. */ - if (cpu_has_ht && smp_num_siblings > 1) { - for (cpu = 0; cpu < NR_CPUS; cpu++) - cpu_sibling_map[cpu] = NO_PROC_ID; - - for (cpu = 0; cpu < NR_CPUS; cpu++) { - int i; - if (!cpu_isset(cpu, cpu_callout_map)) - continue; + for (cpu = 0; cpu < NR_CPUS; cpu++) + cpus_clear(cpu_sibling_map[cpu]); + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + int siblings = 0; + int i; + if (!cpu_isset(cpu, cpu_callout_map)) + continue; + if (smp_num_siblings > 1) { for (i = 0; i < NR_CPUS; i++) { - if (i == cpu || !cpu_isset(i, cpu_callout_map)) + if (!cpu_isset(i, cpu_callout_map)) continue; if (phys_proc_id[cpu] == phys_proc_id[i]) { - cpu_sibling_map[cpu] = i; - printk("cpu_sibling_map[%d] = %d\n", cpu, cpu_sibling_map[cpu]); - break; + siblings++; + cpu_set(i, cpu_sibling_map[cpu]); } } - if (cpu_sibling_map[cpu] == NO_PROC_ID) { - smp_num_siblings = 1; - printk(KERN_WARNING "WARNING: No sibling found for CPU %d.\n", cpu); - } + } else { + siblings++; + cpu_set(cpu, cpu_sibling_map[cpu]); } + + if (siblings != smp_num_siblings) + printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings); } 
smpboot_setup_io_apic(); @@ -1118,6 +1125,256 @@ static void __init smp_boot_cpus(unsigne synchronize_tsc_bp(); } +#ifdef CONFIG_SCHED_SMT +#ifdef CONFIG_NUMA +static struct sched_group sched_group_cpus[NR_CPUS]; +static struct sched_group sched_group_phys[NR_CPUS]; +static struct sched_group sched_group_nodes[MAX_NUMNODES]; +static DEFINE_PER_CPU(struct sched_domain, cpu_domains); +static DEFINE_PER_CPU(struct sched_domain, phys_domains); +static DEFINE_PER_CPU(struct sched_domain, node_domains); +__init void arch_init_sched_domains(void) +{ + int i; + struct sched_group *first = NULL, *last = NULL; + + /* Set up domains */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + struct sched_domain *phys_domain = &per_cpu(phys_domains, i); + struct sched_domain *node_domain = &per_cpu(node_domains, i); + int node = cpu_to_node(i); + cpumask_t nodemask = node_to_cpumask(node); + + *cpu_domain = SD_SIBLING_INIT; + cpu_domain->span = cpu_sibling_map[i]; + cpu_domain->cache_hot_time = cacheflush_time / 2; + cpu_domain->parent = phys_domain; + cpu_domain->groups = &sched_group_cpus[i]; + + *phys_domain = SD_CPU_INIT; + phys_domain->span = nodemask; + phys_domain->cache_hot_time = cacheflush_time / 2; + phys_domain->parent = node_domain; + phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)]; + + *node_domain = SD_NODE_INIT; + node_domain->span = cpu_possible_map; + node_domain->cache_hot_time = cacheflush_time; + node_domain->groups = &sched_group_nodes[cpu_to_node(i)]; + } + + /* Set up CPU (sibling) groups */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + int j; + first = last = NULL; + + if (i != first_cpu(cpu_domain->span)) + continue; + + for_each_cpu_mask(j, cpu_domain->span) { + struct sched_group *cpu = &sched_group_cpus[j]; + + cpu->cpumask = CPU_MASK_NONE; + cpu_set(j, cpu->cpumask); + cpu->cpu_power = SCHED_LOAD_SCALE; + + if (!first) + first = cpu; + if (last) + last->next = 
cpu; + last = cpu; + } + last->next = first; + } + + for (i = 0; i < MAX_NUMNODES; i++) { + int j; + cpumask_t nodemask; + struct sched_group *node = &sched_group_nodes[i]; + cpus_and(nodemask, node_to_cpumask(i), cpu_possible_map); + + if (cpus_empty(nodemask)) + continue; + + first = last = NULL; + /* Set up physical groups */ + for_each_cpu_mask(j, nodemask) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, j); + struct sched_group *cpu = &sched_group_phys[j]; + + if (j != first_cpu(cpu_domain->span)) + continue; + + cpu->cpumask = cpu_domain->span; + /* + * Make each extra sibling increase power by 10% of + * the basic CPU. This is very arbitrary. + */ + cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10; + node->cpu_power += cpu->cpu_power; + + if (!first) + first = cpu; + if (last) + last->next = cpu; + last = cpu; + } + last->next = first; + } + + /* Set up nodes */ + first = last = NULL; + for (i = 0; i < MAX_NUMNODES; i++) { + struct sched_group *cpu = &sched_group_nodes[i]; + cpumask_t nodemask; + cpus_and(nodemask, node_to_cpumask(i), cpu_possible_map); + + if (cpus_empty(nodemask)) + continue; + + cpu->cpumask = nodemask; + /* ->cpu_power already setup */ + + if (!first) + first = cpu; + if (last) + last->next = cpu; + last = cpu; + } + last->next = first; + + mb(); + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + cpu_attach_domain(cpu_domain, i); + } +} +#else /* !CONFIG_NUMA */ +static struct sched_group sched_group_cpus[NR_CPUS]; +static struct sched_group sched_group_phys[NR_CPUS]; +static DEFINE_PER_CPU(struct sched_domain, cpu_domains); +static DEFINE_PER_CPU(struct sched_domain, phys_domains); +__init void arch_init_sched_domains(void) +{ + int i; + struct sched_group *first = NULL, *last = NULL; + + /* Set up domains */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + struct sched_domain *phys_domain = &per_cpu(phys_domains, i); + 
+ *cpu_domain = SD_SIBLING_INIT; + cpu_domain->span = cpu_sibling_map[i]; + cpu_domain->cache_hot_time = cacheflush_time / 2; + cpu_domain->parent = phys_domain; + cpu_domain->groups = &sched_group_cpus[i]; + + *phys_domain = SD_CPU_INIT; + phys_domain->span = cpu_possible_map; + phys_domain->cache_hot_time = cacheflush_time / 2; + phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)]; + } + + /* Set up CPU (sibling) groups */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + int j; + first = last = NULL; + + if (i != first_cpu(cpu_domain->span)) + continue; + + for_each_cpu_mask(j, cpu_domain->span) { + struct sched_group *cpu = &sched_group_cpus[j]; + + cpus_clear(cpu->cpumask); + cpu_set(j, cpu->cpumask); + cpu->cpu_power = SCHED_LOAD_SCALE; + + if (!first) + first = cpu; + if (last) + last->next = cpu; + last = cpu; + } + last->next = first; + } + + first = last = NULL; + /* Set up physical groups */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + struct sched_group *cpu = &sched_group_phys[i]; + + if (i != first_cpu(cpu_domain->span)) + continue; + + cpu->cpumask = cpu_domain->span; + /* See SMT+NUMA setup for comment */ + cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10; + + if (!first) + first = cpu; + if (last) + last->next = cpu; + last = cpu; + } + last->next = first; + + mb(); + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + cpu_attach_domain(cpu_domain, i); + } +} +#endif /* CONFIG_NUMA */ +#else /* !CONFIG_SCHED_SMT */ + +static struct sched_group sched_group_cpus[NR_CPUS]; +static DEFINE_PER_CPU(struct sched_domain, cpu_domains); + +void __init arch_init_sched_domains(void) +{ + int i; + struct sched_group *first_cpu = NULL, *last_cpu = NULL; + + /* Set up domains */ + for_each_cpu(i) { + struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + + *cpu_sd = SD_CPU_INIT; + cpu_sd->span = 
cpu_possible_map; + cpu_sd->cache_hot_time = cacheflush_time / 2; + cpu_sd->groups = &sched_group_cpus[i]; + } + + /* Set up CPU groups */ + for_each_cpu_mask(i, cpu_possible_map) { + struct sched_group *cpu = &sched_group_cpus[i]; + + cpus_clear(cpu->cpumask); + cpu_set(i, cpu->cpumask); + cpu->cpu_power = SCHED_LOAD_SCALE; + + if (!first_cpu) + first_cpu = cpu; + if (last_cpu) + last_cpu->next = cpu; + last_cpu = cpu; + } + last_cpu->next = first_cpu; + + mb(); /* domains were modified outside the lock */ + for_each_cpu(i) { + struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + cpu_attach_domain(cpu_sd, i); + } +} +#endif + /* These are wrappers to interface to the new boot process. Someone who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */ void __init smp_prepare_cpus(unsigned int max_cpus) diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/srat.c current/arch/i386/kernel/srat.c --- reference/arch/i386/kernel/srat.c 2003-10-01 11:47:33.000000000 -0700 +++ current/arch/i386/kernel/srat.c 2004-04-09 11:53:02.000000000 -0700 @@ -53,6 +53,10 @@ struct node_memory_chunk_s { }; static struct node_memory_chunk_s node_memory_chunk[MAXCHUNKS]; +#define chunk_start(i) (node_memory_chunk[i].start_pfn) +#define chunk_end(i) (node_memory_chunk[i].end_pfn) +#define chunk_size(i) (chunk_end(i)-chunk_start(i)) + static int num_memory_chunks; /* total number of memory chunks */ static int zholes_size_init; static unsigned long zholes_size[MAX_NUMNODES * MAX_NR_ZONES]; @@ -198,6 +202,9 @@ static void __init initialize_physnode_m } } +extern unsigned long max_pages_per_node; +extern int limit_mem_per_node; + /* Parse the ACPI Static Resource Affinity Table */ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp) { @@ -281,23 +288,27 @@ static int __init acpi20_parse_srat(stru node_memory_chunk[j].start_pfn, node_memory_chunk[j].end_pfn); } - + /*calculate node_start_pfn/node_end_pfn arrays*/ for (nid = 0; nid < 
numnodes; nid++) { - int been_here_before = 0; + unsigned long node_present_pages = 0; + node_start_pfn[nid] = -1; for (j = 0; j < num_memory_chunks; j++){ - if (node_memory_chunk[j].nid == nid) { - if (been_here_before == 0) { - node_start_pfn[nid] = node_memory_chunk[j].start_pfn; - node_end_pfn[nid] = node_memory_chunk[j].end_pfn; - been_here_before = 1; - } else { /* We've found another chunk of memory for the node */ - if (node_start_pfn[nid] < node_memory_chunk[j].start_pfn) { - node_end_pfn[nid] = node_memory_chunk[j].end_pfn; - } - } - } + unsigned long proposed_size; + + if (node_memory_chunk[j].nid != nid) + continue; + + proposed_size = node_present_pages + chunk_size(j); + if (proposed_size > max_pages_per_node) + chunk_end(j) = chunk_start(j) + + max_pages_per_node - node_present_pages; + node_present_pages += chunk_size(j); + + if (node_start_pfn[nid] == -1) + node_start_pfn[nid] = chunk_start(j); + node_end_pfn[nid] = chunk_end(j); } } return 1; diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/sysenter.c current/arch/i386/kernel/sysenter.c --- reference/arch/i386/kernel/sysenter.c 2003-10-01 11:34:29.000000000 -0700 +++ current/arch/i386/kernel/sysenter.c 2004-04-09 11:53:00.000000000 -0700 @@ -18,13 +18,18 @@ #include #include #include +#include extern asmlinkage void sysenter_entry(void); void enable_sep_cpu(void *info) { int cpu = get_cpu(); +#ifdef CONFIG_X86_HIGH_ENTRY + struct tss_struct *tss = (struct tss_struct *) __fix_to_virt(FIX_TSS_0) + cpu; +#else struct tss_struct *tss = init_tss + cpu; +#endif tss->ss1 = __KERNEL_CS; tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss; diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/time.c current/arch/i386/kernel/time.c --- reference/arch/i386/kernel/time.c 2004-03-11 14:33:36.000000000 -0800 +++ current/arch/i386/kernel/time.c 2004-04-09 21:41:40.000000000 -0700 @@ -393,5 +393,8 @@ void __init time_init(void) cur_timer = select_timer(); printk(KERN_INFO 
"Using %s for high-res timesource\n",cur_timer->name); + /* set vsyscall to use selected time source */ + vsyscall_set_timesource(cur_timer->name); + time_init_hook(); } diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/timers/timer.c current/arch/i386/kernel/timers/timer.c --- reference/arch/i386/kernel/timers/timer.c 2004-03-11 14:33:37.000000000 -0800 +++ current/arch/i386/kernel/timers/timer.c 2004-04-09 21:41:40.000000000 -0700 @@ -2,6 +2,7 @@ #include #include #include +#include #ifdef CONFIG_HPET_TIMER /* @@ -44,6 +45,9 @@ __setup("clock=", clock_setup); void clock_fallback(void) { cur_timer = &timer_pit; + + /* set vsyscall to use selected time source */ + vsyscall_set_timesource(cur_timer->name); } /* iterates through the list of timers, returning the first diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/timers/timer_cyclone.c current/arch/i386/kernel/timers/timer_cyclone.c --- reference/arch/i386/kernel/timers/timer_cyclone.c 2004-03-11 14:33:37.000000000 -0800 +++ current/arch/i386/kernel/timers/timer_cyclone.c 2004-04-09 21:41:40.000000000 -0700 @@ -21,18 +21,24 @@ extern spinlock_t i8253_lock; /* Number of usecs that the last interrupt was delayed */ -static int delay_at_last_interrupt; +int cyclone_delay_at_last_interrupt; + +/* FIXMAP flag */ +#ifdef CONFIG_VSYSCALL_GTOD +#define PAGE_CYCLONE PAGE_KERNEL_VSYSCALL_NOCACHE +#else +#define PAGE_CYCLONE PAGE_KERNEL_NOCACHE +#endif #define CYCLONE_CBAR_ADDR 0xFEB00CD0 #define CYCLONE_PMCC_OFFSET 0x51A0 #define CYCLONE_MPMC_OFFSET 0x51D0 #define CYCLONE_MPCS_OFFSET 0x51A8 -#define CYCLONE_TIMER_FREQ 100000000 #define CYCLONE_TIMER_MASK (((u64)1<<40)-1) /* 40 bit mask */ int use_cyclone = 0; -static u32* volatile cyclone_timer; /* Cyclone MPMC0 register */ -static u32 last_cyclone_low; +u32* volatile cyclone_timer; /* Cyclone MPMC0 register */ +u32 last_cyclone_low; static u32 last_cyclone_high; static unsigned long long monotonic_base; static seqlock_t monotonic_lock = 
SEQLOCK_UNLOCKED; @@ -57,7 +63,7 @@ static void mark_offset_cyclone(void) spin_lock(&i8253_lock); read_cyclone_counter(last_cyclone_low,last_cyclone_high); - /* read values for delay_at_last_interrupt */ + /* read values for cyclone_delay_at_last_interrupt */ outb_p(0x00, 0x43); /* latch the count ASAP */ count = inb_p(0x40); /* read the latched count */ @@ -67,7 +73,7 @@ static void mark_offset_cyclone(void) /* lost tick compensation */ delta = last_cyclone_low - delta; delta /= (CYCLONE_TIMER_FREQ/1000000); - delta += delay_at_last_interrupt; + delta += cyclone_delay_at_last_interrupt; lost = delta/(1000000/HZ); delay = delta%(1000000/HZ); if (lost >= 2) @@ -78,16 +84,16 @@ static void mark_offset_cyclone(void) monotonic_base += (this_offset - last_offset) & CYCLONE_TIMER_MASK; write_sequnlock(&monotonic_lock); - /* calculate delay_at_last_interrupt */ + /* calculate cyclone_delay_at_last_interrupt */ count = ((LATCH-1) - count) * TICK_SIZE; - delay_at_last_interrupt = (count + LATCH/2) / LATCH; + cyclone_delay_at_last_interrupt = (count + LATCH/2) / LATCH; /* catch corner case where tick rollover occured * between cyclone and pit reads (as noted when * usec delta is > 90% # of usecs/tick) */ - if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ)) + if (lost && abs(delay - cyclone_delay_at_last_interrupt) > (900000/HZ)) jiffies_64++; } @@ -96,7 +102,7 @@ static unsigned long get_offset_cyclone( u32 offset; if(!cyclone_timer) - return delay_at_last_interrupt; + return cyclone_delay_at_last_interrupt; /* Read the cyclone timer */ offset = cyclone_timer[0]; @@ -109,7 +115,7 @@ static unsigned long get_offset_cyclone( offset = offset/(CYCLONE_TIMER_FREQ/1000000); /* our adjusted time offset in microseconds */ - return delay_at_last_interrupt + offset; + return cyclone_delay_at_last_interrupt + offset; } static unsigned long long monotonic_clock_cyclone(void) @@ -193,7 +199,7 @@ static int __init init_cyclone(char* ove /* map in cyclone_timer */ pageaddr = 
(base + CYCLONE_MPMC_OFFSET)&PAGE_MASK; offset = (base + CYCLONE_MPMC_OFFSET)&(~PAGE_MASK); - set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr); + __set_fixmap(FIX_CYCLONE_TIMER, pageaddr, PAGE_CYCLONE); cyclone_timer = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset); if(!cyclone_timer){ printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n"); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/timers/timer_tsc.c current/arch/i386/kernel/timers/timer_tsc.c --- reference/arch/i386/kernel/timers/timer_tsc.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/timers/timer_tsc.c 2004-04-09 21:41:40.000000000 -0700 @@ -33,7 +33,7 @@ extern spinlock_t i8253_lock; static int use_tsc; /* Number of usecs that the last interrupt was delayed */ -static int delay_at_last_interrupt; +int tsc_delay_at_last_interrupt; static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */ static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */ @@ -104,7 +104,7 @@ static unsigned long get_offset_tsc(void "0" (eax)); /* our adjusted time offset in microseconds */ - return delay_at_last_interrupt + edx; + return tsc_delay_at_last_interrupt + edx; } static unsigned long long monotonic_clock_tsc(void) @@ -223,7 +223,7 @@ static void mark_offset_tsc(void) "0" (eax)); delta = edx; } - delta += delay_at_last_interrupt; + delta += tsc_delay_at_last_interrupt; lost = delta/(1000000/HZ); delay = delta%(1000000/HZ); if (lost >= 2) { @@ -248,15 +248,15 @@ static void mark_offset_tsc(void) monotonic_base += cycles_2_ns(this_offset - last_offset); write_sequnlock(&monotonic_lock); - /* calculate delay_at_last_interrupt */ + /* calculate tsc_delay_at_last_interrupt */ count = ((LATCH-1) - count) * TICK_SIZE; - delay_at_last_interrupt = (count + LATCH/2) / LATCH; + tsc_delay_at_last_interrupt = (count + LATCH/2) / LATCH; /* catch corner case where tick rollover occured * between tsc and pit reads (as noted when * usec delta is > 
90% # of usecs/tick) */ - if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ)) + if (lost && abs(delay - tsc_delay_at_last_interrupt) > (900000/HZ)) jiffies_64++; } @@ -308,7 +308,7 @@ static void mark_offset_tsc_hpet(void) monotonic_base += cycles_2_ns(this_offset - last_offset); write_sequnlock(&monotonic_lock); - /* calculate delay_at_last_interrupt */ + /* calculate tsc_delay_at_last_interrupt */ /* * Time offset = (hpet delta) * ( usecs per HPET clock ) * = (hpet delta) * ( usecs per tick / HPET clocks per tick) @@ -316,9 +316,9 @@ static void mark_offset_tsc_hpet(void) * Where, * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick */ - delay_at_last_interrupt = hpet_current - offset; - ASM_MUL64_REG(temp, delay_at_last_interrupt, - hpet_usec_quotient, delay_at_last_interrupt); + tsc_delay_at_last_interrupt = hpet_current - offset; + ASM_MUL64_REG(temp, tsc_delay_at_last_interrupt, + hpet_usec_quotient, tsc_delay_at_last_interrupt); } #endif diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/traps.c current/arch/i386/kernel/traps.c --- reference/arch/i386/kernel/traps.c 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/traps.c 2004-04-09 11:53:04.000000000 -0700 @@ -55,12 +55,8 @@ #include "mach_traps.h" -asmlinkage int system_call(void); -asmlinkage void lcall7(void); -asmlinkage void lcall27(void); - -struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, - { 0, 0 }, { 0, 0 } }; +struct desc_struct default_ldt[] __attribute__((__section__(".data.default_ldt"))) = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }; +struct page *default_ldt_page; /* Do we ignore FPU interrupts ? 
*/ char ignore_fpu_irq = 0; @@ -92,26 +88,127 @@ asmlinkage void alignment_check(void); asmlinkage void spurious_interrupt_bug(void); asmlinkage void machine_check(void); -static int kstack_depth_to_print = 24; +#ifdef CONFIG_KGDB +extern void sysenter_entry(void); +#include +#include +extern void int3(void); +extern void debug(void); +void set_intr_gate(unsigned int n, void *addr); +static void set_intr_usr_gate(unsigned int n, void *addr); +/* + * Should be able to call this breakpoint() very early in + * bring up. Just hard code the call where needed. + * The breakpoint() code is here because set_?_gate() functions + * are local (static) to trap.c. They need be done only once, + * but it does not hurt to do them over. + */ +void breakpoint(void) +{ + init_entry_mappings(); + set_intr_usr_gate(3,&int3); /* disable ints on trap */ + set_intr_gate(1,&debug); + set_intr_gate(14,&page_fault); -void show_trace(struct task_struct *task, unsigned long * stack) + BREAKPOINT; +} +#define CHK_REMOTE_DEBUG(trapnr,signr,error_code,regs,after) \ + { \ + if (!user_mode(regs) ) \ + { \ + kgdb_handle_exception(trapnr, signr, error_code, regs); \ + after; \ + } else if ((trapnr == 3) && (regs->eflags &0x200)) local_irq_enable(); \ + } +#else +#define CHK_REMOTE_DEBUG(trapnr,signr,error_code,regs,after) +#endif + +#define STACK_PRINT_DEPTH 32 + +#ifdef CONFIG_FRAME_POINTER +#define valid_stack_ptr(task, p) \ + ((p > (unsigned long)task->thread_info) && \ + (p < (unsigned long)task->thread_info+4096)) + +void show_stack_frame(unsigned long start, unsigned long end) +{ + int i; + + printk(" "); + for (i = start; i < end; i += 4) { + if ((i - start) && ((i - start)%24 == 0)) + printk("\n "); + printk("%08lx ", *(unsigned long *) i); + } + printk("\n"); +} + +void show_trace_fp(struct task_struct *task, unsigned long * stack) +{ + unsigned long addr, ebp; + + if (!task) + task = current; + + if (task == current) { + /* Grab ebp right from our regs */ + asm ("movl %%ebp, %0" : "=r" 
(ebp) : ); + } else { + /* ebp is the last reg pushed by switch_to */ + ebp = *(unsigned long *) task->thread.esp; + } + + show_stack_frame((unsigned long) stack, ebp+4); + while (valid_stack_ptr(task, ebp)) { + addr = *(unsigned long *) (ebp + 4); + printk(" [<%08lx>] ", addr); + print_symbol("%s\n", addr); + + /* Show the stack frame starting with args */ + show_stack_frame(ebp + 8, (*(unsigned long *) ebp) + 4); + ebp = *(unsigned long *) ebp; + } +} + +#else /* !CONFIG_FRAME_POINTER */ + +void show_trace_guess(unsigned long * stack) { unsigned long addr; if (!stack) stack = (unsigned long*)&stack; + while (1) { + struct thread_info *context; + context = (struct thread_info*) ((unsigned long)stack & (~(THREAD_SIZE - 1))); + while (!kstack_end(stack)) { + addr = *stack++; + if (kernel_text_address(addr)) { + printk(" [<%08lx>] ", addr); + print_symbol("%s\n", addr); + } + } + stack = (unsigned long*)context->previous_esp; + if (!stack) + break; + printk(" =======================\n"); + } +} +#endif + +void show_trace(struct task_struct *task, unsigned long * stack) +{ printk("Call Trace:"); #ifdef CONFIG_KALLSYMS printk("\n"); #endif - while (!kstack_end(stack)) { - addr = *stack++; - if (kernel_text_address(addr)) { - printk(" [<%08lx>] ", addr); - print_symbol("%s\n", addr); - } - } +#ifdef CONFIG_FRAME_POINTER + show_trace_fp(task, stack); +#else + show_trace_guess(stack); +#endif printk("\n"); } @@ -127,8 +224,10 @@ void show_trace_task(struct task_struct void show_stack(struct task_struct *task, unsigned long *esp) { +#ifndef CONFIG_FRAME_POINTER unsigned long *stack; int i; +#endif if (esp == NULL) { if (task) @@ -137,8 +236,9 @@ void show_stack(struct task_struct *task esp = (unsigned long *)&esp; } +#ifndef CONFIG_FRAME_POINTER stack = esp; - for(i = 0; i < kstack_depth_to_print; i++) { + for(i = 0; i < STACK_PRINT_DEPTH; i++) { if (kstack_end(stack)) break; if (i && ((i % 8) == 0)) @@ -146,6 +246,7 @@ void show_stack(struct task_struct *task 
printk("%08lx ", *stack++); } printk("\n"); +#endif show_trace(task, esp); } @@ -176,7 +277,7 @@ void show_registers(struct pt_regs *regs ss = regs->xss & 0xffff; } print_modules(); - printk("CPU: %d\nEIP: %04x:[<%08lx>] %s\nEFLAGS: %08lx" + printk("CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\nEFLAGS: %08lx" " (%s) \n", smp_processor_id(), 0xffff & regs->xcs, regs->eip, print_tainted(), regs->eflags, UTS_RELEASE); @@ -194,23 +295,27 @@ void show_registers(struct pt_regs *regs * time of the fault.. */ if (in_kernel) { + u8 *eip; printk("\nStack: "); show_stack(NULL, (unsigned long*)esp); printk("Code: "); - if(regs->eip < PAGE_OFFSET) - goto bad; - for(i=0;i<20;i++) - { - unsigned char c; - if(__get_user(c, &((unsigned char*)regs->eip)[i])) { -bad: + eip = (u8 *)regs->eip - 43; + for (i = 0; i < 64; i++, eip++) { + unsigned char c = 0xff; + + if ((user_mode(regs) && get_user(c, eip)) || + (!user_mode(regs) && __direct_get_user(c, eip))) { + printk(" Bad EIP value."); break; } - printk("%02x ", c); + if (eip == (u8 *)regs->eip) + printk("<%02x> ", c); + else + printk("%02x ", c); } } printk("\n"); @@ -229,16 +334,14 @@ static void handle_BUG(struct pt_regs *r eip = regs->eip; - if (eip < PAGE_OFFSET) - goto no_bug; - if (__get_user(ud2, (unsigned short *)eip)) + if (__direct_get_user(ud2, (unsigned short *)eip)) goto no_bug; if (ud2 != 0x0b0f) goto no_bug; - if (__get_user(line, (unsigned short *)(eip + 2))) + if (__direct_get_user(line, (unsigned short *)(eip + 2))) goto bug; - if (__get_user(file, (char **)(eip + 4)) || - (unsigned long)file < PAGE_OFFSET || __get_user(c, file)) + if (__direct_get_user(file, (char **)(eip + 4)) || + __direct_get_user(c, file)) file = ""; printk("------------[ cut here ]------------\n"); @@ -278,6 +381,15 @@ void die(const char * str, struct pt_reg #endif if (nl) printk("\n"); +#ifdef CONFIG_KGDB + /* This is about the only place we want to go to kgdb even if in + * user mode. 
But we must go in via a trap so within kgdb we will + * always be in kernel mode. + */ + if (user_mode(regs)) + BREAKPOINT; +#endif + CHK_REMOTE_DEBUG(0,SIGTRAP,err,regs,) show_registers(regs); bust_spinlocks(0); spin_unlock_irq(&die_lock); @@ -347,6 +459,7 @@ static inline void do_trap(int trapnr, i #define DO_ERROR(trapnr, signr, str, name) \ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ { \ + CHK_REMOTE_DEBUG(trapnr,signr,error_code,regs,)\ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \ } @@ -364,7 +477,9 @@ asmlinkage void do_##name(struct pt_regs #define DO_VM86_ERROR(trapnr, signr, str, name) \ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ { \ + CHK_REMOTE_DEBUG(trapnr, signr, error_code,regs, return)\ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \ + return; \ } #define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ @@ -411,8 +526,10 @@ gp_in_vm86: return; gp_in_kernel: - if (!fixup_exception(regs)) + if (!fixup_exception(regs)){ + CHK_REMOTE_DEBUG(13,SIGSEGV,error_code,regs,) die("general protection fault", regs, error_code); + } } static void mem_parity_error(unsigned char reason, struct pt_regs * regs) @@ -551,10 +668,18 @@ asmlinkage void do_debug(struct pt_regs if (regs->eflags & X86_EFLAGS_IF) local_irq_enable(); - /* Mask out spurious debug traps due to lazy DR7 setting */ + /* + * Mask out spurious debug traps due to lazy DR7 setting or + * due to 4G/4G kernel mode: + */ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { if (!tsk->thread.debugreg[7]) goto clear_dr7; + if (!user_mode(regs)) { + // restore upon return-to-userspace: + set_thread_flag(TIF_DB7); + goto clear_dr7; + } } if (regs->eflags & VM_MASK) @@ -574,8 +699,18 @@ asmlinkage void do_debug(struct pt_regs * allowing programs to debug themselves without the ptrace() * interface. 
*/ +#ifdef CONFIG_KGDB + /* + * I think this is the only "real" case of a TF in the kernel + * that really belongs to user space. Others are + * "Ours all ours!" + */ + if (((regs->xcs & 3) == 0) && ((void *)regs->eip == sysenter_entry)) + goto clear_TF_reenable; +#else if ((regs->xcs & 3) == 0) goto clear_TF_reenable; +#endif if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE) goto clear_TF; } @@ -587,6 +722,17 @@ asmlinkage void do_debug(struct pt_regs info.si_errno = 0; info.si_code = TRAP_BRKPT; +#ifdef CONFIG_KGDB + /* + * If this is a kernel mode trap, we need to reset db7 to allow us + * to continue sanely ALSO skip the signal delivery + */ + if ((regs->xcs & 3) == 0) + goto clear_dr7; + + /* if not kernel, allow ints but only if they were on */ + if ( regs->eflags & 0x200) local_irq_enable(); +#endif /* If this is a kernel mode trap, save the user PC on entry to * the kernel, that's what the debugger can make sense of. */ @@ -601,6 +747,7 @@ clear_dr7: __asm__("movl %0,%%db7" : /* no output */ : "r" (0)); + CHK_REMOTE_DEBUG(1,SIGTRAP,error_code,regs,) return; debug_vm86: @@ -796,19 +943,53 @@ asmlinkage void math_emulate(long arg) #endif /* CONFIG_MATH_EMULATION */ -#ifdef CONFIG_X86_F00F_BUG -void __init trap_init_f00f_bug(void) +void __init trap_init_virtual_IDT(void) { - __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); - /* - * Update the IDT descriptor and reload the IDT so that - * it uses the read-only mapped virtual address. + * "idt" is magic - it overlaps the idt_descr + * variable so that updating idt will automatically + * update the idt descriptor.. 
*/ - idt_descr.address = fix_to_virt(FIX_F00F_IDT); + __set_fixmap(FIX_IDT, __pa(&idt_table), PAGE_KERNEL_RO); + idt_descr.address = __fix_to_virt(FIX_IDT); + __asm__ __volatile__("lidt %0" : : "m" (idt_descr)); } + +void __init trap_init_virtual_GDT(void) +{ + int cpu = smp_processor_id(); + struct Xgt_desc_struct *gdt_desc = cpu_gdt_descr + cpu; + struct Xgt_desc_struct tmp_desc = {0, 0}; + struct tss_struct * t; + + __asm__ __volatile__("sgdt %0": "=m" (tmp_desc): :"memory"); + +#ifdef CONFIG_X86_HIGH_ENTRY + if (!cpu) { + __set_fixmap(FIX_GDT_0, __pa(cpu_gdt_table), PAGE_KERNEL); + __set_fixmap(FIX_GDT_1, __pa(cpu_gdt_table) + PAGE_SIZE, PAGE_KERNEL); + __set_fixmap(FIX_TSS_0, __pa(init_tss), PAGE_KERNEL); + __set_fixmap(FIX_TSS_1, __pa(init_tss) + 1*PAGE_SIZE, PAGE_KERNEL); + __set_fixmap(FIX_TSS_2, __pa(init_tss) + 2*PAGE_SIZE, PAGE_KERNEL); + __set_fixmap(FIX_TSS_3, __pa(init_tss) + 3*PAGE_SIZE, PAGE_KERNEL); + } + + gdt_desc->address = __fix_to_virt(FIX_GDT_0) + sizeof(cpu_gdt_table[0]) * cpu; +#else + gdt_desc->address = (unsigned long)cpu_gdt_table[cpu]; +#endif + __asm__ __volatile__("lgdt %0": "=m" (*gdt_desc)); + +#ifdef CONFIG_X86_HIGH_ENTRY + t = (struct tss_struct *) __fix_to_virt(FIX_TSS_0) + cpu; +#else + t = init_tss + cpu; #endif + set_tss_desc(cpu, t); + cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff; + load_TR_desc(); +} #define _set_gate(gate_addr,type,dpl,addr,seg) \ do { \ @@ -835,20 +1016,26 @@ void set_intr_gate(unsigned int n, void _set_gate(idt_table+n,14,0,addr,__KERNEL_CS); } -static void __init set_trap_gate(unsigned int n, void *addr) +void __init set_trap_gate(unsigned int n, void *addr) { _set_gate(idt_table+n,15,0,addr,__KERNEL_CS); } -static void __init set_system_gate(unsigned int n, void *addr) +void __init set_system_gate(unsigned int n, void *addr) { _set_gate(idt_table+n,15,3,addr,__KERNEL_CS); } -static void __init set_call_gate(void *a, void *addr) +void __init set_call_gate(void *a, void *addr) { 
_set_gate(a,12,3,addr,__KERNEL_CS); } +#ifdef CONFIG_KGDB +void set_intr_usr_gate(unsigned int n, void *addr) +{ + _set_gate(idt_table+n,14,3,addr,__KERNEL_CS); +} +#endif static void __init set_task_gate(unsigned int n, unsigned int gdt_entry) { @@ -867,11 +1054,16 @@ void __init trap_init(void) #ifdef CONFIG_X86_LOCAL_APIC init_apic_mappings(); #endif + init_entry_mappings(); set_trap_gate(0,÷_error); set_intr_gate(1,&debug); set_intr_gate(2,&nmi); +#ifndef CONFIG_KGDB set_system_gate(3,&int3); /* int3-5 can be called from all */ +#else + set_intr_usr_gate(3,&int3); /* int3-5 can be called from all */ +#endif set_system_gate(4,&overflow); set_system_gate(5,&bounds); set_trap_gate(6,&invalid_op); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/vm86.c current/arch/i386/kernel/vm86.c --- reference/arch/i386/kernel/vm86.c 2004-03-11 14:33:37.000000000 -0800 +++ current/arch/i386/kernel/vm86.c 2004-04-09 11:53:00.000000000 -0700 @@ -125,7 +125,7 @@ struct pt_regs * fastcall save_v86_state tss = init_tss + get_cpu(); current->thread.esp0 = current->thread.saved_esp0; current->thread.sysenter_cs = __KERNEL_CS; - load_esp0(tss, ¤t->thread); + load_virtual_esp0(tss, current); current->thread.saved_esp0 = 0; put_cpu(); @@ -305,7 +305,7 @@ static void do_sys_vm86(struct kernel_vm tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; - load_esp0(tss, &tsk->thread); + load_virtual_esp0(tss, tsk); put_cpu(); tsk->thread.screen_bitmap = info->screen_bitmap; diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/vmlinux.lds.S current/arch/i386/kernel/vmlinux.lds.S --- reference/arch/i386/kernel/vmlinux.lds.S 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/vmlinux.lds.S 2004-04-09 21:41:40.000000000 -0700 @@ -5,13 +5,18 @@ #include #include +#include +#include +#include +#include + OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") OUTPUT_ARCH(i386) ENTRY(startup_32) -jiffies = 
jiffies_64; + SECTIONS { - . = 0xC0000000 + 0x100000; + . = __PAGE_OFFSET + 0x100000; /* read-only */ _text = .; /* Text and read-only data */ .text : { @@ -20,6 +25,19 @@ SECTIONS *(.gnu.warning) } = 0x9090 +#ifdef CONFIG_X86_4G + . = ALIGN(PAGE_SIZE_asm); + __entry_tramp_start = .; + . = FIX_ENTRY_TRAMPOLINE_0_addr; + __start___entry_text = .; + .entry.text : AT (__entry_tramp_start) { *(.entry.text) } + __entry_tramp_end = __entry_tramp_start + SIZEOF(.entry.text); + . = __entry_tramp_end; + . = ALIGN(PAGE_SIZE_asm); +#else + .entry.text : { *(.entry.text) } +#endif + _etext = .; /* End of text section */ . = ALIGN(16); /* Exception table */ @@ -35,25 +53,95 @@ SECTIONS CONSTRUCTORS } - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE_asm); __nosave_begin = .; .data_nosave : { *(.data.nosave) } - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE_asm); __nosave_end = .; - . = ALIGN(4096); - .data.page_aligned : { *(.data.idt) } - . = ALIGN(32); .data.cacheline_aligned : { *(.data.cacheline_aligned) } _edata = .; /* End of data section */ +/* VSYSCALL_GTOD data */ +#ifdef CONFIG_VSYSCALL_GTOD + + /* vsyscall entry */ + . = ALIGN(64); + .data.cacheline_aligned : { *(.data.cacheline_aligned) } + + .vsyscall_0 VSYSCALL_GTOD_START: AT ((LOADADDR(.data.cacheline_aligned) + SIZEOF(.data.cacheline_aligned) + 4095) & ~(4095)) { *(.vsyscall_0) } + __vsyscall_0 = LOADADDR(.vsyscall_0); + + + /* generic gtod variables */ + . = ALIGN(64); + .vsyscall_timesource : AT ((LOADADDR(.vsyscall_0) + SIZEOF(.vsyscall_0) + 63) & ~(63)) { *(.vsyscall_timesource) } + vsyscall_timesource = LOADADDR(.vsyscall_timesource); + + . = ALIGN(16); + .xtime_lock : AT ((LOADADDR(.vsyscall_timesource) + SIZEOF(.vsyscall_timesource) + 15) & ~(15)) { *(.xtime_lock) } + xtime_lock = LOADADDR(.xtime_lock); + + . = ALIGN(16); + .xtime : AT ((LOADADDR(.xtime_lock) + SIZEOF(.xtime_lock) + 15) & ~(15)) { *(.xtime) } + xtime = LOADADDR(.xtime); + + . 
= ALIGN(16); + .jiffies : AT ((LOADADDR(.xtime) + SIZEOF(.xtime) + 15) & ~(15)) { *(.jiffies) } + jiffies = LOADADDR(.jiffies); + + . = ALIGN(16); + .wall_jiffies : AT ((LOADADDR(.jiffies) + SIZEOF(.jiffies) + 15) & ~(15)) { *(.wall_jiffies) } + wall_jiffies = LOADADDR(.wall_jiffies); + + .sys_tz : AT (LOADADDR(.wall_jiffies) + SIZEOF(.wall_jiffies)) { *(.sys_tz) } + sys_tz = LOADADDR(.sys_tz); + + /* NTP variables */ + .tickadj : AT (LOADADDR(.sys_tz) + SIZEOF(.sys_tz)) { *(.tickadj) } + tickadj = LOADADDR(.tickadj); + + .time_adjust : AT (LOADADDR(.tickadj) + SIZEOF(.tickadj)) { *(.time_adjust) } + time_adjust = LOADADDR(.time_adjust); + + /* TSC variables*/ + .last_tsc_low : AT (LOADADDR(.time_adjust) + SIZEOF(.time_adjust)) { *(.last_tsc_low) } + last_tsc_low = LOADADDR(.last_tsc_low); + + .tsc_delay_at_last_interrupt : AT (LOADADDR(.last_tsc_low) + SIZEOF(.last_tsc_low)) { *(.tsc_delay_at_last_interrupt) } + tsc_delay_at_last_interrupt = LOADADDR(.tsc_delay_at_last_interrupt); + + .fast_gettimeoffset_quotient : AT (LOADADDR(.tsc_delay_at_last_interrupt) + SIZEOF(.tsc_delay_at_last_interrupt)) { *(.fast_gettimeoffset_quotient) } + fast_gettimeoffset_quotient = LOADADDR(.fast_gettimeoffset_quotient); + + + /*cyclone values*/ + .cyclone_timer : AT (LOADADDR(.fast_gettimeoffset_quotient) + SIZEOF(.fast_gettimeoffset_quotient)) { *(.cyclone_timer) } + cyclone_timer = LOADADDR(.cyclone_timer); + + .last_cyclone_low : AT (LOADADDR(.cyclone_timer) + SIZEOF(.cyclone_timer)) { *(.last_cyclone_low) } + last_cyclone_low = LOADADDR(.last_cyclone_low); + + .cyclone_delay_at_last_interrupt : AT (LOADADDR(.last_cyclone_low) + SIZEOF(.last_cyclone_low)) { *(.cyclone_delay_at_last_interrupt) } + cyclone_delay_at_last_interrupt = LOADADDR(.cyclone_delay_at_last_interrupt); + + + .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT (LOADADDR(.vsyscall_0) + 1024) { *(.vsyscall_1) } + . 
= LOADADDR(.vsyscall_0) + 4096; + + jiffies_64 = jiffies; +#else + jiffies = jiffies_64; +#endif +/* END of VSYSCALL_GTOD data*/ + . = ALIGN(THREAD_SIZE); /* init_task */ .data.init_task : { *(.data.init_task) } /* will be freed after init */ - . = ALIGN(4096); /* Init code and data */ + . = ALIGN(PAGE_SIZE_asm); /* Init code and data */ __init_begin = .; .init.text : { _sinittext = .; @@ -92,7 +180,7 @@ SECTIONS from .altinstructions and .eh_frame */ .exit.text : { *(.exit.text) } .exit.data : { *(.exit.data) } - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE_asm); __initramfs_start = .; .init.ramfs : { *(.init.ramfs) } __initramfs_end = .; @@ -100,10 +188,22 @@ SECTIONS __per_cpu_start = .; .data.percpu : { *(.data.percpu) } __per_cpu_end = .; - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE_asm); __init_end = .; /* freed after init ends here */ - + + . = ALIGN(PAGE_SIZE_asm); + .data.page_aligned_tss : { *(.data.tss) } + + . = ALIGN(PAGE_SIZE_asm); + .data.page_aligned_default_ldt : { *(.data.default_ldt) } + + . = ALIGN(PAGE_SIZE_asm); + .data.page_aligned_idt : { *(.data.idt) } + + . = ALIGN(PAGE_SIZE_asm); + .data.page_aligned_gdt : { *(.data.gdt) } + __bss_start = .; /* BSS */ .bss : { *(.bss) } . = ALIGN(4); @@ -128,4 +228,6 @@ SECTIONS .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } + + } diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/vsyscall-gtod.c current/arch/i386/kernel/vsyscall-gtod.c --- reference/arch/i386/kernel/vsyscall-gtod.c 1969-12-31 16:00:00.000000000 -0800 +++ current/arch/i386/kernel/vsyscall-gtod.c 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,275 @@ +/* + * linux/arch/i386/kernel/vsyscall-gtod.c + * + * Copyright (C) 2001 Andrea Arcangeli SuSE + * Copyright (C) 2003,2004 John Stultz IBM + * + * Thanks to hpa@transmeta.com for some useful hint. 
+ * Special thanks to Ingo Molnar for his early experience with + * a different vsyscall implementation for Linux/IA32 and for the name. + * + * vsyscall 0 is located at VSYSCALL_START, vsyscall 1 is located + * at virtual address VSYSCALL_START+1024bytes etc... + * + * Originally written for x86-64 by Andrea Arcangeli + * Ported to i386 by John Stultz + */ + + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int errno; +static inline _syscall2(int,gettimeofday,struct timeval *,tv,struct timezone *,tz); +static int vsyscall_mapped = 0; /* flag variable for remap_vsyscall() */ + +enum vsyscall_timesource_e vsyscall_timesource; +enum vsyscall_timesource_e __vsyscall_timesource __section_vsyscall_timesource; + +/* readonly clones of generic time values */ +seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED; +struct timespec __xtime __section_xtime; +volatile unsigned long __jiffies __section_jiffies; +unsigned long __wall_jiffies __section_wall_jiffies; +struct timezone __sys_tz __section_sys_tz; +/* readonly clones of ntp time variables */ +int __tickadj __section_tickadj; +long __time_adjust __section_time_adjust; + +/* readonly clones of TSC timesource values*/ +unsigned long __last_tsc_low __section_last_tsc_low; +int __tsc_delay_at_last_interrupt __section_tsc_delay_at_last_interrupt; +unsigned long __fast_gettimeoffset_quotient __section_fast_gettimeoffset_quotient; + +/* readonly clones of cyclone timesource values*/ +u32* __cyclone_timer __section_cyclone_timer; /* Cyclone MPMC0 register */ +u32 __last_cyclone_low __section_last_cyclone_low; +int __cyclone_delay_at_last_interrupt __section_cyclone_delay_at_last_interrupt; + + +static inline unsigned long vgettimeoffset_tsc(void) +{ + unsigned long eax, edx; + + /* Read the Time Stamp Counter */ + rdtsc(eax,edx); + + /* .. 
relative to previous jiffy (32 bits is enough) */ + eax -= __last_tsc_low; /* tsc_low delta */ + + /* + * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient + * = (tsc_low delta) * (usecs_per_clock) + * = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy) + * + * Using a mull instead of a divl saves up to 31 clock cycles + * in the critical path. + */ + + + __asm__("mull %2" + :"=a" (eax), "=d" (edx) + :"rm" (__fast_gettimeoffset_quotient), + "0" (eax)); + + /* our adjusted time offset in microseconds */ + return __tsc_delay_at_last_interrupt + edx; + +} + +static inline unsigned long vgettimeoffset_cyclone(void) +{ + u32 offset; + + if (!__cyclone_timer) + return 0; + + /* Read the cyclone timer */ + offset = __cyclone_timer[0]; + + /* .. relative to previous jiffy */ + offset = offset - __last_cyclone_low; + + /* convert cyclone ticks to microseconds */ + offset = offset/(CYCLONE_TIMER_FREQ/1000000); + + /* our adjusted time offset in microseconds */ + return __cyclone_delay_at_last_interrupt + offset; +} + +static inline void do_vgettimeofday(struct timeval * tv) +{ + long sequence; + unsigned long usec, sec; + unsigned long lost; + unsigned long max_ntp_tick; + + /* If we don't have a valid vsyscall time source, + * just call gettimeofday() + */ + if (__vsyscall_timesource == VSYSCALL_GTOD_NONE) { + gettimeofday(tv, NULL); + return; + } + + + do { + sequence = read_seqbegin(&__xtime_lock); + + /* Get the high-res offset */ + if (__vsyscall_timesource == VSYSCALL_GTOD_CYCLONE) + usec = vgettimeoffset_cyclone(); + else + usec = vgettimeoffset_tsc(); + + lost = __jiffies - __wall_jiffies; + + /* + * If time_adjust is negative then NTP is slowing the clock + * so make sure not to go into next possible interval. + * Better to lose some accuracy than have time go backwards.. 
+ */ + if (unlikely(__time_adjust < 0)) { + max_ntp_tick = (USEC_PER_SEC / HZ) - __tickadj; + usec = min(usec, max_ntp_tick); + + if (lost) + usec += lost * max_ntp_tick; + } + else if (unlikely(lost)) + usec += lost * (USEC_PER_SEC / HZ); + + sec = __xtime.tv_sec; + usec += (__xtime.tv_nsec / 1000); + + } while (read_seqretry(&__xtime_lock, sequence)); + + tv->tv_sec = sec + usec / 1000000; + tv->tv_usec = usec % 1000000; +} + +static inline void do_get_tz(struct timezone * tz) +{ + long sequence; + + do { + sequence = read_seqbegin(&__xtime_lock); + + *tz = __sys_tz; + + } while (read_seqretry(&__xtime_lock, sequence)); +} + +static int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz) +{ + if (tv) + do_vgettimeofday(tv); + if (tz) + do_get_tz(tz); + return 0; +} + +static time_t __vsyscall(1) vtime(time_t * t) +{ + struct timeval tv; + vgettimeofday(&tv,NULL); + if (t) + *t = tv.tv_sec; + return tv.tv_sec; +} + +static long __vsyscall(2) venosys_0(void) +{ + return -ENOSYS; +} + +static long __vsyscall(3) venosys_1(void) +{ + return -ENOSYS; +} + + +void vsyscall_set_timesource(char* name) +{ + if (!strncmp(name, "tsc", 3)) + vsyscall_timesource = VSYSCALL_GTOD_TSC; + else if (!strncmp(name, "cyclone", 7)) + vsyscall_timesource = VSYSCALL_GTOD_CYCLONE; + else + vsyscall_timesource = VSYSCALL_GTOD_NONE; +} + + +static void __init map_vsyscall(void) +{ + unsigned long physaddr_page0 = (unsigned long) &__vsyscall_0 - PAGE_OFFSET; + + /* Initially we map the VSYSCALL page w/ PAGE_KERNEL permissions to + * keep the alternate_instruction code from bombing out when it + * changes the seq_lock memory barriers in vgettimeofday() + */ + __set_fixmap(FIX_VSYSCALL_GTOD_FIRST_PAGE, physaddr_page0, PAGE_KERNEL); +} + +static int __init remap_vsyscall(void) +{ + unsigned long physaddr_page0 = (unsigned long) &__vsyscall_0 - PAGE_OFFSET; + + if (!vsyscall_mapped) + return 0; + + /* Remap the VSYSCALL page w/ PAGE_KERNEL_VSYSCALL permissions + * after the 
alternate_instruction code has run + */ + clear_fixmap(FIX_VSYSCALL_GTOD_FIRST_PAGE); + __set_fixmap(FIX_VSYSCALL_GTOD_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL); + + return 0; +} + +int __init vsyscall_init(void) +{ + printk("VSYSCALL: consistency checks..."); + if ((unsigned long) &vgettimeofday != VSYSCALL_ADDR(__NR_vgettimeofday)) { + printk("vgettimeofday link addr broken\n"); + printk("VSYSCALL: vsyscall_init failed!\n"); + return -EFAULT; + } + if ((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime)) { + printk("vtime link addr broken\n"); + printk("VSYSCALL: vsyscall_init failed!\n"); + return -EFAULT; + } + if (VSYSCALL_ADDR(0) != __fix_to_virt(FIX_VSYSCALL_GTOD_FIRST_PAGE)) { + printk("fixmap first vsyscall 0x%lx should be 0x%x\n", + __fix_to_virt(FIX_VSYSCALL_GTOD_FIRST_PAGE), + VSYSCALL_ADDR(0)); + printk("VSYSCALL: vsyscall_init failed!\n"); + return -EFAULT; + } + + + printk("passed...mapping..."); + map_vsyscall(); + printk("done.\n"); + vsyscall_mapped = 1; + printk("VSYSCALL: fixmap virt addr: 0x%lx\n", + __fix_to_virt(FIX_VSYSCALL_GTOD_FIRST_PAGE)); + + return 0; +} + +__initcall(remap_vsyscall); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/vsyscall-int80.S current/arch/i386/kernel/vsyscall-int80.S --- reference/arch/i386/kernel/vsyscall-int80.S 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/vsyscall-int80.S 2004-04-09 21:41:41.000000000 -0700 @@ -1,3 +1,6 @@ +#include +#include +#include /* * Code for the vsyscall page. This version uses the old int $0x80 method. 
* @@ -12,8 +15,26 @@ .type __kernel_vsyscall,@function __kernel_vsyscall: .LSTART_vsyscall: +#ifdef CONFIG_VSYSCALL_GTOD + cmp $__NR_gettimeofday, %eax + je .Lvgettimeofday +#endif /* CONFIG_VSYSCALL_GTOD */ int $0x80 ret + +#ifdef CONFIG_VSYSCALL_GTOD +/* vsyscall-gettimeofday code */ +.Lvgettimeofday: + pushl %edx + pushl %ecx + pushl %ebx + call VSYSCALL_GTOD_START + popl %ebx + popl %ecx + popl %edx + ret +#endif /* CONFIG_VSYSCALL_GTOD */ + .LEND_vsyscall: .size __kernel_vsyscall,.-.LSTART_vsyscall .previous diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/vsyscall-sysenter.S current/arch/i386/kernel/vsyscall-sysenter.S --- reference/arch/i386/kernel/vsyscall-sysenter.S 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/kernel/vsyscall-sysenter.S 2004-04-09 21:41:41.000000000 -0700 @@ -1,3 +1,6 @@ +#include +#include +#include /* * Code for the vsyscall page. This version uses the sysenter instruction. * @@ -12,6 +15,15 @@ .type __kernel_vsyscall,@function __kernel_vsyscall: .LSTART_vsyscall: +#ifdef CONFIG_VSYSCALL_GTOD + cmp $__NR_gettimeofday, %eax + je .Lvgettimeofday +#endif /* CONFIG_VSYSCALL_GTOD */ + cmpl $192, %eax + jne 1f + int $0x80 + ret +1: push %ecx .Lpush_ecx: push %edx @@ -36,6 +48,20 @@ SYSENTER_RETURN: pop %ecx .Lpop_ecx: ret + +#ifdef CONFIG_VSYSCALL_GTOD +/* vsyscall-gettimeofday code */ +.Lvgettimeofday: + pushl %edx + pushl %ecx + pushl %ebx + call VSYSCALL_GTOD_START + popl %ebx + popl %ecx + popl %edx + ret +#endif /* CONFIG_VSYSCALL_GTOD */ + .LEND_vsyscall: .size __kernel_vsyscall,.-.LSTART_vsyscall .previous diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/kernel/vsyscall.lds current/arch/i386/kernel/vsyscall.lds --- reference/arch/i386/kernel/vsyscall.lds 2003-06-05 14:35:02.000000000 -0700 +++ current/arch/i386/kernel/vsyscall.lds 2004-04-09 11:53:00.000000000 -0700 @@ -5,7 +5,7 @@ */ /* This must match . 
*/ -VSYSCALL_BASE = 0xffffe000; +VSYSCALL_BASE = 0xffffd000; SECTIONS { diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/lib/Makefile current/arch/i386/lib/Makefile --- reference/arch/i386/lib/Makefile 2004-04-07 14:53:56.000000000 -0700 +++ current/arch/i386/lib/Makefile 2004-04-09 11:53:01.000000000 -0700 @@ -9,3 +9,5 @@ lib-y = checksum.o delay.o \ lib-$(CONFIG_X86_USE_3DNOW) += mmx.o lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o +lib-$(CONFIG_KGDB) += kgdb_serial.o +lib-$(CONFIG_MCOUNT) += mcount.o diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/lib/checksum.S current/arch/i386/lib/checksum.S --- reference/arch/i386/lib/checksum.S 2002-12-09 18:45:52.000000000 -0800 +++ current/arch/i386/lib/checksum.S 2004-04-09 11:53:00.000000000 -0700 @@ -280,14 +280,14 @@ unsigned int csum_partial_copy_generic ( .previous .align 4 -.globl csum_partial_copy_generic +.globl direct_csum_partial_copy_generic #ifndef CONFIG_X86_USE_PPRO_CHECKSUM #define ARGBASE 16 #define FP 12 -csum_partial_copy_generic: +direct_csum_partial_copy_generic: subl $4,%esp pushl %edi pushl %esi @@ -422,7 +422,7 @@ DST( movb %cl, (%edi) ) #define ARGBASE 12 -csum_partial_copy_generic: +direct_csum_partial_copy_generic: pushl %ebx pushl %edi pushl %esi diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/lib/dec_and_lock.c current/arch/i386/lib/dec_and_lock.c --- reference/arch/i386/lib/dec_and_lock.c 2002-12-09 18:45:50.000000000 -0800 +++ current/arch/i386/lib/dec_and_lock.c 2004-04-08 15:10:21.000000000 -0700 @@ -10,6 +10,7 @@ #include #include +#ifndef ATOMIC_DEC_AND_LOCK int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) { int counter; @@ -38,3 +39,5 @@ slow_path: spin_unlock(lock); return 0; } +#endif + diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/lib/getuser.S current/arch/i386/lib/getuser.S --- reference/arch/i386/lib/getuser.S 2002-12-09 18:45:40.000000000 -0800 +++ current/arch/i386/lib/getuser.S 2004-04-09 11:53:00.000000000 -0700 @@ 
-9,6 +9,7 @@ * return value. */ #include +#include /* @@ -28,7 +29,7 @@ .globl __get_user_1 __get_user_1: GET_THREAD_INFO(%edx) - cmpl TI_ADDR_LIMIT(%edx),%eax + cmpl TI_addr_limit(%edx),%eax jae bad_get_user 1: movzbl (%eax),%edx xorl %eax,%eax @@ -40,7 +41,7 @@ __get_user_2: addl $1,%eax jc bad_get_user GET_THREAD_INFO(%edx) - cmpl TI_ADDR_LIMIT(%edx),%eax + cmpl TI_addr_limit(%edx),%eax jae bad_get_user 2: movzwl -1(%eax),%edx xorl %eax,%eax @@ -52,7 +53,7 @@ __get_user_4: addl $3,%eax jc bad_get_user GET_THREAD_INFO(%edx) - cmpl TI_ADDR_LIMIT(%edx),%eax + cmpl TI_addr_limit(%edx),%eax jae bad_get_user 3: movl -3(%eax),%edx xorl %eax,%eax diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/lib/kgdb_serial.c current/arch/i386/lib/kgdb_serial.c --- reference/arch/i386/lib/kgdb_serial.c 1969-12-31 16:00:00.000000000 -0800 +++ current/arch/i386/lib/kgdb_serial.c 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,499 @@ +/* + * Serial interface GDB stub + * + * Written (hacked together) by David Grothe (dave@gcom.com) + * Modified to allow invokation early in boot see also + * kgdb.h for instructions by George Anzinger(george@mvista.com) + * Modified to handle debugging over ethernet by Robert Walsh + * and wangdi , based on + * code by San Mehat. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_KGDB_USER_CONSOLE +extern void kgdb_console_finit(void); +#endif +#define PRNT_off +#define TEST_EXISTANCE +#ifdef PRNT +#define dbprintk(s) printk s +#else +#define dbprintk(s) +#endif +#define TEST_INTERRUPT_off +#ifdef TEST_INTERRUPT +#define intprintk(s) printk s +#else +#define intprintk(s) +#endif + +#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? 
SA_SHIRQ : SA_INTERRUPT) + +#define GDB_BUF_SIZE 512 /* power of 2, please */ + +static char gdb_buf[GDB_BUF_SIZE]; +static int gdb_buf_in_inx; +static atomic_t gdb_buf_in_cnt; +static int gdb_buf_out_inx; + +struct async_struct *gdb_async_info; +static int gdb_async_irq; + +#define outb_px(a,b) outb_p(b,a) + +static void program_uart(struct async_struct *info); +static void write_char(struct async_struct *info, int chr); +/* + * Get a byte from the hardware data buffer and return it + */ +static int +read_data_bfr(struct async_struct *info) +{ + char it = inb_p(info->port + UART_LSR); + + if (it & UART_LSR_DR) + return (inb_p(info->port + UART_RX)); + /* + * If we have a framing error assume somebody messed with + * our uart. Reprogram it and send '-' both ways... + */ + if (it & 0xc) { + program_uart(info); + write_char(info, '-'); + return ('-'); + } + return (-1); + +} /* read_data_bfr */ + +/* + * Get a char if available, return -1 if nothing available. + * Empty the receive buffer first, then look at the interface hardware. + + * Locking here is a bit of a problem. We MUST not lock out communication + * if we are trying to talk to gdb about a kgdb entry. ON the other hand + * we can loose chars in the console pass thru if we don't lock. It is also + * possible that we could hold the lock or be waiting for it when kgdb + * NEEDS to talk. Since kgdb locks down the world, it does not need locks. + * We do, of course have possible issues with interrupting a uart operation, + * but we will just depend on the uart status to help keep that straight. 
+ + */ +static spinlock_t uart_interrupt_lock = SPIN_LOCK_UNLOCKED; +#ifdef CONFIG_SMP +extern spinlock_t kgdb_spinlock; +#endif + +static int +read_char(struct async_struct *info) +{ + int chr; + unsigned long flags; + local_irq_save(flags); +#ifdef CONFIG_SMP + if (!spin_is_locked(&kgdb_spinlock)) { + spin_lock(&uart_interrupt_lock); + } +#endif + if (atomic_read(&gdb_buf_in_cnt) != 0) { /* intr routine has q'd chars */ + chr = gdb_buf[gdb_buf_out_inx++]; + gdb_buf_out_inx &= (GDB_BUF_SIZE - 1); + atomic_dec(&gdb_buf_in_cnt); + } else { + chr = read_data_bfr(info); + } +#ifdef CONFIG_SMP + if (!spin_is_locked(&kgdb_spinlock)) { + spin_unlock(&uart_interrupt_lock); + } +#endif + local_irq_restore(flags); + return (chr); +} + +/* + * Wait until the interface can accept a char, then write it. + */ +static void +write_char(struct async_struct *info, int chr) +{ + while (!(inb_p(info->port + UART_LSR) & UART_LSR_THRE)) ; + + outb_p(chr, info->port + UART_TX); + +} /* write_char */ + +/* + * Mostly we don't need a spinlock, but since the console goes + * thru here with interrutps on, well, we need to catch those + * chars. + */ +/* + * This is the receiver interrupt routine for the GDB stub. + * It will receive a limited number of characters of input + * from the gdb host machine and save them up in a buffer. + * + * When the gdb stub routine tty_getDebugChar() is called it + * draws characters out of the buffer until it is empty and + * then reads directly from the serial port. + * + * We do not attempt to write chars from the interrupt routine + * since the stubs do all of that via tty_putDebugChar() which + * writes one byte after waiting for the interface to become + * ready. + * + * The debug stubs like to run with interrupts disabled since, + * after all, they run as a consequence of a breakpoint in + * the kernel. + * + * Perhaps someone who knows more about the tty driver than I + * care to learn can make this work for any low level serial + * driver. 
+ */ +static irqreturn_t +gdb_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct async_struct *info; + unsigned long flags; + + info = gdb_async_info; + if (!info || !info->tty || irq != gdb_async_irq) + return IRQ_NONE; + + local_irq_save(flags); + spin_lock(&uart_interrupt_lock); + do { + int chr = read_data_bfr(info); + intprintk(("Debug char on int: %x hex\n", chr)); + if (chr < 0) + continue; + + if (chr == 3) { /* Ctrl-C means remote interrupt */ + BREAKPOINT; + continue; + } + + if (atomic_read(&gdb_buf_in_cnt) >= GDB_BUF_SIZE) { + /* buffer overflow tosses early char */ + read_char(info); + } + gdb_buf[gdb_buf_in_inx++] = chr; + gdb_buf_in_inx &= (GDB_BUF_SIZE - 1); + } while (inb_p(info->port + UART_IIR) & UART_IIR_RDI); + spin_unlock(&uart_interrupt_lock); + local_irq_restore(flags); + return IRQ_HANDLED; +} /* gdb_interrupt */ + +/* + * Just a NULL routine for testing. + */ +void +gdb_null(void) +{ +} /* gdb_null */ + +/* These structure are filled in with values defined in asm/kgdb_local.h + */ +static struct serial_state state = SB_STATE; +static struct async_struct local_info = SB_INFO; +static int ok_to_enable_ints = 0; +static void kgdb_enable_ints_now(void); + +extern char *kgdb_version; +/* + * Hook an IRQ for KGDB. + * + * This routine is called from tty_putDebugChar, below. + */ +static int ints_disabled = 1; +int +gdb_hook_interrupt(struct async_struct *info, int verb) +{ + struct serial_state *state = info->state; + unsigned long flags; + int port; +#ifdef TEST_EXISTANCE + int scratch, scratch2; +#endif + + /* The above fails if memory managment is not set up yet. + * Rather than fail the set up, just keep track of the fact + * and pick up the interrupt thing later. 
+ */ + gdb_async_info = info; + port = gdb_async_info->port; + gdb_async_irq = state->irq; + if (verb) { + printk("kgdb %s : port =%x, IRQ=%d, divisor =%d\n", + kgdb_version, + port, + gdb_async_irq, gdb_async_info->state->custom_divisor); + } + local_irq_save(flags); +#ifdef TEST_EXISTANCE + /* Existance test */ + /* Should not need all this, but just in case.... */ + + scratch = inb_p(port + UART_IER); + outb_px(port + UART_IER, 0); + outb_px(0xff, 0x080); + scratch2 = inb_p(port + UART_IER); + outb_px(port + UART_IER, scratch); + if (scratch2) { + printk + ("gdb_hook_interrupt: Could not clear IER, not a UART!\n"); + local_irq_restore(flags); + return 1; /* We failed; there's nothing here */ + } + scratch2 = inb_p(port + UART_LCR); + outb_px(port + UART_LCR, 0xBF); /* set up for StarTech test */ + outb_px(port + UART_EFR, 0); /* EFR is the same as FCR */ + outb_px(port + UART_LCR, 0); + outb_px(port + UART_FCR, UART_FCR_ENABLE_FIFO); + scratch = inb_p(port + UART_IIR) >> 6; + if (scratch == 1) { + printk("gdb_hook_interrupt: Undefined UART type!" + " Not a UART! \n"); + local_irq_restore(flags); + return 1; + } else { + dbprintk(("gdb_hook_interrupt: UART type " + "is %d where 0=16450, 2=16550 3=16550A\n", scratch)); + } + scratch = inb_p(port + UART_MCR); + outb_px(port + UART_MCR, UART_MCR_LOOP | scratch); + outb_px(port + UART_MCR, UART_MCR_LOOP | 0x0A); + scratch2 = inb_p(port + UART_MSR) & 0xF0; + outb_px(port + UART_MCR, scratch); + if (scratch2 != 0x90) { + printk("gdb_hook_interrupt: " + "Loop back test failed! 
Not a UART!\n"); + local_irq_restore(flags); + return scratch2 + 1000; /* force 0 to fail */ + } +#endif /* test existance */ + program_uart(info); + local_irq_restore(flags); + + return (0); + +} /* gdb_hook_interrupt */ + +static void +program_uart(struct async_struct *info) +{ + int port = info->port; + + (void) inb_p(port + UART_RX); + outb_px(port + UART_IER, 0); + + (void) inb_p(port + UART_RX); /* serial driver comments say */ + (void) inb_p(port + UART_IIR); /* this clears the interrupt regs */ + (void) inb_p(port + UART_MSR); + outb_px(port + UART_LCR, UART_LCR_WLEN8 | UART_LCR_DLAB); + outb_px(port + UART_DLL, info->state->custom_divisor & 0xff); /* LS */ + outb_px(port + UART_DLM, info->state->custom_divisor >> 8); /* MS */ + outb_px(port + UART_MCR, info->MCR); + + outb_px(port + UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1 | UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR); /* set fcr */ + outb_px(port + UART_LCR, UART_LCR_WLEN8); /* reset DLAB */ + outb_px(port + UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1); /* set fcr */ + if (!ints_disabled) { + intprintk(("KGDB: Sending %d to port %x offset %d\n", + gdb_async_info->IER, + (int) gdb_async_info->port, UART_IER)); + outb_px(gdb_async_info->port + UART_IER, gdb_async_info->IER); + } + return; +} + +/* + * tty_getDebugChar + * + * This is a GDB stub routine. It waits for a character from the + * serial interface and then returns it. If there is no serial + * interface connection then it returns a bogus value which will + * almost certainly cause the system to hang. 
In the + */ +int kgdb_in_isr = 0; +int kgdb_in_lsr = 0; +extern spinlock_t kgdb_spinlock; + +/* Caller takes needed protections */ + +int +tty_getDebugChar(void) +{ + volatile int chr, dum, time, end_time; + + dbprintk(("tty_getDebugChar(port %x): ", gdb_async_info->port)); + + if (gdb_async_info == NULL) { + gdb_hook_interrupt(&local_info, 0); + } + /* + * This trick says if we wait a very long time and get + * no char, return the -1 and let the upper level deal + * with it. + */ + rdtsc(dum, time); + end_time = time + 2; + while (((chr = read_char(gdb_async_info)) == -1) && + (end_time - time) > 0) { + rdtsc(dum, time); + }; + /* + * This covers our butts if some other code messes with + * our uart, hay, it happens :o) + */ + if (chr == -1) + program_uart(gdb_async_info); + + dbprintk(("%c\n", chr > ' ' && chr < 0x7F ? chr : ' ')); + return (chr); + +} /* tty_getDebugChar */ + +static int count = 3; +static spinlock_t one_at_atime = SPIN_LOCK_UNLOCKED; + +static int __init +kgdb_enable_ints(void) +{ + if (kgdboe) { + return 0; + } + if (gdb_async_info == NULL) { + gdb_hook_interrupt(&local_info, 1); + } + ok_to_enable_ints = 1; + kgdb_enable_ints_now(); +#ifdef CONFIG_KGDB_USER_CONSOLE + kgdb_console_finit(); +#endif + return 0; +} + +#ifdef CONFIG_SERIAL_8250 +void shutdown_for_kgdb(struct async_struct *gdb_async_info); +#endif + +#ifdef CONFIG_DISCONTIGMEM +static inline int kgdb_mem_init_done(void) +{ + return highmem_start_page != NULL; +} +#else +static inline int kgdb_mem_init_done(void) +{ + return max_mapnr != 0; +} +#endif + +static void +kgdb_enable_ints_now(void) +{ + if (!spin_trylock(&one_at_atime)) + return; + if (!ints_disabled) + goto exit; + if (kgdb_mem_init_done() && + ints_disabled) { /* don't try till mem init */ +#ifdef CONFIG_SERIAL_8250 + /* + * The ifdef here allows the system to be configured + * without the serial driver. 
+ * Don't make it a module, however, it will steal the port + */ + shutdown_for_kgdb(gdb_async_info); +#endif + ints_disabled = request_irq(gdb_async_info->state->irq, + gdb_interrupt, + IRQ_T(gdb_async_info), + "KGDB-stub", NULL); + intprintk(("KGDB: request_irq returned %d\n", ints_disabled)); + } + if (!ints_disabled) { + intprintk(("KGDB: Sending %d to port %x offset %d\n", + gdb_async_info->IER, + (int) gdb_async_info->port, UART_IER)); + outb_px(gdb_async_info->port + UART_IER, gdb_async_info->IER); + } + exit: + spin_unlock(&one_at_atime); +} + +/* + * tty_putDebugChar + * + * This is a GDB stub routine. It waits until the interface is ready + * to transmit a char and then sends it. If there is no serial + * interface connection then it simply returns to its caller, having + * pretended to send the char. Caller takes needed protections. + */ +void +tty_putDebugChar(int chr) +{ + dbprintk(("tty_putDebugChar(port %x): chr=%02x '%c', ints_on=%d\n", + gdb_async_info->port, + chr, + chr > ' ' && chr < 0x7F ? chr : ' ', ints_disabled ? 0 : 1)); + + if (gdb_async_info == NULL) { + gdb_hook_interrupt(&local_info, 0); + } + + write_char(gdb_async_info, chr); /* this routine will wait */ + count = (chr == '#') ? 0 : count + 1; + if ((count == 2)) { /* try to enable after */ + if (ints_disabled & ok_to_enable_ints) + kgdb_enable_ints_now(); /* try to enable after */ + + /* We do this a lot because, well we really want to get these + * interrupts. The serial driver will clear these bits when it + * initializes the chip. Every thing else it does is ok, + * but this. + */ + if (!ints_disabled) { + outb_px(gdb_async_info->port + UART_IER, + gdb_async_info->IER); + } + } + +} /* tty_putDebugChar */ + +/* + * This does nothing for the serial port, since it doesn't buffer. 
+ */ + +void tty_flushDebugChar(void) +{ +} + +module_init(kgdb_enable_ints); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/lib/mcount.S current/arch/i386/lib/mcount.S --- reference/arch/i386/lib/mcount.S 1969-12-31 16:00:00.000000000 -0800 +++ current/arch/i386/lib/mcount.S 2004-04-09 11:53:01.000000000 -0700 @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2000 SGI + * + * Written by Dimitris Michailidis dimitris@sgi.com + * + * This file implements mcount(), which is used to collect profiling data. + * We provide several variants to accomodate different types of callers at + * the lowest possible overhead. + */ + +#include +#include + +#define MCOUNT_HEAD \ + pushl %ecx /* We must protect the arguments of FASTCALLs */; \ + movl mcount_hook, %ecx; \ + testl %ecx, %ecx; \ + jz 1f; \ + pushl %eax; \ + pushl %edx; \ + movl 12(%esp), %edx /* mcount()'s parent */ + +#define MCOUNT_TAIL \ + call *%ecx; \ + popl %edx; \ + popl %eax; \ +1: popl %ecx + +/* + * This is the main variant and is called by C code. GCC's -pg option + * automatically instruments every C function with a call to this. + */ +ENTRY(mcount) +#if defined(CONFIG_MCOUNT) + MCOUNT_HEAD +#ifdef CONFIG_FRAME_POINTER + movl 4(%ebp), %eax /* mcount()'s parent's parent */ +#endif + MCOUNT_TAIL +#endif + ret diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/lib/usercopy.c current/arch/i386/lib/usercopy.c --- reference/arch/i386/lib/usercopy.c 2004-01-15 10:41:00.000000000 -0800 +++ current/arch/i386/lib/usercopy.c 2004-04-09 11:53:00.000000000 -0700 @@ -76,7 +76,7 @@ do { \ * and returns @count. */ long -__strncpy_from_user(char *dst, const char __user *src, long count) +__direct_strncpy_from_user(char *dst, const char __user *src, long count) { long res; __do_strncpy_from_user(dst, src, count, res); @@ -102,7 +102,7 @@ __strncpy_from_user(char *dst, const cha * and returns @count. 
*/ long -strncpy_from_user(char *dst, const char __user *src, long count) +direct_strncpy_from_user(char *dst, const char __user *src, long count) { long res = -EFAULT; if (access_ok(VERIFY_READ, src, 1)) @@ -147,7 +147,7 @@ do { \ * On success, this will be zero. */ unsigned long -clear_user(void __user *to, unsigned long n) +direct_clear_user(void __user *to, unsigned long n) { might_sleep(); if (access_ok(VERIFY_WRITE, to, n)) @@ -167,7 +167,7 @@ clear_user(void __user *to, unsigned lon * On success, this will be zero. */ unsigned long -__clear_user(void __user *to, unsigned long n) +__direct_clear_user(void __user *to, unsigned long n) { __do_clear_user(to, n); return n; @@ -184,7 +184,7 @@ __clear_user(void __user *to, unsigned l * On exception, returns 0. * If the string is too long, returns a value greater than @n. */ -long strnlen_user(const char __user *s, long n) +long direct_strnlen_user(const char __user *s, long n) { unsigned long mask = -__addr_ok(s); unsigned long res, tmp; @@ -575,3 +575,4 @@ unsigned long __copy_from_user_ll(void * n = __copy_user_zeroing_intel(to, (const void *) from, n); return n; } + diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/math-emu/fpu_system.h current/arch/i386/math-emu/fpu_system.h --- reference/arch/i386/math-emu/fpu_system.h 2002-12-09 18:45:53.000000000 -0800 +++ current/arch/i386/math-emu/fpu_system.h 2004-04-09 11:53:00.000000000 -0700 @@ -15,6 +15,7 @@ #include #include #include +#include /* This sets the pointer FPU_info to point to the argument part of the stack frame of math_emulate() */ @@ -22,7 +23,7 @@ /* s is always from a cpu register, and the cpu does bounds checking * during register load --> no further bounds checks needed */ -#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) +#define LDT_DESCRIPTOR(s) (((struct desc_struct *)__kmap_atomic_vaddr(KM_LDT_PAGE0))[(s) >> 3]) #define SEG_D_SIZE(x) ((x).b & (3 << 21)) #define SEG_G_BIT(x) ((x).b & (1 << 23)) 
#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1) diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/mm/discontig.c current/arch/i386/mm/discontig.c --- reference/arch/i386/mm/discontig.c 2004-04-07 14:53:57.000000000 -0700 +++ current/arch/i386/mm/discontig.c 2004-04-09 21:41:41.000000000 -0700 @@ -56,7 +56,7 @@ bootmem_data_t node0_bdata; * physnode_map[4-7] = 1; * physnode_map[8- ] = -1; */ -u8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1}; +u8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = 0}; unsigned long node_start_pfn[MAX_NUMNODES]; unsigned long node_end_pfn[MAX_NUMNODES]; @@ -233,6 +233,13 @@ unsigned long __init setup_memory(void) unsigned long bootmap_size, system_start_pfn, system_max_low_pfn; unsigned long reserve_pages; + /* + * When mapping a NUMA machine we allocate the node_mem_map arrays + * from node local memory. They are then mapped directly into KVA + * between zone normal and vmalloc space. Calculate the size of + * this space and use it to adjust the boundry between ZONE_NORMAL + * and ZONE_HIGHMEM. 
+ */ get_memcfg_numa(); reserve_pages = calculate_numa_remap_pages(); @@ -240,7 +247,10 @@ unsigned long __init setup_memory(void) system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end); find_max_pfn(); - system_max_low_pfn = max_low_pfn = find_max_low_pfn(); + system_max_low_pfn = max_low_pfn = find_max_low_pfn() - reserve_pages; + printk("reserve_pages = %ld find_max_low_pfn() ~ %ld\n", + reserve_pages, max_low_pfn + reserve_pages); + printk("max_pfn = %ld\n", max_pfn); #ifdef CONFIG_HIGHMEM highstart_pfn = highend_pfn = max_pfn; if (max_pfn > system_max_low_pfn) @@ -248,7 +258,6 @@ unsigned long __init setup_memory(void) printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); #endif - system_max_low_pfn = max_low_pfn = max_low_pfn - reserve_pages; printk(KERN_NOTICE "%ldMB LOWMEM available.\n", pages_to_mb(system_max_low_pfn)); printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n", @@ -258,15 +267,16 @@ unsigned long __init setup_memory(void) (ulong) pfn_to_kaddr(max_low_pfn)); for (nid = 0; nid < numnodes; nid++) { node_remap_start_vaddr[nid] = pfn_to_kaddr( - highstart_pfn - node_remap_offset[nid]); + (highstart_pfn + reserve_pages) - node_remap_offset[nid]); allocate_pgdat(nid); printk ("node %d will remap to vaddr %08lx - %08lx\n", nid, (ulong) node_remap_start_vaddr[nid], - (ulong) pfn_to_kaddr(highstart_pfn + (ulong) pfn_to_kaddr(highstart_pfn + reserve_pages - node_remap_offset[nid] + node_remap_size[nid])); } printk("High memory starts at vaddr %08lx\n", (ulong) pfn_to_kaddr(highstart_pfn)); + vmalloc_earlyreserve = reserve_pages * PAGE_SIZE; for (nid = 0; nid < numnodes; nid++) find_max_pfn_node(nid); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/mm/fault.c current/arch/i386/mm/fault.c --- reference/arch/i386/mm/fault.c 2003-12-26 12:28:13.000000000 -0800 +++ current/arch/i386/mm/fault.c 2004-04-09 11:53:00.000000000 -0700 @@ -27,6 +27,7 @@ #include #include #include +#include extern 
void die(const char *,struct pt_regs *,long); @@ -104,8 +105,17 @@ static inline unsigned long get_segment_ if (seg & (1<<2)) { /* Must lock the LDT while reading it. */ down(¤t->mm->context.sem); +#if 1 + /* horrible hack for 4/4 disabled kernels. + I'm not quite sure what the TLB flush is good for, + it's mindlessly copied from the read_ldt code */ + __flush_tlb_global(); + desc = kmap(current->mm->context.ldt_pages[(seg&~7)/PAGE_SIZE]); + desc = (void *)desc + ((seg & ~7) % PAGE_SIZE); +#else desc = current->mm->context.ldt; desc = (void *)desc + (seg & ~7); +#endif } else { /* Must disable preemption while reading the GDT. */ desc = (u32 *)&cpu_gdt_table[get_cpu()]; @@ -118,6 +128,9 @@ static inline unsigned long get_segment_ (desc[1] & 0xff000000); if (seg & (1<<2)) { +#if 1 + kunmap((void *)((unsigned long)desc & PAGE_MASK)); +#endif up(¤t->mm->context.sem); } else put_cpu(); @@ -243,6 +256,19 @@ asmlinkage void do_page_fault(struct pt_ * (error_code & 4) == 0, and that the fault was not a * protection error (error_code & 1) == 0. */ +#ifdef CONFIG_X86_4G + /* + * On 4/4 all kernels faults are either bugs, vmalloc or prefetch + */ + if (unlikely((regs->xcs & 3) == 0)) { + if (error_code & 3) + goto bad_area_nosemaphore; + + /* If it's vm86 fall through */ + if (!(regs->eflags & VM_MASK)) + goto vmalloc_fault; + } +#else if (unlikely(address >= TASK_SIZE)) { if (!(error_code & 5)) goto vmalloc_fault; @@ -252,6 +278,7 @@ asmlinkage void do_page_fault(struct pt_ */ goto bad_area_nosemaphore; } +#endif mm = tsk->mm; @@ -403,6 +430,12 @@ no_context: * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. 
*/ +#ifdef CONFIG_KGDB + if (!user_mode(regs)){ + kgdb_handle_exception(14,SIGBUS, error_code, regs); + return; + } +#endif bust_spinlocks(1); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/mm/init.c current/arch/i386/mm/init.c --- reference/arch/i386/mm/init.c 2004-04-07 14:53:57.000000000 -0700 +++ current/arch/i386/mm/init.c 2004-04-09 11:53:00.000000000 -0700 @@ -40,125 +40,13 @@ #include #include #include +#include DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); unsigned long highstart_pfn, highend_pfn; static int do_test_wp_bit(void); -/* - * Creates a middle page table and puts a pointer to it in the - * given global directory entry. This only returns the gd entry - * in non-PAE compilation mode, since the middle layer is folded. - */ -static pmd_t * __init one_md_table_init(pgd_t *pgd) -{ - pmd_t *pmd_table; - -#ifdef CONFIG_X86_PAE - pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); - if (pmd_table != pmd_offset(pgd, 0)) - BUG(); -#else - pmd_table = pmd_offset(pgd, 0); -#endif - - return pmd_table; -} - -/* - * Create a page table and place a pointer to it in a middle page - * directory entry. - */ -static pte_t * __init one_page_table_init(pmd_t *pmd) -{ - if (pmd_none(*pmd)) { - pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); - set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); - if (page_table != pte_offset_kernel(pmd, 0)) - BUG(); - - return page_table; - } - - return pte_offset_kernel(pmd, 0); -} - -/* - * This function initializes a certain range of kernel virtual memory - * with new bootmem page tables, everywhere page tables are missing in - * the given range. - */ - -/* - * NOTE: The pagetables are allocated contiguous on the physical space - * so we can cache the place of the first one and move around without - * checking the pgd every time. 
- */ -static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base) -{ - pgd_t *pgd; - pmd_t *pmd; - int pgd_idx, pmd_idx; - unsigned long vaddr; - - vaddr = start; - pgd_idx = pgd_index(vaddr); - pmd_idx = pmd_index(vaddr); - pgd = pgd_base + pgd_idx; - - for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { - if (pgd_none(*pgd)) - one_md_table_init(pgd); - - pmd = pmd_offset(pgd, vaddr); - for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { - if (pmd_none(*pmd)) - one_page_table_init(pmd); - - vaddr += PMD_SIZE; - } - pmd_idx = 0; - } -} - -/* - * This maps the physical memory to kernel virtual address space, a total - * of max_low_pfn pages, by creating page tables starting from address - * PAGE_OFFSET. - */ -static void __init kernel_physical_mapping_init(pgd_t *pgd_base) -{ - unsigned long pfn; - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - int pgd_idx, pmd_idx, pte_ofs; - - pgd_idx = pgd_index(PAGE_OFFSET); - pgd = pgd_base + pgd_idx; - pfn = 0; - - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { - pmd = one_md_table_init(pgd); - if (pfn >= max_low_pfn) - continue; - for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) { - /* Map with big pages if possible, otherwise create normal page tables. 
*/ - if (cpu_has_pse) { - set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE)); - pfn += PTRS_PER_PTE; - } else { - pte = one_page_table_init(pmd); - - for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) - set_pte(pte, pfn_pte(pfn, PAGE_KERNEL)); - } - } - } -} - static inline int page_kills_ppro(unsigned long pagenr) { if (pagenr >= 0x70000 && pagenr <= 0x7003F) @@ -206,11 +94,8 @@ static inline int page_is_ram(unsigned l return 0; } -#ifdef CONFIG_HIGHMEM pte_t *kmap_pte; -pgprot_t kmap_prot; -EXPORT_SYMBOL(kmap_prot); EXPORT_SYMBOL(kmap_pte); #define kmap_get_fixmap_pte(vaddr) \ @@ -218,29 +103,7 @@ EXPORT_SYMBOL(kmap_pte); void __init kmap_init(void) { - unsigned long kmap_vstart; - - /* cache the first kmap pte */ - kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); - kmap_pte = kmap_get_fixmap_pte(kmap_vstart); - - kmap_prot = PAGE_KERNEL; -} - -void __init permanent_kmaps_init(pgd_t *pgd_base) -{ - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - unsigned long vaddr; - - vaddr = PKMAP_BASE; - page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); - - pgd = swapper_pg_dir + pgd_index(vaddr); - pmd = pmd_offset(pgd, vaddr); - pte = pte_offset_kernel(pmd, vaddr); - pkmap_page_table = pte; + kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN)); } void __init one_highpage_init(struct page *page, int pfn, int bad_ppro) @@ -255,6 +118,8 @@ void __init one_highpage_init(struct pag SetPageReserved(page); } +#ifdef CONFIG_HIGHMEM + #ifndef CONFIG_DISCONTIGMEM void __init set_highmem_pages_init(int bad_ppro) { @@ -266,12 +131,9 @@ void __init set_highmem_pages_init(int b #else extern void set_highmem_pages_init(int); #endif /* !CONFIG_DISCONTIGMEM */ - #else -#define kmap_init() do { } while (0) -#define permanent_kmaps_init(pgd_base) do { } while (0) -#define set_highmem_pages_init(bad_ppro) do { } while (0) -#endif /* CONFIG_HIGHMEM */ +# define set_highmem_pages_init(bad_ppro) do { } while (0) +#endif unsigned long 
__PAGE_KERNEL = _PAGE_KERNEL; @@ -281,30 +143,125 @@ unsigned long __PAGE_KERNEL = _PAGE_KERN extern void __init remap_numa_kva(void); #endif -static void __init pagetable_init (void) +static __init void prepare_pagetables(pgd_t *pgd_base, unsigned long address) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + + pgd = pgd_base + pgd_index(address); + pmd = pmd_offset(pgd, address); + if (!pmd_present(*pmd)) { + pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); + } +} + +static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base) { unsigned long vaddr; - pgd_t *pgd_base = swapper_pg_dir; + for (vaddr = start; vaddr != end; vaddr += PAGE_SIZE) + prepare_pagetables(pgd_base, vaddr); +} + +void setup_identity_mappings(pgd_t *pgd_base, unsigned long start, unsigned long end) +{ + unsigned long vaddr; + pgd_t *pgd; + int i, j, k; + pmd_t *pmd; + pte_t *pte, *pte_base; + + pgd = pgd_base; + + for (i = 0; i < PTRS_PER_PGD; pgd++, i++) { + vaddr = i*PGDIR_SIZE; + if (end && (vaddr >= end)) + break; + pmd = pmd_offset(pgd, 0); + for (j = 0; j < PTRS_PER_PMD; pmd++, j++) { + vaddr = i*PGDIR_SIZE + j*PMD_SIZE; + if (end && (vaddr >= end)) + break; + if (vaddr < start) + continue; + if (cpu_has_pse) { + unsigned long __pe; + + set_in_cr4(X86_CR4_PSE); + boot_cpu_data.wp_works_ok = 1; + __pe = _KERNPG_TABLE + _PAGE_PSE + vaddr - start; + /* Make it "global" too if supported */ + if (cpu_has_pge) { + set_in_cr4(X86_CR4_PGE); +#if !defined(CONFIG_X86_SWITCH_PAGETABLES) + __pe += _PAGE_GLOBAL; + __PAGE_KERNEL |= _PAGE_GLOBAL; +#endif + } + set_pmd(pmd, __pmd(__pe)); + continue; + } + if (!pmd_present(*pmd)) + pte_base = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + else + pte_base = (pte_t *) page_address(pmd_page(*pmd)); + pte = pte_base; + for (k = 0; k < PTRS_PER_PTE; pte++, k++) { + vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE; + if (end && (vaddr >= end)) + break; + if (vaddr < start) + 
continue; + *pte = mk_pte_phys(vaddr-start, PAGE_KERNEL); + } + set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base))); + } + } +} + +static void __init pagetable_init (void) +{ + unsigned long vaddr, end; + pgd_t *pgd_base; #ifdef CONFIG_X86_PAE int i; - /* Init entries of the first-level page table to the zero page */ - for (i = 0; i < PTRS_PER_PGD; i++) - set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT)); #endif - /* Enable PSE if available */ - if (cpu_has_pse) { - set_in_cr4(X86_CR4_PSE); - } + /* + * This can be zero as well - no problem, in that case we exit + * the loops anyway due to the PTRS_PER_* conditions. + */ + end = (unsigned long)__va(max_low_pfn*PAGE_SIZE); - /* Enable PGE if available */ - if (cpu_has_pge) { - set_in_cr4(X86_CR4_PGE); - __PAGE_KERNEL |= _PAGE_GLOBAL; + pgd_base = swapper_pg_dir; +#ifdef CONFIG_X86_PAE + /* + * It causes too many problems if there's no proper pmd set up + * for all 4 entries of the PGD - so we allocate all of them. + * PAE systems will not miss this extra 4-8K anyway ... + */ + for (i = 0; i < PTRS_PER_PGD; i++) { + pmd_t *pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); + set_pgd(pgd_base + i, __pgd(__pa(pmd) + 0x1)); } +#endif + /* + * Set up lowmem-sized identity mappings at PAGE_OFFSET: + */ + setup_identity_mappings(pgd_base, PAGE_OFFSET, end); - kernel_physical_mapping_init(pgd_base); + /* + * Add flat-mode identity-mappings - SMP needs it when + * starting up on an AP from real-mode. (In the non-PAE + * case we already have these mappings through head.S.) + * All user-space mappings are explicitly cleared after + * SMP startup. 
+ */ +#if defined(CONFIG_SMP) && defined(CONFIG_X86_PAE) + setup_identity_mappings(pgd_base, 0, 16*1024*1024); +#endif remap_numa_kva(); /* @@ -312,38 +269,64 @@ static void __init pagetable_init (void) * created - mappings will be set by set_fixmap(): */ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - page_table_range_init(vaddr, 0, pgd_base); + fixrange_init(vaddr, 0, pgd_base); - permanent_kmaps_init(pgd_base); +#ifdef CONFIG_HIGHMEM + { + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; -#ifdef CONFIG_X86_PAE - /* - * Add low memory identity-mappings - SMP needs it when - * starting up on an AP from real-mode. In the non-PAE - * case we already have these mappings through head.S. - * All user-space mappings are explicitly cleared after - * SMP startup. - */ - pgd_base[0] = pgd_base[USER_PTRS_PER_PGD]; + /* + * Permanent kmaps: + */ + vaddr = PKMAP_BASE; + fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); + + pgd = swapper_pg_dir + pgd_index(vaddr); + pmd = pmd_offset(pgd, vaddr); + pte = pte_offset_kernel(pmd, vaddr); + pkmap_page_table = pte; + } #endif } -void zap_low_mappings (void) +/* + * Clear kernel pagetables in a PMD_SIZE-aligned range. + */ +static void clear_mappings(pgd_t *pgd_base, unsigned long start, unsigned long end) { - int i; + unsigned long vaddr; + pgd_t *pgd; + pmd_t *pmd; + int i, j; + + pgd = pgd_base; + + for (i = 0; i < PTRS_PER_PGD; pgd++, i++) { + vaddr = i*PGDIR_SIZE; + if (end && (vaddr >= end)) + break; + pmd = pmd_offset(pgd, 0); + for (j = 0; j < PTRS_PER_PMD; pmd++, j++) { + vaddr = i*PGDIR_SIZE + j*PMD_SIZE; + if (end && (vaddr >= end)) + break; + if (vaddr < start) + continue; + pmd_clear(pmd); + } + } + flush_tlb_all(); +} + +void zap_low_mappings(void) +{ + printk("zapping low mappings.\n"); /* * Zap initial low-memory mappings. - * - * Note that "pgd_clear()" doesn't do it for - * us, because pgd_clear() is a no-op on i386. 
*/ - for (i = 0; i < USER_PTRS_PER_PGD; i++) -#ifdef CONFIG_X86_PAE - set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page))); -#else - set_pgd(swapper_pg_dir+i, __pgd(0)); -#endif - flush_tlb_all(); + clear_mappings(swapper_pg_dir, 0, 16*1024*1024); } #ifndef CONFIG_DISCONTIGMEM @@ -393,7 +376,15 @@ void __init paging_init(void) set_in_cr4(X86_CR4_PAE); #endif __flush_tlb_all(); - + /* + * Subtle. SMP is doing it's boot stuff late (because it has to + * fork idle threads) - but it also needs low mappings for the + * protected-mode entry to work. We zap these entries only after + * the WP-bit has been tested. + */ +#ifndef CONFIG_SMP + zap_low_mappings(); +#endif kmap_init(); zone_sizes_init(); } @@ -512,22 +503,18 @@ void __init mem_init(void) if (boot_cpu_data.wp_works_ok < 0) test_wp_bit(); - /* - * Subtle. SMP is doing it's boot stuff late (because it has to - * fork idle threads) - but it also needs low mappings for the - * protected-mode entry to work. We zap these entries only after - * the WP-bit has been tested. 
- */ -#ifndef CONFIG_SMP - zap_low_mappings(); -#endif + entry_trampoline_setup(); + default_ldt_page = virt_to_page(default_ldt); + load_LDT(&init_mm.context); } -kmem_cache_t *pgd_cache; -kmem_cache_t *pmd_cache; +kmem_cache_t *pgd_cache, *pmd_cache, *kpmd_cache; void __init pgtable_cache_init(void) { + void (*ctor)(void *, kmem_cache_t *, unsigned long); + void (*dtor)(void *, kmem_cache_t *, unsigned long); + if (PTRS_PER_PMD > 1) { pmd_cache = kmem_cache_create("pmd", PTRS_PER_PMD*sizeof(pmd_t), @@ -537,13 +524,36 @@ void __init pgtable_cache_init(void) NULL); if (!pmd_cache) panic("pgtable_cache_init(): cannot create pmd cache"); + + if (TASK_SIZE > PAGE_OFFSET) { + kpmd_cache = kmem_cache_create("kpmd", + PTRS_PER_PMD*sizeof(pmd_t), + 0, + SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, + kpmd_ctor, + NULL); + if (!kpmd_cache) + panic("pgtable_cache_init(): " + "cannot create kpmd cache"); + } } + + if (PTRS_PER_PMD == 1 || TASK_SIZE <= PAGE_OFFSET) + ctor = pgd_ctor; + else + ctor = NULL; + + if (PTRS_PER_PMD == 1 && TASK_SIZE <= PAGE_OFFSET) + dtor = pgd_dtor; + else + dtor = NULL; + pgd_cache = kmem_cache_create("pgd", PTRS_PER_PGD*sizeof(pgd_t), 0, SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, - pgd_ctor, - PTRS_PER_PMD == 1 ? 
pgd_dtor : NULL); + ctor, + dtor); if (!pgd_cache) panic("pgtable_cache_init(): Cannot create pgd cache"); } diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/mm/pageattr.c current/arch/i386/mm/pageattr.c --- reference/arch/i386/mm/pageattr.c 2003-10-01 11:34:29.000000000 -0700 +++ current/arch/i386/mm/pageattr.c 2004-04-09 11:53:00.000000000 -0700 @@ -71,7 +71,7 @@ static void set_pmd_pte(pte_t *kpte, uns unsigned long flags; set_pte_atomic(kpte, pte); /* change init_mm */ - if (PTRS_PER_PMD > 1) + if (PTRS_PER_PMD > 1 || TASK_SIZE > PAGE_OFFSET) return; spin_lock_irqsave(&pgd_lock, flags); diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/mm/pgtable.c current/arch/i386/mm/pgtable.c --- reference/arch/i386/mm/pgtable.c 2003-10-01 11:34:29.000000000 -0700 +++ current/arch/i386/mm/pgtable.c 2004-04-09 11:53:04.000000000 -0700 @@ -21,6 +21,7 @@ #include #include #include +#include void show_mem(void) { @@ -157,11 +158,20 @@ void pmd_ctor(void *pmd, kmem_cache_t *c memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t)); } +void kpmd_ctor(void *__pmd, kmem_cache_t *cache, unsigned long flags) +{ + pmd_t *kpmd, *pmd; + kpmd = pmd_offset(&swapper_pg_dir[PTRS_PER_PGD-1], + (PTRS_PER_PMD - NR_SHARED_PMDS)*PMD_SIZE); + pmd = (pmd_t *)__pmd + (PTRS_PER_PMD - NR_SHARED_PMDS); + + memset(__pmd, 0, (PTRS_PER_PMD - NR_SHARED_PMDS)*sizeof(pmd_t)); + memcpy(pmd, kpmd, NR_SHARED_PMDS*sizeof(pmd_t)); +} + /* - * List of all pgd's needed for non-PAE so it can invalidate entries - * in both cached and uncached pgd's; not needed for PAE since the - * kernel pmd is shared. If PAE were not to share the pmd a similar - * tactic would be needed. This is essentially codepath-based locking + * List of all pgd's needed so it can invalidate entries in both cached + * and uncached pgd's. This is essentially codepath-based locking * against pageattr.c; it is the unique case in which a valid change * of kernel pagetables can't be lazily synchronized by vmalloc faults. 
* vmalloc faults work because attached pagetables are never freed. @@ -170,30 +180,60 @@ void pmd_ctor(void *pmd, kmem_cache_t *c * could be used. The locking scheme was chosen on the basis of * manfred's recommendations and having no core impact whatsoever. * -- wli + * + * The entire issue goes away when XKVA is configured. */ spinlock_t pgd_lock = SPIN_LOCK_UNLOCKED; LIST_HEAD(pgd_list); -void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused) +/* + * This is not that hard to figure out. + * (a) PTRS_PER_PMD == 1 means non-PAE. + * (b) PTRS_PER_PMD > 1 means PAE. + * (c) TASK_SIZE > PAGE_OFFSET means XKVA. + * (d) TASK_SIZE <= PAGE_OFFSET means non-XKVA. + * + * Do *NOT* back out the preconstruction like the patch I'm cleaning + * up after this very instant did, or at all, for that matter. + * This is never called when PTRS_PER_PMD > 1 && TASK_SIZE > PAGE_OFFSET. + * -- wli + */ +void pgd_ctor(void *__pgd, kmem_cache_t *cache, unsigned long unused) { + pgd_t *pgd = (pgd_t *)__pgd; unsigned long flags; - if (PTRS_PER_PMD == 1) - spin_lock_irqsave(&pgd_lock, flags); + if (PTRS_PER_PMD == 1) { + if (TASK_SIZE <= PAGE_OFFSET) + spin_lock_irqsave(&pgd_lock, flags); + else + memcpy(&pgd[PTRS_PER_PGD - NR_SHARED_PMDS], + &swapper_pg_dir[PTRS_PER_PGD - NR_SHARED_PMDS], + NR_SHARED_PMDS * sizeof(pgd_t)); + } - memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD, - swapper_pg_dir + USER_PTRS_PER_PGD, - (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + if (TASK_SIZE <= PAGE_OFFSET) + memcpy(pgd + USER_PTRS_PER_PGD, + swapper_pg_dir + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); if (PTRS_PER_PMD > 1) return; - list_add(&virt_to_page(pgd)->lru, &pgd_list); - spin_unlock_irqrestore(&pgd_lock, flags); - memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); + if (TASK_SIZE > PAGE_OFFSET) + memset(pgd, 0, (PTRS_PER_PGD - NR_SHARED_PMDS)*sizeof(pgd_t)); + else { + list_add(&virt_to_page(pgd)->lru, &pgd_list); + spin_unlock_irqrestore(&pgd_lock, 
flags); + memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); + } } -/* never called when PTRS_PER_PMD > 1 */ +/* + * Never called when PTRS_PER_PMD > 1 || TASK_SIZE > PAGE_OFFSET + * for with PAE we would list_del() multiple times, and for non-PAE + * with XKVA all the AGP pgd shootdown code is unnecessary. + */ void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) { unsigned long flags; /* can be called from interrupt context */ @@ -203,6 +243,12 @@ void pgd_dtor(void *pgd, kmem_cache_t *c spin_unlock_irqrestore(&pgd_lock, flags); } +/* + * See the comments above pgd_ctor() wrt. preconstruction. + * Do *NOT* memcpy() here. If you do, you back out important + * anti- cache pollution code. + * + */ pgd_t *pgd_alloc(struct mm_struct *mm) { int i; @@ -211,15 +257,33 @@ pgd_t *pgd_alloc(struct mm_struct *mm) if (PTRS_PER_PMD == 1 || !pgd) return pgd; + /* + * In the 4G userspace case alias the top 16 MB virtual + * memory range into the user mappings as well (these + * include the trampoline and CPU data structures). + */ for (i = 0; i < USER_PTRS_PER_PGD; ++i) { - pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); + kmem_cache_t *cache; + pmd_t *pmd; + + if (TASK_SIZE > PAGE_OFFSET && i == USER_PTRS_PER_PGD - 1) + cache = kpmd_cache; + else + cache = pmd_cache; + + pmd = kmem_cache_alloc(cache, GFP_KERNEL); if (!pmd) goto out_oom; set_pgd(&pgd[i], __pgd(1 + __pa((u64)((u32)pmd)))); } - return pgd; + return pgd; out_oom: + /* + * we don't have to handle the kpmd_cache here, since it's the + * last allocation, and has either nothing to free or when it + * succeeds the whole operation succeeds. 
+ */ for (i--; i >= 0; i--) kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1)); kmem_cache_free(pgd_cache, pgd); @@ -230,10 +294,85 @@ void pgd_free(pgd_t *pgd) { int i; + /* in the non-PAE case, clear_page_tables() clears user pgd entries */ + if (PTRS_PER_PMD == 1) + goto out_free; + /* in the PAE case user pgd entries are overwritten before usage */ - if (PTRS_PER_PMD > 1) - for (i = 0; i < USER_PTRS_PER_PGD; ++i) - kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1)); - /* in the non-PAE case, clear_page_tables() clears user pgd entries */ + for (i = 0; i < USER_PTRS_PER_PGD; ++i) { + kmem_cache_t *cache; + pmd_t *pmd = __va(pgd_val(pgd[i]) - 1); + + /* + * only userspace pmd's are cleared for us + * by mm/memory.c; it's a slab cache invariant + * that we must separate the kernel pmd slab + * all times, else we'll have bad pmd's. + */ + if (TASK_SIZE > PAGE_OFFSET && i == USER_PTRS_PER_PGD - 1) + cache = kpmd_cache; + else + cache = pmd_cache; + + kmem_cache_free(cache, pmd); + } +out_free: kmem_cache_free(pgd_cache, pgd); } + +#define GLIBC_BUFFER (32*1024*1024) + +/* + * This is total crap; it needs to use the free area cache to mitigate + * catastrophic O(n) search with many vmas. + */ +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma, *prev; + + len = PAGE_ALIGN(len); + addr = PAGE_ALIGN(addr); + + if (len > TASK_SIZE) + return -ENOMEM; + + if (addr) { + struct vm_area_struct *vma; + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + goto out; + } + + if (!mm->mmap) { + if (len > TASK_SIZE - GLIBC_BUFFER) + addr = TASK_SIZE - len; + else + addr = TASK_SIZE - GLIBC_BUFFER - len; + goto out; + } + + addr = -ENOMEM; + for (prev = NULL, vma = mm->mmap; vma; prev = vma, vma = vma->vm_next) { + unsigned long lo, hi; + lo = prev ? 
prev->vm_end : 0; + hi = vma->vm_start; + if (hi - lo >= len && (addr == -ENOMEM || addr < hi - len)) + addr = hi - len; + } + /* + * We're at the last one; let's try the top, but only if nothing + * else can be found (to respect GLIBC_BUFFER). + */ + if (prev && TASK_SIZE - prev->vm_end >= len) { + if (TASK_SIZE - GLIBC_BUFFER - prev->vm_end >= len) + addr = TASK_SIZE - GLIBC_BUFFER - len; + else if (addr == -ENOMEM) + addr = TASK_SIZE - len; + } +out: + return addr; +} diff -purN -X /home/mbligh/.diff.exclude reference/arch/i386/oprofile/op_model_p4.c current/arch/i386/oprofile/op_model_p4.c --- reference/arch/i386/oprofile/op_model_p4.c 2003-10-01 11:40:41.000000000 -0700 +++ current/arch/i386/oprofile/op_model_p4.c 2004-04-08 15:10:22.000000000 -0700 @@ -382,11 +382,8 @@ static struct p4_event_binding p4_events static unsigned int get_stagger(void) { #ifdef CONFIG_SMP - int cpu; - if (smp_num_siblings > 1) { - cpu = smp_processor_id(); - return (cpu_sibling_map[cpu] > cpu) ? 0 : 1; - } + int cpu = smp_processor_id(); + return (cpu != first_cpu(cpu_sibling_map[cpu])); #endif return 0; } diff -purN -X /home/mbligh/.diff.exclude reference/arch/ia64/Kconfig current/arch/ia64/Kconfig --- reference/arch/ia64/Kconfig 2004-04-07 14:53:57.000000000 -0700 +++ current/arch/ia64/Kconfig 2004-04-09 21:46:02.000000000 -0700 @@ -172,6 +172,14 @@ config NUMA Access). This option is for configuring high-end multiprocessor server systems. If in doubt, say N. +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. 
+ config VIRTUAL_MEM_MAP bool "Virtual mem map" default y if !IA64_HP_SIM @@ -497,6 +505,15 @@ config SYSVIPC_COMPAT bool depends on COMPAT && SYSVIPC default y + +config LOCKMETER + bool "Kernel lock metering" + depends on SMP + help + Say Y to enable kernel lock metering, which adds overhead to SMP + locks, but allows you to see various statistics using the + lockstat command. + endmenu source "security/Kconfig" diff -purN -X /home/mbligh/.diff.exclude reference/arch/mips/Kconfig current/arch/mips/Kconfig --- reference/arch/mips/Kconfig 2004-04-07 14:53:58.000000000 -0700 +++ current/arch/mips/Kconfig 2004-04-09 21:46:02.000000000 -0700 @@ -337,6 +337,14 @@ config NUMA Access). This option is for configuring high-end multiprocessor server machines. If in doubt, say N. +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. + config MAPPED_KERNEL bool "Mapped kernel support" depends on SGI_IP27 diff -purN -X /home/mbligh/.diff.exclude reference/arch/mips/mm/cache.c current/arch/mips/mm/cache.c --- reference/arch/mips/mm/cache.c 2004-03-11 14:33:51.000000000 -0800 +++ current/arch/mips/mm/cache.c 2004-04-08 15:10:25.000000000 -0700 @@ -57,7 +57,7 @@ void flush_dcache_page(struct page *page { unsigned long addr; - if (page->mapping && + if (page_mapping(page) && list_empty(&page->mapping->i_mmap) && list_empty(&page->mapping->i_mmap_shared)) { SetPageDcacheDirty(page); @@ -66,7 +66,7 @@ void flush_dcache_page(struct page *page } /* - * We could delay the flush for the !page->mapping case too. But that + * We could delay the flush for the !page_mapping case too. But that * case is for exec env/arg pages and those are %99 certainly going to * get faulted into the tlb (and thus flushed) anyways. 
*/ @@ -81,7 +81,7 @@ void __update_cache(struct vm_area_struc unsigned long pfn, addr; pfn = pte_pfn(pte); - if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page->mapping) && + if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) && Page_dcache_dirty(page)) { if (pages_do_alias((unsigned long)page_address(page), address & PAGE_MASK)) { diff -purN -X /home/mbligh/.diff.exclude reference/arch/parisc/kernel/cache.c current/arch/parisc/kernel/cache.c --- reference/arch/parisc/kernel/cache.c 2004-01-15 10:41:01.000000000 -0800 +++ current/arch/parisc/kernel/cache.c 2004-04-08 15:10:25.000000000 -0700 @@ -68,7 +68,7 @@ update_mmu_cache(struct vm_area_struct * { struct page *page = pte_page(pte); - if (VALID_PAGE(page) && page->mapping && + if (VALID_PAGE(page) && page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) { flush_kernel_dcache_page(page_address(page)); @@ -234,7 +234,7 @@ void __flush_dcache_page(struct page *pa flush_kernel_dcache_page(page_address(page)); - if (!page->mapping) + if (!page_mapping(page)) return; /* check shared list first if it's not empty...it's usually * the shortest */ diff -purN -X /home/mbligh/.diff.exclude reference/arch/ppc/Kconfig current/arch/ppc/Kconfig --- reference/arch/ppc/Kconfig 2004-04-07 14:53:59.000000000 -0700 +++ current/arch/ppc/Kconfig 2004-04-09 21:41:41.000000000 -0700 @@ -1209,6 +1209,19 @@ config DEBUG_INFO debug the kernel. If you don't debug the kernel, you can say N. +config SCHEDSTATS + bool "Collect scheduler statistics" + depends on PROC_FS + default y + help + If you say Y here, additional code will be inserted into the + scheduler and related routines to collect statistics about + scheduler behavior and provide them in /proc/schedstat. These + stats may be useful for both tuning and debugging the scheduler + If you aren't debugging the scheduler or trying to tune a specific + application, you can say N to avoid the very slight overhead + this adds. 
+ config BOOTX_TEXT bool "Support for early boot text console (BootX or OpenFirmware only)" depends PPC_OF diff -purN -X /home/mbligh/.diff.exclude reference/arch/ppc/mm/pgtable.c current/arch/ppc/mm/pgtable.c --- reference/arch/ppc/mm/pgtable.c 2004-04-07 14:54:00.000000000 -0700 +++ current/arch/ppc/mm/pgtable.c 2004-04-08 15:10:26.000000000 -0700 @@ -86,9 +86,14 @@ pte_t *pte_alloc_one_kernel(struct mm_st extern int mem_init_done; extern void *early_get_page(void); - if (mem_init_done) + if (mem_init_done) { pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); - else + if (pte) { + struct page *ptepage = virt_to_page(pte); + ptepage->mapping = (void *) mm; + ptepage->index = address & PMD_MASK; + } + } else pte = (pte_t *)early_get_page(); if (pte) clear_page(pte); @@ -97,7 +102,7 @@ pte_t *pte_alloc_one_kernel(struct mm_st struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) { - struct page *pte; + struct page *ptepage; #ifdef CONFIG_HIGHPTE int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT; @@ -105,10 +110,13 @@ struct page *pte_alloc_one(struct mm_str int flags = GFP_KERNEL | __GFP_REPEAT; #endif - pte = alloc_pages(flags, 0); - if (pte) - clear_highpage(pte); - return pte; + ptepage = alloc_pages(flags, 0); + if (ptepage) { + ptepage->mapping = (void *) mm; + ptepage->index = address & PMD_MASK; + clear_highpage(ptepage); + } + return ptepage; } void pte_free_kernel(pte_t *pte) @@ -116,15 +124,17 @@ void pte_free_kernel(pte_t *pte) #ifdef CONFIG_SMP hash_page_sync(); #endif + virt_to_page(pte)->mapping = NULL; free_page((unsigned long)pte); } -void pte_free(struct page *pte) +void pte_free(struct page *ptepage) { #ifdef CONFIG_SMP hash_page_sync(); #endif - __free_page(pte); + ptepage->mapping = NULL; + __free_page(ptepage); } #ifndef CONFIG_44x diff -purN -X /home/mbligh/.diff.exclude reference/arch/ppc64/Kconfig current/arch/ppc64/Kconfig --- reference/arch/ppc64/Kconfig 2004-04-07 14:54:00.000000000 -0700 +++ 
current/arch/ppc64/Kconfig 2004-04-09 21:46:02.000000000 -0700 @@ -173,6 +173,23 @@ config NUMA bool "NUMA support" depends on DISCONTIGMEM +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. + +config SCHED_SMT + bool "SMT (Hyperthreading) scheduler support" + depends on SMP + default off + help + SMT scheduler support improves the CPU scheduler's decision making + when dealing with POWER5 cpus at a cost of slightly increased + overhead in some places. If unsure say N here. + config PREEMPT bool "Preemptible Kernel" depends on BROKEN @@ -387,6 +404,27 @@ config DEBUG_INFO Say Y here only if you plan to use gdb to debug the kernel. If you don't debug the kernel, you can say N. +config SCHEDSTATS + bool "Collect scheduler statistics" + depends on PROC_FS + default y + help + If you say Y here, additional code will be inserted into the + scheduler and related routines to collect statistics about + scheduler behavior and provide them in /proc/schedstat. These + stats may be useful for both tuning and debugging the scheduler + If you aren't debugging the scheduler or trying to tune a specific + application, you can say N to avoid the very slight overhead + this adds. + +config MCOUNT + bool "Generate function call graph" + depends on DEBUG_KERNEL + help + This option instruments the kernel to generate a deterministic + function call graph. Answering Y here will make your kernel run + 1-2% slower. 
+ endmenu source "security/Kconfig" diff -purN -X /home/mbligh/.diff.exclude reference/arch/ppc64/kernel/Makefile current/arch/ppc64/kernel/Makefile --- reference/arch/ppc64/kernel/Makefile 2004-04-07 14:54:00.000000000 -0700 +++ current/arch/ppc64/kernel/Makefile 2004-04-09 11:53:02.000000000 -0700 @@ -5,6 +5,17 @@ EXTRA_CFLAGS += -mno-minimal-toc extra-y := head.o vmlinux.lds.s +ifeq ($(CONFIG_MCOUNT),y) +quiet_cmd_nopg = CC $@ + cmd_nopg = $(CC) $(subst -pg,,$(CFLAGS)) -c $(src)/$(*F).c -o $@ + +$(obj)/stab.o: alwayscc + $(call cmd,nopg) + +alwayscc: + $(Q)rm -f $(obj)/stab.o +endif + obj-y := setup.o entry.o traps.o irq.o idle.o dma.o \ time.o process.o signal.o syscalls.o misc.o ptrace.o \ align.o semaphore.o bitops.o stab.o pacaData.o \ diff -purN -X /home/mbligh/.diff.exclude reference/arch/ppc64/kernel/smp.c current/arch/ppc64/kernel/smp.c --- reference/arch/ppc64/kernel/smp.c 2004-04-07 14:54:00.000000000 -0700 +++ current/arch/ppc64/kernel/smp.c 2004-04-08 15:10:22.000000000 -0700 @@ -579,11 +579,6 @@ void __init smp_prepare_cpus(unsigned in paca[boot_cpuid].prof_counter = 1; paca[boot_cpuid].prof_multiplier = 1; - /* - * XXX very rough. 
- */ - cache_decay_ticks = HZ/100; - #ifndef CONFIG_PPC_ISERIES paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb(); @@ -796,3 +791,278 @@ static int __init topology_init(void) return 0; } __initcall(topology_init); + +#ifdef CONFIG_SCHED_SMT +#ifdef CONFIG_NUMA +static struct sched_group sched_group_cpus[NR_CPUS]; +static struct sched_group sched_group_phys[NR_CPUS]; +static struct sched_group sched_group_nodes[MAX_NUMNODES]; +static DEFINE_PER_CPU(struct sched_domain, cpu_domains); +static DEFINE_PER_CPU(struct sched_domain, phys_domains); +static DEFINE_PER_CPU(struct sched_domain, node_domains); +__init void arch_init_sched_domains(void) +{ + int i; + struct sched_group *first_cpu = NULL, *last_cpu = NULL; + + /* Set up domains */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + struct sched_domain *phys_domain = &per_cpu(phys_domains, i); + struct sched_domain *node_domain = &per_cpu(node_domains, i); + int node = cpu_to_node(i); + cpumask_t nodemask = node_to_cpumask(node); + cpumask_t my_cpumask = cpumask_of_cpu(i); + cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1); + + *cpu_domain = SD_SIBLING_INIT; + if (__is_processor(PV_POWER5)) + cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask); + else + cpu_domain->span = my_cpumask; + cpu_domain->groups = &sched_group_cpus[i]; + cpu_domain->parent = phys_domain; + + *phys_domain = SD_CPU_INIT; + phys_domain->span = nodemask; + // phys_domain->cache_hot_time = XXX; + phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)]; + phys_domain->parent = node_domain; + + *node_domain = SD_NODE_INIT; + node_domain->span = cpu_possible_map; + // node_domain->cache_hot_time = XXX; + node_domain->groups = &sched_group_nodes[node]; + } + + /* Set up CPU (sibling) groups */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + int j; + first_cpu = last_cpu = NULL; + + if (i != first_cpu(cpu_domain->span)) { + &per_cpu(cpu_domains, 
i)->flags |= SD_SHARE_CPUPOWER; + &per_cpu(cpu_domains, first_cpu(cpu_domain->span))->flags |= + SD_SHARE_CPUPOWER; + continue; + } + + for_each_cpu_mask(j, cpu_domain->span) { + struct sched_group *cpu = &sched_group_cpus[j]; + + cpus_clear(cpu->cpumask); + cpu_set(j, cpu->cpumask); + cpu->cpu_power = SCHED_LOAD_SCALE; + + if (!first_cpu) + first_cpu = cpu; + if (last_cpu) + last_cpu->next = cpu; + last_cpu = cpu; + } + last_cpu->next = first_cpu; + } + + for (i = 0; i < MAX_NUMNODES; i++) { + int j; + cpumask_t nodemask; + struct sched_group *node = &sched_group_nodes[i]; + cpumask_t node_cpumask = node_to_cpumask(i); + cpus_and(nodemask, node_cpumask, cpu_online_map); + + if (cpus_empty(nodemask)) + continue; + + first_cpu = last_cpu = NULL; + /* Set up physical groups */ + for_each_cpu_mask(j, nodemask) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, j); + struct sched_group *cpu = &sched_group_phys[j]; + + if (j != first_cpu(cpu_domain->span)) + continue; + + cpu->cpumask = cpu_domain->span; + /* + * Make each extra sibling increase power by 10% of + * the basic CPU. This is very arbitrary. 
+ */ + cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10; + node->cpu_power += cpu->cpu_power; + + if (!first_cpu) + first_cpu = cpu; + if (last_cpu) + last_cpu->next = cpu; + last_cpu = cpu; + } + last_cpu->next = first_cpu; + } + + /* Set up nodes */ + first_cpu = last_cpu = NULL; + for (i = 0; i < MAX_NUMNODES; i++) { + struct sched_group *cpu = &sched_group_nodes[i]; + cpumask_t nodemask; + cpumask_t node_cpumask = node_to_cpumask(i); + cpus_and(nodemask, node_cpumask, cpu_possible_map); + + if (cpus_empty(nodemask)) + continue; + + cpu->cpumask = nodemask; + /* ->cpu_power already setup */ + + if (!first_cpu) + first_cpu = cpu; + if (last_cpu) + last_cpu->next = cpu; + last_cpu = cpu; + } + last_cpu->next = first_cpu; + + mb(); + for_each_cpu(i) { + int node = cpu_to_node(i); + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + cpu_attach_domain(cpu_domain, i); + } +} +#else /* !CONFIG_NUMA */ +static struct sched_group sched_group_cpus[NR_CPUS]; +static struct sched_group sched_group_phys[NR_CPUS]; +static DEFINE_PER_CPU(struct sched_domain, cpu_domains); +static DEFINE_PER_CPU(struct sched_domain, phys_domains); +__init void arch_init_sched_domains(void) +{ + int i; + struct sched_group *first_cpu = NULL, *last_cpu = NULL; + + /* Set up domains */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + struct sched_domain *phys_domain = &per_cpu(phys_domains, i); + cpumask_t my_cpumask = cpumask_of_cpu(i); + cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1); + + *cpu_domain = SD_SIBLING_INIT; + if (__is_processor(PV_POWER5)) + cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask); + else + cpu_domain->span = my_cpumask; + cpu_domain->groups = &sched_group_cpus[i]; + cpu_domain->parent = phys_domain; + + *phys_domain = SD_CPU_INIT; + phys_domain->span = cpu_possible_map; + // phys_domain->cache_hot_time = XXX; + phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)]; 
+ } + + /* Set up CPU (sibling) groups */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + int j; + first_cpu = last_cpu = NULL; + + if (i != first_cpu(cpu_domain->span)) { + per_cpu(cpu_domains, i).flags |= SD_SHARE_CPUPOWER; + per_cpu(cpu_domains, first_cpu(cpu_domain->span)).flags |= + SD_SHARE_CPUPOWER; + continue; + } + + for_each_cpu_mask(j, cpu_domain->span) { + struct sched_group *cpu = &sched_group_cpus[j]; + + cpus_clear(cpu->cpumask); + cpu_set(j, cpu->cpumask); + cpu->cpu_power = SCHED_LOAD_SCALE; + + if (!first_cpu) + first_cpu = cpu; + if (last_cpu) + last_cpu->next = cpu; + last_cpu = cpu; + } + last_cpu->next = first_cpu; + } + + first_cpu = last_cpu = NULL; + /* Set up physical groups */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + struct sched_group *cpu = &sched_group_phys[i]; + + if (i != first_cpu(cpu_domain->span)) + continue; + + cpu->cpumask = cpu_domain->span; + /* See SMT+NUMA setup for comment */ + cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10; + + if (!first_cpu) + first_cpu = cpu; + if (last_cpu) + last_cpu->next = cpu; + last_cpu = cpu; + } + last_cpu->next = first_cpu; + + mb(); + for_each_cpu(i) { + struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + cpu_attach_domain(cpu_sd, i); + } +} +#endif /* CONFIG_NUMA */ +#else /* !CONFIG_SCHED_SMT */ + +#ifdef CONFIG_NUMA +#error ppc64 has no NUMA scheduler defined without CONFIG_SCHED_SMT. \ + Please enable CONFIG_SCHED_SMT or bug Anton. 
+#endif + +static struct sched_group sched_group_cpus[NR_CPUS]; +static DEFINE_PER_CPU(struct sched_domain, cpu_domains); + +__init void arch_init_sched_domains(void) +{ + int i; + struct sched_group *first_cpu = NULL, *last_cpu = NULL; + + /* Set up domains */ + for_each_cpu(i) { + struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + + *cpu_sd = SD_CPU_INIT; + cpu_sd->span = cpu_possible_map; + // cpu_sd->cache_hot_time = XXX; + cpu_sd->groups = &sched_group_cpus[i]; + } + + /* Set up CPU groups */ + for_each_cpu_mask(i, cpu_possible_map) { + struct sched_group *cpu = &sched_group_cpus[i]; + + cpus_clear(cpu->cpumask); + cpu_set(i, cpu->cpumask); + cpu->cpu_power = SCHED_LOAD_SCALE; + + if (!first_cpu) + first_cpu = cpu; + if (last_cpu) + last_cpu->next = cpu; + last_cpu = cpu; + } + last_cpu->next = first_cpu; + + mb(); + for_each_cpu(i) { + struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + cpu_attach_domain(cpu_sd, i); + } +} + +#endif diff -purN -X /home/mbligh/.diff.exclude reference/arch/ppc64/lib/Makefile current/arch/ppc64/lib/Makefile --- reference/arch/ppc64/lib/Makefile 2003-06-19 14:41:18.000000000 -0700 +++ current/arch/ppc64/lib/Makefile 2004-04-09 11:53:02.000000000 -0700 @@ -4,3 +4,4 @@ lib-y := checksum.o dec_and_lock.o string.o strcase.o lib-y += copypage.o memcpy.o copyuser.o +lib-$(CONFIG_MCOUNT) += mcount.o diff -purN -X /home/mbligh/.diff.exclude reference/arch/ppc64/lib/mcount.S current/arch/ppc64/lib/mcount.S --- reference/arch/ppc64/lib/mcount.S 1969-12-31 16:00:00.000000000 -0800 +++ current/arch/ppc64/lib/mcount.S 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,61 @@ +/* + * Written by Adam Litke (agl@us.ibm.com) + * + * This file implements mcount(), which is used to collect profiling data. 
+ * + */ + +#include +#include +#include + +/* + * This is called by C code in all files compiled with -pg + */ + +_GLOBAL(_mcount) + /* Store parameter regs on stack */ + std r3, -16(r1) + std r4, -24(r1) + std r5, -32(r1) + std r6, -40(r1) + std r7, -48(r1) + std r8, -56(r1) + std r9, -64(r1) + std r10, -72(r1) + + /* Set up new stack frame */ + mflr r0 + std r0, 16(r1) + mfcr r0 + std r0, 8(r1) + stdu r1, -184(r1) + + /* If relocation is off skip mcount_entry */ + std r14, -8(r1) + mfmsr r14 + andi. r14, r14, MSR_IR + cmpldi r14, 0 + ld r14, -8(r1) + beq 1f + + /* Call mcount_entry */ + bl .mcount_entry + ori 0,0,0 + +1: + /* Put everything back */ + addi r1, r1, 184 + ld r0, 16(r1) + mtlr r0 + ld r0, 8(r1) + mtcr r0 + ld r3, -16(r1) + ld r4, -24(r1) + ld r5, -32(r1) + ld r6, -40(r1) + ld r7, -48(r1) + ld r8, -56(r1) + ld r9, -64(r1) + ld r10, -72(r1) + blr diff -purN -X /home/mbligh/.diff.exclude reference/arch/ppc64/mm/hugetlbpage.c current/arch/ppc64/mm/hugetlbpage.c --- reference/arch/ppc64/mm/hugetlbpage.c 2004-04-07 14:54:00.000000000 -0700 +++ current/arch/ppc64/mm/hugetlbpage.c 2004-04-09 21:41:39.000000000 -0700 @@ -25,7 +25,6 @@ #include #include #include -#include #include @@ -279,7 +278,7 @@ static int open_32bit_htlbpage_range(str } pmd_clear(pmd); - pgtable_remove_rmap(page); + dec_page_state(nr_page_table_pages); pte_free(page); } } @@ -308,6 +307,21 @@ int prepare_hugepage_range(unsigned long return -EINVAL; } +int close_32bit_htlbpage_range(struct mm_struct *mm) +{ + struct vm_area_struct *vma; + + BUG_ON(mm->context.low_hpages == 0); + + /* Check if any vmas are in the region */ + vma = find_vma(mm, TASK_HPAGE_BASE_32); + if (vma && vma->vm_start < TASK_HPAGE_END_32) + return -EBUSY; + + mm->context.low_hpages = 0; + return 0; +} + int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) { @@ -638,8 +652,11 @@ unsigned long hugetlb_get_unmapped_area( for (vma = find_vma(current->mm, addr); ; vma = 
vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ - if (addr + len > end) + if (addr + len > end) { + if (test_thread_flag(TIF_32BIT)) + close_32bit_htlbpage_range(current->mm); return -ENOMEM; + } if (!vma || (addr + len) <= vma->vm_start) return addr; addr = ALIGN(vma->vm_end, HPAGE_SIZE); diff -purN -X /home/mbligh/.diff.exclude reference/arch/ppc64/mm/tlb.c current/arch/ppc64/mm/tlb.c --- reference/arch/ppc64/mm/tlb.c 2004-03-11 14:33:55.000000000 -0800 +++ current/arch/ppc64/mm/tlb.c 2004-04-08 15:10:26.000000000 -0700 @@ -31,7 +31,6 @@ #include #include #include -#include DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); @@ -59,7 +58,8 @@ void hpte_update(pte_t *ptep, unsigned l ptepage = virt_to_page(ptep); mm = (struct mm_struct *) ptepage->mapping; - addr = ptep_to_address(ptep); + addr = ptepage->index + + (((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE); if (REGION_ID(addr) == USER_REGION_ID) context = mm->context.id; diff -purN -X /home/mbligh/.diff.exclude reference/arch/sparc64/Kconfig current/arch/sparc64/Kconfig --- reference/arch/sparc64/Kconfig 2004-04-07 14:54:02.000000000 -0700 +++ current/arch/sparc64/Kconfig 2004-04-08 15:10:21.000000000 -0700 @@ -687,12 +687,19 @@ config DEBUG_BOOTMEM depends on DEBUG_KERNEL bool "Debug BOOTMEM initialization" +config LOCKMETER + bool "Kernel lock metering" + depends on SMP && !PREEMPT + help + Say Y to enable kernel lock metering, which adds overhead to SMP locks, + but allows you to see various statistics using the lockstat command. + # We have a custom atomic_dec_and_lock() implementation but it's not # compatible with spinlock debugging so we need to fall back on # the generic version in that case. 
config HAVE_DEC_LOCK bool - depends on SMP && !DEBUG_SPINLOCK + depends on SMP && !DEBUG_SPINLOCK && !LOCKMETER default y config MCOUNT diff -purN -X /home/mbligh/.diff.exclude reference/arch/sparc64/kernel/smp.c current/arch/sparc64/kernel/smp.c --- reference/arch/sparc64/kernel/smp.c 2004-04-07 14:54:02.000000000 -0700 +++ current/arch/sparc64/kernel/smp.c 2004-04-08 15:10:25.000000000 -0700 @@ -671,9 +671,9 @@ static __inline__ void __local_flush_dca #if (L1DCACHE_SIZE > PAGE_SIZE) __flush_dcache_page(page->virtual, ((tlb_type == spitfire) && - page->mapping != NULL)); + page_mapping(page) != NULL)); #else - if (page->mapping != NULL && + if (page_mapping(page) != NULL && tlb_type == spitfire) __flush_icache_page(__pa(page->virtual)); #endif @@ -694,7 +694,7 @@ void smp_flush_dcache_page_impl(struct p if (tlb_type == spitfire) { data0 = ((u64)&xcall_flush_dcache_page_spitfire); - if (page->mapping != NULL) + if (page_mapping(page) != NULL) data0 |= ((u64)1 << 32); spitfire_xcall_deliver(data0, __pa(page->virtual), @@ -727,7 +727,7 @@ void flush_dcache_page_all(struct mm_str goto flush_self; if (tlb_type == spitfire) { data0 = ((u64)&xcall_flush_dcache_page_spitfire); - if (page->mapping != NULL) + if (page_mapping(page) != NULL) data0 |= ((u64)1 << 32); spitfire_xcall_deliver(data0, __pa(page->virtual), diff -purN -X /home/mbligh/.diff.exclude reference/arch/sparc64/lib/rwlock.S current/arch/sparc64/lib/rwlock.S --- reference/arch/sparc64/lib/rwlock.S 2003-11-24 16:12:28.000000000 -0800 +++ current/arch/sparc64/lib/rwlock.S 2004-04-08 15:10:21.000000000 -0700 @@ -85,5 +85,20 @@ __write_trylock_succeed: __write_trylock_fail: retl mov 0, %o0 + + .globl __read_trylock +__read_trylock: /* %o0 = lock_ptr */ + ldsw [%o0], %g5 + brlz,pn %g5, 100f + add %g5, 1, %g7 + cas [%o0], %g5, %g7 + cmp %g5, %g7 + bne,pn %icc, __read_trylock + membar #StoreLoad | #StoreStore + retl + mov 1, %o0 +100: retl + mov 0, %o0 + rwlock_impl_end: diff -purN -X /home/mbligh/.diff.exclude 
reference/arch/sparc64/mm/init.c current/arch/sparc64/mm/init.c --- reference/arch/sparc64/mm/init.c 2004-04-07 14:54:02.000000000 -0700 +++ current/arch/sparc64/mm/init.c 2004-04-08 15:10:25.000000000 -0700 @@ -139,9 +139,9 @@ __inline__ void flush_dcache_page_impl(s #if (L1DCACHE_SIZE > PAGE_SIZE) __flush_dcache_page(page->virtual, ((tlb_type == spitfire) && - page->mapping != NULL)); + page_mapping(page) != NULL)); #else - if (page->mapping != NULL && + if (page_mapping(page) != NULL && tlb_type == spitfire) __flush_icache_page(__pa(page->virtual)); #endif @@ -203,7 +203,7 @@ void update_mmu_cache(struct vm_area_str pfn = pte_pfn(pte); if (pfn_valid(pfn) && - (page = pfn_to_page(pfn), page->mapping) && + (page = pfn_to_page(pfn), page_mapping(page)) && ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL)); @@ -227,7 +227,7 @@ void flush_dcache_page(struct page *page int dirty = test_bit(PG_dcache_dirty, &page->flags); int dirty_cpu = dcache_dirty_cpu(page); - if (page->mapping && + if (page_mapping(page) && list_empty(&page->mapping->i_mmap) && list_empty(&page->mapping->i_mmap_shared)) { if (dirty) { @@ -237,7 +237,7 @@ void flush_dcache_page(struct page *page } set_dcache_dirty(page); } else { - /* We could delay the flush for the !page->mapping + /* We could delay the flush for the !page_mapping * case too. But that case is for exec env/arg * pages and those are %99 certainly going to get * faulted into the tlb (and thus flushed) anyways. 
@@ -279,7 +279,7 @@ static inline void flush_cache_pte_range if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); - if (PageReserved(page) || !page->mapping) + if (PageReserved(page) || !page_mapping(page)) continue; pgaddr = (unsigned long) page_address(page); uaddr = address + offset; diff -purN -X /home/mbligh/.diff.exclude reference/arch/x86_64/Kconfig current/arch/x86_64/Kconfig --- reference/arch/x86_64/Kconfig 2004-04-07 14:54:02.000000000 -0700 +++ current/arch/x86_64/Kconfig 2004-04-09 21:46:02.000000000 -0700 @@ -261,6 +261,14 @@ config NUMA depends on K8_NUMA default y +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default n + help + Enable two level sched domains hierarchy. + Say N if unsure. + config HAVE_DEC_LOCK bool depends on SMP @@ -468,6 +476,19 @@ config DEBUG_INFO Say Y here only if you plan to use gdb to debug the kernel. Please note that this option requires new binutils. If you don't debug the kernel, you can say N. + +config SCHEDSTATS + bool "Collect scheduler statistics" + depends on PROC_FS + default y + help + If you say Y here, additional code will be inserted into the + scheduler and related routines to collect statistics about + scheduler behavior and provide them in /proc/schedstat. These + stats may be useful for both tuning and debugging the scheduler + If you aren't debugging the scheduler or trying to tune a specific + application, you can say N to avoid the very slight overhead + this adds. 
config FRAME_POINTER bool "Compile the kernel with frame pointers" diff -purN -X /home/mbligh/.diff.exclude reference/arch/x86_64/boot/compressed/head.S current/arch/x86_64/boot/compressed/head.S --- reference/arch/x86_64/boot/compressed/head.S 2002-12-09 18:46:24.000000000 -0800 +++ current/arch/x86_64/boot/compressed/head.S 2004-04-08 15:10:20.000000000 -0700 @@ -26,6 +26,7 @@ .code32 .text +#define IN_BOOTLOADER #include #include diff -purN -X /home/mbligh/.diff.exclude reference/arch/x86_64/boot/compressed/misc.c current/arch/x86_64/boot/compressed/misc.c --- reference/arch/x86_64/boot/compressed/misc.c 2003-10-01 11:47:39.000000000 -0700 +++ current/arch/x86_64/boot/compressed/misc.c 2004-04-08 15:10:20.000000000 -0700 @@ -9,6 +9,7 @@ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ +#define IN_BOOTLOADER #include "miscsetup.h" #include diff -purN -X /home/mbligh/.diff.exclude reference/drivers/block/scsi_ioctl.c current/drivers/block/scsi_ioctl.c --- reference/drivers/block/scsi_ioctl.c 2004-04-07 14:54:05.000000000 -0700 +++ current/drivers/block/scsi_ioctl.c 2004-04-09 11:53:00.000000000 -0700 @@ -230,7 +230,7 @@ static int sg_scsi_ioctl(request_queue_t return -EFAULT; if (in_len > PAGE_SIZE || out_len > PAGE_SIZE) return -EINVAL; - if (get_user(opcode, sic->data)) + if (get_user(opcode, (int *)sic->data)) return -EFAULT; bytes = max(in_len, out_len); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/char/keyboard.c current/drivers/char/keyboard.c --- reference/drivers/char/keyboard.c 2004-02-18 14:56:54.000000000 -0800 +++ current/drivers/char/keyboard.c 2004-04-08 15:10:20.000000000 -0700 @@ -1066,6 +1066,9 @@ void kbd_keycode(unsigned int keycode, i } if (sysrq_down && down && !rep) { handle_sysrq(kbd_sysrq_xlate[keycode], regs, tty); +#ifdef CONFIG_KGDB_SYSRQ + sysrq_down = 0; /* in case we miss the "up" event */ +#endif return; } #endif diff -purN -X /home/mbligh/.diff.exclude reference/drivers/char/sysrq.c 
current/drivers/char/sysrq.c --- reference/drivers/char/sysrq.c 2004-02-04 16:24:01.000000000 -0800 +++ current/drivers/char/sysrq.c 2004-04-08 15:10:20.000000000 -0700 @@ -35,6 +35,25 @@ #include #include +#ifdef CONFIG_KGDB_SYSRQ + +#define GDB_OP &kgdb_op +static void kgdb_sysrq(int key, struct pt_regs *pt_regs, struct tty_struct *tty) +{ + printk("kgdb sysrq\n"); + breakpoint(); +} + +static struct sysrq_key_op kgdb_op = { + .handler = kgdb_sysrq, + .help_msg = "kGdb|Fgdb", + .action_msg = "Debug breakpoint\n", +}; + +#else +#define GDB_OP NULL +#endif + extern void reset_vc(unsigned int); @@ -238,8 +257,8 @@ static struct sysrq_key_op *sysrq_key_ta /* c */ NULL, /* d */ NULL, /* e */ &sysrq_term_op, -/* f */ NULL, -/* g */ NULL, +/* f */ GDB_OP, +/* g */ GDB_OP, /* h */ NULL, /* i */ &sysrq_kill_op, /* j */ NULL, diff -purN -X /home/mbligh/.diff.exclude reference/drivers/char/tty_io.c current/drivers/char/tty_io.c --- reference/drivers/char/tty_io.c 2004-04-07 14:54:06.000000000 -0700 +++ current/drivers/char/tty_io.c 2004-04-09 13:27:12.000000000 -0700 @@ -1901,6 +1901,21 @@ int tty_ioctl(struct inode * inode, stru case TIOCMBIC: case TIOCMBIS: return tty_tiocmset(tty, file, cmd, arg); + /* + * Without the real device to which /dev/console is connected, + * blogd can not work. + * blogd spawns a pty/tty pair, + * set /dev/console to the tty of that pair (ioctl TIOCCONS), + * then reads in all input from the current /dev/console, + * buffer or write the readed data to /var/log/boot.msg + * _and_ to the original real device. 
+ */ + case TIOCGDEV: + { + unsigned int ret = old_encode_dev(tty_devnum(real_tty)); + return put_user(ret, (unsigned int*) arg); + } + } if (tty->driver->ioctl) { int retval = (tty->driver->ioctl)(tty, file, cmd, arg); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/Kconfig current/drivers/media/Kconfig --- reference/drivers/media/Kconfig 2004-02-04 16:24:06.000000000 -0800 +++ current/drivers/media/Kconfig 2004-04-09 21:41:40.000000000 -0700 @@ -34,14 +34,15 @@ source "drivers/media/common/Kconfig" config VIDEO_TUNER tristate - default y if VIDEO_BT848=y || VIDEO_SAA7134=y || VIDEO_MXB=y || VIDEO_CX88=y - default m if VIDEO_BT848=m || VIDEO_SAA7134=m || VIDEO_MXB=m || VIDEO_CX88=m + default y if VIDEO_BT848=y || VIDEO_SAA7134=y || VIDEO_MXB=y || VIDEO_CX88=y || VIDEO_IVTV=y + default m if VIDEO_BT848=m || VIDEO_SAA7134=m || VIDEO_MXB=m || VIDEO_CX88=m || VIDEO_IVTV=m + depends on VIDEO_DEV config VIDEO_BUF tristate - default y if VIDEO_BT848=y || VIDEO_SAA7134=y || VIDEO_SAA7146=y || VIDEO_CX88=y - default m if VIDEO_BT848=m || VIDEO_SAA7134=m || VIDEO_SAA7146=m || VIDEO_CX88=m + default y if VIDEO_BT848=y || VIDEO_SAA7134=y || VIDEO_SAA7146=y || VIDEO_CX88=y || VIDEO_IVTV=y + default m if VIDEO_BT848=m || VIDEO_SAA7134=m || VIDEO_SAA7146=m || VIDEO_CX88=m || VIDEO_IVTV=m depends on VIDEO_DEV config VIDEO_BTCX diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/Kconfig current/drivers/media/video/Kconfig --- reference/drivers/media/video/Kconfig 2004-03-11 14:34:34.000000000 -0800 +++ current/drivers/media/video/Kconfig 2004-04-09 21:41:40.000000000 -0700 @@ -22,6 +22,25 @@ config VIDEO_BT848 To compile this driver as a module, choose M here: the module will be called bttv. +config VIDEO_IVTV + tristate "IVTV Video For Linux" + depends on VIDEO_DEV && PCI && I2C_ALGOBIT && SOUND + ---help--- + Support for Hauppauge WinTv PVR 250 and 350 boards. 
+ + If you say Y or M here, you need to say Y or M to "I2C support" and + "I2C bit-banging interfaces" in the character device section. + + Saying M here will compile this driver as a module (ivtv). + +config VIDEO_IVTV_FB + tristate "IVTV Video For Linux Framebuffer" + depends on VIDEO_IVTV && FB && PCI + ---help--- + Support for Hauppauge WinTv PVR 350 boards TV Out via framebuffer. + + Saying M here will compile this driver as a module (ivtv-fb). + config VIDEO_PMS tristate "Mediavision Pro Movie Studio Video For Linux" depends on VIDEO_DEV && ISA diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/Makefile current/drivers/media/video/Makefile --- reference/drivers/media/video/Makefile 2004-03-11 14:34:35.000000000 -0800 +++ current/drivers/media/video/Makefile 2004-04-09 21:41:40.000000000 -0700 @@ -8,12 +8,16 @@ zoran-objs := zr36120.o zr36120_i2c zr36067-objs := zoran_procfs.o zoran_device.o \ zoran_driver.o zoran_card.o +ivtv-objs := ivtv-driver.o ivtv-i2c.o ivtv-api.o + obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o obj-$(CONFIG_VIDEO_BT848) += bttv.o msp3400.o tvaudio.o \ tda7432.o tda9875.o ir-kbd-i2c.o ir-kbd-gpio.o obj-$(CONFIG_SOUND_TVMIXER) += tvmixer.o +obj-$(CONFIG_VIDEO_IVTV) += msp3400.o saa7115.o tveeprom.o ivtv.o saa7127.o +obj-$(CONFIG_VIDEO_IVTV_FB) += ivtv-fb.o obj-$(CONFIG_VIDEO_ZR36120) += zoran.o obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/ivtv-api.c current/drivers/media/video/ivtv-api.c --- reference/drivers/media/video/ivtv-api.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/media/video/ivtv-api.c 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,3151 @@ +/* License: GPL + * Author: Kevin Thayer + * + * This file will hold API related functions, both internal (firmware api) + * and external (v4l2, etc) + * + */ + +#include "ivtv.h" + +/* Fix the v4l2 api breakage - need to define if 
still using the old api */ +#ifndef VIDIOC_OVERLAY_OLD +#define VIDIOC_OVERLAY_OLD _IOWR ('V', 14, int) +#define VIDIOC_S_PARM_OLD _IOW ('V', 22, struct v4l2_streamparm) +#define VIDIOC_S_CTRL_OLD _IOW ('V', 28, struct v4l2_control) +#define VIDIOC_G_AUDIO_OLD _IOWR ('V', 33, struct v4l2_audio) +#define VIDIOC_G_AUDOUT_OLD _IOWR ('V', 49, struct v4l2_audioout) +#endif + +/* FIXME need to find a good value */ +#define V4L2_PIX_FMT_CMP_MPG2 77777 +#define IVTV_V4L2_MAX_MINOR 15 + +static int ivtv_v4l2_init(struct video_device *v); +static int ivtv_v4l2_close(struct inode *inode, struct file *filp); +static int ivtv_v4l2_open(struct inode *inode, struct file *filp); +static int ivtv_v4l2_read(struct file *filp, char *buf, size_t count, + loff_t * pos); +static ssize_t ivtv_v4l2_write(struct file *filp, const char *buf, size_t count, + loff_t * pos); +static int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +static int ivtv_v4l2_pre_init(struct ivtv *itv); + +struct file_operations ivtv_v4l2_fops = { + owner:THIS_MODULE, + read:ivtv_v4l2_read, + write:ivtv_v4l2_write, + open:ivtv_v4l2_open, + ioctl:ivtv_v4l2_ioctl, + release:ivtv_v4l2_close, + poll:ivtv_poll, +}; + +/* FIXME Static variables for the various card types go here */ + +static struct video_device tmk_v4l2dev = { /*values that work with the author's card */ + .owner = THIS_MODULE, + .name = "Vanilla iTVC15 card", + .type = VID_TYPE_CAPTURE | VID_TYPE_TUNER | VID_TYPE_OVERLAY | + VID_TYPE_CLIPPING | VID_TYPE_SCALES, + .fops = &ivtv_v4l2_fops, + .minor = -1, +}; + +/* some tuner table values can change, so allocate this dynamically when you use it*/ +struct v4l2_tuner tmk_tuners[2] = { + { + .index = 0, + .name = "ivtv TV Tuner", + .type = V4L2_TUNER_ANALOG_TV, + .capability = (V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO), + .rxsubchans = (V4L2_TUNER_SUB_STEREO), + .audmode = V4L2_TUNER_MODE_STEREO, + .signal = 0, + .afc = 0, + .reserved = {0, 0, 0, 0} + }, { + 
.index = 1, + .name = "ivtv Radio", + .type = V4L2_TUNER_RADIO, + .capability = (V4L2_TUNER_CAP_STEREO), + .rxsubchans = 0, + .audmode = V4L2_TUNER_MODE_STEREO, + .signal = 0, + .afc = 0, + .reserved = {0, 0, 0, 0} + } +}; + +struct v4l2_standard tmk_standards[3] = { + { + .index = 0, + .id = V4L2_STD_NTSC, + .name = "NTSC", + .frameperiod = {.numerator = 1001, + .denominator = 30000}, + .framelines = 525, + .reserved = {0, 0, 0, 0} + }, { + .index = 1, + .id = V4L2_STD_PAL, + .name = "PAL", + .frameperiod = {.numerator = 1, + .denominator = 25}, + .framelines = 625, + .reserved = {0, 0, 0, 0} + }, { + .index = 2, + .id = V4L2_STD_SECAM, + .name = "SECAM", + .frameperiod = {.numerator = 1, + .denominator = 25}, + .framelines = 625, + .reserved = {0, 0, 0, 0} + } +}; + +struct v4l2_input tmk_inputs[10] = { /*values that work with the author's card */ + { + .index = 0, + .name = "Composite 0", + .type = V4L2_INPUT_TYPE_CAMERA, + .audioset = 1, + .tuner = 0, + .status = 0, + }, { + .index = 1, + .name = "Composite 1", + .type = V4L2_INPUT_TYPE_CAMERA, + .audioset = 1, + .tuner = 0, + .status = 0, + }, { + .index = 2, + .name = "Composite 2", + .type = V4L2_INPUT_TYPE_CAMERA, + .audioset = 1, + .tuner = 0, + .status = 0, + }, { + .index = 3, + .name = "Composite 3", + .type = V4L2_INPUT_TYPE_CAMERA, + .audioset = 1, + .tuner = 0, + .status = 0, + }, { + .index = 4, + .name = "Tuner 0", + .type = V4L2_INPUT_TYPE_TUNER, + .audioset = 0, + .tuner = 0, + .status = 0, + }, { + .index = 5, + .name = "Composite 4", + .type = V4L2_INPUT_TYPE_CAMERA, + .audioset = 1, + .tuner = 0, + .status = 0, + }, { + .index = 6, + .name = "S-Video 0", + .type = V4L2_INPUT_TYPE_CAMERA, + .audioset = 1, + .tuner = 0, + .status = 0, + }, { + .index = 7, + .name = "S-Video 1", + .type = V4L2_INPUT_TYPE_CAMERA, + .audioset = 1, + .tuner = 0, + .status = 0, + }, { + .index = 8, + .name = "S-Video 2", + .type = V4L2_INPUT_TYPE_CAMERA, + .audioset = 1, + .tuner = 0, + .status = 0, + }, { + .index = 
9, + .name = "S-Video 3", + .type = V4L2_INPUT_TYPE_CAMERA, + .audioset = 1, + .tuner = 0, + .status = 0, + } +}; + +//FIXME capability and mode might be wrong +struct v4l2_audio tmk_audio_inputs[2] = { + {0, "Tuner Audio In", 0, 0,}, + {1, "Audio Line In", 0, 0,}, +}; + +int tmk_audio_mapping[] = { + 0, 3, /* Input 0 is msp input 3 */ + 1, 1, /* input 1 is msp input 1 */ + 0, 0 /* you're at end of list! */ +}; + +struct v4l2_queryctrl ivtv_ctrl_menu_freq = { + .id = V4L2_CID_IVTV_FREQ, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Frequency", + .minimum = 0, + .maximum = 2, + .step = 1, + .default_value = 2, + .flags = 0, + .reserved = {0, 0} +}; + +struct v4l2_querymenu ivtv_ctrl_query_freq[] = { + /* ID, Index, Name, Reserved */ + {V4L2_CID_IVTV_FREQ, 0, "32kHz", 0}, + {V4L2_CID_IVTV_FREQ, 1, "44.1kHz", 0}, + {V4L2_CID_IVTV_FREQ, 2, "48kHz", 0} +}; + +u32 ivtv_audio_tbl_freq[] = { + /* setting */ + 0x2 /* 32kHz binary 10 */ , + 0x0 /* 44.1kHz binary 00 */ , + 0x1 /* 48kHz binary 01 */ +}; + +u32 ivtv_audio_mask_freq = 0x3; + +struct v4l2_queryctrl ivtv_ctrl_menu_enc = { + .id = V4L2_CID_IVTV_ENC, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Encoding", + .minimum = 0, + .maximum = 2, + .step = 1, + .default_value = 1, + .flags = 0, + .reserved = {0, 0} +}; + +struct v4l2_querymenu ivtv_ctrl_query_enc[] = { + /* ID, Index, Name, Reserved */ + {V4L2_CID_IVTV_ENC, 0, "Layer 1", 0}, + {V4L2_CID_IVTV_ENC, 1, "Layer 2", 0}, + {V4L2_CID_IVTV_ENC, 2, "Layer 3(?)", 0} +}; + +u32 ivtv_audio_tbl_enc[] = { + /* setting */ + 0x1 << 2 /* Layer 1 binary 0100 */ , + 0x2 << 2 /* Layer 2 binary 1000 */ , + 0x3 << 2 /* Layer 3(?) 
binary 1100 */ +}; + +u32 ivtv_audio_mask_enc = 0xC; + +struct v4l2_queryctrl ivtv_ctrl_menu_bitrate = { + .id = V4L2_CID_IVTV_BITRATE, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Audio Bitrate", + .minimum = 0, + .maximum = 14, + .step = 1, + .default_value = 14, + .flags = 0, + .reserved = {0, 0} +}; + +struct v4l2_querymenu ivtv_ctrl_query_bitrate[] = { + /* ID, Index, Name, Reserved */ + {V4L2_CID_IVTV_BITRATE, 0, "[L1/L2] Free fmt", 0}, + {V4L2_CID_IVTV_BITRATE, 1, "[L1/L2] 32k/32k", 0}, + {V4L2_CID_IVTV_BITRATE, 2, "[L1/L2] 64k/48k", 0}, + {V4L2_CID_IVTV_BITRATE, 3, "[L1/L2] 96k/56k", 0}, + {V4L2_CID_IVTV_BITRATE, 4, "[L1/L2] 128k/64k", 0}, + {V4L2_CID_IVTV_BITRATE, 5, "[L1/L2] 160k/80k", 0}, + {V4L2_CID_IVTV_BITRATE, 6, "[L1/L2] 192k/96k", 0}, + {V4L2_CID_IVTV_BITRATE, 7, "[L1/L2] 224k/112k", 0}, + {V4L2_CID_IVTV_BITRATE, 8, "[L1/L2] 256k/128k", 0}, + {V4L2_CID_IVTV_BITRATE, 9, "[L1/L2] 288k/160k", 0}, + {V4L2_CID_IVTV_BITRATE, 10, "[L1/L2] 320k/192k", 0}, + {V4L2_CID_IVTV_BITRATE, 11, "[L1/L2] 352k/224k", 0}, + {V4L2_CID_IVTV_BITRATE, 12, "[L1/L2] 384k/256k", 0}, + {V4L2_CID_IVTV_BITRATE, 13, "[L1/L2] 416k/320k", 0}, + {V4L2_CID_IVTV_BITRATE, 14, "[L1/L2] 448k/384k", 0}, +}; + +u32 ivtv_audio_tbl_bitrate[] = { + /* setting */ + 0x0 << 4 /* [L1/L2] Free fmt binary 0000 */ , + 0x1 << 4 /* [L1/L2] 32k/32k, binary 0001 */ , + 0x2 << 4 /* [L1/L2] 64k/48k, binary 0010 */ , + 0x3 << 4 /* [L1/L2] 96k/56k, binary 0011 */ , + 0x4 << 4 /* [L1/L2] 128k/64k, binary 0100 */ , + 0x5 << 4 /* [L1/L2] 160k/80k, binary 0101 */ , + 0x6 << 4 /* [L1/L2] 192k/96k, binary 0110 */ , + 0x7 << 4 /* [L1/L2] 224k/112k, binary 0111 */ , + 0x8 << 4 /* [L1/L2] 256k/128k, binary 1000 */ , + 0x9 << 4 /* [L1/L2] 288k/160k, binary 1001 */ , + 0xA << 4 /* [L1/L2] 320k/192k, binary 1010 */ , + 0xB << 4 /* [L1/L2] 352k/224k, binary 1011 */ , + 0xC << 4 /* [L1/L2] 384k/256k, binary 1100 */ , + 0xD << 4 /* [L1/L2] 416k/320k, binary 1101 */ , + 0xE << 4 /* [L1/L2] 448k/384k, binary 1110 */ +}; + +u32 
ivtv_audio_mask_bitrate = 0xF0; + +struct v4l2_queryctrl ivtv_ctrl_menu_mono = { + .id = V4L2_CID_IVTV_MONO, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Mono/Stereo", + .minimum = 0, + .maximum = 3, + .step = 1, + .default_value = 0, + .flags = 0, + .reserved = {0, 0} +}; + +struct v4l2_querymenu ivtv_ctrl_query_mono[] = { + /* ID, Index, Name, Reserved */ + {V4L2_CID_IVTV_MONO, 0, "Stereo", 0}, + {V4L2_CID_IVTV_MONO, 1, "JointStereo", 0}, + {V4L2_CID_IVTV_MONO, 2, "Dual", 0}, + {V4L2_CID_IVTV_MONO, 3, "Mono", 0} +}; + +u32 ivtv_audio_tbl_mono[] = { + /* setting */ + 0x0 << 8 /* Stereo, binary 00 */ , + 0x1 << 8 /* JointStereo, binary 01 */ , + 0x2 << 8 /* Dual, binary 10 */ , + 0x3 << 8 /* Mono, binary 11 */ +}; + +u32 ivtv_audio_mask_mono = 0x300; + +struct v4l2_queryctrl ivtv_ctrl_menu_joint = { + .id = V4L2_CID_IVTV_JOINT, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Joint extension", + .minimum = 0, + .maximum = 3, + .step = 1, + .default_value = 0, + .flags = 0, + .reserved = {0, 0} +}; + +struct v4l2_querymenu ivtv_ctrl_query_joint[] = { + /* ID, Index, Name, Reserved */ + {V4L2_CID_IVTV_JOINT, 0, "Subbands 4-31/bound=4", 0}, + {V4L2_CID_IVTV_JOINT, 1, "Subbands 8-31/bound=8", 0}, + {V4L2_CID_IVTV_JOINT, 2, "Subbands 12-31/bound=12", 0}, + {V4L2_CID_IVTV_JOINT, 3, "Subbands 16-31/bound=16", 0} +}; + +u32 ivtv_audio_tbl_joint[] = { + /* setting */ + 0x0 << 10 /* Subbands 4-31/bound=4, binary 00 */ , + 0x1 << 10 /* Subbands 8-31/bound=8, binary 01 */ , + 0x2 << 10 /* Subbands 12-31/bound=12, binary 10 */ , + 0x3 << 10 /* Subbands 16-31/bound=16, binary 11 */ +}; + +u32 ivtv_audio_mask_joint = 0xc00; + +struct v4l2_queryctrl ivtv_ctrl_menu_emphasis = { + .id = V4L2_CID_IVTV_EMPHASIS, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Emphasis", + .minimum = 0, + .maximum = 2, + .step = 1, + .default_value = 0, + .flags = 0, + .reserved = {0, 0} +}; + +struct v4l2_querymenu ivtv_ctrl_query_emphasis[] = { + /* ID, Index, Name, Reserved */ + {V4L2_CID_IVTV_EMPHASIS, 0, "None", 
0}, + {V4L2_CID_IVTV_EMPHASIS, 1, "50/15uS", 0}, + {V4L2_CID_IVTV_EMPHASIS, 2, "CCITT J.17", 0} +}; + +u32 ivtv_audio_tbl_emphasis[] = { + /* setting */ + 0x0 << 12 /* None, binary 00 */ , + 0x1 << 12 /* 50/15uS, binary 01 */ , + 0x3 << 12 /* CCITT J.17, binary 11 */ +}; + +u32 ivtv_audio_mask_emphasis = 0x3000; + +struct v4l2_queryctrl ivtv_ctrl_menu_crc = { + .id = V4L2_CID_IVTV_CRC, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Audio CRC", + .minimum = 0, + .maximum = 1, + .step = 1, + .default_value = 0, + .flags = 0, + .reserved = {0, 0} +}; + +struct v4l2_querymenu ivtv_ctrl_query_crc[] = { + /* ID, Index, Name, Reserved */ + {V4L2_CID_IVTV_CRC, 0, "off", 0}, + {V4L2_CID_IVTV_CRC, 1, "on", 0} +}; + +u32 ivtv_audio_tbl_crc[] = { + /* setting */ + 0x0 << 14 /* off, binary 0 */ , + 0x1 << 14 /* on, binary 1 */ +}; + +u32 ivtv_audio_mask_crc = 0x4000; + +struct v4l2_queryctrl ivtv_ctrl_menu_copyright = { + .id = V4L2_CID_IVTV_COPYRIGHT, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Copyright", + .minimum = 0, + .maximum = 1, + .step = 1, + .default_value = 0, + .flags = 0, + .reserved = {0, 0} +}; + +struct v4l2_querymenu ivtv_ctrl_query_copyright[] = { + /* ID, Index, Name, Reserved */ + {V4L2_CID_IVTV_COPYRIGHT, 0, "off", 0}, + {V4L2_CID_IVTV_COPYRIGHT, 1, "on", 0} +}; + +u32 ivtv_audio_tbl_copyright[] = { + /* setting */ + 0x0 << 15 /* off, binary 0 */ , + 0x1 << 15 /* on, binary 1 */ +}; + +u32 ivtv_audio_mask_copyright = 0x8000; + +struct v4l2_queryctrl ivtv_ctrl_menu_generation = { + .id = V4L2_CID_IVTV_GEN, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Generation", + .minimum = 0, + .maximum = 1, + .step = 1, + .default_value = 0, + .flags = 0, + .reserved = {0, 0} +}; + +struct v4l2_querymenu ivtv_ctrl_query_generation[] = { + /* ID, Index, Name, Reserved */ + {V4L2_CID_IVTV_GEN, 0, "copy", 0}, + {V4L2_CID_IVTV_GEN, 1, "original", 0} +}; + +u32 ivtv_audio_tbl_generation[] = { + /* setting */ + 0x0 << 16 /* copy, binary 0 */ , + 0x1 << 16 /* original, binary 1 */ +}; 
+ +u32 ivtv_audio_mask_generation = 0x10000; + +/* 3 stream types: mpeg, yuv, passthru */ +struct ivtv_v4l2_stream tmk_mpg_stream = { + /*MPEG*/.s_flags = 0, + .id = -1, + .v4l_reg_type = VFL_TYPE_GRABBER, + .format = { + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .fmt = { + .pix = { + .width = 720, + .height = 480, + .field = V4L2_FIELD_INTERLACED, + .sizeimage = (128 * 1024), + } + }, + }, + .controlcount = 0, + .controls = NULL +}; + +struct ivtv_v4l2_stream tmk_yuv_stream = { + /*YUV*/.s_flags = 0, + .id = -1, + .v4l_reg_type = VFL_TYPE_GRABBER, + .format = { + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .fmt = { + .pix = { + .width = 720, + .height = 480, + .field = V4L2_FIELD_INTERLACED, + .sizeimage = (720 * 720), + } + }, + }, + .controlcount = 0, + .controls = NULL +}; + +//FIXME these settings are way wrong +struct ivtv_v4l2_stream tmk_vbi_stream = { + .s_flags = 0, + .id = -1, + .v4l_reg_type = VFL_TYPE_VBI, + .format = { + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .fmt = { + .pix = { + .width = 720, + .height = 480, + .field = V4L2_FIELD_INTERLACED, + .sizeimage = (128 * 1024), + } + }, + }, + .controlcount = 0, + .controls = NULL +}; + +struct ivtv_v4l2_stream dec_mpg_stream = { +/*Decoder MPG*/ + .s_flags = 0, + .id = -1, + .v4l_reg_type = VFL_TYPE_GRABBER, + .format = { + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT, + .fmt = { + .pix = { + .width = 720, + .height = 480, + .field = V4L2_FIELD_INTERLACED, + .sizeimage = (128 * 1024), + } + }, + }, + .controlcount = 0, + .controls = NULL +}; + +struct ivtv_v4l2_stream dec_yuv_stream = { +/*Decoder YUV*/ + .s_flags = 0, + .id = -1, + .v4l_reg_type = VFL_TYPE_GRABBER, + .format = { + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT, + .fmt = { + .pix = { + .width = 720, + .height = 480, + .field = V4L2_FIELD_INTERLACED, + .sizeimage = (720 * 720), + } + }, + }, + .controlcount = 0, + .controls = NULL +}; + +/* Initialize v4l2 variables and register v4l2 device */ +int ivtv_v4l2_setup(struct ivtv *itv) +{ + int x, cont, retval; + + 
IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 setup\n"); + + //switch based on card type + // and fill in appropriate v4l2 device + switch (itv->card_type) { + case IVTV_350_V1: + IVTV_DEBUG(IVTV_DEBUG_INFO, "Configuring 350rev1 card\n"); + itv->v4l2.streamcount = IVTV_350_V1_STREAMS; + /* Disable dec yuv buffers if requested */ + if (itv->options.dec_yuv_buffers == 0) + itv->v4l2.streamcount--; + /* FIXME wrong values */ + itv->v4l2.capabilities = + (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | + V4L2_CAP_READWRITE | V4L2_CAP_VIDEO_OUTPUT); + break; + case IVTV_250_V2: + IVTV_DEBUG(IVTV_DEBUG_INFO, "Configuring 250rev2 card\n"); + itv->v4l2.streamcount = IVTV_250_V2_STREAMS; + /* FIXME wrong values */ + itv->v4l2.capabilities = + (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | + V4L2_CAP_READWRITE); + break; + case IVTV_250_V1: + IVTV_DEBUG(IVTV_DEBUG_INFO, "Configuring 250rev1 card\n"); + default: /* shouldn't happen, treat like V1 */ + itv->v4l2.streamcount = IVTV_250_V1_STREAMS; + itv->v4l2.capabilities = + (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | + V4L2_CAP_READWRITE); + + break; + } + + /* Initial settings */ + itv->v4l2.codec.bitrate_mode = 0; + itv->v4l2.codec.bitrate = 8000000; + itv->v4l2.codec.bitrate_peak = 16000000; + itv->v4l2.codec.stream_type = IVTV_STREAM_PS; + itv->v4l2.codec.bframes = 3; + itv->v4l2.codec.gop_closure = 0; + itv->v4l2.codec.dnr_mode = 0; + itv->v4l2.codec.dnr_type = 0; + itv->v4l2.codec.dnr_spatial = 0; + itv->v4l2.codec.dnr_temporal = 0; + itv->v4l2.codec.aspect = 2; + + itv->dec_options.hide_last_frame = 1; + itv->dec_options.pts_low = 0; + itv->dec_options.pts_hi = 0; + itv->dec_options.gop_offset = 0; + itv->dec_options.mute_frames = 0; + + /* Ctrls */ + itv->dec_options.speed.mute = 1; + itv->dec_options.speed.aud_mute = 0; + itv->dec_options.speed.smooth = 1; + itv->dec_options.speed.fr_mask = 2; + itv->dec_options.speed.fr_field = 1; + itv->dec_options.decbuffers = 1; + 
itv->dec_options.prebuffer = 1; + + /* Allocate streams */ + itv->v4l2.streams = (struct ivtv_v4l2_stream *) + kmalloc((itv->v4l2.streamcount * + sizeof(struct ivtv_v4l2_stream)), GFP_KERNEL); + if (NULL == itv->v4l2.streams) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Couldn't allocate v4l2 streams\n"); + retval = -ENOMEM; + goto ivtv_stream_fail; + } + + /* pre-init */ + retval = ivtv_v4l2_pre_init(itv); + if (retval < 0) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error in pre-init\n"); + goto ivtv_pre_init_fail; + } + + /* Fill in streams with some defaults */ + memcpy(&itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_MPG], &tmk_mpg_stream, + sizeof(struct ivtv_v4l2_stream)); + + memcpy(&itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_YUV], &tmk_yuv_stream, + sizeof(struct ivtv_v4l2_stream)); + + memcpy(&itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_VBI], &tmk_vbi_stream, + sizeof(struct ivtv_v4l2_stream)); + + /* Set some card-specific per-stream stuff here */ + switch (itv->card_type) { + case IVTV_350_V1: + memcpy(&itv->v4l2.streams[IVTV_DEC_STREAM_TYPE_MPG], + &dec_mpg_stream, sizeof(struct ivtv_v4l2_stream)); + + if (itv->options.dec_yuv_buffers != 0) { + memcpy(&itv->v4l2.streams[IVTV_DEC_STREAM_TYPE_YUV], + &dec_yuv_stream, + sizeof(struct ivtv_v4l2_stream)); + } + break; + case IVTV_250_V2: + break; + case IVTV_250_V1: + default: /* shouldn't happen, treat like V1 */ + break; + } + + for (x = 0; x < itv->v4l2.streamcount; x++) { + init_waitqueue_head(&itv->v4l2.streams[x].waitq); + + itv->v4l2.streams[x].v4l2dev = video_device_alloc(); + if (NULL == itv->v4l2.streams[x].v4l2dev) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Couldn't allocate v4l2 video_device\n"); + retval = -ENOMEM; + goto ivtv_videodev_fail; + } + + memcpy(itv->v4l2.streams[x].v4l2dev, + &tmk_v4l2dev, sizeof(struct video_device)); + itv->v4l2.streams[x].v4l2dev->priv = itv; + itv->v4l2.streams[x].ubytes = 0; + itv->v4l2.streams[x].free_q.vdev = itv->v4l2.streams[x].v4l2dev; + itv->v4l2.streams[x].full_q.vdev = itv->v4l2.streams[x].v4l2dev; + 
itv->v4l2.streams[x].dma_q.vdev = itv->v4l2.streams[x].v4l2dev; + INIT_LIST_HEAD(&itv->v4l2.streams[x].free_q.list); + INIT_LIST_HEAD(&itv->v4l2.streams[x].full_q.list); + INIT_LIST_HEAD(&itv->v4l2.streams[x].dma_q.list); + + retval = ivtv_init_queue(itv, &itv->v4l2.streams[x].full_q, 0, + itv->v4l2.streams[x].format.type); + if (retval < 0) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error on init_queue 1\n"); + goto ivtv_initq_fail; + } + + retval = ivtv_init_queue(itv, &itv->v4l2.streams[x].dma_q, 0, + itv->v4l2.streams[x].format.type); + if (retval < 0) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error on init_queue 2\n"); + goto ivtv_initq_fail; + } + + itv->v4l2.streams[x].v4l2dev->dev = &itv->dev->dev; + itv->v4l2.streams[x].v4l2dev->release = video_device_release; + } + + /* Some streams have specific values */ + x = ivtv_init_queue(itv, + &itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_MPG].free_q, + itv->options.mpg_buffers, + itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_MPG].format. + type); + x = ivtv_init_queue(itv, + &itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_YUV].free_q, + itv->options.yuv_buffers, + itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_YUV].format. + type); + x = ivtv_init_queue(itv, + &itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_VBI].free_q, + itv->options.vbi_buffers, + itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_VBI].format. + type); + + /* set default minors */ + itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_MPG].v4l2dev->minor = itv->num; + itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_YUV].v4l2dev->minor = + itv->num + IVTV_V4L2_YUV_OFFSET; + //vbi will get offset by v4l, so no offset needed by us + itv->v4l2.streams[IVTV_ENC_STREAM_TYPE_VBI].v4l2dev->minor = itv->num; + + /* Set any card-specific per-stream stuff here */ + switch (itv->card_type) { + case IVTV_350_V1: + /* allocate buffers for decoder */ + x = ivtv_init_queue(itv, + &itv->v4l2.streams[IVTV_DEC_STREAM_TYPE_MPG] + .free_q, itv->options.dec_mpg_buffers, + itv->v4l2.streams[IVTV_DEC_STREAM_TYPE_MPG]. 
+ format.type); + + itv->v4l2.streams[IVTV_DEC_STREAM_TYPE_MPG].v4l2dev->minor = + itv->num + IVTV_V4L2_DEC_OFFSET; + + if (itv->options.dec_yuv_buffers != 0) { + x = ivtv_init_queue(itv, + &itv->v4l2. + streams[IVTV_DEC_STREAM_TYPE_YUV] + .free_q, + itv->options.dec_yuv_buffers, + itv->v4l2. + streams[IVTV_DEC_STREAM_TYPE_YUV]. + format.type); + + itv->v4l2.streams[IVTV_DEC_STREAM_TYPE_YUV].v4l2dev-> + minor = + itv->num + IVTV_V4L2_YUV_OFFSET + + IVTV_V4L2_DEC_OFFSET; + } + + /* Set poll for decoder parts */ + itv->v4l2.streams[IVTV_DEC_STREAM_TYPE_MPG].v4l2dev->fops-> + poll = ivtv_dec_poll; + if (itv->options.dec_yuv_buffers) + itv->v4l2.streams[IVTV_DEC_STREAM_TYPE_YUV].v4l2dev-> + fops->poll = ivtv_dec_poll; + + break; + case IVTV_250_V2: + break; + case IVTV_250_V1: + default: /* shouldn't happen, treat like V1 */ + break; + } + + /* allocate minor, register, loop until works or out of range */ + for (x = 0; x < itv->v4l2.streamcount; x++) { + cont = 0; + do { + if (video_register_device(itv->v4l2.streams[x].v4l2dev, + itv->v4l2.streams[x]. 
+ v4l_reg_type, + itv->v4l2.streams[x].v4l2dev-> + minor)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Device or minor %d not accepted\n", + itv->v4l2.streams[x].v4l2dev->minor); + itv->v4l2.streams[x].v4l2dev->minor++; + } else { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Registered v4l2 device, minor %d\n", + itv->v4l2.streams[x].v4l2dev->minor); + cont = 1; + ivtv_v4l2_init(itv->v4l2.streams[x].v4l2dev); + } + } while ((0 == cont) && + (itv->v4l2.streams[x].v4l2dev->minor <= + IVTV_V4L2_MAX_MINOR)); + if (0 == cont) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Couldn't register v4l2 device!\n"); + /* invalidate so we don't try to unload the device */ + itv->v4l2.streams[x].v4l2dev->minor = -1; + return -ENODEV; + } + } + + return 0; + + ivtv_pre_init_fail: + /* needs lots of queue cleanup here -axboe */ + ivtv_videodev_fail: + for (x = 0; x < itv->v4l2.streamcount; x++) + if (itv->v4l2.streams[x].v4l2dev != NULL) + video_device_release(itv->v4l2.streams[x].v4l2dev); + /* needs lots of queue cleanup here -axboe */ + ivtv_initq_fail: + kfree(itv->v4l2.streams); + ivtv_stream_fail: + return retval; +} + +/* After setting the audio.active param, call this to + * get the right input.. 
think of it as a resolver */
+/* Walks the (input, msp_input) pair table until it finds the entry matching
+ * the currently active v4l2 audio input, then programs the MSP3400 matrix.
+ * The loop terminates on a (0, 0) sentinel pair — presumably the table is
+ * always terminated that way; TODO confirm against tmk_audio_mapping.
+ * Returns 0 on success, -EINVAL if no entry matched. */
+int ivtv_set_audio(struct ivtv *itv, int *map)
+{
+	int input, msp_input;
+	struct msp_matrix mspm;
+
+	do {
+		input = *(map++);
+		msp_input = *(map++);
+		if (input == itv->v4l2.audio.active) {
+			IVTV_DEBUG(IVTV_DEBUG_INFO,
+				   "Setting audio to input %d\n", msp_input);
+			mspm.input = msp_input;
+			mspm.output = itv->v4l2.audio_output;
+
+			ivtv_call_i2c_client(itv,
+					     IVTV_MSP3400_I2C_ADDR,
+					     MSP_SET_MATRIX, &mspm);
+			return 0;
+		}
+	} while ((msp_input != 0) || (input != 0));
+
+	IVTV_DEBUG(IVTV_DEBUG_ERR, "Invalid audio input, shouldn't happen!\n");
+
+	return -EINVAL;
+}
+
+/* Pause (cmd == 0) or unpause (cmd != 0) the encoder by sending
+ * IVTV_API_PAUSE_ENCODER to the firmware; returns the firmware result word
+ * (0 when the mailbox call itself failed, since result is pre-zeroed). */
+u32 ivtv_pause_encoder(struct ivtv * itv, int cmd)
+{
+	u32 data[16], result = 0;
+	int x;
+
+	data[0] = 0;		/* 0 = pause, 1 = unpause */
+	if (cmd)
+		data[0] = 1;
+
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_PAUSE_ENCODER,
+		     &result, 1, &data[0]);
+	return result;
+}
+
+/* Called if v4l2 registration is successful. Set video mode here, at least
+ * that is required on PAL cards.
+ * NOTE(review): the "minor 0" check assumes the MPG capture node always
+ * lands on minor 0 — see the minor assignment/registration loop above;
+ * confirm this still holds when the preferred minor was already taken. */
+int ivtv_v4l2_init(struct video_device *v)
+{
+	struct ivtv *ivtv = v->priv;
+	u32 data[IVTV_MBOX_MAX_DATA], result;
+	int x;
+
+	/*
+	 * only set it on minor 0
+	 */
+	if (v->minor != 0)
+		return 0;
+
+	memset(data, 0, sizeof(data));
+	/* set display standard */
+	if (ivtv_pal)
+		data[0] = 1;
+	else
+		data[0] = 0;
+
+	x = ivtv_api(ivtv->dec_mbox, &ivtv->dec_msem,
+		     IVTV_API_DEC_DISP_STANDARD, &result, 1, &data[0]);
+
+	return 0;
+}
+
+/* Called before v4l2 registration.
+ * Fills in the v4l2 input/audio/output/standard/tuner tables, seeds the
+ * per-control audio_meta[] entries from their menu defaults, folds those
+ * defaults into codec.audio_bitmap, and routes the initial audio input.
+ * Returns 0 on success, -ENOMEM if the tuner table allocation fails, or
+ * the ivtv_set_audio() error (tuner table is freed on that path). */
+int ivtv_v4l2_pre_init(struct ivtv *itv)
+{
+	int x, temp, retval = -1;
+
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 init\n");
+	//Allocate based on card type
+	// allocate capabilities and such based on device type
+
+	/* FIXME too much hardcoding?
 */
+	//inputs
+	itv->v4l2.input.active = 4;
+	itv->v4l2.input.count = 10;
+	itv->v4l2.input.table.input = tmk_inputs;
+
+	itv->v4l2.audio_output = 1;
+
+	//audio inputs
+	itv->v4l2.audio.active = 0;
+	itv->v4l2.audio.count = 2;
+	itv->v4l2.audio.table.audio = tmk_audio_inputs;
+
+	//outputs .. none yet (no real 350 support anyways)
+	itv->v4l2.output.active = 0;
+	itv->v4l2.output.count = 0;
+	itv->v4l2.output.table.output = NULL;
+
+	//standards (NTSC, PAL, SECAM)
+	if (ivtv_pal)
+		itv->v4l2.standard.active = 1;
+	else
+		itv->v4l2.standard.active = 0;
+	itv->v4l2.standard.count = 3;
+	itv->v4l2.standard.table.std = tmk_standards;
+
+	if (itv->v4l2.standard.active == 0) {
+		itv->v4l2.codec.framespergop = 15;	// NTSC
+		itv->v4l2.codec.framerate = 0;	// NTSC 30fps
+	} else {
+		itv->v4l2.codec.framespergop = 12;	// PAL
+		itv->v4l2.codec.framerate = 1;	// PAL 25fps
+
+		/* set pal height in stream defaults */
+		tmk_mpg_stream.format.fmt.pix.height = 576;
+		tmk_yuv_stream.format.fmt.pix.height = 576;
+		tmk_vbi_stream.format.fmt.pix.height = 576;
+		dec_mpg_stream.format.fmt.pix.height = 576;
+		dec_yuv_stream.format.fmt.pix.height = 576;
+	}
+
+	//tuner
+	itv->v4l2.tuner.active = 0;
+	if (itv->card_type == IVTV_350_V1) {
+		itv->v4l2.tuner.count = 2;
+	} else {
+		itv->v4l2.tuner.count = 1;
+	}
+
+	itv->v4l2.tuner.table.tuner = (struct v4l2_tuner *)
+	    kmalloc((itv->v4l2.tuner.count *
+		     sizeof(struct v4l2_tuner)), GFP_KERNEL);
+
+	if (itv->v4l2.tuner.table.tuner == NULL) {
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "Couldn't allocate v4l2 tuner\n");
+		return -ENOMEM;
+	}
+
+	memcpy(itv->v4l2.tuner.table.tuner, &tmk_tuners[0],
+	       (itv->v4l2.tuner.count * sizeof(struct v4l2_tuner)));
+
+	/* Setup audio */
+	/* V4L2_CID_IVTV_FREQ */
+	itv->v4l2.audio_meta[0].ctrl = &ivtv_ctrl_menu_freq;
+	itv->v4l2.audio_meta[0].menu = ivtv_ctrl_query_freq;
+	itv->v4l2.audio_meta[0].mask = ivtv_audio_mask_freq;
+	itv->v4l2.audio_meta[0].setting = ivtv_ctrl_menu_freq.default_value;
+
 itv->v4l2.audio_meta[0].table = &ivtv_audio_tbl_freq[0];
+
+	/* V4L2_CID_IVTV_ENC */
+	itv->v4l2.audio_meta[1].ctrl = &ivtv_ctrl_menu_enc;
+	itv->v4l2.audio_meta[1].menu = ivtv_ctrl_query_enc;
+	itv->v4l2.audio_meta[1].mask = ivtv_audio_mask_enc;
+	itv->v4l2.audio_meta[1].setting = ivtv_ctrl_menu_enc.default_value;
+	itv->v4l2.audio_meta[1].table = &ivtv_audio_tbl_enc[0];
+
+	/* V4L2_CID_IVTV_BITRATE */
+	itv->v4l2.audio_meta[2].ctrl = &ivtv_ctrl_menu_bitrate;
+	itv->v4l2.audio_meta[2].menu = ivtv_ctrl_query_bitrate;
+	itv->v4l2.audio_meta[2].mask = ivtv_audio_mask_bitrate;
+	itv->v4l2.audio_meta[2].setting = ivtv_ctrl_menu_bitrate.default_value;
+	itv->v4l2.audio_meta[2].table = &ivtv_audio_tbl_bitrate[0];
+
+	/* V4L2_CID_IVTV_MONO */
+	itv->v4l2.audio_meta[3].ctrl = &ivtv_ctrl_menu_mono;
+	itv->v4l2.audio_meta[3].menu = ivtv_ctrl_query_mono;
+	itv->v4l2.audio_meta[3].mask = ivtv_audio_mask_mono;
+	itv->v4l2.audio_meta[3].setting = ivtv_ctrl_menu_mono.default_value;
+	itv->v4l2.audio_meta[3].table = &ivtv_audio_tbl_mono[0];
+
+	/* V4L2_CID_IVTV_JOINT */
+	itv->v4l2.audio_meta[4].ctrl = &ivtv_ctrl_menu_joint;
+	itv->v4l2.audio_meta[4].menu = ivtv_ctrl_query_joint;
+	itv->v4l2.audio_meta[4].mask = ivtv_audio_mask_joint;
+	itv->v4l2.audio_meta[4].setting = ivtv_ctrl_menu_joint.default_value;
+	itv->v4l2.audio_meta[4].table = &ivtv_audio_tbl_joint[0];
+
+	/* V4L2_CID_IVTV_EMPHASIS */
+	itv->v4l2.audio_meta[5].ctrl = &ivtv_ctrl_menu_emphasis;
+	itv->v4l2.audio_meta[5].menu = ivtv_ctrl_query_emphasis;
+	itv->v4l2.audio_meta[5].mask = ivtv_audio_mask_emphasis;
+	itv->v4l2.audio_meta[5].setting = ivtv_ctrl_menu_emphasis.default_value;
+	itv->v4l2.audio_meta[5].table = &ivtv_audio_tbl_emphasis[0];
+
+	/* V4L2_CID_IVTV_CRC */
+	itv->v4l2.audio_meta[6].ctrl = &ivtv_ctrl_menu_crc;
+	itv->v4l2.audio_meta[6].menu = ivtv_ctrl_query_crc;
+	itv->v4l2.audio_meta[6].mask = ivtv_audio_mask_crc;
+	itv->v4l2.audio_meta[6].setting = ivtv_ctrl_menu_crc.default_value;
+
 itv->v4l2.audio_meta[6].table = &ivtv_audio_tbl_crc[0];
+
+	/* V4L2_CID_IVTV_COPYRIGHT */
+	itv->v4l2.audio_meta[7].ctrl = &ivtv_ctrl_menu_copyright;
+	itv->v4l2.audio_meta[7].menu = ivtv_ctrl_query_copyright;
+	itv->v4l2.audio_meta[7].mask = ivtv_audio_mask_copyright;
+	itv->v4l2.audio_meta[7].setting =
+	    ivtv_ctrl_menu_copyright.default_value;
+	itv->v4l2.audio_meta[7].table = &ivtv_audio_tbl_copyright[0];
+
+	/* V4L2_CID_IVTV_GEN */
+	itv->v4l2.audio_meta[8].ctrl = &ivtv_ctrl_menu_generation;
+	itv->v4l2.audio_meta[8].menu = ivtv_ctrl_query_generation;
+	itv->v4l2.audio_meta[8].mask = ivtv_audio_mask_generation;
+	itv->v4l2.audio_meta[8].setting =
+	    ivtv_ctrl_menu_generation.default_value;
+	itv->v4l2.audio_meta[8].table = &ivtv_audio_tbl_generation[0];
+
+	/* Fold each control's default menu setting into the firmware audio
+	 * property bitmap (indices 0..8 must match IVTV_V4L2_AUDIO_MENUCOUNT). */
+	itv->v4l2.codec.audio_bitmap = 0;
+	for (x = 0; x < IVTV_V4L2_AUDIO_MENUCOUNT; x++) {
+		temp = itv->v4l2.audio_meta[x].setting;
+		itv->v4l2.codec.audio_bitmap |=
+		    itv->v4l2.audio_meta[x].table[temp];
+	}
+
+	retval = ivtv_set_audio(itv, tmk_audio_mapping);
+	if (retval) {
+		kfree(itv->v4l2.tuner.table.tuner);
+		return retval;
+	}
+	//FIXME Setup components here?
tuner channel etc
+	return 0;
+}
+
+/* Programs the encoder firmware with all capture parameters (stream type,
+ * frame size/rate, bitrates, GOP, DNR, filters ...) and starts a capture of
+ * the stream type given by id->type. Caller must hold itv->sem_lock (see
+ * the IVTV_ASSERT below). Firmware errors are only logged; always returns 0. */
+int ivtv_start_v4l2_stream(struct ivtv_open_id *id)
+{
+	struct ivtv *itv = id->itv;
+	u32 data[IVTV_MBOX_MAX_DATA], result;
+	int x, vsize, vsync, hsize;
+	int type, subtype;
+	unsigned int dig;
+
+	/* sem_lock must be held */
+	IVTV_ASSERT(ivtv_sem_count(&itv->sem_lock) <= 0);
+
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "ivtv start v4l2 stream\n");
+
+	/* NTSC/PAL switching */
+	vsize = itv->v4l2.streams[0].format.fmt.pix.height;
+	vsync = (int)itv->v4l2.streams[0].format.fmt.pix.height / 2;
+	hsize = itv->v4l2.streams[0].format.fmt.pix.width;
+
+	type = id->type;
+
+	switch (type) {
+	case 2:		/* VBI, may be the wrong value */
+		subtype = 4;
+		/* NOTE(review): no break here — control falls through into
+		 * case 4 and subtype is overwritten with 2, so VBI never gets
+		 * subtype 4. Looks like a missing break; confirm intent. */
+	case 4:		/* Radio, probably not applicable */
+		subtype = 2;
+		break;
+	default:
+		subtype = 3;
+		break;
+	}
+
+	/* clear queues */
+	ivtv_move_queue(itv, &itv->v4l2.streams[id->type].full_q,
+			&itv->v4l2.streams[id->type].free_q);
+	ivtv_move_queue(itv, &itv->v4l2.streams[id->type].dma_q,
+			&itv->v4l2.streams[id->type].free_q);
+
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "fullq size %d\n",
+		   itv->v4l2.streams[id->type].full_q.elements);
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "freeq size %d\n",
+		   itv->v4l2.streams[id->type].free_q.elements);
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "dmaq size %d\n",
+		   itv->v4l2.streams[id->type].dma_q.elements);
+
+	/*assign dma block len */
+	/* FIXME this needs a flag */
+	data[0] = 1;		/* num bytes in block */
+	data[1] = 1;		/* use info from sg instead */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_DMA_BLOCKLEN, &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 1. Code %d\n", x);
+
+	/*assign program index info */
+	/* FIXME need more info on this call */
+	data[0] = 0;		/*Mask 0:Disable */
+	data[1] = 0;		/*Num_req 0:??/ */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_PGM_INDEX_INFO, &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 2. Code %d\n", x);
+
+	/*assign stream type */
+	data[0] = itv->v4l2.codec.stream_type;
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_ASSIGN_STREAM_TYPE,
+		     &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 3. Code %d\n", x);
+
+	/*assign output port */
+	data[0] = 0;		/*0:Memory */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_ASSIGN_OUTPUT_PORT,
+		     &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 4. Code %d\n", x);
+
+	/*assign framerate */
+	data[0] = itv->v4l2.codec.framerate;
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_ASSIGN_FRAMERATE,
+		     &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 5. Code %d\n", x);
+
+	/*assign frame size */
+	data[0] = vsize;	/* height */
+	data[1] = hsize;	/* width */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_ASSIGN_FRAME_SIZE,
+		     &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 6. Code %d\n", x);
+
+	/*assign aspect ratio */
+	data[0] = itv->v4l2.codec.aspect;	/*mpeg spec sez 2 */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_ASPECT_RATIO, &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 7. Code %d\n", x);
+
+	/*assign bitrates */
+	/*FIXME i think these settings are valid for compressed only */
+	data[0] = itv->v4l2.codec.bitrate_mode;	/*mode */
+	data[1] = itv->v4l2.codec.bitrate;	/* bps */
+	data[2] = itv->v4l2.codec.bitrate_peak / 400;	/* peak/400 */
+	data[3] = 0;		/*??? */
+	data[4] = 0x70;		/*??? */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_ASSIGN_BITRATES,
+		     &result, 5, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 8. Code %d\n", x);
+
+	/*assign gop properties */
+	data[0] = itv->v4l2.codec.framespergop;
+	data[1] = itv->v4l2.codec.bframes;
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_GOP_PROPERTIES, &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 9. Code %d\n", x);
+
+	/*assign 3 2 pulldown */
+	data[0] = itv->v4l2.codec.pulldown;
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_3_2_PULLDOWN, &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 10. Code %d\n", x);
+
+	/*assign gop closure */
+	data[0] = itv->v4l2.codec.gop_closure;
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_ASSIGN_GOP_CLOSURE,
+		     &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 11. Code %d\n", x);
+
+	/*assign audio properties */
+	data[0] = itv->v4l2.codec.audio_bitmap;
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_AUDIO_PROPERTIES, &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 12. Code %d\n", x);
+
+	/*assign dnr filter mode */
+	data[0] = itv->v4l2.codec.dnr_mode;
+	data[1] = itv->v4l2.codec.dnr_type;
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_DNR_FILTER_MODE, &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 13. Code %d\n", x);
+
+	/*assign dnr filter props */
+	data[0] = itv->v4l2.codec.dnr_spatial;
+	data[1] = itv->v4l2.codec.dnr_temporal;
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_DNR_FILTER_PROPS, &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 14. Code %d\n", x);
+
+	/*assign coring levels */
+	data[0] = 0;		/*luma_h */
+	data[1] = 255;		/*luma_l */
+	data[2] = 0;		/*chroma_h */
+	data[3] = 255;		/*chroma_l */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_CORING_LEVELS, &result, 4, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 15. Code %d\n", x);
+
+	/*assign spatial filter type */
+	data[0] = 1;		/*luma_t: 1 = horiz_only */
+	data[1] = 1;		/*chroma_t: 1 = horiz_only */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_SPATIAL_FILTER_TYPE, &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 16. Code %d\n", x);
+
+	/*assign frame drop rate */
+	data[0] = 0;
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_FRAME_DROP_RATE, &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 17. Code %d\n", x);
+
+	/*assign placeholder */
+	data[0] = 0;		/* type: 0 = Extension/UserData */
+	data[1] = 0;		/*period */
+	data[2] = 0;		/*size_t */
+	data[3] = 0;		/*arg0 */
+	data[4] = 0;		/*arg1 */
+	data[5] = 0;		/*arg2 */
+	data[6] = 0;		/*arg3 */
+	data[7] = 0;		/*arg4 */
+	data[8] = 0;		/*arg5 */
+	data[9] = 0;		/*arg6 */
+	data[10] = 0;		/*arg7 */
+	data[11] = 0;		/*arg8 */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_ASSIGN_PLACEHOLDER,
+		     &result, 12, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 18. Code %d\n\n", x);
+
+	/* assign num vsync lines */
+	data[0] = vsync;	/*??? */
+	data[1] = vsync;	/* ??? */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+		     IVTV_API_ASSIGN_NUM_VSYNC_LINES, &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 20. Code %d\n", x);
+
+	/* First capture on this card: reset bookkeeping, clear stale IRQs and
+	 * re-initialize the digitizer input path. */
+	if (atomic_read(&itv->capturing) == 0) {
+
+		itv->trans_id = 0;
+		itv->first_read = 1;
+
+		/* Clear pending interrupts */
+		IVTV_DEBUG(IVTV_DEBUG_INFO, "Clearing Interrupts\n");
+		writel((readl(itv->reg_mem + IVTV_REG_IRQSTATUS) & 0xC8000000),
+		       (IVTV_REG_IRQSTATUS + itv->reg_mem));
+
+#if 0
+		/* event notification (on) */
+		data[0] = 0;	/*type: 0 = refresh */
+		data[1] = 1;	/*on/off: 1 = on */
+		data[2] = 0x10000000;	/*intr_bit: 0x10000000 = digitizer */
+		data[3] = -1;	/*mbox_id: -1: none */
+		x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+			     IVTV_API_EVENT_NOTIFICATION, &result, 4, &data[0]);
+		if (x)
+			IVTV_DEBUG(IVTV_DEBUG_ERR,
+				   "startcap error 2. Code %d\n", x);
+#endif
+
+		/* Disable digitizer (saa7115) */
+		IVTV_DEBUG(IVTV_DEBUG_INFO, "Disabling digitizer\n");
+		dig = 0;
+		ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR,
+				     DECODER_ENABLE_OUTPUT, &dig);
+
+		/*initialize input (no args) */
+		x = ivtv_api(itv->enc_mbox, &itv->enc_msem,
+			     IVTV_API_INITIALIZE_INPUT, &result, 0, &data[0]);
+		if (x)
+			IVTV_DEBUG(IVTV_DEBUG_ERR, "init error 19. Code %d\n\n",
+				   x);
+
+		/* enable digitizer (saa7115) */
+		IVTV_DEBUG(IVTV_DEBUG_INFO, "Enabling digitizer\n");
+		dig = 1;
+		ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR,
+				     DECODER_ENABLE_OUTPUT, &dig);
+
+		IVTV_DEBUG(IVTV_DEBUG_INFO, "Sleeping for 100ms\n");
+		ivtv_sleep_timeout(HZ / 10);
+	}
+
+	/* FIXME this is for mpg captures only i think */
+	clear_bit(IVTV_F_I_EOS, &itv->i_flags);
+
+	/* begin_capture */
+	data[0] = type;		/*type: 0 = mpeg */
+	data[1] = subtype;	/*subtype: 3 = video+audio */
+	x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_BEGIN_CAPTURE,
+		     &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "startcap error 1. Code %d\n", x);
+
+	if (atomic_read(&itv->capturing) == 0) {
+		/*Clear the following Interrupt mask bits: 0xd8000000 */
+		ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
+		IVTV_DEBUG(IVTV_DEBUG_IRQ, "IRQ Mask is now: 0x%08x\n",
+			   itv->irqmask);
+	}
+
+	/* you're live!
sit back and await interrupts :) */
+	atomic_inc(&itv->capturing);
+	return 0;
+}
+
+/* Thin wrapper packing the seven playback-speed arguments into the
+ * IVTV_API_DEC_PLAYBACK_SPEED mailbox call; data[0] combines the fastspeed
+ * flag (bit 31) with the low 8 bits of factor. Returns the firmware result.
+ * NOTE(review): result is not initialized before the call and the ivtv_api
+ * return code is ignored — confirm result is always written by ivtv_api. */
+int ivtv_api_dec_playback_speed(struct ivtv *itv, int fastspeed, int factor,
+				int forward, int mpeg_frame_type_mask,
+				int bframes_per_gop, int mute_audio,
+				int display_fields)
+{
+
+	u32 data[IVTV_MBOX_MAX_DATA], result;
+
+	data[0] = (fastspeed << 31) | (factor & 0xff);
+	data[1] = forward;
+	data[2] = mpeg_frame_type_mask;
+	data[3] = bframes_per_gop;
+	data[4] = mute_audio;
+	data[5] = display_fields;
+
+	ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_DEC_PLAYBACK_SPEED,
+		 &result, 6, &data[0]);
+	return result;
+}
+
+/* Claims the decoder stream for this opener (stream->id / IVTV_F_S_CAP),
+ * performs one-time decoder firmware setup guarded by IVTV_F_S_UNINIT, and
+ * starts playback. Caller must hold itv->sem_lock. Returns 0, -EINVAL for
+ * non-decoder stream types, or -EBUSY if another opener owns the stream. */
+int ivtv_start_v4l2_decode(struct ivtv_open_id *id)
+{
+	struct ivtv *itv = id->itv;
+	u32 data[IVTV_MBOX_MAX_DATA], result;
+	struct ivtv_v4l2_stream *stream;
+	int x;
+	int type;
+	int standard = 0;
+
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "Start v4l2_decode \n");
+	/* sem_lock must be held */
+	IVTV_ASSERT(ivtv_sem_count(&itv->sem_lock) <= 0);
+
+	if ((id->type != IVTV_DEC_STREAM_TYPE_MPG) &&
+	    (id->type != IVTV_DEC_STREAM_TYPE_YUV)) {
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "Write on read-only interface\n");
+		return -EINVAL;
+	}
+
+	stream = &itv->v4l2.streams[id->type];
+	if ((stream->id == -1) && !test_bit(IVTV_F_S_CAP, &stream->s_flags)) {
+		set_bit(IVTV_F_S_CAP, &stream->s_flags);
+		stream->id = id->open_id;
+		IVTV_DEBUG(IVTV_DEBUG_INFO, "Granting ownership to id %d\n",
+			   id->open_id);
+	} else {
+		if (id->open_id != stream->id) {
+			IVTV_DEBUG(IVTV_DEBUG_INFO, "Decoder busy\n");
+			return -EBUSY;
+		}
+		if (test_bit(IVTV_F_S_CAP, &stream->s_flags)) {
+			IVTV_DEBUG(IVTV_DEBUG_INFO,
+				   "Decoder doesn't need init\n");
+			return 0;
+		}
+	}
+
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "Starting v4l2_decode \n");
+
+	type = id->type;
+
+	if (itv->v4l2.standard.active != 0) {	/* if not NTSC */
+		standard = 1;	/* PAL */
+	}
+
+/* this isn't needed until we use buffers for decoding */
+	/* clear queues */
+	ivtv_move_queue(itv, &itv->v4l2.streams[id->type].full_q,
+			&itv->v4l2.streams[id->type].free_q);
+	ivtv_move_queue(itv, &itv->v4l2.streams[id->type].dma_q,
+			&itv->v4l2.streams[id->type].free_q);
+
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "Decoder fullq size %d\n",
+		   itv->v4l2.streams[id->type].full_q.elements);
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "Decoder freeq size %d\n",
+		   itv->v4l2.streams[id->type].free_q.elements);
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "Decoder dmaq size %d\n",
+		   itv->v4l2.streams[id->type].dma_q.elements);
+
+	if (atomic_read(&itv->decoding) == 0) {
+		/* Clear pending interrupts */
+		IVTV_DEBUG(IVTV_DEBUG_INFO, "Clearing Interrupts\n");
+		writel((readl(itv->reg_mem + IVTV_REG_IRQSTATUS) & 0xC8000000),
+		       (IVTV_REG_IRQSTATUS + itv->reg_mem));
+	}
+
+	/* One-time decoder setup, done only while the stream is still marked
+	 * uninitialized; cleared below once everything has been programmed. */
+	if (test_bit(IVTV_F_S_UNINIT, &id->itv->v4l2.streams[id->type].s_flags)) {
+		IVTV_DEBUG(IVTV_DEBUG_INFO,
+			   "Setting some initial decoder settings\n");
+
+		/* set display standard */
+		data[0] = standard;	/* 0 = NTSC, 1 = PAL */
+		x = ivtv_api(itv->dec_mbox,
+			     &itv->dec_msem,
+			     IVTV_API_DEC_DISP_STANDARD, &result, 1, &data[0]);
+		if (x)
+			IVTV_DEBUG(IVTV_DEBUG_ERR,
+				   "COULDN'T SET DISPLAY STD %d\n", x);
+
+		/* set audio mode */
+		data[0] = 0;	/* Dual mono-mode action: ??? */
+		data[1] = 0;	/* stereo mode action: 0=stereo, 1=left,
+				   2=right, 3=mono */
+		x = ivtv_api(itv->dec_mbox,
+			     &itv->dec_msem,
+			     IVTV_API_DEC_SELECT_AUDIO, &result, 2, &data[0]);
+		if (x)
+			IVTV_DEBUG(IVTV_DEBUG_ERR,
+				   "COULDN'T SET AUDIO MODE %d\n", x);
+
+		/* set number of internal decoder buffers */
+		data[0] = itv->dec_options.decbuffers;	/* 0 = 6 buffers,
+							   1 = 9 buffers */
+		x = ivtv_api(itv->dec_mbox,
+			     &itv->dec_msem,
+			     IVTV_API_DEC_DISPLAY_BUFFERS,
+			     &result, 1, &data[0]);
+		if (x)
+			IVTV_DEBUG(IVTV_DEBUG_ERR,
+				   "COULDN'T INITIALIZE # OF DISPLAY BUFFERS %d\n",
+				   x);
+
+		/* prebufferring */
+		data[0] = itv->dec_options.prebuffer;	/* 0 = no prebuffering,
+							   1 = enabled, see docs */
+		x = ivtv_api(itv->dec_mbox,
+			     &itv->dec_msem,
+			     IVTV_API_DEC_BUFFER, &result, 1, &data[0]);
+		if (x)
+			IVTV_DEBUG(IVTV_DEBUG_ERR,
+				   "COULDN't INITIALIZE PREBUFFER %d\n", x);
+
+		/* set decoder source settings */
+		data[0] = id->type;	/* Data type: 0 = mpeg from host,
+					   1 = yuv from encoder,
+					   2 = yuv_from_host */
+		data[1] = 720;	/* YUV source width */
+		if (itv->v4l2.standard.active == 1) {
+			data[2] = 576;	/* YUV source height */
+		} else {
+			data[2] = 480;	/* YUV source height */
+		}
+		data[3] = itv->v4l2.codec.audio_bitmap;	/* Audio settings to use,
+							   bitmap. see docs. */
+		x = ivtv_api(itv->dec_mbox,
+			     &itv->dec_msem,
+			     IVTV_API_DEC_DECODE_SOURCE, &result, 4, &data[0]);
+		if (x)
+			IVTV_DEBUG(IVTV_DEBUG_ERR,
+				   "COULDN'T INITIALIZE DECODER SOURCE %d\n",
+				   x);
+
+		clear_bit(IVTV_F_S_UNINIT,
+			  &id->itv->v4l2.streams[id->type].s_flags);
+	} else {
+		IVTV_DEBUG(IVTV_DEBUG_INFO,
+			   "Decoder already configured, skipping extra setup\n");
+	}
+#if 0
+	/* select event notification */
+	data[0] = 0;	/* Event: 0 = audio change between stereo and mono */
+	data[1] = 1;	/* Enable/Disable: 0 = disabled, 1 = enabled */
+	data[2] = 0x00010000;	/* Bit: interrupt bit to fire */
+	data[3] = -1;	/* Mailbox to use: -1 = no mailbox needed */
+	x = ivtv_api(itv->dec_mbox, &itv->dec_msem,
+		     IVTV_API_DEC_EVENT_NOTIFICATION, &result, 4, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR,
+			   "COULDN'T INITIALIZE EVENT NOTIFICATION %d\n", x);
+#endif
+#if 0
+	/* set stream input port */
+	data[0] = 0;	/* 0 = memory, 1 = streaming */
+	x = ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_DEC_STREAM_INPUT,
+		     &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR,
+			   "COULDN'T INITIALIZE STREAM INPUT %d\n", x);
+
+	/* A/V sync delay */
+	data[0] = 0;	/* Delay in 90khz ticks. 0 = synced, negative = audio lags, positive = video lags */
+	x = ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_DEC_SET_AV_DELAY,
+		     &result, 1, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR,
+			   "COULDN't INITIALIZE Audio/Vid sync delay %d\n", x);
+#endif
+
+	if (atomic_read(&itv->decoding) == 0) {
+		memset(&itv->dec_timestamp, 0,
+		       sizeof(struct ivtv_ioctl_framesync));
+		writel(0, &itv->dec_mbox[IVTV_MBOX_FIELD_DISPLAYED].data[0]);
+		writel(0, &itv->dec_mbox[IVTV_MBOX_FIELD_DISPLAYED].data[1]);
+		writel(0, &itv->dec_mbox[IVTV_MBOX_FIELD_DISPLAYED].data[2]);
+		writel(0, &itv->dec_mbox[IVTV_MBOX_FIELD_DISPLAYED].data[3]);
+		writel(0, &itv->dec_mbox[IVTV_MBOX_FIELD_DISPLAYED].data[4]);
+
+		/*Clear the following Interrupt mask bits: 0xd8000000 */
+		ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_DECODE);
+		IVTV_DEBUG(IVTV_DEBUG_IRQ, "IRQ Mask is now: 0x%08x\n",
+			   itv->irqmask);
+	}
+
+	/* start playback */
+	data[0] = itv->dec_options.gop_offset;	/* frame to start from (in GOP) */
+	data[1] = itv->dec_options.mute_frames;	/* # of audio frames to mute */
+	x = ivtv_api(itv->dec_mbox, &itv->dec_msem,
+		     IVTV_API_DEC_START_PLAYBACK, &result, 2, &data[0]);
+	if (x)
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "COULDN'T START PLAYBACK %d\n", x);
+
+	/*you're live!
sit back and await interrupts :) */
+	atomic_inc(&itv->decoding);
+	return 0;
+}
+
+/* Driver teardown: stop captures, free the tuner table, unregister every
+ * per-stream video_device (minor == -1 marks a failed registration) and
+ * free the stream array. */
+void ivtv_v4l2_cleanup(struct ivtv *itv)
+{
+	int x;
+
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 unregister\n");
+
+	/* NOTE(review): an atomic usage count is never negative, so this
+	 * ">= 0" test is always true; probably "> 0" was intended — confirm. */
+	if (atomic_read(&itv->capturing) >= 0)
+		ivtv_stop_all_captures(itv);
+	if (itv->v4l2.tuner.table.tuner)
+		kfree(itv->v4l2.tuner.table.tuner);
+	for (x = 0; x < itv->v4l2.streamcount; x++) {
+		/* Catch a possible kernel panic */
+		if (itv->v4l2.streams[x].v4l2dev->minor != -1) {
+			video_unregister_device(itv->v4l2.streams[x].v4l2dev);
+		} else {
+			IVTV_DEBUG(IVTV_DEBUG_ERR,
+				   "invalid v4l2 registration on unload\n");
+		}
+	}
+	if (itv->v4l2.streams)
+		kfree(itv->v4l2.streams);
+}
+
+/* file_operations open: map the device minor back to its card and stream by
+ * scanning ivtv_cards under ivtv_lock, then allocate an ivtv_open_id, give
+ * it a fresh open_id, and queue it on the card's client_list under sem_lock.
+ * Returns 0, -ENOMEM, or -ENXIO if no stream owns this minor. */
+int ivtv_v4l2_open(struct inode *inode, struct file *filp)
+{
+	int x, y = 0, minor;
+	struct ivtv_open_id *item;
+	struct ivtv *itv = NULL;
+
+	minor = MINOR(inode->i_rdev);
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 open on minor %d\n", minor);
+
+	/* Find which card this open was on */
+	spin_lock_irq(&ivtv_lock);
+	for (x = 0; x < ivtv_cards_active; x++) {
+
+		/* find out which stream this open was on */
+		for (y = 0; y < ivtv_cards[x].v4l2.streamcount; y++) {
+			IVTV_DEBUG(IVTV_DEBUG_INFO, "current minor %d\n",
+				   ivtv_cards[x].v4l2.streams[y].v4l2dev->
+				   minor);
+			if (ivtv_cards[x].v4l2.streams[y].v4l2dev->minor ==
+			    minor) {
+				itv =
+				    ivtv_cards[x].v4l2.streams[y].v4l2dev->priv;
+				break;
+			}
+		}
+		/* FIXME ugly :( */
+		if (itv != NULL)
+			break;
+	}
+	spin_unlock_irq(&ivtv_lock);
+
+	/* FIXME temporary
+	   if (y == 2) {
+	   IVTV_DEBUG(IVTV_DEBUG_ERR, "VBI not supported yet \n");
+	   return -EINVAL;
+	   }
+	 */
+	if (itv != NULL) {
+		IVTV_DEBUG(IVTV_DEBUG_INFO, "opened card # %d, stream %d\n", x,
+			   y);
+		//allocate memory
+		item = kmalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
+		if (NULL == item) {
+			IVTV_DEBUG(IVTV_DEBUG_ERR, "nomem on v4l2 open\n");
+			return -ENOMEM;
+		}
+		item->itv = itv;
+		item->type = y;
+
+		INIT_LIST_HEAD(&item->list);
+
+		down(&itv->sem_lock);
+
+		item->open_id = item->itv->open_id++;
+
+		list_add_tail(&item->list, &item->itv->client_list);
+
+		up(&itv->sem_lock);
+
+		filp->private_data = item;
+
+		return 0;
+	}
+
+	/* Couldnt find a device registered on that minor, shouldn't happen! */
+	IVTV_DEBUG(IVTV_DEBUG_ERR, "Device on minor %d not found!\n", minor);
+
+	return -ENXIO;
+}
+
+/* file_operations read: the first reader claims the stream (IVTV_F_S_CAP +
+ * stream->id) and kicks off the capture, then all work is delegated to
+ * ivtv_read(); blocking unless O_NONBLOCK. Concurrent readers with a
+ * different open_id get -EBUSY. */
+int ivtv_v4l2_read(struct file *filp, char *buf, size_t count, loff_t * pos)
+{
+	struct ivtv_open_id *id = filp->private_data;
+	struct ivtv *itv = id->itv;
+	struct ivtv_v4l2_stream *stream;
+	int ret = 0;
+
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 read\n");
+
+	if (down_interruptible(&itv->sem_lock))
+		return -ERESTARTSYS;
+
+	stream = &itv->v4l2.streams[id->type];
+
+	if (!test_bit(IVTV_F_S_CAP, &stream->s_flags) && stream->id == -1) {
+		set_bit(IVTV_F_S_CAP, &stream->s_flags);
+		stream->id = id->open_id;
+
+		ret = ivtv_start_v4l2_stream(id);
+		if (ret) {
+			stream->id = -1;
+			/* NOTE(review): IVTV_F_S_CAP is left set on this
+			 * failure path, so later opens see id == -1 with the
+			 * bit set and can never reclaim the stream — confirm. */
+			IVTV_DEBUG(IVTV_DEBUG_INFO,
+				   "Error in v4l2 stream init\n");
+		}
+		stream->seq = 0;
+		stream->ubytes = 0;
+	} else {
+		if (id->open_id != stream->id)
+			ret = -EBUSY;
+	}
+
+	up(&itv->sem_lock);
+
+	if (ret)
+		return ret;
+
+	ret = ivtv_read(id, buf, count, !(filp->f_flags & O_NONBLOCK));
+
+	if (ret > 0)
+		*pos += ret;
+
+	if (ret == 0) {
+		IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 read returning 0\n");
+	}
+
+	return ret;
+}
+
+/* file_operations write: (re)initializes the decoder via
+ * ivtv_start_v4l2_decode() under sem_lock, then hands the data to
+ * ivtv_write(); blocking unless O_NONBLOCK. */
+ssize_t ivtv_v4l2_write(struct file * filp, const char *buf, size_t count,
+			loff_t * pos)
+{
+	struct ivtv_open_id *id = filp->private_data;
+	struct ivtv *itv = id->itv;
+
+	int ret = 0;
+
+	if (down_interruptible(&itv->sem_lock))
+		return -ERESTARTSYS;
+
+	ret = ivtv_start_v4l2_decode(id);
+
+	up(&itv->sem_lock);
+
+	if (ret)
+		return ret;
+
+	/* do all the work */
+	return ivtv_write(id, buf, count, !(filp->f_flags & O_NONBLOCK));
+}
+
+/* Stop capture for the opener that owns the stream; -EINVAL otherwise. */
+int ivtv_v4l2_streamoff(struct ivtv_open_id *id)
+{
+
+	if (down_interruptible(&id->itv->sem_lock))
+		return -ERESTARTSYS;
+
+	/* NOTE(review): the -EINVAL path below returns with sem_lock still
+	 * held (no up() before return) — this deadlocks the next locker;
+	 * confirm and fix. */
+	if (id->open_id != id->itv->v4l2.streams[id->type].id) {
+		return -EINVAL;
+	} else {
+		ivtv_stop_capture(id);
+	}
+
+	up(&id->itv->sem_lock);
+
+	return 0;
+}
+
+/* file_operations release: if this opener owns the stream, shut it down via
+ * ivtv_close() and mark the stream unowned/uninitialized; always unlinks
+ * and frees the ivtv_open_id. */
+int ivtv_v4l2_close(struct inode *inode, struct file *filp)
+{
+	struct ivtv_open_id *id = filp->private_data;
+
+	IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 close\n");
+
+	if (NULL == id) {
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "invalid id on v4l2 close\n");
+		return -ENODEV;
+	}
+
+	if (down_interruptible(&id->itv->sem_lock))
+		return -ERESTARTSYS;
+
+	if (id->itv->v4l2.streams[id->type].id == -1) {
+		clear_bit(IVTV_F_S_CAP,
+			  &id->itv->v4l2.streams[id->type].s_flags);
+		set_bit(IVTV_F_S_UNINIT,
+			&id->itv->v4l2.streams[id->type].s_flags);
+		ivtv_flush_queues(id);
+	} else if (id->open_id == id->itv->v4l2.streams[id->type].id) {
+		ivtv_close(id);
+
+		clear_bit(IVTV_F_S_CAP,
+			  &id->itv->v4l2.streams[id->type].s_flags);
+		set_bit(IVTV_F_S_UNINIT,
+			&id->itv->v4l2.streams[id->type].s_flags);
+		id->itv->v4l2.streams[id->type].id = -1;
+		ivtv_flush_queues(id);
+	}
+
+	up(&id->itv->sem_lock);
+
+	/* NOTE(review): client_list was modified under sem_lock in open(),
+	 * but this list_del runs after up() — confirm the unlocked removal
+	 * cannot race a concurrent open on the same card. */
+	list_del(&id->list);
+
+	kfree(id);
+
+	return 0;
+}
+
+/* direct from the latest v4l2 patch */
+static unsigned int video_fix_command(unsigned int cmd)
+{
+	switch (cmd) {
+	case VIDIOC_OVERLAY_OLD:
+		cmd = VIDIOC_OVERLAY;
+		break;
+	case VIDIOC_S_PARM_OLD:
+		cmd = VIDIOC_S_PARM;
+		break;
+	case VIDIOC_S_CTRL_OLD:
+		cmd = VIDIOC_S_CTRL;
+		break;
+	case VIDIOC_G_AUDIO_OLD:
+		cmd = VIDIOC_G_AUDIO;
+		break;
+	case VIDIOC_G_AUDOUT_OLD:
+		cmd = VIDIOC_G_AUDOUT;
+		break;
+	default:
+		break;
+	}
+
+	return cmd;
+}
+
+/* Validates and applies trick-play speed settings, then sends
+ * IVTV_API_DEC_PLAYBACK_SPEED; saves the options on success. */
+int ivtv_change_speed(struct ivtv *itv, struct ivtv_speed speed)
+{
+	/* NOTE(review): later in this function data[2] is or-ed into without
+	 * ever being zeroed, so it accumulates uninitialized stack garbage —
+	 * confirm and initialize data[2] = 0 first. */
+	u32 data[IVTV_MBOX_MAX_DATA], result;
+
+	if ((speed.scale < 0) || (speed.scale > 50))
+		return -EINVAL;
+
+	if ((speed.speed < 0) || (speed.speed > 1))
+		return -EINVAL;
+
+	data[0] = speed.scale;
+
+	if (speed.smooth)	/* smooth ff */
+		data[0] |= 0x40000000;
+
+	if (speed.speed)	/* fast forward */
+		data[0] |= 0x80000000;
+
+	if (speed.direction) {
+		IVTV_DEBUG(IVTV_DEBUG_ERR, "DEC: Reverse not supported\n");
+		return -EINVAL;
+	}
+
+	data[1]
= speed.direction; /* Forward. Reverse not supported */ + + switch (speed.fr_mask) { + case 2: + default: + data[2] |= 4; /* B */ + case 1: + data[2] |= 2; /* P */ + case 0: + data[2] |= 1; /* I */ + break; + } + + data[3] = itv->v4l2.codec.framespergop; + data[4] = speed.aud_mute; /* mute while fast/slow */ + data[5] = speed.fr_field; /* frame or field at a time */ + data[6] = speed.mute; /* # of frames to mute on normal speed resume */ + + if (ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_DEC_PLAYBACK_SPEED, &result, 7, &data[0])) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "DEC: error changing speed\n"); + return (int)result; + } + + /* Save speed options if call succeeded */ + memcpy(&itv->dec_options.speed, &speed, sizeof(speed)); + + return 0; +} + +int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + + struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data; + struct ivtv *itv = id->itv; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 ioctl 0x%08x\n", cmd); + + cmd = video_fix_command(cmd); + + switch (cmd) { +#ifdef SAA7115_REGTEST + /* ioctls to allow direct access to the saa7115 registers for testing */ + case SAA7115_GET_REG:{ + struct saa7115_reg_t *saa7115_reg = + (struct saa7115_reg_t *)arg; + + ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR, + SAA7115_GET_REG, saa7115_reg); + break; + } + case SAA7115_SET_REG:{ + struct saa7115_reg_t *saa7115_reg = + (struct saa7115_reg_t *)arg; + + ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR, + SAA7115_SET_REG, saa7115_reg); + break; + } +#endif + case IVTV_IOC_ZCOUNT:{ + /* Zeroes out usage count so it can be unloaded in case of + * drastic error */ + + IVTV_DEBUG(IVTV_DEBUG_INFO, "ivtv ioctl: ZCOUNT\n"); + + break; + } + case IVTV_IOC_GET_FB:{ + if (itv->fb_id < 0) + return -EINVAL; + if (copy_to_user + ((int *)arg, &itv->fb_id, sizeof(itv->fb_id))) + return -EFAULT; + + break; + } + case IVTV_IOC_FWAPI:{ + struct ivtv_ioctl_fwapi fwapi; + int x; + + 
IVTV_DEBUG(IVTV_DEBUG_INFO, "ivtv ioctl: FWAPI\n"); + + if (copy_from_user + (&fwapi, (struct ivtv_ioctl_fwapi *)arg, + sizeof(struct ivtv_ioctl_fwapi))) + return -EFAULT; + + /* Encoder + x = ivtv_api(itv->enc_mbox, &itv->enc_msem, fwapi.cmd, + &fwapi.result, fwapi.args, &fwapi.data[0]); + */ + + /* Decoder */ + x = ivtv_api(itv->dec_mbox, &itv->dec_msem, fwapi.cmd, + &fwapi.result, fwapi.args, &fwapi.data[0]); + + if (copy_to_user((struct ivtv_ioctl_fwapi *)arg, &fwapi, + sizeof(struct ivtv_ioctl_fwapi))) + return -EFAULT; + + return x; + } + case IVTV_IOC_FRAMESYNC:{ + interruptible_sleep_on(&itv->vsync_w); + + if (signal_pending(current)) + return -ERESTARTSYS; + + if (copy_to_user((void *)arg, &itv->dec_timestamp, + sizeof(itv->dec_timestamp))) { + return -EFAULT; + } + + break; + } + case IVTV_IOC_PLAY:{ + u32 data[IVTV_MBOX_MAX_DATA], result; + data[0] = 0; /* 0-based frame # to start from (in GOP) */ + data[1] = 0; /* # of audio frames to mute */ + if (ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_DEC_START_PLAYBACK, &result, 2, + &data[0])) + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DEC: error starting playback\n"); + + break; + + } + case IVTV_IOC_START_DECODE: + case IVTV_IOC_S_START_DECODE:{ + struct ivtv_cfg_start_decode sd; + int ret = 0; + + if (copy_from_user + (&sd, (struct ivtv_cfg_start_decode *)arg, + sizeof(sd))) + return -EFAULT; + + if ((sd.gop_offset < 0) || (sd.gop_offset > 15)) + return -EINVAL; + if (sd.muted_audio_frames < 0) + return -EINVAL; + + itv->dec_options.gop_offset = sd.gop_offset; + itv->dec_options.mute_frames = sd.muted_audio_frames; + + if (cmd == IVTV_IOC_S_START_DECODE) + break; + + if (down_interruptible(&itv->sem_lock)) + return -ERESTARTSYS; + + ret = ivtv_start_v4l2_decode(id); + + up(&itv->sem_lock); + + return ret; + break; + } + case IVTV_IOC_STOP_DECODE: + case IVTV_IOC_S_STOP_DECODE:{ + struct ivtv_cfg_stop_decode sd; + + if (copy_from_user + (&sd, (struct ivtv_cfg_stop_decode *)arg, + sizeof(sd))) + return -EFAULT; + 
+ if ((sd.hide_last < 0) || (sd.hide_last > 1)) + return -EINVAL; + itv->dec_options.hide_last_frame = sd.hide_last; + + itv->dec_options.pts_low = + (u32) (sd.pts_stop & 0xFFFFFFFF); + itv->dec_options.pts_hi = (u32) (sd.pts_stop >> 32); + + if (cmd == IVTV_IOC_S_STOP_DECODE) + break; + + if (down_interruptible(&id->itv->sem_lock)) + return -ERESTARTSYS; + + if (id->open_id == id->itv->v4l2.streams[id->type].id) { + ivtv_stop_decode(id); + clear_bit(IVTV_F_S_CAP, + &id->itv->v4l2.streams[id->type]. + s_flags); + id->itv->v4l2.streams[id->type].id = -1; + } + + up(&id->itv->sem_lock); + + break; + } + case IVTV_IOC_DEC_FLUSH:{ + if ((id->open_id == id->itv->v4l2.streams[id->type].id) + || (id->itv->v4l2.streams[id->type].id == -1)) { + if (down_interruptible(&id->itv->sem_lock)) + return -ERESTARTSYS; + + ivtv_flush_queues(id); + + up(&id->itv->sem_lock); + } else { + return -EBUSY; + } + } + case IVTV_IOC_DEC_STEP:{ + int howfar, *fieldsel = (int *)arg; + u32 data[IVTV_MBOX_MAX_DATA], result; + + get_user(howfar, fieldsel); + + if (howfar < 0 || howfar > 2) + return -EINVAL; + + data[0] = howfar; /* 0 = 1 frame, 1 = top field, 2 = bottom field */ + if (ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_DEC_STEP_VIDEO, &result, 1, + &data[0])) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DEC: error stepping\n"); + } + + break; + } + case IVTV_IOC_G_SPEED:{ + if (copy_to_user((void *)arg, + &itv->dec_options.speed, + sizeof(itv->dec_options.speed))) { + return -EFAULT; + } + + break; + } + case IVTV_IOC_S_SPEED:{ + struct ivtv_speed speed; + int ret = 0; + + if (copy_from_user(&speed, (struct ivtv_speed *)arg, + sizeof(speed))) + return -EFAULT; + + ret = ivtv_change_speed(itv, speed); + if (ret) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DEC: error in slow/fast mode\n"); + return ret; + } + + break; + } + case IVTV_IOC_S_SLOW_FAST:{ + struct ivtv_slow_fast sf; + struct ivtv_speed speed; + int ret; + + if (copy_from_user(&sf, (struct ivtv_slow_fast *)arg, + sizeof(sf))) + return 
-EFAULT; + + if ((sf.scale < 0) || (sf.scale > 50)) + return -EINVAL; + if ((sf.speed < 0) || (sf.speed > 1)) + return -EINVAL; + + memcpy(&speed, &itv->dec_options.speed, sizeof(speed)); + speed.scale = sf.scale; + speed.speed = sf.speed; + + ret = ivtv_change_speed(itv, speed); + if (ret) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DEC: error in set slow/fast mode\n"); + return ret; + } + + break; + } + case IVTV_IOC_PAUSE:{ + u32 data[IVTV_MBOX_MAX_DATA], result; + data[0] = 0; + if (ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_DEC_PAUSE_PLAYBACK, &result, 1, + &data[0])) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DEC: error pausing\n"); + } + + break; + } + case IVTV_IOC_GET_TIMING:{ + struct ivtv_ioctl_framesync timing; + + if (atomic_read(&itv->decoding) == 0) { + memset(&timing, 0, sizeof(timing)); + } else { + memcpy(&timing, &itv->dec_timestamp, + sizeof(timing)); + } + + if (copy_to_user((void *)arg, &timing, sizeof(timing))) { + return -EFAULT; + } + + break; + } + + case VIDIOC_QUERYMENU:{ + //FIXME copy_from_user needed + struct v4l2_querymenu *qmenu = + (struct v4l2_querymenu *)arg; + + if (qmenu->id >= V4L2_CID_PRIVATE_BASE) { + int off = qmenu->id - V4L2_CID_PRIVATE_BASE; + if (off < IVTV_V4L2_AUDIO_MENUCOUNT) { + u32 i = qmenu->index; + if ((i >= + itv->v4l2.audio_meta[off].ctrl-> + minimum) + && (i <= + itv->v4l2.audio_meta[off].ctrl-> + maximum)) { + memcpy(qmenu, + &itv->v4l2. 
+ audio_meta[off].menu[i], + sizeof(struct + v4l2_querymenu)); + } else { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "qmenu: invalid index\n"); + return -EINVAL; + } + } else { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "qmenu: id out of range\n"); + return -EINVAL; + } + + } + + break; + } + + case VIDIOC_QUERYCTRL:{ + //FIXME copy_from_user needed + struct v4l2_queryctrl *qctrl = + (struct v4l2_queryctrl *)arg; + + if (qctrl->id >= V4L2_CID_PRIVATE_BASE) { + int off = qctrl->id - V4L2_CID_PRIVATE_BASE; + if (off < IVTV_V4L2_AUDIO_MENUCOUNT) { + memcpy(qctrl, + itv->v4l2.audio_meta[off].ctrl, + sizeof(struct v4l2_queryctrl)); + } else { + switch (qctrl->id) { + case V4L2_CID_IVTV_DEC_SMOOTH_FF: + qctrl->type = + V4L2_CTRL_TYPE_BOOLEAN; + strncpy(qctrl->name, + "Smooth Slow/FF", 32); + qctrl->minimum = 0; + qctrl->maximum = 1; + qctrl->default_value = 1; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_IVTV_DEC_FR_MASK: + qctrl->type = + V4L2_CTRL_TYPE_INTEGER; + strncpy(qctrl->name, + "Frame Mask", 32); + qctrl->minimum = 0; + qctrl->maximum = 2; + qctrl->default_value = 2; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_IVTV_DEC_SP_MUTE: + qctrl->type = + V4L2_CTRL_TYPE_BOOLEAN; + strncpy(qctrl->name, + "Mute during slow/fast", + 32); + qctrl->minimum = 0; + qctrl->maximum = 1; + qctrl->default_value = 1; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_IVTV_DEC_FR_FIELD: + qctrl->type = + V4L2_CTRL_TYPE_BOOLEAN; + strncpy(qctrl->name, + "Toggle frame/field", + 32); + qctrl->minimum = 0; + qctrl->maximum = 1; + qctrl->default_value = 1; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_IVTV_DEC_AUD_SKIP: + qctrl->type = + V4L2_CTRL_TYPE_INTEGER; + strncpy(qctrl->name, + "Mute audio frames", + 32); + qctrl->minimum = 0; + qctrl->maximum = 15; + qctrl->default_value = 0; + qctrl->flags = 0; + 
qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_IVTV_DEC_NUM_BUFFERS: + qctrl->type = + V4L2_CTRL_TYPE_BOOLEAN; + strncpy(qctrl->name, + "Number of decoder buffers", + 32); + qctrl->minimum = 0; + qctrl->maximum = 1; + qctrl->default_value = 1; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 1; + break; + case V4L2_CID_IVTV_DEC_PREBUFFER: + qctrl->type = + V4L2_CTRL_TYPE_BOOLEAN; + strncpy(qctrl->name, + "Decoder prebuffer", + 32); + qctrl->minimum = 0; + qctrl->maximum = 1; + qctrl->default_value = 1; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 1; + break; + default: + IVTV_DEBUG(IVTV_DEBUG_ERR, + "qctrl: invalid control\n"); + return -EINVAL; + break; + } + } + + break; + } + + switch (qctrl->id) { + case V4L2_CID_BRIGHTNESS: + qctrl->type = V4L2_CTRL_TYPE_INTEGER; + strncpy(qctrl->name, "Brightness", 32); + qctrl->minimum = 0; + qctrl->maximum = 255; + qctrl->step = 0; + qctrl->default_value = 128; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_HUE: + qctrl->type = V4L2_CTRL_TYPE_INTEGER; + strncpy(qctrl->name, "Hue", 32); + qctrl->minimum = -128; + qctrl->maximum = 127; + qctrl->step = 0; + qctrl->default_value = 0; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_SATURATION: + qctrl->type = V4L2_CTRL_TYPE_INTEGER; + strncpy(qctrl->name, "Saturation", 32); + qctrl->minimum = 0; + qctrl->maximum = 127; + qctrl->step = 0; + qctrl->default_value = 64; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_CONTRAST: + qctrl->type = V4L2_CTRL_TYPE_INTEGER; + strncpy(qctrl->name, "Contrast", 32); + qctrl->minimum = 0; + qctrl->maximum = 127; + qctrl->step = 0; + qctrl->default_value = 64; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_AUDIO_VOLUME: + qctrl->type = V4L2_CTRL_TYPE_INTEGER; + 
strncpy(qctrl->name, "Volume", 32); + qctrl->minimum = 0; + qctrl->maximum = 65535; + qctrl->step = 0; + qctrl->default_value = 65535; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + case V4L2_CID_AUDIO_MUTE: + qctrl->type = V4L2_CTRL_TYPE_INTEGER; + strncpy(qctrl->name, "Mute", 32); + qctrl->minimum = 0; + qctrl->maximum = 1; + qctrl->step = 0; + qctrl->default_value = 1; + qctrl->flags = 0; + qctrl->reserved[0] = 0; + qctrl->reserved[1] = 0; + break; + default: + IVTV_DEBUG(IVTV_DEBUG_INFO, + "v4l2 ioctl: invalid control\n"); + return -EINVAL; + } + break; + } + case VIDIOC_S_CTRL:{ + //FIXME copy_from_user needed + struct v4l2_control *vctrl = (struct v4l2_control *)arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "v4l2 ioctl: set control\n"); + + if (vctrl->id >= V4L2_CID_PRIVATE_BASE) { + int off = vctrl->id - V4L2_CID_PRIVATE_BASE; + s32 v = vctrl->value; + if (off < IVTV_V4L2_AUDIO_MENUCOUNT) { + if ((v <= + itv->v4l2.audio_meta[off].ctrl-> + maximum) + && (v >= + itv->v4l2.audio_meta[off].ctrl-> + minimum)) { + itv->v4l2.audio_meta[off]. + setting = v; + /* presumably value has changed. + * we should update the bitmap */ + itv->v4l2.codec.audio_bitmap &= + ~itv->v4l2.audio_meta[off]. + mask; + itv->v4l2.codec.audio_bitmap |= + itv->v4l2.audio_meta[off]. 
+ table[v]; + + /* Also upade the digitizer setting */ + if (0 == off) { /* audio input bitrate */ + int vrate = (int)v; + /* FIXME not obvious how this works + * (see ivtv_ctrl_query_freq[]) */ + ivtv_call_i2c_client + (itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_SET_AUDIO, + &vrate); + } + } else { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: value out of range\n"); + return -ERANGE; + } + } else { + switch (vctrl->id) { + case V4L2_CID_IVTV_DEC_SMOOTH_FF: + if ((v < 0) || (v > 1)) + return -ERANGE; + itv->dec_options.speed.smooth = + vctrl->value; + break; + case V4L2_CID_IVTV_DEC_FR_MASK: + if ((v < 0) || (v > 2)) + return -ERANGE; + itv->dec_options.speed.fr_mask = + vctrl->value; + break; + case V4L2_CID_IVTV_DEC_SP_MUTE: + if ((v < 0) || (v > 1)) + return -ERANGE; + itv->dec_options.speed. + aud_mute = vctrl->value; + break; + case V4L2_CID_IVTV_DEC_FR_FIELD: + if ((v < 0) || (v > 1)) + return -ERANGE; + itv->dec_options.speed. + fr_field = vctrl->value; + break; + case V4L2_CID_IVTV_DEC_AUD_SKIP: + if ((v < 0) || (v > 15)) + return -ERANGE; + itv->dec_options.mute_frames = + vctrl->value; + break; + case V4L2_CID_IVTV_DEC_NUM_BUFFERS: + if ((v < 0) || (v > 1)) + return -ERANGE; + itv->dec_options.decbuffers = + vctrl->value; + break; + case V4L2_CID_IVTV_DEC_PREBUFFER: + if ((v < 0) || (v > 1)) + return -ERANGE; + itv->dec_options.prebuffer = + vctrl->value; + break; + default: + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: invalid control\n"); + return -EINVAL; + } + } + + break; + } + + switch (vctrl->id) { + case V4L2_CID_BRIGHTNESS:{ + struct saa7114 pic; + + if (vctrl->value < 0 + || vctrl->value > 255) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: invalid brightness value: %d\n", + vctrl->value); + return -EINVAL; + } + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_GET_PICTURE, + &pic); + pic.bright = vctrl->value; + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_SET_PICTURE, + &pic); + break; + } + case V4L2_CID_HUE:{ + struct saa7114 pic; + + if 
(vctrl->value < -128 + || vctrl->value > 127) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: invalid hue value: %d\n", + vctrl->value); + return -EINVAL; + } + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_GET_PICTURE, + &pic); + pic.hue = vctrl->value; + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_SET_PICTURE, + &pic); + break; + } + case V4L2_CID_SATURATION:{ + struct saa7114 pic; + + if (vctrl->value < 0 + || vctrl->value > 127) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: invalid saturation value: %d\n", + vctrl->value); + return -EINVAL; + } + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_GET_PICTURE, + &pic); + pic.sat = vctrl->value; + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_SET_PICTURE, + &pic); + break; + } + case V4L2_CID_CONTRAST:{ + struct saa7114 pic; + + if (vctrl->value < 0 + || vctrl->value > 127) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: invalid contrast value: %d\n", + vctrl->value); + return -EINVAL; + } + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_GET_PICTURE, + &pic); + pic.contrast = vctrl->value; + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_SET_PICTURE, + &pic); + break; + } + case V4L2_CID_AUDIO_VOLUME:{ + struct video_audio va; + + if (vctrl->value > 65535 + || vctrl->value < 0) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: invalid value for volume: %d", + vctrl->value); + return -EINVAL; + } + ivtv_call_i2c_client(itv, + IVTV_MSP3400_I2C_ADDR, + VIDIOCGAUDIO, &va); + va.volume = vctrl->value; + ivtv_call_i2c_client(itv, + IVTV_MSP3400_I2C_ADDR, + VIDIOCSAUDIO, &va); + break; + } + case V4L2_CID_AUDIO_MUTE:{ + struct video_audio va; + ivtv_call_i2c_client(itv, + IVTV_MSP3400_I2C_ADDR, + VIDIOCGAUDIO, &va); + if (vctrl->value) + va.flags |= VIDEO_AUDIO_MUTE; + else + va.flags = + (va. 
+ flags & + ~(VIDEO_AUDIO_MUTE)); + ivtv_call_i2c_client(itv, + IVTV_MSP3400_I2C_ADDR, + VIDIOCSAUDIO, &va); + break; + } + default: + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: invalid control\n"); + return -EINVAL; + } + + break; + } + case VIDIOC_G_CTRL:{ + //FIXME copy_from_user needed + struct v4l2_control *vctrl = (struct v4l2_control *)arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "v4l2 ioctl: get control\n"); + + if (vctrl->id >= V4L2_CID_PRIVATE_BASE) { + int off = vctrl->id - V4L2_CID_PRIVATE_BASE; + if (off < IVTV_V4L2_AUDIO_MENUCOUNT) { + vctrl->value = + itv->v4l2.audio_meta[off].setting; + } else { + switch (vctrl->id) { + case V4L2_CID_IVTV_DEC_SMOOTH_FF: + vctrl->value = + itv->dec_options.speed. + smooth; + break; + case V4L2_CID_IVTV_DEC_FR_MASK: + vctrl->value = + itv->dec_options.speed. + fr_mask; + break; + case V4L2_CID_IVTV_DEC_SP_MUTE: + vctrl->value = + itv->dec_options.speed. + aud_mute; + break; + case V4L2_CID_IVTV_DEC_FR_FIELD: + vctrl->value = + itv->dec_options.speed. + fr_field; + break; + case V4L2_CID_IVTV_DEC_AUD_SKIP: + vctrl->value = + itv->dec_options. 
+ mute_frames; + break; + case V4L2_CID_IVTV_DEC_NUM_BUFFERS: + vctrl->value = + itv->dec_options.decbuffers; + break; + case V4L2_CID_IVTV_DEC_PREBUFFER: + vctrl->value = + itv->dec_options.prebuffer; + break; + default: + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: invalid control\n"); + return -EINVAL; + } + } + + break; + } + + switch (vctrl->id) { + case V4L2_CID_BRIGHTNESS:{ + struct saa7114 pic; + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_GET_PICTURE, + &pic); + vctrl->value = pic.bright; + break; + } + case V4L2_CID_HUE:{ + struct saa7114 pic; + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_GET_PICTURE, + &pic); + vctrl->value = pic.hue; + break; + } + case V4L2_CID_SATURATION:{ + struct saa7114 pic; + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_GET_PICTURE, + &pic); + vctrl->value = pic.sat; + break; + } + case V4L2_CID_CONTRAST:{ + struct saa7114 pic; + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_GET_PICTURE, + &pic); + vctrl->value = pic.contrast; + break; + } + case V4L2_CID_AUDIO_VOLUME:{ + struct video_audio va; + ivtv_call_i2c_client(itv, + IVTV_MSP3400_I2C_ADDR, + VIDIOCGAUDIO, &va); + vctrl->value = va.volume; + break; + } + case V4L2_CID_AUDIO_MUTE:{ + struct video_audio va; + ivtv_call_i2c_client(itv, + IVTV_MSP3400_I2C_ADDR, + VIDIOCGAUDIO, &va); + vctrl->value = + (va.flags & VIDEO_AUDIO_MUTE); + break; + } + default: + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ctrl: invalid control\n"); + return -EINVAL; + } + break; + } + case VIDIOC_QUERYCAP:{ + //FIXME copy_from_user needed + struct v4l2_capability *vcap = + (struct v4l2_capability *)arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 ioctl: querycap\n"); + + /* driver name */ + strcpy(vcap->driver, IVTV_DRIVER_NAME); + + /* card type */ + strcpy(vcap->card, + id->itv->v4l2.streams[id->type].v4l2dev->name); + + /* bus info.. 
card # will do */ + sprintf(vcap->bus_info, "%d", itv->num); + + /* version */ + vcap->version = IVTV_DRIVER_VERSION; + + /* capabilities */ + vcap->capabilities = itv->v4l2.capabilities; + + /* reserved.. must set to 0! */ + vcap->reserved[0] = vcap->reserved[1] = + vcap->reserved[2] = vcap->reserved[3] = 0; + break; + } + case VIDIOC_ENUMINPUT:{ + //FIXME copy_from_user needed + struct v4l2_input *vin = (struct v4l2_input *)arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 ioctl: enuminput\n"); + + if ((vin->index < 0) + || (vin->index >= itv->v4l2.input.count)) + return -EINVAL; + + /* set it to defaults from our table */ + memcpy(vin, + &itv->v4l2.input.table.input[vin->index], + sizeof(struct v4l2_input)); + + /* set the standard to whatever our overall standard is */ + vin->std = tmk_standards[itv->v4l2.standard.active].id; + vin->status = 0; /*FIXME status isn't always ok... */ + + break; + } + + case VIDIOC_G_FMT:{ + //FIXME copy_from_user needed + struct v4l2_format *vfmt = (struct v4l2_format *)arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 ioctl: get format\n"); + + /* FIXME switch on stream type */ + memcpy(vfmt, &itv->v4l2.streams[0].format, + sizeof(struct v4l2_format)); + break; + } + case VIDIOC_S_FMT:{ + //FIXME copy_from_user needed + struct v4l2_format *vfmt = (struct v4l2_format *)arg; + struct video_window wind; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 ioctl: set format\n"); + + /* FIXME only sets resolution for now */ + wind.width = vfmt->fmt.pix.width; + wind.height = vfmt->fmt.pix.height; + ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR, + DECODER_SET_SIZE, &wind); + + /* FIXME switch on stream type, bounds checking */ + memcpy(&itv->v4l2.streams[0].format, vfmt, + sizeof(struct v4l2_format)); + /* Adjust res in YUV also */ + itv->v4l2.streams[1].format.fmt.pix.height = + vfmt->fmt.pix.height; + itv->v4l2.streams[1].format.fmt.pix.width = + vfmt->fmt.pix.width; + + break; + } + case VIDIOC_G_INPUT:{ + //FIXME copy_from_user needed + int *inp = (int 
*)arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 ioctl: get input\n"); + + *inp = itv->v4l2.input.active; + break; + } + case VIDIOC_S_INPUT:{ + //FIXME copy_from_user needed + int a_in, inp = *(int *)arg; + struct msp_matrix mspm; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 ioctl: set input\n"); + + if ((inp < 0) || (inp >= itv->v4l2.input.count)) + return -EINVAL; + + if (inp == itv->v4l2.input.active) { + IVTV_DEBUG(IVTV_DEBUG_INFO, + "Input unchanged\n"); + } else { + IVTV_DEBUG(IVTV_DEBUG_INFO, + "Changing input from %d to %d\n", + itv->v4l2.input.active, inp); + + itv->v4l2.input.active = inp; + itv->v4l2.audio.active = + itv->v4l2.input.table.input[inp].audioset; + + /* Mute sound to avoid pop */ + mspm.input = 8; + mspm.output = itv->v4l2.audio_output; + ivtv_call_i2c_client(itv, IVTV_MSP3400_I2C_ADDR, + MSP_SET_MATRIX, &mspm); + + if (0 != ivtv_pause_encoder(itv, 0)) + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Input: Error pausing stream\n"); + + ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR, + DECODER_SET_INPUT, &inp); + + /* Pause to let sound calm down */ + ivtv_sleep_timeout(HZ / 33); + + if (0 != ivtv_pause_encoder(itv, 1)) + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Input: Error unpausing stream\n"); + + /* FIXME Needs to be card-specific */ + a_in = ivtv_set_audio(itv, tmk_audio_mapping); + if (a_in < 0) + return a_in; + } + break; + } + case VIDIOC_G_FREQUENCY:{ + //FIXME copy_from_user needed + struct v4l2_frequency *vf = + (struct v4l2_frequency *)arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "v4l2 ioctl: get frequency\n"); + + if ((vf->tuner < 0) + || (vf->tuner >= itv->v4l2.tuner.count)) + return -EINVAL; + vf->frequency = itv->v4l2.freq.frequency; + break; + } + case VIDIOC_S_FREQUENCY:{ + //FIXME copy_from_user needed + struct v4l2_frequency *vf = + (struct v4l2_frequency *)arg; + struct msp_matrix mspm; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "v4l2 ioctl: set frequency\n"); + + if ((vf->tuner < 0) + || (vf->tuner >= itv->v4l2.tuner.count)) + return -EINVAL; + itv->v4l2.freq.frequency = 
vf->frequency; + + /* Mute sound to avoid pop */ + mspm.input = 8; + mspm.output = itv->v4l2.audio_output; + ivtv_call_i2c_client(itv, IVTV_MSP3400_I2C_ADDR, + MSP_SET_MATRIX, &mspm); + + if (0 != ivtv_pause_encoder(itv, 0)) + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Freq: Error pausing stream\n"); + + /* Set frequency */ + ivtv_call_i2c_client(itv, IVTV_TUNER_I2C_ADDR, + VIDIOCSFREQ, + &itv->v4l2.freq.frequency); + + /* Pause to let sound calm down */ + ivtv_sleep_timeout(HZ / 33); + + if (0 != ivtv_pause_encoder(itv, 1)) + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Freq: Error unpausing stream\n"); + + /* Unmute */ + ivtv_set_audio(itv, tmk_audio_mapping); + + break; + } + case VIDIOC_ENUMSTD:{ + //FIXME copy_from_user needed + struct v4l2_standard *vs = (struct v4l2_standard *)arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "v4l2 ioctl: enum standard\n"); + + if ((vs->index < 0) + || (vs->index >= itv->v4l2.standard.count)) + return -EINVAL; + + memcpy(vs, &itv->v4l2.standard.table.std[vs->index], + sizeof(struct v4l2_standard)); + + break; + } + case VIDIOC_G_STD:{ + //FIXME copy_from_user needed + v4l2_std_id *vs = (v4l2_std_id *) arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "v4l2 ioctl: get standard\n"); + + *vs = + itv->v4l2.standard.table.std[itv->v4l2.standard. + active].id; + break; + } + case VIDIOC_S_STD:{ + //FIXME copy_from_user needed + v4l2_std_id *vs = (v4l2_std_id *) arg; + struct video_channel v; + int x; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "v4l2 ioctl: set standard\n"); + + for (x = 0; x < itv->v4l2.standard.count; x++) { + if (itv->v4l2.standard.table.std[x].id & *vs) { + IVTV_DEBUG(IVTV_DEBUG_INFO, + "Switching standard to %s.\n", + itv->v4l2.standard.table. 
+ std[x].name); + itv->v4l2.standard.active = x; + /* fixme set standard here */ + switch (itv->v4l2.standard.active) { + case 0: /* NTSC */ + v.norm = VIDEO_MODE_NTSC; + break; + case 1: /* PAL */ + v.norm = VIDEO_MODE_PAL; + break; + case 2: /* SECAM */ + v.norm = VIDEO_MODE_SECAM; + break; + default: + break; + } + + /* Tuner */ + ivtv_call_i2c_client(itv, + IVTV_TUNER_I2C_ADDR, + VIDIOCSCHAN, &v); + /* Tuner Audio */ + ivtv_call_i2c_client(itv, + IVTV_MSP3400_I2C_ADDR, + VIDIOCSCHAN, &v); + /* Digitizer */ + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_SET_NORM, + &v.norm); + + if (itv->v4l2.standard.active == 0) { // NTSC + itv->v4l2.codec.framespergop = + 15; + itv->v4l2.codec.framerate = 0; + } else { // PAL + itv->v4l2.codec.framespergop = + 12; + itv->v4l2.codec.framerate = 1; + } + + return 0; + } + } + return -EINVAL; + } + case VIDIOC_S_TUNER:{ /* Setting tuner can only set audio mode */ + //FIXME copy_from_user needed + struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 ioctl: set tuner\n"); + + if ((vt->index < 0) + || (vt->index >= itv->v4l2.tuner.count)) + return -EINVAL; + /* looks like tuner.c doesn't support selection + * fallback to stereo... */ + vt->audmode = V4L2_TUNER_MODE_STEREO; + + break; + } + case VIDIOC_G_TUNER:{ + //FIXME copy_from_user needed + struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; + int sig = 0; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "v4l2 ioctl: get tuner\n"); + + if ((vt->index < 0) + || (vt->index >= itv->v4l2.tuner.count)) + return -EINVAL; + + memcpy(vt, &itv->v4l2.tuner.table.tuner[vt->index], + sizeof(struct v4l2_tuner)); + + ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR, + DECODER_GET_STATUS, &sig); + + if (sig & DECODER_STATUS_GOOD) { + vt->signal = 65535; /* best possible signal */ + } else { + vt->signal = 0; + } + break; + } + case MSP_SET_MATRIX:{ + //FIXME copy_from_user needed + struct msp_matrix *mspm = (struct msp_matrix *)arg; + + /* FIXME hardcoding! 
*/ + if ((mspm->input < 1) || (mspm->input > 8)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Invalid audio input!\n"); + return -EINVAL; + } + if ((mspm->output < 0) || (mspm->output > 3)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Invalid audio output!\n"); + return -EINVAL; + } + + itv->v4l2.audio_output = mspm->output; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "v4l2 ioctl: set matrix in=%d,out=%d\n", + mspm->input, mspm->output); + + ivtv_call_i2c_client(itv, IVTV_MSP3400_I2C_ADDR, + MSP_SET_MATRIX, mspm); + break; + } + case IVTV_IOC_G_CODEC:{ + //FIXME copy_from_user needed + struct ivtv_ioctl_codec *codec = + (struct ivtv_ioctl_codec *)arg; + + /* FIXME: bounds check? */ + memcpy(codec, &(itv->v4l2.codec), + sizeof(struct ivtv_ioctl_codec)); + break; + } + case IVTV_IOC_S_CODEC:{ + //FIXME copy_from_user needed + struct ivtv_ioctl_codec *codec = + (struct ivtv_ioctl_codec *)arg; + + /* FIXME: insert abundant parameter validation here */ + if ((codec->bitrate == 0) || (codec->bitrate_peak == 0) + || (codec->bitrate > codec->bitrate_peak)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ivtv ioctl: set " + "bitrate=%u < peak=%u: failed\n", + codec->bitrate, codec->bitrate_peak); + return -EINVAL; + } else { + /* Passed the garbage check */ + memcpy(&(itv->v4l2.codec), codec, + sizeof(struct ivtv_ioctl_codec)); + } + + /* VCD streamtype has some quirks. 
Handle them here */ + if ((codec->stream_type == IVTV_STREAM_VCD) || + (codec->stream_type == IVTV_STREAM_MPEG1)) { + struct v4l2_format *vfmt = + (struct v4l2_format *)arg; + struct video_window wind; + int tmpsize = 480; + + if (itv->v4l2.standard.active == 1) + tmpsize = 576; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "ivtv ioctl: mpeg1_stream " + "size %d\n", tmpsize); + + /* so far it looks like you can change width at will * + * but the compressor is unhappy when the height changes * + * to anything other than 240 */ + wind.width = 352; + wind.height = tmpsize; + vfmt->fmt.pix.width = 352; + vfmt->fmt.pix.height = tmpsize / 2; + + ivtv_call_i2c_client(itv, + IVTV_SAA7115_I2C_ADDR, + DECODER_SET_SIZE, &wind); + memcpy(&itv->v4l2.streams[0].format, vfmt, + sizeof(struct v4l2_format)); + } + + break; + } + case IVTV_IOCTL_GET_DEBUG_LEVEL:{ + //FIXME copy_from_user needed + int *dbg_level = (int *)arg; + IVTV_DEBUG(IVTV_DEBUG_INFO, + "IVTV_IOCTL_GET_DEBUG_LEVEL ivtv_debug = " + "0x%08x\n", ivtv_debug); + if (dbg_level) { + put_user(ivtv_debug, dbg_level); + } else { + printk + ("ivtv: Error: IVTV_IOCTL_GET_DEBUG_LEVEL called with " + "NULL\n"); + } + break; + } + case IVTV_IOCTL_SET_DEBUG_LEVEL:{ + //FIXME copy_from_user needed + int *dbg_level = (int *)arg; + int old_debug_level = ivtv_debug; + get_user(ivtv_debug, dbg_level); + if (!(ivtv_debug & IVTV_DEBUG_ERR)) + ivtv_debug |= IVTV_DEBUG_ERR; + IVTV_DEBUG(IVTV_DEBUG_INFO, + "IVTV_IOCTL_SET_DEBUG_LEVEL ivtv_debug = " + "0x%08x (new) 0x%08x (old)\n", + ivtv_debug, old_debug_level); + put_user(ivtv_debug, dbg_level); + break; + } + case VIDIOC_STREAMOFF:{ + ivtv_v4l2_streamoff(id); + break; + } + + case 0x00005401: /* Handle isatty() calls */ + return -EINVAL; + default: + /* If it got here, it's probably not supported.. 
*/ + IVTV_DEBUG(IVTV_DEBUG_ERR, "ivtv-api.c: unknown ioctl 0x%08x\n", + cmd); + return -ENOTTY; + } + return 0; +} diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/ivtv-driver.c current/drivers/media/video/ivtv-driver.c --- reference/drivers/media/video/ivtv-driver.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/media/video/ivtv-driver.c 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,3073 @@ +/* Main Driver file for the ivtv project: + * Driver for the iTVC15 chip. + * Author: Kevin Thayer (nufan_wfk at yahoo.com) + * License: GPL + * http://www.sourceforge.net/projects/ivtv/ + */ +/* Hack - this file needs to be converted to use the firmware api */ +#define __KERNEL_SYSCALLS__ + +#include "ivtv.h" + +// Version info +#define IVTV_VERSION_NUMBER(name) name##_version_int +#define IVTV_VERSION_STRING(name) name##_version_string +#define IVTV_VERSION_COMMENT(name) name##_comment_string + +#define IVTV_DEFINE_VERSION_INTERNAL(name, major, minor, patchlevel, comment) \ +unsigned int IVTV_VERSION_NUMBER(name) = ((major << 16) | (minor << 8) | (patchlevel)); \ +const char * const IVTV_VERSION_STRING(name) = #major"."#minor"."#patchlevel;\ +const char * const IVTV_VERSION_COMMENT(name) = comment; + +#define IVTV_VERSION_MAJOR(name) (0xFF & (IVTV_VERSION_NUMBER(name) >> 16)) +#define IVTV_VERSION_MINOR(name) (0xFF & (IVTV_VERSION_NUMBER(name) >> 8)) +#define IVTV_VERSION_PATCHLEVEL(name) (0xFF & (IVTV_VERSION_NUMBER(name))) + +#define IVTV_DEFINE_VERSION(name, major, minor, patchlevel, comment) IVTV_DEFINE_VERSION_INTERNAL(name, major, minor, patchlevel, comment) + +IVTV_DEFINE_VERSION(ivtv_rev, + IVTV_DRIVER_VERSION_MAJOR, + IVTV_DRIVER_VERSION_MINOR, + IVTV_DRIVER_VERSION_PATCHLEVEL, "release"); + +/* mini header */ + +/* var to keep track of the number of array elements in use */ +int ivtv_cards_active = 0; + +/* Master variable for all ivtv info */ +struct ivtv ivtv_cards[IVTV_MAX_CARDS]; + +/* for the global data */ +spinlock_t ivtv_lock 
__cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; + +/* add your revision and whatnot here */ +static struct pci_device_id ivtv_pci_tbl[] __devinitdata = { + {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0,} +}; + +static void ivtv_irq_dec_vsync(struct ivtv *itv); +static irqreturn_t ivtv_irq_handler(int irq, void *dev_id, + struct pt_regs *regs); +static void ivtv_DMA_done(struct ivtv *itv); +static void ivtv_sched_DMA(struct ivtv *itv); +static void ivtv_dec_DMA_done(struct ivtv *itv); +static void ivtv_dec_sched_DMA(struct ivtv *itv); + +static u32 ivtv_firm_search_id[] = + { 0x12345678, 0x34567812, 0x56781234, 0x78123456 }; + +/* Parameter declarations */ +static int num_devices = IVTV_DEFAULT_NUM_CARDS; +static int yuv_buffers = IVTV_DEFAULT_YUV_BUFFERS; +static int mpg_buffers = IVTV_DEFAULT_MPG_BUFFERS; +static int vbi_buffers = IVTV_DEFAULT_VBI_BUFFERS; +static int dec_mpg_buffers = IVTV_DEFAULT_DEC_MPG_BUFFERS; +static int dec_yuv_buffers = IVTV_DEFAULT_DEC_YUV_BUFFERS; +static int dec_mpg_qlen = IVTV_DEFAULT_DEC_MPG_QLEN; +static int dec_yuv_qlen = IVTV_DEFAULT_DEC_YUV_QLEN; +#ifdef YUV_FIXUP +static int yuv_fixup; +#endif + +int ivtv_pal = 0; + +/* low debugging by default */ +#if 0 +int debug = (IVTV_DEBUG_ERR | IVTV_DEBUG_INFO | IVTV_DEBUG_API + | IVTV_DEBUG_DMA | IVTV_DEBUG_IOCTL | IVTV_DEBUG_I2C + | IVTV_DEBUG_IRQ); +#endif +int ivtv_debug = IVTV_DEBUG_ERR; + +/* tuner.h tuner type for ivtv card */ +int tuner = -1; + +int errno; + +#define EXPAND_TO_STRING_INTERNAL(arg) #arg +#define EXPAND_TO_STRING(arg) EXPAND_TO_STRING_INTERNAL(arg) + +#ifdef YUV_FIXUP +MODULE_PARM(yuv_fixup, "i"); +MODULE_PARM_DESC(yuv_fixup, + "\nToggles conversion of Hauppauge Macroblock NV12 to NV12\n"); +#endif + +MODULE_PARM(tuner, "i"); +MODULE_PARM_DESC(tuner, "\nTuner type selection, see tuner.h for values"); + +MODULE_PARM(yuv_buffers, "i"); 
+MODULE_PARM_DESC(yuv_buffers, + "\nNumber of 32K buffers for copying YUV.\n" + "Default: " EXPAND_TO_STRING(IVTV_DEFAULT_YUV_BUFFERS) ", " + "Min: " EXPAND_TO_STRING(IVTV_MIN_YUV_BUFFERS) " " + "Max: " EXPAND_TO_STRING(IVTV_MAX_YUV_BUFFERS)); + +MODULE_PARM(mpg_buffers, "i"); +MODULE_PARM_DESC(mpg_buffers, + "\nNumber of 32K buffers for copying mpg.\n" + "Default: " EXPAND_TO_STRING(IVTV_DEFAULT_MPG_BUFFERS) ", " + "Min: " EXPAND_TO_STRING(IVTV_MIN_MPG_BUFFERS) " " + "Max: " EXPAND_TO_STRING(IVTV_MAX_MPG_BUFFERS)); + +MODULE_PARM(vbi_buffers, "i"); +MODULE_PARM_DESC(vbi_buffers, + "\nNumber of 32K buffers for copying VBI.\n" + "Default: " EXPAND_TO_STRING(IVTV_DEFAULT_VBI_BUFFERS) ", " + "Min: " EXPAND_TO_STRING(IVTV_MIN_VBI_BUFFERS) " " + "Max: " EXPAND_TO_STRING(IVTV_MAX_VBI_BUFFERS)); + +MODULE_PARM(num_devices, "i"); +MODULE_PARM_DESC(num_devices, "\nNumber of supported devices (1-9).\n" + "Default: " EXPAND_TO_STRING(IVTV_DEFAULT_NUM_CARDS)); + +MODULE_PARM(dec_mpg_buffers, "i"); +MODULE_PARM_DESC(dec_mpg_buffers, + "\nNumber of 32K buffers for decoding MPG.\n" + "Default: " EXPAND_TO_STRING(IVTV_DEFAULT_DEC_MPG_BUFFERS) ", " + "Min: " EXPAND_TO_STRING(IVTV_MIN_DEC_MPG_BUFFERS) " " + "Max: " EXPAND_TO_STRING(IVTV_MAX_DEC_MPG_BUFFERS)); + +MODULE_PARM(dec_yuv_buffers, "i"); +MODULE_PARM_DESC(dec_yuv_buffers, + "\nNumber of 32K buffers for decoding YUV.\n" + "Default: " EXPAND_TO_STRING(IVTV_DEFAULT_DEC_YUV_BUFFERS) ", " + "Min: " EXPAND_TO_STRING(IVTV_MIN_DEC_YUV_BUFFERS) " " + "Max: " EXPAND_TO_STRING(IVTV_MAX_DEC_YUV_BUFFERS) ", " + "0 to disable"); + +MODULE_PARM(dec_mpg_qlen, "i"); +MODULE_PARM_DESC(dec_mpg_qlen, + "\nNumber of 32K buffers to queue before dispatching to decoder\n" + "Default: " EXPAND_TO_STRING(IVTV_DEFAULT_DEC_MPG_QLEN) ", " + "Min: " EXPAND_TO_STRING(IVTV_MIN_DEC_MPG_QLEN) " " + "Max: "); + +MODULE_PARM(dec_yuv_qlen, "i"); +MODULE_PARM_DESC(dec_yuv_qlen, + "\nNumber of 32K buffers to queue before dispatching to decoder\n" + "Default: " 
EXPAND_TO_STRING(IVTV_DEFAULT_DEC_YUV_QLEN) ", " + "Min: " EXPAND_TO_STRING(IVTV_MIN_DEC_YUV_QLEN) " " + "Max: "); + +MODULE_PARM(ivtv_debug, "i"); +MODULE_PARM_DESC(ivtv_debug, "\nDebug level (bitmask), default, errors only\n" + "(debug=127 gives full debuging)"); + +MODULE_PARM(ivtv_pal, "i"); +MODULE_PARM_DESC(ivtv_pal, "\nUse PAL as default video mode instead of NTSC"); + +MODULE_AUTHOR("Kevin Thayer"); +MODULE_DESCRIPTION("Alpha iTVC15 driver"); +MODULE_SUPPORTED_DEVICE("iTVC15/16 mpg2 encoder (aka WinTV PVR 250/350)"); +MODULE_LICENSE("GPL"); + +static int SGarray_size; +static int DSGarray_size; + +void ivtv_sleep_timeout(int timeout) +{ + int sleep = timeout; + + do { + set_current_state(TASK_INTERRUPTIBLE); + sleep = schedule_timeout(sleep); + + } while (sleep && !signal_pending(current)); +} + +/* ceiling function for ints.. */ +int ivtv_ceil(int x, int y) +{ + int floor = (int)(x / y); + + if ((floor * y) < x) + return floor + 1; + return floor; +} + +/* Release ioremapped memory */ +static void ivtv_iounmap(struct ivtv *itv) +{ + if (itv == NULL) + return; + + /* Release io memory */ + if (itv->io_mem != NULL) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "releasing iomem\n"); + iounmap(itv->io_mem); + itv->io_mem = NULL; + } + + /* Release registers memory */ + if (itv->reg_mem != NULL) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "releasing regmem\n"); + iounmap(itv->reg_mem); + itv->reg_mem = NULL; + } + + /* Release encoder mailboxes */ + if (itv->enc_mbox != NULL) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "releasing encmbox\n"); + iounmap(itv->enc_mbox); + itv->enc_mbox = NULL; + } + + /* Release decoder mailboxes */ + if (itv->dec_mbox != NULL) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "releasing decmbox\n"); + iounmap(itv->dec_mbox); + itv->dec_mbox = NULL; + } +} + +/* must only be used as hints, not as a definitive answer. 
the answer could + * be wrong as soon as we return */ +int ivtv_get_free_elements(struct ivtv *itv, struct ivtv_buffer_list *queue) +{ + unsigned long flags; + int elements; + + spin_lock_irqsave(&itv->lock, flags); + elements = queue->elements; + spin_unlock_irqrestore(&itv->lock, flags); + + return elements; +} + +inline void __ivtv_enq_buf(struct ivtv_buffer_list *queue, + struct ivtv_buffer *buf) +{ + WARN_ON(!list_empty(&buf->list)); + list_add_tail(&buf->list, &queue->list); + queue->elements++; +} + +/* Adds buffers to the tail, effectively making a queue */ +int ivtv_enq_buf(struct ivtv *itv, struct ivtv_buffer_list *queue, + struct ivtv_buffer *buf) +{ + unsigned long flags; + + spin_lock_irqsave(&itv->lock, flags); + __ivtv_enq_buf(queue, buf); + spin_unlock_irqrestore(&itv->lock, flags); + + return 0; +} + +inline void __ivtv_del_buf(struct ivtv_buffer_list *queue, + struct ivtv_buffer *buffer) +{ + WARN_ON(list_empty(&buffer->list)); + list_del_init(&buffer->list); + queue->elements--; +} + +/* called to remove the buffer returned by _peek_ functions */ +void ivtv_del_buf(struct ivtv *itv, struct ivtv_buffer_list *queue, + struct ivtv_buffer *buffer) +{ + unsigned long flags; + + spin_lock_irqsave(&itv->lock, flags); + __ivtv_del_buf(queue, buffer); + spin_unlock_irqrestore(&itv->lock, flags); +} + +void ivtv_move_buf(struct ivtv *itv, struct ivtv_buffer_list *from, + struct ivtv_buffer_list *to, struct ivtv_buffer *buffer) +{ + unsigned long flags; + + WARN_ON(list_empty(&buffer->list)); + + spin_lock_irqsave(&itv->lock, flags); + list_move_tail(&buffer->list, &to->list); + from->elements--; + to->elements++; + spin_unlock_irqrestore(&itv->lock, flags); +} + +/* returns first item in queue, doesn't dequeue */ +struct ivtv_buffer *__ivtv_deq_peek_head(struct ivtv_buffer_list *queue) +{ + + /* make sure list has something to DeQ */ + if (!list_empty(&queue->list)) + return list_entry(queue->list.next, struct ivtv_buffer, list); + + 
IVTV_DEBUG(IVTV_DEBUG_INFO, "DeQ from empty list\n"); + queue->elements = 0; + return NULL; +} + +struct ivtv_buffer *ivtv_deq_peek_head(struct ivtv *itv, + struct ivtv_buffer_list *queue) +{ + unsigned long flags; + struct ivtv_buffer *buffer; + + spin_lock_irqsave(&itv->lock, flags); + buffer = __ivtv_deq_peek_head(queue); + spin_unlock_irqrestore(&itv->lock, flags); + + return buffer; +} + +/* removes buffer from the head */ +struct ivtv_buffer *__ivtv_deq_buf(struct ivtv_buffer_list *queue) +{ + struct ivtv_buffer *buf; + + /* make sure list has something to DeQ */ + if (!list_empty(&queue->list)) { + buf = list_entry(queue->list.next, struct ivtv_buffer, list); + list_del_init(queue->list.next); + queue->elements--; + return buf; + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "DeQ from empty list!\n"); + queue->elements = 0; + return NULL; +} + +struct ivtv_buffer *ivtv_deq_buf(struct ivtv *itv, + struct ivtv_buffer_list *queue) +{ + struct ivtv_buffer *buf; + unsigned long flags; + + spin_lock_irqsave(&itv->lock, flags); + buf = __ivtv_deq_buf(queue); + spin_unlock_irqrestore(&itv->lock, flags); + + return buf; +} + +struct ivtv_buffer *ivtv_init_buffer(int gfp_mask) +{ + struct ivtv_buffer *ibuf; + + ibuf = kmalloc(sizeof(struct ivtv_buffer), gfp_mask); + if (ibuf == NULL) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "No mem on ibuf alloc!\n"); + return NULL; + } + + (void *)ibuf->buffer.m.userptr = kmalloc(IVTV_DMA_BUF_SIZE, gfp_mask); + if ((void *)ibuf->buffer.m.userptr == NULL) { + kfree(ibuf); + IVTV_DEBUG(IVTV_DEBUG_ERR, "No mem on buf alloc!\n"); + return NULL; + } + + INIT_LIST_HEAD(&ibuf->list); + ibuf->buffer.length = IVTV_DMA_BUF_SIZE; + ibuf->buffer.bytesused = 0; + ibuf->readpos = 0; + + return ibuf; +} + +#define IVTV_DMA_UNMAPPED ((u32) -1) + +void ivtv_free_buffer(struct ivtv *itv, struct ivtv_buffer *item) +{ + if (item->dma_handle != IVTV_DMA_UNMAPPED) + pci_unmap_single(itv->dev, item->dma_handle, IVTV_DMA_BUF_SIZE, + PCI_DMA_TODEVICE); + if 
(item->buffer.m.userptr) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "Freeing buf %d!\n", + item->buffer.index); + kfree((void *)item->buffer.m.userptr); + } + kfree(item); +} + +int ivtv_free_queue(struct ivtv_buffer_list *queue) +{ + struct ivtv_buffer *item; + unsigned long flags; + struct ivtv *itv; + int x; + + if (queue == NULL) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Free on NULL list!\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ivtv_lock, flags); + + /* FIXME ugly */ + /* verify ivtv before continuing */ + itv = NULL; + for (x = 0; x < ivtv_cards_active; x++) { + if (queue->vdev->priv == &ivtv_cards[x]) { + itv = queue->vdev->priv; + break; + } + } + + spin_unlock_irqrestore(&ivtv_lock, flags); + + if (itv == NULL) + return -ENODEV; + + while ((item = ivtv_deq_buf(itv, queue))) + ivtv_free_buffer(itv, item); + + return 0; +} + +/* NOTE: This returns the # of buffers allocated */ +int ivtv_init_queue(struct ivtv *itv, struct ivtv_buffer_list *queue, + int length, enum v4l2_buf_type type) +{ + int x; + struct ivtv_buffer *item; + + /* Just in case */ + INIT_LIST_HEAD(&queue->list); + + for (x = 0; x < length; x++) { + /* allocate buffer */ + item = ivtv_init_buffer(GFP_KERNEL); + if (item == NULL) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Buffer alloc failed!\n"); + return x; + } + + /* setup buffer */ + item->buffer.index = x; + item->buffer.type = type; + item->buffer.field = V4L2_FIELD_INTERLACED; + item->buffer.memory = V4L2_MEMORY_MMAP; + + /* enqueue buffer */ + ivtv_enq_buf(itv, queue, item); + } + + return x; +} + +int ivtv_move_queue(struct ivtv *itv, struct ivtv_buffer_list *src, + struct ivtv_buffer_list *dst) +{ + struct ivtv_buffer *buf; + unsigned long flags; + + spin_lock_irqsave(&itv->lock, flags); + + while ((buf = __ivtv_deq_buf(src))) + __ivtv_enq_buf(dst, buf); + + spin_unlock_irqrestore(&itv->lock, flags); + return 0; +} + +static int load_fw_direct(const char *fn, char *mem) +{ + int fd; + long l; + mm_segment_t fs = get_fs(); + + set_fs(get_ds()); + + if ((fd 
= open(fn, 0, 0)) == -1) { + printk(KERN_INFO "Unable to open '%s'.\n", fn); + l = -EINVAL; + goto out; + } + /* the 2 means SEEK_END */ + l = lseek(fd, 0L, 2); + + if (l <= 0 || l > IVTV_FIRM_IMAGE_SIZE) { + printk(KERN_INFO "Firmware image too large '%s'\n", fn); + l = -ENOMEM; + goto out; + } + + /* the 0 means SEEK_SET */ + lseek(fd, 0L, 0); + + if (read(fd, mem, l) != l) { + printk(KERN_INFO "Failed to read '%s'.\n", fn); + l = -ENOMEM; + } + + out: + close(fd); + set_fs(fs); + + return (int)l; +} + +int ivtv_firmware_copy(struct ivtv *itv) +{ + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Loading encoder image\n"); + + if (load_fw_direct(IVTV_FIRM_ENC_FILENAME, + (char *)(itv->io_mem + IVTV_ENC_MEM_START)) != + IVTV_FIRM_IMAGE_SIZE) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "failed loading encoder firmware\n"); + return -3; + } + + if (itv->card_type != IVTV_250_V2) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "Loading decoder firmware\n"); + if (load_fw_direct(IVTV_FIRM_DEC_FILENAME, + (char *)(itv->io_mem + + IVTV_DEC_MEM_START)) != + IVTV_FIRM_IMAGE_SIZE) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "failed loading decoder firmware\n"); + return -1; + } + } + + return 0; +} + +int ivtv_stop_firmware(struct ivtv *itv) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + int x = 0; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Stopping firmware\n"); + + if (atomic_read(&itv->capturing)) { + x = ivtv_stop_all_captures(itv); + if (x) + IVTV_DEBUG(IVTV_DEBUG_ERR, "stop_fw error 1. Code %d\n", + x); + } + + /*Stop decoder_playback */ + data[0] = 1; /* 0: render last frame, 1: stop NOW! :) */ + data[1] = 0; /* "low 4 bytes of stop index" */ + data[2] = 0; /* 0: stop immediately */ + x = ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_DEC_STOP_PLAYBACK, + &result, 3, &data[0]); + if (x) + IVTV_DEBUG(IVTV_DEBUG_ERR, "stop_fw error 2. Code %d\n", x); + + /*halt enc firmware */ + x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_ENC_HALT_FW, + &result, 0, &data[0]); + if (x) + IVTV_DEBUG(IVTV_DEBUG_ERR, "stop_fw error 3. 
Code %d\n", x); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Sleeping for 10ms\n"); + ivtv_sleep_timeout(HZ / 100); + + /*halt dec firmware */ + if (IVTV_250_V2 != itv->card_type) { + x = ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_DEC_HALT_FW, &result, 0, &data[0]); + if (x) + IVTV_DEBUG(IVTV_DEBUG_ERR, "stop_fw error 4. Code %d\n", + x); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Sleeping for 10ms\n"); + ivtv_sleep_timeout(HZ / 100); + } + + return 0; +} + +int ivtv_firmware_init(struct ivtv *itv) +{ + int x; + + /* check that we're not RE-loading firmware */ + /* a successful load will have detected HW */ + /* mailboxes. */ + + /* FIXME i dont think this will ever get called */ + if (NULL != itv->enc_mbox) { + IVTV_DEBUG(IVTV_DEBUG_INFO, + "readying card for firmware upload\n"); + x = ivtv_stop_firmware(itv); + if (x) + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error %d, stopping firmware\n", x); + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Stopping VDM\n"); + writel(IVTV_CMD_VDM_STOP, (IVTV_REG_VDM + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Stopping AO\n"); + writel(IVTV_CMD_AO_STOP, (IVTV_REG_AO + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "pinging (?) 
APU\n"); + writel(IVTV_CMD_APU_PING, (IVTV_REG_APU + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Stopping VPU\n"); + if (IVTV_250_V2 == itv->card_type) { + writel(IVTV_CMD_VPU_STOP16, (IVTV_REG_VPU + itv->reg_mem)); + } else { + writel(IVTV_CMD_VPU_STOP15, (IVTV_REG_VPU + itv->reg_mem)); + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Resetting Hw Blocks\n"); + writel(IVTV_CMD_HW_BLOCKS_RST, (IVTV_REG_HW_BLOCKS + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Stopping SPU\n"); + writel(IVTV_CMD_SPU_STOP, (IVTV_REG_SPU + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Sleeping for 10ms\n"); + ivtv_sleep_timeout(HZ / 100); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "init Encoder SDRAM pre-charge\n"); + writel(IVTV_CMD_SDRAM_PRECHARGE_INIT, + (IVTV_REG_ENC_SDRAM_PRECHARGE + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "init Encoder SDRAM refresh to 1us\n"); + writel(IVTV_CMD_SDRAM_REFRESH_INIT, + (IVTV_REG_ENC_SDRAM_REFRESH + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "init Decoder SDRAM pre-charge\n"); + writel(IVTV_CMD_SDRAM_PRECHARGE_INIT, + (IVTV_REG_DEC_SDRAM_PRECHARGE + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "init Decoder SDRAM refresh to 1us\n"); + writel(IVTV_CMD_SDRAM_REFRESH_INIT, + (IVTV_REG_DEC_SDRAM_REFRESH + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Sleeping for %dms (600 recommended)\n", + (int)IVTV_SDRAM_SLEEPTIME); + ivtv_sleep_timeout(IVTV_SDRAM_SLEEPTIME); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Card ready for firmware!\n"); + x = ivtv_firmware_copy(itv); + if (x) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error loading firmware!\n"); + return x; + } + + /*I guess this is read-modify-write :) */ + writel((readl(itv->reg_mem + IVTV_REG_SPU) & IVTV_MASK_SPU_ENABLE), + (IVTV_REG_SPU + itv->reg_mem)); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Sleeping for 1 sec\n"); + ivtv_sleep_timeout(HZ); + + /*I guess this is read-modify-write :) */ + if (IVTV_250_V2 == itv->card_type) { + writel((readl(itv->reg_mem + IVTV_REG_VPU) & + IVTV_MASK_VPU_ENABLE16), (IVTV_REG_VPU + 
itv->reg_mem)); + } else { + writel((readl(itv->reg_mem + IVTV_REG_VPU) & + IVTV_MASK_VPU_ENABLE15), (IVTV_REG_VPU + itv->reg_mem)); + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Sleeping for 1 sec\n"); + ivtv_sleep_timeout(HZ); + + /* FIXME Send Status API commands to encoder and decoder to verify! */ + + return 0; +} + +int ivtv_find_firmware_mailbox(struct ivtv *itv) +{ + u32 *searchptr, *result; + int match = 0; + + searchptr = NULL; + result = NULL; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Searching for encoder mailbox\n"); + searchptr = (u32 *) (IVTV_FIRM_SEARCH_ENCODER_START + itv->io_mem); + + while (searchptr < (u32 *) (IVTV_FIRM_SEARCH_ENCODER_END + itv->io_mem)) { + if (ivtv_firm_search_id[match] == readl(searchptr)) { + (u32) result = (u32) searchptr + 4; /* avoid pointer aritmetic */ + match++; + while ((match > 0) && (match < 4)) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "match: 0x%08x at " + "0x%08x. match: %d\n", *result, + (u32) result, match); + if (ivtv_firm_search_id[match] == readl(result)) { + match++; + /* FIXME change to just "result++;" ? */ + (u32) result = (u32) result + 4; + } else + match = 0; + } + } else { + IVTV_DEBUG(IVTV_DEBUG_INFO, "."); + } + if (4 == match) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "found encoder mailbox!\n"); + itv->enc_mbox = (struct ivtv_mailbox *)result; + break; + } + (u32) searchptr += IVTV_FIRM_SEARCH_STEP; + } + if (itv->enc_mbox == NULL) + IVTV_DEBUG(IVTV_DEBUG_ERR, "Encoder mailbox not found\n"); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Searching for decoder mailbox\n"); + match = 0; + searchptr = (u32 *) (IVTV_FIRM_SEARCH_DECODER_START + itv->io_mem); + + while (searchptr < (u32 *) (IVTV_FIRM_SEARCH_DECODER_END + itv->io_mem)) { + if (ivtv_firm_search_id[match] == readl(searchptr)) { + (u32) result = (u32) searchptr + 4; /* avoid pointer aritmetic */ + match++; + while ((match > 0) && (match < 4)) { + IVTV_DEBUG(IVTV_DEBUG_INFO, + "match: 0x%08x at 0x%08x. 
match: %d\n", + *result, (u32) result, match); + if (ivtv_firm_search_id[match] == readl(result)) { + match++; + /* FIXME change to just "result++;" ? */ + (u32) result = (u32) result + 4; + } else + match = 0; + } + } else { + IVTV_DEBUG(IVTV_DEBUG_INFO, "."); + } + if (4 == match) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "found decoder mailbox!\n"); + itv->dec_mbox = (struct ivtv_mailbox *)result; + break; + } + (u32) searchptr += IVTV_FIRM_SEARCH_STEP; + } + if (itv->dec_mbox == 0) + IVTV_DEBUG(IVTV_DEBUG_ERR, "Decoder mailbox not found\n"); + + return 0; +} + +int ivtv_get_free_mailbox(struct ivtv_mailbox *mbox) +{ + int i = 0; + if (NULL == mbox) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Can't get mailbox from NULL\n"); + return -ENODEV; + } + + /* FIXME hardcoded cause i'm not sure what changing API_BOXES will do */ + //for (i = 0; i < IVTV_MBOX_API_BOXES; i++) { + for (i = 0; i < 2; i++) { + if (mbox[i].flags & IVTV_MBOX_FIRMWARE_DONE) { + switch (mbox[i].cmd) { + case IVTV_API_SCHED_DMA_TO_HOST: + case IVTV_API_DEC_DMA_FROM_HOST: + IVTV_DEBUG(IVTV_DEBUG_API, + "recycled mailbox: %d\n", i); + writel(IVTV_MBOX_IN_USE, &mbox[i].flags); + return i; + break; + default: + IVTV_DEBUG(IVTV_DEBUG_API, + "Mailbox %d in use, skipping\n", i); + break; + } + /* FIXME using 'else' may leak mailboxes in some situations */ + } else if (!test_and_set_bit(0, &mbox[i].flags)) { + IVTV_DEBUG(IVTV_DEBUG_API, "got free mailbox: %d\n", i); + return i; + } + } + + IVTV_DEBUG(IVTV_DEBUG_ERR, "no free mailboxes!\n"); + IVTV_DEBUG(IVTV_DEBUG_ERR, "mbox 0: 0x%08x, mbox 1 0x%08x!\n", + mbox[0].cmd, mbox[1].cmd); + return -ENODEV; +} + +void ivtv_clear_irq_mask(struct ivtv *itv, unsigned long mask) +{ + itv->irqmask &= ~mask; + writel(itv->irqmask, (itv->reg_mem + IVTV_REG_IRQMASK)); + /* pci posting */ + readl(itv->reg_mem + IVTV_REG_IRQMASK); +} + +void ivtv_set_irq_mask(struct ivtv *itv, unsigned long mask) +{ + itv->irqmask |= mask; + writel(itv->irqmask, (itv->reg_mem + IVTV_REG_IRQMASK)); + /* pci 
posting */ + readl(itv->reg_mem + IVTV_REG_IRQMASK); +} + +/** + * Call ivtv api function using given mailbox, without locking sem. + */ +int __ivtv_api_call(struct ivtv_mailbox *mbox, u32 cmd, int elements, + const u32 * data) +{ + int x; + if (NULL == mbox) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "invalid api mailbox\n"); + return -ENODEV; + } + + /* "if mailbox is available" */ + if ((mbox->flags & IVTV_MBOX_FIRMWARE_DONE) || + (!test_and_set_bit(0, &mbox->flags))) { + /* I'm too lazy to invert the condition ;) */ + } else { + IVTV_DEBUG(IVTV_DEBUG_INFO, "Mailbox busy (unexpected)\n"); + IVTV_DEBUG(IVTV_DEBUG_INFO, + "cmd 0x%08x, m.cmd 0x%08x\n", cmd, mbox->cmd); + IVTV_DEBUG(IVTV_DEBUG_INFO, "d0 0x%08x, d1 0x%08x, d2 0x%08x\n", + mbox->data[0], mbox->data[1], mbox->data[2]); + return -EBUSY; + } + + readl(&mbox->flags); + writel(cmd, &mbox->cmd); + writel(IVTV_API_STD_TIMEOUT, &mbox->timeout); + + for (x = 0; x < IVTV_MBOX_MAX_DATA; x++) { + if (x < elements) { + writel(data[x], &mbox->data[x]); + } else { + writel(0, &mbox->data[x]); + } + } + + writel((IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_IN_USE), &mbox->flags); + readl(&mbox->flags); + + return 0; +} + +/* This one is for stuff that can't sleep.. irq handlers, etc.. 
*/ +int ivtv_api_getresult_nosleep(struct ivtv_mailbox *mbox, u32 * result, + u32 data[]) +{ + u32 readdata; + int count = 0; + + if (NULL == mbox) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "invalid api mailbox\n"); + return -ENODEV; + } + + readdata = readl(&mbox->flags); + + *result = readl(&mbox->retval); + for (count = 0; count < IVTV_MBOX_MAX_DATA; count++) + data[count] = readl(&mbox->data[count]); + + return 0; +} + +int __ivtv_api_getresult(struct ivtv_mailbox *mbox, u32 * result, u32 data[], + int api_timeout) +{ + u32 readdata; + int count = 0; + + readdata = readl(&mbox->flags); + + while (!(readdata & IVTV_MBOX_FIRMWARE_DONE)) { + IVTV_DEBUG(IVTV_DEBUG_API, + "[%d]result not ready, waiting 10 ms\n", count); + ivtv_sleep_timeout(HZ / 100); + readdata = readl(&mbox->flags); + + if (count++ > api_timeout) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "%d ms time out waiting for firmware\n", + api_timeout); + return -EBUSY; + } + } + + *result = readl(&mbox->retval); + for (count = 0; count < IVTV_MBOX_MAX_DATA; count++) + data[count] = readl(&mbox->data[count]); + + return 0; +} + +int ivtv_api(struct ivtv_mailbox *mbox, struct semaphore *sem, int cmd, + u32 * result, int args, u32 data[]) +{ + int x = 0, gotsem = 0, needsresult = 1; + int die = 0, api_timeout = 100; + struct ivtv_mailbox *local_box; + + IVTV_DEBUG(IVTV_DEBUG_API, "API Call: 0x%08x\n", cmd); + + local_box = mbox; + + /* check args */ + if (args > IVTV_MBOX_MAX_DATA) + return -EINVAL; + + switch (cmd) { + case IVTV_API_SCHED_DMA_TO_HOST: + case IVTV_API_DEC_DMA_FROM_HOST: + needsresult = 0; + if (down_trylock(sem)) { /* box 0 was busy */ + gotsem = 0; + local_box = &mbox[1]; + } else { + gotsem = 1; + } + if ((x = __ivtv_api_call(local_box, cmd, args, data))) { + if (local_box == mbox) { + IVTV_DEBUG(IVTV_DEBUG_API, + "Trying alternate mailbox\n"); + x = __ivtv_api_call(&mbox[1], cmd, args, data); + } + } + goto ivtv_api_done; + break; + /* adjust api timeout for these 2 calls */ + case IVTV_API_END_CAPTURE: + 
case IVTV_API_EVENT_NOTIFICATION: + api_timeout = 1000; + default: + if (down_interruptible(sem)) + return -ERESTARTSYS; + gotsem = 1; + break; + } + + /* wait 200ms for mailbox to become free */ + x = __ivtv_api_call(local_box, cmd, args, data); + while ((x == -EBUSY) && (die < 20)) { + die++; + ivtv_sleep_timeout(HZ / 100); + x = __ivtv_api_call(local_box, cmd, args, data); + IVTV_DEBUG(IVTV_DEBUG_API, "die: %d\n", die); + } + + if (x == -EBUSY) { + /* dilemma here: + if the command that currently has the mailbox + is lost, then it'll never free up the box, and + we'll lose our only 'general purpose' box forever. + But if it comes back, we run the risk of squishing + something important!. */ + switch (local_box->cmd) { + case IVTV_API_DEC_DMA_FROM_HOST: + /* if we got here, it's because the call to xfer the dma + was finished, and the command that wants to run has + already slept for 20ms. It's probably safe to take over + */ + IVTV_DEBUG(IVTV_DEBUG_API, + "Forcibly freeing mailbox\n"); + writel(0x00000000, &mbox->flags); + x = __ivtv_api_call(local_box, cmd, args, data); + break; + default: + /* do nothing */ + break; + } + } + + if (x) { + IVTV_DEBUG(IVTV_DEBUG_API, "Error running command 0x%08x\n", + cmd); + goto ivtv_api_done; + } + + if (needsresult) { + x = __ivtv_api_getresult(local_box, result, &data[0], + api_timeout); + IVTV_DEBUG(IVTV_DEBUG_API, "retval: 0x%08x\n", *result); + if (x == -EBUSY) + IVTV_DEBUG(IVTV_DEBUG_ERR, "api call 0x%08x\n", cmd); + } + + IVTV_DEBUG(IVTV_DEBUG_API, "Releasing mailbox (before 0x%08x, ", + readl(&mbox->flags)); + writel(0x00000000, &mbox->flags); + IVTV_DEBUG(IVTV_DEBUG_API, "after 0x%08x )\n", readl(&mbox->flags)); + + ivtv_api_done: + if (gotsem) { + up(sem); + } + + return x; +} + +int ivtv_firmware_versions(struct ivtv *itv) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + int x; + + /* Encoder */ + IVTV_DEBUG(IVTV_DEBUG_INFO, "Getting encoder firmware rev.\n"); + x = ivtv_api(itv->enc_mbox, &itv->enc_msem, 
IVTV_API_ENC_GETVER, + &result, 0, &data[0]); + if (x) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "error getting Encoder firmware version\n"); + } else { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Encoder revision: 0x%08x\n", data[0]); + } + + if (itv->card_type != IVTV_250_V2) { + /* Decoder */ + IVTV_DEBUG(IVTV_DEBUG_INFO, "Getting decoder firmware rev.\n"); + x = ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_DEC_GETVER, + &result, 0, &data[0]); + if (x) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "error getting Decoder firmware version\n"); + } else { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Decoder revision: 0x%08x\n", data[0]); + } + } + + return 0; +} + +void ivtv_flush_queues(struct ivtv_open_id *id) +{ + struct ivtv *itv = id->itv; + struct ivtv_v4l2_stream *st = &itv->v4l2.streams[id->type]; + struct ivtv_buffer *buf; + + /* move free_q to full_q to clean up partially-filled buffers */ + while ((buf = ivtv_deq_buf(itv, &st->free_q))) + ivtv_enq_buf(itv, &st->full_q, buf); + + while ((buf = ivtv_deq_buf(itv, &itv->v4l2.streams[id->type].full_q))) { + buf->buffer.bytesused = 0; + buf->readpos = 0; + ivtv_enq_buf(itv, &st->free_q, buf); + } + + while ((buf = ivtv_deq_buf(itv, &itv->v4l2.streams[id->type].dma_q))) { + buf->buffer.bytesused = 0; + buf->readpos = 0; + ivtv_enq_buf(itv, &st->free_q, buf); + } + + return; +} + +int ivtv_stop_all_captures(struct ivtv *itv) +{ + struct ivtv_open_id id; + int x; + id.itv = itv; + + down(&itv->sem_lock); + + for (x = 0; x < itv->v4l2.streamcount; x++) { + if (test_bit(IVTV_F_S_CAP, &itv->v4l2.streams[x].s_flags)) { + id.type = x; + ivtv_stop_capture(&id); + } + } + + up(&itv->sem_lock); + return 0; +} + +int ivtv_stop_capture(struct ivtv_open_id *id) +{ + struct ivtv *itv = id->itv; + u32 data[IVTV_MBOX_MAX_DATA], result; + DECLARE_WAITQUEUE(wait, current); + int type, subtype, then; + int x; + + /* This function assumes that you are allowed to stop the capture + and that we are actually capturing */ + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Stop Capture\n"); + + 
/* sem_lock must be held */ + IVTV_ASSERT(ivtv_sem_count(&itv->sem_lock) <= 0); + + type = id->type; + if (type == 1) { + subtype = 3; //FIXME temp + } else { + subtype = 3; + } + +#if 0 + /* only run these if we're shutting down the last cap */ + if (atomic_read(&itv->capturing) - 1 == 0) { + /* event notification (off) */ + data[0] = 0; /*type: 0 = refresh */ + data[1] = 0; /*on/off: 0 = off */ + data[2] = 0x10000000; /*intr_bit: 0x10000000 = digitizer */ + data[3] = -1; /*mbox_id: -1: none */ + x = ivtv_api(itv->enc_mbox, &itv->enc_msem, + IVTV_API_EVENT_NOTIFICATION, &result, 4, &data[0]); + if (x) + IVTV_DEBUG(IVTV_DEBUG_ERR, "stopcap error 1. Code %d\n", + x); + + } +#endif + + /* end_capture */ + data[0] = 1; /*when: 0 = end of GOP 1 = NOW! */ + data[1] = type; /*type: 0 = mpeg */ + data[2] = subtype; /*subtype: 3 = video+audio */ + x = ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_END_CAPTURE, + &result, 3, &data[0]); + if (x) + IVTV_DEBUG(IVTV_DEBUG_ERR, "stopcap error 2. Code %d\n", x); + + then = jiffies; + + add_wait_queue(&itv->v4l2.streams[type].waitq, &wait); + set_current_state(TASK_INTERRUPTIBLE); + do { + /* check if DMA is pending */ + if (!test_bit(IVTV_F_S_DMAP, &itv->v4l2.streams[type].s_flags)) { + break; + } + IVTV_DEBUG(IVTV_DEBUG_INFO, "dma still pending!\n"); + schedule_timeout(HZ / 100); + } while (((then + HZ) < jiffies) && !signal_pending(current)); + + set_current_state(TASK_RUNNING); + remove_wait_queue(&itv->v4l2.streams[type].waitq, &wait); + + if (test_bit(IVTV_F_S_DMAP, &itv->v4l2.streams[type].s_flags)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "giving up waiting for DMA pending clear\n"); + } +/* only needed if we're searching for an EOS.. 
currently disabled */ +#if 0 + /* only run these if we're shutting down the last cap */ + if (atomic_read(&itv->capturing) - 1 == 0) { + add_wait_queue(&itv->cap_w, &wait); + + set_current_state(TASK_INTERRUPTIBLE); + + /* wait 2s for EOS interrupt */ + while ((!test_bit(IVTV_F_I_EOS, &itv->i_flags)) && + (jiffies < then + 2 * HZ)) { + schedule_timeout(HZ); + } + then = jiffies - then; + + if (!test_bit(IVTV_F_I_EOS, &itv->i_flags)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "EOS interrupt not received! stopping anyway.\n"); + IVTV_DEBUG(IVTV_DEBUG_ERR, + "waitied %d ms: %d\n", (1000 / HZ) * then); + } else { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "EOS took %d ms to occur.\n", + (1000 / HZ) * then); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&itv->cap_w, &wait); + } +#endif + clear_bit(IVTV_F_S_OVERFLOW, &itv->v4l2.streams[type].s_flags); + clear_bit(IVTV_F_S_CAP, &itv->v4l2.streams[type].s_flags); + + atomic_dec(&itv->capturing); + if (atomic_read(&itv->capturing)) + return 0; + + /*Set the following Interrupt mask bits: 0xd8000000 */ + ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE); + IVTV_DEBUG(IVTV_DEBUG_IRQ, "IRQ Mask is now: 0x%08x\n", itv->irqmask); + + return 0; +} + +int ivtv_stop_decode(struct ivtv_open_id *id) +{ + struct ivtv *itv = id->itv; + u32 data[IVTV_MBOX_MAX_DATA], result; + int x; + + /* sem_lock must be held */ + IVTV_ASSERT(ivtv_sem_count(&itv->sem_lock) <= 0); + + /* FIXME set 'die' ?? 
*/ + IVTV_DEBUG(IVTV_DEBUG_INFO, "Decoder stop.\n"); + + /* only run these if we're shutting down the last cap */ + if (atomic_read(&itv->decoding) - 1 == 0) { +#if 0 + /* event notification (off) */ + data[0] = 0; /* Event: 0 = audio change between stereo and mono */ + data[1] = 0; /* Enable/Disable: 0 = disabled, 1 = enabled */ + data[2] = 0x00010000; /* Bit: interrupt bit to fire */ + data[3] = -1; /* Mailbox to use: -1 = no mailbox needed */ + x = ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_DEC_EVENT_NOTIFICATION, &result, 4, + &data[0]); + if (x) + IVTV_DEBUG(IVTV_DEBUG_ERR, "stopDEC error 1. Code %d\n", + x); +#endif + + /* end_capture */ + data[0] = itv->dec_options.hide_last_frame; /* 0 = last frame, + 1 = black */ + data[1] = itv->dec_options.pts_low; /* when: pts low */ + data[2] = itv->dec_options.pts_hi; /* when: pts hi */ + x = ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_DEC_STOP_PLAYBACK, &result, 3, &data[0]); + if (x) + IVTV_DEBUG(IVTV_DEBUG_ERR, "stopDEC error 2. Code %d\n", + x); + } + + /* FIXME turn off relevant irqmask here */ + ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_DECODE); + + /* stop decoder interrupt timeout */ + del_timer_sync(&itv->dec_timeout); + + if (test_and_clear_bit + (IVTV_F_S_DMAP, &itv->v4l2.streams[id->type].s_flags)) + IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: clearing dma_pending\n"); + + /* Clean up some possibly loose-ends */ + clear_bit(IVTV_F_I_BUSY, &itv->i_flags); + atomic_dec(&itv->decoding); + wake_up(&itv->dec_master_w); + + return 0; +} + +void ivtv_dec_timeout(unsigned long arg) +{ + struct ivtv *itv = (struct ivtv *)arg; + + /* FIXME mpg only :/ */ + struct ivtv_v4l2_stream *stream = + &itv->v4l2.streams[IVTV_DEC_STREAM_TYPE_MPG]; + unsigned long flags; + + if (!test_bit(IVTV_F_S_DMAP, &stream->s_flags)) + return; + + IVTV_DEBUG(IVTV_DEBUG_ERR, + "ivtv_dec_timeout: lost IRQ; resetting...\n"); + spin_lock_irqsave(&itv->lock, flags); + ivtv_dec_DMA_done(itv); + /* kick it off again! 
*/ + set_bit(IVTV_F_I_NEEDS_DATA, &itv->i_flags); + ivtv_dec_sched_DMA(itv); + spin_unlock_irqrestore(&itv->lock, flags); +} + +#if 0 +static void ivtv_show_irq_status(struct ivtv *itv, u32 irqstat, u32 irqmask, + u32 dmastat) +{ + struct ivtv_mailbox *mbox8 = &itv->dec_mbox[8]; + struct ivtv_mailbox *mbox9 = &itv->dec_mbox[9]; + +#if 0 + // Make it less verbose... + if ((irqstat & ~4) == IVTV_IRQ_DEC_VSYNC) + return; +#endif + + printk("ivtv: irqstat [ " + "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s], " + "frame %d, pts %d, scr %d, type %d, offset %08x, max %d, full %d\n", + (irqstat & IVTV_IRQ_ENC_START_CAP) ? "StartCap " : "", + (irqstat & IVTV_IRQ_ENC_EOS) ? "EndOfStream " : "", + (irqstat & IVTV_IRQ_ENC_VBI_CAP) ? "VBICap " : "", + (irqstat & IVTV_IRQ_ENC_VIM_RST) ? "VIMReset " : "", + (irqstat & IVTV_IRQ_ENC_DMA_COMPLETE) ? "EncDMAComplete " : "", + (irqstat & (1 << 26)) ? "26 " : "", + (irqstat & IVTV_IRQ_DEC_COPY_PROTECT) ? "CopyProt " : "", + (irqstat & IVTV_IRQ_DEC_AUD_MODE_CHG) ? "AudioMode " : "", + (irqstat & (1 << 23)) ? "23 " : "", + (irqstat & IVTV_IRQ_DEC_DATA_REQ) ? "DecDataReq " : "", + (irqstat & IVTV_IRQ_DEC_IFRAME_DONE) ? "IFrameDone " : "", + (irqstat & IVTV_IRQ_DEC_DMA_COMPLETE) ? "DecDMAComplete " : "", + (irqstat & IVTV_IRQ_DEC_VBI_RE_INSERT) ? "VBIReInsert " : "", + (irqstat & IVTV_IRQ_DEC_DMA_ERR) ? "DecDMAError " : "", + (irqstat & (1 << 17)) ? "17 " : "", + (irqstat & (1 << 16)) ? "16 " : "", + (irqstat & (1 << 15)) ? "15 " : "", + (irqstat & (1 << 14)) ? "14 " : "", + (irqstat & (1 << 13)) ? "13 " : "", + (irqstat & (1 << 12)) ? "12 " : "", + (irqstat & (1 << 11)) ? "11 " : "", + (irqstat & IVTV_IRQ_DEC_VSYNC) ? "DecVSync " : "", + (irqstat & (1 << 9)) ? "9 " : "", + (irqstat & (1 << 8)) ? "8 " : "", + (irqstat & (1 << 7)) ? "7 " : "", + (irqstat & (1 << 6)) ? "6 " : "", + (irqstat & (1 << 5)) ? "5 " : "", + (irqstat & (1 << 4)) ? "4 " : "", + (irqstat & (1 << 3)) ? "3 " : "", + (irqstat & (1 << 2)) ? 
"2 " : "", + (irqstat & (1 << 1)) ? "1 " : "", + (irqstat & (1 << 0)) ? "0 " : "", + readl(&mbox8->data[0]), + readl(&mbox8->data[1]), + readl(&mbox8->data[3]), + readl(&mbox9->data[0]), + readl(&mbox9->data[1]), + readl(&mbox9->data[2]), readl(&mbox9->data[3])); +} +#endif + +static irqreturn_t ivtv_irq_handler(int irq, void *dev_id, struct pt_regs *regs) +{ + + u32 stat = 0; + u32 combo = 0; + struct ivtv *itv = (struct ivtv *)dev_id; + + spin_lock(&itv->lock); + + /* get contents of irq status register */ + stat = readl(itv->reg_mem + IVTV_REG_IRQSTATUS); + + combo = ~itv->irqmask & stat; + + if (0 == combo) { + /* wasn't for us */ + spin_unlock(&itv->lock); + return IRQ_NONE; + } + +/* + ivtv_show_irq_status(itv, stat, itv->irqmask, + readl(itv->reg_mem + IVTV_REG_DMASTATUS)); +*/ + + IVTV_DEBUG(IVTV_DEBUG_IRQ, "======= valid IRQ bits: 0x%08x ======\n", + combo); + + writel(combo, (itv->reg_mem + IVTV_REG_IRQSTATUS)); + + if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) { + ivtv_DMA_done(itv); + IVTV_DEBUG(IVTV_DEBUG_IRQ, "Processed DMA-complete\n"); + } + if (combo & IVTV_IRQ_ENC_START_CAP) { + ivtv_sched_DMA(itv); + IVTV_DEBUG(IVTV_DEBUG_IRQ, "Processed enc-startcap\n"); + } + if (combo & IVTV_IRQ_ENC_EOS) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "Encoder End Of Stream\n"); + set_bit(IVTV_F_I_EOS, &itv->i_flags); + wake_up(&itv->cap_w); + } + if (combo & IVTV_IRQ_ENC_VBI_CAP) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "deb3\n"); + } + if (combo & IVTV_IRQ_ENC_VIM_RST) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "VIM Restart\n"); + } + if (combo & IVTV_IRQ_DEC_COPY_PROTECT) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "deb6\n"); + } + if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "deb7\n"); + } + if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "Decoder DMA Done\n"); + ivtv_dec_DMA_done(itv); + } + if (combo & IVTV_IRQ_DEC_DATA_REQ) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "Decoder Data Request\n"); + set_bit(IVTV_F_I_NEEDS_DATA, &itv->i_flags); + ivtv_dec_sched_DMA(itv); + } + if 
(combo & IVTV_IRQ_DEC_IFRAME_DONE) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "deb9\n"); + } + if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "deb11\n"); + } + if (combo & IVTV_IRQ_DEC_DMA_ERR) { + IVTV_DEBUG(IVTV_DEBUG_IRQ, "deb12\n"); + } + if (combo & IVTV_IRQ_DEC_VSYNC) { + ivtv_irq_dec_vsync(itv); + } + /* + stat = readl(itv->reg_mem + IVTV_REG_IRQSTATUS); + IVTV_DEBUG(IVTV_DEBUG_IRQ, "IVTV IRQ STATUS REG AFTER INTERRUPT 0x%08x", stat); + if (combo & ~IVTV_IRQ_DEBUG_KLUGE) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "unknown irq 0x%08x, mask:0x%08x, combo:0x%08x\n", + stat, itv->irqmask, combo); + } + */ + spin_unlock(&itv->lock); + return IRQ_HANDLED; +} + +static void ivtv_irq_dec_vsync(struct ivtv *itv) +{ + u32 *data = itv->dec_mbox[IVTV_MBOX_FIELD_DISPLAYED].data; + + u32 newframe = readl(&data[0]); + u64 newpts = ((u64) readl(&data[2]) << 32) | (u64) (readl(&data[1])); + u64 newscr = ((u64) readl(&data[4]) << 32) | (u64) (readl(&data[3])); + + itv->dec_timestamp.pts = newpts; + itv->dec_timestamp.scr = newscr; + if (newframe != itv->dec_timestamp.frame) { + itv->dec_timestamp.frame = newframe; + wake_up(&itv->vsync_w); + } + + IVTV_DEBUG(IVTV_DEBUG_IRQ, + "ivtv_irq_dec_vsync: frames %d, pts %ld, scr %ld\n", + itv->dec_timestamp.frame, (long int)itv->dec_timestamp.pts, + (long int)itv->dec_timestamp.scr); +} + +static void ivtv_show_debug_flags(struct ivtv *itv) +{ + int y; + + printk(KERN_DEBUG "ivtv: i_flags=%lx", itv->i_flags); + for (y = IVTV_DEC_STREAM_TYPE_MPG; y < itv->v4l2.streamcount; y++) + printk(", %d s_sflags=%lx", y, itv->v4l2.streams[y].s_flags); + printk("\n"); +} + +static void ivtv_DMA_done(struct ivtv *itv) +{ + u32 result; + int y, stmtype = -1; + struct ivtv_v4l2_stream *stream = NULL; + struct ivtv_buffer *buf; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "DMA Done tasklet\n"); + + for (y = 0; y < itv->v4l2.streamcount; y++) { + if (test_and_clear_bit + (IVTV_F_S_DMAP, &itv->v4l2.streams[y].s_flags)) { + stmtype = y; + break; + } + } + + if 
(stmtype < 0) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Got DMA-done, but not expecting one\n"); + ivtv_show_debug_flags(itv); + return; + } + + stream = &itv->v4l2.streams[stmtype]; + + /* check DMA status register */ + result = readl(itv->reg_mem + IVTV_REG_DMASTATUS); + + if (!(result & IVTV_DMA_SUCCESS)) { + if (result & IVTV_DMA_WRITE_ERR) + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DMA write error. Result=0x%08x\n", result); + if (result & IVTV_DMA_READ_ERR) + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DMA read error. Result=0x%08x\n", result); + return; + } + + /* DMA was fine if we made it this far */ + + /* remove from dma_pending queue */ + while ((buf = __ivtv_deq_buf(&stream->dma_q))) { + IVTV_ASSERT(buf->dma_handle != IVTV_DMA_UNMAPPED); + pci_unmap_single(itv->dev, buf->dma_handle, + IVTV_DMA_BUF_SIZE, PCI_DMA_TODEVICE); + buf->dma_handle = IVTV_DMA_UNMAPPED; + /* byteswap ABCD -> DCBA for MPG data */ + if (stmtype == 0) { + for (y = 0; y < buf->buffer.bytesused; y += 4) { + swab32s((u32 *) ((u32) buf->buffer.m.userptr + + y)); + } + } + /* put in the 'done' queue */ + __ivtv_enq_buf(&stream->full_q, buf); + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "DMA Done tasklet5\n"); + /*wake up client */ + wake_up(&stream->waitq); + IVTV_DEBUG(IVTV_DEBUG_INFO, "DMA Done tasklet6\n"); +} + +/* must hold itv->lock */ +static int ivtv_ignore_DMA_req(struct ivtv *itv, u32 type) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + int ret = 0; + + data[0] = 0; + data[1] = 0; /* ACK the DMA and continue */ + data[2] = type; // AEW - API docs say type goes here + if (ivtv_api(itv->enc_mbox, &itv->enc_msem, + IVTV_API_SCHED_DMA_TO_HOST, &result, 3, &data[0])) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "error sending DMA info\n"); + ret = -EIO; + } + + if (!ret) + set_bit(IVTV_F_S_DMAP, &itv->v4l2.streams[type].s_flags); + + return ret; +} + +/* FIXME this function is getting too long. split it up? 
*/ +static void ivtv_sched_DMA(struct ivtv *itv) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + u32 type, size, offset; + u32 UVsize = 0, UVoffset = 0, pts_stamp = 0; + struct ivtv_v4l2_stream *st; + int x, bufs_needed; + int uvflag = 0; + struct ivtv_buffer *buf; + LIST_HEAD(free_list); + long sequence; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Sched DMA tasklet\n"); + + /* Get DMA destination and size arguments from card */ + x = ivtv_api_getresult_nosleep(&itv->enc_mbox[9], &result, &data[0]); + if (x) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "error:%d getting DMA info\n", x); + return; + } + + type = data[0]; + + /* FIXME should check for null on the stream element */ + if (itv->v4l2.streamcount <= type) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "No stream handler for type %d\n", + type); + ivtv_ignore_DMA_req(itv, type); + return; + } + + switch (type) { + case 0: /* MPEG */ + offset = data[1]; + size = data[2]; + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DMA/MPG type 0x%08x,size 0x%08x,offset 0x%08x\n", + type, size, offset); + bufs_needed = ivtv_ceil(size, IVTV_DMA_BUF_SIZE); + break; + case 1: /* YUV */ + offset = data[1]; + size = data[2]; + UVoffset = data[3]; + UVsize = data[4]; + pts_stamp = data[6]; + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DMA/YUV type 0x%08x,Ysize 0x%08x,Yoffset 0x%08x," + "UVsize 0x%08x,UVoffset 0x%08x,PTS 0x%08x\n", + type, size, offset, UVsize, UVoffset, pts_stamp); + bufs_needed = ivtv_ceil(size, IVTV_DMA_BUF_SIZE); + bufs_needed += ivtv_ceil(UVsize, IVTV_DMA_BUF_SIZE); + break; + + case 2: /* PCM (audio) */ + offset = data[1]; + size = data[2]; + pts_stamp = data[6]; + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DMA/PCM type 0x%08x,size 0x%08x,offset 0x%08x " + "PTS 0x%08x\n", type, size, offset, pts_stamp); + bufs_needed = ivtv_ceil(size, IVTV_DMA_BUF_SIZE); + ivtv_ignore_DMA_req(itv, type); + return; + case 3: /* VBI */ + offset = data[1]; + size = data[2]; + bufs_needed = ivtv_ceil(size, IVTV_DMA_BUF_SIZE); + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DMA/VBI type 0x%08x, size 0x%08x, offset 0x%08x" + 
"EXPERIMENTAL\n", type, size, offset); + break; + default: + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DMA/UNKNOWN type 0x%08x, NOT SUPPORTED\n", type); + ivtv_ignore_DMA_req(itv, type); + return; + } + + if (bufs_needed > SGarray_size) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "INTERNAL ERROR: ivtv_sched_DMA_tasklet: " + "bufs_needed = %d but SGarray_size = %d\n", + bufs_needed, SGarray_size); + return; + } + + st = &itv->v4l2.streams[type]; + + /* gather the needed buffers first, so we don't have to bail + * in mid-air. put them on a list on the stack */ + for (x = 0; x < bufs_needed; x++) { + buf = __ivtv_deq_buf(&st->free_q); + if (!buf) + break; + + list_add_tail(&buf->list, &free_list); + } + + /* damn, failed */ + if (x < bufs_needed) { + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DMA buffer DeQueue failed! got %d, want %d\n", + x + 1, bufs_needed + 1); + IVTV_DEBUG(IVTV_DEBUG_INFO, "SCHED: free_q: %d elements\n", + st->free_q.elements); + IVTV_DEBUG(IVTV_DEBUG_INFO, "SCHED: dma_q: %d elements\n", + st->dma_q.elements); + IVTV_DEBUG(IVTV_DEBUG_INFO, "SCHED: full_q: %d elements\n", + st->full_q.elements); + while (!list_empty(&free_list)) { + buf = + list_entry(free_list.next, struct ivtv_buffer, + list); + list_del_init(&buf->list); + __ivtv_enq_buf(&st->free_q, buf); + } + /* mark overflow condition, next free will restart dma req */ + set_bit(IVTV_F_S_OVERFLOW, &st->s_flags); + return; + } + + /* increment the sequence # */ + sequence = ++st->seq; + + for (x = 0; x < bufs_needed; x++) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "size: %d 0x%08x\n", size, size); + + if ((size == 0) && (type == 1) && (uvflag == 0)) { /* YUV */ + /* process the UV section */ + offset = UVoffset; + size = UVsize; + uvflag = 1; + } + + /* extract the buffers we procured earlier */ + buf = list_entry(free_list.next, struct ivtv_buffer, list); + list_del_init(&buf->list); + + buf->readpos = 0; + buf->buffer.index = x; + buf->buffer.sequence = sequence; + buf->ts = jiffies; + + if (size < (IVTV_DMA_BUF_SIZE & 0xffffff00)) { 
+ buf->buffer.bytesused = size; + /* bytecount must be multiple of 0x100 (256) */ + itv->SGarray[x].size = + (0xffffff00 & (buf->buffer.bytesused + 0xFF)); + size = 0; + } else { + buf->buffer.bytesused = IVTV_DMA_BUF_SIZE; + itv->SGarray[x].size = IVTV_DMA_BUF_SIZE; + size -= IVTV_DMA_BUF_SIZE; + } + + itv->SGarray[x].src = offset; + offset += buf->buffer.bytesused; + + /* unfortunately the pci dma api wasn't properly defined + * for handling mapping errors (running out of iommu space, + * for instance). 0 can be a valid bus address. */ + buf->dma_handle = pci_map_single(itv->dev, + (void *)buf->buffer.m.userptr, + buf->buffer.bytesused, + PCI_DMA_FROMDEVICE); + + itv->SGarray[x].dst = buf->dma_handle; + + /* FIXME need to add pts stuff, index, etc. */ + __ivtv_enq_buf(&st->dma_q, buf); + } + + /* This should wrap gracefully */ + /* FIXME obselete? */ + itv->trans_id++; + + itv->SGarray[bufs_needed - 1].size |= 0x80000000; + + /*FIXME unlock */ + + data[0] = itv->SG_handle; + /* 3 elements * 4 bytes per element * num_elements */ + data[1] = (3 * 4 * bufs_needed); + data[2] = type; + data[3] = 0x0; + + for (x = 0; x < bufs_needed; x++) + IVTV_DEBUG(IVTV_DEBUG_INFO, + "SGarray[%d]: 0x%08x, 0x%08x 0x%08x\n", x, + itv->SGarray[x].src, itv->SGarray[x].dst, + itv->SGarray[x].size); + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "Sched dma: addr: 0x%08x, array_size 0x%08x," + " type 0x%08x\n", data[0], data[1], data[2]); + + set_bit(IVTV_F_S_DMAP, &st->s_flags); + ivtv_api(itv->enc_mbox, &itv->enc_msem, IVTV_API_SCHED_DMA_TO_HOST, + &result, 4, &data[0]); +} + +static void ivtv_sched_DMA_tasklet(unsigned long arg) +{ + struct ivtv *itv = (struct ivtv *)arg; + unsigned long flags; + + spin_lock_irqsave(&itv->lock, flags); + ivtv_sched_DMA(itv); + spin_unlock_irqrestore(&itv->lock, flags); +} + +/* FIXME this function does way more than it should */ +static int __devinit ivtv_probe(struct pci_dev *dev, + const struct pci_device_id *pci_id) +{ + int retval = 0; + unsigned char 
pci_latency; + struct ivtv *itv; + struct video_channel v; + unsigned long freq; + u16 cmd; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Found card #%d\n", ivtv_cards_active); + + spin_lock_irq(&ivtv_lock); + + /* Make sure we've got a place for this card */ + if (ivtv_cards_active == IVTV_MAX_CARDS) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + ":Maximum # of cards already detected (%d).\n", + ivtv_cards_active); + spin_unlock_irq(&ivtv_lock); + return -ENOMEM; + } + + itv = &ivtv_cards[ivtv_cards_active]; + itv->dev = dev; + itv->num = ivtv_cards_active; + + ivtv_cards_active++; + + spin_unlock_irq(&ivtv_lock); + + /* always remember what you think the irq mask should be */ + itv->irqmask = 0; + +#ifdef YUV_FIXUP + itv->options.yuv_fixup = yuv_fixup; +#endif + itv->options.dec_yuv_buffers = dec_yuv_buffers; + itv->options.dec_mpg_buffers = mpg_buffers; + itv->options.yuv_buffers = yuv_buffers; + itv->options.mpg_buffers = mpg_buffers; + itv->options.vbi_buffers = vbi_buffers; + itv->options.num_devices = num_devices; + itv->options.dec_mpg_qlen = dec_mpg_qlen; + itv->options.dec_yuv_qlen = dec_yuv_qlen; + + /* Set FrameBuffer-ID to invalid */ + itv->fb_id = -1; + + switch (dev->subsystem_device) { + case IVTV_PCI_ID_250_V2: + case IVTV_PCI_ID_250_V4: + IVTV_DEBUG(IVTV_DEBUG_ERR, "Found an iTVC16 based chip\n"); + itv->card_type = IVTV_250_V2; + break; + case IVTV_PCI_ID_350_V1: + case IVTV_PCI_ID_350_V2: + IVTV_DEBUG(IVTV_DEBUG_ERR, "Found an iTVC15 based chip\n"); + itv->card_type = IVTV_350_V1; + break; + case IVTV_PCI_ID_250_V1: + case IVTV_PCI_ID_250_V3: + IVTV_DEBUG(IVTV_DEBUG_ERR, "Found an iTVC15 based chip\n"); + itv->card_type = IVTV_250_V1; + break; + default: /* Default to 250 v1 style */ + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Found an unknown chip, treating it like an iTVC15\n"); + itv->card_type = IVTV_250_V1; + break; + } + + init_MUTEX(&itv->enc_msem); + init_MUTEX(&itv->dec_msem); + init_MUTEX(&itv->sem_lock); + spin_lock_init(&itv->lock); + itv->base_addr = 
pci_resource_start(dev, 0); + itv->enc_mbox = NULL; + itv->dec_mbox = NULL; + itv->io_mem = NULL; + itv->reg_mem = NULL; + itv->i_flags = 0; + atomic_set(&itv->capturing, 0); + atomic_set(&itv->decoding, 0); + itv->user_dma_to_device_state = NULL; + + /* Prepare list for action! */ + INIT_LIST_HEAD(&itv->client_list); + + init_waitqueue_head(&itv->cap_w); + init_waitqueue_head(&itv->vsync_w); + init_waitqueue_head(&itv->dec_master_w); + init_timer(&itv->dec_timeout); + itv->dec_timeout.function = ivtv_dec_timeout; + itv->dec_timeout.data = (unsigned long)itv; + + tasklet_init(&itv->dma_sched_tq, ivtv_sched_DMA_tasklet, + (unsigned long)itv); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "base addr: 0x%08x\n", itv->base_addr); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Enabling pci device\n"); + if (pci_enable_device(dev)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Can't enable device %d!\n", + itv->num); + retval = -EIO; + goto err; + } + if (pci_set_dma_mask(dev, 0xffffffff)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + KERN_WARNING + "No suitable DMA available on card %d.\n", itv->num); + retval = -EIO; + goto err; + } + if (!request_mem_region + (pci_resource_start(dev, 0), IVTV_IOREMAP_SIZE, IVTV_DEVNAME)) { + retval = -EIO; + goto err; + } + + /* Check for bus mastering */ + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Bus Mastering is not enabled\n"); + retval = -ENXIO; + goto free_mem; + } else { + IVTV_DEBUG(IVTV_DEBUG_INFO, "Bus Mastering Enabled."); + } + + pci_read_config_byte(dev, PCI_CLASS_REVISION, &itv->card_rev); + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "%d (rev %d) at %02x:%02x.%x, ", + itv->dev->device, itv->card_rev, dev->bus->number, + PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); + IVTV_DEBUG(IVTV_DEBUG_INFO, + " irq: %d, latency: %d, memory: 0x%lx\n", itv->dev->irq, + pci_latency, (unsigned long)itv->base_addr); + + /*map io memory */ + IVTV_DEBUG(IVTV_DEBUG_INFO, 
"attempting ioremap at 0x%08x len 0x%08x\n", + itv->base_addr, IVTV_ENCDEC_SIZE); + itv->io_mem = ioremap_nocache(itv->base_addr, IVTV_ENCDEC_SIZE); + if (!itv->io_mem) { + IVTV_DEBUG(IVTV_DEBUG_ERR, IVTV_IOREMAP_ERROR); + retval = -ENOMEM; + goto free_mem; + } + + /*map registers memory */ + IVTV_DEBUG(IVTV_DEBUG_INFO, "attempting ioremap at 0x%08x len 0x%08x\n", + itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); + itv->reg_mem = + ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); + if (!itv->reg_mem) { + IVTV_DEBUG(IVTV_DEBUG_ERR, IVTV_IOREMAP_ERROR); + retval = -ENOMEM; + goto free_io; + } + + IVTV_DEBUG(IVTV_DEBUG_IRQ, "Masking interrupts\n"); + /* clear interrupt mask, effectively disabling interrupts */ + ivtv_set_irq_mask(itv, 0xffffffff); + + retval = request_irq(itv->dev->irq, ivtv_irq_handler, + SA_SHIRQ | SA_INTERRUPT, IVTV_DEVNAME, + (void *)itv); + if (retval) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "failed to register irq %d\n", + retval); + goto free_io; + } + + /* save itv in the pci struct for later use */ + pci_set_drvdata(dev, itv); + + /* active i2c */ + itv->i2c_command = (I2C_TIMING); + IVTV_DEBUG(IVTV_DEBUG_INFO, "activating i2c..\n"); + if (init_ivtv_i2c(itv)) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "i2c died! 
unloading\n"); + goto free_irq; + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Active card count: %d.\n", + ivtv_cards_active); + + /*write firmware */ + retval = ivtv_firmware_init(itv); + if (retval) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error initializing.\n"); + retval = -ENOMEM; + goto free_i2c; + } + + /*search for encoder/decoder mailboxes */ + IVTV_DEBUG(IVTV_DEBUG_INFO, "About to search for mailboxes\n"); + ivtv_find_firmware_mailbox(itv); + + if ((itv->enc_mbox == NULL) && (itv->dec_mbox == NULL)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error locating firmware.\n"); + retval = -ENOMEM; + goto free_i2c; + } + + /*releasing unneeded iomapped memory (encoder+decoder) */ + //iounmap(itv->io_mem); + + /*remapping only needed io memory (mailboxes) */ + itv->enc_mbox = + ioremap(itv->base_addr + + ((u8 *) itv->enc_mbox - (u8 *) itv->io_mem), + IVTV_MBOX_MAX_BOXES * IVTV_MBOX_SIZE); + itv->dec_mbox = + ioremap(itv->base_addr + + ((u8 *) itv->dec_mbox - (u8 *) itv->io_mem), + IVTV_MBOX_MAX_BOXES * IVTV_MBOX_SIZE); + + /* clearing pointers */ + //itv->io_mem = NULL ; + + /*Try and get firmware versions */ + IVTV_DEBUG(IVTV_DEBUG_INFO, "Getting firmware version..\n"); + retval = ivtv_firmware_versions(itv); + if (retval) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "error %d getting version #!\n", + retval); + goto free_i2c; + } + + /* Allocate scatter-gather arrays */ + + //++MTY NASTY little bug!!! If user changes dec_mpg_buffers, + // memory corruption results with the old way! 
+ + /* encoder */ + itv->SGarray = (struct ivtv_SG_element *) + kmalloc(sizeof(struct ivtv_SG_element) * SGarray_size, GFP_KERNEL); + if (!(itv->SGarray)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error allocating SGarray[].\n"); + retval = -ENOMEM; + goto free_i2c; + } + + itv->SG_handle = pci_map_single(itv->dev, + (void *)&itv->SGarray[0], + (sizeof(struct ivtv_SG_element) * + SGarray_size), PCI_DMA_TODEVICE); + + if (itv->card_type == IVTV_350_V1) { + /* decoder */ + itv->DSGarray = (struct ivtv_SG_element *) + kmalloc(sizeof(struct ivtv_SG_element) * + DSGarray_size, GFP_KERNEL); + if (!(itv->DSGarray)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error allocating DSGarray[].\n"); + retval = -ENOMEM; + goto free_sg; + } + + itv->DSG_handle = pci_map_single(itv->dev, + (void *)&itv->DSGarray[0], + (sizeof(struct ivtv_SG_element) + * DSGarray_size), + PCI_DMA_TODEVICE); + } + + /* FIXME -temporary- setup tuner */ + IVTV_DEBUG(IVTV_DEBUG_INFO, "Setting Tuner\n"); + + if (tuner > -1) { + ivtv_call_i2c_client(itv, + IVTV_TUNER_I2C_ADDR, + TUNER_SET_TYPE, &tuner); + } + + /* set the standard */ + if (!ivtv_pal) + v.norm = VIDEO_MODE_NTSC; + else + v.norm = VIDEO_MODE_PAL; + + ivtv_call_i2c_client(itv, IVTV_TUNER_I2C_ADDR, VIDIOCSCHAN, &v); + + if (!ivtv_pal) { + /* set the channel */ + freq = 1076; /* ch. 
4 67250*16/1000 */ + ivtv_call_i2c_client(itv, IVTV_TUNER_I2C_ADDR, VIDIOCSFREQ, + &freq); + } + + retval = ivtv_v4l2_setup(itv); + if (retval) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Problem starting v4l2\n"); + goto ivtv_v4l2_fail; + } + + return 0; + + ivtv_v4l2_fail: + pci_unmap_single(itv->dev, + itv->DSG_handle, + (sizeof(struct ivtv_SG_element) * + DSGarray_size), PCI_DMA_TODEVICE); + kfree(itv->DSGarray); + free_sg: + pci_unmap_single(itv->dev, + itv->SG_handle, + (sizeof(struct ivtv_SG_element) * + SGarray_size), PCI_DMA_TODEVICE); + kfree(itv->SGarray); + free_i2c: + exit_ivtv_i2c(itv); + free_irq: + free_irq(itv->dev->irq, (void *)itv); + free_io: + ivtv_iounmap(itv); + free_mem: + release_mem_region(pci_resource_start(itv->dev, 0), IVTV_IOREMAP_SIZE); + err: + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error %d on init\n", retval); + + spin_lock_irq(&ivtv_lock); + ivtv_cards_active--; + spin_unlock_irq(&ivtv_lock); + return retval; +} + +static void ivtv_remove(struct pci_dev *pci_dev) +{ + struct ivtv *itv = pci_get_drvdata(pci_dev); + int x = 0; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Disabling interrupts!\n"); + ivtv_set_irq_mask(itv, 0xffffffff); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Stopping thread\n"); + atomic_set(&itv->decoding, 0); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Stopping card parts\n"); + x = ivtv_stop_firmware(itv); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Freeing buffers\n"); + + for (x = 0; x < itv->v4l2.streamcount; x++) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "Freeing stream %d!\n", x); + ivtv_free_queue(&itv->v4l2.streams[x].free_q); + ivtv_free_queue(&itv->v4l2.streams[x].full_q); + ivtv_free_queue(&itv->v4l2.streams[x].dma_q); + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Unregistering v4l devices!\n"); + ivtv_v4l2_cleanup(itv); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Freeing dma resources\n"); + + pci_unmap_single(itv->dev, itv->SG_handle, + (sizeof(struct ivtv_SG_element) * SGarray_size), + PCI_DMA_TODEVICE); + kfree(itv->SGarray); + + pci_unmap_single(itv->dev, itv->DSG_handle, + (sizeof(struct 
ivtv_SG_element) * DSGarray_size), + PCI_DMA_TODEVICE); + kfree(itv->DSGarray); + + exit_ivtv_i2c(itv); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "releasing irq\n"); + free_irq(itv->dev->irq, (void *)itv); + + if (itv->dev) { + ivtv_iounmap(itv); + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "releasing mem\n"); + if (itv) + release_mem_region(pci_resource_start(itv->dev, 0), + IVTV_IOREMAP_SIZE); + + /* FIXME free v4l2 stuff! */ + /* FIXME am i leaking kernel mem? */ + +} + +/* define a pci_driver for card detection */ +static struct pci_driver ivtv_pci_driver = { + name:"ivtv: iTVC15/16 mpg2 encoder card", + id_table:ivtv_pci_tbl, + probe:ivtv_probe, + remove:ivtv_remove, +}; + +#ifdef YUV_FIXUP +static int ivtv_YUV_fixup(struct ivtv_v4l2_stream *st, int count, + char *ubuf, struct ivtv_buffer *buf) +{ +/* + * int count = # of bytes to transfer to client + * st->ubytes = # of bytes written this frame + * ubuf = buffer to write to (user's buffer) + * buf = read buffer + * + */ + int src_width = 720; /* all known formats have src width of 720 */ + int Hoff, Voff; /* collectors for offsets to read position */ + int width, height; /* resolution of the capture stream */ + int curline; /* vertical line currently being processed */ + int maxline; /* height of combined frame */ + int cur_m_block; /* current horizontal offset of working mblock in this row */ + int maxblock; /* # of macroblocks in a row */ + int Hbytes; /* # of bytes to write to user this time around */ + int retval = 0; /* accumulator for total bytes written */ + int start; /* position in buf to read from */ + int buf_start; /* byte offset of first byte in this *buf */ + + height = st->format.fmt.pix.height; + width = st->format.fmt.pix.width; + maxblock = (width + 0xf) >> 4; + maxline = (int)(1.5 * height); /* 1 for Y, .5 for UV */ + /* Offset is always bufsize * buffer index + buf_start = (st->ubytes - buf->readpos); tested/works */ + + buf_start = IVTV_DMA_BUF_SIZE * buf->buffer.index; + + /* FIXME it may not be possible 
to get YUV width > 720 */ + // if (width > src_width) src_width=width; + + curline = (int)(st->ubytes / width); + + while (curline < maxline) { +// printk(" cl: %d, ml: %d\n", curline, maxline); + Voff = 16 * (curline & 0xf) + /* Voffset within MBlock */ + ((curline & 0xfff0) * src_width); /* Voffset of Mblock */ + + cur_m_block = (st->ubytes - (curline * width)) >> 4; + +/* printk("voff %d, macroVoff %d, Voff %d, cmb %d\n", (16 * (curline & 0xf)), + ((curline & 0xff0) * src_width), Voff, cur_m_block); +*/ + + while ((cur_m_block < maxblock) && (count > 0)) { + Hoff = (cur_m_block * 256) + /* mblock offset within line */ + /* Hoffset within mblock, usually 0 */ + ((st->ubytes - (curline * width)) & 0xf); + Hbytes = 16 - ((st->ubytes - (curline * width)) & 0xf); + + if (Hbytes > count) + Hbytes = count; + + start = Hoff + Voff; + + if (copy_to_user((char *)((u32) ubuf + retval), + (u32 *) ((u32) buf->buffer.m.userptr + + (start - buf_start)), + Hbytes)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "copy to user failed\n"); + return -EFAULT; + } + + count -= Hbytes; + retval += Hbytes; + st->ubytes += Hbytes; + + cur_m_block++; + } + + /* if user can't handle anymore data or buffer empty */ + curline++; + if ((count == 0)) /*|| ((curline * src_width) % IVTV_DMA_BUF_SIZE) == 0) */ + return retval; + } + + /* FIXME i don't think you should ever get here */ + IVTV_DEBUG(IVTV_DEBUG_ERR, + "You've just sailed off the edge of this function\n"); + return retval; +} +#endif + +long ivtv_read(struct ivtv_open_id *id, char *ubuf, size_t count, int block) +{ + int x, sleepctr, datalen, retval = 0, freed = 0; + struct ivtv *itv = id->itv; + size_t newcount; + unsigned long tid; + struct ivtv_buffer *buf; + struct ivtv_v4l2_stream *st = &itv->v4l2.streams[id->type]; + DECLARE_WAITQUEUE(wait, current); + + IVTV_DEBUG(IVTV_DEBUG_INFO, " Read stream.. 
\n"); + + if (atomic_read(&itv->capturing) == 0 && (st->id == -1)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Stream not initialized before read(shouldn't happen)\n"); + return -EIO; + } + + /* FIXME find a way to gracefully exit capture */ + + sleepctr = retval = 0; + buf = NULL; + + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&st->waitq, &wait); + do { + if ((itv->trans_id == 0) && (sleepctr >= IVTV_MAX_DATA_SLEEP)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Timeout waiting for data!\n"); + retval = -EIO; + break; + } + + buf = ivtv_deq_peek_head(itv, &st->full_q); + if (buf) { + break; + } else { + + /* Done capturing? */ + if (!test_bit(IVTV_F_S_CAP, &st->s_flags)) { + IVTV_DEBUG(IVTV_DEBUG_INFO, + "No more data to send, returning 0\n"); + set_current_state(TASK_RUNNING); + remove_wait_queue(&st->waitq, &wait); + return 0; + } + } + + if (!block) { + retval = -EAGAIN; + break; + } + + ivtv_sleep_timeout(IVTV_SLEEP_WAIT); + + if (signal_pending(current)) + retval = -ERESTARTSYS; + + sleepctr++; + } while (!retval); + + set_current_state(TASK_RUNNING); + remove_wait_queue(&st->waitq, &wait); + + /* an error (or signal) occured */ + if (retval) { + return retval; + } + + /* Skip the first 4 bytes of mpg streams to help out + * finicky decoders.. 
but not for iTVC16 */ + if ((id->type == 0) && (itv->first_read == 1) && + (itv->card_type != IVTV_250_V2)) { + for (x = 0; x < buf->buffer.bytesused - 4; x++) { + unsigned char *p; + itv->first_read = 0; + p = (unsigned char *)buf->buffer.m.userptr + + buf->readpos + x; + if (!p[0] && !p[1] && p[2] == 1) { + IVTV_DEBUG(IVTV_DEBUG_INFO, + "Stripping first 4 bytes\n"); + buf->buffer.bytesused -= x; + buf->readpos += x; + break; + } + } + } + + /* data ready */ + /* copy it to the client */ + while ((count > 0) && (buf->buffer.bytesused > 0)) { + newcount = count; + datalen = buf->buffer.bytesused; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "datalen 0x%08x\n", datalen); + + if (newcount > datalen) + newcount = datalen; + +#ifdef YUV_FIXUP + if ((id->type == 1) && (itv->options.yuv_fixup)) { + newcount = + ivtv_YUV_fixup(st, newcount, ubuf + retval, buf); + if (newcount < 0) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error fixing up YUV!\n"); + return newcount; + } + } else { +#endif + if (copy_to_user((char *)((u32) ubuf + retval), + (u32 *) ((u32) buf->buffer.m.userptr + + buf->readpos), newcount)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "copy to user failed\n"); + return -EFAULT; + } +#ifdef YUV_FIXUP + } +#endif + + buf->readpos += newcount; + retval += newcount; + count -= newcount; + buf->buffer.bytesused -= newcount; + IVTV_DEBUG(IVTV_DEBUG_INFO, + "new datalen 0x%08x\n", buf->buffer.bytesused); + + /* if buffer is empty or we've read the whole frame */ + if ((buf->buffer.bytesused == 0)) { + ivtv_move_buf(itv, &st->full_q, &st->free_q, buf); + freed++; + + buf = ivtv_deq_peek_head(itv, &st->full_q); + if (buf) { + tid = buf->buffer.sequence; + if (buf->buffer.sequence != tid) { + /* end of frame! */ + st->ubytes = 0; + break; + } + } else { + /* user wanted more than we had. 
Since + * queues are filled in irq time, + * that means end of frame */ + st->ubytes = 0; + break; + } + } + } /* end of while */ + + /* if we put some buffers back in the free queue, kick off dma + * scheduling if card was stopped due to overflow before */ + if (freed && test_and_clear_bit(IVTV_F_S_OVERFLOW, &st->s_flags)) { + spin_lock_irq(&itv->lock); + ivtv_sched_DMA(itv); + spin_unlock_irq(&itv->lock); + } + + /*FIXME unlock */ + if (retval != 0) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "Returning %d\n", retval); + return retval; + } + + /* Shouldn't ever get here */ + return -EIO; +} + +static void ivtv_swap_copy(const char *buf, const char *ubuf, size_t count) +{ + u32 *src, *dst; + + src = (u32 *) ubuf; + dst = (u32 *) buf; + +#ifdef CONFIG_X86 + while ((u32) src <= (u32) ubuf + count) { /* byteswap while copying */ + __asm__ __volatile__("bswap %0":"=r"(*dst):"0"(*src)); + src++; + dst++; + } +#else + { + int y; + /* Old slow memcpy then swab */ + memcpy((void *)buf, (void *)ubuf, count); + for (y = 0; y < count; y += 4) { + swab32s((u32 *) ((u32) buf + y)); + } + } +#endif +} + +static int ivtv_fill_dec_buffers(struct ivtv_open_id *id, const char *ubuf, + size_t count, int block) +{ + struct ivtv *itv = id->itv; + struct ivtv_v4l2_stream *stream = &itv->v4l2.streams[id->type]; + struct ivtv_buffer *buf; + int copybytes = 0, bytesread = 0, retval = 0; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "ivtv_fill_dec_buffers, %d bytes\n", count); + + /* Read what the user gives us. queue it for DMA after each buffer + * also enqueue partly-full buffers. */ + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DEC: free_q: %d elements\n", stream->free_q.elements); + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DEC: dma_q: %d elements\n", stream->dma_q.elements); + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DEC: full_q: %d elements\n", stream->full_q.elements); + + /* FIXME will only do one write. Has underlying code to handle more + * than one, just need loop control logic for it, if it's + * deemed necessary. 
*/ + while (bytesread == 0) { + DECLARE_WAITQUEUE(wait, current); + unsigned long flags; + + buf = NULL; + add_wait_queue(&stream->waitq, &wait); + do { + set_current_state(TASK_INTERRUPTIBLE); + buf = ivtv_deq_peek_head(itv, &stream->free_q); + if (buf) + break; + + if (!block) { + retval = -EAGAIN; + break; + } + + schedule(); + + if (signal_pending(current)) { + retval = -ERESTARTSYS; + break; + } + + spin_lock_irqsave(&itv->lock, flags); + ivtv_dec_sched_DMA(id->itv); + spin_unlock_irqrestore(&itv->lock, flags); + } while (!buf); + set_current_state(TASK_RUNNING); + remove_wait_queue(&stream->waitq, &wait); + + if (retval) + return retval; + + /* bytes left to send > free bytes in current buffer */ + if ((count - bytesread) > + (IVTV_DMA_DEC_BUF_SIZE - buf->buffer.bytesused)) { + copybytes = + IVTV_DMA_DEC_BUF_SIZE - buf->buffer.bytesused; + } else { + copybytes = count - bytesread; + } + + /* copy data */ + /* FIXME */ + IVTV_DEBUG(IVTV_DEBUG_INFO, "copying %d bytes to 0x%08x" + " (buffer free = %d, used = %d)\n", + copybytes, + (int)(buf->buffer.m.userptr + + buf->buffer.bytesused), + (int)(IVTV_DMA_DEC_BUF_SIZE - + buf->buffer.bytesused), buf->buffer.bytesused); + + ivtv_swap_copy((char *)(buf->buffer.m.userptr + + buf->buffer.bytesused), + (char *)((u32) ubuf + bytesread), copybytes); + + bytesread += copybytes; + buf->buffer.bytesused += copybytes; + + /* enq buffer when full */ + if (buf->buffer.bytesused == IVTV_DMA_DEC_BUF_SIZE) + ivtv_move_buf(itv, &stream->free_q, &stream->full_q, + buf); + } + + return bytesread; +} + +/* + * Schedule host -> hardware DMA of one buffer from the stream (MPEG or YUV) + * with the most recent request for more data, but only if dec->dec_needs_data + * is set. + * + * This code can be called from both interrupt context as well as userspace; + * it does the right things in either case. 
If called from userspace, it may + * block only when the same call is in progress in interrupt context (since + * interrupt context is not allowed to block.) + * + * @returns 0 if the buffer was queued to dma_q and the DMA was initiated. + * + * -EAGAIN if either the full_q queue is empty or the function is + * already in progress in interrupt context. + * + * -ENOSPC if there is no space remaining in the hardware's buffer. + * This should never happen if proper flow control is used. + * + * -EINVAL if the most recent "data needed" interrupt requested an + * unknown stream type (should really never happen!) + * + * -EBUSY if a DMA on the same queue is already in progress (should + * never happen) + * + */ +static void ivtv_dec_sched_DMA(struct ivtv *itv) +{ + int ret = 0, x = 0, bytes_written = 0, type = 0, max = 2; + struct ivtv_buffer *buf; + struct ivtv_v4l2_stream *stream = NULL; + u32 data[IVTV_MBOX_MAX_DATA], result; + u32 mem_offset, mem_size, hw_stream_type, buffer_bytes; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "ivtv_dec_sched_DMA\n"); + + /* fancy way of saying "if (ivtv->dec_needs_data == 0)" */ + if (!test_bit(IVTV_F_I_NEEDS_DATA, &itv->i_flags)) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: no data needed\n"); + return; + } + + /* Get Card Mem Dst address from mailbox 10 */ + ret = ivtv_api_getresult_nosleep(&itv->dec_mbox[9], &result, &data[0]); + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DEC: Mailbox 10: 0x%08x 0x%08x 0x%08x 0x%08x\n", data[0], + data[1], data[2], data[3]); + + hw_stream_type = data[0]; + + switch (hw_stream_type) { + case 0: /* MPG */ + type = IVTV_DEC_STREAM_TYPE_MPG; + IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: mpg data\n"); + break; + case 2: /* YUV */ + type = IVTV_DEC_STREAM_TYPE_YUV; + IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: yuv data\n"); + break; + default: + IVTV_DEBUG(IVTV_DEBUG_ERR, "DEC: unknown stream type %d\n", + data[0]); + max = 0; + return; + } + + stream = &itv->v4l2.streams[type]; + + if (test_bit(IVTV_F_I_BUSY, &itv->i_flags)) { + 
IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: decoder busy, delaying\n"); + set_bit(IVTV_F_I_NEEDS_DATA, &itv->i_flags); + return; + } + + /* If we got this far, we have data to send and it wants it */ + clear_bit(IVTV_F_I_NEEDS_DATA, &itv->i_flags); + + /* Get card mem addr and size from data array */ + mem_offset = data[1]; + mem_size = data[2]; + buffer_bytes = data[3]; /* # bytes in card's buffer */ + + while ((max > x) && (mem_size > bytes_written)) { /* send a maximum of + 'max' buffers */ + buf = __ivtv_deq_peek_head(&stream->full_q); + if (buf == NULL) { + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DEC: No more buffers to send\n"); + break; + } +#if 1 + if (mem_size < buf->buffer.bytesused) { + itv->DSGarray[x].size = mem_size; + } else { + itv->DSGarray[x].size = buf->buffer.bytesused; + } +#else + /* just send the whole buffer */ + itv->DSGarray[x].size = buf->buffer.bytesused; +#endif + buf->dma_handle = pci_map_single(itv->dev, + (void *)(buf->buffer.m. + userptr + + buf->readpos), + itv->DSGarray[x].size, + PCI_DMA_TODEVICE); + + itv->DSGarray[x].src = buf->dma_handle; + itv->DSGarray[x].dst = (mem_offset + bytes_written + + IVTV_FIRM_SEARCH_DECODER_START); + + buf->readpos += itv->DSGarray[x].size; + bytes_written += itv->DSGarray[x].size; + buf->buffer.bytesused -= itv->DSGarray[x].size; + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "1st 32bits of buffer %d are 0x%08x\n", + buf->buffer.index, *(u32 *) buf->buffer.m.userptr); + IVTV_DEBUG(IVTV_DEBUG_INFO, + "DSGarray[%d]: 0x%08x, 0x%08x 0x%08x\n", x, + itv->DSGarray[x].src, itv->DSGarray[x].dst, + itv->DSGarray[x].size); + + /* buffer is empty? 
*/ + if (buf->buffer.bytesused == 0) { + __ivtv_del_buf(&stream->full_q, buf); + __ivtv_enq_buf(&stream->dma_q, buf); + } + x++; + } + + if (x == 0) { /* no full buffers */ + IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: Nothing to send\n"); + set_bit(IVTV_F_I_NEEDS_DATA, &itv->i_flags); + return; + } + //Set Last Element Bit + itv->DSGarray[x - 1].size |= 0x80000000; + + //Schedule DMA XFER + data[0] = itv->DSG_handle; + data[1] = bytes_written; + data[2] = hw_stream_type; + + /* note that we're DMA'ing */ + mod_timer(&itv->dec_timeout, jiffies + DEC_DMA_TIMEOUT); + set_bit(IVTV_F_S_DMAP, &stream->s_flags); + set_bit(IVTV_F_I_BUSY, &itv->i_flags); + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_DEC_DMA_FROM_HOST, + &result, 3, &data[0]); + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "Sched DEC dma: addr: 0x%08x, array_size 0x%08x, type 0x%08x\n", + data[0], data[1], data[2]); +} + +static void ivtv_dec_DMA_done(struct ivtv *itv) +{ + struct ivtv_v4l2_stream *stream = NULL; + struct ivtv_buffer *buf; + int y, stmtype = -1, freed = 0; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: DMA Done tasklet\n"); + + if (!test_and_clear_bit(IVTV_F_I_BUSY, &itv->i_flags)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "DMAP not set\n"); + ivtv_show_debug_flags(itv); + return; + } +#if 0 + del_timer(&itv->dec_timeout); +#else + mod_timer(&itv->dec_timeout, jiffies + DEC_DMA_TIMEOUT); +#endif + + for (y = IVTV_DEC_STREAM_TYPE_MPG; y < itv->v4l2.streamcount; y++) { + if (test_and_clear_bit + (IVTV_F_S_DMAP, &itv->v4l2.streams[y].s_flags)) { + stmtype = y; + break; + } + } + + /* Handle OSD DMA */ + if (test_and_clear_bit(IVTV_F_I_OSD_DMA, &itv->i_flags)) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "OSD: DMA Done\n"); + + /* wake all the normal streams, in case they fell asleep */ + for (y = IVTV_DEC_STREAM_TYPE_MPG; y < itv->v4l2.streamcount; + y++) { + wake_up(&itv->v4l2.streams[y].waitq); + } + + wake_up(&itv->dec_master_w); + return; + } + + if (stmtype < 0) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DEC: Got DMA-done, not expecting one\n"); 
+ ivtv_show_debug_flags(itv); + return; + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: Stream %d dma-done\n", y); + stream = &itv->v4l2.streams[y]; + + while ((buf = __ivtv_deq_buf(&stream->dma_q)) != NULL) { + IVTV_ASSERT(buf->dma_handle != IVTV_DMA_UNMAPPED); + pci_unmap_single(itv->dev, buf->dma_handle, + IVTV_DMA_DEC_BUF_SIZE, PCI_DMA_TODEVICE); + buf->dma_handle = IVTV_DMA_UNMAPPED; + buf->buffer.bytesused = 0; + buf->readpos = 0; + + /* put in the 'done' queue */ + __ivtv_enq_buf(&stream->free_q, buf); + freed++; + } + + /* if we put some buffers back in the free queue, kick off dma + * scheduling if card was stopped due to overflow before */ + if (freed && test_and_clear_bit(IVTV_F_S_OVERFLOW, &stream->s_flags)) + ivtv_sched_DMA(itv); + + /* wake up queue filler */ + wake_up(&stream->waitq); + wake_up(&itv->dec_master_w); +} + +int ivtv_get_timing_info(struct ivtv *itv, struct ivtv_ioctl_framesync *info) +{ + u32 ret, result, data[IVTV_MBOX_MAX_DATA]; + + int suicidecounter = 0; + + memset(info, 0x00, sizeof(struct ivtv_ioctl_framesync)); + + /* Occasionally, we'll get a weird, invalid number for + * frames played. fortunately, it sets the SCR timestamp to 0 + * in that case, which it never is otherwise. cool, huh */ + while (info->scr == 0) { /* eliminate bogus values, FIXME ugly */ + ret = ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_DEC_TIMING_INFO, &result, 0, &data[0]); + if (ret) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "DEC: err sending timing info\n"); + return ret; + } + + info->frame = data[0]; + info->pts = ((u64) data[2] << 32) | (u64) data[1]; + info->scr = ((u64) data[4] << 32) | (u64) data[3]; + + if (suicidecounter++ > 10) { /* endless loops are bad! 
*/ + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Timeout getting frames played\n"); + return -1; + } + if (info->scr == 0) + ivtv_sleep_timeout(HZ / 50); + } + + return 0; +} + +ssize_t ivtv_write(struct ivtv_open_id * id, const char *ubuf, size_t count, + int block) +{ + int bytes_written = 0, ret = 0; + unsigned long flags; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "ivtv_write\n"); + + while (bytes_written < count) { /* completely use up user data + * before returning */ + /* buffer the data - this may block waiting on free buffers */ + ret = ivtv_fill_dec_buffers(id, ubuf + bytes_written, + (count - bytes_written), block); + + /* FIXME temporary hack to make sure non-blocking works */ + /* send it! it'll return right away if no data needed */ + spin_lock_irqsave(&id->itv->lock, flags); + ivtv_dec_sched_DMA(id->itv); + spin_unlock_irqrestore(&id->itv->lock, flags); + + if (ret < 0) { + break; + } else { + bytes_written += ret; + } + } + + IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: returning %d\n", bytes_written); + return bytes_written ? 
bytes_written : ret; +} + +unsigned int ivtv_dec_poll(struct file *filp, poll_table * wait) +{ + struct ivtv_open_id *id = filp->private_data; + unsigned int mask = 0; + unsigned long flags; + + /* add stream's waitq to the poll list */ + poll_wait(filp, &id->itv->v4l2.streams[id->type].waitq, wait); + + /* FIXME temporary hack to restart DMA in case the decoder's + been busy for a while and the application is using poll to + see if it can write */ + spin_lock_irqsave(&id->itv->lock, flags); + ivtv_dec_sched_DMA(id->itv); + spin_unlock_irqrestore(&id->itv->lock, flags); + + if (ivtv_deq_peek_head + (id->itv, &id->itv->v4l2.streams[id->type].free_q)) + mask |= POLLOUT | POLLWRNORM; /* Writable */ + + IVTV_DEBUG(IVTV_DEBUG_INFO, "DEC: dec_poll returning 0x%x\n", mask); + if (!mask) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "Decoder fullq size %d\n", + id->itv->v4l2.streams[id->type].full_q.elements); + IVTV_DEBUG(IVTV_DEBUG_INFO, "Decoder freeq size %d\n", + id->itv->v4l2.streams[id->type].free_q.elements); + IVTV_DEBUG(IVTV_DEBUG_INFO, "Decoder dmaq size %d\n", + id->itv->v4l2.streams[id->type].dma_q.elements); + } + return mask; +} + +unsigned int ivtv_poll(struct file *filp, poll_table * wait) +{ + struct ivtv_open_id *id = filp->private_data; + unsigned int mask = 0; + + /* add stream's waitq to the poll list */ + poll_wait(filp, &id->itv->v4l2.streams[id->type].waitq, wait); + +#if 0 + if (down_interruptible(&id->itv->sem_lock)) + return -ERESTARTSYS; + + if (ivtv_get_free_elements + (id->itv, &id->itv->v4l2.streams[id->type].full_q)) + mask |= POLLIN | POLLRDNORM; /* readable */ + + up(&id->itv->sem_lock); +#else + mask |= POLLIN | POLLRDNORM; +#endif + return mask; +} + +#if 0 +static void ivtv_print_boxes(struct ivtv_mailbox *mbox) +{ + + int x, y; + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Sleeping for 10ms\n"); + ivtv_sleep_timeout(HZ / 100); + + if (NULL == mbox) { + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "Mailboxes not initialized!\n"); + return; + } + + for (x = 0; x <= 
IVTV_MBOX_MAX_BOXES; x++) { + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "mbox: 0x%08x, # %d\n", (u32) mbox, + x); + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "flags: 0x%08x ", + (u32) readl(&mbox->flags)); + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "cmd: 0x%08x\n", + readl(&mbox->cmd)); + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "result: 0x%08x ", + readl(&mbox->retval)); + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "timeout: 0x%08x\n", + readl(&mbox->timeout)); + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "Data:\n"); + for (y = 0; y < IVTV_MBOX_MAX_DATA; y++) { + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "[%02d]0x%08x, ", y, + readl(&mbox->data[y])); + if (2 == y % 3) + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "\n"); + } + /*Since mbox has type ptr, this should step it up */ + /* to the start of the next mbox */ + mbox++; + IVTV_DEBUG(IVTV_DEBUG_IOCTL, "\n"); + } +} +#endif + +int ivtv_close(struct ivtv_open_id *id) +{ + struct ivtv *itv = id->itv; + int ret = 0; + + /* sem_lock must be held */ + IVTV_ASSERT(ivtv_sem_count(&itv->sem_lock) <= 0); + + switch (id->type) { + case IVTV_DEC_STREAM_TYPE_MPG: /* decoder streams */ + case IVTV_DEC_STREAM_TYPE_YUV: + if (atomic_read(&itv->decoding)) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "close stopping decode\n"); + ret = ivtv_stop_decode(id); + } + break; + default: /* encoder streams */ + if (atomic_read(&itv->capturing)) { + IVTV_DEBUG(IVTV_DEBUG_INFO, "close stopping capture\n"); + ret = ivtv_stop_capture(id); + } + break; + } + + return ret; +} + +static int module_start(void) +{ + int loop_a; + + printk("ivtv: version %s (%s) loading\n", IVTV_VERSION_STRING(ivtv_rev), + IVTV_VERSION_COMMENT(ivtv_rev)); + + memset(&ivtv_cards[0], 0, IVTV_MAX_CARDS * sizeof(struct ivtv)); + + /* Validate parameters */ + if (((yuv_buffers > IVTV_MAX_YUV_BUFFERS) + || (yuv_buffers < IVTV_MIN_YUV_BUFFERS)) + && (yuv_buffers != 0)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error! 
yuv_buffers must be between 40 and 500\n"); + IVTV_DEBUG(IVTV_DEBUG_ERR, "Exiting..\n"); + return -1; + } +#ifdef YUV_FIXUP + if ((yuv_fixup != 0) && (yuv_fixup != 1)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error! yuv_fixup must be 0 or 1\n"); + IVTV_DEBUG(IVTV_DEBUG_ERR, "Exiting..\n"); + return -1; + } +#endif + if ((dec_mpg_buffers > IVTV_MAX_DEC_MPG_BUFFERS) + || (dec_mpg_buffers < IVTV_MIN_DEC_MPG_BUFFERS)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error! dec_mpg_buffers must be between 5 and 100\n"); + IVTV_DEBUG(IVTV_DEBUG_ERR, "Exiting..\n"); + return -1; + } + + if (((dec_yuv_buffers > IVTV_MAX_DEC_YUV_BUFFERS) + || (dec_yuv_buffers < IVTV_MIN_DEC_YUV_BUFFERS)) + && (dec_yuv_buffers != 0)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error! dec_yuv_buffers must be between 17 and 500\n"); + IVTV_DEBUG(IVTV_DEBUG_ERR, "Exiting..\n"); + return -1; + } + + if ((mpg_buffers > IVTV_MAX_MPG_BUFFERS) + || (mpg_buffers < IVTV_MIN_MPG_BUFFERS)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error! mpg_buffers must be between 15 and 100\n"); + IVTV_DEBUG(IVTV_DEBUG_ERR, "Exiting..\n"); + return -1; + } + + if ((dec_yuv_qlen > dec_yuv_buffers) + || (dec_yuv_qlen < IVTV_MIN_DEC_YUV_QLEN)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error! dec_yuv_qlen must be between %d and %d\n", + IVTV_MIN_DEC_YUV_QLEN, dec_yuv_buffers); + IVTV_DEBUG(IVTV_DEBUG_ERR, "Exiting..\n"); + return -1; + } + + if ((dec_mpg_qlen > dec_mpg_buffers) + || (dec_mpg_qlen < IVTV_MIN_DEC_MPG_QLEN)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error! dec_mpg_qlen must be between %d and %d\n", + IVTV_MIN_DEC_MPG_QLEN, dec_mpg_buffers); + IVTV_DEBUG(IVTV_DEBUG_ERR, "Exiting..\n"); + return -1; + } + + if ((vbi_buffers > IVTV_MAX_VBI_BUFFERS) + || (vbi_buffers < IVTV_MIN_VBI_BUFFERS)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error! vbi_buffers must be between 3 and 100\n"); + IVTV_DEBUG(IVTV_DEBUG_ERR, "Exiting..\n"); + return -1; + } + + if ((num_devices > IVTV_MAX_CARDS) || (num_devices < 1)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, + "Error! 
num_devices must be between 1 and 9 (not working yet)\n"); + IVTV_DEBUG(IVTV_DEBUG_ERR, "Exiting..\n"); + return -1; + } + + if (ivtv_debug < 0) + IVTV_DEBUG(IVTV_DEBUG_ERR, "debug value must be >= 0!\n"); + + IVTV_DEBUG(IVTV_DEBUG_INFO, + "Loading, I'll try to detect %d devices!\n", num_devices); + IVTV_DEBUG(IVTV_DEBUG_INFO, " .. running on kernel %s\n", UTS_RELEASE); + IVTV_DEBUG(IVTV_DEBUG_INFO, + "Setting some variables to invalid for detection\n"); + + for (loop_a = 0; loop_a < IVTV_MAX_CARDS; loop_a++) { + ivtv_cards[loop_a].num = -1; + ivtv_cards[loop_a].dev = NULL; + } + + SGarray_size = (mpg_buffers + yuv_buffers + vbi_buffers) * 2; + DSGarray_size = (dec_mpg_buffers + dec_yuv_buffers) * 2; + + IVTV_DEBUG(IVTV_DEBUG_ERR, "SGarray_size = %d, DSGarray_size = %d\n", + SGarray_size, DSGarray_size); + + IVTV_DEBUG(IVTV_DEBUG_INFO, "Scanning PCI bus..\n"); + if (pci_module_init(&ivtv_pci_driver)) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "Error detecting PCI card\n"); + return -ENODEV; + } + + printk("ivtv: loaded\n"); + return 0; +} + +static void module_cleanup(void) +{ + + pci_unregister_driver(&ivtv_pci_driver); + IVTV_DEBUG(IVTV_DEBUG_ERR, "You've not seen the last of willy!\n"); +#ifdef AEW_DEBUG + DUMP_BAD_ALLOC_TABLE; +#endif // AEW_DEBUG +} + +EXPORT_SYMBOL(ivtv_pal); +EXPORT_SYMBOL(ivtv_set_irq_mask); +EXPORT_SYMBOL(ivtv_cards_active); +EXPORT_SYMBOL(ivtv_cards); +EXPORT_SYMBOL(ivtv_api); +EXPORT_SYMBOL(ivtv_clear_irq_mask); +EXPORT_SYMBOL(ivtv_debug); + +module_init(module_start); +module_exit(module_cleanup); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/ivtv-fb.c current/drivers/media/video/ivtv-fb.c --- reference/drivers/media/video/ivtv-fb.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/media/video/ivtv-fb.c 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,1039 @@ +/* + * iTVC15 Framebuffer driver + * + * This module presents the iTVC15 OSD (onscreen display) framebuffer memory + * as a standard Linux /dev/fb style 
framebuffer device. The framebuffer has + * a 32 bpp packed pixel format with full alpha channel support. Depending + * on the TV standard configured in the ivtv module at load time, resolution + * is fixed at either 720x480 (NTSC) or 720x576 (PAL). + * + * Copyright (c) 2003 Matt T. Yourst + * + * Derived from drivers/video/vesafb.c + * Portions (c) 1998 Gerd Knorr + * + * This file is licensed under the GNU General Public License, version 2. + * + */ + +/* +# +# Instructions for making ivtv-fb work with XFree86: +# Add the following sections and parts thereof to /etc/X11/XF86Config: +# + +# +# NOTE: The monitor section is obtainable by running: +# fbset -fb /dev/fb1 -x +# (or /dev/fbX for whatever framebuffer ivtv-fb is on) +# +Section "Monitor" + Identifier "NTSC Monitor" + HorizSync 30-68 + VertRefresh 50-120 + Mode "720x480" + # D: 34.563 MHz, H: 37.244 kHz, V: 73.897 Hz + DotClock 34.564 + HTimings 720 752 840 928 + VTimings 480 484 488 504 + Flags "-HSync" "-VSync" + EndMode +EndSection + +Section "Device" + Identifier "Hauppauge PVR 350 iTVC15 Framebuffer" + Driver "fbdev" + Option "fbdev" "/dev/fb1" # <-- modify if using another device + BusID "0:10:0" +EndSection + +Section "Screen" + Identifier "TV Screen" + Device "Hauppauge PVR 350 iTVC15 Framebuffer" + Monitor "NTSC Monitor" + DefaultDepth 24 + DefaultFbbpp 32 + Subsection "Display" + Depth 24 + FbBpp 32 + Modes "720x480" + EndSubsection +EndSection + +Section "ServerLayout" + ... + + Screen 0 "Screen 1" # << (your computer monitor) + + # (add the following line) + Screen 1 "TV Screen" RightOf "Screen 1" # << (TV screen) + + ... +EndSection + +# +# Then start X as usual; both your normal (computer) monitor and the +# NTSC or PAL TV monitor should display the default X background. +# +# Note the "RightOf" clause above: if you move the mouse off the right +# side of the computer screen, the pointer should appear on your TV +# screen. Keyboard events will go to windows in either screen. 
+# +# To start a program (e.g., xterm) on the TV only: +# +# export DISPLAY=:0.1 (i.e., X server #0, screen #1 = TV) +# xterm& +# +# There is also a way to join both the computer monitor and TV into +# one giant virtual screen using the Xinerama extension, but I haven't +# tried it. Doing so may not be such a good idea anyway, as you obviously +# wouldn't want random X windows getting moved over the TV picture. + +A note on unloading the fb driver: + +If you want to be able to unload the framebuffer driver (and you aren't +already using fbcon), add this to your lilo config: + +video=vc:x-y + +where x is the first fb device to allocate and y is the second. If you +already have a fb driver loaded, fiddle with the numbers so all the consoles +are already allocated. For me, I just set it to 0-0, i.e.: + +in lilo.conf: + +image=/vmlinuz + label=linux + read-only + append="root=/dev/hda1 video=vc:0-0" + +--OR-- +on bootup, do this +LILO: linux video=vc:0-0 + +According to how I read /usr/src/linux/drivers/video/fbmem.c and +/usr/src/linux/drivers/char/console.c, that should disable the +console hijacks, and allow you to unload the driver. 
+ +-tmk +# +# +# +# +# +# +# +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_MTRR +#include +#endif + +#include "ivtv.h" + +/* + * card parameters + */ + +static int ivtv_fb_card_id; + +/* Card selected as framebuffer for this module instance: */ +static struct ivtv *ivtv_fb; + +/* card */ +static unsigned long video_base; /* physical addr */ +static unsigned long video_rel_base; /* address relative to base of decoder memory */ +static int video_size; +static char *video_vbase; /* mapped */ + +/* mode */ +static int video_width; +static int video_height; +static int video_height_virtual; +static int video_linelength; +static unsigned long shadow_framebuf_offset; +static unsigned long shadow_framebuf_size; + +/* + * ivtv API calls for framebuffer related support + */ + +static inline int ivtv_api_fb_get_framebuffer(struct ivtv *itv, + void **fbbase, int *fblength) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + int rc; + + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_get_framebuffer\n"); + + rc = ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_FB_GET_FRAMEBUFFER, &result, 0, &data[0]); + *fbbase = (void *)data[0]; + *fblength = data[1]; + return rc; +} + +static inline int ivtv_api_fb_get_pixel_format(struct ivtv *itv) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_get_pixel_format\n"); + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_GET_PIXEL_FORMAT, + &result, 0, &data[0]); + return data[0]; +} + +static inline int ivtv_api_fb_set_pixel_format(struct ivtv *itv, int format) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + data[0] = format; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_set_pixel_format\n"); + + ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_FB_SET_PIXEL_FORMAT, &result, 1, &data[0]); + return result; +} + +static inline int ivtv_api_fb_get_state(struct ivtv *itv) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + 
IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_get_state\n"); + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_GET_STATE, + &result, 0, &data[0]); + return data[0]; +} + +static inline int ivtv_api_fb_set_state(struct ivtv *itv, int enabled) +{ + u32 params[IVTV_MBOX_MAX_DATA], result; + params[0] = enabled; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_set_state\n"); + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_SET_STATE, + &result, 1, ¶ms[0]); + return result; +} + +static inline int ivtv_api_fb_set_framebuffer_window(struct ivtv *itv, + int left, int top, + int width, int height) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_set_framebuffer_window\n"); + data[0] = width; + data[1] = height; + data[2] = left; + data[3] = top; + + ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_FB_SET_FRAMEBUFFER_WINDOW, &result, 4, &data[0]); + return result; +} + +static inline int ivtv_api_fb_get_osd_coords(struct ivtv *itv, + struct ivtv_osd_coords *osd) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_get_osd_coords\n"); + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_GET_OSD_COORDS, + &result, 0, &data[0]); + + osd->offset = data[0] - video_rel_base; + osd->max_offset = video_size; + osd->pixel_stride = data[1]; + osd->lines = data[2]; + osd->x = data[3]; + osd->y = data[4]; + + return result; +} + +static inline int ivtv_api_fb_set_osd_coords(struct ivtv *itv, const struct ivtv_osd_coords + *osd) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_set_osd_coords\n"); + data[0] = osd->offset; + data[1] = osd->pixel_stride; + data[2] = osd->lines; + data[3] = osd->x; + data[4] = osd->y; + + // FIXME maybe wait on vsync? 
+ ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_SET_OSD_COORDS, + &result, 5, &data[0]); + return result; +} + +static inline int ivtv_api_fb_get_screen_coords(struct ivtv *itv, + struct rectangle *r) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_get_screen_coords\n"); + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_GET_SCREEN_COORDS, + &result, 0, &data[0]); + + r->x0 = data[0]; + r->y0 = data[1]; + r->x1 = data[2]; + r->y1 = data[3]; + + return result; +} + +static inline int ivtv_api_fb_set_screen_coords(struct ivtv *itv, + const struct rectangle *r) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_set_screen_coords\n"); + data[0] = r->x0; + data[1] = r->y0; + data[2] = r->x1; + data[3] = r->y1; + + ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_FB_SET_SCREEN_COORDS, &result, 4, &data[0]); + return result; +} + +static inline int ivtv_api_fb_get_global_alpha(struct ivtv *itv) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_get_global_alpha\n"); + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_GET_GLOBAL_ALPHA, + &result, 0, &data[0]); + return data[1]; +} + +static inline int ivtv_api_fb_set_global_alpha(struct ivtv *itv, + int enable_global, + int alpha, int enable_local) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_set_global_alpha\n"); + data[0] = enable_global; + data[1] = alpha; + data[2] = !enable_local; + ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_FB_SET_GLOBAL_ALPHA, &result, 3, &data[0]); + return result; +} + +static inline int ivtv_api_fb_get_flicker_state(struct ivtv *itv) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_get_flicker_state\n"); + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_GET_FLICKER_STATE, + &result, 0, &data[0]); + return data[0]; +} + +static inline int 
ivtv_api_fb_set_flicker_state(struct ivtv *itv, int enabled) +{ + u32 params[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_set_flicker_state\n"); + params[0] = enabled; + + ivtv_api(itv->dec_mbox, &itv->dec_msem, + IVTV_API_FB_SET_FLICKER_STATE, &result, 1, ¶ms[0]); + return result; +} + +static inline int ivtv_api_fb_blt_fill(struct ivtv *itv, int rasterop, + int alpha_mode, int alpha_mask_mode, + int width, int height, int destmask, + u32 destaddr, int deststride, u32 value) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_api_fb_blt_fill\n"); + data[0] = rasterop; + data[1] = alpha_mode; + data[2] = alpha_mask_mode; + data[3] = width; + data[4] = height; + data[5] = destmask; + data[6] = destaddr; + data[7] = deststride; + data[8] = value; + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_BLT_FILL, &result, + 9, &data[0]); + return result; +} + +static inline int ivtv_api_fb_blt_copy(struct ivtv *itv, int rasterop, + int alpha_mode, int alpha_mask_mode, + int width, int height, int destmask, + u32 destaddr, int deststride, + int sourcestride, int sourceaddr) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, + "ivtv_api_fb_blt_copy: width = %d, height = %d, destaddr = %d, deststride = %d, sourcestride = %d, sourceaddr = %d\n", + width, height, destaddr, deststride, sourcestride, + sourceaddr); + + data[0] = rasterop; + data[1] = alpha_mode; + data[2] = alpha_mask_mode; + data[3] = width; + data[4] = height; + data[5] = destmask; + data[6] = destaddr; + data[7] = deststride; + data[8] = sourcestride; + data[9] = sourceaddr; + + ivtv_api(itv->dec_mbox, &itv->dec_msem, IVTV_API_FB_BLT_COPY, &result, + 10, &data[0]); + return result; +} + +MODULE_PARM(ivtv_fb_card_id, "i"); +MODULE_PARM_DESC(ivtv_fb_card_id, + "ID number of ivtv card to use as framebuffer device (0-2)"); + +MODULE_LICENSE("GPL"); + +/* --------------------------------------------------------------------- */ 
+ +static struct fb_var_screeninfo ivtvfb_defined = { + 0, 0, 0, 0, /* W,H, W, H (virtual) load xres,xres_virtual */ + 0, 0, /* virtual -> visible no offset */ + 32, /* depth -> load bits_per_pixel */ + 0, /* greyscale ? */ + {0, 0, 0}, /* R */ + {0, 0, 0}, /* G */ + {0, 0, 0}, /* B */ + {0, 0, 0}, /* transparency */ + 0, /* standard pixel format */ + FB_ACTIVATE_NOW, + -1, -1, + 0, + 0L, 0L, 0L, 0L, 0L, + 0L, 0L, 0, /* No sync info */ + FB_VMODE_NONINTERLACED, + 0, + {0, 0, 0, 0, 0} +}; + +static struct fb_info fb_info; + +#ifdef CONFIG_MTRR +static int mtrr = 1; //++MTY +static unsigned long fb_start_aligned_physaddr; /* video_base rounded down as required by hardware MTRRs */ +static unsigned long fb_end_aligned_physaddr; /* video_base rounded up as required by hardware MTRRs */ +#endif + +/* --------------------------------------------------------------------- */ +static int _ivtvfb_set_var(struct fb_var_screeninfo *var) +{ + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "_ivtvfb_set_var\n"); + + if (var->xres != ivtvfb_defined.xres || + var->yres != ivtvfb_defined.yres || + var->xres_virtual != ivtvfb_defined.xres_virtual || + var->yres_virtual > video_height_virtual || + var->yres_virtual < video_height || + var->xoffset || + var->bits_per_pixel != ivtvfb_defined.bits_per_pixel || + var->nonstd) { + return -EINVAL; + } + return 0; + +} +static int _ivtvfb_get_fix(struct fb_fix_screeninfo *fix) +{ + memset(fix, 0, sizeof(struct fb_fix_screeninfo)); + strcpy(fix->id, "iTVC15 TV out"); + fix->smem_start = video_base; + fix->smem_len = video_size; + fix->type = FB_TYPE_PACKED_PIXELS; + fix->visual = FB_VISUAL_TRUECOLOR; + fix->xpanstep = 0; + fix->ypanstep = 0; + fix->ywrapstep = 0; + fix->line_length = video_linelength; + return 0; +} + +static int ivtvfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_fb_check_var\n"); + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtvfb_set_var\n"); + return (_ivtvfb_set_var(&info->var)); +} 
+static int ivtvfb_set_par(struct fb_info *info) +{ + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtvfb_set_var\n"); + return (_ivtvfb_set_var(&info->var)); +} +static int ivtvfb_setcolreg(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, + struct fb_info *info) +{ + return (0); +} + +static int ivtv_fb_blt_copy(struct ivtv *itv, int x, int y, int width, + int height, int source_offset, int source_stride) +{ + int rc; + unsigned long destaddr = ((y * video_width) + x) * 4; + + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_fb_blt_copy\n"); + source_offset += shadow_framebuf_offset; + + rc = ivtv_api_fb_blt_copy(ivtv_fb, 0xa, 0x1, 0x0, width, height, + 0xffffffff, destaddr, video_width, + source_stride, source_offset); + return rc; +} + +#if 0 /* looks like this is unused. */ +/* + * Returns the physical location of the PTE associated with a given virtual address. + */ +static inline pte_t *virt_to_pte(struct mm_struct *mm, void *addr) +{ + return + pte_offset(pmd_offset + (pgd_offset(mm, (unsigned long)addr), + (unsigned long)addr), (unsigned long)addr); +} +#endif + +struct ivtvfb_user_dma_to_device ivtvfb_current_fb_dma; + +// 4MB max buffer size (on IA32 at least: 1024 pages x 4KB/page = 4MB): +#define IVTV_MAX_FB_DMA_PAGES 1024 + +int ivtvfb_alloc_user_dma_to_device(struct ivtvfb_user_dma_to_device *dma) +{ + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtvfb_alloc_user_dma_to_device\n"); + dma->page_count = 0; + dma->sglist = + kmalloc(sizeof(struct ivtv_SG_element) * IVTV_MAX_FB_DMA_PAGES, + GFP_KERNEL); + if (!dma->sglist) { + printk(KERN_ERR + "ivtvfb: cannot allocate scatter/gather list for %d pages\n", + IVTV_MAX_FB_DMA_PAGES); + return -ENOMEM; + } + + dma->map = + kmalloc(sizeof(struct page *) * IVTV_MAX_FB_DMA_PAGES, GFP_KERNEL); + if (!dma->map) { + IVTV_DEBUG_FB(IVTV_DEBUG_ERR, "can't alloc dma page array\n"); + kfree(dma->sglist); + return -ENOMEM; + } + + dma->sg_dma_handle = + pci_map_single(ivtv_fb->dev, (void *)dma->sglist, + (sizeof(struct 
ivtv_SG_element) * + IVTV_MAX_FB_DMA_PAGES), PCI_DMA_TODEVICE); + + return 0; +} + +//++MTY This is pretty fast - fast enough to do around 30+ frames per second at NTSC 720x480x4 or 27 frames per second at PAL 720x576x4 +int ivtvfb_prep_user_dma_to_device(struct ivtvfb_user_dma_to_device *dma, + unsigned long ivtv_dest_addr, + char *userbuf, int size_in_bytes) +{ + int i, offset; + unsigned long uaddr; + int size_in_pages = (size_in_bytes + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + + IVTV_DEBUG_FB(IVTV_DEBUG_DMA, + "ivtvfb_prep_user_dma_to_device, dst: 0x%08x\n", + (unsigned int)ivtv_dest_addr); + + uaddr = ((unsigned long)userbuf & PAGE_MASK); + offset = uaddr & ~PAGE_MASK; + + down_read(¤t->mm->mmap_sem); + size_in_pages = + get_user_pages(current, current->mm, uaddr, size_in_pages, 0, 0, + dma->map, NULL); + up_read(¤t->mm->mmap_sem); + + if (size_in_pages < 0) { + IVTV_DEBUG_FB(IVTV_DEBUG_ERR, "failed to map user pages\n"); + return size_in_pages; + } + + dma->page_count = size_in_pages; + for (i = 0; i < size_in_pages; i++) { + dma->sglist[i].size = PAGE_SIZE; + dma->sglist[i].src = + pci_map_page(ivtv_fb->dev, dma->map[i], 0, offset, + PCI_DMA_TODEVICE); + dma->sglist[i].dst = ivtv_dest_addr + i * PAGE_SIZE + offset; + offset = 0; + } + + // Indicate the last element to the hardware, so we get an interrupt on completion... 
+ dma->sglist[size_in_pages - 1].size |= 0x80000000; + +#ifdef IVTVFB_DEBUG_PER_FRAME + printk(KERN_INFO + "ivtvfb: Allocated scatter/gather list of %d bytes (%d pages) at kva 0x%08x = physaddr 0x%08x:\n", + size_in_bytes, size_in_pages, dma->sglist, dma->sg_dma_handle); + for (i = 0; i < size_in_pages; i++) { + printk(KERN_INFO + "ivtvfb: [%d] src 0x%08x -> dest 0x%08x, size 0x%08x bytes\n", + i, dma->sglist[i].src, dma->sglist[i].dst, + dma->sglist[i].size); + } +#endif + + return 0; +} + +int ivtvfb_free_user_dma_to_device(struct ivtvfb_user_dma_to_device *dma) +{ + int i; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtvfb_free_user_dma_to_device\n"); + + for (i = 0; i < dma->page_count; i++) { + pci_unmap_page(ivtv_fb->dev, dma->sglist[i].src, PAGE_SIZE, + PCI_DMA_TODEVICE); + page_cache_release(dma->map[i]); + } + kfree(dma->sglist); + kfree(dma->map); + dma->page_count = 0; + return 0; +} + +int ivtvfb_execute_user_dma_to_device(struct ivtvfb_user_dma_to_device + *dma) +{ + u32 data[IVTV_MBOX_MAX_DATA], result; + int rc; + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtvfb_execute_user_dma_to_device\n"); + + data[0] = dma->sg_dma_handle; + data[1] = dma->page_count; + data[2] = 0x1; // 0x1 = OSD data + + IVTV_DEBUG_FB(IVTV_DEBUG_DMA, + "Schedule FB DMA: physical address 0x%08x, " + "arraysize 0x%08x, type 0x%08x\n", data[0], data[1], + data[2]); + + // Enable DMA complete interrupt: + ivtv_clear_irq_mask(ivtv_fb, IVTV_IRQ_DEC_DMA_COMPLETE); + + set_bit(IVTV_F_I_OSD_DMA, &ivtv_fb->i_flags); + + rc = ivtv_api(ivtv_fb->dec_mbox, &ivtv_fb->dec_msem, + IVTV_API_DEC_DMA_FROM_HOST, &result, 3, &data[0]); + + if (rc) { + IVTV_DEBUG_FB(IVTV_DEBUG_ERR, "error sending DMA info\n"); + clear_bit(IVTV_F_I_BUSY, &ivtv_fb->i_flags); + } + + IVTV_DEBUG_FB(IVTV_DEBUG_DMA, "OK, scheduled FB DMA!"); + return 0; +} + +static inline int ivtv_fb_prep_frame(struct ivtv *itv, + unsigned long destaddr, void *srcaddr, + int count) +{ + DECLARE_WAITQUEUE(wait, current); + int rc; + + //if (!srcaddr || 
verify_area(...)) ... + if ((destaddr + count) > video_size) + return -E2BIG; + + rc = 0; + add_wait_queue(&ivtv_fb->dec_master_w, &wait); + do { + set_current_state(TASK_INTERRUPTIBLE); + /* FIXME mini-race still .. need to port to 'stream' format */ + if (!test_and_set_bit(IVTV_F_I_BUSY, &ivtv_fb->i_flags)) + break; + + schedule(); + + if (signal_pending(current)) + rc = -ERESTARTSYS; + } while (!rc); + set_current_state(TASK_RUNNING); + remove_wait_queue(&ivtv_fb->dec_master_w, &wait); + + if (rc) + goto out_dma_lock; + + destaddr = IVTV_DEC_MEM_START + video_rel_base + destaddr; + + if (0 != + (rc = + ivtvfb_prep_user_dma_to_device(&ivtvfb_current_fb_dma, destaddr, + (char *)srcaddr, count))) { + IVTV_DEBUG_FB(IVTV_DEBUG_DMA, + "err prep user dma to device=%x\n", rc); + goto out_dma_lock; + } + if (0 != + (rc = ivtvfb_execute_user_dma_to_device(&ivtvfb_current_fb_dma))) { + IVTV_DEBUG_FB(IVTV_DEBUG_DMA, + "err exec user dma to device=%x\n", rc); + goto out_dma_lock; + } + + return 0; + out_dma_lock: + clear_bit(IVTV_F_I_BUSY, &ivtv_fb->i_flags); + wake_up(&ivtv_fb->dec_master_w); + return rc; +} + +int ivtv_fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg, struct fb_info *info) +{ + + int rc; + + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "ivtv_fb_ioctl\n"); + switch (cmd) { + case IVTVFB_IOCTL_GET_STATE:{ + struct ivtvfb_ioctl_state_info state; + state.status = (ivtv_api_fb_get_state(ivtv_fb) & 0x7); + state.status |= + (ivtv_api_fb_get_flicker_state(ivtv_fb) << 3); + state.alpha = ivtv_api_fb_get_global_alpha(ivtv_fb); + IVTV_DEBUG_FB(IVTV_DEBUG_IOCTL, + "IVTVFB_IOCTL_GET_STATE: status = %lu, alpha = %lu\n", + state.status, state.alpha); + if (copy_to_user((void *)arg, &state, sizeof(state))) + return -EFAULT; + return 0; + } + case IVTVFB_IOCTL_SET_STATE:{ + struct ivtvfb_ioctl_state_info state; + if (copy_from_user(&state, (void *)arg, sizeof(state))) + return -EFAULT; + IVTV_DEBUG_FB(IVTV_DEBUG_IOCTL, + "IVTVFB_IOCTL_SET_STATE: 
status = %lu, alpha = %lu\n", + state.status, state.alpha); + ivtv_api_fb_set_state(ivtv_fb, + (state.status + && IVTVFB_STATUS_ENABLED)); + ivtv_api_fb_set_global_alpha(ivtv_fb, + (state. + status & + IVTVFB_STATUS_GLOBAL_ALPHA) + ? 1 : 0, state.alpha, + (state. + status & + IVTVFB_STATUS_LOCAL_ALPHA) + ? 1 : 0); + ivtv_api_fb_set_flicker_state(ivtv_fb, + (state. + status & + IVTVFB_STATUS_FLICKER_REDUCTION) + ? 1 : 0); + IVTV_DEBUG_FB(IVTV_DEBUG_IOCTL, "new state = %d\n", + ivtv_api_fb_get_state(ivtv_fb)); + IVTV_DEBUG_FB(IVTV_DEBUG_IOCTL, + "global alpha now = %d\n", + ivtv_api_fb_get_global_alpha(ivtv_fb)); + return 0; + } + case IVTVFB_IOCTL_PREP_FRAME:{ + struct ivtvfb_ioctl_dma_host_to_ivtv_args args; + if (copy_from_user(&args, (void *)arg, sizeof(args))) + return -EFAULT; + return ivtv_fb_prep_frame(ivtv_fb, args.dest_offset, + args.source, args.count); + } + case IVTVFB_IOCTL_BLT_COPY:{ + struct ivtvfb_ioctl_blt_copy_args args; + if (copy_from_user(&args, (void *)arg, sizeof(args))) + return -EFAULT; + + return ivtv_fb_blt_copy(ivtv_fb, args.x, args.y, + args.width, args.height, + args.source_stride, + args.source_offset); + } + case IVTVFB_IOCTL_GET_ACTIVE_BUFFER:{ + struct ivtv_osd_coords bufinfo; + rc = ivtv_api_fb_get_osd_coords(ivtv_fb, &bufinfo); + return copy_to_user((void *)arg, &bufinfo, + sizeof(bufinfo)); + } + case IVTVFB_IOCTL_SET_ACTIVE_BUFFER:{ + struct ivtv_osd_coords bufinfo; + if (copy_from_user + (&bufinfo, (void *)arg, sizeof(bufinfo))) + return -EFAULT; + return ivtv_api_fb_set_osd_coords(ivtv_fb, &bufinfo); + } + case IVTVFB_IOCTL_GET_FRAME_BUFFER:{ + struct ivtvfb_ioctl_get_frame_buffer getfb; + getfb.mem = (void *)video_vbase; + getfb.bytes = video_size; + getfb.sizex = video_width; + getfb.sizey = video_height; + + return copy_to_user((void *)arg, &getfb, sizeof(getfb)); + } + default: + return -EINVAL; + } + return 0; +} + +static struct fb_ops ivtvfb_ops = { + owner:THIS_MODULE, + fb_check_var:ivtvfb_check_var, + 
fb_set_par:ivtvfb_set_par, + fb_setcolreg:ivtvfb_setcolreg, + fb_ioctl:ivtv_fb_ioctl, + fb_pan_display:NULL, +}; + +int __init ivtvfb_init(void) +{ + int rc; + u32 fbbase; + u32 fblength; + struct ivtv_osd_coords osd; + struct rectangle rect; + + if ((ivtv_fb_card_id < 0) || (ivtv_fb_card_id >= ivtv_cards_active)) { + printk(KERN_ERR + "Error! ivtv-fb: ivtv_fb_card_id parameter is out of range (valid range: 0-%d)\n", + ivtv_cards_active - 1); + return -1; + } + + ivtv_fb = &ivtv_cards[ivtv_fb_card_id]; + if (!ivtv_fb || (ivtv_fb->card_type != IVTV_350_V1)) { + printk(KERN_ERR + "Error! ivtv-fb: Specified card (id %d) is either not present or does not support TV out (PVR350 only)\n", + ivtv_fb_card_id); + return -1; + } + + printk(KERN_INFO + "ivtv-fb: Framebuffer module loaded (attached to ivtv card id %d)\n", + ivtv_fb_card_id); + + rc = ivtv_api_fb_set_pixel_format(ivtv_fb, 4); // 4 = AlphaRGB 8:8:8:8 + + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "Current pixel format = %d\n", + ivtv_api_fb_get_pixel_format(ivtv_fb)); + + rc = ivtv_api_fb_get_framebuffer(ivtv_fb, (void **)&fbbase, &fblength); + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, + "Framebuffer is at decoder-relative address 0x%08x and has %d bytes.\n", + fbbase, fblength); + + rc = ivtv_api_fb_get_osd_coords(ivtv_fb, &osd); + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, + "OSD: offset = 0x%08x (max offset = 0x%08x), pixel_stride = %d, lines = %d, x = %d, y = %d\n", + (u32) osd.offset, (u32) osd.max_offset, osd.pixel_stride, + osd.lines, osd.x, osd.y); + + /* setup OSD and screen for PAL */ + if (ivtv_pal) { + osd.lines = 576; + rc = ivtv_api_fb_set_osd_coords(ivtv_fb, &osd); + if (rc) + IVTV_DEBUG_FB(IVTV_DEBUG_ERR, + "failed setting PAL osd\n"); + + rect.x0 = 0; + rect.x1 = 720; + rect.y0 = 0; + rect.y1 = 576; + rc = ivtv_api_fb_set_screen_coords(ivtv_fb, &rect); + if (rc) + IVTV_DEBUG_FB(IVTV_DEBUG_ERR, + "failed setting PAL screen\n"); + } + + rc = ivtv_api_fb_get_screen_coords(ivtv_fb, &rect); + printk(KERN_INFO "ivtv-fb: screen coords: 
[%d %d] -> [%d %d]\n", + rect.x0, rect.y0, rect.x1, rect.y1); + + printk(KERN_INFO "ivtv-fb: original global alpha = %d\n", + ivtv_api_fb_get_global_alpha(ivtv_fb)); + + /* + * Normally a 32-bit RGBA framebuffer would be fine, however XFree86's fbdev + * driver doesn't understand the concept of alpha channel and always sets + * bits 24-31 to zero when using a 24bpp-on-32bpp framebuffer device. We fix + * this behavior by enabling the iTVC15's global alpha feature, which causes + * the chip to ignore the per-pixel alpha data and instead use one value (e.g., + * full brightness = 255) for the entire framebuffer. The local alpha is also + * disabled in this step. + * + *++MTY Need to update http://ivtv.sourceforge.net/ivtv/firmware-api.html + * call 0x4b: param[2] says 1 = enable local alpha, when in reality + * it means *disable* local alpha... + * + */ + ivtv_api_fb_set_global_alpha(ivtv_fb, 1, 255, 0); + printk(KERN_INFO "ivtv-fb: new global alpha = %d\n", + ivtv_api_fb_get_global_alpha(ivtv_fb)); + + rc = ivtv_api_fb_set_state(ivtv_fb, 1); // 1 = enabled + printk(KERN_INFO "ivtv-fb: current OSD state = %d\n", + ivtv_api_fb_get_state(ivtv_fb)); + + video_rel_base = fbbase; + video_base = ivtv_fb->base_addr + IVTV_DEC_MEM_START + video_rel_base; + video_width = rect.x1 - rect.x0; + video_height = rect.y1 - rect.y0; + video_linelength = 4 * osd.pixel_stride; + video_size = fblength; + + shadow_framebuf_size = (video_width * video_height * 4); + shadow_framebuf_offset = (video_size - shadow_framebuf_size) & ~3; + + if (!request_mem_region(video_base, video_size, "ivtvfb")) { + printk(KERN_WARNING + "ivtv-fb: warning: cannot reserve video memory at 0x%lx\n", + video_base); + /* We cannot make this fatal. 
Sometimes this comes from magic spaces our resource handlers simply don't know about */ + } + + video_vbase = ioremap(video_base, video_size); + if (!video_vbase) { + release_mem_region(video_base, video_size); + printk(KERN_ERR + "ivtv-fb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", + video_size, video_base); + return -EIO; + } + + printk(KERN_INFO + "ivtv-fb: framebuffer at 0x%lx, mapped to 0x%p, size %dk\n", + video_base, video_vbase, video_size / 1024); + printk(KERN_INFO "ivtv-fb: mode is %dx%dx%d, linelength=%d\n", + video_width, video_height, 32, video_linelength); + + ivtvfb_defined.xres = video_width; + ivtvfb_defined.yres = video_height; + ivtvfb_defined.xres_virtual = video_width; + ivtvfb_defined.yres_virtual = video_height; + ivtvfb_defined.bits_per_pixel = 32; + video_height_virtual = ivtvfb_defined.yres_virtual; + + /* some dummy values for timing to make fbset happy */ + ivtvfb_defined.pixclock = 10000000 / video_width * 1000 / video_height; + ivtvfb_defined.left_margin = (video_width / 8) & 0xf8; + ivtvfb_defined.right_margin = 32; + ivtvfb_defined.upper_margin = 16; + ivtvfb_defined.lower_margin = 4; + ivtvfb_defined.hsync_len = (video_width / 8) & 0xf8; + ivtvfb_defined.vsync_len = 4; + + ivtvfb_defined.red.offset = 0; + ivtvfb_defined.red.length = 8; + ivtvfb_defined.green.offset = 8; + ivtvfb_defined.green.length = 8; + ivtvfb_defined.blue.offset = 16; + ivtvfb_defined.blue.length = 8; + ivtvfb_defined.transp.offset = 24; + ivtvfb_defined.transp.length = 8; + +#ifdef CONFIG_MTRR + if (mtrr) { + /* Find the largest power of two that maps the whole buffer */ + int size_shift = 31; + while (!(video_size & (1 << size_shift))) { + size_shift--; + } + size_shift++; + + fb_start_aligned_physaddr = + video_base & ~((1 << size_shift) - 1); + fb_end_aligned_physaddr = + (video_base + (1 << size_shift) - 1) & ~((1 << size_shift) - + 1); + if (mtrr_add + (fb_start_aligned_physaddr, + (fb_end_aligned_physaddr - fb_start_aligned_physaddr), + 
MTRR_TYPE_WRCOMB, 1) < 0) { + printk(KERN_WARNING + "ivtv-fb: warning: mtrr_add() failed to add write combining region 0x%08x-0x%08x\n", + (unsigned int)fb_start_aligned_physaddr, + (unsigned int)fb_end_aligned_physaddr); + } + } +#endif + + fb_info.node = -1; + fb_info.flags = FBINFO_FLAG_DEFAULT; + fb_info.fbops = &ivtvfb_ops; + + struct fb_fix_screeninfo fix; + _ivtvfb_get_fix(&fix); + fb_info.var = ivtvfb_defined; + fb_info.fix = fix; + fb_info.screen_base = video_vbase; + fb_info.fbops = &ivtvfb_ops; + fb_alloc_cmap(&fb_info.cmap, 0, 0); + + if (register_framebuffer(&fb_info) < 0) + return -EINVAL; + + ivtv_fb->fb_id = fb_info.node; + + printk(KERN_INFO "fb%d: %s frame buffer device\n", + ivtv_fb->fb_id, fix.id); + + /* Set up DMA and BLT copy structures */ + ivtvfb_alloc_user_dma_to_device(&ivtvfb_current_fb_dma); + ivtv_fb->user_dma_to_device_state = &ivtvfb_current_fb_dma; + return 0; +} + +static void ivtvfb_cleanup(void) +{ + IVTV_DEBUG_FB(IVTV_DEBUG_INFO, "Unloading framebuffer module\n"); + unregister_framebuffer(&fb_info); + iounmap(video_vbase); +#ifdef CONFIG_MTRR + mtrr_del(-1, fb_start_aligned_physaddr, + (fb_end_aligned_physaddr - fb_start_aligned_physaddr)); +#endif + ivtv_fb->user_dma_to_device_state = NULL; + ivtvfb_free_user_dma_to_device(&ivtvfb_current_fb_dma); + ivtv_fb->fb_id = -1; + //release_mem_region(video_base, video_size); +} + +module_init(ivtvfb_init); +module_exit(ivtvfb_cleanup); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/ivtv-i2c.c current/drivers/media/video/ivtv-i2c.c --- reference/drivers/media/video/ivtv-i2c.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/media/video/ivtv-i2c.c 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,221 @@ +#include "ivtv.h" + +/* i2c implementation for iTVC15 chip, ivtv project. 
+ * Author: Kevin Thayer (nufan_wfk at yahoo.com) + * License: GPL + * http://www.sourceforge.net/projects/ivtv/ + */ + +/* moved here from ivtv.h */ +static int writeregs(struct i2c_client *client, const unsigned char *regs); +static int attach_inform(struct i2c_client *client); +static int detach_inform(struct i2c_client *client); + +int writereg(struct i2c_client *client, unsigned char reg, unsigned char data) +{ + int ret; + unsigned char msg[] = { 0x1f, 0x00 }; + + printk("<1>writing reg 0x%02x, data 0x%02x\n", reg, data); + + msg[0] = reg; + msg[1] = data; + ret = i2c_master_send(client, msg, 2); + if (ret != 2) + printk("writereg error\n"); + return ret; +} + +static int writeregs(struct i2c_client *client, const unsigned char *regs) +{ + unsigned char reg, data; + + while (*regs != 0x00) { + reg = *(regs++); + data = *(regs++); + if (writereg(client, reg, data) < 0) + return -1; + } + return 0; +} + +static struct i2c_adapter ivtv_i2c_adapter_template = { + .name = "ivtv i2c driver", + .id = I2C_HW_B_BT848, /*algo-bit is OR'd with this */ + .algo = NULL, /*set by i2c-algo-bit */ + .algo_data = NULL, /*filled from template */ + .client_register = attach_inform, + .client_unregister = detach_inform, +/* i2c-2.8.0 and later */ + .owner = THIS_MODULE, + .class = I2C_ADAP_CLASS_TV_ANALOG, +}; + +static struct i2c_algo_bit_data ivtv_i2c_algo_template = { + NULL, /*?? 
*/ + ivtv_setsda, /*setsda function */ + ivtv_setscl, /*" */ + ivtv_getsda, /*" */ + ivtv_getscl, /*" */ + 5, /*udelay or mdelay */ + 5, /*whatever above isn't */ + 200 /*timeout */ +}; + +void ivtv_setscl(void *data, int state) +{ + struct ivtv *itv = (struct ivtv *)data; + + if (state) + itv->i2c_state |= 0x01; + else + itv->i2c_state &= ~0x01; + + /* write them out */ + /* write bits are inverted */ + writel(~itv->i2c_state, (itv->reg_mem + IVTV_REG_I2C_SETSCL_OFFSET)); +} + +void ivtv_setsda(void *data, int state) +{ + struct ivtv *itv = (struct ivtv *)data; + + if (state) + itv->i2c_state |= 0x01; + else + itv->i2c_state &= ~0x01; + + /* write them out */ + /* write bits are inverted */ + writel(~itv->i2c_state, (itv->reg_mem + IVTV_REG_I2C_SETSDA_OFFSET)); +} + +int ivtv_getscl(void *data) +{ + struct ivtv *itv = (struct ivtv *)data; + return readb(itv->reg_mem + IVTV_REG_I2C_GETSCL_OFFSET); +} + +int ivtv_getsda(void *data) +{ + struct ivtv *itv = (struct ivtv *)data; + return readb(itv->reg_mem + IVTV_REG_I2C_GETSDA_OFFSET); +} + +static struct i2c_client ivtv_i2c_client_template = { + .name = "ivtv internal use only", + .id = -1, +}; + +static int attach_inform(struct i2c_client *client) +{ + struct ivtv *itv = (struct ivtv *)i2c_get_adapdata(client->adapter); + int i; + + IVTV_DEBUG(IVTV_DEBUG_I2C, "i2c client attach\n"); + for (i = 0; i < I2C_CLIENTS_MAX; i++) { + if (itv->i2c_clients[i] == NULL) { + itv->i2c_clients[i] = client; + break; + } + } + IVTV_DEBUG(IVTV_DEBUG_I2C, "i2c attach [client=%s,%s]\n", + client->name, (i < I2C_CLIENTS_MAX) ? 
"ok" : "failed"); + + return 0; +} + +static int detach_inform(struct i2c_client *client) +{ + struct ivtv *itv = (struct ivtv *)i2c_get_adapdata(client->adapter); + int i; + + IVTV_DEBUG(IVTV_DEBUG_I2C, "i2c client detach\n"); + for (i = 0; i < I2C_CLIENTS_MAX; i++) { + if (itv->i2c_clients[i] == client) { + itv->i2c_clients[i] = NULL; + break; + } + } + IVTV_DEBUG(IVTV_DEBUG_I2C, "i2c detach [client=%s,%s]\n", + client->name, (i < I2C_CLIENTS_MAX) ? "ok" : "failed"); + + return 0; +} + +void ivtv_call_i2c_client(struct ivtv *itv, int addr, unsigned int cmd, + void *arg) +{ + int i; + + IVTV_DEBUG(IVTV_DEBUG_I2C, "call_i2c_client\n"); + for (i = 0; i < I2C_CLIENTS_MAX; i++) { + if (NULL == itv->i2c_clients[i]) + continue; + if (NULL == itv->i2c_clients[i]->driver->command) + continue; + if (addr == itv->i2c_clients[i]->addr) { + itv->i2c_clients[i]->driver->command(itv-> + i2c_clients[i], + cmd, arg); + return; + } + } + IVTV_DEBUG(IVTV_DEBUG_ERR, "i2c client addr: 0x%02x not found!\n", + addr); +} + +int ivtv_i2c_direct(struct ivtv *itv, int addr, const unsigned char *regs) +{ + int i, ret = 0; + + IVTV_DEBUG(IVTV_DEBUG_I2C, "i2c_direct\n"); + for (i = 0; i < I2C_CLIENTS_MAX; i++) { + if (NULL == itv->i2c_clients[i]) + continue; + if (addr == itv->i2c_clients[i]->addr) { + ret = writeregs(itv->i2c_clients[i], regs); + break; + } + } + + if (ret) { + IVTV_DEBUG(IVTV_DEBUG_ERR, "error %d writing reg\n", ret); + return -EIO; + } + + return 0; +} + +/* init + register i2c algo-bit adapter */ +int __devinit init_ivtv_i2c(struct ivtv *itv) +{ + IVTV_DEBUG(IVTV_DEBUG_I2C, "i2c init\n"); + memcpy(&itv->i2c_adap, &ivtv_i2c_adapter_template, + sizeof(struct i2c_adapter)); + memcpy(&itv->i2c_algo, &ivtv_i2c_algo_template, + sizeof(struct i2c_algo_bit_data)); + memcpy(&itv->i2c_client, &ivtv_i2c_client_template, + sizeof(struct i2c_client)); + + sprintf(itv->i2c_adap.name + strlen(itv->i2c_adap.name), + " #%d", itv->num); + itv->i2c_algo.data = itv; + 
i2c_set_adapdata(&itv->i2c_adap, itv); + itv->i2c_adap.algo_data = &itv->i2c_algo; + itv->i2c_client.adapter = &itv->i2c_adap; + + IVTV_DEBUG(IVTV_DEBUG_I2C, "setting scl and sda to 1\n"); + ivtv_setscl(itv, 1); + ivtv_setsda(itv, 1); + + itv->i2c_rc = i2c_bit_add_bus(&itv->i2c_adap); + return itv->i2c_rc; +} + +void __devexit exit_ivtv_i2c(struct ivtv *itv) +{ + IVTV_DEBUG(IVTV_DEBUG_I2C, "i2c exit\n"); + + i2c_bit_del_bus(&itv->i2c_adap); +} diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/ivtv.h current/drivers/media/video/ivtv.h --- reference/drivers/media/video/ivtv.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/media/video/ivtv.h 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,846 @@ +#ifndef IVTV_H +#define IVTV_H + +/* Header for ivtv project: + * Driver for the iTVC15 chip. + * Author: Kevin Thayer (nufan_wfk at yahoo.com) + * License: GPL + * http://www.sourceforge.net/projects/ivtv/ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "msp3400.h" +/* If you don't want to patch to v4l2, grab a copy of + * videodev2.h and put it in the same dir as this file */ +#ifndef HAVE_V4L2 + #define HAVE_V4L2 1 + #include "videodev2.h" +#endif +#include +#include +#include + +#define IVTV_ENCODER_OFFSET 0x00000000 +#define IVTV_ENCODER_SIZE 0x01000000 + +#define IVTV_DECODER_OFFSET 0x01000000 +#define IVTV_DECODER_SIZE 0x01000000 + +#define IVTV_ENCDEC_SIZE (IVTV_ENCODER_SIZE + IVTV_DECODER_SIZE) + +#define IVTV_REG_OFFSET 0x02000000 +#define IVTV_REG_SIZE 0x00010000 + +#define IVTV_IOREMAP_SIZE (IVTV_ENCDEC_SIZE + IVTV_REG_SIZE) + +#define IVTV_IOREMAP_ERROR "ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h " \ + "or disabling CONFIG_HIMEM4G into the kernel would help" + +/* General */ +#define IVTV_DRIVER_NAME "ivtv" +#define IVTV_DRIVER_VERSION_MAJOR 0 +#define IVTV_DRIVER_VERSION_MINOR 1 +#define 
IVTV_DRIVER_VERSION_PATCHLEVEL 9 +#define IVTV_DRIVER_VERSION KERNEL_VERSION(IVTV_DRIVER_VERSION_MAJOR,IVTV_DRIVER_VERSION_MINOR,IVTV_DRIVER_VERSION_PATCHLEVEL) +#define IVTV_MAX_CARDS 9 +#define IVTV_DEFAULT_NUM_CARDS 1 +#define IVTV_MAX_YUV_BUFFERS 500 +#define IVTV_MIN_YUV_BUFFERS 40 +#define IVTV_DEFAULT_YUV_BUFFERS 60 +#define IVTV_MAX_MPG_BUFFERS 100 +#define IVTV_MIN_MPG_BUFFERS 15 +#define IVTV_DEFAULT_MPG_BUFFERS IVTV_MAX_MPG_BUFFERS +#define IVTV_MAX_DEC_YUV_BUFFERS 500 +#define IVTV_MIN_DEC_YUV_BUFFERS 17 +#define IVTV_DEFAULT_DEC_YUV_BUFFERS 0 +#define IVTV_MAX_DEC_MPG_BUFFERS 100 +#define IVTV_MIN_DEC_MPG_BUFFERS 5 +#define IVTV_DEFAULT_DEC_MPG_BUFFERS 8 +#define IVTV_MAX_VBI_BUFFERS 100 +#define IVTV_MIN_VBI_BUFFERS 3 +#define IVTV_DEFAULT_VBI_BUFFERS 10 +#define IVTV_MIN_DEC_MPG_QLEN 0 +#define IVTV_DEFAULT_DEC_MPG_QLEN 2 +#define IVTV_MIN_DEC_YUV_QLEN 0 +#define IVTV_DEFAULT_DEC_YUV_QLEN 0 +#define IVTV_IOCTL_SET_DEBUG_LEVEL _IOWR('@', 98, int *) +#define IVTV_IOCTL_GET_DEBUG_LEVEL _IOR('@', 99, int *) + +#define IVTV_PCI_ID_250_V1 0x4001 /* subsystem id */ +#define IVTV_PCI_ID_250_V2 0x4009 +#define IVTV_PCI_ID_250_V3 0x4801 /* treat like 250_V1 */ +#define IVTV_PCI_ID_250_V4 0x4803 /* treat like 250_V2 */ +#define IVTV_PCI_ID_350_V1 0x4000 +#define IVTV_PCI_ID_350_V2 0x4800 /* treat like 350_V1 */ +#define IVTV_250_V1 0 /* wintv pvr 250, encoder and decoder */ +#define IVTV_250_V2 1 /* pvr 250, encoder only */ +#define IVTV_350_V1 2 /* encoder, decoder, tv-out */ +#define IVTV_250_V1_STREAMS 3 +#define IVTV_250_V2_STREAMS 3 +#define IVTV_350_V1_STREAMS 5 +#define IVTV_V4L2_DEC_OFFSET 16 /* offset from 0 to register dec. 
v4l2 minors on */ +#define IVTV_V4L2_YUV_OFFSET 32 /* offset from 0 to register yuv v4l2 minors on */ + +#define IVTV_ENC_STREAM_TYPE_MPG 0 +#define IVTV_ENC_STREAM_TYPE_YUV 1 +#define IVTV_ENC_STREAM_TYPE_VBI 2 +#define IVTV_DEC_STREAM_TYPE_MPG 3 +#define IVTV_DEC_STREAM_TYPE_YUV 4 + +#define IVTV_ENC_MEM_START 0x00000000 +#define IVTV_DEC_MEM_START 0x01000000 +#define PCI_VENDOR_ID_ICOMP 0x4444 +#define PCI_DEVICE_ID_IVTV15 0x0803 +#define PCI_DEVICE_ID_IVTV16 0x0016 +#define IVTV_DEVNAME "ivtv: iTVC15/16 mpg2 encoder chip" +#define IVTV_MBOX_MAX_BOXES 20 +#define IVTV_MBOX_API_BOXES 6 +#define IVTV_MBOX_DMA_START 6 +#define IVTV_MBOX_DMA_END 8 +#define IVTV_MBOX_MAX_DATA 16 +#define IVTV_MBOX_DMA 9 +#define IVTV_MBOX_FIELD_DISPLAYED 8 +#define IVTV_MBOX_SIZE 80 +#define IVTV_SAA7115_I2C_ADDR 0x21 +#define IVTV_TUNER_I2C_ADDR 0x61 +#define IVTV_MSP3400_I2C_ADDR 0x40 +#define IVTV_DMA_BUF_SIZE 34560 /* 0x8000 = 32kbytes, 0x20000 = 128kbytes */ +#define IVTV_DMA_DEC_BUF_SIZE 32768 /* 0x8000 = 32kbytes, 0x20000 = 128kbytes */ +//#define IVTV_DMA_BUF_SIZE 65536 /* 0x8000 = 32kbytes, 0x20000 = 128kbytes */ +//#define IVTV_DMA_DEC_BUF_SIZE 65536 /* 0x8000 = 32kbytes, 0x20000 = 128kbytes */ + +#define IVTV_DMA_MAX_XFER 0x00080000 /* 0x8000 = 32kbytes, 0x20000 = 128kbytes */ +#define IVTV_DEC_MIN_BUF 0x00050000 /* want this many bytes+ in decoder buffer */ +#define IVTV_SLEEP_WAIT (HZ/10) /*100 ms*/ +#define IVTV_MAX_DATA_SLEEP 30 +#define DEC_DMA_TIMEOUT (15*HZ/100) /* used to be 100/15 */ + +#define IVTV_DMA_ERR_LIST 0x00000008 +#define IVTV_DMA_ERR_WRITE 0x00000004 +#define IVTV_DMA_ERR_READ 0x00000002 +#define IVTV_DMA_SUCCESS 0x00000001 +#define IVTV_DMA_READ_ERR (IVTV_DMA_ERR_LIST | IVTV_DMA_ERR_READ) +#define IVTV_DMA_WRITE_ERR (IVTV_DMA_ERR_LIST | IVTV_DMA_ERR_WRITE) +#define IVTV_DMA_ERR (IVTV_DMA_ERR_LIST | IVTV_DMA_ERR_WRITE | IVTV_DMA_ERR_READ) + +/* video related */ +#define IVTV_MAX_INPUTS 9 + +/*ioctl's*/ +#define IVTV_CTL_PRINTBOXES 0x00000001 +#define 
IVTV_CTL_CLEANUP 0x00000002 +#define IVTV_CTL_INIT_VIDCAP 0x00000003 + +/* Registers */ +#define IVTV_REG_DMASTATUS (0x0004 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_IRQSTATUS (0x0040 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_IRQMASK (0x0048 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_DEC_SDRAM_REFRESH (0x08F8 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_DEC_SDRAM_PRECHARGE (0x08FC /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_VDM (0x2800 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_AO (0x2D00 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_BYTEFLUSH (0x2D24 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_SPU (0x9050 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_HW_BLOCKS (0x9054 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_VPU (0x9058 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_APU (0xA064 /*| IVTV_REG_OFFSET*/) + +/* IRQ Masks */ +#define IVTV_IRQ_MASK_DEFAULT 0x00000404 /*stuff to ignore*/ +#define IVTV_IRQ_MASK_CAPTURE 0xFC000400 /*inverse mask, we want the high bits!*/ +#define IVTV_IRQ_MASK_DECODE 0x00FC0400 + +#define IVTV_IRQ_ENC_START_CAP (0x1 << 31) +#define IVTV_IRQ_ENC_EOS (0x1 << 30) +#define IVTV_IRQ_ENC_VBI_CAP (0x1 << 29) +#define IVTV_IRQ_ENC_VIM_RST (0x1 << 28) +#define IVTV_IRQ_ENC_DMA_COMPLETE (0x1 << 27) + +#define IVTV_IRQ_DEC_COPY_PROTECT (0x1 << 25) +#define IVTV_IRQ_DEC_AUD_MODE_CHG (0x1 << 24) +#define IVTV_IRQ_DEC_DATA_REQ (0x1 << 22) +#define IVTV_IRQ_DEC_IFRAME_DONE (0x1 << 21) +#define IVTV_IRQ_DEC_DMA_COMPLETE (0x1 << 20) +#define IVTV_IRQ_DEC_VBI_RE_INSERT (0x1 << 19) +#define IVTV_IRQ_DEC_DMA_ERR (0x1 << 18) +#define IVTV_IRQ_DEC_VSYNC (0x1 << 10) + +#define IVTV_IRQ_DEBUG_KLUGE ( IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_EOS | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_ENC_VIM_RST | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DEC_COPY_PROTECT | IVTV_IRQ_DEC_AUD_MODE_CHG | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_IFRAME_DONE | IVTV_IRQ_DEC_DMA_COMPLETE | 
IVTV_IRQ_DEC_VBI_RE_INSERT | IVTV_IRQ_DEC_DMA_ERR ) + +/* commands */ +#define IVTV_MASK_SPU_ENABLE 0xFFFFFFFE +#define IVTV_MASK_VPU_ENABLE15 0xFFFFFFF6 +#define IVTV_MASK_VPU_ENABLE16 0xFFFFFFFB +#define IVTV_CMD_VDM_STOP 0x00000000 +#define IVTV_CMD_AO_STOP 0x00000005 +#define IVTV_CMD_APU_PING 0x00000000 +#define IVTV_CMD_VPU_STOP15 0xFFFFFFFE +#define IVTV_CMD_VPU_STOP16 0xFFFFFFEE +#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF +#define IVTV_CMD_SPU_STOP 0x00000001 +#define IVTV_CMD_SDRAM_PRECHARGE_INIT 0x0000001A +#define IVTV_CMD_SDRAM_REFRESH_INIT 0x80000640 +#define IVTV_SDRAM_SLEEPTIME (60 * HZ / 100) /* 600ms */ + +/*Used for locating the firmware mailboxes*/ +#define IVTV_FIRM_ENC_FILENAME "/lib/modules/ivtv-fw-enc.bin" +#define IVTV_FIRM_DEC_FILENAME "/lib/modules/ivtv-fw-dec.bin" +#define IVTV_FIRM_IMAGE_SIZE 256*1024 +#define IVTV_FIRM_SEARCH_ENCODER_START IVTV_ENCODER_OFFSET +#define IVTV_FIRM_SEARCH_DECODER_START IVTV_DECODER_OFFSET +#define IVTV_FIRM_SEARCH_ENCODER_END (IVTV_ENCODER_OFFSET + IVTV_ENCODER_SIZE - 1) +#define IVTV_FIRM_SEARCH_DECODER_END (IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE - 1) +#define IVTV_FIRM_SEARCH_STEP 0x00000100 + +/* Firmware mailbox flags*/ +#define IVTV_MBOX_FIRMWARE_DONE 0x00000004 +#define IVTV_MBOX_DRIVER_DONE 0x00000002 +#define IVTV_MBOX_IN_USE 0x00000001 +#define IVTV_MBOX_FREE 0x00000000 + +/*Firmware API commands*/ +#define IVTV_API_ENC_GETVER 0x000000C4 +#define IVTV_API_DEC_GETVER 0x00000011 +#define IVTV_API_ENC_HALT_FW 0x000000C3 +#define IVTV_API_DEC_HALT_FW 0x0000000E +#define IVTV_API_DEC_START_PLAYBACK 0x00000001 +#define IVTV_API_DEC_STOP_PLAYBACK 0x00000002 +#define IVTV_API_DEC_PLAYBACK_SPEED 0x00000003 +#define IVTV_API_DEC_STEP_VIDEO 0x00000005 +#define IVTV_API_DEC_PAUSE_PLAYBACK 0x0000000d +#define IVTV_API_DEC_DMA_BLOCKSIZE 0x00000008 +#define IVTV_API_DEC_DMA_FROM_HOST 0x00000000b +#define IVTV_API_DEC_DISP_STANDARD 0x00000010 +#define IVTV_API_DEC_STREAM_INPUT 0x00000014 +#define 
IVTV_API_DEC_TIMING_INFO 0x00000015 +#define IVTV_API_DEC_SELECT_AUDIO 0x00000016 +#define IVTV_API_DEC_EVENT_NOTIFICATION 0x00000017 +#define IVTV_API_DEC_DISPLAY_BUFFERS 0x00000018 +#define IVTV_API_DEC_DECODE_SOURCE 0x0000001a +#define IVTV_API_DEC_AUDIO_OUTPUT 0x0000001b +#define IVTV_API_DEC_SET_AV_DELAY 0x0000001c +#define IVTV_API_DEC_BUFFER 0x0000001e +#define IVTV_API_DEC_DMA_STATUS 0x0000000a +#define IVTV_API_DEC_XFER_INFO 0x00000009 +#define IVTV_API_STD_TIMEOUT 0x00010000 /*units??*/ +#define IVTV_API_ASSIGN_DMA_BLOCKLEN 0x000000c9 +#define IVTV_API_ASSIGN_PGM_INDEX_INFO 0x000000c7 +#define IVTV_API_ASSIGN_STREAM_TYPE 0x000000b9 +#define IVTV_API_ASSIGN_OUTPUT_PORT 0x000000bb +#define IVTV_API_ASSIGN_FRAMERATE 0x0000008f +#define IVTV_API_ASSIGN_FRAME_SIZE 0x00000091 +#define IVTV_API_ASSIGN_ASPECT_RATIO 0x00000099 +#define IVTV_API_ASSIGN_BITRATES 0x00000095 +#define IVTV_API_ASSIGN_GOP_PROPERTIES 0x00000097 +#define IVTV_API_ASSIGN_3_2_PULLDOWN 0x000000b1 +#define IVTV_API_ASSIGN_GOP_CLOSURE 0x000000c5 +#define IVTV_API_ASSIGN_AUDIO_PROPERTIES 0x000000bd +#define IVTV_API_ASSIGN_DNR_FILTER_MODE 0x0000009b +#define IVTV_API_ASSIGN_DNR_FILTER_PROPS 0x0000009d +#define IVTV_API_ASSIGN_CORING_LEVELS 0x0000009f +#define IVTV_API_ASSIGN_SPATIAL_FILTER_TYPE 0x000000a1 +#define IVTV_API_ASSIGN_FRAME_DROP_RATE 0x000000d0 +#define IVTV_API_ASSIGN_PLACEHOLDER 0x000000d8 +#define IVTV_API_INITIALIZE_INPUT 0x000000cd +#define IVTV_API_ASSIGN_NUM_VSYNC_LINES 0x000000d6 +#define IVTV_API_BEGIN_CAPTURE 0x00000081 +#define IVTV_API_PAUSE_ENCODER 0x000000d2 +#define IVTV_API_EVENT_NOTIFICATION 0x000000d5 +#define IVTV_API_END_CAPTURE 0x00000082 +#define IVTV_API_SCHED_DMA_TO_HOST 0x000000cc +#define IVTV_API_FB_GET_FRAMEBUFFER 0x00000041 +#define IVTV_API_FB_GET_PIXEL_FORMAT 0x00000042 +#define IVTV_API_FB_SET_PIXEL_FORMAT 0x00000043 +#define IVTV_API_FB_GET_STATE 0x00000044 +#define IVTV_API_FB_SET_STATE 0x00000045 +#define IVTV_API_FB_GET_OSD_COORDS 0x00000046 
+#define IVTV_API_FB_SET_OSD_COORDS 0x00000047 +#define IVTV_API_FB_GET_SCREEN_COORDS 0x00000048 +#define IVTV_API_FB_SET_SCREEN_COORDS 0x00000049 +#define IVTV_API_FB_GET_GLOBAL_ALPHA 0x0000004a +#define IVTV_API_FB_SET_GLOBAL_ALPHA 0x0000004b +#define IVTV_API_FB_SET_BLEND_COORDS 0x0000004c +// 0x4d unknown +// 0x4e unknown +#define IVTV_API_FB_GET_FLICKER_STATE 0x0000004f +#define IVTV_API_FB_SET_FLICKER_STATE 0x00000050 +// 0x51 unknown +#define IVTV_API_FB_BLT_COPY 0x00000052 +#define IVTV_API_FB_BLT_FILL 0x00000053 +#define IVTV_API_FB_BLT_TEXT 0x00000054 +// 0x55 unknown +#define IVTV_API_FB_SET_FRAMEBUFFER_WINDOW 0x00000056 +// 0x57 - 0x5f unknown +#define IVTV_API_FB_SET_CHROMA_KEY 0x00000060 +#define IVTV_API_FB_GET_ALPHA_CONTENT_INDEX 0x00000061 +#define IVTV_API_FB_SET_ALPHA_CONTENT_INDEX 0x00000062 + +/* i2c stuff */ +#define I2C_CLIENTS_MAX 16 +#define I2C_TIMING (0x7<<4) +#define IVTV_REG_I2C_SETSCL_OFFSET (0x7000 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_I2C_SETSDA_OFFSET (0x7004 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_I2C_GETSCL_OFFSET (0x7008 /*| IVTV_REG_OFFSET*/) +#define IVTV_REG_I2C_GETSDA_OFFSET (0x700c /*| IVTV_REG_OFFSET*/) + +/* debugging */ +#define IVTV_DEBUG_ERR (1 << 0) +#define IVTV_DEBUG_INFO (1 << 1) +#define IVTV_DEBUG_API (1 << 2) +#define IVTV_DEBUG_DMA (1 << 3) +#define IVTV_DEBUG_IOCTL (1 << 4) +#define IVTV_DEBUG_I2C (1 << 5) +#define IVTV_DEBUG_IRQ (1 << 6) +#define IVTV_DEBUG(x,args...) if((x)&ivtv_debug) printk("ivtv: " args); +#define IVTV_DEBUG_FB(x,args...) 
if((x)&ivtv_debug) printk("ivtv-fb: " args); + +/* Temp saa7115 hack FIXME */ +#define DECODER_SET_SIZE 76598 +#define DECODER_GET_PICTURE 76599 + +/* Values for IVTV_API_DEC_PLAYBACK_SPEED mpeg_frame_type_mask parameter: */ +#define MPEG_FRAME_TYPE_IFRAME 1 +#define MPEG_FRAME_TYPE_IFRAME_PFRAME 3 +#define MPEG_FRAME_TYPE_ALL 7 + +/* External API stuff */ +#define IVTV_IOC_FWAPI 0xFFEE7701 /*just some values i picked for now*/ +#define IVTV_IOC_ZCOUNT 0xFFEE7702 +#define IVTV_IOC_G_CODEC 0xFFEE7703 +#define IVTV_IOC_S_CODEC 0xFFEE7704 + +/* allow direct access to the saa7115 registers for testing */ +#define SAA7115_GET_REG 0xFFEE7705 +#define SAA7115_SET_REG 0xFFEE7706 + + +#define DECODER_SET_AUDIO 0xFFEE7707 +#define DECODER_AUDIO_32_KHZ 0 +#define DECODER_AUDIO_441_KHZ 1 +#define DECODER_AUDIO_48_KHZ 2 + +#define IVTV_IOC_PLAY 0xFFEE7781 +#define IVTV_IOC_PAUSE 0xFFEE7782 +#define IVTV_IOC_FRAMESYNC 0xFFEE7783 +#define IVTV_IOC_GET_TIMING 0xFFEE7784 +#define IVTV_IOC_S_SLOW_FAST 0xFFEE7785 +#define IVTV_IOC_S_START_DECODE 0xFFEE7786 +#define IVTV_IOC_S_STOP_DECODE 0xFFEE7787 +#define IVTV_IOC_S_OSD 0xFFEE7788 +#define IVTV_IOC_GET_FB 0xFFEE7789 + +#define IVTV_IOC_START_DECODE _IOW('@', 29, struct ivtv_cfg_start_decode) +#define IVTV_IOC_STOP_DECODE _IOW('@', 30, struct ivtv_cfg_stop_decode) +#define IVTV_IOC_G_SPEED _IOR('@', 31, struct ivtv_speed) +#define IVTV_IOC_S_SPEED _IOW('@', 32, struct ivtv_speed) +#define IVTV_IOC_DEC_STEP _IOW('@', 33, int) +#define IVTV_IOC_DEC_FLUSH _IOW('@', 34, int) + +/* Framebuffer external API */ +/* NOTE: These must *exactly* match the structures and constants in driver/ivtv.h */ + +struct ivtvfb_ioctl_state_info { + unsigned long status; + unsigned long alpha; +}; + +struct ivtvfb_ioctl_blt_copy_args { + int x, y, width, height, source_offset, source_stride; +}; + +struct ivtvfb_ioctl_dma_host_to_ivtv_args { + void* source; + unsigned long dest_offset; + int count; +}; + +struct ivtvfb_ioctl_get_frame_buffer { + void* mem; 
+ int bytes; + int sizex; + int sizey; +}; + +struct ivtv_osd_coords { + unsigned long offset; + unsigned long max_offset; + int pixel_stride; + int lines; + int x; + int y; +}; + +struct rectangle { + int x0; + int y0; + int x1; + int y1; +}; + +#define IVTVFB_IOCTL_GET_STATE _IOR('@', 1, struct ivtvfb_ioctl_state_info) +#define IVTVFB_IOCTL_SET_STATE _IOW('@', 2, struct ivtvfb_ioctl_state_info) +#define IVTVFB_IOCTL_PREP_FRAME _IOW('@', 3, struct ivtvfb_ioctl_dma_host_to_ivtv_args) +#define IVTVFB_IOCTL_BLT_COPY _IOW('@', 4, struct ivtvfb_ioctl_blt_copy_args) +#define IVTVFB_IOCTL_GET_ACTIVE_BUFFER _IOR('@', 5, struct ivtv_osd_coords) +#define IVTVFB_IOCTL_SET_ACTIVE_BUFFER _IOW('@', 6, struct ivtv_osd_coords) +#define IVTVFB_IOCTL_GET_FRAME_BUFFER _IOR('@', 7, struct ivtvfb_ioctl_get_frame_buffer) + +#define IVTVFB_STATUS_ENABLED (1 << 0) +#define IVTVFB_STATUS_GLOBAL_ALPHA (1 << 1) +#define IVTVFB_STATUS_LOCAL_ALPHA (1 << 2) +#define IVTVFB_STATUS_FLICKER_REDUCTION (1 << 3) + +/* Stream types */ +#define IVTV_STREAM_PS 0 +#define IVTV_STREAM_TS 1 +#define IVTV_STREAM_MPEG1 2 +#define IVTV_STREAM_PES_AV 3 +#define IVTV_STREAM_PES_V 5 +#define IVTV_STREAM_PES_A 7 +#define IVTV_STREAM_DVD 10 +#define IVTV_STREAM_VCD 11 +#define IVTV_STREAM_SVCD 12 +#define IVTV_STREAM_DVD_S1 13 +#define IVTV_STREAM_DVD_S2 14 + +/* Custom v4l controls */ +#ifndef V4L2_CID_PRIVATE_BASE +#define V4L2_CID_PRIVATE_BASE 0x08000000 +#endif + +#define V4L2_CID_IVTV_FREQ (V4L2_CID_PRIVATE_BASE) +#define V4L2_CID_IVTV_ENC (V4L2_CID_PRIVATE_BASE + 1) +#define V4L2_CID_IVTV_BITRATE (V4L2_CID_PRIVATE_BASE + 2) +#define V4L2_CID_IVTV_MONO (V4L2_CID_PRIVATE_BASE + 3) +#define V4L2_CID_IVTV_JOINT (V4L2_CID_PRIVATE_BASE + 4) +#define V4L2_CID_IVTV_EMPHASIS (V4L2_CID_PRIVATE_BASE + 5) +#define V4L2_CID_IVTV_CRC (V4L2_CID_PRIVATE_BASE + 6) +#define V4L2_CID_IVTV_COPYRIGHT (V4L2_CID_PRIVATE_BASE + 7) +#define V4L2_CID_IVTV_GEN (V4L2_CID_PRIVATE_BASE + 8) + +#define IVTV_V4L2_AUDIO_MENUCOUNT 9 /* # of 
v4l controls */ + +#define IVTV_DEC_PRIVATE_BASE (V4L2_CID_PRIVATE_BASE + IVTV_V4L2_AUDIO_MENUCOUNT) + +#define V4L2_CID_IVTV_DEC_SMOOTH_FF (IVTV_DEC_PRIVATE_BASE + 0) +#define V4L2_CID_IVTV_DEC_FR_MASK (IVTV_DEC_PRIVATE_BASE + 1) +#define V4L2_CID_IVTV_DEC_SP_MUTE (IVTV_DEC_PRIVATE_BASE + 2) +#define V4L2_CID_IVTV_DEC_FR_FIELD (IVTV_DEC_PRIVATE_BASE + 3) +#define V4L2_CID_IVTV_DEC_AUD_SKIP (IVTV_DEC_PRIVATE_BASE + 4) +#define V4L2_CID_IVTV_DEC_NUM_BUFFERS (IVTV_DEC_PRIVATE_BASE + 5) +#define V4L2_CID_IVTV_DEC_PREBUFFER (IVTV_DEC_PRIVATE_BASE + 6) + +#define IVTV_V4L2_DEC_MENUCOUNT 7 + +#ifdef SAA7115_REGTEST +/* allow direct access to the saa7115 registers for testing */ +#define SAA7115_GET_REG 0xFFEE7705 +#define SAA7115_SET_REG 0xFFEE7706 + +struct saa7115_reg_t { + u8 reg; + u8 val; +}; +#endif +struct saa7114 { + int norm; + int input; + int enable; + int bright; + int contrast; + int hue; + int sat; + int playback; +}; + + +struct ivtv_cfg_start_decode { + u32 gop_offset; /*Frames in GOP to skip before starting */ + u32 muted_audio_frames; /* #of audio frames to mute */ +}; + +struct ivtv_cfg_stop_decode { + int hide_last; /* 1 = show black after stop, 0 = show last frame */ + u64 pts_stop; /* PTS to stop at */ +}; + +struct ivtv_speed { + int scale; /* 1-?? (50 for now) */ + int smooth; /* Smooth mode when in slow/fast mode */ + int speed; /* 0 = slow, 1 = fast */ + int direction; /* 0 = forward, 1 = reverse (not supportd */ + int fr_mask; /* 0 = I, 1 = I,P, 2 = I,P,B */ + int b_per_gop; /* frames per GOP (reverse only) */ + int aud_mute; /* Mute audio while in slow/fast mode */ + int fr_field; /* 1 = show every field, 0 = show every frame */ + int mute; /* # of audio frames to mute on playback resume */ +}; + +struct ivtv_slow_fast { + int speed; /* 0 = slow, 1 = fast */ + int scale; /* 1-?? 
(50 for now) */ +}; + +struct ivtv_ioctl_fwapi { + u32 cmd; + u32 result; + int args; + u32 data[IVTV_MBOX_MAX_DATA]; +}; + +struct ivtv_ioctl_framesync { + u32 frame; + u64 pts; + u64 scr; +}; + +struct ivtv_audio_meta { + struct v4l2_queryctrl *ctrl; + struct v4l2_querymenu *menu; + u32 *table; + u32 mask; + s32 setting; +}; + +/* For use with IVTV_IOC_G_CODEC and IVTV_IOC_S_CODEC */ +struct ivtv_ioctl_codec { + u32 aspect; + u32 audio_bitmap; + u32 bframes; + u32 bitrate_mode; + u32 bitrate; + u32 bitrate_peak; + u32 dnr_mode; + u32 dnr_spatial; + u32 dnr_temporal; + u32 dnr_type; + u32 framerate; + u32 framespergop; + u32 gop_closure; + u32 pulldown; + u32 stream_type; +}; + +extern int ivtv_debug; +extern int ivtv_pal; + +/* Scatter-Gather array element, used in DMA transfers */ +struct ivtv_SG_element { + u32 src; + u32 dst; + u32 size; +}; + +/* ivtv-specific mailbox template */ +struct ivtv_mailbox { + unsigned long flags; + u32 cmd; + u32 retval; + u32 timeout; + u32 data[IVTV_MBOX_MAX_DATA]; +}; + +struct ivtv_state { + unsigned long freq; /* Current tuned frequency */ + int input; /* Current digitizer input */ + u32 flags;/* tuner, audio */ + u16 type; /* tv or camera */ + u16 norm; /* Current video standard */ + /* more to come! */ +}; + +struct ivtv_buffer { + size_t readpos; + dma_addr_t dma_handle; + struct v4l2_buffer buffer; + struct list_head list; + unsigned long ts; +}; + +struct ivtv_buffer_list { + struct video_device *vdev; /* to get itv from */ + int elements; + struct list_head list; +}; + +struct ivtv_options { + int yuv_fixup; /* Should we re-work YUV to a standard format? */ + int yuv_buffers; /* How many yuv buffers to allocate? */ + int mpg_buffers; /* how many mpg buffers to allocate? */ + int vbi_buffers; /* how many vbi buffers to allocate? */ + int dec_mpg_buffers; /* how many decoder mpg buffers to allocate? */ + int dec_yuv_buffers; /* How many decoder yuv buffers to allocate? 
*/ + int dec_mpg_qlen; /* how many decoder mpg buffers to queue? */ + int dec_yuv_qlen; /* how many decoder yuv buffers to queue? */ + int num_devices; /* how many cards to detect? */ +}; + +struct ivtv_dec_options { + int hide_last_frame; /* 0 = display last frame on stop_decode + * 1 = display black */ + u32 pts_low; /* low bits PTS to stop playback at */ + u32 pts_hi; /* hi bits PTS to stop playback at */ + int gop_offset; /* on start-playback, skip this + * # of frames in the GOP */ + int mute_frames; /* # of audio frames to mute on playback start */ + int decbuffers; /* 0 = 6 buffers, 1 = 9 buffers */ + int prebuffer; /* 0 = no prebuffer, 1 = enabled, see docs */ + struct ivtv_speed speed; +}; + +/* per-stream, s_flags */ +#define IVTV_F_S_DMAP 0 +#define IVTV_F_S_OVERFLOW 1 +#define IVTV_F_S_CAP 2 +#define IVTV_F_S_UNINIT 3 + +/* per-ivtv, i_flags */ +#define IVTV_F_I_BUSY 0 +#define IVTV_F_I_NEEDS_DATA 1 +#define IVTV_F_I_EOS 2 +#define IVTV_F_I_OSD_DMA 3 + +struct ivtv_v4l2_stream { + int buf_size; /* size of buffers this stream */ + long id; + long seq; + int ubytes; /* bytes written back to user this frame */ + unsigned long s_flags; + int v4l_reg_type; + wait_queue_head_t waitq; + struct video_device *v4l2dev; + struct v4l2_format format; + + // FIXME need to make sure no read() if streaming + struct ivtv_buffer_list free_q; /* unused buffers */ + struct ivtv_buffer_list full_q; /* filled buffers */ + struct ivtv_buffer_list dma_q; /* awaiting dma to fill them */ + /* only updated in interrupt time! 
*/ + + int controlcount; /* Number of elements in controls */ + struct v4l2_control *controls; +}; + +struct ivtv_v4l2_table { + int count; + int active; + union { + struct v4l2_input *input; + struct v4l2_output *output; + struct v4l2_audio *audio; + struct v4l2_tuner *tuner; + struct v4l2_control *control; + struct v4l2_standard *std; + } table; +}; + +struct ivtv_v4l2 { + u32 capabilities; + struct ivtv_v4l2_table input; + int audio_output; + struct ivtv_v4l2_table output; + struct ivtv_v4l2_table audio; + struct ivtv_v4l2_table tuner; + struct ivtv_v4l2_table standard; + struct v4l2_capability capability; + struct v4l2_frequency freq; + int streamcount; /* Number of elements in streams */ + struct ivtv_v4l2_stream *streams; + + /* codec settings */ + struct ivtv_ioctl_codec codec; + struct ivtv_audio_meta audio_meta[IVTV_V4L2_AUDIO_MENUCOUNT]; + + /* FIXME probably should get rid of this */ + wait_queue_head_t waitq; +}; + +struct ivtv_open_id { + int open_id; + int type; + struct ivtv *itv; + struct list_head list; +}; + +struct ivtvfb_user_dma_to_device { + int page_count; + struct ivtv_SG_element* sglist; + struct page **map; + dma_addr_t sg_dma_handle; +}; + +/* Stuct to hold info about ivtv cards */ +struct ivtv { + int card_type; /* pvr 250 rev1, 250 rev2, 350 are options so far */ + struct pci_dev *dev; + struct ivtv_options options; + struct ivtv_dec_options dec_options; + int num; /* invalidate during init! */ + int first_read; /* used to clean up stream */ + unsigned long i_flags; + atomic_t capturing; + atomic_t decoding; + struct semaphore sem_lock ____cacheline_aligned_in_smp; + spinlock_t lock ____cacheline_aligned_in_smp; + + long open_id; /* incremented each time an open occurs + used as unique ID */ + + /* FIXME should use part of v4l2_performace instead */ + unsigned long trans_id; + + struct tasklet_struct dma_sched_tq; + + u32 enc_fw_ver, dec_fw_ver, base_addr; /*is base_addr needed? 
*/ + u32 irqmask; + + struct ivtv_mailbox *enc_mbox, *dec_mbox; + struct semaphore enc_msem ____cacheline_aligned_in_smp; + struct semaphore dec_msem ____cacheline_aligned_in_smp; + + unsigned char card_rev, *io_mem, *reg_mem; + + wait_queue_head_t cap_w, vsync_w; + + /*FIXME perhaps move these to the v4l2_stream struct */ + struct ivtv_SG_element *SGarray, *DSGarray; + dma_addr_t SG_handle, DSG_handle; + + /* Decoder */ + struct ivtv_ioctl_framesync dec_timestamp; + wait_queue_head_t dec_master_w; + struct timer_list dec_timeout; + + /* Framebuffer DMA support */ + struct ivtvfb_user_dma_to_device* user_dma_to_device_state; + int fb_id; + + /* i2c */ + struct i2c_adapter i2c_adap; + struct i2c_algo_bit_data i2c_algo; + struct i2c_client i2c_client; + int i2c_state, i2c_rc, i2c_command; + struct i2c_client *i2c_clients[I2C_CLIENTS_MAX]; + + /* v4l2 and User settings*/ + struct ivtv_state state; + struct ivtv_v4l2 v4l2; + struct list_head client_list; +}; + +/* Globals */ +extern struct ivtv ivtv_cards[]; +extern int ivtv_cards_active; +extern int dec_yuv_buffers; +extern int dec_mpg_buffers; +extern int yuv_buffers; +extern int mpg_buffers; +extern int vbi_buffers; +extern spinlock_t ivtv_lock; + +/*==============Prototypes==================*/ +/* FIXME some of these proably need fine-tuning + * to avoid warnings + */ + +void ivtv_setscl(void *data, int state); +void ivtv_setsda(void *data, int state); +int ivtv_getscl(void *data); +int ivtv_getsda(void *data); + +void ivtv_call_i2c_client(struct ivtv *itv, int addr, unsigned int cmd, void *arg); +int ivtv_i2c_direct(struct ivtv *itv, int addr, const unsigned char *regs); + +void ivtv_inc(struct i2c_adapter *adapter); +void ivtv_dec(struct i2c_adapter *adapter); + +/* init + register i2c algo-bit adapter */ +int __devinit init_ivtv_i2c(struct ivtv *itv); +void __devexit exit_ivtv_i2c(struct ivtv *itv); + +/* end i2c stuff */ + +/* Initialization stuff */ +int ivtv_firmware_copy(struct ivtv *itv); + +/* Unload stuff 
*/ +void ivtv_v4l2_cleanup(struct ivtv *itv); +int ivtv_stop_firmware(struct ivtv *itv); +void ivtv_zero_usage_count(void); +void ivtv_flush_queues(struct ivtv_open_id *id); + +/* API Related */ +int ivtv_find_firmware_mailbox(struct ivtv *itv); +int ivtv_get_free_mailbox(struct ivtv_mailbox *mbox); +int ivtv_api_call(struct ivtv_mailbox *mbox, u32 cmd, struct semaphore *sem, + int elements, u32 data[]); +int ivtv_api_getresult_nosleep(struct ivtv_mailbox *mbox, u32 *result, u32 data[]); +int ivtv_api_getresult(struct ivtv_mailbox *mbox, struct semaphore *sem, + u32 *result, u32 data[]); +int ivtv_api(struct ivtv_mailbox *mbox, struct semaphore *sem, int cmd, + u32 *result, int args, u32 data[]); +extern int __ivtv_api(struct ivtv_mailbox *mbox, int cmd, + u32 *result, int args, u32 data[]); +int ivtv_v4l2_setup(struct ivtv *itv); + +/* Capture related */ +int ivtv_stop_decode(struct ivtv_open_id *id); +int ivtv_stop_all_captures(struct ivtv *itv); +int ivtv_stop_capture(struct ivtv_open_id *id); +long ivtv_read(struct ivtv_open_id *id, char *ubuf, size_t count, int block); +int ivtv_get_timing_info(struct ivtv *itv, struct ivtv_ioctl_framesync *info); +ssize_t ivtv_write(struct ivtv_open_id *id, const char *buf, size_t count, + int block); +unsigned int ivtv_poll(struct file *filp, poll_table *wait); +unsigned int ivtv_dec_poll(struct file *filp, poll_table *wait); + /* makes a queue complete with 'length' items */ + /* NOTE: This returns the # of buffers allocated */ +extern int ivtv_init_queue(struct ivtv *itv,struct ivtv_buffer_list *queue, + int length, enum v4l2_buf_type type); + /* moves all items in queue 'src' to queue 'dst' */ +extern int ivtv_move_queue(struct ivtv *itv, struct ivtv_buffer_list *src, + struct ivtv_buffer_list *dst); + +/* Hardware/IRQ */ +extern void ivtv_set_irq_mask(struct ivtv *itv, unsigned long mask); +extern void ivtv_clear_irq_mask(struct ivtv *itv, unsigned long mask); +extern void ivtv_sleep_timeout(int timeout); + +/* 
Testing/Debugging */ +extern int ivtv_close(struct ivtv_open_id *id); + +/* debug stuff, to get the locking right */ +#ifndef WARN_ON +#define WARN_ON(condition) do { \ + if (unlikely((condition)!=0)) { \ + printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \ + dump_stack(); \ + } \ +} while (0) +#endif + +#define IVTV_ASSERT(x) WARN_ON(!(x)) + +static inline int ivtv_sem_count(struct semaphore *sem) +{ + return atomic_read(&sem->count); +} + +#endif diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/msp3400.c current/drivers/media/video/msp3400.c --- reference/drivers/media/video/msp3400.c 2004-02-18 14:56:58.000000000 -0800 +++ current/drivers/media/video/msp3400.c 2004-04-09 21:41:40.000000000 -0700 @@ -58,6 +58,7 @@ static int debug = 0; /* debug out static int once = 0; /* no continous stereo monitoring */ static int amsound = 0; /* hard-wire AM sound at 6.5 Hz (france), the autoscan seems work well only with FM... */ +static int standard = 1; /* Override auto detect of audio standard, if needed. */ static int simple = -1; /* use short programming (>= msp3410 only) */ static int dolby = 0; @@ -113,6 +114,7 @@ struct msp3400c { MODULE_PARM(once,"i"); MODULE_PARM(debug,"i"); MODULE_PARM(simple,"i"); +MODULE_PARM(standard,"i"); MODULE_PARM(amsound,"i"); MODULE_PARM(dolby,"i"); @@ -384,7 +386,7 @@ static void msp3400c_setvolume(struct i2 if (!muted) { vol = (left > right) ? left : right; - val = (vol * 0x73 / 65535) << 8; + val = (vol * 0x7F / 65535) << 8; } if (vol > 0) { balance = ((right-left) * 127) / vol; @@ -395,8 +397,11 @@ static void msp3400c_setvolume(struct i2 muted ? "on" : "off", left, right, val>>8, balance); msp3400c_write(client,I2C_MSP3400C_DFP, 0x0000, val); /* loudspeaker */ msp3400c_write(client,I2C_MSP3400C_DFP, 0x0006, val); /* headphones */ - /* scart - on/off only */ - msp3400c_write(client,I2C_MSP3400C_DFP, 0x0007, val ? 0x4000 : 0); + // scart - on/off only - AEW why? 
undone NOTE values below + // 40000 are mostly useless, 59343 is a good default (0x73) + msp3400c_write(client,I2C_MSP3400C_DFP, 0x0007, + muted ? 0x1 : (val | 0x1)); + msp3400c_write(client,I2C_MSP3400C_DFP, 0x0001, balance << 8); } @@ -473,6 +478,20 @@ static void msp3400c_setmode(struct i2c_ } } +// given a bitmask of VIDEO_SOUND_XXX returns the "best" in the bitmask +static int best_video_sound(int mode) { + int ret_cap = VIDEO_SOUND_MONO; + if (mode & VIDEO_SOUND_STEREO) { + ret_cap = VIDEO_SOUND_STEREO; + } else if (mode & VIDEO_SOUND_LANG1) { + ret_cap = VIDEO_SOUND_LANG1; + } else if (mode & VIDEO_SOUND_LANG2) { + ret_cap = VIDEO_SOUND_LANG2; + } + return ret_cap; +} + + /* turn on/off nicam + stereo */ static void msp3400c_setstereo(struct i2c_client *client, int mode) { @@ -547,7 +566,7 @@ static void msp3400c_setstereo(struct i2 } /* switch audio */ - switch (mode) { + switch (best_video_sound(mode)) { case VIDEO_SOUND_STEREO: src = 0x0020 | nicam; #if 0 @@ -1056,7 +1075,7 @@ static int msp3410d_thread(void *data) switch (msp->norm) { case VIDEO_MODE_PAL: mode = 0x1003; - std = 1; + std = standard; break; case VIDEO_MODE_NTSC: /* BTSC */ mode = 0x2003; @@ -1064,15 +1083,19 @@ static int msp3410d_thread(void *data) break; case VIDEO_MODE_SECAM: mode = 0x0003; - std = 1; + std = standard; break; case VIDEO_MODE_RADIO: mode = 0x0003; std = 0x0040; break; + case VIDEO_MODE_AUTO: + mode = 0x2003; + std = standard; + break; default: mode = 0x0003; - std = 1; + std = standard; break; } msp3400c_write(client, I2C_MSP3400C_DEM, 0x30, mode); @@ -1189,8 +1212,11 @@ static int msp3410d_thread(void *data) msp->nicam_on = 0; msp->watch_stereo = 1; break; - } - + } + // AEW a true reset has probably messed with our ACB register + // we need to restore this. 
+ msp3400c_write(client, I2C_MSP3400C_DFP, 0x0013, msp->acb); + /* unmute + restore dfp registers */ msp3400c_setbass(client, msp->bass); msp3400c_settreble(client, msp->treble); @@ -1257,12 +1283,12 @@ static int msp_attach(struct i2c_adapter } memset(msp,0,sizeof(struct msp3400c)); - msp->left = 65535; - msp->right = 65535; + msp->left = 59343; + msp->right = 59343; msp->bass = 32768; msp->treble = 32768; msp->input = -1; - msp->muted = 1; + /* msp->muted = 1; */ for (i = 0; i < DFP_COUNT; i++) msp->dfp_regs[i] = -1; @@ -1480,7 +1506,7 @@ static int msp_command(struct i2c_client struct video_audio *va = arg; dprintk(KERN_DEBUG "msp34xx: VIDIOCGAUDIO\n"); - va->flags |= VIDEO_AUDIO_VOLUME | + va->flags = VIDEO_AUDIO_VOLUME | VIDEO_AUDIO_BASS | VIDEO_AUDIO_TREBLE | VIDEO_AUDIO_MUTABLE; @@ -1532,6 +1558,7 @@ static int msp_command(struct i2c_client dprintk(KERN_DEBUG "msp34xx: VIDIOCSCHAN\n"); msp->norm = vc->norm; + msp_wake_thread(client); break; } case VIDIOCSFREQ: @@ -1541,14 +1568,22 @@ static int msp_command(struct i2c_client msp_wake_thread(client); break; } + case MSP_SET_MATRIX: + { + struct msp_matrix *mspm = arg; - default: - /* nothing */ + dprintk(KERN_DEBUG "msp34xx: MSP_SET_MATRIX\n"); + msp3400c_set_scart(client, mspm->input, mspm->output); break; } - return 0; -} + default: + /* nothing */ + break; + } + return 0; +} + /* ----------------------------------------------------------------------- */ static int msp3400_init_module(void) diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/msp3400.h current/drivers/media/video/msp3400.h --- reference/drivers/media/video/msp3400.h 2002-12-09 18:46:22.000000000 -0800 +++ current/drivers/media/video/msp3400.h 2004-04-09 21:41:40.000000000 -0700 @@ -8,7 +8,15 @@ struct msp_dfpreg { int value; }; +struct msp_matrix { + int input; + int output; +}; + #define MSP_SET_DFPREG _IOW('m',15,struct msp_dfpreg) #define MSP_GET_DFPREG _IOW('m',16,struct msp_dfpreg) +/* ioctl for MSP_SET_MATRIX will 
have to be registered */ +#define MSP_SET_MATRIX _IOW('m',17,struct msp_matrix) + #endif /* MSP3400_H */ diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/saa7115.c current/drivers/media/video/saa7115.c --- reference/drivers/media/video/saa7115.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/media/video/saa7115.c 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,1130 @@ +/* + * saa7114 - Philips SAA7114H video decoder driver version 0.0.1 + * + * Copyright (C) 2002 Maxim Yevtyushkin + * + * Based on saa7111 driver by Dave Perks + * + * Copyright (C) 1998 Dave Perks + * + * Slight changes for video timing and attachment output by + * Wolfgang Scherr + * + * Changes by Ronald Bultje + * - moved over to linux>=2.4.x i2c protocol (1/1/2003) + * + * Changes by Kevin Thayer + * - changed to saa7115. (2/17/2003) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("Philips SAA7115 video decoder driver"); +MODULE_AUTHOR("Kevin Thayer"); +MODULE_LICENSE("GPL"); + +#include +#include + +#ifndef I2C_DRIVERID_SAA7114 +#warning Using temporary hack for missing I2C driver-ID for saa7114 +#define I2C_DRIVERID_SAA7114 I2C_DRIVERID_EXP1 +#endif + +#include + +static int debug = 1; +MODULE_PARM(debug, "i"); +MODULE_PARM_DESC(debug, "Debug level (0-1)"); + +/* FIXME need to get this properly allocated + * also defined in ivtv.h, so change it there too */ +#define DECODER_SET_SIZE 76598 +#define DECODER_GET_PICTURE 76599 + +/* Need to be able to set the audio bitrates */ +#define DECODER_SET_AUDIO 0xFFEE7707 +#define DECODER_AUDIO_32_KHZ 0 +#define DECODER_AUDIO_441_KHZ 1 +#define DECODER_AUDIO_48_KHZ 2 + +#ifdef SAA7115_REGTEST +/* allow direct access to the saa7115 registers for testing */ +#define SAA7115_GET_REG 0xFFEE7705 +#define SAA7115_SET_REG 0xFFEE7706 + +struct saa7115_reg_t { + u8 reg; + u8 val; +}; +#endif + +#define dprintk(num, format, args...) 
\ + do { \ + if (debug >= num) \ + printk(format, ##args); \ + } while (0) + +/* ----------------------------------------------------------------------- */ + +static u8 readreg(struct i2c_client *client, unsigned char reg) +{ + struct i2c_adapter *adap = client->adapter; + unsigned char mm1[] = { 0x1e }; + unsigned char mm2[] = { 0x00 }; + struct i2c_msg msgs[2]; + + msgs[0].flags = 0; + msgs[1].flags = I2C_M_RD; + msgs[0].addr = msgs[1].addr = client->addr; + mm1[0] = reg; + msgs[0].len = 1; + msgs[1].len = 1; + msgs[0].buf = mm1; + msgs[1].buf = mm2; + i2c_transfer(adap, msgs, 2); + + return mm2[0]; +} + +struct saa7114 { + int norm; + int input; + int enable; + int bright; + int contrast; + int hue; + int sat; + int playback; + int audio; +}; + +#define I2C_SAA7114 0x42 +#define I2C_SAA7114A 0x40 + +#define I2C_DELAY 10 + +//#define SAA_7114_NTSC_HSYNC_START (-3) +//#define SAA_7114_NTSC_HSYNC_STOP (-18) + +#define SAA_7114_NTSC_HSYNC_START (-17) +#define SAA_7114_NTSC_HSYNC_STOP (-32) + +//#define SAA_7114_NTSC_HOFFSET (5) +#define SAA_7114_NTSC_HOFFSET (6) +#define SAA_7114_NTSC_VOFFSET (10) +#define SAA_7114_NTSC_WIDTH (720) +#define SAA_7114_NTSC_HEIGHT (480) /* was 250 */ + +#define SAA_7114_SECAM_HSYNC_START (-17) +#define SAA_7114_SECAM_HSYNC_STOP (-32) + +#define SAA_7114_SECAM_HOFFSET (2) +#define SAA_7114_SECAM_VOFFSET (10) +#define SAA_7114_SECAM_WIDTH (720) +#define SAA_7114_SECAM_HEIGHT (300) + +#define SAA_7114_PAL_HSYNC_START (-17) +#define SAA_7114_PAL_HSYNC_STOP (-32) + +#define SAA_7114_PAL_HOFFSET (2) +#define SAA_7114_PAL_VOFFSET (10) +#define SAA_7114_PAL_WIDTH (720) +#define SAA_7114_PAL_HEIGHT (300) + +#define SAA_7114_VERTICAL_CHROMA_OFFSET 0 //0x50504040 +#define SAA_7114_VERTICAL_LUMA_OFFSET 0 + +#define REG_ADDR(x) (((x) << 1) + 1) +#define LOBYTE(x) ((unsigned char)((x) & 0xff)) +#define HIBYTE(x) ((unsigned char)(((x) >> 8) & 0xff)) +#define LOWORD(x) ((unsigned short int)((x) & 0xffff)) +#define HIWORD(x) ((unsigned short int)(((x) 
>> 16) & 0xffff)) + +/* ----------------------------------------------------------------------- */ + +static inline int saa7114_write(struct i2c_client *client, u8 reg, u8 value) +{ +// struct saa7114 *decoder = i2c_get_clientdata(client); + return i2c_smbus_write_byte_data(client, reg, value); +} + +static int writeregs(struct i2c_client *client, const unsigned char *regs) +{ + unsigned char reg, data; + + while (*regs != 0x00) { + reg = *(regs++); + data = *(regs++); + if (saa7114_write(client, reg, data) < 0) + return -1; + } + return 0; +} + +static inline int saa7114_read(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + +/* ----------------------------------------------------------------------- */ + +static const unsigned char init_saa7115_auto_input[] = { + 0x01, 0x08, //(was 0x48) // 0x08: white peak control enabled, 0x48: white peak control disabled + 0x03, 0x2C, //was 0x20 // 0x20: automatic gain control, 0x2c: user programmable gain + 0x04, 0x90, // analog gain set to 0 + 0x05, 0x90, // analog gain set to 0 + 0x06, 0xEB, // horiz sync begin = -21 + 0x07, 0xE0, // horiz sync stop = -17 + + // done in misc +// 0x09, 0x40, //80 for svideo // 0x40: use luminance comb filter, 0x80: don't use ... 
+ + 0x0A, 0x80, //i2c dump ends up at 96 (was 80) // decoder brightness, 0x80 is itu standard + 0x0B, 0x44, // decoder contrast, 0x44 is itu standard + 0x0C, 0x40, // decoder saturation, 0x40 is itu standard + 0x0D, 0x00, //i2c dump ends up at 04 (was 00) // chrominance hue control + 0x0F, 0x24, //i2c dump says 0x30 (was 0x2A) // chrominance gain control , should be 0x00 for agc, otherwise 0x80+0x24: 0xA4 + 0x10, 0x06, + 0x11, 0x00, + 0x12, 0x9D, //i2c dump says 0x9D (was 0x00) + 0x13, 0x80, //" 0x80 (was 0x00) + 0x14, 0x01, //" 0x01 (was 0x01) + 0x15, 0x04, //" 0x00 (was 0x11) //should also be moved to NTSC/PAL VGATE start + /* moved to NTSC/PAL sections + 0x16, 0x11, //" 0x11 (was 0xFE) // VGATE stop + */ + 0x17, 0x98, //" 0x98 (was 0xD8) //may set to 98 // VGATE MSB and other values + 0x18, 0x40, // raw data gain 0x00 = nominal + 0x19, 0x80, // raw data offset 0x80 = 0 LSB + 0x1A, 0x77, // color killer level control 0x77 = recommended + 0x1B, 0x42, // misc chroma control 0x42 = recommended + 0x1C, 0xA9, // combfilter control 0xA9 = recommended + 0x1D, 0x01, // combfilter control 0x01 = recommended + 0x88, 0xD0, //reset device // set programmed, reset + 0x88, 0xF0, //Set device programmed, all in operational mode // set programmed, active ?programmed should be 0? + 0x00, 0x00 // ? not necessary, version readback register +}; + +/* ============== SAA7715 VIDEO templates ============= */ + +static const unsigned char cfg_saa7115_reset_scaler[] = { + 0x87, 0x00, //Disable I-port output + 0x88, 0x0B, //reset scaler (was 0xD0) // ?should be 0xD0 + 0x88, 0xF0, //activate scaler + 0x87, 0x01, //Enable I-port output // what about high bits? how is ICLK used? 
+ 0x00, 0x00 +}; +static const unsigned char cfg_saa7115_NTSC_fullres_x[] = { + 0xCC, 0xD0, //hsize low (output) //hor output window size = 0x2d0 = 720 + 0xCD, 0x02, //hsize hi (output) + + 0xD0, 0x01, // down scale = 1 + 0xD1, 0x00, // prescale accumulation length = 1 + 0xD2, 0x00, // dc gain and fir prefilter control + 0xD4, 0x80, //Lum Brightness // nominal value = 0x80 + 0xD5, 0x40, //Lum contrast // nominal value = 0x40 + 0xD6, 0x40, //Chroma satur. // nominal value = 0x80 + 0xD8, 0x00, // hor lum scaling 0x0400 = 1 + 0xD9, 0x04, + 0xDA, 0x00, //H-phase offset Luma = 0 + 0xDC, 0x00, // hor chrom scaling 0x0200. must be hor lum scaling /2 + 0xDD, 0x02, //H-scaling incr chroma + 0xDE, 0x00, //H-phase offset chroma // must be offset luma /2 + + 0x00, 0x00 +}; +static const unsigned char cfg_saa7115_NTSC_fullres_y[] = { + 0xCE, 0xFD, //vsize low (output) was FD // ver output window size = 253 ??240 + 0xCF, 0x00, //vsize hi (output) + + 0xE0, 0x00, //V-scaling incr luma low 0x0400 = 1 + 0xE1, 0x04, //" hi + 0xE2, 0x00, //V-scaling incr chroma low // must be same as luma + 0xE3, 0x04, //" hi + 0xE4, 0x01, //V-scaling mode control // no mirroring, higher order accumulation + 0xE8, 0x00, //V-phase offset chroma 00 //?only regs E8 and EC necessary? + 0xE9, 0x00, //V-phase offset chroma 01 + 0xEA, 0x00, //V-phase offset chroma 10 + 0xEB, 0x00, //V-phase offset chroma 11 + 0xEC, 0x00, //V-phase offset luma 00 + 0xED, 0x00, //V-phase offset luma 01 + 0xEE, 0x00, //V-phase offset luma 10 + + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_NTSC_video[] = { + 0x80, 0x00, //reset tasks + 0x88, 0x0B, //reset scaler (was 0xD0) + + 0x16, 0x11, //" 0x11 (was 0xFE) //VGATE pulse stop + + 0x08, 0x68, //i2c dump says 0x68 (was 0xB0) NTSC ONLY // 0xBO: auto detection, 0x68 = NTSC + 0x0E, 0x07, //i2c dump says 0x0d (was 0x07) // lots of different stuff... 
video autodetection is on + + 0xC0, 0x00, //Task Handling Control (was 0x00) + 0xC1, 0x08, //X-port formats/config + 0xC2, 0x00, //Input Ref. signal Def. + 0xC3, 0x80, //I-port config (was 0x80) + 0xC4, 0x02, //hoffset low (input) // 0x0002 is minimum + 0xC5, 0x00, //hoffset hi (input) + 0xC6, 0xD0, //hsize low (input) // 0x02d0 = 720 + 0xC7, 0x02, //hsize hi (input) + 0xC8, 0x14, //voff low was 0x14, changing to 0x0E (14) // 0x0014 = 20 + 0xC9, 0x00, //voff hi + 0xCA, 0xFD, //vsize low (input) was FD // 0x00fd = 253 + 0xCB, 0x00, //vsize hi (input) + + 0xF0, 0xAD, //Set PLL Register. NTSC 525 lines per frame, 27 MHz + 0xF1, 0x05, //low bit with 0xF0, (was 0x05) + 0xF5, 0xAD, //Set pulse generator register + 0xF6, 0x01, + + 0x87, 0x00, //Disable I-port output + 0x88, 0x0B, //reset scaler (was 0xD0) + 0x80, 0x20, //Activate only task "B", continuous mode (was 0xA0) + 0x88, 0xF0, //activate scaler + 0x87, 0x01, //Enable I-port output + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_PAL_fullres_x[] = { + 0xCC, 0xD0, //hsize low (output) //720 same as NTSC + 0xCD, 0x02, //hsize hi (output) + + 0xD0, 0x01, + 0xD1, 0x00, + 0xD2, 0x00, + 0xD4, 0x80, //Lum Brightness + 0xD5, 0x40, //Lum contrast + 0xD6, 0x40, //Chroma satur. 
+ 0xD8, 0x00, + 0xD9, 0x04, + 0xDA, 0x00, //H-phase offset Luma + 0xDC, 0x00, + 0xDD, 0x02, //H-scaling incr chroma + 0xDE, 0x00, //H-phase offset chroma + + 0x00, 0x00 +}; +static const unsigned char cfg_saa7115_PAL_fullres_y[] = { + 0xCE, 0x20, //vsize low (output) // 0x0120 = 288 + 0xCF, 0x01, //vsize hi (output) + + 0xE0, 0x00, //V-scaling incr luma low + 0xE1, 0x04, //" hi + 0xE2, 0x00, //V-scaling incr chroma low + 0xE3, 0x04, //" hi + 0xE4, 0x01, //V-scaling mode control + 0xE8, 0x00, //V-phase offset chroma 00 + 0xE9, 0x00, //V-phase offset chroma 01 + 0xEA, 0x00, //V-phase offset chroma 10 + 0xEB, 0x00, //V-phase offset chroma 11 + 0xEC, 0x00, //V-phase offset luma 00 + 0xED, 0x00, //V-phase offset luma 01 + 0xEE, 0x00, //V-phase offset luma 10 + 0xEF, 0x00, //V-phase offset luma 11 + + 0x00, 0x00 +}; + +/* FIXME need to input proper height/width */ +static const unsigned char cfg_saa7115_PAL_video[] = { + 0x80, 0x00, //reset tasks + 0x88, 0x0B, //reset scaler (was 0xD0) + + 0x16, 0x15, //" 0x11 (was 0xFE) + + 0x08, 0x28, //i2c dump says 0x28 (was 0xB0) PAL ONLY // 0x28 = PAL + 0x0E, 0x07, //i2c dump says 0x0d (was 0x07) + + 0xC0, 0x00, //Task Handling Control (was 0x00) + 0xC1, 0x08, //X-port formats/config + 0xC2, 0x00, //Input Ref. signal Def. + 0xC3, 0x80, //I-port config (was 0x80) + 0xC4, 0x00, //hoffset low (input) + 0xC5, 0x00, //hoffset hi (input) + 0xC6, 0xD0, //hsize low (input) // 0x02D0 = 720 + 0xC7, 0x02, //hsize hi (input) + 0xC8, 0x14, //voffset low (input) low was 0x14, changing to 0x0E (14) + 0xC9, 0x00, //voffset hi (input) + 0xCA, 0x20, //vsize low (input) // 288 + 0xCB, 0x01, //vsize hi (input) + + 0xF0, 0xB0, //Set PLL Register. 
PAL 625 lines per frame, 27 MHz + 0xF1, 0x05, //low bit with 0xF0, (was 0x05) + 0xF5, 0xB0, //Set pulse generator register + 0xF6, 0x01, + + 0x87, 0x00, //Disable I-port output + 0x88, 0x0B, //reset scaler (was 0xD0) + 0x80, 0xA0, //Activate only task "B", continuous mode (was 0xA0) + 0x88, 0xF0, //activate scaler + 0x87, 0x01, //Enable I-port output + 0x00, 0x00 +}; + +/* ============== SAA7715 VIDEO templates (end) ======= */ + +static const unsigned char init_saa7115_misc[] = { + 0x38, 0x03, // audio stuff + 0x39, 0x10, + 0x3A, 0x00, + +// 0x80, 0x00, // set below + 0x81, 0x01, //reg 0x15,0x16 define blanking window + 0x82, 0x00, + 0x83, 0x01, //was 0x01 // I port settings + 0x84, 0x20, + 0x85, 0x21, + 0x86, 0xC5, + 0x87, 0x01, +// 0x88, 0xD0, // unnecessary + +// 0xF0, 0xAD, //this goes in PAL/NTSC video +// 0xF1, 0x05, + 0xF2, 0x50, // crystal clock = 24.576 MHz, target = 27MHz + 0xF3, 0x46, + 0xF4, 0x00, +// 0xF5, 0xAD, //this goes in PAL/NTSC video +// 0xF6, 0x01, + 0xF7, 0x4B, // not the recommended settings! + 0xF8, 0x00, + 0xF9, 0x4B, + 0xFA, 0x00, + 0xFB, 0x4B, +// 0xFC, 0x00, // unused +// 0xFD, 0x00, +// 0xFE, 0x00, + 0xFF, 0x88, // PLL2 lock detection settings: 71 lines 50% phase error + +// 0x88, 0xF0, // unnecessary + +// 0x0D, 0x04, // already set in auto_input +// 0x0C, 0x40, +// 0x0A, 0x96, +// 0x0B, 0x41, +// 0x98, 0x05, // belongs to task A; unnecessary +/* Turn off VBI */ + 0x40, 0x00, + 0x41, 0xFF, + 0x42, 0xFF, + 0x43, 0xFF, + 0x44, 0xFF, + 0x45, 0xFF, + 0x46, 0xFF, + 0x47, 0xFF, + 0x48, 0xFF, + 0x49, 0xFF, + 0x4A, 0xFF, + 0x4B, 0xFF, + 0x4C, 0xFF, + 0x4D, 0xFF, + 0x4E, 0xFF, + 0x4F, 0xFF, + 0x50, 0xFF, + 0x51, 0xFF, + 0x52, 0xFF, + 0x53, 0xFF, + 0x54, 0xFF, + 0x55, 0xFF, + 0x56, 0xFF, + 0x57, 0xFF, + 0x58, 0x00, + 0x59, 0x47, + 0x5A, 0x06, + 0x5B, 0x88, + 0x5D, 0xBF, + 0x5E, 0x35, + + 0x02, 0x84, //input tuner -> input 4, amplifier active + 0x09, 0x53, //chrom trap for tuner // special tuner stuff? 
+ + 0x80, 0x20, //was 0x30 // 0x20 clock from PLL2, 0x30 clock from ICLK + 0x88, 0xD0, + 0x88, 0xF0, + 0x00, 0x00 +}; + +/* ============== SAA7715 AUDIO settings ============= */ +static const unsigned char cfg_saa7115_48_audio[] = { + 0x34, 0xCE, // 48khz + 0x35, 0xFB, // " + 0x36, 0x30, // " + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_441_audio[] = { + 0x34, 0xF2, // 44.1khz + 0x35, 0x00, // " + 0x36, 0x2D, // " + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_32_audio[] = { + 0x34, 0xDF, // 32.0khz + 0x35, 0xA7, // " + 0x36, 0x20, // " + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_NTSC_48_audio[] = { + 0x30, 0xCD, // 48.0khz NTSC + 0x31, 0x20, // " + 0x32, 0x03, // " + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_PAL_48_audio[] = { + 0x30, 0x00, // 48.0khz PAL + 0x31, 0xC0, // " + 0x32, 0x03, // " + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_NTSC_441_audio[] = { + 0x30, 0xBC, // 44.1khz NTSC + 0x31, 0xDF, // " + 0x32, 0x02, // " + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_PAL_441_audio[] = { + 0x30, 0x00, // 44.1khz PAL + 0x31, 0x72, // " + 0x32, 0x03, // " + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_NTSC_32_audio[] = { + 0x30, 0xDE, // 32.0khz NTSC + 0x31, 0x15, // " + 0x32, 0x02, // " + 0x00, 0x00 +}; + +static const unsigned char cfg_saa7115_PAL_32_audio[] = { + 0x30, 0x00, // 32.0khz PAL + 0x31, 0x80, // " + 0x32, 0x02, // " + 0x00, 0x00 +}; + +/* ============ SAA7715 AUDIO settings (end) ============= */ + +static int +saa7114_command(struct i2c_client *client, unsigned int cmd, void *arg) +{ + struct saa7114 *decoder = i2c_get_clientdata(client); + + switch (cmd) { + + case 0: + //dprintk(1, KERN_INFO "%s: writing init\n", client->name); + //saa7114_write_block(client, init, sizeof(init)); + break; +#ifdef SAA7115_REGTEST + /* ioctls to allow direct access to the saa7115 registers for testing */ + case SAA7115_GET_REG: + { + struct saa7115_reg_t *saa7115_reg = + 
(struct saa7115_reg_t *)arg; + + saa7115_reg->val = + saa7114_read(client, saa7115_reg->reg); + break; + } + case SAA7115_SET_REG: + { + struct saa7115_reg_t *saa7115_reg = + (struct saa7115_reg_t *)arg; + + saa7114_write(client, saa7115_reg->reg, + saa7115_reg->val); + break; + } +#endif + case DECODER_SET_SIZE: + { + /* Used video_window because it has height/width and is + * already defined */ + struct video_window *wind = arg; + int HPSC, HFSC; + int VSCY, Vsrc; + + dprintk(1, KERN_INFO "%s: decoder set size\n", + client->name); + + /* FIXME need better bounds checking here */ + if ((wind->width < 1) || (wind->width > 1440)) + return -EINVAL; + if ((wind->height < 1) || (wind->height > 960)) + return -EINVAL; + + /* probably have a valid size, let's set it */ +/* Set output width/height */ + /* width */ + saa7114_write(client, 0xCC, (u8) (wind->width & 0xFF)); + saa7114_write(client, 0xCD, + (u8) ((wind->width >> 8) & 0xFF)); + /* height */ + saa7114_write(client, 0xCE, (u8) (wind->height & 0xFF)); + saa7114_write(client, 0xCF, + (u8) ((wind->height >> 8) & 0xFF)); + +/* Scaling settings */ + /* Hprescaler is floor(inres/outres) */ + /* FIXME hardcoding input res */ + if (wind->width != 720) { + HPSC = (int)(720 / wind->width); + HFSC = + (int)((1024 * 720) / (HPSC * wind->width)); + + printk("Hpsc: 0x%05x, Hfsc: 0x%05x\n", HPSC, + HFSC); + /* FIXME hardcodes to "Task B" + * write H prescaler integer */ + saa7114_write(client, 0xD0, (u8) (HPSC & 0x3F)); + + /* write H fine-scaling (luminance) */ + saa7114_write(client, 0xD8, (u8) (HFSC & 0xFF)); + saa7114_write(client, 0xD9, + (u8) ((HFSC >> 8) & 0xFF)); + /* write H fine-scaling (chrominance) + * must be lum/2, so i'll just bitshift :) */ + saa7114_write(client, 0xDC, + (u8) ((HFSC >> 1) & 0xFF)); + saa7114_write(client, 0xDD, + (u8) ((HFSC >> 9) & 0xFF)); + } else { + if (decoder->norm != VIDEO_MODE_NTSC) { + printk("Setting full PAL width\n"); + writeregs(client, + cfg_saa7115_PAL_fullres_x); + } else { + 
printk("Setting full NTSC width\n"); + writeregs(client, + cfg_saa7115_NTSC_fullres_x); + } + } + + Vsrc = 480; + if (decoder->norm != VIDEO_MODE_NTSC) + Vsrc = 576; + + if (wind->height != Vsrc) { + VSCY = (int)((1024 * Vsrc) / wind->height); + printk("Vsrc: %d, Vscy: 0x%05x\n", Vsrc, VSCY); + /* write V fine-scaling (luminance) */ + saa7114_write(client, 0xE0, (u8) (VSCY & 0xFF)); + saa7114_write(client, 0xE1, + (u8) ((VSCY >> 8) & 0xFF)); + /* write V fine-scaling (chrominance) */ + saa7114_write(client, 0xE2, (u8) (VSCY & 0xFF)); + saa7114_write(client, 0xE3, + (u8) ((VSCY >> 8) & 0xFF)); + } else { + if (decoder->norm != VIDEO_MODE_NTSC) { + printk("Setting full PAL height\n"); + writeregs(client, + cfg_saa7115_PAL_fullres_y); + } else { + printk("Setting full NTSC height\n"); + writeregs(client, + cfg_saa7115_NTSC_fullres_y); + } + } + + writeregs(client, cfg_saa7115_reset_scaler); + break; + } + case DECODER_DUMP: + { + int i; + + dprintk(1, KERN_INFO "%s: decoder dump\n", + client->name); + + for (i = 0; i < 32; i += 16) { + int j; + + printk(KERN_DEBUG "%s: %03x", client->name, i); + for (j = 0; j < 16; ++j) { + printk(" %02x", + saa7114_read(client, i + j)); + } + printk("\n"); + } + } + break; + + case DECODER_GET_CAPABILITIES: + { + struct video_decoder_capability *cap = arg; + + dprintk(1, KERN_DEBUG "%s: decoder get capabilities\n", + client->name); + + cap->flags = VIDEO_DECODER_PAL | + VIDEO_DECODER_NTSC | + VIDEO_DECODER_SECAM | + VIDEO_DECODER_AUTO | VIDEO_DECODER_CCIR; + cap->inputs = 8; + cap->outputs = 1; + } + break; + + case DECODER_SET_AUDIO: + { + int *iarg = arg; + dprintk(1, KERN_DEBUG "%s set audio: 0x%02x\n", + client->name, *iarg); + switch (*iarg) { + case DECODER_AUDIO_32_KHZ: + writeregs(client, cfg_saa7115_32_audio); + if (decoder->norm == VIDEO_MODE_NTSC) { + writeregs(client, + cfg_saa7115_NTSC_32_audio); + } else { + writeregs(client, + cfg_saa7115_PAL_32_audio); + } + break; + case DECODER_AUDIO_441_KHZ: + writeregs(client, 
cfg_saa7115_441_audio); + if (decoder->norm == VIDEO_MODE_NTSC) { + writeregs(client, + cfg_saa7115_NTSC_441_audio); + } else { + writeregs(client, + cfg_saa7115_PAL_441_audio); + } + break; + case DECODER_AUDIO_48_KHZ: + writeregs(client, cfg_saa7115_48_audio); + if (decoder->norm == VIDEO_MODE_NTSC) { + writeregs(client, + cfg_saa7115_NTSC_48_audio); + } else { + writeregs(client, + cfg_saa7115_PAL_48_audio); + } + break; + default: + printk(KERN_DEBUG + "%s invalid audio setting 0x%02x\n", + client->name, *iarg); + } + + /*FIXME digitizer reset needed? + * if so, uncomment this line */ + //writeregs(client, cfg_saa7115_reset_scaler); + + decoder->audio = *iarg; + + } + break; + case DECODER_GET_STATUS: + { + int *iarg = arg; + int status; + int res; + + status = saa7114_read(client, 0x1f); + + dprintk(1, KERN_DEBUG "%s status: 0x%02x\n", + client->name, status); + res = 0; + if ((status & (1 << 6)) == 0) { + res |= DECODER_STATUS_GOOD; + } + switch (decoder->norm) { + case VIDEO_MODE_NTSC: + res |= DECODER_STATUS_NTSC; + break; + case VIDEO_MODE_PAL: + res |= DECODER_STATUS_PAL; + break; + case VIDEO_MODE_SECAM: + res |= DECODER_STATUS_SECAM; + break; + default: + case VIDEO_MODE_AUTO: + if ((status & (1 << 5)) != 0) { + res |= DECODER_STATUS_NTSC; + } else { + res |= DECODER_STATUS_PAL; + } + break; + } + if ((status & (1 << 0)) != 0) { + res |= DECODER_STATUS_COLOR; + } + *iarg = res; + } + break; + + case DECODER_SET_NORM: + { + u16 *iarg = arg; + + dprintk(1, KERN_DEBUG "%s: decoder set norm ", + client->name); + + switch (*iarg) { + + case VIDEO_MODE_NTSC: + dprintk(1, "NTSC\n"); + writeregs(client, cfg_saa7115_NTSC_video); + break; + + case VIDEO_MODE_PAL: + dprintk(1, "PAL\n"); + writeregs(client, cfg_saa7115_PAL_video); + break; + + case VIDEO_MODE_SECAM: + dprintk(1, "SECAM\n"); + writeregs(client, cfg_saa7115_PAL_video); + break; + + default: + dprintk(1, " Unknown video mode!!!\n"); + return -EINVAL; + + } + + decoder->norm = *iarg; + + /* switch 
audio mode too! */ + saa7114_command(client, DECODER_SET_AUDIO, + &decoder->audio); + + } + break; + + case DECODER_SET_INPUT: + { + int *iarg = arg; + + dprintk(1, KERN_DEBUG "%s: decoder set input (%d)\n", + client->name, *iarg); + /* inputs from 0-9 are available */ + if (*iarg < 0 || *iarg > 9) { + return -EINVAL; + } + + if (decoder->input != *iarg) { + dprintk(1, + KERN_DEBUG "%s: now setting %s input\n", + client->name, + *iarg >= 6 ? "S-Video" : "Composite"); + decoder->input = *iarg; + + /* select mode */ + saa7114_write(client, + 0x02, + (saa7114_read(client, 0x02) & + 0xf0) | decoder->input); + + /* bypass chrominance trap for modes 6..9 */ + saa7114_write(client, 0x09, + (saa7114_read(client, 0x09) & + 0x7f) | (decoder->input < + 6 ? 0x0 : 0x80)); + } + } + break; + + case DECODER_SET_OUTPUT: + { + int *iarg = arg; + + dprintk(1, KERN_DEBUG "%s: decoder set output\n", + client->name); + + /* not much choice of outputs */ + if (*iarg != 0) { + return -EINVAL; + } + } + break; + + case DECODER_ENABLE_OUTPUT: + { + int *iarg = arg; + int enable = (*iarg != 0); + + dprintk(1, KERN_DEBUG "%s: decoder %s output\n", + client->name, enable ? 
"enable" : "disable"); + + decoder->playback = !enable; + + if (decoder->enable != enable) { + decoder->enable = enable; + + if (decoder->enable) { + saa7114_write(client, 0x87, 0x01); + } else { + saa7114_write(client, 0x87, 0x00); + } + } + } + break; + + case DECODER_GET_PICTURE: + { + struct saa7114 *pic = arg; + + pic->bright = decoder->bright; + pic->contrast = decoder->contrast; + pic->sat = decoder->sat; + pic->hue = decoder->hue; + } + break; + + case DECODER_SET_PICTURE: + { + struct saa7114 *pic = arg; + + dprintk(1, + KERN_DEBUG + "%s: decoder set picture bright=%d contrast=%d saturation=%d hue=%d\n", + client->name, pic->bright, pic->contrast, + pic->sat, pic->hue); + + if (decoder->bright != pic->bright) { + /* We want 0 to 255 */ + if (pic->bright < 0 || pic->bright > 255) { + dprintk(0, + KERN_ERR + "%s: invalid brightness setting %d", + client->name, pic->bright); + return -EINVAL; + } + decoder->bright = pic->bright; + saa7114_write(client, 0x0a, decoder->bright); + } + if (decoder->contrast != pic->contrast) { + /* We want 0 to 127 */ + if (pic->contrast < 0 || pic->contrast > 127) { + dprintk(0, + KERN_ERR + "%s: invalid contrast setting %d", + client->name, pic->contrast); + return -EINVAL; + } + decoder->contrast = pic->contrast; + saa7114_write(client, 0x0b, decoder->contrast); + } + if (decoder->sat != pic->sat) { + /* We want 0 to 127 */ + if (pic->sat < 0 || pic->sat > 127) { + dprintk(0, + KERN_ERR + "%s: invalid saturation setting %d", + client->name, pic->sat); + return -EINVAL; + } + decoder->sat = pic->sat; + saa7114_write(client, 0x0c, decoder->sat); + } + if (decoder->hue != pic->hue) { + /* We want -128 to 127 */ + if (pic->hue < -128 || pic->hue > 127) { + dprintk(0, + KERN_ERR + "%s: invalid hue setting %d", + client->name, pic->hue); + return -EINVAL; + } + decoder->hue = pic->hue; + saa7114_write(client, 0x0d, decoder->hue); + } + } + break; + + default: + return -EINVAL; + } + + return 0; +} + +/* 
----------------------------------------------------------------------- */ + +/* + * Generic i2c probe + * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' + */ +static unsigned short normal_i2c[] = + { I2C_SAA7114 >> 1, I2C_SAA7114A >> 1, I2C_CLIENT_END }; +static unsigned short normal_i2c_range[] = { I2C_CLIENT_END }; + +I2C_CLIENT_INSMOD; + +static int saa7114_i2c_id = 0; +struct i2c_driver i2c_driver_saa7114; + +static int +saa7114_detect_client(struct i2c_adapter *adapter, int address, int kind) +{ +// int i, err[30]; +// short int hoff = SAA_7114_NTSC_HOFFSET; +// short int voff = SAA_7114_NTSC_VOFFSET; +// short int w = SAA_7114_NTSC_WIDTH; +// short int h = SAA_7114_NTSC_HEIGHT; + struct i2c_client *client; + struct saa7114 *decoder; + + dprintk(1, + KERN_INFO + "saa7114.c: detecting saa7114 client on address 0x%x\n", + address << 1); + + /* Check if the adapter supports the needed features */ + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return 0; + + client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL); + if (client == 0) + return -ENOMEM; + memset(client, 0, sizeof(struct i2c_client)); + client->addr = address; + client->adapter = adapter; + client->driver = &i2c_driver_saa7114; + client->flags = I2C_CLIENT_ALLOW_USE; + client->id = saa7114_i2c_id++; + snprintf(client->name, sizeof(client->name) - 1, "saa7115[%d]", + client->id); + + decoder = kmalloc(sizeof(struct saa7114), GFP_KERNEL); + i2c_set_clientdata(client, decoder); + if (decoder == NULL) { + kfree(client); + return -ENOMEM; + } + memset(decoder, 0, sizeof(struct saa7114)); + decoder->norm = VIDEO_MODE_NTSC; + decoder->input = -1; + decoder->enable = 1; + decoder->bright = 128; + decoder->contrast = 64; + decoder->hue = 0; + decoder->sat = 64; + decoder->playback = 0; // initially capture mode used + decoder->audio = DECODER_AUDIO_48_KHZ; + + dprintk(1, KERN_INFO "saa7115.c: writing init values\n"); + + /* init to NTSC/48khz */ + 
writeregs(client, init_saa7115_auto_input); + writeregs(client, init_saa7115_misc); + writeregs(client, cfg_saa7115_NTSC_fullres_x); + writeregs(client, cfg_saa7115_NTSC_fullres_y); + writeregs(client, cfg_saa7115_NTSC_video); + writeregs(client, cfg_saa7115_48_audio); + writeregs(client, cfg_saa7115_NTSC_48_audio); + writeregs(client, cfg_saa7115_reset_scaler); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(2 * HZ); + + printk("status: (1E) 0x%02x, (1F) 0x%02x\n", + readreg(client, 0x1e), readreg(client, 0x1f)); + + i2c_attach_client(client); + + return 0; +} + +static int saa7114_attach_adapter(struct i2c_adapter *adapter) +{ + dprintk(1, + KERN_INFO + "saa7114.c: starting probe for adapter %s (0x%x)\n", + adapter->name, adapter->id); + return i2c_probe(adapter, &addr_data, &saa7114_detect_client); +} + +static int saa7114_detach_client(struct i2c_client *client) +{ + struct saa7114 *decoder = i2c_get_clientdata(client); + int err; + + err = i2c_detach_client(client); + if (err) { + return err; + } + + kfree(decoder); + kfree(client); + return 0; +} + +/* ----------------------------------------------------------------------- */ + +/* i2c implementation */ +struct i2c_driver i2c_driver_saa7114 = { + .name = "saa7115", + + .id = I2C_DRIVERID_SAA7114, + .flags = I2C_DF_NOTIFY, + + .attach_adapter = saa7114_attach_adapter, + .detach_client = saa7114_detach_client, + .command = saa7114_command, + .owner = THIS_MODULE, +}; + +static int __init saa7114_init(void) +{ + return i2c_add_driver(&i2c_driver_saa7114); +} + +static void __exit saa7114_exit(void) +{ + i2c_del_driver(&i2c_driver_saa7114); +} + +module_init(saa7114_init); +module_exit(saa7114_exit); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/saa7127.c current/drivers/media/video/saa7127.c --- reference/drivers/media/video/saa7127.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/media/video/saa7127.c 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,854 @@ +/* + * 
saa7127 - Philips SAA7127 video encoder driver version 0.2 + * + * Copyright (C) 2003 Roy Bulter + * + * Based on SAA7126 video encoder driver by Gillem & Andreas Oberritter + * + * Copyright (C) 2000-2001 Gillem + * Copyright (C) 2002 Andreas Oberritter + * + * Based on Stadis 4:2:2 MPEG-2 Decoder Driver by Nathan Laredo + * + * Copyright (C) 1999 Nathan Laredo + * + * This driver is designed for the Hauppauge 250/350 Linux driver + * designed by the Ivytv Project (ivtv.sf.net) + * + * Copyright (C) 2003 Kevin Thayer + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * Revision History + * + * + * Revision : 0.1 (09-05-2003) + * Change : First Version + * + * Revision : 0.2 (21-05-2003) + * Change : solved compiler error on line 785(800) + * reg61h variable was not set in saa7127_set_norm function + * + * Revision : 0.3 (21-07-2003) Matt T. Yourst + * Change : Update configuration tables to make NTSC appear correctly; + * Enable alternative outputs (s-video, composite, RGB, etc.) 
+ */ + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include "saa7127.h" + +/* + ********************************************************************** + * Debug Macro's + * + * + ********************************************************************** + */ + +#define CURRENT_MASK 3 +#define NO_MASK 0 +#define INFO_MASK 1 /* 0b0000000000001 */ +#define ERROR_MASK 2 /* 0b0000000000010 */ +#define ENTER_MASK 4 /* 0b0000000000100 */ +#define RETURN_MASK 8 /* 0b0000000001000 */ +#define TRACE1_MASK 16 /* 0b0000000010000 */ +#define TRACE2_MASK 32 /* 0b0000000100000 */ +#define TRACE3_MASK 64 /* 0b0000001000000 */ +#define TRACE4_MASK 128 /* 0b0000010000000 */ +#define TRACE5_MASK 256 /* 0b0000100000000 */ +#define TRACE6_MASK 512 /* 0b0001000000000 */ +#define TRACE7_MASK 1024 /* 0b0010000000000 */ +#define TRACE8_MASK 2048 /* 0b0100000000000 */ +#define TRACE9_MASK 4096 /* 0b1000000000000 */ + +static int debug_mask = CURRENT_MASK; +static int test_image = 0; +static int pal = 0; +static int enable_output = 0; +static int output_select = SAA7127_OUTPUT_TYPE_SVIDEO; + +#define INFO(format, args...)\ + if ((debug_mask&INFO_MASK) == INFO_MASK)\ + {\ + printk("[%s: INFO]: ", __FILE__);\ + printk(format, ##args);\ + printk("\n");\ + }\ + +#define ERROR(format, args...)\ + if ((debug_mask&ERROR_MASK) == ERROR_MASK)\ + {\ + printk("[%s: %d: ERROR]: ", __FILE__,__LINE__);\ + printk(format, ##args);\ + printk("\n");\ + }\ + + +#ifdef DEBUG + +#define ENTER\ + if ((debug_mask&ENTER_MASK) == ENTER_MASK)\ + {\ + printk("[%s : %s: %d: ENTER]: ",__FILE__,__FUNCTION__,__LINE__);\ + printk("\n");\ + }\ + +#define RETURN(value)\ + if ((debug_mask&RETURN_MASK) == RETURN_MASK)\ + {\ + printk("[%s : %s : %d: RETURN]: ", __FILE__,__FUNCTION__,__LINE__);\ + printk("value: %x",value);\ + 
printk("\n");\ + }\ + return(value);\ + +static int get_trace_mask(int num) +{ + switch (num) { + case 1: + return TRACE1_MASK; + break; + case 2: + return TRACE2_MASK; + break; + case 3: + return TRACE3_MASK; + break; + case 4: + return TRACE4_MASK; + break; + case 5: + return TRACE5_MASK; + break; + case 6: + return TRACE6_MASK; + break; + case 7: + return TRACE7_MASK; + break; + case 8: + return TRACE8_MASK; + break; + case 9: + return TRACE9_MASK; + break; + default: + return NO_MASK; + } +} + +#define TRACE(num, format, args...) \ + if ((debug_mask&get_trace_mask(num)) == get_trace_mask(num)) \ + {\ + printk("[%s: %d: TRACE%d] ", __FILE__, __LINE__,num);\ + printk(format, ##args);\ + printk("\n");\ + }\ + +#else + +#define ENTER +#define RETURN(value) return(value); +#define TRACE(num, format, args...) + +#endif /* DEBUG */ + +/* + ********************************************************************** + * + * Array's with configuration parameters for the SAA7127 + * + ********************************************************************** + */ + +struct i2c_reg_value { + unsigned char reg; + unsigned char value; +}; + +struct i2c_reg_value saa7127_init_config_common[] = { + {SAA7127_REG_WIDESCREEN_CONFIG, 0x0d}, + {SAA7127_REG_WIDESCREEN_ENABLE, 0x00}, + {SAA7127_REG_COPYGEN_0, 0x77}, + {SAA7127_REG_COPYGEN_1, 0x41}, + {SAA7127_REG_COPYGEN_2, 0x00}, // (Macrovision enable/disable) + {SAA7127_REG_OUTPUT_PORT_CONTROL, 0x9e}, + {SAA7127_REG_GAIN_LUMINANCE_RGB, 0x00}, + {SAA7127_REG_GAIN_COLORDIFF_RGB, 0x00}, + {SAA7127_REG_INPUT_PORT_CONTROL_1, 0x80}, // (for color bars) + {SAA7127_REG_LINE_21_ODD_0, 0x77}, + {SAA7127_REG_LINE_21_ODD_1, 0x41}, + {SAA7127_REG_LINE_21_EVEN_0, 0x88}, + {SAA7127_REG_LINE_21_EVEN_1, 0x41}, + {SAA7127_REG_RCV_PORT_CONTROL, 0x12}, + {SAA7127_REG_VTRIG, 0xf9}, + {SAA7127_REG_HTRIG_HI, 0x00}, + {SAA7127_REG_RCV2_OUTPUT_START, 0x41}, + {SAA7127_REG_RCV2_OUTPUT_END, 0xc3}, + {SAA7127_REG_RCV2_OUTPUT_MSBS, 0x00}, + 
{SAA7127_REG_TTX_REQUEST_H_START, 0x3e}, + {SAA7127_REG_TTX_REQUEST_H_DELAY_LENGTH, 0xb8}, + {SAA7127_REG_CSYNC_ADVANCE_VSYNC_SHIFT, 0x03}, + {SAA7127_REG_TTX_ODD_REQ_VERT_START, 0x15}, + {SAA7127_REG_TTX_ODD_REQ_VERT_END, 0x16}, + {SAA7127_REG_TTX_EVEN_REQ_VERT_START, 0x15}, + {SAA7127_REG_TTX_EVEN_REQ_VERT_END, 0x16}, + {SAA7127_REG_FIRST_ACTIVE, 0x1a}, + {SAA7127_REG_LAST_ACTIVE, 0x01}, + {SAA7127_REG_MSB_VERTICAL, 0xc0}, + {SAA7127_REG_DISABLE_TTX_LINE_LO_0, 0x00}, + {SAA7127_REG_DISABLE_TTX_LINE_LO_1, 0x00}, + {0, 0} +}; + +#define SAA7127_NTSC_DAC_CONTROL 0x05 +struct i2c_reg_value saa7127_init_config_ntsc[] = { + {SAA7127_REG_BURST_START, 0x19}, + {SAA7127_REG_BURST_END, 0x1d}, + {SAA7127_REG_CHROMA_PHASE, 0x27}, + {SAA7127_REG_GAINU, 0x88}, + {SAA7127_REG_GAINV, 0xc0}, + {SAA7127_REG_BLACK_LEVEL, 0x3f}, + {SAA7127_REG_BLANKING_LEVEL, 0x36}, + {SAA7127_REG_VBI_BLANKING, 0x36}, + {SAA7127_REG_DAC_CONTROL, 0x05}, + {SAA7127_REG_BURST_AMP, 0x4a}, + {SAA7127_REG_SUBC3, 0x1f}, + {SAA7127_REG_SUBC2, 0x7c}, + {SAA7127_REG_SUBC1, 0xf0}, + {SAA7127_REG_SUBC0, 0x21}, + {SAA7127_REG_MULTI, 0x90}, + {SAA7127_REG_CLOSED_CAPTION, 0x14}, + {0, 0} +}; + +#define SAA7127_PAL_DAC_CONTROL 0x02 +struct i2c_reg_value saa7127_init_config_pal[] = { + {SAA7127_REG_BURST_START, 0x21}, + {SAA7127_REG_BURST_END, 0x1d}, + {SAA7127_REG_CHROMA_PHASE, 0x3f}, + {SAA7127_REG_GAINU, 0x7d}, + {SAA7127_REG_GAINV, 0xaf}, + {SAA7127_REG_BLACK_LEVEL, 0x23}, + {SAA7127_REG_BLANKING_LEVEL, 0x35}, + {SAA7127_REG_VBI_BLANKING, 0x35}, + {SAA7127_REG_DAC_CONTROL, 0x02}, + {SAA7127_REG_BURST_AMP, 0x2f}, + {SAA7127_REG_SUBC3, 0xcb}, + {SAA7127_REG_SUBC2, 0x8a}, + {SAA7127_REG_SUBC1, 0x09}, + {SAA7127_REG_SUBC0, 0x2a}, + {SAA7127_REG_MULTI, 0xa0}, + {SAA7127_REG_CLOSED_CAPTION, 0x00}, + {0, 0} +}; + +/* + ********************************************************************** + * + * Encoder Struct, holds the configuration state of the encoder + * + 
********************************************************************** + */ + +struct saa7127 { + enum SAA7127_video_norm norm; + enum SAA7127_input_type input_type; + enum SAA7127_output_type output_type; + enum SAA7127_enable_type enable; + enum SAA7127_wss_enable_type wss_enable; + enum SAA7127_wss_mode_type wss_mode; + u8 reg_2d; + u8 reg_3a; + u8 reg_61; +}; + +/* ----------------------------------------------------------------------- */ + +static int saa7127_read(struct i2c_client *client, u8 reg) +{ + ENTER; + RETURN(i2c_smbus_read_byte_data(client, reg)); +} + +/* ----------------------------------------------------------------------- */ + +static int saa7127_writereg(struct i2c_client *client, u8 reg, u8 val) +{ + ENTER; + if (i2c_smbus_write_byte_data(client, reg, val) < 0) { + ERROR("I2C Write Problem"); + return (-1); + } + TRACE(4, "I2C Write to reg: %x, data: %x", reg, val); + RETURN(0); +} + +/* ----------------------------------------------------------------------- */ + +static int saa7127_write_inittab(struct i2c_client *client, + const struct i2c_reg_value *regs) +{ + ENTER; + + while (regs->reg != 0) { + if (i2c_smbus_write_byte_data(client, regs->reg, regs->value) < + 0) { + ERROR("I2C Write Problem"); + RETURN(-1); + } + TRACE(4, "I2C Write to reg: %x, data: %x", regs->reg, + regs->value); + regs++; + } + RETURN(0); +} + +/* ----------------------------------------------------------------------- */ + +static int saa7127_set_wss(struct i2c_client *client) +{ + struct saa7127 *encoder = (struct saa7127 *)i2c_get_clientdata(client); + + ENTER; + switch (encoder->wss_enable) { + case SAA7127_WSS_DISABLE: + TRACE(3, "Disable Wide Screen Signal"); + saa7127_writereg(client, 0x27, 0x00); + break; + case SAA7127_WSS_ENABLE: + TRACE(3, "Enable Wide Screen Signal"); + saa7127_writereg(client, 0x27, 0x80); + break; + default: + return (-EINVAL); + } + return (0); +} + +/* ----------------------------------------------------------------------- */ + +static 
int saa7127_set_wss_mode(struct i2c_client *client) +{ + struct saa7127 *encoder = (struct saa7127 *)i2c_get_clientdata(client); + + ENTER; + + switch (encoder->wss_mode) { + case SAA7127_WSS_MODE_4_3_FULL_FORMAT: + TRACE(3, "Widescreen Mode 4:3 Full Format"); + saa7127_writereg(client, 0x26, 0x08); + break; + case SAA7127_WSS_MODE_BOX_14_9_C: + TRACE(3, "Widescreen Mode Box 14:9 Center"); + saa7127_writereg(client, 0x26, 0x01); + break; + case SAA7127_WSS_MODE_BOX_14_9_TOP: + TRACE(3, "Widescreen Mode Box 14:9 Top"); + saa7127_writereg(client, 0x26, 0x02); + break; + case SAA7127_WSS_MODE_BOX_16_9_C: + TRACE(3, "Widescreen Mode Box 16:9 Center"); + saa7127_writereg(client, 0x26, 0x0b); + break; + case SAA7127_WSS_MODE_BOX_16_9_TOP: + TRACE(3, "Widescreen Mode Box 16:9 Top"); + saa7127_writereg(client, 0x26, 0x04); + break; + case SAA7127_WSS_MODE_SMALL_BOX_16_9_C: + TRACE(3, "Widescreen Mode Small Box 16:9 Center"); + saa7127_writereg(client, 0x26, 0x0d); + break; + case SAA7127_WSS_MODE_4_3_14_9_FULL_FORMAT: + TRACE(3, "Widescreen Mode 14:9 Full Format"); + saa7127_writereg(client, 0x26, 0x0e); + break; + case SAA7127_WSS_MODE_16_9_ANAMORPHIC: + TRACE(3, "Widescreen Mode 16:9 Full Format"); + saa7127_writereg(client, 0x26, 0x07); + break; + default: + RETURN(-EINVAL); + } + RETURN(0); +} + +/* ----------------------------------------------------------------------- */ + +static int saa7127_set_enable(struct i2c_client *client) +{ + struct saa7127 *encoder = (struct saa7127 *)i2c_get_clientdata(client); + + ENTER; + + switch (encoder->enable) { + case SAA7127_DISABLE: + TRACE(3, "Disable Video Output"); + saa7127_writereg(client, 0x2d, (encoder->reg_2d & 0xf0)); + saa7127_writereg(client, 0x61, (encoder->reg_61 | 0xc0)); + break; + case SAA7127_ENABLE: + TRACE(3, "Enable Video Output"); + saa7127_writereg(client, 0x2d, encoder->reg_2d); + saa7127_writereg(client, 0x61, encoder->reg_61); + break; + default: + RETURN(-EINVAL); + } + +#if 0 + int j; + for (j = 0; j < 
128 / 16; j++) { + TRACE(3, + "saa7127 registers 0x%02x-0x%02x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + j * 16, j * 16 + 15, saa7127_read(client, j * 16 + 0), + saa7127_read(client, j * 16 + 1), saa7127_read(client, + j * 16 + + 2), + saa7127_read(client, j * 16 + 3), saa7127_read(client, + j * 16 + + 4), + saa7127_read(client, j * 16 + 5), saa7127_read(client, + j * 16 + + 6), + saa7127_read(client, j * 16 + 7), saa7127_read(client, + j * 16 + + 8), + saa7127_read(client, j * 16 + 9), saa7127_read(client, + j * 16 + + 10), + saa7127_read(client, j * 16 + 11), saa7127_read(client, + j * 16 + + 12), + saa7127_read(client, j * 16 + 13), saa7127_read(client, + j * 16 + + 14), + saa7127_read(client, j * 16 + 15)); + } +#endif + + RETURN(0); +} + +/* ----------------------------------------------------------------------- */ + +static int saa7127_set_norm(struct i2c_client *client) +{ + struct saa7127 *encoder = (struct saa7127 *)i2c_get_clientdata(client); + const struct i2c_reg_value *inittab; + + ENTER; + + switch (encoder->norm) { + case SAA7127_VIDEO_NORM_NTSC: + TRACE(3, "Selecting NTSC video Standard"); + inittab = saa7127_init_config_ntsc; + encoder->reg_61 = SAA7127_NTSC_DAC_CONTROL; + break; + case SAA7127_VIDEO_NORM_PAL: + TRACE(3, "Selecting PAL video Standard"); + inittab = saa7127_init_config_pal; + encoder->reg_61 = SAA7127_PAL_DAC_CONTROL; + break; + default: + RETURN(-EINVAL); + } + + /* Write Table */ + saa7127_write_inittab(client, inittab); + RETURN(0); +} + +/* ----------------------------------------------------------------------- */ + +static int saa7127_set_output_type(struct i2c_client *client) +{ + struct saa7127 *encoder = (struct saa7127 *)i2c_get_clientdata(client); + + ENTER; + + encoder->reg_3a = 0x13; // by default swithch YUV to RGB-matrix on + + switch (encoder->output_type) { + case SAA7127_OUTPUT_TYPE_RGB: + TRACE(3, "Selecting RGB Output Type"); + encoder->reg_2d = 0x0f; // RGB + CVBS (for 
sync) + break; + case SAA7127_OUTPUT_TYPE_COMPOSITE: + TRACE(3, "Selecting Composite Output Type"); + encoder->reg_2d = 0x08; // 00001000 CVBS only, RGB DAC's off (high impedance mode) !!! + break; + case SAA7127_OUTPUT_TYPE_SVIDEO: + TRACE(3, "Selecting S-Video Output Type"); + encoder->reg_2d = 0xff; // 11111111 croma -> R, luma -> CVBS + G + B + break; + case SAA7127_OUTPUT_TYPE_YUV_V: + TRACE(3, "Selecting YUV V Output Type"); + encoder->reg_2d = 0x4f; // reg 2D = 01001111, all DAC's on, RGB + VBS + encoder->reg_3a = 0x0b; // reg 3A = 00001011, bypass RGB-matrix + break; + case SAA7127_OUTPUT_TYPE_YUV_C: + TRACE(3, "Selecting YUV C Output Type"); + encoder->reg_2d = 0x0f; // reg 2D = 00001111, all DAC's on, RGB + CVBS + encoder->reg_3a = 0x0b; // reg 3A = 00001011, bypass RGB-matrix + break; + default: + RETURN(-EINVAL); + } + + /* Configure Encoder */ + + saa7127_writereg(client, 0x2d, encoder->reg_2d); + saa7127_writereg(client, 0x3a, + (encoder->input_type == + SAA7127_INPUT_TYPE_TEST_IMAGE) ? 
0x80 : encoder-> + reg_3a); + + RETURN(0); +} + +static int saa7127_set_input_type(struct i2c_client *client) +{ + struct saa7127 *encoder = (struct saa7127 *)i2c_get_clientdata(client); + + ENTER; + + switch (encoder->input_type) { + case SAA7127_INPUT_TYPE_NORMAL: /* avia */ + TRACE(3, "Selecting Normal Encoder Input"); + saa7127_writereg(client, 0x3a, encoder->reg_3a); + break; + case SAA7127_INPUT_TYPE_TEST_IMAGE: /* color bar */ + TRACE(3, "Selecting Colour Bar generator"); + saa7127_writereg(client, 0x3a, 0x80); + break; + default: + RETURN(-EINVAL); + } + + RETURN(0); +} + +/* ----------------------------------------------------------------------- */ + +static int saa7127_command(struct i2c_client *client, + unsigned int cmd, void *parg) +{ + struct saa7127 *encoder = i2c_get_clientdata(client); + unsigned long arg = (unsigned long)parg; + struct video_encoder_capability *cap = parg; + ENTER; + printk("saa7127_command: entered with cmd = %d\n", cmd); + + switch (cmd) { + case ENCODER_GET_CAPABILITIES: + TRACE(3, "Asking Encoder Capabilities"); + cap->flags = VIDEO_ENCODER_PAL | VIDEO_ENCODER_NTSC; + cap->inputs = 1; + cap->outputs = 1; + break; + + case ENCODER_SET_NORM: + TRACE(3, "Setting Encoder Video Standard"); + switch (arg) { + case VIDEO_MODE_NTSC: + TRACE(3, "Set NTSC Video Mode"); + encoder->norm = SAA7127_VIDEO_NORM_NTSC; + break; + case VIDEO_MODE_PAL: + TRACE(3, "Set PAL Video Mode"); + encoder->norm = SAA7127_VIDEO_NORM_PAL; + break; + default: + return (-EINVAL); + } + saa7127_set_norm(client); + break; + + case ENCODER_SET_INPUT: + TRACE(3, "Setting Encoder Input"); + switch (arg) { + case SAA7127_INPUT_NORMAL: /* encoder input selected */ + TRACE(3, "Select Normal input"); + encoder->input_type = SAA7127_INPUT_TYPE_NORMAL; + break; + case SAA7127_INPUT_TESTIMAGE: /* Internal colourbars selected */ + TRACE(3, "Select ColourBar Generator"); + encoder->input_type = SAA7127_INPUT_TYPE_TEST_IMAGE; + break; + default: + RETURN(-EINVAL); + } + 
saa7127_set_input_type(client); + break; + + case ENCODER_SET_OUTPUT: + TRACE(3, "Setting Encoder Output"); + encoder->output_type = arg; + saa7127_set_output_type(client); + break; + + case ENCODER_ENABLE_OUTPUT: + TRACE(3, "Turn on/off Output"); + switch (arg) { + case SAA7127_VIDEO_ENABLE: + TRACE(3, "Turn on Video Output"); + encoder->enable = SAA7127_ENABLE; + break; + case SAA7127_VIDEO_DISABLE: + TRACE(3, "Turn off Video Output"); + encoder->enable = SAA7127_DISABLE; + break; + default: + RETURN(-EINVAL); + } + saa7127_set_enable(client); + break; + + default: + RETURN(-EINVAL); + } + RETURN(0); +} + +/* ----------------------------------------------------------------------- */ + +/* + * Generic i2c probe + * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' + */ + +static unsigned short normal_i2c[] = + { I2C_SAA7127_ADRESS >> 1, I2C_CLIENT_END }; +static unsigned short normal_i2c_range[] = { I2C_CLIENT_END }; + +I2C_CLIENT_INSMOD; + +static int saa7127_i2c_id = 0; +struct i2c_driver i2c_driver_saa7127; + +/* ----------------------------------------------------------------------- */ + +static int saa7127_detect_client(struct i2c_adapter *adapter, + int address, int kind) +{ + struct i2c_client *client; + struct saa7127 *encoder; + int read_result = 0; + + ENTER; + + TRACE(1, "detecting saa7127 client on address 0x%x", address << 1); + + /* Check if the adapter supports the needed features */ + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return (0); + + client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL); + if (client == 0) + return (-ENOMEM); + + memset(client, 0, sizeof(struct i2c_client)); + client->addr = address; + client->adapter = adapter; + client->driver = &i2c_driver_saa7127; + client->flags = I2C_CLIENT_ALLOW_USE; + client->id = saa7127_i2c_id++; + snprintf(client->name, sizeof(client->name) - 1, "saa7127[%d]", + client->id); + + encoder = kmalloc(sizeof(struct saa7127), GFP_KERNEL); + + if 
(encoder == NULL) { + kfree(client); + return (-ENOMEM); + } + + i2c_set_clientdata(client, encoder); + memset(encoder, 0, sizeof(struct saa7127)); + + /* Initialize default values */ + encoder->output_type = output_select; + encoder->wss_enable = SAA7127_WSS_DISABLE; + encoder->wss_mode = SAA7127_WSS_MODE_4_3_FULL_FORMAT; + + /* Look if the pal module parameter is set */ + + if (pal == 1) { + /* Select PAL Video Standard */ + encoder->norm = SAA7127_VIDEO_NORM_PAL; + } else { + /* Select NTSC Video Standard, default */ + encoder->norm = SAA7127_VIDEO_NORM_NTSC; + } + + /* Look if the Encoder needs to be enabled */ + + if (enable_output == 1) { + encoder->enable = SAA7127_ENABLE; + } else { + /* for default disable output */ + /* Because the MPEG DECODER is not initialised */ + encoder->enable = SAA7127_DISABLE; + } + + /* The Encoder does have an internal Colourbar generator */ + /* This can be used for debugging, configuration values for the encoder */ + + if (test_image == 1) { + /* Select ColourBar Generator */ + encoder->input_type = SAA7127_INPUT_TYPE_TEST_IMAGE; + } else { + /* Select normal input */ + encoder->input_type = SAA7127_INPUT_TYPE_NORMAL; + } + + TRACE(2, "writing init values"); + + /* Configure Encoder */ + + printk("saa7127: Configuring encoder..."); + saa7127_write_inittab(client, saa7127_init_config_common); + saa7127_set_norm(client); + saa7127_set_output_type(client); + saa7127_set_wss(client); + saa7127_set_wss_mode(client); + saa7127_set_input_type(client); + saa7127_set_enable(client); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(2 * HZ); + + read_result = saa7127_read(client, 0x00); + + TRACE(4, "Read status register (00h) : 0x%02x ", read_result); + + i2c_attach_client(client); + + RETURN(0); +} + +/* ----------------------------------------------------------------------- */ + +static int saa7127_attach_adapter(struct i2c_adapter *adapter) +{ + + TRACE(2, "starting probe for adapter %s (0x%x)", adapter->name, + 
adapter->id); + return (i2c_probe(adapter, &addr_data, &saa7127_detect_client)); +} + +/* ----------------------------------------------------------------------- */ + +static int saa7127_detach_client(struct i2c_client *client) +{ + struct saa7127 *encoder = i2c_get_clientdata(client); + int err; + + /* Turn off TV output */ + + encoder->enable = SAA7127_DISABLE; + saa7127_set_enable(client); + + err = i2c_detach_client(client); + + if (err) { + return (err); + } + + kfree(encoder); + kfree(client); + return (0); +} + +/* ----------------------------------------------------------------------- */ + +struct i2c_driver i2c_driver_saa7127 = { + .name = "saa7127", + .id = I2C_DRIVERID_SAA7127, + .flags = I2C_DF_NOTIFY, + .attach_adapter = saa7127_attach_adapter, + .detach_client = saa7127_detach_client, + .command = saa7127_command, + .owner = THIS_MODULE, +}; + +/* ----------------------------------------------------------------------- */ + +static int __init saa7127_init(void) +{ + INFO("SAA7127 video encoder driver loaded"); + TRACE(1, "Driver version: V %s", SAA7127_DRIVER_VERSION); + return (i2c_add_driver(&i2c_driver_saa7127)); +} + +/* ----------------------------------------------------------------------- */ + +static void __exit saa7127_exit(void) +{ + + INFO("SAA7127 video encoder driver unloaded"); + i2c_del_driver(&i2c_driver_saa7127); +} + +/* ----------------------------------------------------------------------- */ + +module_init(saa7127_init); +module_exit(saa7127_exit); + +MODULE_DESCRIPTION("Philips SAA7127 video encoder driver"); +MODULE_AUTHOR("Roy Bulter"); +MODULE_LICENSE("GPL"); +MODULE_PARM(debug_mask, "i"); +MODULE_PARM(test_image, "i"); +MODULE_PARM(pal, "i"); +MODULE_PARM(enable_output, "i"); +MODULE_PARM(output_select, "i"); +MODULE_PARM_DESC(debug_mask, "debug_mask (0-8192) "); +MODULE_PARM_DESC(test_image, "test_image (0-1) "); +MODULE_PARM_DESC(pal, "pal (0-1) "); +MODULE_PARM_DESC(enable_output, "enable_output (0-1) "); 
+MODULE_PARM_DESC(output_select, + "output_select (0 = composite, 1 = s-video, 2 = rgb, 3 = YUVc, 4 = YUVv)"); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/saa7127.h current/drivers/media/video/saa7127.h --- reference/drivers/media/video/saa7127.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/media/video/saa7127.h 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,154 @@ +#ifndef _SAA7127_H +#define _SAA7127_H + +/* + ********************************************************************** + * + * Define's + * + * + ********************************************************************** + */ + + +#define SAA7127_DRIVER_VERSION "0.3" + + +#ifndef I2C_DRIVERID_SAA7127 + #warning Using temporary hack for missing I2C driver-ID for saa7127 + #define I2C_DRIVERID_SAA7127 I2C_DRIVERID_EXP2 +#endif + + +#define I2C_SAA7127_ADRESS 0x88 + +#define SAA7127_VIDEO_ENABLE 0x01 +#define SAA7127_VIDEO_DISABLE 0x00 + +#define SAA7127_INPUT_TESTIMAGE 0x01 +#define SAA7127_INPUT_NORMAL 0x00 + +/* + * SAA7127 registers + */ + +#define SAA7127_REG_STATUS 0x00 +/* (registers 0x01-0x25 unused.) */ +#define SAA7127_REG_WIDESCREEN_CONFIG 0x26 +#define SAA7127_REG_WIDESCREEN_ENABLE 0x27 +#define SAA7127_REG_BURST_START 0x28 +#define SAA7127_REG_BURST_END 0x29 +#define SAA7127_REG_COPYGEN_0 0x2a +#define SAA7127_REG_COPYGEN_1 0x2b +#define SAA7127_REG_COPYGEN_2 0x2c +#define SAA7127_REG_OUTPUT_PORT_CONTROL 0x2d +/* (registers 0x2e-0x37 unused.) 
*/ +#define SAA7127_REG_GAIN_LUMINANCE_RGB 0x38 +#define SAA7127_REG_GAIN_COLORDIFF_RGB 0x39 +#define SAA7127_REG_INPUT_PORT_CONTROL_1 0x3A +/* (registers 0x3b-0x53 undefined) */ +#define SAA7127_REG_CHROMA_PHASE 0x5A +#define SAA7127_REG_GAINU 0x5B +#define SAA7127_REG_GAINV 0x5C +#define SAA7127_REG_BLACK_LEVEL 0x5D +#define SAA7127_REG_BLANKING_LEVEL 0x5E +#define SAA7127_REG_VBI_BLANKING 0x5F +/* (register 0x60 unused) */ +#define SAA7127_REG_DAC_CONTROL 0x61 +#define SAA7127_REG_BURST_AMP 0x62 +#define SAA7127_REG_SUBC3 0x63 +#define SAA7127_REG_SUBC2 0x64 +#define SAA7127_REG_SUBC1 0x65 +#define SAA7127_REG_SUBC0 0x66 +#define SAA7127_REG_LINE_21_ODD_0 0x67 +#define SAA7127_REG_LINE_21_ODD_1 0x68 +#define SAA7127_REG_LINE_21_EVEN_0 0x69 +#define SAA7127_REG_LINE_21_EVEN_1 0x6A +#define SAA7127_REG_RCV_PORT_CONTROL 0x6B +#define SAA7127_REG_VTRIG 0x6C +#define SAA7127_REG_HTRIG_HI 0x6D +#define SAA7127_REG_MULTI 0x6E +#define SAA7127_REG_CLOSED_CAPTION 0x6F +#define SAA7127_REG_RCV2_OUTPUT_START 0x70 +#define SAA7127_REG_RCV2_OUTPUT_END 0x71 +#define SAA7127_REG_RCV2_OUTPUT_MSBS 0x72 +#define SAA7127_REG_TTX_REQUEST_H_START 0x73 +#define SAA7127_REG_TTX_REQUEST_H_DELAY_LENGTH 0x74 +#define SAA7127_REG_CSYNC_ADVANCE_VSYNC_SHIFT 0x75 +#define SAA7127_REG_TTX_ODD_REQ_VERT_START 0x76 +#define SAA7127_REG_TTX_ODD_REQ_VERT_END 0x77 +#define SAA7127_REG_TTX_EVEN_REQ_VERT_START 0x78 +#define SAA7127_REG_TTX_EVEN_REQ_VERT_END 0x79 +#define SAA7127_REG_FIRST_ACTIVE 0x7A +#define SAA7127_REG_LAST_ACTIVE 0x7B +#define SAA7127_REG_MSB_VERTICAL 0x7C +/* (register 0x7d unused) */ +#define SAA7127_REG_DISABLE_TTX_LINE_LO_0 0x7E +#define SAA7127_REG_DISABLE_TTX_LINE_LO_1 0x7F + + + +/* + ********************************************************************** + * + * Enumurations + * + ********************************************************************** + */ + + +/* Enumeration for the Video Standard */ + + +enum SAA7127_video_norm { + SAA7127_VIDEO_NORM_NTSC, + 
SAA7127_VIDEO_NORM_PAL + }; + + +/* Enumeration for the Supported input types */ + +enum SAA7127_input_type { + SAA7127_INPUT_TYPE_NORMAL, + SAA7127_INPUT_TYPE_TEST_IMAGE + }; + + +/* Enumeration for the Supported Output signal types */ + +enum SAA7127_output_type { + SAA7127_OUTPUT_TYPE_COMPOSITE, + SAA7127_OUTPUT_TYPE_SVIDEO, + SAA7127_OUTPUT_TYPE_RGB, + SAA7127_OUTPUT_TYPE_YUV_C, + SAA7127_OUTPUT_TYPE_YUV_V + }; + +/* Enumeration for the enable/disabling the output signal */ + +enum SAA7127_enable_type { + SAA7127_DISABLE, + SAA7127_ENABLE + }; +/* Enumeration for the turning on/off the Wide screen signal for Wide screen TV */ + +enum SAA7127_wss_enable_type { + SAA7127_WSS_DISABLE, + SAA7127_WSS_ENABLE + }; + +/* Enumeration for the selecting the different Wide screen mode */ + +enum SAA7127_wss_mode_type { + SAA7127_WSS_MODE_4_3_FULL_FORMAT, /* full format 4:3 */ + SAA7127_WSS_MODE_BOX_14_9_C, /* box 14:9 c */ + SAA7127_WSS_MODE_BOX_14_9_TOP, /* box 14:9 top */ + SAA7127_WSS_MODE_BOX_16_9_C, /* box 16:9 c */ + SAA7127_WSS_MODE_BOX_16_9_TOP, /* box 16:9 top */ + SAA7127_WSS_MODE_SMALL_BOX_16_9_C, /* box > 16:9 c */ + SAA7127_WSS_MODE_4_3_14_9_FULL_FORMAT, /* full format 4:3 with 14:9 c letterbox content */ + SAA7127_WSS_MODE_16_9_ANAMORPHIC /* full format 16:9 (anamorphic) */ + }; + + +#endif // _SAA7127_H diff -purN -X /home/mbligh/.diff.exclude reference/drivers/media/video/tveeprom.c current/drivers/media/video/tveeprom.c --- reference/drivers/media/video/tveeprom.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/media/video/tveeprom.c 2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,546 @@ +/* + * tveeprom - eeprom decoder for tvcard configuration eeproms + * + * Data and decoding routines shamelessly borrowed from bttv-cards.c + * eeprom access routine shamelessly borrowed from bttv-if.c + * which are: + + Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) + & Marcus Metzler (mocm@thp.uni-koeln.de) + (c) 1999-2001 Gerd Knorr + + * 
Adjustments to fit a more general model and all bugs: + + Copyright (C) 2003 John Klar + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +MODULE_DESCRIPTION("i2c eeprom decoder driver"); +MODULE_AUTHOR("John Klar"); +MODULE_LICENSE("GPL"); + +#include +#include + +#include + +#ifndef I2C_DRIVERID_TVEEPROM +#warning Using temporary hack for missing I2C driver-ID for tveeprom +#define I2C_DRIVERID_TVEEPROM I2C_DRIVERID_EXP2 +#endif + +static int debug = 1; +MODULE_PARM(debug, "i"); +MODULE_PARM_DESC(debug, "Debug level (0-2)"); + +static int verbose = 0; +MODULE_PARM(verbose, "i"); +MODULE_PARM_DESC(verbose, "Verbose level (0-1)"); + +#define STRM(array,i) (i < sizeof(array)/sizeof(char*) ? array[i] : "unknown") + +#define dprintk(num, format, args...) 
\ + do { \ + if (debug >= num) \ + printk(format, ##args); \ + } while (0) + +/* ----------------------------------------------------------------------- */ + +static unsigned char eeprom_buf[256]; + +struct tveeprom { + u32 has_radio; + + u32 tuner_type; + u32 tuner_formats; + + u32 digitizer; + u32 digitizer_formats; + + u32 audio_processor; + /* a_p_fmts? */ + + u32 model; + u32 revision; + u32 serial_number; + char rev_str[5]; +}; + +#define I2C_TVEEPROM 0xA0 +#define I2C_TVEEPROMA 0xA0 + +#define I2C_DELAY 10 + +#define REG_ADDR(x) (((x) << 1) + 1) +#define LOBYTE(x) ((unsigned char)((x) & 0xff)) +#define HIBYTE(x) ((unsigned char)(((x) >> 8) & 0xff)) +#define LOWORD(x) ((unsigned short int)((x) & 0xffff)) +#define HIWORD(x) ((unsigned short int)(((x) >> 16) & 0xffff)) + +/* ----------------------------------------------------------------------- */ +/* some hauppauge specific stuff */ + +static struct HAUPPAUGE_TUNER_FMT { + int id; + char *name; +} hauppauge_tuner_fmt[] __devinitdata = { + { + 0x00000000, "unknown1"}, { + 0x00000000, "unknown2"}, { + 0x00000007, "PAL(B/G)"}, { + 0x00001000, "NTSC(M)"}, { + 0x00000010, "PAL(I)"}, { + 0x00400000, "SECAM(L/L´)"}, { + 0x00000e00, "PAL(D/K)"}, { +0x03000000, "ATSC Digital"},}; + +static struct HAUPPAUGE_TUNER { + int id; + char *name; +} hauppauge_tuner[] __devinitdata = { + { + TUNER_ABSENT, ""}, { + TUNER_ABSENT, "External"}, { + TUNER_ABSENT, "Unspecified"}, { + TUNER_PHILIPS_PAL, "Philips FI1216"}, { + TUNER_PHILIPS_SECAM, "Philips FI1216MF"}, { + TUNER_PHILIPS_NTSC, "Philips FI1236"}, { + TUNER_PHILIPS_PAL_I, "Philips FI1246"}, { + TUNER_PHILIPS_PAL_DK, "Philips FI1256"}, { + TUNER_PHILIPS_PAL, "Philips FI1216 MK2"}, { + TUNER_PHILIPS_SECAM, "Philips FI1216MF MK2"}, { + TUNER_PHILIPS_NTSC, "Philips FI1236 MK2"}, { + TUNER_PHILIPS_PAL_I, "Philips FI1246 MK2"}, { + TUNER_PHILIPS_PAL_DK, "Philips FI1256 MK2"}, { + TUNER_TEMIC_NTSC, "Temic 4032FY5"}, { + TUNER_TEMIC_PAL, "Temic 4002FH5"}, { + TUNER_TEMIC_PAL_I, 
"Temic 4062FY5"}, { + TUNER_PHILIPS_PAL, "Philips FR1216 MK2"}, { + TUNER_PHILIPS_SECAM, "Philips FR1216MF MK2"}, { + TUNER_PHILIPS_NTSC, "Philips FR1236 MK2"}, { + TUNER_PHILIPS_PAL_I, "Philips FR1246 MK2"}, { + TUNER_PHILIPS_PAL_DK, "Philips FR1256 MK2"}, { + TUNER_PHILIPS_PAL, "Philips FM1216"}, { + TUNER_PHILIPS_SECAM, "Philips FM1216MF"}, { + TUNER_PHILIPS_NTSC, "Philips FM1236"}, { + TUNER_PHILIPS_PAL_I, "Philips FM1246"}, { + TUNER_PHILIPS_PAL_DK, "Philips FM1256"}, { + TUNER_TEMIC_4036FY5_NTSC, "Temic 4036FY5"}, { + TUNER_ABSENT, "Samsung TCPN9082D"}, { + TUNER_ABSENT, "Samsung TCPM9092P"}, { + TUNER_TEMIC_4006FH5_PAL, "Temic 4006FH5"}, { + TUNER_ABSENT, "Samsung TCPN9085D"}, { + TUNER_ABSENT, "Samsung TCPB9085P"}, { + TUNER_ABSENT, "Samsung TCPL9091P"}, { + TUNER_TEMIC_4039FR5_NTSC, "Temic 4039FR5"}, { + TUNER_PHILIPS_FQ1216ME, "Philips FQ1216 ME"}, { + TUNER_TEMIC_4066FY5_PAL_I, "Temic 4066FY5"}, { + TUNER_ABSENT, "Philips TD1536"}, { + TUNER_ABSENT, "Philips TD1536D"}, { + TUNER_PHILIPS_NTSC, "Philips FMR1236"}, /* mono radio */ + { + TUNER_ABSENT, "Philips FI1256MP"}, { + TUNER_ABSENT, "Samsung TCPQ9091P"}, { + TUNER_TEMIC_4006FN5_MULTI_PAL, "Temic 4006FN5"}, { + TUNER_TEMIC_4009FR5_PAL, "Temic 4009FR5"}, { + TUNER_TEMIC_4046FM5, "Temic 4046FM5"}, { + TUNER_TEMIC_4009FN5_MULTI_PAL_FM, "Temic 4009FN5"}, { + TUNER_ABSENT, "Philips TD1536D_FH_44"}, { + TUNER_LG_NTSC_FM, "LG TP18NSR01F"}, { + TUNER_LG_PAL_FM, "LG TP18PSB01D"}, { + TUNER_LG_PAL, "LG TP18PSB11D"}, { + TUNER_LG_PAL_I_FM, "LG TAPC-I001D"}, { + TUNER_LG_PAL_I, "LG TAPC-I701D"} +}; + +static char *sndtype[] = { + "None", "TEA6300", "TEA6320", "TDA9850", "MSP3400C", "MSP3410D", + "MSP3415", "MSP3430", "MSP3438", "CS5331", "MSP3435", "MSP3440", + "MSP3445", "MSP3411", "MSP3416", "MSP3425", + + "Type 0x10", "Type 0x11", "Type 0x12", "Type 0x13", + "Type 0x14", "Type 0x15", "Type 0x16", "Type 0x17", + "Type 0x18", "MSP4418", "Type 0x1a", "MSP4448", + "Type 0x1c", "Type 0x1d", "Type 0x1e", "Type 
0x1f", +}; + +static void __devinit hauppauge_eeprom(struct tveeprom *tvee, + unsigned char *eeprom_data) +{ + + /* ---------------------------------------------- + ** The hauppauge eeprom format is tagged + ** + ** if packet[0] == 0x84, then packet[0..1] == length + ** else length = packet[0] & 3f; + ** if packet[0] & f8 == f8, then EOD and packet[1] == checksum + ** + ** In our (ivtv) case we're interested in the following: + ** tuner type: tag [00].05 or [0a].01 (index into hauppauge_tuners) + ** tuner fmts: tag [00].04 or [0a].00 (bitmask index into hauppauge_fmts) + ** radio: tag [00].{last} or [0e].00 (bitmask. bit2=FM) + ** audio proc: tag [02].01 or [05].00 (lower nibble indexes lut?) + + ** Fun info: + ** model: tag [00].07-08 or [06].00-01 + ** revision: tag [00].09-0b or [06].04-06 + ** serial#: tag [01].05-07 or [04].04-06 + + ** # of inputs/outputs ??? + */ + + int i, j, len, done, tag, tuner = 0, t_format = 0; + char *t_name = NULL, *t_fmt_name = NULL; + + tvee->revision = done = len = 0; + for (i = 0; !done && i < 256; i += len) { + + dprintk(2, + KERN_INFO + "tvee: processing pos=%02x (%02x,%02x)\n", + i, eeprom_data[i], eeprom_data[i + 1]); + + if (eeprom_data[i] == 0x84) { + len = eeprom_data[i + 1] + (eeprom_data[i + 2] << 8); + i += 3; + } else if ((eeprom_data[i] & 0xf0) == 0x70) { + if ((eeprom_data[i] & 0x08)) { + /* verify checksum! */ + done = 1; + break; + } + len = eeprom_data[i] & 0x07; + ++i; + } else { + printk(KERN_WARNING + "Encountered bad packet header [%02x]. 
" + "Corrupt or not a Hauppauge eeprom.\n", + eeprom_data[i]); + return; + } + + dprintk(1, KERN_INFO "%3d [%02x] ", len, eeprom_data[i]); + for (j = 1; j < len; j++) { + dprintk(1, "%02x ", eeprom_data[i + j]); + } + dprintk(1, "\n"); + + /* process by tag */ + tag = eeprom_data[i]; + switch (tag) { + case 0x00: + tuner = eeprom_data[i + 6]; + t_format = eeprom_data[i + 5]; + tvee->has_radio = eeprom_data[i + len - 1]; + tvee->model = + eeprom_data[i + 8] + (eeprom_data[i + 9] << 8); + tvee->revision = eeprom_data[i + 10] + + (eeprom_data[i + 11] << 8) + + (eeprom_data[i + 12] << 16); + break; + case 0x01: + tvee->serial_number = + eeprom_data[i + 6] + + (eeprom_data[i + 7] << 8) + + (eeprom_data[i + 8] << 16); + break; + case 0x02: + tvee->audio_processor = eeprom_data[i + 2] & 0x0f; + break; + case 0x04: + tvee->serial_number = + eeprom_data[i + 5] + + (eeprom_data[i + 6] << 8) + + (eeprom_data[i + 7] << 16); + break; + case 0x05: + tvee->audio_processor = eeprom_data[i + 1] & 0x0f; + break; + case 0x06: + tvee->model = + eeprom_data[i + 1] + (eeprom_data[i + 2] << 8); + tvee->revision = eeprom_data[i + 5] + + (eeprom_data[i + 6] << 8) + + (eeprom_data[i + 7] << 16); + break; + case 0x0a: + tuner = eeprom_data[i + 2]; + t_format = eeprom_data[i + 1]; + break; + case 0x0e: + tvee->has_radio = eeprom_data[i + 1]; + break; + default: + printk(KERN_WARNING + "Not sure what to do with tag [%02x]\n", tag); + /* dump the rest of the packet? 
*/ + } + + } + + if (!done) { + printk(KERN_WARNING "Ran out of data!\n"); + return; + } + + if (tvee->revision != 0) { + tvee->rev_str[0] = 32 + ((tvee->revision >> 18) & 0x3f); + tvee->rev_str[1] = 32 + ((tvee->revision >> 12) & 0x3f); + tvee->rev_str[2] = 32 + ((tvee->revision >> 6) & 0x3f); + tvee->rev_str[3] = 32 + (tvee->revision & 0x3f); + tvee->rev_str[4] = 0; + } + + if (tuner < sizeof(hauppauge_tuner) / sizeof(struct HAUPPAUGE_TUNER)) { + tvee->tuner_type = hauppauge_tuner[tuner].id; + t_name = hauppauge_tuner[tuner].name; + } else { + t_name = ""; + } + + tvee->tuner_formats = 0; + t_fmt_name = ""; + for (i = 0; i < 8; i++) { + if ((t_format & (1 << i))) { + tvee->tuner_formats |= hauppauge_tuner_fmt[i].id; + /* yuck */ + t_fmt_name = hauppauge_tuner_fmt[i].name; + } + } + +#if 0 + if (t_format < + sizeof(hauppauge_tuner_fmt) / sizeof(struct HAUPPAUGE_TUNER_FMT)) { + tvee->tuner_formats = hauppauge_tuner_fmt[t_format].id; + t_fmt_name = hauppauge_tuner_fmt[t_format].name; + } else { + t_fmt_name = ""; + } +#endif + + printk(KERN_INFO "tvee: Hauppauge: model=%d, rev=%s, serial#=%d\n", + tvee->model, tvee->rev_str, tvee->serial_number); + printk(KERN_INFO "tvee: tuner=%s (idx=%d, type=%d)\n", + t_name, tuner, tvee->tuner_type); + printk(KERN_INFO "tvee: tuner fmt=%s (eeprom=0x%02x, v4l2=0x%08x)\n", + t_fmt_name, t_format, tvee->tuner_formats); + + printk(KERN_INFO "tvee: audio_processor=%s (type=%d)\n", + STRM(sndtype, tvee->audio_processor), tvee->audio_processor); + +} + +/* ----------------------------------------------------------------------- */ + +/* write I2C */ +int tvee_I2CWrite(struct i2c_client *client, + unsigned char b1, unsigned char b2, int both) +{ + unsigned char buffer[2]; + int bytes = both ? 
2 : 1; + + buffer[0] = b1; + buffer[1] = b2; + if (bytes != i2c_master_send(client, buffer, bytes)) + return -1; + return 0; +} + +void __devinit tvee_readee(struct i2c_client *client, unsigned char *eedata) +{ + int i; + + if (tvee_I2CWrite(client, 0, -1, 0) < 0) { + printk(KERN_WARNING "tvee: readee error\n"); + return; + } + + for (i = 0; i < 256; i += 16) { + if (16 != i2c_master_recv(client, eedata + i, 16)) { + printk(KERN_WARNING "tvee: readee error\n"); + break; + } + } +} + +/* ----------------------------------------------------------------------- */ + +static int +tveeprom_command(struct i2c_client *client, unsigned int cmd, void *arg) +{ + + struct tveeprom *eeprom = i2c_get_clientdata(client); + u32 *eeprom_props = arg; + + switch (cmd) { + + case 0: + eeprom_props[0] = eeprom->tuner_type; + eeprom_props[1] = eeprom->tuner_formats; + eeprom_props[2] = eeprom->model; + eeprom_props[3] = eeprom->revision; + break; + + default: + return -EINVAL; + } + + return 0; +} + +/* ----------------------------------------------------------------------- */ + +/* + * Generic i2c probe + * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' + */ +static unsigned short normal_i2c[] = + { I2C_TVEEPROM >> 1, I2C_TVEEPROMA >> 1, I2C_CLIENT_END }; +static unsigned short normal_i2c_range[] = { I2C_CLIENT_END }; + +I2C_CLIENT_INSMOD; + +static int tveeprom_i2c_id = 0; +struct i2c_driver i2c_driver_tveeprom; + +static int +tveeprom_detect_client(struct i2c_adapter *adapter, int address, int kind) +{ + struct i2c_client *client; + struct tveeprom *eeprom; + + dprintk(1, + KERN_INFO + "tveeprom.c: detecting tveeprom client on address 0x%x\n", + address << 1); + + /* Check if the adapter supports the needed features */ + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return 0; + + client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL); + if (client == 0) + return -ENOMEM; + memset(client, 0, sizeof(struct i2c_client)); + client->addr = 
address; + client->adapter = adapter; + client->driver = &i2c_driver_tveeprom; + client->flags = I2C_CLIENT_ALLOW_USE; + client->id = tveeprom_i2c_id++; + snprintf(client->name, sizeof(client->name) - 1, "tveeprom[%d]", + client->id); + + eeprom = kmalloc(sizeof(struct tveeprom), GFP_KERNEL); + if (eeprom == NULL) { + kfree(client); + return -ENOMEM; + } + i2c_set_clientdata(client, eeprom); + memset(eeprom, 0, sizeof(struct tveeprom)); + eeprom->tuner_type = -1; + eeprom->has_radio = 0; + eeprom->model = 0; + + tvee_readee(client, eeprom_buf); + hauppauge_eeprom(eeprom, eeprom_buf); + + return 0; +} + +static int tveeprom_attach_adapter(struct i2c_adapter *adapter) +{ + if (adapter->id != (I2C_ALGO_BIT | I2C_HW_B_BT848)) + return 0; + dprintk(1, + KERN_INFO + "tveeprom.c: starting probe for adapter %s (0x%x)\n", + adapter->name, adapter->id); + return i2c_probe(adapter, &addr_data, tveeprom_detect_client); +} + +static int tveeprom_detach_client(struct i2c_client *client) +{ + struct tveeprom *eeprom = i2c_get_clientdata(client); + int err; + + err = i2c_detach_client(client); + if (err) { + return err; + } + + kfree(eeprom); + kfree(client); + return 0; +} + +/* ----------------------------------------------------------------------- */ + +/* i2c implementation */ +struct i2c_driver i2c_driver_tveeprom = { + .name = "tveeprom", + + .id = I2C_DRIVERID_TVEEPROM, + .flags = I2C_DF_NOTIFY, + + .attach_adapter = tveeprom_attach_adapter, + .detach_client = tveeprom_detach_client, + .command = tveeprom_command, + .owner = THIS_MODULE, +}; + +static int __init tveeprom_init(void) +{ + return i2c_add_driver(&i2c_driver_tveeprom); +} + +static void __exit tveeprom_exit(void) +{ + i2c_del_driver(&i2c_driver_tveeprom); +} + +module_init(tveeprom_init); +module_exit(tveeprom_exit); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/net/Makefile current/drivers/net/Makefile --- reference/drivers/net/Makefile 2004-04-07 14:54:15.000000000 -0700 +++ 
current/drivers/net/Makefile 2004-04-08 15:10:21.000000000 -0700 @@ -188,4 +188,6 @@ obj-$(CONFIG_NET_TULIP) += tulip/ obj-$(CONFIG_HAMRADIO) += hamradio/ obj-$(CONFIG_IRDA) += irda/ +# Must come after all NICs that might use them obj-$(CONFIG_NETCONSOLE) += netconsole.o +obj-$(CONFIG_KGDB) += kgdb_eth.o diff -purN -X /home/mbligh/.diff.exclude reference/drivers/net/kgdb_eth.c current/drivers/net/kgdb_eth.c --- reference/drivers/net/kgdb_eth.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/net/kgdb_eth.c 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,131 @@ +/* + * Network interface GDB stub + * + * Written by San Mehat (nettwerk@biodome.org) + * Based upon 'gdbserial' by David Grothe (dave@gcom.com) + * and Scott Foehner (sfoehner@engr.sgi.com) + * + * Twiddled for 2.6 by Robert Walsh + * and wangdi . + * + * Refactored for netpoll API by Matt Mackall + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define IN_BUF_SIZE 512 /* power of 2, please */ +#define OUT_BUF_SIZE 256 + +static char in_buf[IN_BUF_SIZE], out_buf[OUT_BUF_SIZE]; +static int in_head, in_tail, out_count; +static atomic_t in_count; +int kgdboe = 0; /* Default to tty mode */ + +extern void set_debug_traps(void); +extern void breakpoint(void); +static void rx_hook(struct netpoll *np, int port, char *msg, int len); + +static struct netpoll np = { + .name = "kgdboe", + .dev_name = "eth0", + .rx_hook = rx_hook, + .local_port = 6443, + .remote_port = 6442, + .remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, +}; + +int eth_getDebugChar(void) +{ + int chr; + + while (atomic_read(&in_count) == 0) + netpoll_poll(&np); + + chr = in_buf[in_tail++]; + in_tail &= (IN_BUF_SIZE - 1); + atomic_dec(&in_count); + return chr; +} + +void eth_flushDebugChar(void) +{ + if(out_count && np.dev) { + netpoll_send_udp(&np, out_buf, out_count); + out_count = 0; + } +} + +void eth_putDebugChar(int chr) +{ + out_buf[out_count++] = 
chr; + if(out_count == OUT_BUF_SIZE) + eth_flushDebugChar(); +} + +static void rx_hook(struct netpoll *np, int port, char *msg, int len) +{ + int i; + + np->remote_port = port; + + /* Is this gdb trying to attach? */ + if (!netpoll_trap() && len == 8 && !strncmp(msg, "$Hc-1#09", 8)) + kgdb_schedule_breakpoint(); + + for (i = 0; i < len; i++) { + if (msg[i] == 3) + kgdb_schedule_breakpoint(); + + if (atomic_read(&in_count) >= IN_BUF_SIZE) { + /* buffer overflow, clear it */ + in_head = in_tail = 0; + atomic_set(&in_count, 0); + break; + } + in_buf[in_head++] = msg[i]; + in_head &= (IN_BUF_SIZE - 1); + atomic_inc(&in_count); + } +} + +static int option_setup(char *opt) +{ + return netpoll_parse_options(&np, opt); +} + +__setup("kgdboe=", option_setup); + +static int init_kgdboe(void) +{ +#ifdef CONFIG_SMP + if (num_online_cpus() > CONFIG_NO_KGDB_CPUS) { + printk("kgdb: too many cpus. Cannot enable debugger with more than %d cpus\n", CONFIG_NO_KGDB_CPUS); + return -1; + } +#endif + + set_debug_traps(); + + if(!np.remote_ip || netpoll_setup(&np)) + return 1; + + kgdboe = 1; + printk(KERN_INFO "kgdb: debugging over ethernet enabled\n"); + + return 0; +} + +module_init(init_kgdboe); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/Kconfig current/drivers/scsi/Kconfig --- reference/drivers/scsi/Kconfig 2004-04-07 14:54:22.000000000 -0700 +++ current/drivers/scsi/Kconfig 2004-04-09 11:53:02.000000000 -0700 @@ -1451,6 +1451,13 @@ config SCSI_MAC53C94 source "drivers/scsi/arm/Kconfig" +config SCSI_LPFC + tristate "Emulex LP support" + depends on PCI && SCSI + ---help--- + This driver supports the Emulex LP hardware (fibre channel + adapter cards). 
+ config JAZZ_ESP bool "MIPS JAZZ FAS216 SCSI support" depends on MIPS_JAZZ && SCSI diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/Makefile current/drivers/scsi/Makefile --- reference/drivers/scsi/Makefile 2004-04-07 14:54:22.000000000 -0700 +++ current/drivers/scsi/Makefile 2004-04-09 11:53:02.000000000 -0700 @@ -125,6 +125,7 @@ obj-$(CONFIG_SCSI_SATA_VIA) += libata.o obj-$(CONFIG_SCSI_SATA_VITESSE) += libata.o sata_vsc.o obj-$(CONFIG_ARM) += arm/ +obj-$(CONFIG_SCSI_LPFC) += lpfc/ obj-$(CONFIG_CHR_DEV_ST) += st.o obj-$(CONFIG_CHR_DEV_OSST) += osst.o diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/COPYING current/drivers/scsi/lpfc/COPYING --- reference/drivers/scsi/lpfc/COPYING 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/COPYING 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,342 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
+ + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/Makefile current/drivers/scsi/lpfc/Makefile --- reference/drivers/scsi/lpfc/Makefile 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/Makefile 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,6 @@ +obj-$(CONFIG_SCSI_LPFC) += lpfcdd.o + +EXTRA_CFLAGS += -DLP6000 -D_LINUX -Idrivers/scsi + +lpfcdd-objs := fcscsib.o fcmboxb.o fcmemb.o fcelsb.o fcstratb.o \ + fcxmitb.o fcrpib.o fcclockb.o fcLINUXfcp.o lpfc.conf.o diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/README current/drivers/scsi/lpfc/README --- reference/drivers/scsi/lpfc/README 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/README 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,43 @@ +************************************************************************ +Emulex Corporation + +README for the Driver kit 1.23a for Emulex Fibre Channel Host Adapters + +September 12, 2003 +************************************************************************ + +This Application kit has been designed for the following environment: + +- Supported Hardware architecture platforms: + - 32-bit Intel platforms (IA-32) + - 64-bit Intel platforms (IA-64) + - Power PC 64 bits (PPC) + +- Supported Linux OS (note that testing has been conducted only with the kernels in parenthesis): + - Red Hat Pro 7.3 (kernel 2.4.18-27) + - Red Hat Pro 8.0 (kernel 2.4.18-27) + - Red Hat Advanced Server 2.1 x86 (kernel 2.4.9-e.16) + - SLES 7 x86 (kernel 2.4.16) + - SLES 8 x86 (kernel 2.4.19) + - Red Hat Advanced Server 2.1 IA-64 (kernel 2.4.18-e.25) + - SuSE 8.0 ppc64 (kernel 2.4.19-u11-ppc64) + +- Supported Emulex enterprise adapters: + - LP8000 + - LP9000 + - LP9002L + - LP9002DC + - LP9402DC + - LP9802 + - LP10000 + - LP9802 + - LP10000 + +- This driver supports any mix of the above Emulex adapters within a single host system. + +Main driver features: +1. 
Full fabric support, discovery, FCP and fibre channel device/error and exception handling +2. Concurrent multi-protocol (FCP and IP) support +3. Supports INT13 (EDD 2.1/3.0) fabric boot. +4. This driver is entirely self-contained and intended for configuration using lpfc. No external utility is required or supported. +5. This driver will not be dependent on any non-open source program for its execution. It will not taint an open source kernel. diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/dfc.h current/drivers/scsi/lpfc/dfc.h --- reference/drivers/scsi/lpfc/dfc.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/dfc.h 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,199 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ +#if !defined(_KERNEL) && !defined(__KERNEL__) +#endif + + +#define _DFC_64BIT 1 + +#ifdef BITS_PER_LONG +#if BITS_PER_LONG < 64 +#undef _DFC_64BIT +#endif +#endif + +#ifdef i386 +#undef _DFC_64BIT +#endif + +#ifdef powerpc +#ifndef CONFIG_PPC64 +#undef _DFC_64BIT +#endif +#endif + + +struct dfccmdinfo { +#ifdef _DFC_64BIT + char *c_datain; + char *c_dataout; + unsigned short c_cmd; + unsigned short c_insz; + uint32 c_outsz; +#else + void *c_filler1; + char *c_datain; + void *c_filler2; + char *c_dataout; + unsigned short c_cmd; + unsigned short c_insz; + uint32 c_outsz; +#endif +}; + +struct cmd_input { +#ifdef _DFC_64BIT + short c_brd; + short c_ring; + short c_iocb; + short c_flag; + void *c_arg1; + void *c_arg2; + void *c_arg3; + char c_string[16]; +#else + short c_brd; + short c_ring; + short c_iocb; + short c_flag; + void *c_filler1; + void *c_arg1; + void *c_filler2; + void *c_arg2; + void *c_filler3; + void *c_arg3; + char c_string[16]; +#endif +}; + + + +struct cmdinfo { + int c_cmd; + char *c_string; + int (*c_routine)(struct cmdinfo *cp, void *p); + char *c_datain; + char *c_dataout; + unsigned short c_insz; + unsigned short c_outsz; +}; + +#define C_INVAL 0x0 +#define C_DISPLAY_PCI_ALL 0x1 +#define C_WRITE_PCI 0x2 +#define C_WRITE_HC 0x3 +#define C_WRITE_HS 0x4 +#define C_WRITE_HA 0x5 +#define C_WRITE_CA 0x6 +#define C_READ_PCI 0x7 +#define C_READ_HC 0x8 +#define C_READ_HS 0x9 +#define C_READ_HA 0xa +#define C_READ_CA 0xb +#define C_READ_MB 0xc +#define C_EXIT 0xd +#define C_SET 0xe +#define C_READ_RING 0xf +#define C_READ_MEM 0x10 +#define C_READ_IOCB 0x11 +#define C_READ_RPILIST 0x12 +#define C_READ_BPLIST 0x13 +#define C_READ_MEMSEG 0x14 +#define C_MBOX 0x15 +#define C_RESET 0x16 +#define C_READ_BINFO 0x17 +#define C_NDD_STAT 0x18 +#define C_FC_STAT 0x19 +#define C_WRITE_MEM 0x1a +#define C_WRITE_CTLREG 0x1b +#define C_READ_CTLREG 0x1c +#define C_INITBRDS 0x1d +#define C_SETDIAG 
0x1e +#define C_DBG 0x1f +#define C_GET_PHYSADDR 0x20 +#define C_PUT_PHYSADDR 0x21 +#define C_NODE 0x22 +#define C_DEVP 0x23 +#define C_INST 0x24 +#define C_LIP 0x25 +#define C_LINKINFO 0x26 +#define C_IOINFO 0x27 +#define C_NODEINFO 0x28 +#define C_GETCFG 0x29 +#define C_SETCFG 0x2a +#define C_FAILIO 0x2b +#define C_OUTFCPIO 0x2c +#define C_RSTQDEPTH 0x2d +#define C_CT 0x2e +#define C_HBA_ADAPTERATRIBUTES 0x33 +#define C_HBA_PORTATRIBUTES 0x34 +#define C_HBA_PORTSTATISTICS 0x35 +#define C_HBA_DISCPORTATRIBUTES 0x36 +#define C_HBA_WWPNPORTATRIBUTES 0x37 +#define C_HBA_INDEXPORTATRIBUTES 0x38 +#define C_HBA_FCPTARGETMAPPING 0x39 +#define C_HBA_FCPBINDING 0x3a +#define C_HBA_SETMGMTINFO 0x3b +#define C_HBA_GETMGMTINFO 0x3c +#define C_HBA_RNID 0x3d +#define C_HBA_GETEVENT 0x3e +#define C_HBA_RESETSTAT 0x3f +#define C_HBA_SEND_SCSI 0x40 +#define C_HBA_REFRESHINFO 0x41 +#define C_SEND_ELS 0x42 +#define C_LISTN 0x45 +#define C_TRACE 0x46 +#define C_HELP 0x47 +#define C_HBA_SEND_FCP 0x48 +#define C_SET_EVENT 0x49 +#define C_GET_EVENT 0x4a +#define C_SEND_MGMT_CMD 0x4b +#define C_SEND_MGMT_RSP 0x4c +#define C_LISTEVT 0x59 +#define C_MAX_CMDS 0x5a + +#define DFC_MBX_MAX_CMDS 29 + +/* Structure for OUTFCPIO command */ +struct out_fcp_io { + ushort tx_count; + ushort txp_count; + ushort timeout_count; + ushort devp_count; + void * tx_head; + void * tx_tail; + void * txp_head; + void * txp_tail; + void * timeout_head; +}; + +struct out_fcp_devp { + ushort target; + ushort lun; + uint32 standby_count; + uint32 pend_count; + uint32 clear_count; + void *standby_queue_head; + void *standby_queue_tail; + void *pend_head; + void *pend_tail; + void *clear_head; +}; + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/dfcdd.c current/drivers/scsi/lpfc/dfcdd.c --- reference/drivers/scsi/lpfc/dfcdd.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/dfcdd.c 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,4595 @@ + 
+/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +#include "dfc.h" + +/*************************************************************************/ +/* Global data structures */ +/*************************************************************************/ + + int rc = 0; + int do_cp = 0; + +#ifdef DFC_SUBSYSTEM + +struct dfc { + uint32 dfc_init; + uint32 dfc_pad; + uchar dfc_buffer[4096]; + struct dfc_info dfc_info[MAX_FC_BRDS]; +}; + +_static_ struct dfc dfc; + +struct dfc_mem { + uint32 fc_outsz; + uint32 fc_filler; + void * fc_dataout; +}; + +extern uint32 fc_dbg_flag; +uint32 fc_out_event = 4; + +/* Routine Declaration - Local */ +_local_ fc_dev_ctl_t * dfc_getpdev(struct cmd_input *ci); +_local_ int dfc_msdelay(fc_dev_ctl_t *p, ulong ms); +_local_ int dfc_issue_mbox( fc_dev_ctl_t *p, MAILBOX * mb, ulong *ipri); +_local_ DMATCHMAP * dfc_cmd_data_alloc(fc_dev_ctl_t *p, uchar *inp, ULP_BDE64 *bpl, uint32 size); +_local_ int dfc_cmd_data_free(fc_dev_ctl_t *p, DMATCHMAP *mlist); +_local_ int 
dfc_rsp_data_copy(fc_dev_ctl_t *p, uchar * op, DMATCHMAP *mlist, uint32 size); +_local_ DMATCHMAP * dfc_fcp_data_alloc(fc_dev_ctl_t *p, ULP_BDE64 *bpl); +_local_ int dfc_fcp_data_free(fc_dev_ctl_t *p, DMATCHMAP *fcpmp); +_forward_ int dfc_data_alloc(fc_dev_ctl_t * p_dev_ctl, struct dfc_mem *dm, uint32 size); +_forward_ int dfc_hba_rnid(fc_dev_ctl_t * p_dev_ctl, struct dfc_mem *dm, struct cmd_input *cip, struct dfccmdinfo *infop, MBUF_INFO *buf_info, ulong ipri); +_forward_ int dfc_hba_sendmgmt_ct(fc_dev_ctl_t * p_dev_ctl, struct dfc_mem *dm, struct cmd_input *cip, struct dfccmdinfo *infop, ulong ipri); +_forward_ int dfc_hba_fcpbind(fc_dev_ctl_t * p_dev_ctl, struct dfc_mem *dm, struct cmd_input *cip, struct dfccmdinfo *infop, ulong ipri); +_forward_ int dfc_hba_targetmapping(fc_dev_ctl_t * p_dev_ctl, struct dfc_mem *dm, struct cmd_input *cip, struct dfccmdinfo *infop, ulong ipri); +_forward_ int dfc_hba_sendscsi_fcp(fc_dev_ctl_t * p_dev_ctl, struct dfc_mem *dm, struct cmd_input *cip, struct dfccmdinfo *infop, ulong ipri); +_forward_ int dfc_hba_set_event(fc_dev_ctl_t * p_dev_ctl, struct dfc_mem *dm, struct cmd_input *cip, struct dfccmdinfo *infop, ulong ipri); +_forward_ int dfc_data_free(fc_dev_ctl_t * p_dev_ctl, struct dfc_mem *dm); +_forward_ uint32 dfc_getLunId(node_t *nodep, uint32 lunIndex); +/* End Routine Declaration - Local */ + +/*****************************************************************************/ +/* + * NAME: dfc_ioctl + * + * FUNCTION: diagnostic ioctl interface + * + * EXECUTION ENVIRONMENT: process only + * + * NOTES: + * + * CALLED FROM: + * dfc_config + * + * RETURNS: + * 0 - successful + * EINVAL - invalid parameter was passed + * + */ +/*****************************************************************************/ +_static_ int +dfc_ioctl( +struct dfccmdinfo *infop, +struct cmd_input *cip) +{ + uint32 outshift; + int i, j; /* loop index */ + ulong ipri; + int max; + FC_BRD_INFO * binfo; + uint32 * lptr; + MBUF_INFO * buf_info; + 
MBUF_INFO * dmdata_info; + MBUF_INFO * mbox_info; + uchar * bp; + uint32 incr; + uint32 size; + uint32 buf1sz; + int total_mem; + uint32 offset; + uint32 cnt; + NODELIST * nlp; + node_t * nodep; + dvi_t * dev_ptr; + void * ioa; + fc_dev_ctl_t * p_dev_ctl; + iCfgParam * clp; + RING * rp; + MAILBOX * mb; + MATCHMAP * mm; + node_t * node_ptr; + fcipbuf_t * fbp; + struct out_fcp_io * fp; + struct out_fcp_devp * dp; + struct dfc_info * di; + struct dfc_mem * dm; + HBA_PORTATTRIBUTES * hp; + fc_vpd_t * vp; + MAILBOX * mbox; + MBUF_INFO bufinfo; + + if ((p_dev_ctl = dfc_getpdev(cip)) == 0) { + return(EINVAL); + } + + binfo = &BINFO; + cnt = binfo->fc_brd_no; + clp = DD_CTL.p_config[cnt]; + di = &dfc.dfc_info[cip->c_brd]; + buf_info = &bufinfo; + + dmdata_info = &bufinfo; + dmdata_info->virt = 0; + dmdata_info->phys = 0; + dmdata_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + dmdata_info->align = sizeof(void *); + dmdata_info->size = sizeof(* dm); + dmdata_info->dma_handle = 0; + fc_malloc(p_dev_ctl, dmdata_info); + if (buf_info->virt == NULL) { + return (ENOMEM); + } + dm = (struct dfc_mem *)dmdata_info->virt; + fc_bzero((void *)dm, sizeof(struct dfc_mem)); + + mbox_info = &bufinfo; + mbox_info->virt = 0; + mbox_info->phys = 0; + mbox_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + mbox_info->align = sizeof(void *); + mbox_info->size = sizeof(* mbox); + mbox_info->dma_handle = 0; + fc_malloc(p_dev_ctl, mbox_info); + if (mbox_info->virt == NULL) { + return (ENOMEM); + } + mbox = (MAILBOX *)mbox_info->virt; + + + /* dfc_ioctl entry */ + + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0400, /* ptr to msg structure */ + fc_mes0400, /* ptr to msg */ + fc_msgBlk0400.msgPreambleStr, /* begin varargs */ + infop->c_cmd, + (uint32)((ulong)cip->c_arg1), + (uint32)((ulong)cip->c_arg2), + infop->c_outsz); /* end varargs */ + + outshift = 0; + if(infop->c_outsz) { + if(infop->c_outsz <= (64 * 1024)) + total_mem = infop->c_outsz; + else + total_mem = 64 * 1024; + 
if(dfc_data_alloc(p_dev_ctl, dm, total_mem)) { + return(ENOMEM); + } + } + else { + /* Allocate memory for ioctl data */ + if(dfc_data_alloc(p_dev_ctl, dm, 4096)) { + return(ENOMEM); + } + total_mem = 4096; + } + + /* Make sure driver instance is attached */ + if(p_dev_ctl != DD_CTL.p_dev[cnt]) { + return(ENODEV); + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + di->fc_refcnt++; + + + switch (infop->c_cmd) { + /* Diagnostic Interface Library Support */ + + case C_WRITE_PCI: + offset = (uint32)((ulong)cip->c_arg1); + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + rc = EPERM; + break; + } + if (offset > 255) { + rc = ERANGE; + break; + } + cnt = (uint32)((ulong)cip->c_arg2); + if ((cnt + offset) > 256) { + rc = ERANGE; + break; + } + + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyin((uchar *)infop->c_dataout, (uchar *)dfc.dfc_buffer, (ulong)cnt)) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + break; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = fc_writepci(di, offset, (char *)dfc.dfc_buffer, (cnt >> 2)); + break; + + case C_READ_PCI: + offset = (uint32)((ulong)cip->c_arg1); + if (offset > 255) { + rc = ERANGE; + break; + } + cnt = (uint32)((ulong)cip->c_arg2); + if ((cnt + offset) > 256) { + rc = ERANGE; + break; + } + rc = fc_readpci(di, offset, (char *)dm->fc_dataout, (cnt >> 2)); + break; + + case C_WRITE_MEM: + offset = (uint32)((ulong)cip->c_arg1); + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + if (offset != 256) { + rc = EPERM; + break; + } + if (cnt > 128) { + rc = EPERM; + break; + } + } + if (offset >= 4096) { + rc = ERANGE; + break; + } + cnt = (uint32)((ulong)cip->c_arg2); + if ((cnt + offset) > 4096) { + rc = ERANGE; + break; + } + + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyin((uchar *)infop->c_dataout, (uchar *)dfc.dfc_buffer, (ulong)cnt)) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + break; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + binfo = &BINFO; + if ((binfo->fc_flag & FC_SLI2) && 
(!(binfo->fc_flag & FC_OFFLINE_MODE))) { + fc_pcimem_bcopy((uint32 * )dfc.dfc_buffer, + (uint32 * )(((char *)(binfo->fc_slim2.virt)) + offset), cnt); + } else { + ioa = (void *)FC_MAP_MEM(&di->fc_iomap_mem); /* map in slim */ + WRITE_SLIM_COPY(binfo, (uint32 * )dfc.dfc_buffer, + (volatile uint32 * )((volatile char *)ioa + offset), + (cnt / sizeof(uint32))); + FC_UNMAP_MEMIO(ioa); + } + + break; + + case C_READ_MEM: + offset = (uint32)((ulong)cip->c_arg1); + if (offset >= 4096) { + rc = ERANGE; + break; + } + cnt = (uint32)((ulong)cip->c_arg2); + if ((cnt + offset) > 4096) { + rc = ERANGE; + break; + } + + binfo = &BINFO; + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + fc_pcimem_bcopy((uint32 * )(((char *)(binfo->fc_slim2.virt)) + offset), + (uint32 * )dm->fc_dataout, cnt); + } else { + ioa = (void *)FC_MAP_MEM(&di->fc_iomap_mem); /* map in slim */ + READ_SLIM_COPY(binfo, (uint32 * )dm->fc_dataout, + (volatile uint32 * )((volatile char *)ioa + offset), + (cnt / sizeof(uint32))); + FC_UNMAP_MEMIO(ioa); + } + break; + + case C_WRITE_CTLREG: + offset = (uint32)((ulong)cip->c_arg1); + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + rc = EPERM; + break; + } + if (offset > 255) { + rc = ERANGE; + break; + } + incr = (uint32)((ulong)cip->c_arg2); + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, + ((volatile uint32 * )((volatile char *)ioa + (offset))), incr); + FC_UNMAP_MEMIO(ioa); + + break; + + case C_READ_CTLREG: + offset = (uint32)((ulong)cip->c_arg1); + if (offset > 255) { + rc = ERANGE; + break; + } + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + incr = READ_CSR_REG(binfo, + ((volatile uint32 * )((volatile char *)ioa + (offset)))); + FC_UNMAP_MEMIO(ioa); + *((uint32 * )dm->fc_dataout) = incr; + break; + + case C_INITBRDS: + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyin((uchar*)infop->c_dataout, (uchar*)&di->fc_ba, sizeof(brdinfo))) { + ipri = 
dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + break; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + if (fc_initpci(di, p_dev_ctl)) { + rc = EIO; + break; + } + if (binfo->fc_flag & FC_OFFLINE_MODE) + di->fc_ba.a_offmask |= OFFDI_OFFLINE; + + fc_bcopy((uchar * ) & di->fc_ba, dm->fc_dataout, sizeof(brdinfo)); + infop->c_outsz = sizeof(brdinfo); + break; + + case C_SETDIAG: + dfc_unlock_enable(ipri, &CMD_LOCK); + offset = (uint32)((ulong)cip->c_arg1); + switch (offset) { + case DDI_ONDI: + if (fc_diag_state == DDI_OFFDI) { + fc_online(0); + } + *((uint32 * )(dm->fc_dataout)) = fc_diag_state; + break; + case DDI_OFFDI: + if (fc_diag_state == DDI_ONDI) { + fc_offline(0); + } + *((uint32 * )(dm->fc_dataout)) = fc_diag_state; + break; + case DDI_SHOW: + *((uint32 * )(dm->fc_dataout)) = fc_diag_state; + break; + case DDI_BRD_ONDI: + if (binfo->fc_flag & FC_OFFLINE_MODE) { + fc_online(p_dev_ctl); + } + *((uint32 * )(dm->fc_dataout)) = DDI_ONDI; + break; + case DDI_BRD_OFFDI: + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + fc_offline(p_dev_ctl); + } + *((uint32 * )(dm->fc_dataout)) = DDI_OFFDI; + break; + case DDI_BRD_SHOW: + if (binfo->fc_flag & FC_OFFLINE_MODE) { + *((uint32 * )(dm->fc_dataout)) = DDI_OFFDI; + } + else { + *((uint32 * )(dm->fc_dataout)) = DDI_ONDI; + } + break; + default: + rc = ERANGE; + break; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + break; + + case C_LIP: + binfo = &BINFO; + mb = (MAILBOX * )mbox; + fc_bzero((void *)mb, sizeof(MAILBOX)); + + if ((binfo->fc_ffstate == FC_READY) && (binfo->fc_process_LA)) { + /* Turn off link attentions */ + binfo->fc_process_LA = 0; + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + offset = READ_CSR_REG(binfo, FC_HC_REG(binfo, ioa)); + offset &= ~HC_LAINT_ENA; + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), offset); + FC_UNMAP_MEMIO(ioa); + + switch (clp[CFG_TOPOLOGY].a_current) { + case FLAGS_TOPOLOGY_MODE_LOOP_PT: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; + 
mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; + break; + case FLAGS_TOPOLOGY_MODE_PT_PT: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; + break; + case FLAGS_TOPOLOGY_MODE_LOOP: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; + break; + case FLAGS_TOPOLOGY_MODE_PT_LOOP: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; + mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; + break; + } + + vp = &VPD; + if (binfo->fc_flag & FC_2G_CAPABLE) { + if ((vp->rev.feaLevelHigh >= 0x02) && + (clp[CFG_LINK_SPEED].a_current > 0)) { + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = clp[CFG_LINK_SPEED].a_current; + } + } + mb->mbxCommand = MBX_INIT_LINK; + mb->mbxOwner = OWN_HOST; + goto mbxbegin; + } + mb->mbxStatus = MBXERR_ERROR; + fc_bcopy((char *) & mb->mbxStatus, dm->fc_dataout, sizeof(ushort)); + break; + + case C_FAILIO: + { + uint32 tgt; + uint32 lun; + uint32 dev_index; + + binfo = &BINFO; + i = (uint32)((ulong)cip->c_arg1); + tgt = (uint32)((ulong)cip->c_arg2); + lun = (uint32)((ulong)cip->c_arg3); + switch(i) { + case 1: + fc_failio(p_dev_ctl); + break; + case 2: /* stop */ + dev_index = INDEX(0, tgt); + dev_ptr = fc_find_lun(binfo, dev_index, lun); + if(dev_ptr == 0) { + rc = ERANGE; + goto out; + } + dev_ptr->stop_send_io = 1; + break; + case 3: /* start */ + dev_index = INDEX(0, tgt); + dev_ptr = fc_find_lun(binfo, dev_index, lun); + if(dev_ptr == 0) { + rc = ERANGE; + goto out; + } + if(dev_ptr->stop_send_io == 1) { + dev_ptr->stop_send_io = 0; + fc_restart_device(dev_ptr); + } + break; + } + break; + } + + case C_RSTQDEPTH: + fc_reset_dev_q_depth(p_dev_ctl); + break; + + case C_OUTFCPIO: + { + max = (infop->c_outsz / sizeof(struct out_fcp_devp)) - 1; + + binfo = &BINFO; + fp = (struct out_fcp_io *)dm->fc_dataout; + dp = (struct out_fcp_devp *)((uchar *)fp + sizeof(struct out_fcp_io)); + rp = &binfo->fc_ring[FC_FCP_RING]; + fp->tx_head = rp->fc_tx.q_first; + fp->tx_tail = 
rp->fc_tx.q_last; + fp->txp_head = rp->fc_txp.q_first; + fp->txp_tail = rp->fc_txp.q_last; + fp->tx_count = rp->fc_tx.q_cnt; + fp->txp_count = rp->fc_txp.q_cnt; + fp->timeout_head = p_dev_ctl->timeout_head; + fp->timeout_count = p_dev_ctl->timeout_count; + fp->devp_count = 0; + for (i = 0; i < MAX_FC_TARGETS; i++) { + if ((node_ptr = binfo->device_queue_hash[i].node_ptr) != NULL) { + for (dev_ptr = node_ptr->lunlist; dev_ptr != NULL; + dev_ptr = dev_ptr->next) { + if(fp->devp_count++ >= max) + goto outio; + dp->target = dev_ptr->nodep->scsi_id; + dp->lun = (ushort)(dev_ptr->lun_id); + dp->standby_queue_head = dev_ptr->standby_queue_head; + dp->standby_queue_tail = dev_ptr->standby_queue_tail; + dp->standby_count = dev_ptr->standby_count; + dp->pend_head = dev_ptr->pend_head; + dp->pend_tail = dev_ptr->pend_tail; + dp->pend_count = dev_ptr->pend_count; + dp->clear_head = dev_ptr->clear_head; + dp->clear_count = dev_ptr->clear_count; + dp++; + } + } + } +outio: + infop->c_outsz = (sizeof(struct out_fcp_io) + + (fp->devp_count * sizeof(struct out_fcp_devp))); + } + break; + + case C_HBA_SEND_SCSI: + case C_HBA_SEND_FCP: + ipri = dfc_hba_sendscsi_fcp(p_dev_ctl, dm, cip, infop, ipri); + break; + + case C_SEND_ELS: + { + uint32 did; + uint32 opcode; + + binfo = &BINFO; + did = (uint32)((ulong)cip->c_arg1); + opcode = (uint32)((ulong)cip->c_arg2); + did = (did & Mask_DID); + + if(((nlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, did))) == 0) { + if((nlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)nlp, sizeof(NODELIST)); + nlp->sync = binfo->fc_sync; + nlp->capabilities = binfo->fc_capabilities; + nlp->nlp_DID = did; + nlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, nlp); + } + else { + rc = ENOMEM; + break; + } + } + + fc_els_cmd(binfo, opcode, (void *)((ulong)did), (uint32)0, (ushort)0, nlp); + } + break; + + case C_SEND_MGMT_RSP: + { + ULP_BDE64 * bpl; + MATCHMAP * bmp; + DMATCHMAP * indmp; + uint32 tag; + + tag = (uint32)cip->c_flag; /* XRI for 
XMIT_SEQUENCE */ + buf1sz = (uint32)((ulong)cip->c_arg2); + + if((buf1sz == 0) || + (buf1sz > (80 * 4096))) { + rc = ERANGE; + goto out; + } + + /* Allocate buffer for Buffer ptr list */ + if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL)) == 0) { + rc = ENOMEM; + goto out; + } + bpl = (ULP_BDE64 * )bmp->virt; + dfc_unlock_enable(ipri, &CMD_LOCK); + + if((indmp = dfc_cmd_data_alloc(p_dev_ctl, (uchar *)cip->c_arg1, bpl, buf1sz)) == 0) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + rc = ENOMEM; + goto out; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if((rc=fc_issue_ct_rsp(binfo, tag, bmp, indmp))) { + if(rc == ENODEV) + rc = EACCES; + goto xmout1; + } + + j = 0; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 1); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + /* Wait for CT request to complete or timeout */ + while(indmp->dfc_flag == 0) { + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 50); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if(j >= 600) { + indmp->dfc_flag = -1; + break; + } + j++; + } + + j = indmp->dfc_flag; + if(j == -1) { + rc = ETIMEDOUT; + } + +xmout1: + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_cmd_data_free(p_dev_ctl, indmp); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + } + break; + + case C_SEND_MGMT_CMD: + case C_CT: + ipri = dfc_hba_sendmgmt_ct(p_dev_ctl, dm, cip, infop, ipri); + break; + + case C_MBOX: + binfo = &BINFO; + mb = (MAILBOX * )mbox; + + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyin((uchar *)cip->c_arg1, (uchar *)mb, + MAILBOX_CMD_WSIZE * sizeof(uint32))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + goto out; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + +mbxbegin: + + cnt = 0; + while ((binfo->fc_mbox_active) || (di->fc_flag & DFC_MBOX_ACTIVE)) { + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 5); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + 
if(cnt++ == 200) + break; + } + + if (cnt >= 200) { + mb->mbxStatus = MBXERR_ERROR; + } + else { + binfo->fc_mbox_active = 2; +#ifdef _LP64 + if((mb->mbxCommand == MBX_READ_SPARM) || + (mb->mbxCommand == MBX_READ_RPI) || + (mb->mbxCommand == MBX_REG_LOGIN) || + (mb->mbxCommand == MBX_READ_LA)) { + mb->mbxStatus = MBXERR_ERROR; + rc = ENODEV; + binfo->fc_mbox_active = 0; + goto mbout; + } +#endif + lptr = 0; + size = 0; + switch (mb->mbxCommand) { + /* Offline only */ + case MBX_WRITE_NV: + case MBX_INIT_LINK: + case MBX_DOWN_LINK: + case MBX_CONFIG_LINK: + case MBX_PART_SLIM: + case MBX_CONFIG_RING: + case MBX_RESET_RING: + case MBX_UNREG_LOGIN: + case MBX_CLEAR_LA: + case MBX_DUMP_CONTEXT: + case MBX_RUN_DIAGS: + case MBX_RESTART: + case MBX_FLASH_WR_ULA: + case MBX_SET_MASK: + case MBX_SET_SLIM: + case MBX_SET_DEBUG: + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + if (infop->c_cmd != C_LIP) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + } + break; + + /* Online / Offline */ + case MBX_LOAD_SM: + case MBX_READ_NV: + case MBX_READ_CONFIG: + case MBX_READ_RCONFIG: + case MBX_READ_STATUS: + case MBX_READ_XRI: + case MBX_READ_REV: + case MBX_READ_LNK_STAT: + case MBX_DUMP_MEMORY: + case MBX_DOWN_LOAD: + case MBX_UPDATE_CFG: + case MBX_LOAD_AREA: + case MBX_LOAD_EXP_ROM: + break; + + /* Offline only - with DMA */ + case MBX_REG_LOGIN: + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + lptr = (uint32 * )((ulong)mb->un.varRegLogin.un.sp.bdeAddress); + size = (int)mb->un.varRegLogin.un.sp.bdeSize; + if (lptr) { + buf_info->virt = (void * )dfc.dfc_buffer; + buf_info->flags = (FC_MBUF_PHYSONLY | FC_MBUF_DMA | + FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = DMA_READ; + buf_info->size = sizeof(SERV_PARM); + buf_info->dma_handle = 0; + + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_malloc(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + if 
(buf_info->phys == NULL) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + mb->un.varRegLogin.un.sp.bdeAddress = + (uint32)putPaddrLow(buf_info->phys); + } + break; + case MBX_RUN_BIU_DIAG: + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + + /* Online / Offline - with DMA */ + case MBX_READ_SPARM64: + if (!((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE)))) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + case MBX_READ_SPARM: + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + if (mb->mbxCommand == MBX_READ_SPARM) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + lptr = (uint32 * )getPaddr(mb->un.varRdSparm.un.sp64.addrHigh, + mb->un.varRdSparm.un.sp64.addrLow); + size = (int)mb->un.varRdSparm.un.sp64.tus.f.bdeSize; + } else { + lptr = (uint32 * )((ulong)mb->un.varRdSparm.un.sp.bdeAddress); + size = (int)mb->un.varRdSparm.un.sp.bdeSize; + } + if (lptr) { + buf_info->virt = (void * )dfc.dfc_buffer; + buf_info->flags = (FC_MBUF_PHYSONLY | FC_MBUF_DMA | + FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = DMA_READ; + buf_info->size = sizeof(SERV_PARM); + buf_info->dma_handle = 0; + buf_info->phys = 0; + + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_malloc(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + if (buf_info->phys == NULL) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + mb->un.varRdSparm.un.sp64.addrHigh = + (uint32)putPaddrHigh(buf_info->phys); + mb->un.varRdSparm.un.sp64.addrLow = + (uint32)putPaddrLow(buf_info->phys); + } + else + mb->un.varRdSparm.un.sp.bdeAddress = + (uint32)putPaddrLow(buf_info->phys); + } + break; + case 
MBX_READ_RPI64: + if (!((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE)))) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + case MBX_READ_RPI: + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + if (mb->mbxCommand == MBX_READ_RPI) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + lptr = (uint32 * )getPaddr(mb->un.varRdRPI.un.sp64.addrHigh, + mb->un.varRdRPI.un.sp64.addrLow); + size = (int)mb->un.varRdRPI.un.sp64.tus.f.bdeSize; + } else { + lptr = (uint32 * )((ulong)mb->un.varRdRPI.un.sp.bdeAddress); + size = (int)mb->un.varRdRPI.un.sp.bdeSize; + } + if (lptr) { + buf_info->virt = (void * )dfc.dfc_buffer; + buf_info->flags = (FC_MBUF_PHYSONLY | FC_MBUF_DMA | + FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = DMA_READ; + buf_info->size = sizeof(SERV_PARM); + buf_info->dma_handle = 0; + + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_malloc(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + if (buf_info->phys == NULL) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + mb->un.varRdRPI.un.sp64.addrHigh = + (uint32)putPaddrHigh(buf_info->phys); + mb->un.varRdRPI.un.sp64.addrLow = + (uint32)putPaddrLow(buf_info->phys); + } + else + mb->un.varRdRPI.un.sp.bdeAddress = + (uint32)putPaddrLow(buf_info->phys); + } + break; + case MBX_READ_LA: + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + lptr = (uint32 * )((ulong)mb->un.varReadLA.un.lilpBde.bdeAddress); + size = (int)mb->un.varReadLA.un.lilpBde.bdeSize; + if (lptr) { + buf_info->virt = (void * )dfc.dfc_buffer; + buf_info->flags = (FC_MBUF_PHYSONLY | FC_MBUF_DMA | + FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = DMA_READ; + buf_info->size = 128; + buf_info->dma_handle = 0; + + dfc_unlock_enable(ipri, 
&CMD_LOCK); + fc_malloc(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + if (buf_info->phys == NULL) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + mb->un.varReadLA.un.lilpBde.bdeAddress = + (uint32)putPaddrLow(buf_info->phys); + } + break; + + case MBX_CONFIG_PORT: + case MBX_REG_LOGIN64: + case MBX_READ_LA64: + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + + default: + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + goto mbout; + } + break; + } + + binfo->fc_mbox_active = 0; + if(dfc_issue_mbox(p_dev_ctl, mb, &ipri)) + goto mbout; + + if (lptr) { + buf_info->virt = 0; + buf_info->flags = (FC_MBUF_PHYSONLY | FC_MBUF_DMA | + FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->dma_handle = 0; + switch (mb->mbxCommand) { + case MBX_REG_LOGIN: + buf_info->phys = (uint32 * ) + ((ulong)mb->un.varRegLogin.un.sp.bdeAddress); + buf_info->size = sizeof(SERV_PARM); + break; + + case MBX_READ_SPARM: + buf_info->phys = (uint32 * ) + ((ulong)mb->un.varRdSparm.un.sp.bdeAddress); + buf_info->size = sizeof(SERV_PARM); + break; + + case MBX_READ_RPI: + buf_info->phys = (uint32 * ) + ((ulong)mb->un.varRdRPI.un.sp.bdeAddress); + buf_info->size = sizeof(SERV_PARM); + break; + + case MBX_READ_LA: + buf_info->phys = (uint32 * ) + ((ulong)mb->un.varReadLA.un.lilpBde.bdeAddress); + buf_info->size = 128; + break; + } + + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_free(p_dev_ctl, buf_info); + + if ((fc_copyout((uchar *)dfc.dfc_buffer, (uchar *)lptr, (ulong)size))) { + rc = EIO; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + } + } + +mbout: + if (infop->c_cmd == C_LIP) { + /* Turn on Link Attention interrupts */ + binfo->fc_process_LA = 1; + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + offset = READ_CSR_REG(binfo, FC_HC_REG(binfo, ioa)); + offset |= HC_LAINT_ENA; + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), offset); + 
FC_UNMAP_MEMIO(ioa); + } + + if (infop->c_cmd == C_LIP) + fc_bcopy((char *) & mb->mbxStatus, dm->fc_dataout, sizeof(ushort)); + else + fc_bcopy((char *)mb, dm->fc_dataout, MAILBOX_CMD_WSIZE * sizeof(uint32)); + break; + + + case C_DISPLAY_PCI_ALL: + + if ((rc = fc_readpci(di, 0, (char *)dm->fc_dataout, 64))) + break; + break; + + case C_SET: + if(cip->c_iocb == 0) { + bp = binfo->fc_portname.IEEE; + } + else { + cnt = 0; + bp = 0; + nlp = binfo->fc_nlpunmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if((int)cnt == cip->c_iocb-1) { + if ((nlp->nlp_type & NLP_IP_NODE) && (nlp->nlp_Rpi) && + (nlp->nlp_Xri)) { + bp = nlp->nlp_nodename.IEEE; + } + else { + bp = binfo->fc_portname.IEEE; + } + break; + } + cnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpmap_start; + } + } + break; + + case C_WRITE_HC: + incr = (uint32)((ulong)cip->c_arg1); + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, + ((volatile uint32 * )((volatile char *)ioa + (sizeof(uint32) * HC_REG_OFFSET))), incr); + FC_UNMAP_MEMIO(ioa); + case C_READ_HC: + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + offset = READ_CSR_REG(binfo, FC_HC_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + + *((uint32 * )dm->fc_dataout) = offset; + break; + + case C_WRITE_HS: + incr = (uint32)((ulong)cip->c_arg1); + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, + ((volatile uint32 * )((volatile char *)ioa + (sizeof(uint32) * HS_REG_OFFSET))), incr); + FC_UNMAP_MEMIO(ioa); + case C_READ_HS: + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + offset = READ_CSR_REG(binfo, FC_STAT_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + + *((uint32 * )dm->fc_dataout) = offset; + break; + + case C_WRITE_HA: + incr = (uint32)((ulong)cip->c_arg1); + ioa 
= (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, + ((volatile uint32 * )((volatile char *)ioa + (sizeof(uint32) * HA_REG_OFFSET))), incr); + FC_UNMAP_MEMIO(ioa); + case C_READ_HA: + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + offset = READ_CSR_REG(binfo, FC_HA_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + + *((uint32 * )dm->fc_dataout) = offset; + break; + + case C_WRITE_CA: + incr = (uint32)((ulong)cip->c_arg1); + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, + ((volatile uint32 * )((volatile char *)ioa + (sizeof(uint32) * CA_REG_OFFSET))), incr); + FC_UNMAP_MEMIO(ioa); + case C_READ_CA: + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + offset = READ_CSR_REG(binfo, FC_FF_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + + *((uint32 * )dm->fc_dataout) = offset; + break; + + case C_READ_MB: + binfo = &BINFO; + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + mb = FC_SLI2_MAILBOX(binfo); + fc_pcimem_bcopy((uint32 * )mb, (uint32 * )dm->fc_dataout, 128); + } else { + ioa = (void *)FC_MAP_MEM(&di->fc_iomap_mem); /* map in slim */ + READ_SLIM_COPY(binfo, (uint32 * )dm->fc_dataout, (uint32 * )ioa, + MAILBOX_CMD_WSIZE); + FC_UNMAP_MEMIO(ioa); + } + break; + + case C_DBG: + offset = (uint32)((ulong)cip->c_arg1); + switch (offset) { + case 0xffffffff: + break; + default: + fc_dbg_flag = offset; + break; + } + + fc_bcopy((uchar * ) & fc_dbg_flag , dm->fc_dataout, sizeof(uint32)); + break; + + case C_INST: + fc_bcopy((uchar * ) &fcinstcnt, dm->fc_dataout, sizeof(int)); + fc_bcopy((uchar * ) fcinstance, ((uchar *)dm->fc_dataout) + sizeof(int), sizeof(int) * MAX_FC_BRDS); + break; + + case C_READ_RING: + fc_bcopy(&binfo->fc_ring[cip->c_ring], dm->fc_dataout, sizeof(RING)); + break; + + case C_LISTN: + { + NODELIST *npp; + ulong lcnt; + ulong *lcntp; + + offset = (uint32)((ulong)cip->c_arg1); + total_mem -= sizeof(NODELIST); + 
lcnt = 0; + switch (offset) { + case 1: /* bind */ + lcntp = dm->fc_dataout; + fc_bcopy((uchar * ) &lcnt , dm->fc_dataout, sizeof(ulong)); + npp = (NODELIST *)((char *)(dm->fc_dataout) + sizeof(ulong)); + nlp = binfo->fc_nlpbind_start; + while((nlp != (NODELIST *)&binfo->fc_nlpbind_start) && (total_mem > 0)) { + fc_bcopy((char *)nlp, npp, (sizeof(NODELIST))); + total_mem -= sizeof(NODELIST); + npp++; + lcnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + } + *lcntp = lcnt; + break; + case 2: /* unmap */ + lcntp = dm->fc_dataout; + fc_bcopy((uchar * ) &lcnt , dm->fc_dataout, sizeof(ulong)); + npp = (NODELIST *)((char *)(dm->fc_dataout) + sizeof(ulong)); + nlp = binfo->fc_nlpunmap_start; + while((nlp != (NODELIST *)&binfo->fc_nlpunmap_start) && (total_mem > 0)) { + fc_bcopy((char *)nlp, npp, (sizeof(NODELIST))); + total_mem -= sizeof(NODELIST); + npp++; + lcnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + } + *lcntp = lcnt; + break; + case 3: /* map */ + lcntp = dm->fc_dataout; + fc_bcopy((uchar * ) &lcnt , dm->fc_dataout, sizeof(ulong)); + npp = (NODELIST *)((char *)(dm->fc_dataout) + sizeof(ulong)); + nlp = binfo->fc_nlpmap_start; + while((nlp != (NODELIST *)&binfo->fc_nlpmap_start) && (total_mem > 0)) { + fc_bcopy((char *)nlp, npp, (sizeof(NODELIST))); + total_mem -= sizeof(NODELIST); + npp++; + lcnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + } + *lcntp = lcnt; + break; + case 4: /* all */ + lcntp = dm->fc_dataout; + fc_bcopy((uchar * ) &lcnt , dm->fc_dataout, sizeof(ulong)); + npp = (NODELIST *)((char *)(dm->fc_dataout) + sizeof(ulong)); + nlp = binfo->fc_nlpbind_start; + while((nlp != (NODELIST *)&binfo->fc_nlpbind_start) && (total_mem > 0)) { + fc_bcopy((char *)nlp, npp, (sizeof(NODELIST))); + total_mem -= sizeof(NODELIST); + npp++; + lcnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + } + nlp = binfo->fc_nlpunmap_start; + while((nlp != (NODELIST *)&binfo->fc_nlpunmap_start) && (total_mem > 0)) { + fc_bcopy((char *)nlp, npp, (sizeof(NODELIST))); + total_mem -= 
sizeof(NODELIST); + npp++; + lcnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + } + nlp = binfo->fc_nlpmap_start; + while((nlp != (NODELIST *)&binfo->fc_nlpmap_start) && (total_mem > 0)) { + fc_bcopy((char *)nlp, npp, (sizeof(NODELIST))); + total_mem -= sizeof(NODELIST); + npp++; + lcnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + } + *lcntp = lcnt; + break; + default: + rc = ERANGE; + break; + } + infop->c_outsz = (sizeof(ulong) + (lcnt * sizeof(NODELIST))); + break; + } + + case C_LISTEVT: + { + uint32 ehdcnt; + uint32 ecnt; + uint32 *ehdcntp; + uint32 *ecntp; + fcEvent *ep; + fcEvent_header *ehp; + + offset = (uint32)((ulong)cip->c_arg1); + total_mem -= sizeof(uint32); + infop->c_outsz = sizeof(uint32); + ehdcnt = 0; + ehdcntp = (uint32 *)dm->fc_dataout; + bp = (uchar *)((char *)(dm->fc_dataout) + sizeof(uint32)); + switch (offset) { + case 1: /* link */ + offset = FC_REG_LINK_EVENT; + break; + case 2: /* rscn */ + offset = FC_REG_RSCN_EVENT; + break; + case 3: /* ct */ + offset = FC_REG_CT_EVENT; + break; + case 7: /* all */ + offset = 0; + break; + default: + rc = ERANGE; + goto out; + } + ehp = (fcEvent_header *)p_dev_ctl->fc_evt_head; + while (ehp) { + if ((offset == 0) || (ehp->e_mask == offset)) { + ehdcnt++; + fc_bcopy((char *)ehp, bp, (sizeof(fcEvent_header))); + bp += (sizeof(fcEvent_header)); + total_mem -= sizeof(fcEvent_header); + if(total_mem <= 0) { + rc = ENOMEM; + goto out; + } + infop->c_outsz += sizeof(fcEvent_header); + ecnt = 0; + ecntp = (uint32 *)bp; + bp += (sizeof(uint32)); + total_mem -= sizeof(uint32); + infop->c_outsz += sizeof(uint32); + ep = ehp->e_head; + while(ep) { + ecnt++; + fc_bcopy((char *)ehp, bp, (sizeof(fcEvent))); + bp += (sizeof(fcEvent)); + total_mem -= sizeof(fcEvent); + if(total_mem <= 0) { + rc = ENOMEM; + goto out; + } + infop->c_outsz += sizeof(fcEvent); + ep = ep->evt_next; + } + *ecntp = ecnt; + } + ehp = (fcEvent_header *)ehp->e_next_header; + } + + *ehdcntp = ehdcnt; + break; + } + + case C_READ_RPILIST: + { + 
NODELIST *npp; + + cnt = 0; + npp = (NODELIST *)(dm->fc_dataout); + nlp = binfo->fc_nlpbind_start; + if(nlp == (NODELIST *)&binfo->fc_nlpbind_start) + nlp = binfo->fc_nlpunmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpmap_start; + while((nlp != (NODELIST *)&binfo->fc_nlpmap_start) && (total_mem > 0)) { + fc_bcopy((char *)nlp, (char *)npp, sizeof(NODELIST)); + npp++; + cnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == (NODELIST *)&binfo->fc_nlpbind_start) + nlp = binfo->fc_nlpunmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpmap_start; + } + if(cnt) { + infop->c_outsz = (uint32)(cnt * sizeof(NODELIST)); + } + } + break; + + case C_READ_BPLIST: + rp = &binfo->fc_ring[cip->c_ring]; + lptr = (uint32 * )dm->fc_dataout; + + mm = (MATCHMAP * )rp->fc_mpoff; + total_mem -= (3*sizeof(ulong)); + while ((mm) && (total_mem > 0)) { + if (cip->c_ring == FC_ELS_RING) { + *lptr++ = (uint32)((ulong)mm); + *lptr++ = (uint32)((ulong)mm->virt); + *lptr++ = (uint32)((ulong)mm->phys); + mm = (MATCHMAP * )mm->fc_mptr; + } + if (cip->c_ring == FC_IP_RING) { + fbp = (fcipbuf_t * )mm; + *lptr++ = (uint32)((ulong)fbp); + *lptr++ = (uint32)((ulong)fcdata(fbp)); + *lptr++ = (uint32)((ulong)fcnextpkt(fbp)); + mm = (MATCHMAP * )fcnextdata(fbp); + } + total_mem -= (3 * sizeof(ulong)); + } + *lptr++ = 0; + *lptr++ = (uint32)((ulong)rp->fc_mpon); + + infop->c_outsz = ((uchar * )lptr - (uchar *)(dm->fc_dataout)); + break; + + case C_READ_MEMSEG: + fc_bcopy(&binfo->fc_memseg, dm->fc_dataout, (sizeof(MEMSEG) * FC_MAX_SEG)); + break; + + case C_RESET: + offset = (uint32)((ulong)cip->c_arg1); + switch (offset) { + case 1: /* hba */ + fc_brdreset(p_dev_ctl); + break; + case 2: /* link */ + fc_rlip(p_dev_ctl); + break; + case 3: /* target */ + fc_fcp_abort(p_dev_ctl, TARGET_RESET, (int)((ulong)cip->c_arg2), -1); + break; + case 4: /* lun */ + fc_fcp_abort(p_dev_ctl, LUN_RESET, (int)((ulong)cip->c_arg2), + 
(int)((ulong)cip->c_arg3)); + break; + case 5: /* task set */ + fc_fcp_abort(p_dev_ctl, ABORT_TASK_SET, (int)((ulong)cip->c_arg2), + (int)((ulong)cip->c_arg3)); + break; + case 6: /* bus */ + fc_fcp_abort(p_dev_ctl, TARGET_RESET, -1, -1); + break; + default: + rc = ERANGE; + break; + } + break; + + case C_READ_BINFO: + case C_FC_STAT: + fc_bcopy(binfo, dm->fc_dataout, sizeof(FC_BRD_INFO)); + break; + + case C_NODE: + break; + + case C_DEVP: + offset = (uint32)((ulong)cip->c_arg1); + cnt = (uint32)((ulong)cip->c_arg2); + if ((offset >= (MAX_FC_TARGETS)) || (cnt >= 128)) { + rc = ERANGE; + break; + } + fc_bzero(dm->fc_dataout, (sizeof(dvi_t))+(sizeof(nodeh_t))+(sizeof(node_t))); + fc_bcopy((char *)&binfo->device_queue_hash[offset], (uchar *)dm->fc_dataout, (sizeof(nodeh_t))); + + nodep = binfo->device_queue_hash[offset].node_ptr; + if (nodep == 0) { + break; + } + dev_ptr = nodep->lunlist; + while ((dev_ptr != 0)) { + if(dev_ptr->lun_id == cnt) + break; + dev_ptr = dev_ptr->next; + } + if (dev_ptr == 0) { + break; + } + + fc_bcopy((char *)&binfo->device_queue_hash[offset], (uchar *)dm->fc_dataout, + (sizeof(nodeh_t))); + fc_bcopy((char *)nodep, ((uchar *)dm->fc_dataout + sizeof(nodeh_t)), + (sizeof(node_t))); + fc_bcopy((char *)dev_ptr, + ((uchar *)dm->fc_dataout + sizeof(nodeh_t) + sizeof(node_t)), + (sizeof(dvi_t))); + break; + + case C_NDD_STAT: + fc_bcopy(&NDDSTAT, dm->fc_dataout, sizeof(ndd_genstats_t)); + break; + + case C_LINKINFO: +linfo: + { + LinkInfo *linkinfo; + + linkinfo = (LinkInfo *)dm->fc_dataout; + linkinfo->a_linkEventTag = binfo->fc_eventTag; + linkinfo->a_linkUp = FCSTATCTR.LinkUp; + linkinfo->a_linkDown = FCSTATCTR.LinkDown; + linkinfo->a_linkMulti = FCSTATCTR.LinkMultiEvent; + linkinfo->a_DID = binfo->fc_myDID; + if (binfo->fc_topology == TOPOLOGY_LOOP) { + if(binfo->fc_flag & FC_PUBLIC_LOOP) { + linkinfo->a_topology = LNK_PUBLIC_LOOP; + fc_bcopy((uchar * )binfo->alpa_map, + (uchar *)linkinfo->a_alpaMap, 128); + linkinfo->a_alpaCnt = 
binfo->alpa_map[0]; + } + else { + linkinfo->a_topology = LNK_LOOP; + fc_bcopy((uchar * )binfo->alpa_map, + (uchar *)linkinfo->a_alpaMap, 128); + linkinfo->a_alpaCnt = binfo->alpa_map[0]; + } + } + else { + fc_bzero((uchar *)linkinfo->a_alpaMap, 128); + linkinfo->a_alpaCnt = 0; + if(binfo->fc_flag & FC_FABRIC) { + linkinfo->a_topology = LNK_FABRIC; + } + else { + linkinfo->a_topology = LNK_PT2PT; + } + } + linkinfo->a_linkState = 0; + switch (binfo->fc_ffstate) { + case FC_INIT_START: + case FC_INIT_NVPARAMS: + case FC_INIT_REV: + case FC_INIT_PARTSLIM: + case FC_INIT_CFGRING: + case FC_INIT_INITLINK: + case FC_LINK_DOWN: + linkinfo->a_linkState = LNK_DOWN; + fc_bzero((uchar *)linkinfo->a_alpaMap, 128); + linkinfo->a_alpaCnt = 0; + break; + case FC_LINK_UP: + case FC_INIT_SPARAM: + case FC_CFG_LINK: + linkinfo->a_linkState = LNK_UP; + break; + case FC_FLOGI: + linkinfo->a_linkState = LNK_FLOGI; + break; + case FC_LOOP_DISC: + case FC_NS_REG: + case FC_NS_QRY: + case FC_NODE_DISC: + case FC_REG_LOGIN: + case FC_CLEAR_LA: + linkinfo->a_linkState = LNK_DISCOVERY; + break; + case FC_READY: + linkinfo->a_linkState = LNK_READY; + break; + } + linkinfo->a_alpa = (uchar)(binfo->fc_myDID & 0xff); + fc_bcopy((uchar * )&binfo->fc_portname, (uchar *)linkinfo->a_wwpName, 8); + fc_bcopy((uchar * )&binfo->fc_nodename, (uchar *)linkinfo->a_wwnName, 8); + } + break; + + case C_IOINFO: + { + IOinfo *ioinfo; + + ioinfo = (IOinfo *)dm->fc_dataout; + ioinfo->a_mbxCmd = FCSTATCTR.issueMboxCmd; + ioinfo->a_mboxCmpl = FCSTATCTR.mboxEvent; + ioinfo->a_mboxErr = FCSTATCTR.mboxStatErr; + ioinfo->a_iocbCmd = FCSTATCTR.IssueIocb; + ioinfo->a_iocbRsp = FCSTATCTR.iocbRsp; + ioinfo->a_adapterIntr = (FCSTATCTR.linkEvent + FCSTATCTR.iocbRsp + + FCSTATCTR.mboxEvent); + ioinfo->a_fcpCmd = FCSTATCTR.fcpCmd; + ioinfo->a_fcpCmpl = FCSTATCTR.fcpCmpl; + ioinfo->a_fcpErr = FCSTATCTR.fcpRspErr + FCSTATCTR.fcpRemoteStop + + FCSTATCTR.fcpPortRjt + FCSTATCTR.fcpPortBusy + FCSTATCTR.fcpError + + 
FCSTATCTR.fcpLocalErr; + ioinfo->a_seqXmit = NDDSTAT.ndd_ifOutUcastPkts_lsw; + ioinfo->a_seqRcv = NDDSTAT.ndd_recvintr_lsw; + ioinfo->a_bcastXmit = NDDSTAT.ndd_ifOutBcastPkts_lsw + + NDDSTAT.ndd_ifOutMcastPkts_lsw; + ioinfo->a_bcastRcv = FCSTATCTR.frameRcvBcast; + ioinfo->a_elsXmit = FCSTATCTR.elsXmitFrame; + ioinfo->a_elsRcv = FCSTATCTR.elsRcvFrame; + ioinfo->a_RSCNRcv = FCSTATCTR.elsRcvRSCN; + ioinfo->a_seqXmitErr = NDDSTAT.ndd_oerrors; + ioinfo->a_elsXmitErr = FCSTATCTR.elsXmitErr; + ioinfo->a_elsBufPost = binfo->fc_ring[FC_ELS_RING].fc_bufcnt; + ioinfo->a_ipBufPost = binfo->fc_ring[FC_IP_RING].fc_bufcnt; + ioinfo->a_cnt1 = 0; + ioinfo->a_cnt2 = 0; + ioinfo->a_cnt3 = 0; + ioinfo->a_cnt4 = 0; + } + break; + + case C_NODEINFO: + { + NodeInfo * np; + + /* First uint32 word will be count */ + np = (NodeInfo *)dm->fc_dataout; + cnt = 0; + total_mem -= sizeof(NODELIST); + + nlp = binfo->fc_nlpbind_start; + if(nlp == (NODELIST *)&binfo->fc_nlpbind_start) + nlp = binfo->fc_nlpunmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpmap_start; + while((nlp != (NODELIST *)&binfo->fc_nlpmap_start) && (total_mem > 0)) { + fc_bzero((uchar *)np, sizeof(NODELIST)); + if(nlp->nlp_flag & NLP_NS_REMOVED) + np->a_flag |= NODE_NS_REMOVED; + if(nlp->nlp_flag & NLP_RPI_XRI) + np->a_flag |= NODE_RPI_XRI; + if(nlp->nlp_flag & NLP_REQ_SND) + np->a_flag |= NODE_REQ_SND; + if(nlp->nlp_flag & NLP_RM_ENTRY) + np->a_flag |= NODE_RM_ENTRY; + if(nlp->nlp_flag & NLP_FARP_SND) + np->a_flag |= NODE_FARP_SND; + if(nlp->nlp_type & NLP_FABRIC) + np->a_flag |= NODE_FABRIC; + if(nlp->nlp_type & NLP_FCP_TARGET) + np->a_flag |= NODE_FCP_TARGET; + if(nlp->nlp_type & NLP_IP_NODE) + np->a_flag |= NODE_IP_NODE; + if(nlp->nlp_type & NLP_SEED_WWPN) + np->a_flag |= NODE_SEED_WWPN; + if(nlp->nlp_type & NLP_SEED_WWNN) + np->a_flag |= NODE_SEED_WWNN; + if(nlp->nlp_type & NLP_SEED_DID) + np->a_flag |= NODE_SEED_DID; + if(nlp->nlp_type & NLP_AUTOMAP) + np->a_flag |= NODE_AUTOMAP; + 
if(nlp->nlp_action & NLP_DO_DISC_START) + np->a_flag |= NODE_DISC_START; + if(nlp->nlp_action & NLP_DO_ADDR_AUTH) + np->a_flag |= NODE_ADDR_AUTH; + np->a_state = nlp->nlp_state; + np->a_did = nlp->nlp_DID; + np->a_targetid = FC_SCSID(nlp->id.nlp_pan, nlp->id.nlp_sid); + fc_bcopy(&nlp->nlp_portname, np->a_wwpn, 8); + fc_bcopy(&nlp->nlp_nodename, np->a_wwnn, 8); + total_mem -= sizeof(NODELIST); + np++; + cnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == (NODELIST *)&binfo->fc_nlpbind_start) + nlp = binfo->fc_nlpunmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpmap_start; + } + if(cnt) { + infop->c_outsz = (uint32)(cnt * sizeof(NodeInfo)); + } + } + break; + + case C_HBA_ADAPTERATRIBUTES: + { + HBA_ADAPTERATTRIBUTES * ha; + + vp = &VPD; + ha = (HBA_ADAPTERATTRIBUTES *)dm->fc_dataout; + fc_bzero(dm->fc_dataout, (sizeof(HBA_ADAPTERATTRIBUTES))); + ha->NumberOfPorts = 1; + ha->VendorSpecificID = di->fc_ba.a_pci; + fc_bcopy(di->fc_ba.a_drvrid, ha->DriverVersion, 16); + fc_bcopy(di->fc_ba.a_fwname, ha->FirmwareVersion, 32); + fc_bcopy((uchar * )&binfo->fc_sparam.nodeName, (uchar * )&ha->NodeWWN, + sizeof(HBA_WWN)); + fc_bcopy("Emulex Corporation", ha->Manufacturer, 20); + + switch(((SWAP_LONG(ha->VendorSpecificID))>>16) & 0xffff) { + case PCI_DEVICE_ID_SUPERFLY: + if((vp->rev.biuRev == 1) || + (vp->rev.biuRev == 2) || (vp->rev.biuRev == 3)) { + fc_bcopy("LP7000", ha->Model, 8); + fc_bcopy("Emulex LightPulse LP7000 1 Gigabit PCI Fibre Channel Adapter", ha->ModelDescription, 62); + } + else { + fc_bcopy("LP7000E", ha->Model, 9); + fc_bcopy("Emulex LightPulse LP7000E 1 Gigabit PCI Fibre Channel Adapter", ha->ModelDescription, 62); + } + break; + case PCI_DEVICE_ID_DRAGONFLY: + fc_bcopy("LP8000", ha->Model, 8); + fc_bcopy("Emulex LightPulse LP8000 1 Gigabit PCI Fibre Channel Adapter", ha->ModelDescription, 62); + break; + case PCI_DEVICE_ID_CENTAUR: + if(FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) { + fc_bcopy("LP9002", 
ha->Model, 8); + fc_bcopy("Emulex LightPulse LP9002 2 Gigabit PCI Fibre Channel Adapter", ha->ModelDescription, 62); + } + else { + fc_bcopy("LP9000", ha->Model, 8); + fc_bcopy("Emulex LightPulse LP9000 1 Gigabit PCI Fibre Channel Adapter", ha->ModelDescription, 62); + } + break; + case PCI_DEVICE_ID_PEGASUS: + fc_bcopy("LP9802", ha->Model, 8); + fc_bcopy("Emulex LightPulse LP9802 2 Gigabit PCI Fibre Channel Adapter", ha->ModelDescription, 62); + break; + case PCI_DEVICE_ID_THOR: + fc_bcopy("LP10000", ha->Model, 9); + fc_bcopy("Emulex LightPulse LP10000 2 Gigabit PCI Fibre Channel Adapter", ha->ModelDescription, 63); + break; + case PCI_DEVICE_ID_PFLY: + fc_bcopy("LP982", ha->Model, 7); + fc_bcopy("Emulex LightPulse LP982 2 Gigabit PCI Fibre Channel Adapter", ha->ModelDescription, 62); + break; + case PCI_DEVICE_ID_TFLY: + fc_bcopy("LP1050", ha->Model, 8); + fc_bcopy("Emulex LightPulse LP1050 2 Gigabit PCI Fibre Channel Adapter", ha->ModelDescription, 63); + break; + } + fc_bcopy("lpfcdd", ha->DriverName, 7); + fc_bcopy(binfo->fc_SerialNumber, ha->SerialNumber, 32); + fc_bcopy(binfo->fc_OptionROMVersion, ha->OptionROMVersion, 32); + + /* Convert JEDEC ID to ascii for hardware version */ + incr = vp->rev.biuRev; + for(i=0;i<8;i++) { + j = (incr & 0xf); + if(j <= 9) + ha->HardwareVersion[7-i] = (char)((uchar)0x30 + (uchar)j); + else + ha->HardwareVersion[7-i] = (char)((uchar)0x61 + (uchar)(j-10)); + incr = (incr >> 4); + } + ha->HardwareVersion[8] = 0; + + } + break; + + case C_HBA_PORTATRIBUTES: + { + SERV_PARM * hsp; + HBA_OSDN * osdn; + +localport: + vp = &VPD; + hsp = (SERV_PARM *)&binfo->fc_sparam; + hp = (HBA_PORTATTRIBUTES *)dm->fc_dataout; + fc_bzero(dm->fc_dataout, (sizeof(HBA_PORTATTRIBUTES))); + fc_bcopy((uchar * )&binfo->fc_sparam.nodeName, (uchar * )&hp->NodeWWN, + sizeof(HBA_WWN)); + fc_bcopy((uchar * )&binfo->fc_sparam.portName, (uchar * )&hp->PortWWN, + sizeof(HBA_WWN)); + + if( binfo->fc_linkspeed == LA_2GHZ_LINK) + hp->PortSpeed = 
HBA_PORTSPEED_2GBIT; + else + hp->PortSpeed = HBA_PORTSPEED_1GBIT; + + if(FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) + hp->PortSupportedSpeed = HBA_PORTSPEED_2GBIT; + else + hp->PortSupportedSpeed = HBA_PORTSPEED_1GBIT; + + hp->PortFcId = binfo->fc_myDID; + hp->PortType = HBA_PORTTYPE_UNKNOWN; + if (binfo->fc_topology == TOPOLOGY_LOOP) { + if(binfo->fc_flag & FC_PUBLIC_LOOP) { + hp->PortType = HBA_PORTTYPE_NLPORT; + fc_bcopy((uchar * )&binfo->fc_fabparam.nodeName, + (uchar * )&hp->FabricName, sizeof(HBA_WWN)); + } + else { + hp->PortType = HBA_PORTTYPE_LPORT; + } + } + else { + if(binfo->fc_flag & FC_FABRIC) { + hp->PortType = HBA_PORTTYPE_NPORT; + fc_bcopy((uchar * )&binfo->fc_fabparam.nodeName, + (uchar * )&hp->FabricName, sizeof(HBA_WWN)); + } + else { + hp->PortType = HBA_PORTTYPE_PTP; + } + } + + if (binfo->fc_flag & FC_BYPASSED_MODE) { + hp->PortState = HBA_PORTSTATE_BYPASSED; + } + else if (binfo->fc_flag & FC_OFFLINE_MODE) { + hp->PortState = HBA_PORTSTATE_DIAGNOSTICS; + } + else { + switch (binfo->fc_ffstate) { + case FC_INIT_START: + case FC_INIT_NVPARAMS: + case FC_INIT_REV: + case FC_INIT_PARTSLIM: + case FC_INIT_CFGRING: + case FC_INIT_INITLINK: + hp->PortState = HBA_PORTSTATE_UNKNOWN; + case FC_LINK_DOWN: + case FC_LINK_UP: + case FC_INIT_SPARAM: + case FC_CFG_LINK: + case FC_FLOGI: + case FC_LOOP_DISC: + case FC_NS_REG: + case FC_NS_QRY: + case FC_NODE_DISC: + case FC_REG_LOGIN: + case FC_CLEAR_LA: + hp->PortState = HBA_PORTSTATE_LINKDOWN; + break; + case FC_READY: + hp->PortState = HBA_PORTSTATE_ONLINE; + break; + default: + hp->PortState = HBA_PORTSTATE_ERROR; + break; + } + } + cnt = binfo->fc_map_cnt + binfo->fc_unmap_cnt; + hp->NumberofDiscoveredPorts = cnt; + if (hsp->cls1.classValid) { + hp->PortSupportedClassofService |= 1; /* bit 1 */ + } + if (hsp->cls2.classValid) { + hp->PortSupportedClassofService |= 2; /* bit 2 */ + } + if (hsp->cls3.classValid) { + hp->PortSupportedClassofService |= 4; /* bit 3 */ + } + hp->PortMaxFrameSize = 
(((uint32)hsp->cmn.bbRcvSizeMsb) << 8) | + (uint32)hsp->cmn.bbRcvSizeLsb; + + hp->PortSupportedFc4Types.bits[2] = 0x1; + hp->PortSupportedFc4Types.bits[3] = 0x20; + hp->PortSupportedFc4Types.bits[7] = 0x1; + if(clp[CFG_FCP_ON].a_current) { + hp->PortActiveFc4Types.bits[2] = 0x1; + } + if(clp[CFG_NETWORK_ON].a_current) { + hp->PortActiveFc4Types.bits[3] = 0x20; + } + hp->PortActiveFc4Types.bits[7] = 0x1; + + + /* OSDeviceName is the device info filled into the HBA_OSDN structure */ + osdn = (HBA_OSDN *)&hp->OSDeviceName[0]; + fc_bcopy("lpfc", osdn->drvname, 4); + osdn->instance = fc_brd_to_inst(binfo->fc_brd_no); + osdn->target = (HBA_UINT32)(-1); + osdn->lun = (HBA_UINT32)(-1); + + } + break; + + case C_HBA_PORTSTATISTICS: + { + HBA_PORTSTATISTICS * hs; + FCCLOCK_INFO * clock_info; + + hs = (HBA_PORTSTATISTICS *)dm->fc_dataout; + fc_bzero(dm->fc_dataout, (sizeof(HBA_PORTSTATISTICS))); + + mb = (MAILBOX * )mbox; + fc_read_status(binfo, mb); + mb->un.varRdStatus.clrCounters = 0; + if(dfc_issue_mbox(p_dev_ctl, mb, &ipri)) { + rc = ENODEV; + break; + } + hs->TxFrames = mb->un.varRdStatus.xmitFrameCnt; + hs->RxFrames = mb->un.varRdStatus.rcvFrameCnt; + /* Convert KBytes to words */ + hs->TxWords = (mb->un.varRdStatus.xmitByteCnt * 256); + hs->RxWords = (mb->un.varRdStatus.rcvbyteCnt * 256); + fc_read_lnk_stat(binfo, mb); + if(dfc_issue_mbox(p_dev_ctl, mb, &ipri)) { + rc = ENODEV; + break; + } + hs->LinkFailureCount = mb->un.varRdLnk.linkFailureCnt; + hs->LossOfSyncCount = mb->un.varRdLnk.lossSyncCnt; + hs->LossOfSignalCount = mb->un.varRdLnk.lossSignalCnt; + hs->PrimitiveSeqProtocolErrCount = mb->un.varRdLnk.primSeqErrCnt; + hs->InvalidTxWordCount = mb->un.varRdLnk.invalidXmitWord; + hs->InvalidCRCCount = mb->un.varRdLnk.crcCnt; + hs->ErrorFrames = mb->un.varRdLnk.crcCnt; + + if (binfo->fc_topology == TOPOLOGY_LOOP) { + hs->LIPCount = (binfo->fc_eventTag >> 1); + hs->NOSCount = -1; + } + else { + hs->LIPCount = -1; + hs->NOSCount = (binfo->fc_eventTag >> 1); + } + + 
hs->DumpedFrames = -1; + clock_info = &DD_CTL.fc_clock_info; + hs->SecondsSinceLastReset = clock_info->ticks; + + } + break; + + case C_HBA_WWPNPORTATRIBUTES: + { + HBA_WWN findwwn; + + hp = (HBA_PORTATTRIBUTES *)dm->fc_dataout; + vp = &VPD; + fc_bzero(dm->fc_dataout, (sizeof(HBA_PORTATTRIBUTES))); + + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyin((uchar *)cip->c_arg1, (uchar *)&findwwn, (ulong)(sizeof(HBA_WWN)))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + break; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + /* First Mapped ports, then unMapped ports */ + nlp = binfo->fc_nlpmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpmap_start) + nlp = binfo->fc_nlpunmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpunmap_start) { + if (fc_geportname(&nlp->nlp_portname, (NAME_TYPE *)&findwwn) == 2) + goto foundit; + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == (NODELIST *)&binfo->fc_nlpmap_start) + nlp = binfo->fc_nlpunmap_start; + } + rc = ERANGE; + break; + } + + case C_HBA_DISCPORTATRIBUTES: + { + SERV_PARM * hsp; + MATCHMAP * mp; + HBA_OSDN * osdn; + uint32 refresh; + + vp = &VPD; + hp = (HBA_PORTATTRIBUTES *)dm->fc_dataout; + fc_bzero(dm->fc_dataout, (sizeof(HBA_PORTATTRIBUTES))); + offset = (uint32)((ulong)cip->c_arg2); + refresh = (uint32)((ulong)cip->c_arg3); + if(refresh != binfo->nlptimer) { + hp->PortFcId = 0xffffffff; + break; + } + cnt = 0; + /* First Mapped ports, then unMapped ports */ + nlp = binfo->fc_nlpmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpmap_start) + nlp = binfo->fc_nlpunmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpunmap_start) { + if(cnt == offset) + goto foundit; + cnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == (NODELIST *)&binfo->fc_nlpmap_start) + nlp = binfo->fc_nlpunmap_start; + } + rc = ERANGE; + break; + +foundit: + /* Check if its the local port */ + if(binfo->fc_myDID == nlp->nlp_DID) { + goto localport; + } + + mb = (MAILBOX * )mbox; + fc_read_rpi(binfo, 
(uint32)nlp->nlp_Rpi, + (MAILBOX * )mb, (uint32)0); + + if ((mp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF)) == 0) { + rc = ENOMEM; + break; + } + hsp = (SERV_PARM *)mp->virt; + if (binfo->fc_flag & FC_SLI2) { + mb->un.varRdRPI.un.sp64.addrHigh = + (uint32)putPaddrHigh(mp->phys); + mb->un.varRdRPI.un.sp64.addrLow = + (uint32)putPaddrLow(mp->phys); + mb->un.varRdRPI.un.sp64.tus.f.bdeSize = sizeof(SERV_PARM); + } + else { + mb->un.varRdRPI.un.sp.bdeAddress = + (uint32)putPaddrLow(mp->phys); + mb->un.varRdRPI.un.sp.bdeSize = sizeof(SERV_PARM); + } + + if(dfc_issue_mbox(p_dev_ctl, mb, &ipri)) { + rc = ENODEV; + break; + } + + if (hsp->cls1.classValid) { + hp->PortSupportedClassofService |= 1; /* bit 1 */ + } + if (hsp->cls2.classValid) { + hp->PortSupportedClassofService |= 2; /* bit 2 */ + } + if (hsp->cls3.classValid) { + hp->PortSupportedClassofService |= 4; /* bit 3 */ + } + hp->PortMaxFrameSize = (((uint32)hsp->cmn.bbRcvSizeMsb) << 8) | + (uint32)hsp->cmn.bbRcvSizeLsb; + + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + + fc_bcopy((uchar * )&nlp->nlp_nodename, (uchar * )&hp->NodeWWN, + sizeof(HBA_WWN)); + fc_bcopy((uchar * )&nlp->nlp_portname, (uchar * )&hp->PortWWN, + sizeof(HBA_WWN)); + + hp->PortSpeed = 0; + if(((binfo->fc_myDID & 0xffff00) == (nlp->nlp_DID & 0xffff00)) && + (binfo->fc_topology == TOPOLOGY_LOOP)) { + if( binfo->fc_linkspeed == LA_2GHZ_LINK) + hp->PortSpeed = HBA_PORTSPEED_2GBIT; + else + hp->PortSpeed = HBA_PORTSPEED_1GBIT; + } + + hp->PortFcId = nlp->nlp_DID; + if((binfo->fc_flag & FC_FABRIC) && + ((binfo->fc_myDID & 0xff0000) == (nlp->nlp_DID & 0xff0000))) { + fc_bcopy((uchar * )&binfo->fc_fabparam.nodeName, + (uchar * )&hp->FabricName, sizeof(HBA_WWN)); + } + hp->PortState = HBA_PORTSTATE_ONLINE; + if (nlp->nlp_type & NLP_FCP_TARGET) { + hp->PortActiveFc4Types.bits[2] = 0x1; + } + if (nlp->nlp_type & NLP_IP_NODE) { + hp->PortActiveFc4Types.bits[3] = 0x20; + } + hp->PortActiveFc4Types.bits[7] = 0x1; + + hp->PortType = HBA_PORTTYPE_UNKNOWN; + if 
(binfo->fc_topology == TOPOLOGY_LOOP) { + if(binfo->fc_flag & FC_PUBLIC_LOOP) { + /* Check if Fabric port */ + if (fc_geportname(&nlp->nlp_nodename, (NAME_TYPE *)&(binfo->fc_fabparam.nodeName)) == 2) { + hp->PortType = HBA_PORTTYPE_FLPORT; + } + else { + /* Based on DID */ + if((nlp->nlp_DID & 0xff) == 0) { + hp->PortType = HBA_PORTTYPE_NPORT; + } + else { + if((nlp->nlp_DID & 0xff0000) != 0xff0000) { + hp->PortType = HBA_PORTTYPE_NLPORT; + } + } + } + } + else { + hp->PortType = HBA_PORTTYPE_LPORT; + } + } + else { + if(binfo->fc_flag & FC_FABRIC) { + /* Check if Fabric port */ + if (fc_geportname(&nlp->nlp_nodename, (NAME_TYPE *)&(binfo->fc_fabparam.nodeName)) == 2) { + hp->PortType = HBA_PORTTYPE_FPORT; + } + else { + /* Based on DID */ + if((nlp->nlp_DID & 0xff) == 0) { + hp->PortType = HBA_PORTTYPE_NPORT; + } + else { + if((nlp->nlp_DID & 0xff0000) != 0xff0000) { + hp->PortType = HBA_PORTTYPE_NLPORT; + } + } + } + } + else { + hp->PortType = HBA_PORTTYPE_PTP; + } + } + + /* for mapped devices OSDeviceName is device info filled into HBA_OSDN structure */ + if(nlp->nlp_flag & NLP_MAPPED) { + osdn = (HBA_OSDN *)&hp->OSDeviceName[0]; + fc_bcopy("lpfc", osdn->drvname, 4); + osdn->instance = fc_brd_to_inst(binfo->fc_brd_no); + osdn->target = FC_SCSID(nlp->id.nlp_pan, nlp->id.nlp_sid); + osdn->lun = (HBA_UINT32)(-1); + } + + } + break; + + case C_HBA_INDEXPORTATRIBUTES: + { + uint32 refresh; + + vp = &VPD; + hp = (HBA_PORTATTRIBUTES *)dm->fc_dataout; + fc_bzero(dm->fc_dataout, (sizeof(HBA_PORTATTRIBUTES))); + offset = (uint32)((ulong)cip->c_arg2); + refresh = (uint32)((ulong)cip->c_arg3); + if(refresh != binfo->nlptimer) { + hp->PortFcId = 0xffffffff; + break; + } + cnt = 0; + /* Mapped NPorts only */ + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if(cnt == offset) + goto foundit; + cnt++; + nlp = (NODELIST *)nlp->nlp_listp_next; + } + rc = ERANGE; + } + break; + + case C_HBA_SETMGMTINFO: + { + HBA_MGMTINFO *mgmtinfo; + + 
mgmtinfo = (HBA_MGMTINFO *)dfc.dfc_buffer; + + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyin((uchar *)cip->c_arg1, (uchar *)mgmtinfo, sizeof(HBA_MGMTINFO))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + break; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + binfo->ipVersion = mgmtinfo->IPVersion; + binfo->UDPport = mgmtinfo->UDPPort; + if(binfo->ipVersion == RNID_IPV4) { + fc_bcopy((uchar *)&mgmtinfo->IPAddress[0], + (uchar * )&binfo->ipAddr[0], 4); + } + else { + fc_bcopy((uchar *)&mgmtinfo->IPAddress[0], + (uchar * )&binfo->ipAddr[0], 16); + } + } + break; + + case C_HBA_GETMGMTINFO: + { + HBA_MGMTINFO *mgmtinfo; + + mgmtinfo = (HBA_MGMTINFO *)dm->fc_dataout; + fc_bcopy((uchar * )&binfo->fc_nodename, (uchar *)&mgmtinfo->wwn, 8); + mgmtinfo->unittype = RNID_HBA; + mgmtinfo->PortId = binfo->fc_myDID; + mgmtinfo->NumberOfAttachedNodes = 0; + mgmtinfo->TopologyDiscoveryFlags = 0; + mgmtinfo->IPVersion = binfo->ipVersion; + mgmtinfo->UDPPort = binfo->UDPport; + if(binfo->ipVersion == RNID_IPV4) { + fc_bcopy((void *) & binfo->ipAddr[0], + (void *) & mgmtinfo->IPAddress[0], 4); + } + else { + fc_bcopy((void *) & binfo->ipAddr[0], + (void *) & mgmtinfo->IPAddress[0], 16); + } + } + break; + + case C_HBA_REFRESHINFO: + { + lptr = (uint32 *)dm->fc_dataout; + *lptr = binfo->nlptimer; + } + break; + + case C_HBA_RNID: + ipri = dfc_hba_rnid( p_dev_ctl, dm, cip, infop, buf_info, ipri); + break; + + case C_HBA_GETEVENT: + { + HBA_UINT32 outsize; + HBAEVENT *rec; + HBAEVENT *recout; + + size = (uint32)((ulong)cip->c_arg1); /* size is number of event entries */ + + recout = (HBAEVENT * )dm->fc_dataout; + for(j=0;jhba_event_get == p_dev_ctl->hba_event_put)) + break; + rec = &p_dev_ctl->hbaevent[p_dev_ctl->hba_event_get]; + fc_bcopy((uchar * )rec, (uchar *)recout, sizeof(HBAEVENT)); + recout++; + p_dev_ctl->hba_event_get++; + if(p_dev_ctl->hba_event_get >= MAX_HBAEVENT) { + p_dev_ctl->hba_event_get = 0; + } + } + outsize = j; + + /* copy back size of 
response */ + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyout((uchar *)&outsize, (uchar *)cip->c_arg2, sizeof(HBA_UINT32))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + break; + } + /* copy back number of missed records */ + if (fc_copyout((uchar *)&p_dev_ctl->hba_event_missed, (uchar *)cip->c_arg3, sizeof(HBA_UINT32))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + break; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + p_dev_ctl->hba_event_missed = 0; + infop->c_outsz = (uint32)(outsize * sizeof(HBA_EVENTINFO)); + } + + break; + + case C_HBA_FCPTARGETMAPPING: + ipri = dfc_hba_targetmapping(p_dev_ctl, dm, cip, infop, ipri); + break; + + case C_HBA_FCPBINDING: + ipri = dfc_hba_fcpbind(p_dev_ctl, dm, cip, infop, ipri); + break; + + case C_GETCFG: + { + CfgParam * cp; + iCfgParam * icp; + + /* First uint32 word will be count */ + cp = (CfgParam *)dm->fc_dataout; + cnt = 0; + for (i = 0; i < NUM_CFG_PARAM; i++) { + icp = &clp[i]; + cp->a_low = icp->a_low; + cp->a_hi = icp->a_hi; + cp->a_flag = icp->a_flag; + cp->a_default = icp->a_default; + cp->a_current = icp->a_current; + cp->a_changestate = icp->a_changestate; + fc_bcopy(icp->a_string, cp->a_string, 32); + fc_bcopy(icp->a_help, cp->a_help, 80); + cp++; + cnt++; + } + if(cnt) { + infop->c_outsz = (uint32)(cnt * sizeof(CfgParam)); + } + } + break; + + case C_SETCFG: + { + RING * rp; + iCfgParam * icp; + + offset = (uint32)((ulong)cip->c_arg1); + cnt = (uint32)((ulong)cip->c_arg2); + if (offset >= NUM_CFG_PARAM) { + rc = ERANGE; + break; + } + icp = &clp[offset]; + if(icp->a_changestate != CFG_DYNAMIC) { + rc = EPERM; + break; + } + if (((icp->a_low != 0) && (cnt < icp->a_low)) || (cnt > icp->a_hi)) { + rc = ERANGE; + break; + } + switch(offset) { + case CFG_FCP_CLASS: + switch (cnt) { + case 1: + clp[CFG_FCP_CLASS].a_current = CLASS1; + break; + case 2: + clp[CFG_FCP_CLASS].a_current = CLASS2; + break; + case 3: + clp[CFG_FCP_CLASS].a_current = CLASS3; + break; + } + 
icp->a_current = cnt; + break; + + case CFG_IP_CLASS: + switch (cnt) { + case 1: + clp[CFG_IP_CLASS].a_current = CLASS1; + break; + case 2: + clp[CFG_IP_CLASS].a_current = CLASS2; + break; + case 3: + clp[CFG_IP_CLASS].a_current = CLASS3; + break; + } + icp->a_current = cnt; + break; + + case CFG_LINKDOWN_TMO: + icp->a_current = cnt; + rp = &binfo->fc_ring[FC_FCP_RING]; + if(clp[CFG_LINKDOWN_TMO].a_current) { + rp->fc_ringtmo = clp[CFG_LINKDOWN_TMO].a_current; + } + break; + + default: + icp->a_current = cnt; + } + } + break; + + case C_GET_EVENT: + { + fcEvent *ep; + fcEvent *oep; + fcEvent_header *ehp; + uchar *cp; + MATCHMAP *omm; + int no_more; + + no_more = 1; + + offset = ((uint32)((ulong)cip->c_arg3) & FC_REG_EVENT_MASK); /* event mask */ + incr = (uint32)cip->c_flag; /* event id */ + size = (uint32)cip->c_iocb; /* process requesting event */ + ehp = (fcEvent_header *)p_dev_ctl->fc_evt_head; + while (ehp) { + if (ehp->e_mask == offset) + break; + ehp = (fcEvent_header *)ehp->e_next_header; + } + + if (!ehp) { + rc = ENOENT; + break; + } + + ep = ehp->e_head; + oep = 0; + while(ep) { + /* Find an event that matches the event mask */ + if(ep->evt_sleep == 0) { + /* dequeue event from event list */ + if(oep == 0) { + ehp->e_head = ep->evt_next; + } else { + oep->evt_next = ep->evt_next; + } + if(ehp->e_tail == ep) + ehp->e_tail = oep; + + switch(offset) { + case FC_REG_LINK_EVENT: + break; + case FC_REG_RSCN_EVENT: + /* Return data length */ + cnt = sizeof(uint32); + + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyout((uchar *)&cnt, (uchar *)cip->c_arg1, sizeof(uint32))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + fc_bcopy((char *)&ep->evt_data0, dm->fc_dataout, cnt); + infop->c_outsz = (uint32)cnt; + break; + case FC_REG_CT_EVENT: + /* Return data length */ + cnt = (ulong)(ep->evt_data2); + + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyout((uchar *)&cnt, (uchar *)cip->c_arg1, 
sizeof(uint32))) { + rc = EIO; + } + else { + if (fc_copyout((uchar *)&ep->evt_data0, (uchar *)cip->c_arg2, + sizeof(uint32))) { + rc = EIO; + } + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + infop->c_outsz = (uint32)cnt; + i = cnt; + mm = (MATCHMAP * )ep->evt_data1; + cp = (uchar *)dm->fc_dataout; + while(mm) { + + if(cnt > FCELSSIZE) + i = FCELSSIZE; + else + i = cnt; + + if(total_mem > 0) { + fc_bcopy((char *)mm->virt, cp, i); + total_mem -= i; + } + + omm = mm; + mm = (MATCHMAP *)mm->fc_mptr; + cp += i; + fc_mem_put(binfo, MEM_BUF, (uchar * )omm); + } + break; + } + + if((offset == FC_REG_CT_EVENT) && (ep->evt_next) && + (((fcEvent *)(ep->evt_next))->evt_sleep == 0)) { + ep->evt_data0 |= 0x80000000; /* More event is waiting */ + if (fc_copyout((uchar *)&ep->evt_data0, (uchar *)cip->c_arg2, + sizeof(uint32))) { + rc = EIO; + } + no_more = 0; + } + + /* Requeue event entry */ + ep->evt_next = 0; + ep->evt_data0 = 0; + ep->evt_data1 = 0; + ep->evt_data2 = 0; + ep->evt_sleep = 1; + ep->evt_flags = 0; + + if(ehp->e_head == 0) { + ehp->e_head = ep; + ehp->e_tail = ep; + } + else { + ehp->e_tail->evt_next = ep; + ehp->e_tail = ep; + } + + if(offset == FC_REG_LINK_EVENT) { + ehp->e_flag &= ~E_GET_EVENT_ACTIVE; + goto linfo; + } + + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyout((char *)dm->fc_dataout, infop->c_dataout, (int)infop->c_outsz)) { + rc = EIO; + } + dfc_data_free(p_dev_ctl, dm); + + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if (no_more) + ehp->e_flag &= ~E_GET_EVENT_ACTIVE; + di->fc_refcnt--; + dfc_unlock_enable(ipri, &CMD_LOCK); + + return (rc); + } + oep = ep; + ep = ep->evt_next; + } + if(ep == 0) { + /* No event found */ + rc = ENOENT; + } + } + break; + + case C_SET_EVENT: + ipri = dfc_hba_set_event(p_dev_ctl, dm, cip, infop, ipri); + break; + + default: + rc = EINVAL; + break; + } + +out: + /* dfc_ioctl exit */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0401, /* ptr to msg structure */ + fc_mes0401, /* ptr to msg */ + 
fc_msgBlk0401.msgPreambleStr, /* begin varargs */ + rc, + infop->c_outsz, + (uint32)((ulong)infop->c_dataout)); /* end varargs */ + + di->fc_refcnt--; + dfc_unlock_enable(ipri, &CMD_LOCK); + + /* Copy data to user space config method */ + if ((rc == 0) || (do_cp == 1)) { + if (infop->c_outsz) { + if (fc_copyout((char *)dm->fc_dataout, infop->c_dataout, (int)infop->c_outsz)) { + rc = EIO; + } + } + } + + /* Now free the space for these structures */ + dmdata_info->virt = (struct dfc_mem *)dm; + dmdata_info->phys = 0; + dmdata_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + dmdata_info->size = sizeof(* dm); + dmdata_info->dma_handle = 0; + dmdata_info->data_handle = 0; + fc_free(p_dev_ctl, dmdata_info); + + mbox_info->virt = (char *)mbox; + mbox_info->phys = 0; + mbox_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + mbox_info->size = sizeof(* mbox); + mbox_info->dma_handle = 0; + mbox_info->data_handle = 0; + fc_free(p_dev_ctl, mbox_info); + + dfc_data_free(p_dev_ctl, dm); + return (rc); +} + + +uint32 +dfc_getLunId( +node_t *nodep, uint32 lunIndex) +{ + static uint32 lun; + static int i; + static dvi_t *dev_ptr; + static FCP_CMND *tmp; + + tmp = (FCP_CMND *)nodep->virtRptLunData; + + if(tmp == 0) { + dev_ptr = nodep->lunlist; + lun = dev_ptr->lun_id; + } else { + i = (lunIndex + 1) * 8; + tmp = (FCP_CMND *)(((uchar *)nodep->virtRptLunData) + i); + lun = ((tmp->fcpLunMsl >> FC_LUN_SHIFT) & 0xff); + } + return lun; +} + +_static_ int +dfc_bcopy( +uint32 *lsrc, +uint32 *ldest, +int cnt, +int incr) +{ + static ushort * ssrc; + static ushort * sdest; + static uchar * csrc; + static uchar * cdest; + static int i; + + csrc = (uchar * )lsrc; + cdest = (uchar * )ldest; + ssrc = (ushort * )lsrc; + sdest = (ushort * )ldest; + + for (i = 0; i < cnt; i += incr) { + if (incr == sizeof(char)) { + *cdest++ = *csrc++; + } else if (incr == sizeof(short)) { + *sdest++ = *ssrc++; + } else { + *ldest++ = *lsrc++; + } + } + return(0); +} + + +_static_ fc_dev_ctl_t * +dfc_getpdev( 
+struct cmd_input *ci) +{ + static fc_dev_ctl_t * p_dev_ctl;/* pointer to dev_ctl area */ + static FC_BRD_INFO * binfo; + + p_dev_ctl = DD_CTL.p_dev[ci->c_brd]; + binfo = &BINFO; + + if (p_dev_ctl == 0) { + return(0); + } + + /* Make sure command specified ring is within range */ + if (ci->c_ring >= binfo->fc_ffnumrings) { + return(0); + } + + return(p_dev_ctl); +} + + +_static_ int +fc_inst_to_brd( +int ddiinst) +{ + int i; + + for (i = 0; i < fcinstcnt; i++) + if (fcinstance[i] == ddiinst) + return(i); + + return(MAX_FC_BRDS); +} + + +_static_ int +dfc_msdelay( +fc_dev_ctl_t * p_dev_ctl, +ulong ms) +{ + DELAYMSctx(ms); + return(0); +} + +_local_ int +dfc_issue_mbox( +fc_dev_ctl_t * p_dev_ctl, +MAILBOX * mb, +ulong * ipri) +{ + static int j; + static MAILBOX * mbslim; + static FC_BRD_INFO * binfo; + static iCfgParam * clp; + struct dfc_info * di; + static volatile uint32 word0, ldata; + static uint32 ha_copy; + static void * ioa; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + di = &dfc.dfc_info[binfo->fc_brd_no]; + if (binfo->fc_ffstate == FC_ERROR) { + mb->mbxStatus = MBXERR_ERROR; + return(1); + } + j = 0; + while((binfo->fc_mbox_active) || (di->fc_flag & DFC_MBOX_ACTIVE)) { + dfc_unlock_enable(*ipri, &CMD_LOCK); + + if (j < 10) { + dfc_msdelay(p_dev_ctl, 1); + } else { + dfc_msdelay(p_dev_ctl, 50); + } + + *ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if (j++ >= 600) { + mb->mbxStatus = MBXERR_ERROR; + return(1); + } + } + binfo->fc_mbox_active = 2; + di->fc_flag |= DFC_MBOX_ACTIVE; + +retrycmd: + /* next set own bit for the adapter and copy over command word */ + mb->mbxOwner = OWN_CHIP; + + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + /* First copy command data */ + mbslim = FC_SLI2_MAILBOX(binfo); + fc_pcimem_bcopy((uint32 * )mb, (uint32 * )mbslim, + (sizeof(uint32) * (MAILBOX_CMD_WSIZE))); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&di->fc_iomap_mem); /* map in SLIM */ + mbslim = 
FC_MAILBOX(binfo, ioa); + WRITE_SLIM_COPY(binfo, &mb->un.varWords, &mbslim->un.varWords, + (MAILBOX_CMD_WSIZE - 1)); + + /* copy over last word, with mbxOwner set */ + ldata = *((volatile uint32 * )mb); + + + WRITE_SLIM_ADDR(binfo, ((volatile uint32 * )mbslim), ldata); + FC_UNMAP_MEMIO(ioa); + } + + fc_bcopy((char *)(mb), (char *)&p_dev_ctl->dfcmb[0], + (sizeof(uint32) * (MAILBOX_CMD_WSIZE))); + + /* interrupt board to doit right away */ + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, FC_FF_REG(binfo, ioa), CA_MBATT); + FC_UNMAP_MEMIO(ioa); + + FCSTATCTR.issueMboxCmd++; + + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + /* First copy command data */ + word0 = p_dev_ctl->dfcmb[0]; + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&di->fc_iomap_mem); /* map in SLIM */ + mbslim = FC_MAILBOX(binfo, ioa); + word0 = READ_SLIM_ADDR(binfo, ((volatile uint32 * )mbslim)); + FC_UNMAP_MEMIO(ioa); + } + + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + ha_copy = READ_CSR_REG(binfo, FC_HA_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + + /* Wait for command to complete */ + while (((word0 & OWN_CHIP) == OWN_CHIP) || !(ha_copy & HA_MBATT)) { + dfc_unlock_enable(*ipri, &CMD_LOCK); + + if ((j < 20) && (mb->mbxCommand != MBX_INIT_LINK)) { + dfc_msdelay(p_dev_ctl, 1); + } else { + dfc_msdelay(p_dev_ctl, 50); + } + + *ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if (j++ >= 600) { + mb->mbxStatus = MBXERR_ERROR; + binfo->fc_mbox_active = 0; + di->fc_flag &= ~DFC_MBOX_ACTIVE; + return(1); + } + + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + /* First copy command data */ + word0 = p_dev_ctl->dfcmb[0]; + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&di->fc_iomap_mem); /* map in SLIM */ + mbslim = FC_MAILBOX(binfo, ioa); + word0 = READ_SLIM_ADDR(binfo, ((volatile uint32 * )mbslim)); + FC_UNMAP_MEMIO(ioa); + } + ha_copy = 
HA_MBATT; + } + + mbslim = (MAILBOX * ) & word0; + if (mbslim->mbxCommand != mb->mbxCommand) { + j++; + if(mb->mbxCommand == MBX_INIT_LINK) { + /* Do not retry init_link's */ + mb->mbxStatus = 0; + binfo->fc_mbox_active = 0; + di->fc_flag &= ~DFC_MBOX_ACTIVE; + return(1); + } + goto retrycmd; + } + + /* copy results back to user */ + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + /* First copy command data */ + fc_bcopy((char *)&p_dev_ctl->dfcmb[0], (char *)mb, + (sizeof(uint32) * (MAILBOX_CMD_WSIZE))); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&di->fc_iomap_mem); /* map in SLIM */ + mbslim = FC_MAILBOX(binfo, ioa); + /* copy results back to user */ + READ_SLIM_COPY(binfo, (uint32 * )mb, (uint32 * )mbslim, + MAILBOX_CMD_WSIZE); + FC_UNMAP_MEMIO(ioa); + } + + ioa = (void *)FC_MAP_IO(&di->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, FC_HA_REG(binfo, ioa), HA_MBATT); + FC_UNMAP_MEMIO(ioa); + + + binfo->fc_mbox_active = 0; + di->fc_flag &= ~DFC_MBOX_ACTIVE; + + return(0); +} + +int +dfc_put_event( +fc_dev_ctl_t * p_dev_ctl, +uint32 evcode, +uint32 evdata0, +void * evdata1, +void * evdata2) +{ + static fcEvent *ep; + static fcEvent *oep; + static fcEvent_header *ehp = NULL; + static int found; + static MATCHMAP *mp; + static uint32 fstype; + static SLI_CT_REQUEST * ctp; + + ehp = (fcEvent_header *)p_dev_ctl->fc_evt_head; + + while (ehp) { + if (ehp->e_mask == evcode) + break; + ehp = (fcEvent_header *)ehp->e_next_header; + } + + if (!ehp) { + return (0); + } + + ep = ehp->e_head; + oep = 0; + found = 0; + + while(ep && (!found)) { + if(ep->evt_sleep) { + switch(evcode) { + case FC_REG_CT_EVENT: + mp = (MATCHMAP *)evdata1; + ctp = (SLI_CT_REQUEST *)mp->virt; + fstype = (uint32)(ctp->FsType); + if((ep->evt_type == FC_FSTYPE_ALL) || + (ep->evt_type == fstype)) { + found++; + ep->evt_data0 = evdata0; /* tag */ + ep->evt_data1 = evdata1; /* buffer ptr */ + ep->evt_data2 = evdata2; /* count */ + 
ep->evt_sleep = 0; + if ((ehp->e_mode & E_SLEEPING_MODE) && !(ehp->e_flag & E_GET_EVENT_ACTIVE)) { + ehp->e_flag |= E_GET_EVENT_ACTIVE; + dfc_wakeup(p_dev_ctl, ehp); + } + + } + break; + default: + found++; + ep->evt_data0 = evdata0; + ep->evt_data1 = evdata1; + ep->evt_data2 = evdata2; + ep->evt_sleep = 0; + if ((ehp->e_mode & E_SLEEPING_MODE) && !(ehp->e_flag & E_GET_EVENT_ACTIVE)) { + ehp->e_flag |= E_GET_EVENT_ACTIVE; + dfc_wakeup(p_dev_ctl, ehp); + } + break; + } + } + oep = ep; + ep = ep->evt_next; + } + return(found); +} + +int +dfc_wakeupall( +fc_dev_ctl_t * p_dev_ctl, +int flag) +{ + static fcEvent *ep; + static fcEvent *oep; + static fcEvent_header *ehp = NULL; + static int found; + + ehp = (fcEvent_header *)p_dev_ctl->fc_evt_head; + found = 0; + + while (ehp) { + ep = ehp->e_head; + oep = 0; + while(ep) { + ep->evt_sleep = 0; + if(flag) { + dfc_wakeup(p_dev_ctl, ehp); + } + else if (!(ehp->e_flag & E_GET_EVENT_ACTIVE)) { + found++; + ehp->e_flag |= E_GET_EVENT_ACTIVE; + dfc_wakeup(p_dev_ctl, ehp); + } + oep = ep; + ep = ep->evt_next; + } + ehp = (fcEvent_header *)ehp->e_next_header; + } + return(found); +} + +int +dfc_hba_put_event( +fc_dev_ctl_t * p_dev_ctl, +uint32 evcode, +uint32 evdata1, +uint32 evdata2, +uint32 evdata3, +uint32 evdata4) +{ + static HBAEVENT *rec; + static FC_BRD_INFO * binfo; + + binfo = &BINFO; + rec = &p_dev_ctl->hbaevent[p_dev_ctl->hba_event_put]; + rec->fc_eventcode = evcode; + + rec->fc_evdata1 = evdata1; + rec->fc_evdata2 = evdata2; + rec->fc_evdata3 = evdata3; + rec->fc_evdata4 = evdata4; + p_dev_ctl->hba_event_put++; + if(p_dev_ctl->hba_event_put >= MAX_HBAEVENT) { + p_dev_ctl->hba_event_put = 0; + } + if(p_dev_ctl->hba_event_put == p_dev_ctl->hba_event_get) { + p_dev_ctl->hba_event_missed++; + p_dev_ctl->hba_event_get++; + if(p_dev_ctl->hba_event_get >= MAX_HBAEVENT) { + p_dev_ctl->hba_event_get = 0; + } + } + + return(0); +} /* End dfc_hba_put_event */ + +int +dfc_hba_set_event( +fc_dev_ctl_t * p_dev_ctl, +struct dfc_mem 
*dm, +struct cmd_input *cip, +struct dfccmdinfo *infop, +ulong ipri) +{ + static fcEvent *evp; + static fcEvent *ep; + static fcEvent *oep; + static fcEvent_header *ehp; + static fcEvent_header *oehp; + static int found; + static MBUF_INFO * buf_info; + static MBUF_INFO bufinfo; + static uint32 offset; + static uint32 incr; + + offset = ((uint32)((ulong)cip->c_arg3) & FC_REG_EVENT_MASK); + incr = (uint32)cip->c_flag; + + switch(offset) { + case FC_REG_CT_EVENT: + found = fc_out_event; + break; + case FC_REG_RSCN_EVENT: + found = fc_out_event; + break; + case FC_REG_LINK_EVENT: + found = 2; + break; + default: + found = 0; + rc = EINTR; + return(ipri); + } + + oehp = 0; + ehp = (fcEvent_header *)p_dev_ctl->fc_evt_head; + while (ehp) { + if (ehp->e_mask == offset) { + found = 0; + break; + } + oehp = ehp; + ehp = (fcEvent_header *)ehp->e_next_header; + } + + if (!ehp) { + buf_info = &bufinfo; + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = sizeof(void *); + buf_info->size = sizeof(fcEvent_header); + buf_info->dma_handle = 0; + + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_malloc(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if (buf_info->virt == NULL) { + rc = EINTR; + return(ipri); + } + ehp = (fcEvent_header *)(buf_info->virt); + fc_bzero((char *)ehp, sizeof(fcEvent_header)); + if(p_dev_ctl->fc_evt_head == 0) { + p_dev_ctl->fc_evt_head = ehp; + p_dev_ctl->fc_evt_tail = ehp; + } else { + ((fcEvent_header *)(p_dev_ctl->fc_evt_tail))->e_next_header = ehp; + p_dev_ctl->fc_evt_tail = (void *)ehp; + } + ehp->e_handle = incr; + ehp->e_mask = offset; + + } + + while(found) { + /* Save event id for C_GET_EVENT */ + buf_info = &bufinfo; + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = sizeof(void *); + buf_info->size = sizeof(fcEvent); + buf_info->dma_handle = 0; + + dfc_unlock_enable(ipri, &CMD_LOCK); + 
fc_malloc(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if (buf_info->virt == NULL) { + rc = EINTR; + break; + } + oep = (fcEvent *)(buf_info->virt); + fc_bzero((char *)oep, sizeof(fcEvent)); + + oep->evt_sleep = 1; + oep->evt_handle = incr; + oep->evt_mask = offset; + switch(offset) { + case FC_REG_CT_EVENT: + oep->evt_type = (uint32)((ulong)cip->c_arg2); /* fstype for CT */ + break; + default: + oep->evt_type = 0; + } + + if(ehp->e_head == 0) { + ehp->e_head = oep; + ehp->e_tail = oep; + } else { + ehp->e_tail->evt_next = (void *)oep; + ehp->e_tail = oep; + } + oep->evt_next = 0; + found--; + } + + switch(offset) { + case FC_REG_CT_EVENT: + case FC_REG_RSCN_EVENT: + case FC_REG_LINK_EVENT: + if(dfc_sleep(p_dev_ctl, ehp)) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EINTR; + /* Remove all eventIds from queue */ + ep = ehp->e_head; + oep = 0; + found = 0; + while(ep) { + if(ep->evt_handle == incr) { + /* dequeue event from event list */ + if(oep == 0) { + ehp->e_head = ep->evt_next; + } + else { + oep->evt_next = ep->evt_next; + } + if(ehp->e_tail == ep) + ehp->e_tail = oep; + evp = ep; + ep = ep->evt_next; + dfc_unlock_enable(ipri, &CMD_LOCK); + buf_info = &bufinfo; + buf_info->virt = (uchar *)evp; + buf_info->size = sizeof(fcEvent); + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = 0; + buf_info->dma_handle = 0; + fc_free(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + } else { + oep = ep; + ep = ep->evt_next; + } + } + if (ehp->e_head == 0) { + + if (oehp == 0) { + p_dev_ctl->fc_evt_head = ehp->e_next_header; + } else { + oehp->e_next_header = ehp->e_next_header; + } + if (p_dev_ctl->fc_evt_tail == ehp) + p_dev_ctl->fc_evt_tail = oehp; + + dfc_unlock_enable(ipri, &CMD_LOCK); + buf_info = &bufinfo; + buf_info->virt = (uchar *)ehp; + buf_info->size = sizeof(fcEvent_header); + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + 
buf_info->align = 0; + buf_info->dma_handle = 0; + fc_free(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + } + return(ipri); + } + return(ipri); + } + return(ipri); +} + +int +dfc_hba_sendscsi_fcp( +fc_dev_ctl_t * p_dev_ctl, +struct dfc_mem *dm, +struct cmd_input *cip, +struct dfccmdinfo *infop, +ulong ipri) +{ + static HBA_WWN findwwn; + static DMATCHMAP * fcpmp; + static RING * rp; + static fc_buf_t * fcptr; + static FCP_CMND * fcpCmnd; + static FCP_RSP * fcpRsp; + static ULP_BDE64 * bpl; + static MATCHMAP * bmp; + static DMATCHMAP * outdmp; + static MBUF_INFO * buf_info; + static MBUF_INFO bufinfo; + static uint32 buf1sz; + static uint32 buf2sz; + static uint32 j; + static uint32 * lptr; + static char * bp; + static uint32 max; + static struct { + uint32 rspcnt; + uint32 snscnt; + } count; + static struct dev_info *dev_info; + static FC_BRD_INFO * binfo; + + binfo = &BINFO; + lptr = (uint32 *)&cip->c_string[0]; + buf1sz = *lptr++; /* Request data size */ + buf2sz = *lptr; /* Sns / rsp buffer size */ + if((buf1sz + infop->c_outsz) > (80 * 4096)) { + rc = ERANGE; + return(ipri); + } + + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyin((uchar *)cip->c_arg3, (uchar *)&findwwn, (ulong)(sizeof(HBA_WWN)))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + return(ipri); + } + + buf_info = &bufinfo; + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = sizeof(void *); + buf_info->size = sizeof(struct dev_info); + buf_info->dma_handle = 0; + + fc_malloc(p_dev_ctl, buf_info); + if (buf_info->virt == NULL) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = ENOMEM; + return(ipri); + } + dev_info = (struct dev_info *)buf_info->virt; + + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + /* Allocate buffer for Buffer ptr list */ + if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL)) == 0) { + rc = ENOMEM; + goto ssout3; + } + bpl = (ULP_BDE64 * )bmp->virt; + 
dfc_unlock_enable(ipri, &CMD_LOCK); + + if((fcpmp = dfc_fcp_data_alloc(p_dev_ctl, bpl)) == 0) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + fc_mem_put(binfo, MEM_BUF, (uchar * )bmp); + rc = ENOMEM; + goto ssout3; + } + bpl += 2; /* Cmnd and Rsp ptrs */ + fcpCmnd = (FCP_CMND *)fcpmp->dfc.virt; + fcpRsp = (FCP_RSP *)((uchar *)fcpCmnd + sizeof(FCP_CMND)); + +{ +lptr = (uint32 *)bmp->virt; +} + if (fc_copyin((uchar *)cip->c_arg1, (uchar *)fcpCmnd, (ulong)(buf1sz))) { + dfc_fcp_data_free(p_dev_ctl, fcpmp); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + fc_mem_put(binfo, MEM_BUF, (uchar * )bmp); + rc = ENOMEM; + goto ssout3; + } +{ +lptr = (uint32 *)fcpCmnd; +} + fc_mpdata_sync(fcpmp->dfc.dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV); + + if(fcpCmnd->fcpCntl3 == WRITE_DATA) { + bp = (uchar *)infop->c_dataout; + } + else { + bp = 0; + } + + if(infop->c_outsz == 0) + outdmp = dfc_cmd_data_alloc(p_dev_ctl, bp, bpl, 512); + else + outdmp = dfc_cmd_data_alloc(p_dev_ctl, bp, bpl, infop->c_outsz); + + if(!(outdmp)) { + dfc_fcp_data_free(p_dev_ctl, fcpmp); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + fc_mem_put(binfo, MEM_BUF, (uchar * )bmp); + rc = ENOMEM; + goto ssout3; + } + + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + max = 0; +redoss: +{ +lptr = (uint32 *)bmp->virt; +} + + if((rc=fc_snd_scsi_req(p_dev_ctl, (NAME_TYPE *)&findwwn, bmp, fcpmp, outdmp, infop->c_outsz, dev_info))) + { + if((rc == ENODEV) && (max < 4)) { + max++; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 500); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + goto redoss; + } + if(rc == ENODEV) + rc = EACCES; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_cmd_data_free(p_dev_ctl, outdmp); + dfc_fcp_data_free(p_dev_ctl, fcpmp); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + fc_mem_put(binfo, MEM_BUF, (uchar * )bmp); + rc = ENOMEM; + goto ssout3; + } + + rp = &binfo->fc_ring[FC_FCP_RING]; + fcptr = (fc_buf_t *)fcpmp->dfc.virt; + + j = 0; + dfc_unlock_enable(ipri, &CMD_LOCK); + 
dfc_msdelay(p_dev_ctl, 1); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + /* Wait for FCP I/O to complete or timeout */ + while(dev_info->queue_state == ACTIVE_PASSTHRU) { + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 50); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if(j >= 600) { + break; + } + j++; + } + + /* Check for timeout conditions */ + if(dev_info->queue_state == ACTIVE_PASSTHRU) { + /* Free resources */ + fc_deq_fcbuf_active(rp, fcptr->iotag); + rc = ETIMEDOUT; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_cmd_data_free(p_dev_ctl, outdmp); + dfc_fcp_data_free(p_dev_ctl, fcpmp); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + goto ssout3; + } + if ((infop->c_cmd == C_HBA_SEND_FCP) && + (dev_info->ioctl_event != IOSTAT_LOCAL_REJECT)) { + if(buf2sz < sizeof(FCP_RSP)) + count.snscnt = buf2sz; + else + count.snscnt = sizeof(FCP_RSP); +{ +lptr = (uint32 *)fcpRsp; +} + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyout((uchar *)fcpRsp, (uchar *)cip->c_arg2, count.snscnt)) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + goto ssout0; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + } + + switch(dev_info->ioctl_event) { + case IOSTAT_SUCCESS: +cpdata: + /* copy back response data */ + if(infop->c_outsz < dev_info->clear_count) { + infop->c_outsz = 0; + rc = ERANGE; + goto ssout0; + } + infop->c_outsz = dev_info->clear_count; + + if (infop->c_cmd == C_HBA_SEND_SCSI) { + count.rspcnt = infop->c_outsz; + count.snscnt = 0; + } else { + /* For C_HBA_SEND_FCP, snscnt is already set */ + count.rspcnt = infop->c_outsz; + } + + /* Return data length */ + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyout((uchar *)&count, (uchar *)cip->c_arg3, (2*sizeof(uint32)))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + goto ssout0; + } + + infop->c_outsz = 0; + if(count.rspcnt) { + if(dfc_rsp_data_copy(p_dev_ctl, (uchar *)infop->c_dataout, outdmp, count.rspcnt)) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + 
goto ssout0; + } + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + break; + case IOSTAT_LOCAL_REJECT: + infop->c_outsz = 0; + if(dev_info->ioctl_errno == IOERR_SEQUENCE_TIMEOUT) { + rc = ETIMEDOUT; + goto ssout0; + } + rc = EFAULT; + goto ssout0; + case IOSTAT_FCP_RSP_ERROR: + j = 0; + + if(fcpCmnd->fcpCntl3 == READ_DATA) { + dev_info->clear_count = infop->c_outsz - dev_info->clear_count; + if ((fcpRsp->rspStatus2 & RESID_UNDER) && + (dev_info->clear_count)) { + goto cpdata; + } + } + else + dev_info->clear_count = 0; + + count.rspcnt = (uint32)dev_info->clear_count; + infop->c_outsz = 0; + + if (fcpRsp->rspStatus2 & RSP_LEN_VALID) { + j = SWAP_DATA(fcpRsp->rspRspLen); + } + if (fcpRsp->rspStatus2 & SNS_LEN_VALID) { + if (infop->c_cmd == C_HBA_SEND_SCSI) { + if(buf2sz < (int)dev_info->sense_length) + count.snscnt = buf2sz; + else + count.snscnt = dev_info->sense_length; + + /* Return sense info from rsp packet */ + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyout(((uchar *)&fcpRsp->rspInfo0) + j, + (uchar *)cip->c_arg2, count.snscnt)) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + goto ssout0; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + } + } + else { + rc = EFAULT; + goto ssout0; + } + + /* Return data length */ + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyout((uchar *)&count, (uchar *)cip->c_arg3, (2*sizeof(uint32)))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + goto ssout0; + } + + /* return data for read */ + if(count.rspcnt) { + if(dfc_rsp_data_copy(p_dev_ctl, (uchar *)infop->c_dataout, outdmp, count.rspcnt)) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + goto ssout0; + } + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + break; + + default: + infop->c_outsz = 0; + rc = EFAULT; + goto ssout0; + } + +ssout0: + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_cmd_data_free(p_dev_ctl, outdmp); + dfc_fcp_data_free(p_dev_ctl, fcpmp); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); +ssout3: + 
dfc_unlock_enable(ipri, &CMD_LOCK); + buf_info->size = sizeof(struct dev_info); + buf_info->virt = (uint32 * )dev_info; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + + fc_free(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + return(ipri); +} + +int +dfc_hba_fcpbind( +fc_dev_ctl_t * p_dev_ctl, +struct dfc_mem *dm, +struct cmd_input *cip, +struct dfccmdinfo *infop, +ulong ipri) +{ + static HBA_FCPBINDING * hb; + static HBA_FCPBINDINGENTRY *ep; + static uint32 room; + static uint32 total; + static uint32 lunIndex, totalLuns; /* these 2 vars are per target id */ + static uint32 lunId; /* what we get back at lunIndex in virtRptLunData */ + static int memsz, mapList; + static char *appPtr; + static uint32 cnt; + static node_t * nodep; + static dvi_t * dev_ptr; + static uint32 total_mem; + static uint32 offset, j; + static NODELIST * nlp; + static FC_BRD_INFO * binfo; + + binfo = &BINFO; + hb = (HBA_FCPBINDING *)dm->fc_dataout; + ep = &hb->entry[0]; + room = (uint32)((ulong)cip->c_arg1); + cnt = 0; + total = 0; + memsz = 0; + lunIndex = 0; + totalLuns = 0; + appPtr = ((char *)infop->c_dataout) + sizeof(ulong); + mapList = 1; + + /* First Mapped ports, then unMapped ports, then binding list */ + nlp = binfo->fc_nlpmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpmap_start) { + nlp = binfo->fc_nlpunmap_start; + mapList = 0; + } + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpbind_start; + while(nlp != (NODELIST *)&binfo->fc_nlpbind_start) { + + if (nlp->nlp_type & NLP_SEED_MASK) { + offset = FC_SCSID(nlp->id.nlp_pan, nlp->id.nlp_sid); + if(offset > MAX_FC_TARGETS) { + goto nextbind; + } + nodep = binfo->device_queue_hash[offset].node_ptr; + if(nodep) + dev_ptr = nodep->lunlist; + else + dev_ptr = 0; + + if((!nodep) || (!dev_ptr)) { + dev_ptr=fc_alloc_devp(p_dev_ctl, offset, 0); + nodep = dev_ptr->nodep; + } + + if(mapList) { + /* For devices on the map list, we need to issue REPORT_LUN + * in case 
the device's config has changed */ + nodep->rptlunstate = REPORT_LUN_ONGOING; + issue_report_lun(p_dev_ctl, dev_ptr, 0); + + j = 0; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 1); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + /* Wait for ReportLun request to complete or timeout */ + while(nodep->rptlunstate == REPORT_LUN_ONGOING) { + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 50); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if(j >= 600) { + break; + } + j++; + } + if(nodep->rptlunstate == REPORT_LUN_ONGOING) { + break; + } + /* + * If nodep->virtRptLunData is null, then we just report 1 lun. + * If not null, we will report luns from virtRptLunData buffer. + */ + lunIndex = 0; + totalLuns = 1; + dev_ptr = 0; + if (nodep->virtRptLunData) { + uint32 *tmp; + tmp = (uint32*)nodep->virtRptLunData; + totalLuns = SWAP_DATA(*tmp) / 8; + } + } + + while(((mapList) && (lunIndex < totalLuns)) || + (dev_ptr)) { + if(mapList) { + lunId = dfc_getLunId(nodep, lunIndex); + dev_ptr = fc_find_lun(binfo, offset, lunId); + } else + lunId = dev_ptr->lun_id; + + if((mapList) || + ((dev_ptr) && (dev_ptr->opened))) + { + if(cnt < room) { + HBA_OSDN *osdn; + HBA_UINT32 fcpLun[2]; + if(total_mem - memsz < sizeof(HBA_FCPBINDINGENTRY)) { + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_copyout((char *)(&hb->entry[0]), appPtr, memsz); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + appPtr = appPtr + memsz; + ep = &hb->entry[0]; + memsz = 0; + } + fc_bzero((void *)ep->ScsiId.OSDeviceName, 256); + if(nlp->nlp_flag & NLP_MAPPED) { + osdn = (HBA_OSDN *)&ep->ScsiId.OSDeviceName[0]; + fc_bcopy("lpfc", osdn->drvname, 4); + osdn->instance = fc_brd_to_inst(binfo->fc_brd_no); + osdn->target = FC_SCSID(nlp->id.nlp_pan, nlp->id.nlp_sid); + osdn->lun = (HBA_UINT32)(lunId); + } + + ep->ScsiId.ScsiTargetNumber = + FC_SCSID(nlp->id.nlp_pan, nlp->id.nlp_sid); + ep->ScsiId.ScsiOSLun = (HBA_UINT32)(lunId); + ep->ScsiId.ScsiBusNumber = 0; + + fc_bzero((char *)fcpLun, 
sizeof(HBA_UINT64)); + fcpLun[0] = (lunId << FC_LUN_SHIFT); + if (nodep->addr_mode == VOLUME_SET_ADDRESSING) { + fcpLun[0] |= SWAP_DATA(0x40000000); + } + fc_bcopy((char *)&fcpLun[0], (char *)&ep->FcpId.FcpLun, sizeof(HBA_UINT64)); + if (nlp->nlp_type & NLP_SEED_DID) { + ep->type = TO_D_ID; + ep->FcpId.FcId = nlp->nlp_DID; + ep->FcId = nlp->nlp_DID; + fc_bzero((uchar *)&ep->FcpId.PortWWN, sizeof(HBA_WWN)); + fc_bzero((uchar *)&ep->FcpId.NodeWWN, sizeof(HBA_WWN)); + } + else { + ep->type = TO_WWN; + ep->FcId = 0; + ep->FcpId.FcId = 0; + if (nlp->nlp_type & NLP_SEED_WWPN) + fc_bcopy(&nlp->nlp_portname, (uchar *)&ep->FcpId.PortWWN, sizeof(HBA_WWN)); + else + fc_bcopy(&nlp->nlp_nodename, (uchar *)&ep->FcpId.NodeWWN, sizeof(HBA_WWN)); + } + if (nlp->nlp_state == NLP_ALLOC) { + ep->FcpId.FcId = nlp->nlp_DID; + fc_bcopy(&nlp->nlp_portname, (uchar *)&ep->FcpId.PortWWN, sizeof(HBA_WWN)); + fc_bcopy(&nlp->nlp_nodename, (uchar *)&ep->FcpId.NodeWWN, sizeof(HBA_WWN)); + } + ep++; + cnt++; + memsz = memsz + sizeof(HBA_FCPBINDINGENTRY); + total++; + } + } + if(mapList) { + /* for map list, we want the while loop to go stricly + * based on lunIndex and totalLuns. 
*/ + lunIndex++; + dev_ptr = 0; + } else + dev_ptr = dev_ptr->next; + } /* while loop */ + } + +nextbind: + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == (NODELIST *)&binfo->fc_nlpmap_start) { + nlp = binfo->fc_nlpunmap_start; + mapList = 0; + } + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpbind_start; + } + + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_copyout((char *)(&hb->entry[0]), appPtr, memsz); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + hb->NumberOfEntries = (HBA_UINT32)total; + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_copyout((char *)(&hb->NumberOfEntries), infop->c_dataout, sizeof(ulong)); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + infop->c_outsz = 0; + if (total > room) { + rc = ERANGE; + do_cp = 1; + } + return (ipri); +} + +int +dfc_hba_sendmgmt_ct( +fc_dev_ctl_t * p_dev_ctl, +struct dfc_mem *dm, +struct cmd_input *cip, +struct dfccmdinfo *infop, +ulong ipri) + +{ + static ULP_BDE64 * bpl; + static MATCHMAP * bmp; + static DMATCHMAP * indmp; + static DMATCHMAP * outdmp; + static uint32 portid; + static HBA_WWN findwwn; + static uint32 buf1sz; + static uint32 buf2sz; + static int j; + static uint32 max; + static uint32 incr; + static uint32 * lptr; + static NODELIST * nlp; + static FC_BRD_INFO * binfo; + + binfo = &BINFO; + incr = (uint32)cip->c_flag; /* timeout for CT request */ + lptr = (uint32 *)&cip->c_string[0]; + buf1sz = *lptr++; + buf2sz = *lptr; + + if((buf1sz == 0) || + (buf2sz == 0) || + (buf1sz + buf2sz > (80 * 4096))) { + rc = ERANGE; + return(ipri); + } + + dfc_unlock_enable(ipri, &CMD_LOCK); + + if(infop->c_cmd == C_SEND_MGMT_CMD) { + if (fc_copyin((uchar *)cip->c_arg3, (uchar *)&findwwn, (ulong)(sizeof(HBA_WWN)))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + return(ipri); + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + /* First Mapped ports, then unMapped ports */ + nlp = binfo->fc_nlpmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpmap_start) + nlp = 
binfo->fc_nlpunmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpunmap_start) { + if (fc_geportname(&nlp->nlp_portname, (NAME_TYPE *)&findwwn) == 2) + goto gotit; + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == (NODELIST *)&binfo->fc_nlpmap_start) + nlp = binfo->fc_nlpunmap_start; + } + rc = ERANGE; + return(ipri); +gotit: + portid = nlp->nlp_DID; + dfc_unlock_enable(ipri, &CMD_LOCK); + } + else { + portid = (uint32)((ulong)cip->c_arg3); + } + + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + /* Allocate buffer for Buffer ptr list */ + if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL)) == 0) { + rc = ENOMEM; + return(ipri); + } + bpl = (ULP_BDE64 * )bmp->virt; + dfc_unlock_enable(ipri, &CMD_LOCK); + + if((indmp = dfc_cmd_data_alloc(p_dev_ctl, (uchar *)cip->c_arg1, bpl, buf1sz)) == 0) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + rc = ENOMEM; + return(ipri); + } + bpl += indmp->dfc_flag; + + if((outdmp = dfc_cmd_data_alloc(p_dev_ctl, 0, bpl, buf2sz)) == 0) { + dfc_cmd_data_free(p_dev_ctl, indmp); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + rc = ENOMEM; + return(ipri); + } + + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + max = 0; +redoct: + if((rc=fc_issue_ct_req(binfo, portid, bmp, indmp, outdmp, incr))) { + if((rc == ENODEV) && (max < 4)) { + max++; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 500); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + goto redoct; + } + if(rc == ENODEV) + rc = EACCES; + goto ctout1; + } + + j = 0; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 1); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + /* Wait for CT request to complete or timeout */ + while(outdmp->dfc_flag == 0) { + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 50); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if(j >= 600) { + outdmp->dfc_flag = -1; + break; + } + j++; + } + + j = outdmp->dfc_flag; + if(j == -1) { + rc 
= ETIMEDOUT; + goto ctout1; + } + + if(j == -2) { + rc = EFAULT; + goto ctout1; + } + + /* copy back response data */ + if(j > buf2sz) { + rc = ERANGE; + /* C_CT Request error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1208, /* ptr to msg structure */ + fc_mes1208, /* ptr to msg */ + fc_msgBlk1208.msgPreambleStr, /* begin varargs */ + outdmp->dfc_flag, + 4096); /* end varargs */ + goto ctout1; + } + fc_bcopy((char *)&j, dm->fc_dataout, sizeof(int)); + + /* copy back data */ + dfc_unlock_enable(ipri, &CMD_LOCK); + if(dfc_rsp_data_copy(p_dev_ctl, (uchar *)cip->c_arg2, outdmp, j)) + rc = EIO; + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + +ctout1: + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_cmd_data_free(p_dev_ctl, indmp); + dfc_cmd_data_free(p_dev_ctl, outdmp); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + return(ipri); +} + +int +dfc_hba_rnid( +fc_dev_ctl_t * p_dev_ctl, +struct dfc_mem *dm, +struct cmd_input *cip, +struct dfccmdinfo *infop, +MBUF_INFO *buf_info, +ulong ipri) +{ + static HBA_WWN findwwn; + static ELS_PKT * ep; + static DMATCHMAP inmatp; + static DMATCHMAP outmatp; + static MATCHMAP * bmptr; + static uint32 * lptr; + static NODELIST * nlp; + static int j; + static uint32 size, incr; + static uint32 max; + static FC_BRD_INFO * binfo; + + binfo = &BINFO; + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyin((uchar *)cip->c_arg1, (uchar *)&findwwn, (ulong)(sizeof(HBA_WWN)))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + return(ipri); + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + size = NLP_ALLOC; + incr = 0; +nlpchk: + nlp = binfo->fc_nlpbind_start; + if(nlp == (NODELIST *)&binfo->fc_nlpbind_start) + nlp = binfo->fc_nlpunmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if(cip->c_flag == NODE_WWN) { + if (fc_geportname(&nlp->nlp_nodename, (NAME_TYPE *)&findwwn) == 2) + 
goto foundrnid; + } + else { + if (fc_geportname(&nlp->nlp_portname, (NAME_TYPE *)&findwwn) == 2) + goto foundrnid; + } + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == (NODELIST *)&binfo->fc_nlpbind_start) + nlp = binfo->fc_nlpunmap_start; + if(nlp == (NODELIST *)&binfo->fc_nlpunmap_start) + nlp = binfo->fc_nlpmap_start; + } + rc = ERANGE; + return(ipri); + +foundrnid: + if(nlp->nlp_action & NLP_DO_RNID) + goto waitloop; + + if(nlp->nlp_Rpi == 0) { + int wait_sec; + + size = nlp->nlp_DID; + if(size == 0) { + size = nlp->nlp_oldDID; + } + if((size == 0) || (size == 0xffffffff) || (size == 0xffffff) || + (incr == 3)) { + rc = ERANGE; + return(ipri); + } + incr++; + nlp->nlp_action |= NLP_DO_RNID; + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)((ulong)size), + (uint32)0, (ushort)0, nlp); +waitloop: + wait_sec = 0; + while(nlp->nlp_action & NLP_DO_RNID) { + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 1000); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if(wait_sec++ == 10) + return(ipri); + } + nlp->nlp_action &= ~NLP_DO_RNID; + goto nlpchk; + } + + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_DMA | FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = (int)FCELSSIZE; + buf_info->size = (int)FCELSSIZE; + buf_info->dma_handle = 0; + + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_malloc(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + if (buf_info->phys == NULL) { + rc = ENOMEM; + return(ipri); + } + inmatp.dfc.virt = buf_info->virt; + if (buf_info->dma_handle) { + inmatp.dfc.dma_handle = buf_info->dma_handle; + inmatp.dfc.data_handle = buf_info->data_handle; + } + inmatp.dfc.phys = (uchar * )buf_info->phys; + + /* Save size of RNID request in this field */ + inmatp.dfc.fc_mptr = (uchar *)((ulong)(2*sizeof(uint32))); + fc_bzero((void *)inmatp.dfc.virt, (2 * sizeof(uint32))); + + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_DMA | FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = 
4096; + buf_info->size = infop->c_outsz + sizeof(uint32); + buf_info->dma_handle = 0; + + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_malloc(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + if (buf_info->phys == NULL) { + rc = ENOMEM; + goto rnidout2; + } + outmatp.dfc.virt = buf_info->virt; + if (buf_info->dma_handle) { + outmatp.dfc.dma_handle = buf_info->dma_handle; + outmatp.dfc.data_handle = buf_info->data_handle; + } + outmatp.dfc.phys = (uchar * )buf_info->phys; + + /* Save size in this field */ + outmatp.dfc.fc_mptr = (uchar *)((ulong)(infop->c_outsz + sizeof(uint32))); + + /* Setup RNID command */ + lptr = (uint32 *)inmatp.dfc.virt; + *lptr = ELS_CMD_RNID; + ep = (ELS_PKT * )lptr; + ep->un.rnid.Format = RNID_TOPOLOGY_DISC; + + max = 0; + bmptr = 0; +redornid: + outmatp.dfc_flag = 0; + if((rc=fc_rnid_req( binfo, &inmatp, &outmatp, &bmptr, nlp->nlp_Rpi))) { + if(bmptr) + fc_mem_put(binfo, MEM_BPL, (uchar * )bmptr); + + if((rc == ENODEV) && (max < 4)) { + max++; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 500); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + goto redornid; + } + if(rc == ENODEV) + rc = EACCES; + goto rnidout1; + } + + j = 0; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 1); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + /* Wait for RNID request to complete or timeout */ + while(outmatp.dfc_flag == 0) { + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 50); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if(j >= 600) { + outmatp.dfc_flag = -1; + return(ipri); + } + j++; + } + + if(bmptr) + fc_mem_put(binfo, MEM_BPL, (uchar * )bmptr); + + j = (int)((ulong)outmatp.dfc_flag); + if(outmatp.dfc_flag == -1) { + + rc = ETIMEDOUT; + goto rnidout1; + } + + if(outmatp.dfc_flag == -2) { + + rc = EFAULT; + goto rnidout1; + } + + /* copy back response data */ + if(j > 4096) { + rc = ERANGE; + /* RNID Request error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1209, /* ptr 
to msg structure */ + fc_mes1209, /* ptr to msg */ + fc_msgBlk1209.msgPreambleStr, /* begin varargs */ + (int)((ulong)outmatp.dfc.fc_mptr), + 4096); /* end varargs */ + goto rnidout1; + } + lptr = (uint32 *)outmatp.dfc.virt; + if(*lptr != ELS_CMD_ACC) { + rc = EFAULT; + goto rnidout1; + } + lptr++; + j -= sizeof(uint32); + fc_bcopy((char *)lptr, dm->fc_dataout, j); + + /* copy back size of response */ + dfc_unlock_enable(ipri, &CMD_LOCK); + if (fc_copyout((uchar *)&j, (uchar *)cip->c_arg2, sizeof(int))) { + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + rc = EIO; + goto rnidout1; + } + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + infop->c_outsz = (uint32)((ulong)outmatp.dfc.fc_mptr); + +rnidout1: + buf_info->size = (int)((ulong)outmatp.dfc.fc_mptr); + buf_info->virt = (uint32 * )outmatp.dfc.virt; + buf_info->phys = (uint32 * )outmatp.dfc.phys; + buf_info->flags = (FC_MBUF_DMA | FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + if (outmatp.dfc.dma_handle) { + buf_info->dma_handle = outmatp.dfc.dma_handle; + buf_info->data_handle = outmatp.dfc.data_handle; + } + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_free(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + +rnidout2: + buf_info->size = (int)((ulong)inmatp.dfc.fc_mptr); + buf_info->virt = (uint32 * )inmatp.dfc.virt; + buf_info->phys = (uint32 * )inmatp.dfc.phys; + buf_info->flags = (FC_MBUF_DMA | FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + if (inmatp.dfc.dma_handle) { + buf_info->dma_handle = inmatp.dfc.dma_handle; + buf_info->data_handle = inmatp.dfc.data_handle; + } + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_free(p_dev_ctl, buf_info); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + return(ipri); +} + +int +dfc_hba_targetmapping( +fc_dev_ctl_t * p_dev_ctl, +struct dfc_mem *dm, +struct cmd_input *cip, +struct dfccmdinfo *infop, +ulong ipri) +{ + static HBA_FCPTARGETMAPPING * hf; + static HBA_FCPSCSIENTRY *ep; + static uint32 room; + static uint32 total; + static uint32 lunIndex, totalLuns; /* these 2 vars are per target 
id */ + static uint32 lunId; /* what we get back at lunIndex in virtRptLunData */ + static int memsz; + static char *appPtr; + static NODELIST * nlp; + static node_t * nodep; + static dvi_t * dev_ptr; + static FC_BRD_INFO * binfo; + static uint32 offset; + static uint32 total_mem; + static uint32 j; + static uint32 cnt; + + binfo = &BINFO; + hf = (HBA_FCPTARGETMAPPING *)dm->fc_dataout; + ep = &hf->entry[0]; + room = (uint32)((ulong)cip->c_arg1); + cnt = 0; + total = 0; + memsz = 0; + appPtr = ((char *)infop->c_dataout) + sizeof(ulong); + + /* Mapped ports only */ + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + offset = FC_SCSID(nlp->id.nlp_pan, nlp->id.nlp_sid); + if(offset > MAX_FC_TARGETS) { + nlp = (NODELIST *)nlp->nlp_listp_next; + continue; + } + nodep = binfo->device_queue_hash[offset].node_ptr; + if(nodep) + dev_ptr = nodep->lunlist; + else + dev_ptr = 0; + + if((!nodep) || (!dev_ptr)) { + dev_ptr=fc_alloc_devp(p_dev_ctl, offset, 0); + nodep = dev_ptr->nodep; + } + + /* we need to issue REPORT_LUN here in case the device's + * config has changed */ + nodep->rptlunstate = REPORT_LUN_ONGOING; + issue_report_lun(p_dev_ctl, dev_ptr, 0); + + j = 0; + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 1); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + /* Wait for ReportLun request to complete or timeout */ + while(nodep->rptlunstate == REPORT_LUN_ONGOING) { + dfc_unlock_enable(ipri, &CMD_LOCK); + dfc_msdelay(p_dev_ctl, 50); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + if(j >= 600) { + break; + } + j++; + } + if(nodep->rptlunstate == REPORT_LUN_ONGOING) { + break; + } + + lunIndex = 0; + totalLuns = 1; + if (nodep->virtRptLunData) { + uint32 *tmp; + tmp = (uint32*)nodep->virtRptLunData; + totalLuns = SWAP_DATA(*tmp) / 8; + } + + while(lunIndex < totalLuns) { + lunId = dfc_getLunId(nodep, lunIndex); + dev_ptr = fc_find_lun(binfo, offset, lunId); + + if((!dev_ptr) || + ((dev_ptr) && (dev_ptr->opened) && 
(dev_ptr->queue_state == ACTIVE))) { + if(cnt < room) { + HBA_OSDN *osdn; + HBA_UINT32 fcpLun[2]; + + if(total_mem - memsz < sizeof(HBA_FCPSCSIENTRY)) { + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_copyout((char *)(&hf->entry[0]), appPtr,memsz); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + appPtr = appPtr + memsz; + ep = &hf->entry[0]; + memsz = 0; + } + + fc_bzero((void *)ep->ScsiId.OSDeviceName, 256); + osdn = (HBA_OSDN *)&ep->ScsiId.OSDeviceName[0]; + fc_bcopy("lpfc", osdn->drvname, 4); + osdn->instance = fc_brd_to_inst(binfo->fc_brd_no); + osdn->target = FC_SCSID(nlp->id.nlp_pan, nlp->id.nlp_sid); + osdn->lun = (HBA_UINT32)(lunId); + osdn->flags = 0; + ep->ScsiId.ScsiTargetNumber = + FC_SCSID(nlp->id.nlp_pan, nlp->id.nlp_sid); + ep->ScsiId.ScsiOSLun = (HBA_UINT32)(lunId); + ep->ScsiId.ScsiBusNumber = 0; + ep->FcpId.FcId = nlp->nlp_DID; + fc_bzero((char *)fcpLun, sizeof(HBA_UINT64)); + + fcpLun[0] = (lunId << FC_LUN_SHIFT); + if (nodep->addr_mode == VOLUME_SET_ADDRESSING) { + fcpLun[0] |= SWAP_DATA(0x40000000); + } + fc_bcopy((char *)&fcpLun[0], (char *)&ep->FcpId.FcpLun, sizeof(HBA_UINT64)); + fc_bcopy(&nlp->nlp_portname, (uchar *)&ep->FcpId.PortWWN, sizeof(HBA_WWN)); + fc_bcopy(&nlp->nlp_nodename, (uchar *)&ep->FcpId.NodeWWN, sizeof(HBA_WWN)); + cnt++; + ep++; + memsz = memsz + sizeof(HBA_FCPSCSIENTRY); + } + total++; + } + lunIndex++; + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_copyout((char *)(&hf->entry[0]), appPtr,memsz); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + hf->NumberOfEntries = (HBA_UINT32)total; + dfc_unlock_enable(ipri, &CMD_LOCK); + fc_copyout((char *)(&hf->NumberOfEntries), infop->c_dataout, sizeof(ulong)); + ipri = dfc_disable_lock(FC_LVL, &CMD_LOCK); + + infop->c_outsz = 0; /* no more copy needed */ + if (total > room) { + rc = ERANGE; + do_cp = 1; + } + return(ipri); +} + +int +dfc_data_alloc( +fc_dev_ctl_t * p_dev_ctl, +struct dfc_mem *dm, +uint32 size) +{ + static FC_BRD_INFO * 
binfo; +#ifndef powerpc + static MBUF_INFO * buf_info; + static MBUF_INFO bufinfo; +#endif + + binfo = &BINFO; + + if(dm->fc_dataout) + return(EACCES); + +#ifdef powerpc + dm->fc_dataout = p_dev_ctl->dfc_kernel_buf; + dm->fc_outsz = size; +#else + size = ((size + 0xfff) & 0xfffff000); + buf_info = &bufinfo; + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = sizeof(void *); + buf_info->size = (int)size; + buf_info->dma_handle = 0; + + fc_malloc(p_dev_ctl, buf_info); + if (buf_info->virt == NULL) { + return(ENOMEM); + } + dm->fc_dataout = buf_info->virt; + dm->fc_outsz = size; + /* dfc_data_alloc */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0402, /* ptr to msg structure */ + fc_mes0402, /* ptr to msg */ + fc_msgBlk0402.msgPreambleStr, /* begin varargs */ + (uint32)((ulong)dm->fc_dataout), + dm->fc_outsz); /* end varargs */ +#endif + + return(0); +} + +int +dfc_data_free( +fc_dev_ctl_t * p_dev_ctl, +struct dfc_mem *dm) +{ + static FC_BRD_INFO * binfo; +#ifndef powerpc + static MBUF_INFO * buf_info; + static MBUF_INFO bufinfo; +#endif + + binfo = &BINFO; + + /* dfc_data_free */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0403, /* ptr to msg structure */ + fc_mes0403, /* ptr to msg */ + fc_msgBlk0403.msgPreambleStr, /* begin varargs */ + (uint32)((ulong)dm->fc_dataout), + dm->fc_outsz); /* end varargs */ + if(dm->fc_dataout == 0) + return(EACCES); + +#ifdef powerpc + dm->fc_dataout = 0; + dm->fc_outsz = 0; +#else + buf_info = &bufinfo; + buf_info->virt = dm->fc_dataout; + buf_info->size = dm->fc_outsz; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = 0; + buf_info->dma_handle = 0; + fc_free(p_dev_ctl, buf_info); + dm->fc_dataout = 0; + dm->fc_outsz = 0; +#endif + return(0); +} + +DMATCHMAP * +dfc_cmd_data_alloc( +fc_dev_ctl_t * p_dev_ctl, +uchar * indataptr, +ULP_BDE64 * bpl, +uint32 size) +{ + static FC_BRD_INFO * binfo; + static 
MBUF_INFO * buf_info; + static MBUF_INFO bufinfo; + static DMATCHMAP * mlist; + static DMATCHMAP * mlast; + static DMATCHMAP * dmp; + static int cnt, offset, i; + + binfo = &BINFO; + buf_info = &bufinfo; + mlist = 0; + mlast = 0; + i = 0; + offset = 0; + + while(size) { + + if(size > 4096) + cnt = 4096; + else + cnt = size; + + /* allocate DMATCHMAP buffer header */ + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = (int)sizeof(long); + buf_info->size = (int)sizeof(DMATCHMAP); + buf_info->dma_handle = 0; + + fc_malloc(p_dev_ctl, buf_info); + + if (buf_info->virt == NULL) { + goto out; + } + dmp = buf_info->virt; + dmp->dfc.fc_mptr = 0; + dmp->dfc.virt = 0; + + /* Queue it to a linked list */ + if(mlast == 0) { + mlist = dmp; + mlast = dmp; + } + else { + mlast->dfc.fc_mptr = (uchar *)dmp; + mlast = dmp; + } + dmp->dfc.fc_mptr = 0; + + /* allocate buffer */ + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_DMA | FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = (int)4096; + buf_info->size = (int)cnt; + buf_info->dma_handle = 0; + + fc_malloc(p_dev_ctl, buf_info); + + if (buf_info->phys == NULL) { + goto out; + } + dmp->dfc.virt = buf_info->virt; + if (buf_info->dma_handle) { + dmp->dfc.dma_handle = buf_info->dma_handle; + dmp->dfc.data_handle = buf_info->data_handle; + } + dmp->dfc.phys = (uchar * )buf_info->phys; + dmp->dfc_size = cnt; + + if(indataptr) { + /* Copy data from user space in */ + if (fc_copyin((indataptr+offset), (uchar *)dmp->dfc.virt, (ulong)cnt)) { + goto out; + } + bpl->tus.f.bdeFlags = 0; + fc_mpdata_sync(dmp->dfc.dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV); + } + else { + bpl->tus.f.bdeFlags = BUFF_USE_RCV; + } + + /* build buffer ptr list for IOCB */ + bpl->addrLow = PCIMEM_LONG(putPaddrLow((ulong)dmp->dfc.phys)); + bpl->addrHigh = PCIMEM_LONG(putPaddrHigh((ulong)dmp->dfc.phys)); + bpl->tus.f.bdeSize = (ushort)cnt; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + 
bpl++; + + i++; + offset += cnt; + size -= cnt; + } + + mlist->dfc_flag = i; + return(mlist); +out: + dfc_cmd_data_free(p_dev_ctl, mlist); + return(0); +} + +DMATCHMAP * +dfc_fcp_data_alloc( +fc_dev_ctl_t * p_dev_ctl, +ULP_BDE64 * bpl) +{ + static DMATCHMAP * fcpmp; + static fc_buf_t * fcptr; + static FC_BRD_INFO * binfo; + static MBUF_INFO * buf_info; + static MBUF_INFO bufinfo; + + binfo = &BINFO; + buf_info = &bufinfo; + + /* allocate DMATCHMAP buffer header */ + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = (int)sizeof(long); + buf_info->size = (int)sizeof(DMATCHMAP); + buf_info->dma_handle = 0; + + fc_malloc(p_dev_ctl, buf_info); + + if (buf_info->virt == NULL) { + return(0); + } + fcpmp = buf_info->virt; + fc_bzero((char *)fcpmp, sizeof(DMATCHMAP)); + + /* allocate buffer */ + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_DMA | FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = (int)4096; + buf_info->size = (int)4096; + buf_info->dma_handle = 0; + + fc_malloc(p_dev_ctl, buf_info); + + if (buf_info->phys == NULL) { + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->size = (int)sizeof(DMATCHMAP); + buf_info->virt = (uint32 * )fcpmp; + buf_info->phys = (uint32 * )0; + buf_info->dma_handle = 0; + buf_info->data_handle = 0; + fc_free(p_dev_ctl, buf_info); + return(0); + } + fcpmp->dfc.virt = buf_info->virt; + if (buf_info->dma_handle) { + fcpmp->dfc.dma_handle = buf_info->dma_handle; + fcpmp->dfc.data_handle = buf_info->data_handle; + } + fcpmp->dfc.phys = (uchar * )buf_info->phys; + fcpmp->dfc_size = 4096; + fc_bzero((char *)fcpmp->dfc.virt, 4096); + + fcptr = (fc_buf_t *)fcpmp->dfc.virt; + fcptr->phys_adr = (char *)fcpmp->dfc.phys; + + bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(GET_PAYLOAD_PHYS_ADDR(fcptr))); + bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr))); + bpl->tus.f.bdeSize = sizeof(FCP_CMND); + 
bpl->tus.f.bdeFlags = BUFF_USE_CMND; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + bpl++; + bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(GET_PAYLOAD_PHYS_ADDR(fcptr)+sizeof(FCP_CMND))); + bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr)+sizeof(FCP_CMND))); + bpl->tus.f.bdeSize = sizeof(FCP_RSP); + bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV); + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + return(fcpmp); +} + +int +dfc_fcp_data_free( +fc_dev_ctl_t * p_dev_ctl, +DMATCHMAP * fcpmp) +{ + static FC_BRD_INFO * binfo; + static MBUF_INFO * buf_info; + static MBUF_INFO bufinfo; + + binfo = &BINFO; + buf_info = &bufinfo; + + if(fcpmp->dfc.virt) { + buf_info->size = fcpmp->dfc_size; + buf_info->virt = (uint32 * )fcpmp->dfc.virt; + buf_info->phys = (uint32 * )fcpmp->dfc.phys; + buf_info->flags = (FC_MBUF_DMA | FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + if (fcpmp->dfc.dma_handle) { + buf_info->dma_handle = fcpmp->dfc.dma_handle; + buf_info->data_handle = fcpmp->dfc.data_handle; + } + fc_free(p_dev_ctl, buf_info); + } + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->size = (int)sizeof(DMATCHMAP); + buf_info->virt = (uint32 * )fcpmp; + buf_info->phys = (uint32 * )0; + buf_info->dma_handle = 0; + buf_info->data_handle = 0; + fc_free(p_dev_ctl, buf_info); + + return(0); +} + +int +dfc_rsp_data_copy( +fc_dev_ctl_t * p_dev_ctl, +uchar * outdataptr, +DMATCHMAP * mlist, +uint32 size) +{ + static FC_BRD_INFO * binfo; + static DMATCHMAP * mlast; + static int cnt, offset; + + binfo = &BINFO; + mlast = 0; + offset = 0; + + while(mlist && size) { + if(size > 4096) + cnt = 4096; + else + cnt = size; + + mlast = mlist; + mlist = (DMATCHMAP *)mlist->dfc.fc_mptr; + + if(outdataptr) { + fc_mpdata_sync(mlast->dfc.dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + /* Copy data to user space */ + if (fc_copyout((uchar *)mlast->dfc.virt, (outdataptr+offset), (ulong)cnt)) { + return(1); + } + } + offset += cnt; + size -= cnt; + } + return(0); +} + +int 
+dfc_cmd_data_free( +fc_dev_ctl_t * p_dev_ctl, +DMATCHMAP * mlist) +{ + static FC_BRD_INFO * binfo; + static MBUF_INFO * buf_info; + static MBUF_INFO bufinfo; + static DMATCHMAP * mlast; + + binfo = &BINFO; + buf_info = &bufinfo; + while(mlist) { + mlast = mlist; + mlist = (DMATCHMAP *)mlist->dfc.fc_mptr; + if(mlast->dfc.virt) { + buf_info->size = mlast->dfc_size; + buf_info->virt = (uint32 * )mlast->dfc.virt; + buf_info->phys = (uint32 * )mlast->dfc.phys; + buf_info->flags = (FC_MBUF_DMA | FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + if (mlast->dfc.dma_handle) { + buf_info->dma_handle = mlast->dfc.dma_handle; + buf_info->data_handle = mlast->dfc.data_handle; + } + fc_free(p_dev_ctl, buf_info); + } + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->size = (int)sizeof(DMATCHMAP); + buf_info->virt = (uint32 * )mlast; + buf_info->phys = (uint32 * )0; + buf_info->dma_handle = 0; + buf_info->data_handle = 0; + fc_free(p_dev_ctl, buf_info); + } + return(0); +} + + +_static_ int +dfc_fmw_rev( +fc_dev_ctl_t * p_dev_ctl) +{ + FC_BRD_INFO * binfo; + struct dfc_info * di; + + binfo = &BINFO; + di = &dfc.dfc_info[binfo->fc_brd_no]; + decode_firmware_rev( binfo, &VPD); + fc_bcopy((uchar *)fwrevision, di->fc_ba.a_fwname, 32); + return(0); +} + + +#else /* DFC_SUBSYSTEM */ + +_static_ int +dfc_ioctl( +struct dfccmdinfo *infop, +struct cmd_input *cip) +{ + return (ENODEV); +} + +int +dfc_put_event( +fc_dev_ctl_t * p_dev_ctl, +uint32 evcode, +uint32 evdata0, +void * evdata1, +void * evdata2) +{ + return(0); +} + +int +dfc_hba_put_event( +fc_dev_ctl_t * p_dev_ctl, +uint32 evcode, +uint32 evdata1, +uint32 evdata2, +uint32 evdata3, +uint32 evdata4) +{ + return(0); +} /* End dfc_hba_put_event */ + +_static_ int +dfc_fmw_rev( +fc_dev_ctl_t * p_dev_ctl) +{ + return(0); +} + +#endif /* DFC_SUBSYSTEM */ + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fc.h current/drivers/scsi/lpfc/fc.h --- reference/drivers/scsi/lpfc/fc.h 1969-12-31 16:00:00.000000000 -0800 +++ 
current/drivers/scsi/lpfc/fc.h 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,1264 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#ifndef _H_FC +#define _H_FC + +/* Open Source defines */ +#define DFC_SUBSYSTEM 1 /* Include dfc subsystem */ + +#include "fcdiag.h" +#include "fcdds.h" + +#define LONGW_ALIGN 2 /* longword align for xmalloc */ +#define FC_MAX_SEQUENCE 65536 /* maximum fc sequence size */ +#define FC_MIN_SEQUENCE 0 /* minimum fc sequence size */ +#define FC_MAX_IP_VECS 16 /* Max scatter list for mapping */ +#define RING_TMO_DFT 30 /* default cmd timeout for IOCB rings */ +#define MBOX_TMO_DFT 30 /* dft mailbox timeout for mbox cmds */ +#define FC_CAP_AUTOSENSE 0x0400 /* SCSI capability for autosense */ +#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */ +#define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */ + +/* Definitions for Binding Entry Type for fc_parse_binding_entry() */ +#define FC_BIND_WW_NN_PN 0 +#define FC_BIND_DID 1 + +#define FC_DMX_ID 0x100 +#define FL_DMX_ID 0x101 + +/* + * Debug printf event ids. 
+ */ + +/* Check if WWN is 0 */ +#define isWWNzero(wwn) ((wwn.nameType == 0) && (wwn.IEEE[0] == 0) && (wwn.IEEE[1] == 0) && (wwn.IEEE[2] == 0) && (wwn.IEEE[3] == 0) && (wwn.IEEE[4] == 0) && (wwn.IEEE[5] == 0)) + +#define ERRID_NOTICE 0x100 +#define ERRID_ERROR 0x200 +#define ERRID_PANIC 0x400 +#define ERRID_MASK 0xf00 + +#define ERRID_VERBOSE 0x10ff + +/* These are verbose logging masks and debug printf masks */ +#define DBG_ELS 0x1 /* ELS events */ +#define DBG_DISCOVERY 0x2 /* Link discovery events */ +#define DBG_MBOX 0x4 /* Mailbox events */ +#define DBG_INIT 0x8 /* Initialization events */ +#define DBG_LINK_EVENT 0x10 /* link events */ +#define DBG_IP 0x20 /* IP traffic history */ +#define DBG_FCP 0x40 /* FCP traffic history */ +#define DBG_NODE 0x80 /* Node Table events */ +#define DBG_CHK_COND 0x1000 /* FCP Check condition flag */ + +/* These are debug printf masks */ +#define DBG_XRI 0x1000 /* Exchange events */ +#define DBG_IP_DATA 0x2000 /* IP traffic history */ +#define DBG_INTR 0x4000 /* Interrupts */ +#define DBG_IOCB_RSP 0x8000 /* IOCB Response ring events */ +#define DBG_IOCB_RSP_DATA 0x10000 /* IOCB Response ring events */ +#define DBG_IOCB_CMD 0x20000 /* IOCB Command ring events */ +#define DBG_IOCB_CMD_DATA 0x40000 /* IOCB Command ring events */ +#define DBG_FCP_DATA 0x100000/* FCP traffic history */ +#define DBG_ERROR 0x800000/* ERROR events */ + + +/* + * These definitions define SYNTAX errors that occur during the parsing + * of binding config lines. 
+ */ +#define FC_SYNTAX_OK 0 +#define FC_SYNTAX_OK_BUT_NOT_THIS_BRD 1 +#define FC_SYNTAX_ERR_ASC_CONVERT 2 +#define FC_SYNTAX_ERR_EXP_COLON 3 +#define FC_SYNTAX_ERR_EXP_LPFC 4 +#define FC_SYNTAX_ERR_INV_LPFC_NUM 5 +#define FC_SYNTAX_ERR_EXP_T 6 +#define FC_SYNTAX_ERR_INV_TARGET_NUM 7 +#define FC_SYNTAX_ERR_EXP_D 8 +#define FC_SYNTAX_ERR_INV_DEVICE_NUM 9 +#define FC_SYNTAX_ERR_EXP_NULL_TERM 13 + +/*****************************************************************************/ +/* device states */ +/*****************************************************************************/ + +#define CLOSED 0 /* initial device state */ +#define DEAD 1 /* fatal hardware error encountered */ +#define LIMBO 2 /* error recovery period */ +#define OPEN_PENDING 3 /* open initiated */ +#define OPENED 4 /* opened successfully, functioning */ +#define CLOSE_PENDING 5 /* close initiated */ + +#define NORMAL_OPEN 0x0 /* opened in normal mode */ +#define DIAG_OPEN 0x1 /* opened in diagnostics mode */ + +/*****************************************************************************/ +/* This is the board information structure for the fc device */ +/*****************************************************************************/ + +struct fc_q { + uchar *q_first; /* queue first element */ + uchar *q_last; /* queue last element */ + ushort q_cnt; /* current length of queue */ + ushort q_max; /* max length queue can get */ +}; +typedef struct fc_q Q; + +typedef struct fclink { + struct fclink *_f; + struct fclink *_b; +} FCLINK; + +/* +*** fc_enque - enqueue element 'x' after element 'p' in +*** a queue without protection for critical sections. 
+*/ +#define fc_enque(x,p) {(((FCLINK *)x)->_f = ((FCLINK *)p)->_f, \ + ((FCLINK *)x)->_b = ((FCLINK *)p), \ + ((FCLINK *)p)->_f->_b = ((FCLINK *)x), \ + ((FCLINK *)p)->_f = ((FCLINK *)x));} + +/* +*** fc_deque - dequeue element 'x' (the user must make +*** sure it's not the queue header) +*/ +#define fc_deque(x) {(((FCLINK *)x)->_b->_f = ((FCLINK *)x)->_f, \ + ((FCLINK *)x)->_f->_b = ((FCLINK *)x)->_b, \ + ((FCLINK *)x)->_b = 0, \ + ((FCLINK *)x)->_f = 0);} + +/* This structure is used when allocating a buffer pool. + */ +typedef struct mbuf_info { + int size; /* Specifies the number of bytes to allocate. */ + int align; /* The desired address boundary. */ + + int flags; +#define FC_MBUF_DMA 0x1 /* blocks are for DMA */ +#define FC_MBUF_PHYSONLY 0x2 /* For malloc - map a given virtual address + * to physical (skip the malloc). For free - + * just unmap the given physical address + * (skip the free). + */ +#define FC_MBUF_IOCTL 0x4 /* called from dfc_ioctl */ +#define FC_MBUF_UNLOCK 0x8 /* called with driver unlocked */ + void * virt; /* specifies the virtual buffer pointer */ + void * phys; /* specifies the physical buffer pointer */ + ulong * data_handle; + ulong * dma_handle; +} MBUF_INFO; + + +struct fc_match { + uchar * fc_mptr; + uchar * virt; /* virtual address ptr */ + uchar * phys; /* mapped address */ + ulong * data_handle; + ulong * dma_handle; +}; +typedef struct fc_match MATCHMAP; + +struct dfc_match { + MATCHMAP dfc; + uint32 dfc_size; + int dfc_flag; +}; +typedef struct dfc_match DMATCHMAP; + +/* Kernel level Event structure */ +struct fcEvent { + uint32 evt_handle; + uint32 evt_mask; + uint32 evt_type; + uint32 evt_data0; + ushort evt_sleep; + ushort evt_flags; + void *evt_next; + void *evt_data1; + void *evt_data2; +}; +typedef struct fcEvent fcEvent; + +/* Define for e_mode */ +#define E_SLEEPING_MODE 0x0001 + +/* Define for e_flag */ +#define E_GET_EVENT_ACTIVE 0x0001 + +/* Kernel level Event Header */ +struct fcEvent_header { + uint32 e_handle; + 
uint32 e_mask; + ushort e_mode; + ushort e_flag; + fcEvent * e_head; + fcEvent * e_tail; + void * e_next_header; +/* Add something here */ +}; +typedef struct fcEvent_header fcEvent_header; + +/* Structures using for clock / timeout handling */ +typedef struct fcclock { + struct fcclock *cl_fw; /* forward linkage */ + union { + struct { + ushort cl_soft_arg; + ushort cl_soft_cmd; + } c1; + struct fcclock *cl_bw; /* backward linkage */ + } un; + uint32 cl_tix; /* differential number of clock ticks */ + void (*cl_func)(void *, void *, void *); + void * cl_p_dev_ctl; + void * cl_arg1; /* argument 1 to function */ + void * cl_arg2; /* argument 2 to function */ +} FCCLOCK; + +#define cl_bw un.cl_bw + +typedef struct clkhdr { + FCCLOCK *cl_f; + FCCLOCK * cl_b; + uint32 count; /* number of clock blocks in list */ +} CLKHDR; + +#define FC_NUM_GLBL_CLK 4 /* number of global clock blocks */ + +typedef struct fcclock_info { + CLKHDR fc_clkhdr; /* fc_clock queue head */ + uint32 ticks; /* elapsed time since initialization */ + uint32 Tmr_ct; /* Timer expired count */ + uint32 timestamp[2]; /* SMT 64 bit timestamp */ + void * clktimer; /* used for scheduling clock routine */ + Simple_lock clk_slock; /* clock routine lock */ + FCCLOCK clk_block[FC_NUM_GLBL_CLK]; /* global clock blocks */ +} FCCLOCK_INFO; + + +/* Structure used to access adapter rings */ +struct fc_ring { + IOCBQ * fc_iocbhd; /* ptr to head iocb rsp list for ring */ + IOCBQ * fc_iocbtl; /* ptr to tail iocb rsp list for ring */ + uchar fc_numCiocb; /* number of command iocb's per ring */ + uchar fc_numRiocb; /* number of rsp iocb's per ring */ + uchar fc_rspidx; /* current index in response ring */ + uchar fc_cmdidx; /* current index in command ring */ + uchar fc_ringno; /* ring number */ + uchar fc_xmitstate; /* state needed for xmit */ + void * fc_cmdringaddr; /* virtual offset for cmd rings */ + void * fc_rspringaddr; /* virtual offset for rsp rings */ + ushort fc_iotag; /* used to identify I/Os */ + + ushort 
fc_missbufcnt; /* buf cnt we need to repost */ + ushort fc_wdt_inited; /* timer is inited */ + ushort fc_bufcnt; /* cnt of buffers posted */ + uchar * fc_mpon; /* index ptr for match structure */ + uchar * fc_mpoff; /* index ptr for match structure */ + uchar * fc_binfo; /* ptr to FC_BRD_INFO for ring */ + Q fc_tx; /* iocb command queue */ + Q fc_txp; /* iocb pending queue */ + FCCLOCK * fc_wdt; /* timer for ring activity */ + int fc_ringtmo; /* timer timeout value */ +}; +typedef struct fc_ring RING; + +/* Defines for nlp_state (uchar) */ +#define NLP_UNUSED 0 /* unused NL_PORT entry */ +#define NLP_LIMBO 0x1 /* entry needs to hang around for wwpn / sid */ +#define NLP_LOGOUT 0x2 /* NL_PORT is not logged in - entry is cached */ +#define NLP_PLOGI 0x3 /* PLOGI was sent to NL_PORT */ +#define NLP_LOGIN 0x4 /* NL_PORT is logged in / login REG_LOGINed */ +#define NLP_PRLI 0x5 /* PRLI was sent to NL_PORT */ +#define NLP_ALLOC 0x6 /* NL_PORT is ready to initiate adapter I/O */ +#define NLP_SEED 0x7 /* seed scsi id bind in table */ + +/* Defines for nlp_flag (uint32) */ +#define NLP_RPI_XRI 0x1 /* creating xri for entry */ +#define NLP_REQ_SND 0x2 /* sent ELS request for this entry */ +#define NLP_RM_ENTRY 0x4 /* Remove this entry */ +#define NLP_FARP_SND 0x8 /* sent FARP request for this entry */ +#define NLP_NS_NODE 0x10 /* Authenticated entry by NameServer */ +#define NLP_NODEV_TMO 0x20 /* nodev timeout is running for node */ +#define NLP_REG_INP 0x40 /* Reglogin in progress for node */ +#define NLP_UNREG_LOGO 0x80 /* Perform LOGO after unreglogin */ +#define NLP_RCV_PLOGI 0x100 /* Rcv'ed PLOGI from remote system */ +#define NLP_MAPPED 0x200 /* Node is now mapped */ +#define NLP_UNMAPPED 0x400 /* Node is now unmapped */ +#define NLP_BIND 0x800 /* Node is now bound */ +#define NLP_LIST_MASK 0xe00 /* mask to see what list node is on */ +#define NLP_SND_PLOGI 0x1000 /* Flg to indicate send PLOGI */ +#define NLP_REQ_SND_PRLI 0x2000 /* Send PRLI ELS command */ +#define 
NLP_REQ_SND_ADISC 0x2000 /* Send ADISC ELS command */ +#define NLP_REQ_SND_PDISC 0x2000 /* Send PDISC ELS command */ +#define NLP_NS_REMOVED 0x4000 /* Node removed from NameServer */ + +/* Defines for nlp_action (uchar) */ +#define NLP_DO_ADDR_AUTH 0x1 /* Authenticating addr for entry */ +#define NLP_DO_DISC_START 0x2 /* start discovery on this entry */ +#define NLP_DO_RSCN 0x4 /* Authenticate entry by RSCN */ +#define NLP_DO_RNID 0x8 /* Authenticate entry by RSCN */ +#define NLP_DO_SCSICMD 0x10 /* Authenticate entry by RSCN */ +#define NLP_DO_CT_USR 0x20 /* Authenticate entry by RSCN */ +#define NLP_DO_CT_DRVR 0x40 /* Authenticate entry by RSCN */ + +/* Defines for nlp_type (uchar) */ +#define NLP_FABRIC 0x1 /* this entry represents the Fabric */ +#define NLP_FCP_TARGET 0x2 /* this entry is an FCP target */ +#define NLP_IP_NODE 0x4 /* this entry is an IP node */ +#define NLP_SEED_WWPN 0x10 /* Entry scsi id is seeded for WWPN */ +#define NLP_SEED_WWNN 0x20 /* Entry scsi id is seeded for WWNN */ +#define NLP_SEED_DID 0x40 /* Entry scsi id is seeded for DID */ +#define NLP_SEED_MASK 0x70 /* mask for seeded flags */ +#define NLP_AUTOMAP 0x80 /* Entry was automap'ed */ + +/* Defines for list searches */ +#define NLP_SEARCH_MAPPED 0x1 /* search mapped */ +#define NLP_SEARCH_UNMAPPED 0x2 /* search unmapped */ +#define NLP_SEARCH_BIND 0x4 /* search bind */ +#define NLP_SEARCH_ALL 0x7 /* search all lists */ + +/* Defines for nlp_fcp_info */ +#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ + +struct nlp_nodeList { /* NOTE: any changes to this structure + * must be dup'ed in fcdds.h, cnode_t. 
+ */ + void * nlp_listp_next; /* Node table ptr bind / map / unmap list */ + void * nlp_listp_prev; /* Node table ptr bind / map / unmap list */ + uchar nlp_state; /* state transition indicator */ + uchar nlp_action; /* Action being performed on node */ + uchar nlp_type; /* node type identifier */ + uchar nlp_alpa; /* SCSI device AL_PA */ + ushort nlp_Rpi; /* login id returned by REG_LOGIN */ + ushort nlp_Xri; /* output exchange id for RPI */ + ushort capabilities; + ushort sync; + uint32 target_scsi_options; + uint32 nlp_flag; /* entry flags */ + uint32 nlp_DID; /* fibre channel D_ID of entry */ + uint32 nlp_time; /* timestamp */ + uint32 nlp_oldDID; + NAME_TYPE nlp_portname; /* port name */ + NAME_TYPE nlp_nodename; /* node name */ + struct { /* device id - for FCP */ + uchar nlp_pan; /* pseudo adapter number */ + uchar nlp_sid; /* scsi id */ + uchar nlp_fcp_info; /* Remote class info */ + uchar nlp_ip_info; /* Remote class info */ + } id; + uchar * nlp_bp; /* save buffer ptr - for IP */ + uchar * nlp_targetp; /* Node table ptr for target */ +}; +typedef struct nlp_nodeList NODELIST; + +/* For now stick fc_lun_t in here, + * should move to fc_os.h eventually. 
+ */ +typedef uint32 fc_lun_t; + +#define mapLun(di) ((di)->lun_id) + +#define NLP_MAXREQ 32 /* max num of outstanding NODELIST requests */ +#define NLP_MAXSID 16 /* max number of scsi devices / adapter */ +#define NLP_MAXPAN 32 /* max number of pseudo adapters */ +#define PA_MASK 0x1f /* mask devno to get pseudo adapter number */ +#define DABS 5 /* convert devno to adapter number bit shift */ +#define FC_MIN_QFULL 1 /* lowest we can decrement throttle + to on qfull */ + +#define FC_SCSID(pan, sid) ((uint32)((pan << 16) | sid)) /* For logging */ + +/* Max number of fibre channel devices supported in network */ +#define NLP_MAXRPI 512 /* firmware supports 512 rpis [0-511] */ + +#define FC_MAXLOOP 126 /* max devices supported on a single fc loop */ +#define FC_MAX_MCAST 16 /* max number of multicast addresses */ +#define MULTI_BIT_MASK (0x01) /* Multicast Bit Mask */ +#define FC_MAX_ADPTMSG (8*28) /* max size of a msg from adapter */ +#define FC_INIT_RING_BUF 12 + +struct fc_networkhdr { + NAME_TYPE fc_destname; /* destination port name */ + NAME_TYPE fc_srcname; /* source port name */ +}; +typedef struct fc_networkhdr NETHDR; + +#define MEM_NLP 0 /* memory segment to hold node list entries */ +#define MEM_IOCB 1 /* memory segment to hold iocb commands */ +#define MEM_CLOCK 1 /* memory segment to hold clock blocks */ +#define MEM_MBOX 2 /* memory segment to hold mailbox cmds */ +#define MEM_BUF 3 /* memory segment to hold buffer data */ +#define MEM_BPL 3 /* and to hold buffer ptr lists - SLI2 */ +#define FC_MAX_SEG 4 + +#define MEM_SEG_MASK 0xff /* mask used to mask off the priority bit */ +#define MEM_PRI 0x100 /* Priority bit: set to exceed low water */ + +#define MIN_CLK_BLKS 256 + +struct fc_memseg { + uchar *fc_memptr; /* ptr to memory blocks */ + uchar *fc_endmemptr; /* ptr to last memory block */ + uchar *fc_memhi; /* highest address in pool */ + uchar *fc_memlo; /* lowest address in pool */ + ushort fc_memsize; /* size of memory blocks */ + ushort 
fc_numblks; /* number of memory blocks */ + ushort fc_free; /* number of free memory blocks */ + ushort fc_memflag; /* what to do when list is exhausted */ + ushort fc_lowmem; /* low water mark, used w/MEM_PRI flag */ +}; +typedef struct fc_memseg MEMSEG; + +#define FC_MEM_ERR 1 /* return error memflag */ +#define FC_MEM_GETMORE 2 /* get more memory memflag */ +#define FC_MEM_DMA 4 /* blocks are for DMA */ +#define FC_MEM_LOWHIT 8 /* low water mark was hit */ + +#define FC_MEMPAD 16 /* offset used for a FC_MEM_DMA buffer */ + +/* + * Board stat counters + */ +struct fc_stats { + uint32 chipRingFree; + uint32 cmdCreateXri; + uint32 cmdQbuf; + uint32 elsCmdIocbInval; + uint32 elsCmdPktInval; + uint32 elsLogiCol; + uint32 elsRetryExceeded; + uint32 elsStrayXmitCmpl; + uint32 elsXmitCmpl; + uint32 elsXmitErr; + uint32 elsXmitFrame; + uint32 elsXmitRetry; + uint32 elsRcvDrop; + uint32 elsRcvFrame; + uint32 elsRcvRSCN; + uint32 elsRcvFARP; + uint32 elsRcvFARPR; + uint32 elsRcvFLOGI; + uint32 elsRcvPLOGI; + uint32 elsRcvADISC; + uint32 elsRcvPDISC; + uint32 elsRcvFAN; + uint32 elsRcvLOGO; + uint32 elsRcvPRLO; + uint32 elsRcvRRQ; + uint32 frameRcvBcast; + uint32 frameRcvMulti; + uint32 hostRingFree; + uint32 iocbCmdInval; + uint32 iocbRingBusy; + uint32 IssueIocb; + uint32 iocbRsp; + uint32 issueMboxCmd; + uint32 linkEvent; + uint32 xmitnoroom; + uint32 NoIssueIocb; + uint32 mapPageErr; + uint32 mboxCmdBusy; + uint32 mboxCmdInval; + uint32 mboxEvent; + uint32 mboxStatErr; + uint32 memAllocErr; + uint32 noRpiList; + uint32 noVirtPtr; + uint32 ringEvent; + uint32 strayXmitCmpl; + uint32 frameXmitDelay; + uint32 xriCmdCmpl; + uint32 xriStatErr; + uint32 mbufcopy; + uint32 LinkUp; + uint32 LinkDown; + uint32 LinkMultiEvent; + uint32 NoRcvBuf; + uint32 fcpCmd; + uint32 fcpCmpl; + uint32 fcpStrayCmpl; + uint32 fcpFirstCheck; + uint32 fcpGood; + uint32 fcpRspErr; + uint32 fcpRemoteStop; + uint32 fcpLocalErr; + uint32 fcpLocalTmo; + uint32 fcpLocalNores; + uint32 fcpLocalBufShort; 
+ uint32 fcpLocalSfw; + uint32 fcpLocalTxDMA; + uint32 fcpLocalRxDMA; + uint32 fcpLocalinternal; + uint32 fcpLocalCorrupt; + uint32 fcpLocalIllFrm; + uint32 fcpLocalDupFrm; + uint32 fcpLocalLnkCtlFrm; + uint32 fcpLocalLoopOpen; + uint32 fcpLocalInvalRpi; + uint32 fcpLocalLinkDown; + uint32 fcpLocalOOO; + uint32 fcpLocalAbtInp; + uint32 fcpLocalAbtReq; + uint32 fcpLocal; + uint32 fcpPortRjt; + uint32 fcpPortBusy; + uint32 fcpError; + uint32 fcpScsiTmo; + uint32 fcpSense; + uint32 fcpNoDevice; + uint32 fcMallocCnt; + uint32 fcMallocByte; + uint32 fcFreeCnt; + uint32 fcFreeByte; + uint32 fcMapCnt; + uint32 fcUnMapCnt; + uint32 fcpRsvd0; + uint32 fcpRsvd1; + uint32 fcpRsvd2; + uint32 fcpRsvd3; + uint32 fcpRsvd4; + uint32 fcpRsvd5; + uint32 fcpRsvd6; + uint32 fcpRsvd7; + uint32 fcpRsvd8; +}; +typedef struct fc_stats fc_stat_t; + + +/* Defines / Structures used to support IP profile */ + +#define FC_MIN_MTU 0 /* minimum size FC message */ +#define FC_MAX_MTU 65280 /* maximum size FC message */ + +/* structure for MAC header */ +typedef struct { + uchar dest_addr[MACADDR_LEN]; /* 48 bit unique address */ + uchar src_addr[MACADDR_LEN]; /* 48 bit unique address */ + ushort llc_len; /* length of LLC data */ +} emac_t; +#define HDR_LEN 14 /* MAC header size */ + +/* structure for LLC/SNAP header */ +typedef struct { + unsigned char dsap; /* DSAP */ + unsigned char ssap; /* SSAP */ + unsigned char ctrl; /* control field */ + unsigned char prot_id[3]; /* protocol id */ + unsigned short type; /* type field */ +} snaphdr_t; + +struct fc_hdr { + emac_t mac; + snaphdr_t llc; +}; + +struct fc_nethdr { + NETHDR fcnet; + snaphdr_t llc; +}; + +#define FC_LLC_SSAP 0xaa /* specifies LLC SNAP header */ +#define FC_LLC_DSAP 0xaa /* specifies LLC SNAP header */ +#define FC_LLC_CTRL 3 /* UI */ + + +/* + * The fc_buf structure is used to communicate SCSI commands to the adapter + */ +typedef struct sc_buf T_SCSIBUF; +#define SET_ADAPTER_STATUS(bp, val) bp->general_card_status = val; + 
+#define P_DEPTH ((FC_MAX_TRANSFER/PAGESIZE) + 2) + +typedef struct fc_buf { + FCP_CMND fcp_cmd; /* FCP command - This MUST be first */ + FCP_RSP fcp_rsp; /* FCP response - This MUST be next */ + struct fc_buf *fc_fwd; /* forward list pointer */ + struct fc_buf *fc_bkwd; /* backward list pointer */ + char *phys_adr; /* physical address of this fc_buf */ + T_SCSIBUF *sc_bufp; /* pointer to sc_buf for this cmd */ + struct dev_info *dev_ptr; /* pointer to SCSI device structure */ + uint32 timeout; /* Fill in how OS represents a time stamp */ + /* Fill in any OS specific members */ + int offset; + ulong * fc_cmd_dma_handle; + ushort iotag; /* iotag for this cmd */ + ushort flags; /* flags for this cmd */ +#define DATA_MAPPED 0x0001 /* data buffer has been D_MAPed */ +#define FCBUF_ABTS 0x0002 /* ABTS has been sent for this cmd */ +#define FCBUF_ABTS2 0x0004 /* ABTS has been sent twice */ +#define FCBUF_INTERNAL 0x0008 /* Internal generated driver command */ + + /* + * Save the buffer pointer list for later use. 
+ * In SLI2, the fc_deq_fcbuf_active uses this pointer to + * free up MEM_BPL buffer + */ + MATCHMAP *bmp; +} fc_buf_t; + +#define FCP_CONTINUE 0x01 /* flag for issue_fcp_cmd */ +#define FCP_REQUEUE 0x02 /* flag for issue_fcp_cmd */ +#define FCP_EXIT 0x04 /* flag for issue_fcp_cmd */ + +/* + * The fcp_table structure is used to relate FCP iotags to an fc_buf + */ + +typedef struct fcp_table { + fc_buf_t *fcp_array[MAX_FCP_CMDS];/* fc_buf pointers indexed by iotag */ +} FCPTBL; + + +/* + * SCSI node structure for each open Fibre Channel node + */ + +typedef struct scsi_node { + struct fc_dev_ctl * ap; /* adapter structure ptr */ + struct dev_info * lunlist; /* LUN structure list for this node */ + NODELIST * nlp; /* nlp structure ptr */ + struct dev_info * last_dev; /* The last device had an I/O */ + FCCLOCK * nodev_tmr; /* Timer for nodev-tmo */ + int devno; /* pseudo adapter major/minor number */ + int max_lun; /* max number of luns */ + ushort tgt_queue_depth; /* Max throttle of this node */ + ushort num_active_io; /* Total number of active I/O */ + ushort rpi; /* Device rpi */ + ushort last_good_rpi; /* Last known good device rpi */ + ushort scsi_id; /* SCSI ID of this device */ + ushort flags; +#define FC_NODEV_TMO 0x1 /* nodev-tmo tmr started and expired */ +#define FC_FCP2_RECOVERY 0x2 /* set FCP2 Recovery for commands */ +#define RETRY_RPTLUN 0x4 /* Report Lun has been retried */ + ushort addr_mode; /* SCSI address method */ +#define PERIPHERAL_DEVICE_ADDRESSING 0 +#define VOLUME_SET_ADDRESSING 1 +#define LOGICAL_UNIT_ADDRESSING 2 + ushort rptlunstate; /* For report lun SCSI command */ +#define REPORT_LUN_REQUIRED 0 +#define REPORT_LUN_ONGOING 1 +#define REPORT_LUN_COMPLETE 2 + void *virtRptLunData; + void *physRptLunData; +} node_t; + +/* Values for node_flag and fcp_mapping are in fcdds.h */ + +/* + * SCSI device structure for each open LUN + */ + +#define MAX_FCBUF_PAGES 6 /* This value may need to change when + * lun-queue-depth > 256 in lpfc.conf + */ + 
+typedef struct dev_info { + node_t *nodep; /* Pointer to the node structure */ + struct dev_info *next; /* Used for list of LUNs on this node */ + fc_lun_t lun_id; /* LUN ID of this device */ + uchar first_check; /* flag for first check condition */ +#define FIRST_CHECK_COND 0x1 +#define FIRST_IO 0x2 + + uchar opened; + uchar ioctl_wakeup; /* wakeup sleeping ioctl call */ + int ioctl_event; + int ioctl_errno; + int stop_event; + int active_io_count; + + struct dev_info *DEVICE_WAITING_fwd; + struct dev_info *ABORT_BDR_fwd; + struct dev_info *ABORT_BDR_bkwd; + + long qfullcnt; + /* Fill in any OS specific members */ + T_SCSIBUF *scp; + void *scsi_dev; + long scpcnt; + long qcmdcnt; + long iodonecnt; + long errorcnt; + /* + * A command lives in a pending queue until it is sent to the HBA. + * Throttling constraints apply: + * No more than N commands total to a single target + * No more than M commands total to a single LUN on that target + * + * A command that has left the pending queue and been sent to the HBA + * is an "underway" command. We count underway commands, per-LUN, + * to obey the LUN throttling constraint. + * + * Because we only allocate enough fc_buf_t structures to handle N + * commands, per target, we implicitly obey the target throttling + * constraint by being unable to send a command when we run out of + * free fc_buf_t structures. + * + * We count the number of pending commands to determine whether the + * target has I/O to be issued at all. + * + * We use next_pending to rotor through the LUNs, issuing one I/O at + * a time for each LUN. This mechanism guarantees a fair distribution + * of I/Os across LUNs in the face of a target queue_depth lower than + * #LUNs*fcp_lun_queue_depth. 
+ */ + T_SCSIBUF *standby_queue_head; /* ptr to retry command queue */ + T_SCSIBUF *standby_queue_tail; /* ptr to retry command queue */ + uint32 standby_count; /* # of I/Os on standby queue */ + /* END: added by andy kong for SCSI */ + + ushort fcp_cur_queue_depth; /* Current maximum # cmds outstanding + * to dev; */ + ushort fcp_lun_queue_depth; /* maximum # cmds to each lun */ + T_SCSIBUF *pend_head; /* ptr to pending cmd queue */ + T_SCSIBUF *pend_tail; /* ptr to pending cmd queue */ + uint32 pend_count; +#define QUEUE_HEAD 1 +#define QUEUE_TAIL 0 + struct buf *clear_head; /* ptr to bufs to iodone after clear */ + uint32 clear_count; + + uchar numfcbufs; /* number of free fc_bufs */ + uchar stop_send_io; /* stop sending any io to this dev */ + + +#define ACTIVE 0 +#define STOPPING 1 +#define HALTED 2 +#define RESTART_WHEN_READY 3 +#define ACTIVE_PASSTHRU 4 +#define WAIT_RESUME 8 +#define WAIT_INFO 10 +#define WAIT_ACA 11 +#define WAIT_FLUSH 12 +#define WAIT_HEAD_RESUME 13 + uchar queue_state; /* device general queue state */ + /* ACTIVE, STOPPING, or HALTED */ + +#define SCSI_TQ_HALTED 0x0001 /* The transaction Q is halted */ +#define SCSI_TQ_CLEARING 0x0002 /* The transaction Q is clearing */ +#define SCSI_TQ_CLEAR_ACA 0x0004 /* a CLEAR_ACA is PENDING */ +#define SCSI_LUN_RESET 0x0008 /* sent LUN_RESET not of TARGET_RESET */ +#define SCSI_ABORT_TSET 0x0010 /* BDR requested but not yet sent */ +#define SCSI_TARGET_RESET 0x0020 /* a SCSI BDR is active for device */ +#define CHK_SCSI_ABDR 0x0038 /* value used to check tm flags */ +#define QUEUED_FOR_ABDR 0x0040 /* dev_ptr is on ABORT_BDR queue */ +#define NORPI_RESET_DONE 0x0100 /* BOGUS_RPI Bus Reset attempted */ +#define DONT_LOG_INVALID_RPI 0x0200 /* if flag is set, the I/O issuing */ + /* to an invalid RPI won't be logged */ +#define SCSI_IOCTL_INPROGRESS 0x0400 /* An ioctl is in progress */ +#define SCSI_SEND_INQUIRY_SN 0x1000 /* Serial number inq should be sent */ +#define SCSI_INQUIRY_SN 0x2000 /* Serial 
number inq has been sent */ +#define SCSI_INQUIRY_P0 0x4000 /* Page 0 inq has been sent */ +#define SCSI_INQUIRY_CMD 0x6000 /* Serial number or Page 0 inq sent */ +#define SCSI_DEV_RESET 0x8000 /* device is in process of resetting */ + ushort flags; /* flags for the drive */ + + struct dio vlist; /* virtual address of fc_bufs */ + struct dio blist; /* physical addresses of fc_bufs */ + fc_buf_t * fcbuf_head; /* head ptr to list of free fc_bufs */ + fc_buf_t * fcbuf_tail; /* tail ptr to list of free fc_bufs */ + + uchar sense[MAX_FCP_SNS]; /* Temporary request sense buffer */ + uchar sense_valid; /* flag to indicate new sense data */ + uchar sizeSN; /* size of InquirySN */ + uint32 sense_length; /* new sense data length */ + +#define MAX_QFULL_RETRIES 255 +#define MAX_QFULL_RETRY_INTERVAL 1000 /* 1000 (ms) */ + short qfull_retries; /* number of retries on a qfull condition */ + short qfull_retry_interval; /* the interval for qfull retry */ + void * qfull_tmo_id; + T_SCSIBUF scbuf; /* sc_buf for task management cmds */ + +} dvi_t; + + +typedef struct node_info_hash { + node_t *node_ptr; /* SCSI device node pointer */ + uint32 node_flag; /* match node on WWPN WWNN or DID */ + union { + NAME_TYPE dev_nodename; /* SCSI node name */ + NAME_TYPE dev_portname; /* SCSI port name */ + uint32 dev_did; /* SCSI did */ + } un; +} nodeh_t; + +/* + * LONGWAIT is used to define a default scsi_timeout value in seconds. 
+ */ +#define LONGWAIT 30 + + +/* +*** Board Information Data Structure +*/ + +struct fc_brd_info { + /* Configuration Parameters */ + int fc_ffnumrings; /* number of FF rings being used */ + NAME_TYPE fc_nodename; /* fc nodename */ + NAME_TYPE fc_portname; /* fc portname */ + uint32 fc_pref_DID; /* preferred D_ID */ + uchar fc_pref_ALPA; /* preferred AL_PA */ + uchar fc_deferip; /* defer IP processing */ + uchar fc_nummask[4]; /* number of masks/rings being used */ + uchar fc_rval[6]; /* rctl for ring assume mask is 0xff */ + uchar fc_tval[6]; /* type for ring assume mask is 0xff */ + uchar ipAddr[16]; /* For RNID support */ + ushort ipVersion; /* For RNID support */ + ushort UDPport; /* For RNID support */ + uint32 fc_edtov; /* E_D_TOV timer value */ + uint32 fc_arbtov; /* ARB_TOV timer value */ + uint32 fc_ratov; /* R_A_TOV timer value */ + uint32 fc_rttov; /* R_T_TOV timer value */ + uint32 fc_altov; /* AL_TOV timer value */ + uint32 fc_crtov; /* C_R_TOV timer value */ + uint32 fc_citov; /* C_I_TOV timer value */ + uint32 fc_myDID; /* fibre channel S_ID */ + uint32 fc_prevDID; /* previous fibre channel S_ID */ + + /* The next three structures get DMA'ed directly into, + * so they must be in the first page of the adapter structure! 
+ */ + volatile SERV_PARM fc_sparam; /* buffer for our service parameters */ + volatile SERV_PARM fc_fabparam; /* fabric service parameters buffer */ + volatile uchar alpa_map[128]; /* AL_PA map from READ_LA */ + + uchar fc_mbox_active; /* flag for mailbox in use */ + uchar fc_process_LA; /* flag to process Link Attention */ + uchar fc_ns_retry; /* retries for fabric nameserver */ + uchar fc_sli; /* configured SLI, 1 or 2 */ + int fc_nlp_cnt; /* cnt outstanding NODELIST requests */ + int fc_open_count; /* count of devices opened */ + int fc_rscn_id_cnt; /* count of RSCNs dids in list */ + uint32 fc_rscn_id_list[FC_MAX_HOLD_RSCN]; + Q fc_plogi; /* ELS PLOGI cmd queue */ + Q fc_mbox; /* mailbox cmd queue */ + Q fc_rscn; /* RSCN cmd queue */ + Q fc_defer_rscn; /* deferred RSCN cmd queue */ + uchar * fc_mbbp; /* buffer pointer for mbox command */ + uchar * fc_p_dev_ctl; /* pointer to driver device ctl */ + + /* Board dependent variables */ + int fc_flag; /* FC flags */ + int fc_brd_no; /* FC board number */ + int fc_ints_disabled; /* DEBUG: interrupts disabled */ + volatile int fc_ffstate; /* Current state of FF init process */ + int fc_interrupts; /* number of fc interrupts */ + int fc_cnt; /* generic counter for board */ + int fc_topology; /* link topology, from LINK INIT */ + int fc_firstopen; /* First open to driver flag */ + int fc_msgidx; /* current index to adapter msg buf */ + uint32 fc_eventTag; /* event tag for link attention */ + ulong fc_fabrictmo; /* timeout for fabric timer */ + uchar fc_multi; /* number of multicast addresses */ + uchar fc_linkspeed; /* Link speed after last READ_LA */ + uchar fc_max_data_rate; /* max_data_rate */ + + void * physaddr[FC_MAX_IP_VECS]; /* used in mbuf_to_iocb for */ + uint32 cntaddr[FC_MAX_IP_VECS]; /* phys mapping */ + + uchar fc_busflag; /* bus access flags */ +#define FC_HOSTPTR 2 /* Default is ring pointers in SLIM */ + + volatile uint32 * fc_mboxaddr; /* virtual offset for mailbox/SLIM */ + volatile uint32 
fc_BCregaddr; /* virtual offset for BIU config reg */ + volatile uint32 fc_HAregaddr; /* virtual offset for host attn reg */ + volatile uint32 fc_HCregaddr; /* virtual offset for host ctl reg */ + volatile uint32 fc_FFregaddr; /* virtual offset for FF attn reg */ + volatile uint32 fc_STATregaddr; /* virtual offset for status reg */ + + + MATCHMAP fc_slim2; /* pointers to slim for SLI-2 */ + + void *fc_iomap_io; /* starting address for registers */ + void *fc_iomap_mem; /* starting address for SLIM */ + /* Fill in any OS specific members */ + /* dma handle, mem map, pci config access */ + + + + + + FCCLOCK * fc_mbox_wdt; /* timer for mailbox */ + FCCLOCK * fc_fabric_wdt; /* timer for fabric */ + FCCLOCK * fc_rscn_disc_wdt; /* timer for RSCN discovery */ + fc_stat_t fc_stats; /* fc driver generic statistics */ + + NAME_TYPE fc_multiaddr[FC_MAX_MCAST];/* multicast adrs for interface */ + NODELIST * fc_nlpbind_start; /* ptr to bind list */ + NODELIST * fc_nlpbind_end; /* ptr to bind list */ + NODELIST * fc_nlpunmap_start; /* ptr to unmap list */ + NODELIST * fc_nlpunmap_end; /* ptr to unmap list */ + NODELIST * fc_nlpmap_start; /* ptr to map list */ + NODELIST * fc_nlpmap_end; /* ptr to map list */ + ushort fc_bind_cnt; + ushort fc_unmap_cnt; + ushort fc_map_cnt; + ushort fc_rpi_used; + NODELIST * fc_nlplookup[NLP_MAXRPI]; /* ptr to active D_ID / RPIs */ + NODELIST fc_fcpnodev; /* nodelist entry for no device */ + uint32 nlptimer; /* timestamp for nlplist entry */ + ushort fc_capabilities; /* default value for NODELIST caps */ + ushort fc_sync; /* default value for NODELIST sync */ + + nodeh_t device_queue_hash[MAX_FC_TARGETS]; /* SCSI node pointers */ + FCPTBL * fc_table; /* FCP iotag table pointer */ + IOCBQ * fc_delayxmit; /* List of IOCBs for delayed xmit */ + + + char fc_adaptermsg[FC_MAX_ADPTMSG]; /* adapter printf messages */ + char fc_SerialNumber[32]; /* adapter Serial Number */ + char fc_OptionROMVersion[32]; /* adapter BIOS / Fcode version */ + MEMSEG 
fc_memseg[FC_MAX_SEG]; /* memory for buffers / structures */ + RING fc_ring[MAX_RINGS]; +}; + +typedef struct fc_brd_info FC_BRD_INFO; + + +/* Host Attn reg */ +#define FC_HA_REG(binfo,sa) ((volatile uint32 *)((volatile char *)sa + (binfo->fc_HAregaddr))) +#define FC_FF_REG(binfo,sa) ((volatile uint32 *)((volatile char *)sa + (binfo->fc_FFregaddr))) + +/* Host Status reg */ +#define FC_STAT_REG(binfo,sa) ((volatile uint32 *)((volatile char *)sa +(binfo->fc_STATregaddr))) + +/* Host Cntl reg */ +#define FC_HC_REG(binfo,sa) ((volatile uint32 *)((volatile char *)sa + (binfo->fc_HCregaddr))) + +/* BIU Configuration reg */ +#define FC_BC_REG(binfo,sa) ((volatile uint32 *)((volatile char *)sa + (binfo->fc_BCregaddr))) + +/* SLIM defines for SLI-1 */ +#define FC_MAILBOX(binfo,sa) ((MAILBOX *)((volatile char *)sa + ((uint32)((ulong)binfo->fc_mboxaddr)))) + +/* SLIM defines for SLI-2 */ +#define FC_SLI2_MAILBOX(binfo) ((MAILBOX *)(binfo->fc_mboxaddr)) + +#define FC_IOCB(binfo,sa) ((volatile uchar *)((volatile char *)sa + ((uint32)binfo->fc_mboxaddr + 0x100))) + +#define FC_RING(ringoff,sa) ((volatile uchar *)((volatile char *)sa + (ulong)(ringoff))) + + + +/* Write 32-bit value to CSR register pointed to by regp */ +#define WRITE_CSR_REG(binfo, regp, val) fc_writel((uint32 *)(regp), (uint32)val) + +/* Write 32-bit value to SLIM address pointed to by regp */ +#define WRITE_SLIM_ADDR(binfo, regp, val) fc_writel((uint32 *)(regp), (uint32)val) + +/* Read 32-bit value from CSR register pointed to by regp */ +#define READ_CSR_REG(binfo, regp) fc_readl((uint32 *)(regp)) + +/* Read 32-bit value from SLIM address pointed to by regp */ +#define READ_SLIM_ADDR(binfo, regp) fc_readl((uint32 *)(regp)) + +/* Write wcnt 32-bit words to SLIM address pointed to by slimp */ +#define WRITE_SLIM_COPY(binfo, bufp, slimp, wcnt) \ + fc_write_toio((uint32*)bufp, (uint32*)slimp, (sizeof(uint32)*(wcnt))) + +/* Read wcnt 32-bit words from SLIM address pointed to by slimp */ +#define 
READ_SLIM_COPY(binfo, bufp, slimp, wcnt) \ + fc_read_fromio((uint32*)slimp, (uint32*)bufp, (sizeof(uint32)*(wcnt)));\ + +#define WRITE_FLASH_COPY(binfo, bufp, flashp, wcnt) \ + fc_write_toio(bufp, flashp ,(sizeof(uint32)*(wcnt))) + +#define READ_FLASH_COPY(binfo, bufp, flashp, wcnt) \ + fc_read_fromio(flashp, bufp, (sizeof(uint32)*(wcnt))) + + + + + +/* defines for fc_open_count */ +#define FC_LAN_OPEN 0x1 /* LAN open completed */ +#define FC_FCP_OPEN 0x2 /* FCP open completed */ + +/* defines for fc_flag */ +#define FC_FCP_WWNN 0x0 /* Match FCP targets on WWNN */ +#define FC_FCP_WWPN 0x1 /* Match FCP targets on WWPN */ +#define FC_FCP_DID 0x2 /* Match FCP targets on DID */ +#define FC_FCP_MATCH 0x3 /* Mask for match FCP targets */ +#define FC_PENDING_RING0 0x4 /* Defer ring 0 IOCB processing */ +#define FC_LNK_DOWN 0x8 /* Link is down */ +#define FC_PT2PT 0x10 /* pt2pt with no fabric */ +#define FC_PT2PT_PLOGI 0x20 /* pt2pt initiate PLOGI */ +#define FC_DELAY_DISC 0x40 /* Delay discovery till after cfglnk */ +#define FC_PUBLIC_LOOP 0x80 /* Public loop */ +#define FC_INTR_THREAD 0x100 /* In interrupt code */ +#define FC_LBIT 0x200 /* LOGIN bit in loopinit set */ +#define FC_RSCN_MODE 0x400 /* RSCN cmd rcv'ed */ +#define FC_RSCN_DISC_TMR 0x800 /* wait edtov before processing RSCN */ +#define FC_NLP_MORE 0x1000 /* More node to process in node tbl */ +#define FC_OFFLINE_MODE 0x2000 /* Interface is offline for diag */ +#define FC_LD_TIMER 0x4000 /* Linkdown timer has been started */ +#define FC_LD_TIMEOUT 0x8000 /* Linkdown timeout has occurred */ +#define FC_FABRIC 0x10000 /* We are fabric attached */ +#define FC_DELAY_PLOGI 0x20000 /* Delay login till unreglogin */ +#define FC_SLI2 0x40000 /* SLI-2 CONFIG_PORT cmd completed */ +#define FC_INTR_WORK 0x80000 /* Was there work last intr */ +#define FC_NO_ROOM_IP 0x100000 /* No room on IP xmit queue */ +#define FC_NO_RCV_BUF 0x200000 /* No Rcv Buffers posted IP ring */ +#define FC_BUS_RESET 0x400000 /* SCSI BUS RESET */ 
+#define FC_ESTABLISH_LINK 0x800000 /* Reestablish Link */ +#define FC_SCSI_RLIP 0x1000000 /* SCSI rlip routine called */ +#define FC_DELAY_NSLOGI 0x2000000 /* Delay NameServer till ureglogin */ +#define FC_NSLOGI_TMR 0x4000000 /* NameServer in process of logout */ +#define FC_DELAY_RSCN 0x8000000 /* Delay RSCN till ureg/reg login */ +#define FC_RSCN_DISCOVERY 0x10000000 /* Authenticate all devices after RSCN */ +#define FC_2G_CAPABLE 0x20000000 /* HBA is 2 Gig capable */ +#define FC_POLL_MODE 0x40000000 /* [SYNC] I/O is in the polling mode */ +#define FC_BYPASSED_MODE 0x80000000 /* Interface is offline for diag */ + +/* defines for fc_ffstate */ +#define FC_INIT_START 1 +#define FC_INIT_NVPARAMS 2 +#define FC_INIT_REV 3 +#define FC_INIT_PARTSLIM 4 +#define FC_INIT_CFGRING 5 +#define FC_INIT_INITLINK 6 +#define FC_LINK_DOWN 7 +#define FC_LINK_UP 8 +#define FC_INIT_SPARAM 9 +#define FC_CFG_LINK 10 +#define FC_FLOGI 11 +#define FC_LOOP_DISC 12 +#define FC_NS_REG 13 +#define FC_NS_QRY 14 +#define FC_NODE_DISC 15 +#define FC_REG_LOGIN 16 +#define FC_CLEAR_LA 17 +#define FC_READY 32 +#define FC_ERROR 0xff + +#define NADDR_LEN 6 /* MAC network address length */ + +/* This should correspond with the HBA API event structure */ +struct fc_hba_event { + uint32 fc_eventcode; + uint32 fc_evdata1; + uint32 fc_evdata2; + uint32 fc_evdata3; + uint32 fc_evdata4; +}; + +typedef struct fc_hba_event HBAEVENT; +#define MAX_HBAEVENT 32 + +/***************************************************************************/ +/* + * This is the whole device control area for the adapter + */ +/***************************************************************************/ + +struct fc_dev_ctl { /* NOTE: struct intr must be FIRST */ + struct intr ihs; /* interrupt handler control struct */ + ndd_t ndd; /* ndd for NS ndd chain */ + struct fc_dev_ctl *next; /* point to the next device */ + uchar phys_addr[NADDR_LEN]; /* actual network address in use */ + Simple_lock cmd_slock; /* adapter command lock 
*/ + void * ctl_correlator;/* point to the dd_ctl table */ + uchar device_state; /* main state of the device */ + uchar open_state; /* open state of the device */ + uchar intr_inited; /* flag for interrupt registration */ + uchar fcp_mapping; /* Map FCP devices based on WWNN WWPN or DID */ + ulong fc_ipri; /* save priority */ + int power_up; + uint32 dev_flag; /* device flags */ +#define FC_SCHED_CFG_INIT 2 /* schedule a call to fc_cfg_init() */ +#define FC_FULL_INFO_CALL 4 /* set if fc_info() can return full info */ +#define FC_NEEDS_DPC 0x10 + + uchar * devinfo; /* point to the device info */ + uchar * dip; /* point to device information */ + uchar * tran; /* point to device information */ + FCCLOCK * fc_estabtmo; /* link establishment timer */ + FCCLOCK * fc_waitflogi; /* link establishment timer */ + fc_dds_t dds; /* device dependent structure */ + fc_vpd_t vpd; /* vital product data */ + FC_BRD_INFO info; /* device specific info */ + uchar * mbufl_head; /* mbuf for offlevel intr handler */ + uchar * mbufl_tail; /* mbuf for offlevel intr handler */ + void * fc_evt_head; /* waiting for event queue */ + void * fc_evt_tail; /* waiting for event queue */ + + dvi_t * DEVICE_WAITING_head; + dvi_t * DEVICE_WAITING_tail; + dvi_t * ABORT_BDR_head; + dvi_t * ABORT_BDR_tail; + struct buf * timeout_head; /* bufs to iodone after RLIP done */ + + ushort timeout_count; + ushort init_eventTag; /* initial READ_LA eventtag from cfg */ + ushort hba_event_put; /* hbaevent event put word anchor */ + ushort hba_event_get; /* hbaevent event get word anchor */ + int hba_event_missed;/* hbaevent missed event word anchor */ + uchar pan_cnt; /* pseudo adapter number counter */ + uchar sid_cnt; /* SCSI ID counter */ + uchar adapter_state[NLP_MAXPAN]; + /* open/close state for pseudo adapters */ + + Simple_lock iostrat_lock; /* lock for ioctl IOSTRAT */ + int iostrat_event; /* iostrat event word anchor */ + struct buf * iostrat_head; /* head ptr to list of returned bufs */ + struct buf * 
iostrat_tail; /* tail ptr to list of returned bufs */ + HBAEVENT hbaevent[MAX_HBAEVENT]; + uint32 vendor_flag; + uint32 dfcmb[MAILBOX_CMD_WSIZE]; + /* Fill in any OS specific members */ + struct Scsi_Host *host; + struct pci_dev *pcidev; + struct buf *iodone_head; + struct buf *iodone_list; + void *dfc_kernel_buf; + void *abort_head; + void *abort_list; + void *rdev_head; + void *rdev_list; + void *rbus_head; + void *rbus_list; + void *rhst_head; + void *rhst_list; + void *qcmd_head; + void *qcmd_list; + void *qclk_head; + void *qclk_list; + uint32 dpc_ha_copy; /* copy of Host Attention Reg for DPC */ + uint32 dpc_hstatus; /* copy of Host Status Reg for DPC */ + uint32 dpc_cnt; + uint32 save_dpc_cnt; + ulong iflg; + ulong siflg; + WAIT_QUEUE linkwq; + WAIT_QUEUE rscnwq; + WAIT_QUEUE ctwq; +}; + +typedef struct fc_dev_ctl fc_dev_ctl_t; + + +/***************************************************************************/ +/* + * This is the global device driver control structure + */ +/***************************************************************************/ + +struct fc_dd_ctl { + FCCLOCK_INFO fc_clock_info; /* clock setup */ + FCCLOCK * fc_scsitmo; /* scsi timeout timer */ + fc_dev_ctl_t * p_dev[MAX_FC_BRDS]; /* device array */ + void * p_config[MAX_FC_BRDS]; + ushort num_devs; /* count of devices configed */ + + spinlock_t smp_lock; /* keep this at end */ +}; + +typedef struct fc_dd_ctl fc_dd_ctl_t; + +/* + * Macros for accessing device control area. The pointer to this area has to + * be named p_dev_ctl for using these macros. 
+ */ + +#define DD_CTL fc_dd_ctl +#define CMD_LOCK p_dev_ctl->cmd_slock +#define IOCTL_SLP_LOCK ioctl_slp_lock +#define CLOCK_LOCK clock_info->clk_slock +#define IOSTRAT_LOCK p_dev_ctl->iostrat_lock +#define SCSI_TMO DD_CTL.fc_scsitmo +#define CLOCKWDT clock_info->clktimer + +#define IHS p_dev_ctl->ihs +#define NDD p_dev_ctl->ndd +#define NDDSTAT p_dev_ctl->ndd.ndd_genstats +#define VPD p_dev_ctl->vpd +#define DDS p_dev_ctl->dds +#define BINFO p_dev_ctl->info +#define RINGTMO rp->fc_wdt +#define MBOXTMO binfo->fc_mbox_wdt +#define FABRICTMO binfo->fc_fabric_wdt +#define FCSTATCTR binfo->fc_stats + +/* + * Lock class registration number for lock instrumentation. + * These numbers should be unique on the system and they should be + * controlled by the lock registration procedure set up for the lock + * instrumentations. + */ +#define FC_CMD_LOCK 47 +#define FC_IOSTRAT_LOCK 48 +#define FC_CFG_LOCK 49 +#define FC_CLOCK_LOCK 50 +#define FC_IOCTL_SLP_LOCK 51 + +#ifndef LITTLE_ENDIAN_HOST +#if defined(i386) +#define LITTLE_ENDIAN_HOST 1 +#endif + +#endif +#if LITTLE_ENDIAN_HOST +#define SWAP_SHORT(x) (x) +#define SWAP_LONG(x) (x) +#define SWAP_DATA(x) ((((x) & 0xFF)<<24) | (((x) & 0xFF00)<<8) | \ + (((x) & 0xFF0000)>>8) | (((x) & 0xFF000000)>>24)) +#define SWAP_DATA16(x) ((((x) & 0xFF) << 8) | ((x) >> 8)) +#define PCIMEM_SHORT(x) SWAP_SHORT(x) +#define PCIMEM_LONG(x) SWAP_LONG(x) +#define PCIMEM_DATA(x) SWAP_DATA(x) + +#else /* BIG_ENDIAN_HOST */ + +#define SWAP_SHORT(x) ((((x) & 0xFF) << 8) | ((x) >> 8)) +#define SWAP_LONG(x) ((((x) & 0xFF)<<24) | (((x) & 0xFF00)<<8) | \ + (((x) & 0xFF0000)>>8) | (((x) & 0xFF000000)>>24)) +#define SWAP_DATA(x) (x) +#define SWAP_DATA16(x) (x) + +#ifdef BIU_BSE /* This feature only makes sense for Big Endian */ +#define PCIMEM_SHORT(x) (x) +#define PCIMEM_LONG(x) (x) +#define PCIMEM_DATA(x) ((((x) & 0xFF)<<24) | (((x) & 0xFF00)<<8) | \ + (((x) & 0xFF0000)>>8) | (((x) & 0xFF000000)>>24)) +#else +#define PCIMEM_SHORT(x) SWAP_SHORT(x) 
+#define PCIMEM_LONG(x) SWAP_LONG(x) +#define PCIMEM_DATA(x) SWAP_DATA(x) +#endif +#endif + +#define SWAP_ALWAYS(x) ((((x) & 0xFF)<<24) | (((x) & 0xFF00)<<8) | \ + (((x) & 0xFF0000)>>8) | (((x) & 0xFF000000)>>24)) +#define SWAP_ALWAYS16(x) ((((x) & 0xFF) << 8) | ((x) >> 8)) + +/* + * For PCI configuration + */ +#define ADDR_LO(addr) ((int)(addr) & 0xffff) /* low 16 bits */ +#define ADDR_HI(addr) (((int)(addr) >> 16) & 0xffff) /* high 16 bits */ + +#endif /* _H_FC */ diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcLINUXfcp.c current/drivers/scsi/lpfc/fcLINUXfcp.c --- reference/drivers/scsi/lpfc/fcLINUXfcp.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcLINUXfcp.c 2004-04-09 11:53:04.000000000 -0700 @@ -0,0 +1,6948 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,4) +#include +#else +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fc_os.h" +#include "fc_hw.h" +#include "fc.h" +#include "dfc.h" +#include "fcdiag.h" +#include "fcmsg.h" +#include "fc_crtn.h" +#include "fc_ertn.h" + + +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) +#include +#include +#else +#include +#endif +#include +#include +#include +#include + +#ifdef powerpc +#include + +#ifdef NO_TCE +#define INVALID_PHYS NO_TCE +#else +#define INVALID_PHYS 0 +#endif + +#else +#define INVALID_PHYS 0 +#endif + +#define is_invalid_phys(addr) ((addr) == (void *)((ulong)INVALID_PHYS)) + +static long IOcnt = 0; +static long lpfcdiag_cnt = 0; + +#define LPFC_DRIVER_VERSION "1.23a-2.6-3" +_static_ char *lpfc_release_version = LPFC_DRIVER_VERSION; + +/* Declare memory for global structure that is used to access + * per adapter specific info.c + */ +_static_ fc_dd_ctl_t DD_CTL; +_static_ spinlock_t lpfc_smp_lock; +_static_ struct watchdog lpfc_clktimer; +_static_ int lpfc_initTimer = 0; +_static_ int lpfc_one_cpu = 1; /* Just bind DPC to CPU 0 */ +_static_ int lpfc_use_hostptr = 0; + +_static_ spinlock_t lpfc_q_lock[MAX_FC_BRDS]; +_static_ spinlock_t lpfc_mempool_lock[MAX_FC_BRDS]; + +struct lpfc_dpc { + struct task_struct *dpc_handler; /* kernel thread */ + struct semaphore *dpc_wait; /* DPC waits on this semaphore */ + struct semaphore *dpc_notify; /* requester waits for DPC on sem */ + int dpc_active; /* DPC routine is active */ + int dpc_ticks; /* DPC routine current tick count */ + struct semaphore dpc_sem; +} lpfc_dpc[MAX_FC_BRDS]; + +_static_ int lpfc_dpc_timer = 0; + +_forward_ 
void lpfc_timer(void *p); +_forward_ int do_fc_timer(fc_dev_ctl_t *p_dev_ctl); +_forward_ void lpfc_do_dpc(void *p); +_forward_ int fc_dpc_lstchk(fc_dev_ctl_t *p_dev_ctl, struct scsi_cmnd *Cmnd); + +/* Binding Definitions: Max string size */ +#define FC_MAX_DID_STRING 6 +#define FC_MAX_WW_NN_PN_STRING 16 + +int lpfcMallocCnt = 0; +int lpfcMallocByte = 0; +int lpfcFreeCnt = 0; +int lpfcFreeByte = 0; + +/* This defines memory for the common configuration parameters */ +#define DEF_ICFG 1 +#include "fcfgparm.h" + +#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM)) + +#ifdef MODULE + +#ifndef EXPORT_SYMTAB +#define EXPORT_SYMTAB +#endif + +#include + +MODULE_PARM(lpfc_vendor, "i"); +MODULE_PARM(lpfc_bind_entries, "i"); +MODULE_PARM(lpfc_fcp_bind_WWPN, "1-" __MODULE_STRING(MAX_FC_BINDINGS) "s"); +MODULE_PARM(lpfc_fcp_bind_WWNN, "1-" __MODULE_STRING(MAX_FC_BINDINGS) "s"); +MODULE_PARM(lpfc_fcp_bind_DID, "1-" __MODULE_STRING(MAX_FC_BINDINGS) "s"); + +MODULE_PARM(lpfc_lun0_missing, "i"); +MODULE_PARM(lpfc_lun_skip, "i"); +MODULE_PARM(lpfc_use_removable, "i"); +MODULE_PARM(lpfc_max_lun, "i"); +MODULE_PARM(lpfc_use_data_direction, "i"); + + +#ifndef FC_NEW_EH +int lpfc_reset(struct scsi_cmnd *, unsigned int); +int fc_proc_info( char *, char **, off_t, int, int, int); +#endif +int fc_abort(struct scsi_cmnd *); +int fc_reset_device(struct scsi_cmnd *); +int fc_reset_bus(struct scsi_cmnd *); +int fc_reset_host(struct scsi_cmnd *); +int fc_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); +void fc_queue_done_cmd(fc_dev_ctl_t * , struct buf *); +void fc_flush_done_cmds(fc_dev_ctl_t *, ulong); +void lpfc_nodev(unsigned long); +void local_timeout(unsigned long data); +irqreturn_t do_fc_intr_handler(int , void *, struct pt_regs *); +int do_fc_intr(struct intr *); +void * lpfc_kmalloc( unsigned int, unsigned int, void **, fc_dev_ctl_t *); +void lpfc_kfree( unsigned int, void *, void *, fc_dev_ctl_t *); + +EXPORT_SYMBOL(fc_abort); 
+EXPORT_SYMBOL(fc_reset_device); +EXPORT_SYMBOL(fc_reset_bus); +EXPORT_SYMBOL(fc_reset_host); +EXPORT_SYMBOL(local_timeout); +EXPORT_SYMBOL(do_fc_intr_handler); +EXPORT_SYMBOL(fc_queuecommand); +EXPORT_SYMBOL(fc_queue_done_cmd); +EXPORT_SYMBOL(fc_flush_done_cmds); +EXPORT_SYMBOL(do_fc_intr); +#else /* MODULE */ +#ifndef FC_NEW_EH +int fc_reset_device(struct scsi_cmnd *); +int fc_reset_bus(struct scsi_cmnd *); +int fc_reset_host(struct scsi_cmnd *); +#endif +void local_timeout(unsigned long data); +irqreturn_t do_fc_intr_handler(int , void *, struct pt_regs *); +int do_fc_intr(struct intr *); +void * lpfc_kmalloc( unsigned int, unsigned int, void **, fc_dev_ctl_t *); +void lpfc_kfree( unsigned int, void *, void *, fc_dev_ctl_t *); +extern int lpfn_probe(void); +static int lpfc_detect_called = 0; +#endif /* MODULE */ +int do_fc_abort(fc_dev_ctl_t *); +int do_fc_reset_device(fc_dev_ctl_t *); +int do_fc_reset_bus(fc_dev_ctl_t *); +int do_fc_reset_host(fc_dev_ctl_t *); +int do_fc_queuecommand(fc_dev_ctl_t *, ulong); +void fc_select_queue_depth(struct Scsi_Host *, struct scsi_device *); +int fc_device_queue_depth(fc_dev_ctl_t *, struct scsi_device *); +int fc_DetectInstance(int, struct pci_dev *pdev, uint, struct scsi_host_template *); + +#include "lpfc.conf.defs" + +#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) +#ifdef MODULE +struct scsi_host_template driver_template = EMULEXFC; +#include "scsi_module.c" +#endif +#else /* new kernel scsi initialization scheme */ +static struct scsi_host_template driver_template = EMULEXFC; +#include "scsi_module.c" +#endif + +#ifndef __GENKSYMS__ +#include "fcmsgcom.c" +extern char fwrevision[32]; + +_local_ int lpfcdfc_init(void); +_local_ int fc_rtalloc(fc_dev_ctl_t *, struct dev_info *); +_local_ int fc_bind_wwpn(fc_dev_ctl_t *, char **, u_int ); +_local_ int fc_bind_wwnn(fc_dev_ctl_t *, char **, u_int ); +_local_ int fc_bind_did(fc_dev_ctl_t *, char **, u_int ); +_local_ dvi_t *fc_getDVI(fc_dev_ctl_t *, int, fc_lun_t); +_local_ 
ulong fc_po2(ulong); +_local_ int linux_attach(int, struct scsi_host_template *, struct pci_dev *); +_local_ int lpfc_find_cmd( fc_dev_ctl_t *p_dev_ctl, struct scsi_cmnd *cmnd); +_local_ void deviFree(fc_dev_ctl_t *, dvi_t *, node_t *); +_local_ int linux_detach(int ); +_local_ void *fc_kmem_zalloc(unsigned int ); + +extern int dfc_ioctl( struct dfccmdinfo *infop, struct cmd_input *cip); + +int lpfcdiag_ioctl(struct inode * inode, struct file * file, + unsigned int cmd, unsigned long arg); +int lpfcdiag_open(struct inode * inode, struct file * file); +int lpfcdiag_release(struct inode * inode, struct file * file); +int fc_ioctl(int , void *); + +static struct file_operations lpfc_fops = { + ioctl: lpfcdiag_ioctl, + open: lpfcdiag_open, + release: lpfcdiag_release, +}; + +static int lpfc_major = 0; + +/* If we want to define a new entry for Emulex boards*/ +/* #define PROC_SCSI_EMULEXFC PROC_SCSI_FILE+1 */ +/* For now we use the FC entry */ +#define NAMEEMULEX "lpfc" +#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) +static struct proc_dir_entry proc_scsi_emulex = { + PROC_SCSI_FCAL , 4, "lpfc", + S_IFDIR | S_IRUGO | S_IXUGO, 2 +}; +#endif + +struct dfc { + uint32 dfc_init; + uint32 filler; + uchar bufout[sizeof(FC_BRD_INFO)]; + struct dfc_info dfc_info[MAX_FC_BRDS]; +}; +extern struct dfc dfc; + +/*Extra configuration parameters as defined in lpfc.conf.c*/ +extern int lpfc_vendor; +extern int lpfc_bind_entries; +extern char *lpfc_fcp_bind_WWPN[]; +extern char *lpfc_fcp_bind_WWNN[]; +extern char *lpfc_fcp_bind_DID[]; +extern int lpfc_lun0_missing; +extern int lpfc_lun_skip; +extern int lpfc_use_removable; +extern int lpfc_max_lun; +extern int lpfc_use_data_direction; + +/*Other configuration parameters, not available to user*/ +static int lpfc_pci_latency_clocks =0; +static int lpfc_pci_cache_line =0; + +/*Other configuration parameters, not available to user*/ +static int lpfc_mtu = 4032; /* define IP max MTU size */ +static int lpfc_intr_ack = 1; +static int 
lpfc_first_check = 1; +static int lpfc_zone_rscn = 1; +static int lpfc_qfull_retry = 5; + +int lpfc_nethdr = 1; +int lpfc_driver_unloading = 0; + +/* The size of a physical memory page */ +uint32 fcPAGESIZE = 4096; /*PAGE_SIZE;*/ + +/* Can be used to map driver instance number and hardware adapter number */ +int fcinstance[MAX_FC_BRDS]; +int fcinstcnt = 0; + +/* Current driver state for diagnostic mode, online / offline, see fcdiag.h */ +uint32 fc_diag_state; +uint32 fc_dbg_flag = 0; +#define FC_MAX_SEGSZ 4096 + +#define FC_MAX_POOL 1024 +struct fc_mem_pool { + void *p_virt; + void *p_phys; + ushort p_refcnt; + ushort p_left; +}; +struct fc_mem_pool *fc_mem_dmapool[MAX_FC_BRDS]; +int fc_idx_dmapool[MAX_FC_BRDS]; +int fc_size_dmapool[MAX_FC_BRDS]; + +#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) + +#define ZERO_PAN 0 + +_static_ unsigned int lpfc_page_mask; + +/* Used in generating timeouts for timers */ +_static_ uint32 fc_scsi_abort_timeout_ticks; +_static_ uint32 fc_ticks_per_second; + +/* Can be used to map driver instance number and hardware adapter number */ +extern int fcinstance[]; +extern int fcinstcnt; + +/* Current driver state for diagnostic mode, online / offline, see fcdiag.h */ +extern uint32 fc_diag_state; + +extern int fc_check_for_vpd; +extern int fc_reset_on_attach; +extern int fc_max_ns_retry; +extern int fc_fdmi_on; +extern int fc_max_els_sent; + + + +void lpfc_scsi_add_timer(struct scsi_cmnd *, int); +int lpfc_scsi_delete_timer(struct scsi_cmnd *); + +#ifdef powerpc +#if LINUX_VERSION_CODE > LinuxVersionCode(2,4,14) +#define NO_BCOPY 1 +#endif +#endif + +#ifndef FC_NEW_EH +/****************************************************************************** +* Function name : fc_proc_info +* +* Description : +* +******************************************************************************/ +int fc_proc_info(char *buffer, + char **start, + off_t offset, + int length, + int hostno, + int inout) +{ + return(0); +} +#endif + 
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) +/****************************************************************************** +* Function name : fc_pci_alloc_consistent +* +* Description : +* +******************************************************************************/ +void * fc_pci_alloc_consistent(struct pci_dev *hwdev, + size_t size, + dma_addr_t *dma_handle) +{ + void *virt_ptr; + u_long a_size; + int order; + + if ((size % PAGE_SIZE) == 0) { + for (order = 0, a_size = PAGE_SIZE; + a_size < size; order++, a_size <<= 1); + virt_ptr = (void *) __get_free_pages(GFP_ATOMIC, order); + } + else{ + a_size = fc_po2(size); + if(a_size == 256) + a_size = 512; + virt_ptr = kmalloc(a_size, GFP_KERNEL); + } + *dma_handle = virt_to_bus(virt_ptr); + return virt_ptr; +} + +/****************************************************************************** +* Function name : fc_pci_free_consistent +* +* Description : +* +******************************************************************************/ +void fc_pci_free_consistent(struct pci_dev *hwdev, + size_t size, + void *virt_ptr, + dma_addr_t dma_handle) +{ + u_long a_size; + int order; + + if(!virt_ptr) + return; + + /* + * Check which method was used to allocate the memory + */ + if ((size % PAGE_SIZE) == 0) { + for (order = 0, a_size = PAGE_SIZE; + a_size < size; order++, a_size <<= 1) + ; + free_pages((unsigned long)virt_ptr, order); + } + else{ + kfree(virt_ptr); + } +} +#else +/****************************************************************************** +* Function name : fc_pci_dma_sync_single +* +* Description : +* +******************************************************************************/ +void fc_pci_dma_sync_single(struct pci_dev *hwdev, + dma_addr_t h, + size_t size, + int c) +{ + pci_dma_sync_single(hwdev, h, 4096, c); +} +#endif + +#if defined (MODULE) || defined (NO_BCOPY) +/****************************************************************************** +* Function name : bcopy +* +* Description : 
kernel-space to kernel-space copy +* +******************************************************************************/ +_static_ void bcopy(void *src, + void *dest, + size_t n) +{ + memcpy(dest, src, n); +} +#else +/****************************************************************************** +* Function name : bcopy +* +* Description : kernel-space to kernel-space copy +* +******************************************************************************/ +_static_ void bcopy(void *src, void *dest, size_t n); + +#endif /* MODULE or NO_BCOPY */ + +/****************************************************************************** +* Function name : lpfc_DELAYMS +* +* Description : Called to delay cnt ms +* +******************************************************************************/ +_static_ int lpfc_DELAYMS(fc_dev_ctl_t *new_dev_ctl, + int cnt) +{ + int i; + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO *binfo; + struct lpfc_dpc *ldp; + + for (i = 0; i < MAX_FC_BRDS; i++) { + if((p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[i])) { + if(new_dev_ctl == p_dev_ctl) + continue; + binfo = &BINFO; + ldp = &lpfc_dpc[binfo->fc_brd_no]; + if ((ldp->dpc_active == 0) && ldp->dpc_wait) + up(ldp->dpc_wait); + } + } + if(new_dev_ctl->info.fc_ffstate != FC_INIT_START) { + barrier(); + schedule(); + } + mdelay(cnt); + for (i = 0; i < MAX_FC_BRDS; i++) { + if((p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[i])) { + if(new_dev_ctl == p_dev_ctl) + continue; + binfo = &BINFO; + ldp = &lpfc_dpc[binfo->fc_brd_no]; + if ((ldp->dpc_active == 0) && ldp->dpc_wait) + up(ldp->dpc_wait); + } + } + if(new_dev_ctl->info.fc_ffstate != FC_INIT_START) { + barrier(); + schedule(); + } + return(0); +} + +/****************************************************************************** +* Function name : kmem_alloc +* +* Description : Kernel memory alloc and free +* +******************************************************************************/ +_static_ void *fc_kmem_alloc(unsigned int size) +{ + void *ptr; + 
lpfcMallocCnt++; + lpfcMallocByte += size; + ptr = lpfc_kmalloc(size, GFP_ATOMIC, 0, 0); + return ptr; + +} + +/****************************************************************************** +* Function name : fc_kmem_free +* +* Description : +* +******************************************************************************/ +_static_ void fc_kmem_free(void *obj, + unsigned int size) +{ + lpfcFreeCnt++; + lpfcFreeByte += size; + if(obj) + lpfc_kfree(size, obj, (void *)((ulong)INVALID_PHYS), 0); +} + +/****************************************************************************** +* Function name : fc_kmem_zalloc +* +* Description : allocate memory and initialize to zeros +* +******************************************************************************/ +_static_ void *fc_kmem_zalloc(unsigned int size) +{ + void *ptr = fc_kmem_alloc(size); + if(ptr) + fc_bzero(ptr,size); + return ptr; +} + +/****************************************************************************** +* Function name : dfc_disable_lock +* +* Description : +* +******************************************************************************/ +_static_ ulong dfc_disable_lock(ulong p1, + Simple_lock *p2) + +{ + ulong iflg; + + iflg = 0; + spin_lock_irqsave(&lpfc_smp_lock, iflg); + return(iflg); +} + +/****************************************************************************** +* Function name : dfc_unlock_enable +* +* Description : +* +******************************************************************************/ +_static_ void dfc_unlock_enable(ulong p1, + Simple_lock *p2) +{ + ulong iflg; + + iflg = p1; + spin_unlock_irqrestore(&lpfc_smp_lock, iflg); + return; +} + +_static_ ulong lpfc_q_disable_lock(fc_dev_ctl_t *p_dev_ctl) +{ + ulong iflg; + + iflg = 0; + spin_lock_irqsave(&lpfc_q_lock[p_dev_ctl->info.fc_brd_no], iflg); + return(iflg); +} + + +_static_ void lpfc_q_unlock_enable(fc_dev_ctl_t *p_dev_ctl, ulong p1) +{ + ulong iflg; + + iflg = p1; + 
spin_unlock_irqrestore(&lpfc_q_lock[p_dev_ctl->info.fc_brd_no], iflg); + return; +} + +_static_ ulong lpfc_mempool_disable_lock(fc_dev_ctl_t *p_dev_ctl) +{ + ulong iflg; + + iflg = 0; + spin_lock_irqsave(&lpfc_mempool_lock[p_dev_ctl->info.fc_brd_no], iflg); + return(iflg); +} + + +_static_ void lpfc_mempool_unlock_enable(fc_dev_ctl_t *p_dev_ctl, ulong p1) +{ + ulong iflg; + + iflg = p1; + spin_unlock_irqrestore(&lpfc_mempool_lock[p_dev_ctl->info.fc_brd_no], iflg); + return; +} + +/****************************************************************************** +* Function name : fc_flush_done_cmds +* +* Description : flush all done commands at once +* +******************************************************************************/ +void fc_flush_done_cmds(fc_dev_ctl_t *p_dev_ctl, + ulong siflg) +{ + int count, first_inq; + struct scsi_cmnd *cmd; + struct buf * head; + FC_BRD_INFO *binfo; + struct dev_info *devp; + struct sc_buf *sp; + uint32 *iptr; + ulong iflg; + + iflg = 0; + LPFC_LOCK_DRIVER(1); + + head = p_dev_ctl->iodone_head; + binfo = &BINFO; + count = 0; + while(head) { + count++; + cmd = head->cmnd; + devp = ((struct sc_buf *)head)->current_devp; + head=head->av_forw; + + if(devp) + devp->iodonecnt++; + else + panic("NULL devp in flush_done\n"); + + if(cmd && (cmd->scsi_done != NULL)) { + sp = (struct sc_buf *)cmd->host_scribble; + if (!sp) { + /* NULL sp in flush_done */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0708, /* ptr to msg structure */ + fc_mes0708, /* ptr to msg */ + fc_msgBlk0708.msgPreambleStr, /* begin varargs */ + cmd->cmnd[0], + cmd->serial_number, + cmd->retries, + cmd->result); /* end varargs */ + continue; + } + + FCSTATCTR.fcpRsvd1++; + + if(devp->scp) { + sp->bufstruct.av_forw = devp->scp; + devp->scp = sp; + } + else { + devp->scp = sp; + devp->scp->bufstruct.av_forw = 0; + } + devp->scpcnt++; + cmd->host_scribble = 0; + + first_inq = 0; + if(devp->first_check & FIRST_IO) { + uchar *buf; + if(cmd->cmnd[0] == 
FCP_SCSI_INQUIRY) { + buf = (uchar *)cmd->request_buffer; + if((cmd->result) || + ((*buf & 0x70) != 0)) { /* lun not there */ +#ifdef FREE_LUN + deviFree(p_dev_ctl, devp, devp->nodep); +#else + devp->first_check &= ~FIRST_IO; +#endif + } else { + devp->first_check &= ~FIRST_IO; + } + first_inq = 1; + } + } + + + LPFC_UNLOCK_DRIVER; + iptr = (uint32 *)&cmd->sense_buffer[0]; + if((cmd->result) || *iptr) { + devp->errorcnt++; + /* iodone error return */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0710, /* ptr to msg structure */ + fc_mes0710, /* ptr to msg */ + fc_msgBlk0710.msgPreambleStr, /* begin varargs */ + (uint32)((cmd->device->id << 16) | cmd->device->lun), + (uint32)((cmd->retries << 16 ) | cmd->cmnd[0]), + cmd->result, + *iptr); /* end varargs */ + } + + lpfc_scsi_add_timer(cmd, cmd->timeout_per_command); + cmd->scsi_done(cmd); + iflg = 0; + LPFC_LOCK_DRIVER(2); + } + else + panic("Cmnd in done queue without scsi_done\n"); + } + p_dev_ctl->iodone_head = 0; + p_dev_ctl->iodone_list = 0; + LPFC_UNLOCK_DRIVER; + return; +} + +/****************************************************************************** +* Function name : fc_queue_done_cmd +* +* Description : add done command to a queue to be flushed later +* +******************************************************************************/ +void fc_queue_done_cmd(fc_dev_ctl_t *p_dev_ctl, + struct buf *sb) +{ + struct sc_buf *sp; + + if(p_dev_ctl->iodone_head == NULL) { + p_dev_ctl->iodone_head = sb; + p_dev_ctl->iodone_list = sb; + } else { + p_dev_ctl->iodone_list->av_forw = sb; + p_dev_ctl->iodone_list = sb; + } + sb->av_forw = NULL; + + sp = (struct sc_buf *)sb; + if (sp->cmd_flag & FLAG_ABORT) + sp->cmd_flag &= ~FLAG_ABORT; +} + +/****************************************************************************** +* Function name : remap_pci_mem +* +* Description : remap pci memory, makes sure mapped memory is page-aligned +* 
+******************************************************************************/ +_local_ void * remap_pci_mem(u_long base, + u_long size) +{ +#ifdef powerpc + return (ioremap (base, size)); +#else + u_long page_base = ((u_long) base)& PAGE_MASK; + u_long page_offs = ((u_long) base) - page_base; + u_long page_remapped = (u_long) ioremap(page_base, page_offs+size); + return (void *) (page_remapped? (page_remapped + page_offs) : ((ulong)-1)); +#endif +} + +/****************************************************************************** +* Function name : unmap_pci_mem +* +* Description : unmap pci memory +* +******************************************************************************/ +_local_ void unmap_pci_mem(u_long vaddr) +{ + if (vaddr) { + } +} + +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) +/****************************************************************************** +* Function name : pci_getadd +* +* Description : get address from a pci register, accounts for 64 bit addresses +* returns next register +* +******************************************************************************/ +_local_ int pci_getadd(struct pci_dev *pdev, + int reg, + u_long *base) + +{ + *base = pci_resource_start(pdev, reg); + reg++; + return ++reg; +} +#else +/****************************************************************************** +* Function name : pci_getadd +* +* Description : get address from a pci register, accounts for 64 bit addresses +* returns next register +* +******************************************************************************/ +_local_ int pci_getadd(struct pci_dev *pdev, + int reg, + u_long *base) +{ + *base = pdev->base_address[reg++]; + if ((*base & 0x7) == 0x4) { +#if BITS_PER_LONG > 32 + *base |= (((u_long)pdev->base_address[reg]) << 32); +#endif + ++reg; + } + return reg; +} +#endif + +/****************************************************************************** +* Function name : fc_DetectInstance +* +* Description : +* 
+******************************************************************************/ +int fc_DetectInstance( int instance, + struct pci_dev *pdev, + uint type, + struct scsi_host_template *tmpt) +{ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + /* PCI_SUBSYSTEM_IDS supported */ + while ((pdev = pci_find_subsys(PCI_VENDOR_ID_EMULEX, type, + PCI_ANY_ID, PCI_ANY_ID, pdev) )) + { + if (pci_enable_device(pdev)) continue; +#else + while ((pdev = pci_find_device(PCI_VENDOR_ID_EMULEX, type, + pdev))) + { +#endif + if(linux_attach(instance, tmpt, pdev) ) + continue; + instance++; + } + + return(instance); +} + +/****************************************************************************** +* Function name : fc_detect +* +* Description : Mid-level driver entry function for detecting the boards +* Also provides some initialization +* +******************************************************************************/ +int fc_detect(struct scsi_host_template *tmpt) +{ +#define WAIT_4_FC_READY 200 /* Thats 200 * 25 ms = 5 sec */ +#define MSEC_25_DELAY 25 +#define PRE_FC_READY_DELAY 40 +#define POST_FC_READY_DELAY 60 + + int instance = 0; + struct pci_dev *pdev = NULL; + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO *binfo; + int i, j, cnt; + /* To add another, add 1 to number of elements, add a line + * sType[x] = id, leave last sType[x+1] = 0; + */ + uint sType [8]; + + sType[0] = PCI_DEVICE_ID_THOR; + sType[1] = PCI_DEVICE_ID_SUPERFLY; + sType[2] = PCI_DEVICE_ID_PEGASUS; + sType[3] = PCI_DEVICE_ID_PFLY; + sType[4] = PCI_DEVICE_ID_CENTAUR; + sType[5] = PCI_DEVICE_ID_DRAGONFLY; + sType[6] = PCI_DEVICE_ID_TFLY; + /* sType[x] = PCI_DEVICE_ID_XXX; */ + sType[7] = 0; + + /* + * Intialization + */ + lpfc_page_mask = ((unsigned int) ~(fcPAGESIZE - 1)); + fc_ticks_per_second = HZ; + fc_scsi_abort_timeout_ticks = 300 * HZ /*CLOCK_TICK_RATE*/ ; + fc_bzero(&DD_CTL, sizeof(fc_dd_ctl_t)); + for (i = 0; i < MAX_FC_BRDS; i++) { + DD_CTL.p_dev[i] = NULL; + DD_CTL.p_config[i] = NULL; + fcinstance[i] = 
-1; + } + DD_CTL.num_devs = 0; + + fc_check_for_vpd = 1; /* issue dump mbox command during HBA initialization + * to check VPD data (if any) for a Serial Number */ + fc_reset_on_attach = 0; /* Always reset HBA before initialization in attach */ + fc_fdmi_on = 0; /* Enable FDMI */ + fc_max_ns_retry = 3; /* max number of retries for NameServer CT requests + * during discovery. */ + + fc_max_els_sent = 1; + if(fc_max_els_sent > NLP_MAXREQ) + fc_max_els_sent = NLP_MAXREQ; + +#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) + tmpt->proc_dir = &proc_scsi_emulex; +#else + tmpt->proc_name = NAMEEMULEX; +#endif + + printk("Emulex LightPulse FC SCSI/IP %s\n", lpfc_release_version); + /* + * the mid-level clears interrupts + * no need to re-intialize pdev + */ + i = 0; + while(sType[i]) + { + instance = fc_DetectInstance(instance, pdev, sType[i], tmpt); + i++; + } + + if(instance) { + lpfcdfc_init(); /* Initialize diagnostic interface */ + } + + p_dev_ctl = (fc_dev_ctl_t *)NULL; /* Prevent compiler warning */ + if( (PRE_FC_READY_DELAY > 0) && + (instance > 0) && + (p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[0])) { + binfo = &BINFO; + for( i=0; ifc_ffstate >= FC_LINK_UP) && (binfo->fc_ffstate != FC_READY)) { + cnt++; + } + } + } + if(cnt) { + /* HBA(s) not FC_READY yet */ + lpfc_DELAYMS( p_dev_ctl, MSEC_25_DELAY); /* 25 millisec */ + continue; + } + break; + } + + /* There are cases where the HBAs are FC_READY but not all FC nodes + * have completed their FC PLOGI/PRLI sequence due to collisions. The + * following delay loop provides a chance for these noded to complete + * their FC PLOGI/PRLI sequence prior to allowing the SCSI layer to + * start up upon the return from this routine. 
+ */ + + if( (POST_FC_READY_DELAY > 0) && + (instance > 0) && + (p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[0])) { + binfo = &BINFO; + for( i=0; ipcidev; + if(!pdev) + return(INTR_FAIL); + if (request_irq(pdev->irq, do_fc_intr_handler, SA_INTERRUPT | SA_SHIRQ, + "lpfcdd", (void *)ihs)) + return(INTR_FAIL); + return(INTR_SUCC); +} + +/****************************************************************************** +* Function name : i_clear +* +* Description : Called from fc_detach to remove interrupt vector for adapter +* +******************************************************************************/ +_static_ int i_clear(struct intr *ihs) +{ + struct pci_dev *pdev; + fc_dev_ctl_t *p_dev_ctl; + + p_dev_ctl = (fc_dev_ctl_t * )ihs; /* Since struct intr is at beginning */ + + /* + * Get PCI for this board + */ + pdev = p_dev_ctl->pcidev; + if(!pdev) + return(1); + free_irq(pdev->irq, p_dev_ctl); + p_dev_ctl->intr_inited=0; + return(0); +} + +/****************************************************************************** +* Function name : linux_attach +* +* Description : LINUX initialization entry point, called from environment +* to attach to / initialize a specific adapter. 
+* +******************************************************************************/ +_local_ int linux_attach(int instance, + struct scsi_host_template *tmpt, + struct pci_dev *pdev) +{ + struct Scsi_Host *host; + fc_dev_ctl_t *p_dev_ctl=NULL; + FC_BRD_INFO *binfo; + FCCLOCK_INFO *clock_info; + iCfgParam *clp=NULL; + int initTimer = 0; + ulong iflg; + + /* + * must have a valid pci_dev + */ + if(!pdev) + return (1); + + /* Allocate memory to manage HBA dma pool */ + fc_mem_dmapool[instance] = kmalloc((sizeof(struct fc_mem_pool) * FC_MAX_POOL), + GFP_ATOMIC); + if(fc_mem_dmapool[instance] == 0) + return(1); + + fc_bzero((void *)fc_mem_dmapool[instance], + (sizeof(struct fc_mem_pool) * FC_MAX_POOL)); + fc_idx_dmapool[instance] = 0; + fc_size_dmapool[instance] = FC_MAX_POOL; + + /* + * Allocate space for adapter info structure + */ + if (!(p_dev_ctl = (fc_dev_ctl_t *)fc_kmem_zalloc(sizeof(fc_dev_ctl_t)))) { + return (1); + } + /* + * Allocate space for configuration parameters + */ + if (!(clp = (iCfgParam *)fc_kmem_zalloc(sizeof(icfgparam)))) { + goto fail1; + } + + p_dev_ctl->pcidev = pdev; + p_dev_ctl->sid_cnt = 0; /* Start scsid assignment at 1 */ + binfo = &BINFO; + binfo->fc_brd_no = instance; + spin_lock_init(&lpfc_q_lock[instance]); + spin_lock_init(&lpfc_mempool_lock[instance]); + + if(lpfc_use_hostptr) + binfo->fc_busflag = FC_HOSTPTR; +#ifdef powerpc + binfo->fc_busflag = FC_HOSTPTR; +#endif + + binfo->fc_p_dev_ctl = (uchar * )p_dev_ctl; + DD_CTL.p_dev[instance] = p_dev_ctl; + DD_CTL.p_config[instance] = clp; + fcinstance[instance] = instance; + + /* + * Initialize config parameters + */ + bcopy((void * )&icfgparam, (void *)clp, sizeof(icfgparam)); + + /* + * Initialize locks, and timeout functions + */ + clock_info = &DD_CTL.fc_clock_info; + CLOCKWDT = (void *)&lpfc_clktimer; +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + init_waitqueue_head(&p_dev_ctl->linkwq); + init_waitqueue_head(&p_dev_ctl->rscnwq); + init_waitqueue_head(&p_dev_ctl->ctwq); 
+#endif + + + initTimer = 0; + if(lpfc_initTimer == 0) { + LPFC_INIT_LOCK_DRIVER; /* Just one global lock for driver */ + fc_clock_init(); + ((struct watchdog *)(CLOCKWDT))->func = fc_timer; + ((struct watchdog *)(CLOCKWDT))->restart = 1; + ((struct watchdog *)(CLOCKWDT))->count = 0; + ((struct watchdog *)(CLOCKWDT))->stopping = 0; + ((struct watchdog *)(CLOCKWDT))->timeout_id = 1; + /* + * add our watchdog timer routine to kernel's list + */ + ((struct watchdog *)(CLOCKWDT))->timer.expires = HZ + jiffies; + ((struct watchdog *)(CLOCKWDT))->timer.function = local_timeout; + ((struct watchdog *)(CLOCKWDT))->timer.data = (unsigned long)(CLOCKWDT); + init_timer(&((struct watchdog *)(CLOCKWDT))->timer); + add_timer(&((struct watchdog *)(CLOCKWDT))->timer); + lpfc_initTimer = 1; + initTimer = 1; + } + + { + struct lpfc_dpc *ldp; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + struct semaphore sem = MUTEX_LOCKED; +#else + DECLARE_MUTEX_LOCKED(sem); +#endif + + ldp = &lpfc_dpc[instance]; + ldp->dpc_notify = &sem; + kernel_thread((int (*)(void *))lpfc_do_dpc, (void *) p_dev_ctl, 0); + /* + * Now wait for the kernel dpc thread to initialize and go to sleep. 
+ */ + down(&sem); + ldp->dpc_notify = NULL; + } + + p_dev_ctl->intr_inited = 0; + fcinstcnt++; + if (fc_attach(instance, (uint32 * )((ulong)instance))) { + /* + * lower level routine will log error + */ + fcinstcnt--; + goto fail; + } + + /* + * Register this board + */ + host = scsi_register(tmpt, sizeof(unsigned long)); + + /* + * Adjust the number of id's + * Although max_id is an int, target id's are unsined chars + * Do not exceed 255, otherwise the device scan will wrap around + */ + host->max_id = MAX_FCP_TARGET; + if(!lpfc_max_lun) { + host->max_lun = MAX_FCP_LUN+1; + lpfc_max_lun = MAX_FCP_LUN+1; + } + else { + host->max_lun = lpfc_max_lun; + } + host->unique_id = instance; + + /* Adapter ID */ + host->this_id = MAX_FCP_TARGET - 1; + + /* + * Starting with 2.4.0 kernel, Linux can support commands longer + * than 12 bytes. However, scsi_register() always sets it to 12. + * For it to be useful to the midlayer, we have to set it here. + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + host->max_cmd_len = 16; +#endif + + /* + * Queue depths per lun + */ + host->cmd_per_lun = 1; + + /* + * Save a pointer to device control in host and increment board + */ + host->hostdata[0] = (unsigned long)p_dev_ctl; + p_dev_ctl->host = host; + DD_CTL.num_devs++; + + iflg = 0; + LPFC_LOCK_DRIVER(23); + /* + * Need to start scsi timeout if FCP is turned on + * The SCSI timeout watch dog is for all adaptors, so do it once only + */ + + if((SCSI_TMO == 0) && clp[CFG_FCP_ON].a_current) { + SCSI_TMO = fc_clk_set(0, 5, fc_scsi_timeout, 0, 0); + } + + /* DQFULL */ + if ((clp[CFG_DQFULL_THROTTLE].a_current) && + (clp[CFG_DFT_LUN_Q_DEPTH].a_current > FC_MIN_QFULL)) { + if ((clp[CFG_DQFULL_THROTTLE_UP_TIME].a_current) && + (clp[CFG_DQFULL_THROTTLE_UP_INC].a_current)) { + fc_clk_set(p_dev_ctl, clp[CFG_DQFULL_THROTTLE_UP_TIME].a_current, + fc_q_depth_up, 0, 0); + } + } + LPFC_UNLOCK_DRIVER; + return(0); + +fail: + if(initTimer) { + if(SCSI_TMO) { + fc_clk_can(0, SCSI_TMO); + 
SCSI_TMO = 0; + } + clock_info = &DD_CTL.fc_clock_info; + ((struct watchdog *)(CLOCKWDT))->stopping = 1; + if (((struct watchdog *)(CLOCKWDT))->timer.function) + del_timer(&((struct watchdog *)(CLOCKWDT))->timer); + ((struct watchdog *)(CLOCKWDT))->timer.function=NULL; + ((struct watchdog *)(CLOCKWDT))->timeout_id=0; + } + + { + struct lpfc_dpc *ldp; + ldp = &lpfc_dpc[instance]; + if(ldp->dpc_handler != NULL ) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + struct semaphore sem = MUTEX_LOCKED; +#else + DECLARE_MUTEX_LOCKED(sem); +#endif + + ldp->dpc_notify = &sem; + send_sig(SIGKILL, ldp->dpc_handler, 1); + down(&sem); + ldp->dpc_notify = NULL; + } + } + /* + * Free up any allocated resources + */ + fc_kmem_free(clp, sizeof(icfgparam)); + fail1: + /* + * Just in case the interrupt is still on + */ + if(p_dev_ctl->intr_inited) + i_clear((struct intr *)p_dev_ctl); + fc_kmem_free(p_dev_ctl, sizeof(fc_dev_ctl_t)); + + return(1); +} + +/****************************************************************************** +* Function name : fc_device_queue_depth +* +* Description : Determines the queue depth for a given device. +* There are two ways +* a queue depth can be obtained for a tagged queueing device. +* One way is the default queue depth which is determined by +* whether if it is defined, then it is used as the default +* queue depth. +* Otherwise, we use either 4 or 8 as the default queue depth +* (dependent on the number of hardware SCBs). +******************************************************************************/ +int fc_device_queue_depth(fc_dev_ctl_t *p_dev_ctl, + struct scsi_device *device) +{ + iCfgParam * clp; + FC_BRD_INFO *binfo; + + binfo = &p_dev_ctl->info; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if( device->tagged_supported ) { +#ifdef NEEDS_CHECKING + /* + * XXX double check that we can remove this. 
+ */ + device->tagged_queue = 1; +#endif + device->current_tag = 0; + device->queue_depth = clp[CFG_DFT_LUN_Q_DEPTH].a_current; + } else { + device->queue_depth = 16; + } + return(device->queue_depth); +} + +/****************************************************************************** +* Function name : lpfc_do_dpc +* +* Description : +* +******************************************************************************/ +void lpfc_do_dpc(void *p) +{ + fc_dev_ctl_t * p_dev_ctl=(fc_dev_ctl_t*)p; + FC_BRD_INFO * binfo; + FCCLOCK_INFO * clock_info; + iCfgParam * clp; + struct lpfc_dpc * ldp; + void * ioa; + unsigned long secs; + int instance, ev; + ulong iflg; + ulong siflg; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + struct fs_struct *fs; + struct semaphore sem = MUTEX_LOCKED; +#else + DECLARE_MUTEX_LOCKED(sem); +#endif + + lock_kernel(); + secs = 0; + + /* + * If we were started as result of loading a module, close all of the + * user space pages. We don't need them, and if we didn't close them + * they would be locked into memory. + */ + exit_mm(current); + + binfo = &BINFO; + clock_info = &DD_CTL.fc_clock_info; + instance = binfo->fc_brd_no ; + + daemonize("lpfc_do_dpc_%d", instance); + + clp = DD_CTL.p_config[instance]; + ldp = &lpfc_dpc[instance]; + + /* Since this is a kernel process, lets be nice to it! 
 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+#ifdef DEF_NICE
+ current->nice = -20;
+ current->processor = smp_processor_id();
+#endif /* DEF_NICE */
+
+
+#else
+ {
+ int niceval;
+ uint32 priority;
+
+ niceval = -20;
+ priority = niceval;
+ if (niceval < 0)
+ priority = -niceval;
+ if (priority > 20)
+ priority = 20;
+ priority = (priority * DEF_PRIORITY + 10) / 20 + DEF_PRIORITY;
+
+ if (niceval >= 0) {
+ priority = 2*DEF_PRIORITY - priority;
+ if (!priority)
+ priority = 1;
+ }
+ current->priority = priority;
+ }
+ current->session = 1;
+ current->pgrp = 1;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
+ siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
+#else
+ siginitsetinv(&current->blocked, sigmask(SIGKILL));
+#endif
+
+ ldp->dpc_wait = &sem;
+ ldp->dpc_handler = current;
+
+ unlock_kernel();
+
+ /*
+ * Wake up the thread that created us.
+ */
+ if( ldp->dpc_notify != NULL )
+ up(ldp->dpc_notify);
+ ev = 0;
+
+ while( 1 ) {
+ /*
+ * If we get a signal, it means we are supposed to go
+ * away and die. This typically happens if the user is
+ * trying to unload a module. 
+ */ + if(ev == 0) { + ldp->dpc_ticks = clock_info->ticks; + + if(clp[CFG_NETWORK_ON].a_current) { + } + + /* Only wait if we go thru KP once with no work */ + down_interruptible(&sem); + if( signal_pending(current) ) { + + iflg = 0; + flush_signals(current); + + /* Only allow our driver unload to kill the KP */ + if( ldp->dpc_notify != NULL ) + break; /* get out */ + } + ldp->dpc_ticks = clock_info->ticks; + if(clp[CFG_NETWORK_ON].a_current) { + } + + } + ev = 0; + + siflg = 0; + iflg = 0; + LPFC_LOCK_DRIVER(22); + ldp->dpc_active = 1; + + p_dev_ctl->dpc_cnt++; + p_dev_ctl->dev_flag &= ~FC_NEEDS_DPC; + + /* Handle timer interrupts */ + if(p_dev_ctl->qclk_head) { + ev++; + do_fc_timer(p_dev_ctl); + } + + /* Handle adapter interrupts */ + if(p_dev_ctl->dpc_ha_copy) { + ev++; + do_fc_intr((struct intr *)p_dev_ctl); + } + + if(p_dev_ctl->qcmd_head) { + ev++; + if(clp[CFG_CR_DELAY].a_current != 0) { + ioa = FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in io registers */ + if ((uchar)READ_SLIM_ADDR(binfo, ((volatile uint32 *)ioa + (SLIMOFF+(FC_ELS_RING*2)+1))) != + ((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx.us.s2.port[FC_ELS_RING].rspPutInx) { + handle_ring_event(p_dev_ctl, FC_ELS_RING, HA_R0CE_RSP); + } + if ((uchar)READ_SLIM_ADDR(binfo, ((volatile uint32 *)ioa + (SLIMOFF+(FC_FCP_RING*2)+1))) != + ((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx.us.s2.port[FC_FCP_RING].rspPutInx) { + handle_ring_event(p_dev_ctl, FC_FCP_RING, HA_R2CE_RSP); + } + FC_UNMAP_MEMIO(ioa); + } + do_fc_queuecommand(p_dev_ctl, siflg); + } + + /* Handle SCSI layer aborts */ + if(p_dev_ctl->abort_head) { + ev++; + do_fc_abort(p_dev_ctl); + } + + /* Handle SCSI layer device resets */ + if(p_dev_ctl->rdev_head) { + ev++; + do_fc_reset_device(p_dev_ctl); + } + + /* Handle SCSI layer bus resets */ + if(p_dev_ctl->rbus_head) { + ev++; + do_fc_reset_bus(p_dev_ctl); + } + + /* Handle SCSI layer host resets */ + if(p_dev_ctl->rhst_head) { + ev++; + do_fc_reset_host(p_dev_ctl); + } + + /* Handle iodone processing */ + 
if(p_dev_ctl->iodone_head) { + int count, first_inq; + struct scsi_cmnd *cmd; + struct buf * head; + struct dev_info *devp; + struct sc_buf *sp; + uint32 *iptr; + + ev++; + ldp->dpc_active = 0; + + head = p_dev_ctl->iodone_head; + count = 0; + while(head) { + count++; + cmd = head->cmnd; + devp = ((struct sc_buf *)head)->current_devp; + head=head->av_forw; + + if(devp) + devp->iodonecnt++; + else + panic("NULL devp in flush_done\n"); + + if(cmd && (cmd->scsi_done != NULL)) { + sp = (struct sc_buf *)cmd->host_scribble; + if (!sp) { + /* NULL sp in DPC flush_done */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0709, /* ptr to msg structure */ + fc_mes0709, /* ptr to msg */ + fc_msgBlk0709.msgPreambleStr, /* begin varargs */ + cmd->cmnd[0], + cmd->serial_number, + cmd->retries, + cmd->result); /* end varargs */ + continue; + } + + FCSTATCTR.fcpRsvd1++; + + if(devp->scp) { + sp->bufstruct.av_forw = devp->scp; + devp->scp = sp; + } + else { + devp->scp = sp; + devp->scp->bufstruct.av_forw = 0; + } + devp->scpcnt++; + cmd->host_scribble = 0; + + iptr = (uint32 *)&cmd->sense_buffer[0]; + if((cmd->result) || *iptr) { + devp->errorcnt++; + /* iodone error return */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0711, /* ptr to msg structure */ + fc_mes0711, /* ptr to msg */ + fc_msgBlk0711.msgPreambleStr, /* begin varargs */ + (uint32)((cmd->device->id << 16) | cmd->device->lun), + (uint32)((cmd->retries << 16 ) | cmd->cmnd[0]), + cmd->result, + *iptr); /* end varargs */ + } + + first_inq = 0; + if(devp->first_check & FIRST_IO) { + uchar *buf; + if(cmd->cmnd[0] == FCP_SCSI_INQUIRY) { + buf = (uchar *)cmd->request_buffer; + if((cmd->result) || + ((*buf & 0x70) != 0)) { /* lun not there */ +#ifdef FREE_LUN + deviFree(p_dev_ctl, devp, devp->nodep); +#else + devp->first_check &= ~FIRST_IO; +#endif + } else { + devp->first_check &= ~FIRST_IO; + } + first_inq = 1; + } + } + + LPFC_UNLOCK_DRIVER; + lpfc_scsi_add_timer(cmd, cmd->timeout_per_command); + 
cmd->scsi_done(cmd); + iflg = 0; + LPFC_LOCK_DRIVER(2); + } + else + panic("Cmnd in done queue without scsi_done\n"); + } + p_dev_ctl->iodone_head = 0; + p_dev_ctl->iodone_list = 0; + LPFC_UNLOCK_DRIVER; + } + else { + ldp->dpc_active = 0; + LPFC_UNLOCK_DRIVER; + } + + if(p_dev_ctl->dev_flag & FC_SCHED_CFG_INIT) { + p_dev_ctl->dev_flag &= ~FC_SCHED_CFG_INIT; + fc_cfg_init(p_dev_ctl); + + LPFC_LOCK_DRIVER(27); + if(p_dev_ctl->fc_estabtmo) { + fc_clk_can(p_dev_ctl, p_dev_ctl->fc_estabtmo); + } + if (binfo->fc_ffstate != FC_READY) { + p_dev_ctl->fc_estabtmo = + fc_clk_set(p_dev_ctl, 60, fc_establish_link_tmo, 0, 0); + } + LPFC_UNLOCK_DRIVER; + } + } + + /* + * Make sure that nobody tries to wake us up again. + */ + ldp->dpc_wait = NULL; + ldp->dpc_handler = NULL; + ldp->dpc_active = 0; + + /* + * If anyone is waiting for us to exit (i.e. someone trying to unload + * a driver), then wake up that process to let them know we are on + * the way out the door. This may be overkill - I *think* that we + * could probably just unload the driver and send the signal, and when + * the error handling thread wakes up that it would just exit without + * needing to touch any memory associated with the driver itself. + */ + if( ldp->dpc_notify != NULL ) + up(ldp->dpc_notify); +} + +/****************************************************************************** +* Function name : fc_release +* +* Description : +* +******************************************************************************/ +int fc_release(struct Scsi_Host *host) +{ + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO *binfo; + node_t *node_ptr; + struct dev_info *dev_ptr; + struct lpfc_dpc *ldp; + int instance; + int dev_index,target; + fc_lun_t lun; + ulong iflg; + + /* + * Indicate driver unloading so our interrupt handler can stop + * accepting interrupts. 
+ */ + lpfc_driver_unloading = 1; + + /* + * get dev control from host + */ + p_dev_ctl = (fc_dev_ctl_t *)host->hostdata[0]; + binfo = &BINFO; + instance = binfo->fc_brd_no ; + + if(lpfcdiag_cnt) { + /* Cannot unload driver while lpfcdiag Interface is active */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1200, /* ptr to msg structure */ + fc_mes1200, /* ptr to msg */ + fc_msgBlk1200.msgPreambleStr, /* begin varargs */ + lpfcdiag_cnt, + (uint32)instance); /* end varargs */ + } + + iflg = 0; + LPFC_LOCK_DRIVER(24); + linux_detach(instance); + /* + *Clear all devi's + *Although host_queue has all devices, its not a good idea to touch it! + *instead we will loop on all possible targets and luns + */ + for(target=0; target < host->max_id; target++) { + dev_index = INDEX(ZERO_PAN, target); + node_ptr = binfo->device_queue_hash[dev_index].node_ptr; + if(!node_ptr) + continue; + for(lun=0; lun <= host->max_lun; lun++){ + dev_ptr = fc_find_lun(binfo, dev_index, lun); + if(!dev_ptr) + continue; + /* + * Free this device + */ + deviFree(p_dev_ctl, dev_ptr, node_ptr); + } + fc_kmem_free(node_ptr, sizeof(node_t)); + binfo->device_queue_hash[dev_index].node_ptr = 0; + } + + fcinstcnt--; + DD_CTL.num_devs--; + LPFC_UNLOCK_DRIVER; + + if(lpfc_major) + unregister_chrdev(lpfc_major, "lpfcdfc"); + + ldp = &lpfc_dpc[instance]; + if(ldp->dpc_handler != NULL ) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + struct semaphore sem = MUTEX_LOCKED; +#else + DECLARE_MUTEX_LOCKED(sem); +#endif + + ldp->dpc_notify = &sem; + send_sig(SIGKILL, ldp->dpc_handler, 1); + down(&sem); + ldp->dpc_notify = NULL; + } + scsi_unregister(host); + + return 0; +} + +/****************************************************************************** +* Function name : linux_detach +* +* Description : LINUX deinitialization entry point, called from environment +* to detach from / free resources for a specific adapter. 
+******************************************************************************/ +_local_ int linux_detach( int instance) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + fc_dev_ctl_t * p_dev_ctl = (fc_dev_ctl_t * ) NULL; + + p_dev_ctl = DD_CTL.p_dev[instance]; + if (p_dev_ctl == NULL) { + return(0); + } + binfo = &BINFO; + clp = DD_CTL.p_config[instance]; + if (clp == NULL) { + return(0); + } + + /* + * Stop and free resources associated with scsi timeout timer + */ + if(DD_CTL.num_devs == 1) { + FCCLOCK_INFO * clock_info; + + if(SCSI_TMO) { + fc_clk_can(0, SCSI_TMO); + SCSI_TMO = 0; + } + clock_info = &DD_CTL.fc_clock_info; + ((struct watchdog *)(CLOCKWDT))->stopping = 1; + if (((struct watchdog *)(CLOCKWDT))->timer.function) + del_timer(&((struct watchdog *)(CLOCKWDT))->timer); + ((struct watchdog *)(CLOCKWDT))->timer.function=NULL; + ((struct watchdog *)(CLOCKWDT))->timeout_id=0; + } + fc_detach(instance); + + fc_kmem_free(DD_CTL.p_dev[instance], sizeof(fc_dev_ctl_t)); + DD_CTL.p_dev[instance] = 0; + fc_kmem_free(DD_CTL.p_config[instance], sizeof(icfgparam)); + DD_CTL.p_config[instance] = 0; + + kfree(fc_mem_dmapool[instance]); + return(0); +} + +/****************************************************************************** +* Function name : fc_abort +* +* Description : Linux mid-level command abort entry +* Note we are using the new error handling routines +******************************************************************************/ +int fc_abort(struct scsi_cmnd *Cmnd) +{ + struct Scsi_Host *host; + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO * binfo; + ulong iflg; + struct lpfc_dpc *ldp; + + + host = Cmnd->device->host; + if(!host) { +#ifdef FC_NEW_EH + return FAILED ; +#else + return SCSI_ABORT_NOT_RUNNING ; +#endif + } + p_dev_ctl = (fc_dev_ctl_t *)host->hostdata[0]; + if(!p_dev_ctl) { +#if FC_NEW_EH + return FAILED ; +#else + return SCSI_ABORT_NOT_RUNNING ; +#endif + } + binfo = &BINFO; + + iflg = 0; + LPFC_LOCK_DRIVER(5); + ldp = 
&lpfc_dpc[binfo->fc_brd_no]; + if (ldp->dpc_wait == NULL) { + LPFC_UNLOCK_DRIVER; +#if FC_NEW_EH + return SUCCESS; +#else + return SCSI_ABORT_SUCCESS ; +#endif + } + + fc_dpc_lstchk(p_dev_ctl, Cmnd); + if(p_dev_ctl->abort_head == NULL) { + p_dev_ctl->abort_head = (void *)Cmnd; + p_dev_ctl->abort_list = (void *)Cmnd; + } else { + SCMD_NEXT((struct scsi_cmnd *)(p_dev_ctl->abort_list)) = Cmnd; + p_dev_ctl->abort_list = (void *)Cmnd; + } + SCMD_NEXT(Cmnd) = NULL; + + + if (ldp->dpc_active == 0) { + LPFC_UNLOCK_DRIVER; + up(ldp->dpc_wait); + } + else { + LPFC_UNLOCK_DRIVER; + } + +#if FC_NEW_EH + return SUCCESS; +#else + return SCSI_ABORT_SUCCESS ; +#endif +} + +/****************************************************************************** +* Function name : do_fc_abort +* +* Description : +* +******************************************************************************/ +int do_fc_abort(fc_dev_ctl_t *p_dev_ctl) +{ + struct scsi_cmnd * Cmnd; + struct scsi_cmnd * oCmnd; + FC_BRD_INFO * binfo; + dvi_t * dev_ptr; + struct sc_buf * sp; + int dev_index,target; + fc_lun_t lun; + + binfo = &BINFO; + Cmnd = (struct scsi_cmnd *)p_dev_ctl->abort_head; + while(Cmnd) { + target = (int)Cmnd->device->id; + lun = (fc_lun_t)Cmnd->device->lun; + dev_index = INDEX(ZERO_PAN, target); + + dev_ptr = fc_find_lun(binfo, dev_index, lun); + /* SCSI layer issued abort device */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0712, /* ptr to msg structure */ + fc_mes0712, /* ptr to msg */ + fc_msgBlk0712.msgPreambleStr, /* begin varargs */ + target, + (uint32)lun, + Cmnd->cmnd[0], + Cmnd->serial_number); /* end varargs */ + if(!dev_ptr || !(dev_ptr->nodep)) { + goto done; + } + + if (dev_ptr->flags & CHK_SCSI_ABDR) { + goto done; + } + + sp = (struct sc_buf *)Cmnd->host_scribble; + if (lpfc_find_cmd(p_dev_ctl, Cmnd)) { + FCSTATCTR.fcpRsvd2++; + } else { + if (fc_abort_clk_blk(p_dev_ctl, lpfc_scsi_selto_timeout, sp, 0)) { + FCSTATCTR.fcpRsvd2++; + } + } +done: + oCmnd = Cmnd; + Cmnd = 
SCMD_NEXT(Cmnd); + SCMD_NEXT(oCmnd) = 0; + } + p_dev_ctl->abort_head = 0; + p_dev_ctl->abort_list = 0; + + return(0); +} + +#ifndef FC_NEW_EH +/****************************************************************************** +* Function name : lpfc_reset +* +* Description : +* +******************************************************************************/ +int lpfc_reset(struct scsi_cmnd *Cmnd, + unsigned int flags) +{ + int action; + + if( flags & SCSI_RESET_SUGGEST_HOST_RESET ) { + if((action = fc_reset_host(Cmnd)) == FAILED) + return(SCSI_RESET_ERROR); + action = SCSI_RESET_SUCCESS | SCSI_RESET_HOST_RESET; + } + else if( flags & SCSI_RESET_SUGGEST_BUS_RESET ) { + if((action = fc_reset_bus(Cmnd)) == FAILED) + return(SCSI_RESET_ERROR); + action = SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; + } + else { + if((action = fc_reset_device(Cmnd)) == FAILED) + return(SCSI_RESET_ERROR); + action = SCSI_RESET_SUCCESS; + } + return(action); +} +#endif + +/****************************************************************************** +* Function name : fc_reset_device +* +* Description : Linux mid-level reset device entry +* Note we are using the new error handling routines +* In the old handlers there is only one reset entry which has +* two arguments +******************************************************************************/ +int fc_reset_device(struct scsi_cmnd *Cmnd) +{ + struct Scsi_Host *host; + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO *binfo; + ulong iflg; + struct lpfc_dpc *ldp; + + host = Cmnd->device->host; + if(!host) { + return FAILED ; + } + p_dev_ctl = (fc_dev_ctl_t *)host->hostdata[0]; + if(!p_dev_ctl) { + return FAILED; + } + binfo = &BINFO; + + iflg = 0; + LPFC_LOCK_DRIVER(6); + ldp = &lpfc_dpc[binfo->fc_brd_no]; + if (ldp->dpc_wait == NULL) { + LPFC_UNLOCK_DRIVER; + return SUCCESS; + } + + fc_dpc_lstchk(p_dev_ctl, Cmnd); + if(p_dev_ctl->rdev_head == NULL) { + p_dev_ctl->rdev_head = (void *)Cmnd; + p_dev_ctl->rdev_list = (void *)Cmnd; + } else { + 
SCMD_NEXT((struct scsi_cmnd *)(p_dev_ctl->rdev_list)) = Cmnd; + p_dev_ctl->rdev_list = (void *)Cmnd; + } + SCMD_NEXT(Cmnd) = NULL; + + if (ldp->dpc_active == 0) { + LPFC_UNLOCK_DRIVER; + up(ldp->dpc_wait); + } + else { + LPFC_UNLOCK_DRIVER; + } + + return SUCCESS; +} + +/****************************************************************************** +* Function name : do_fc_reset_device +* +* Description : +* +******************************************************************************/ +int do_fc_reset_device(fc_dev_ctl_t *p_dev_ctl) +{ + struct scsi_cmnd * Cmnd; + struct scsi_cmnd * oCmnd; + struct dev_info * dev_ptr; + FC_BRD_INFO * binfo; + int dev_index, target, j; + fc_lun_t lun; + + binfo = &BINFO; + Cmnd = (struct scsi_cmnd *)p_dev_ctl->rdev_head; + while(Cmnd) { + target = (int)Cmnd->device->id; + lun = (fc_lun_t)Cmnd->device->lun; + dev_index = INDEX(ZERO_PAN, target); + + dev_ptr = fc_find_lun(binfo, dev_index, lun); + j = 0; + /* SCSI layer issued target reset */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0713, /* ptr to msg structure */ + fc_mes0713, /* ptr to msg */ + fc_msgBlk0713.msgPreambleStr, /* begin varargs */ + target, + (uint32)lun, + dev_index); /* end varargs */ + if(dev_ptr == 0) { + goto done; + } + if ((binfo->fc_ffstate != FC_READY) || + (!(dev_ptr->nodep)) || + (dev_ptr->nodep->rpi == 0xfffe)) { + goto done; + } + fc_fcp_abort(p_dev_ctl, TARGET_RESET, dev_index, -1); + + +done: + oCmnd = Cmnd; + Cmnd = SCMD_NEXT(Cmnd); + SCMD_NEXT(oCmnd) = 0; + } + p_dev_ctl->rdev_head = 0; + p_dev_ctl->rdev_list = 0; + + return(0); +} + +/****************************************************************************** +* Function name : fc_reset_bus +* +* Description : Linux mid-level reset host/bus entry +* +******************************************************************************/ +int fc_reset_bus(struct scsi_cmnd *Cmnd) +{ + struct Scsi_Host *host; + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO *binfo; + ulong iflg; + struct lpfc_dpc 
*ldp; + + host = Cmnd->device->host; + if(!host) { + return FAILED; + } + p_dev_ctl = (fc_dev_ctl_t *)host->hostdata[0]; + if(!p_dev_ctl) { + return FAILED; + } + binfo = &p_dev_ctl->info; + + iflg = 0; + LPFC_LOCK_DRIVER(8); + ldp = &lpfc_dpc[binfo->fc_brd_no]; + if (ldp->dpc_wait == NULL) { + LPFC_UNLOCK_DRIVER; + return SUCCESS; + } + + fc_dpc_lstchk(p_dev_ctl, Cmnd); + if(p_dev_ctl->rbus_head == NULL) { + p_dev_ctl->rbus_head = (void *)Cmnd; + p_dev_ctl->rbus_list = (void *)Cmnd; + } else { + SCMD_NEXT((struct scsi_cmnd *)(p_dev_ctl->rbus_list)) = Cmnd; + p_dev_ctl->rbus_list = (void *)Cmnd; + } + SCMD_NEXT(Cmnd) = NULL; + + if (ldp->dpc_active == 0) { + LPFC_UNLOCK_DRIVER; + up(ldp->dpc_wait); + } + else { + LPFC_UNLOCK_DRIVER; + } + + return SUCCESS; +} + +/****************************************************************************** +* Function name : do_fc_reset_bus +* +* Description : +* +******************************************************************************/ +int do_fc_reset_bus(fc_dev_ctl_t *p_dev_ctl) +{ + struct scsi_cmnd * Cmnd; + struct scsi_cmnd * oCmnd; + FC_BRD_INFO *binfo; + node_t * node_ptr; + struct dev_info * dev_ptr; + NODELIST * nlp; + NODELIST * new_nlp; + iCfgParam *clp; + int rets = FAILED; + + binfo = &p_dev_ctl->info; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + Cmnd = (struct scsi_cmnd *)p_dev_ctl->rbus_head; + while(Cmnd) { + /* SCSI layer issued Bus Reset */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0714, /* ptr to msg structure */ + fc_mes0714, /* ptr to msg */ + fc_msgBlk0714.msgPreambleStr, /* begin varargs */ + Cmnd->device->id, + (uint32)Cmnd->device->lun); /* end varargs */ + /* + * Tell them + */ + if (binfo->fc_ffstate == FC_READY) { + rets = SUCCESS; + fc_fcp_abort(p_dev_ctl, TARGET_RESET, -1, -1); + } + else { + /* + * Check to see if we should wait for FC_READY + */ + if ((binfo->fc_ffstate < FC_LINK_DOWN) || + (binfo->fc_ffstate == FC_ERROR)) { + rets = FAILED; + } + else { + rets = SUCCESS; + } + 
} + + /* Reset first_check */ + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_nlp = (NODELIST *)nlp->nlp_listp_next; + if (nlp->nlp_type & NLP_FCP_TARGET) { + if(clp[CFG_FIRST_CHECK].a_current) { + /* If we are an FCP node, update first_check flag for all LUNs */ + if ((node_ptr = (node_t * )nlp->nlp_targetp) != NULL) { + for (dev_ptr = node_ptr->lunlist; dev_ptr != NULL; + dev_ptr = dev_ptr->next) { + dev_ptr->first_check = FIRST_CHECK_COND; + } + } + } + } + nlp = new_nlp; + } + oCmnd = Cmnd; + Cmnd = SCMD_NEXT(Cmnd); + SCMD_NEXT(oCmnd) = 0; + } + p_dev_ctl->rbus_head = 0; + p_dev_ctl->rbus_list = 0; + + return rets; +} + +/****************************************************************************** +* Function name : fc_reset_host +* +* Description : +* +******************************************************************************/ +int fc_reset_host(struct scsi_cmnd *Cmnd) +{ + struct Scsi_Host *host; + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO *binfo; + ulong iflg; + struct lpfc_dpc *ldp; + + host = Cmnd->device->host; + if(!host) { + return FAILED; + } + p_dev_ctl = (fc_dev_ctl_t *)host->hostdata[0]; + if(!p_dev_ctl) { + return FAILED; + } + binfo = &p_dev_ctl->info; + + iflg = 0; + LPFC_LOCK_DRIVER(10); + ldp = &lpfc_dpc[binfo->fc_brd_no]; + if (ldp->dpc_wait == NULL) { + LPFC_UNLOCK_DRIVER; + return SUCCESS; + } + + fc_dpc_lstchk(p_dev_ctl, Cmnd); + if(p_dev_ctl->rhst_head == NULL) { + p_dev_ctl->rhst_head = (void *)Cmnd; + p_dev_ctl->rhst_list = (void *)Cmnd; + } else { + SCMD_NEXT((struct scsi_cmnd *)(p_dev_ctl->rhst_list)) = Cmnd; + p_dev_ctl->rhst_list = (void *)Cmnd; + } + SCMD_NEXT(Cmnd) = NULL; + + if (ldp->dpc_active == 0) { + LPFC_UNLOCK_DRIVER; + up(ldp->dpc_wait); + } + else { + LPFC_UNLOCK_DRIVER; + } + + return SUCCESS; +} + +/****************************************************************************** +* Function name : do_fc_reset_host +* +* Description : +* 
+******************************************************************************/ +int do_fc_reset_host(fc_dev_ctl_t *p_dev_ctl) +{ + struct scsi_cmnd * Cmnd; + struct scsi_cmnd * oCmnd; + FC_BRD_INFO *binfo; + int rets = FAILED; + + binfo = &p_dev_ctl->info; + Cmnd = (struct scsi_cmnd *)p_dev_ctl->rhst_head; + while(Cmnd) { + /* SCSI layer issued Host Reset */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0715, /* ptr to msg structure */ + fc_mes0715, /* ptr to msg */ + fc_msgBlk0715.msgPreambleStr, /* begin varargs */ + Cmnd->device->id, + (uint32)Cmnd->device->lun); /* end varargs */ + /* + * Check to see if we should wait for FC_READY + */ + if ((binfo->fc_ffstate < FC_LINK_DOWN) || (binfo->fc_ffstate == FC_ERROR)) { + rets = FAILED; + } + else { + rets = SUCCESS; + } + oCmnd = Cmnd; + Cmnd = SCMD_NEXT(Cmnd); + SCMD_NEXT(oCmnd) = 0; + } + p_dev_ctl->rhst_head = 0; + p_dev_ctl->rhst_list = 0; + + return(rets); +} + + +static char addrStr[18]; + +/****************************************************************************** +* Function name : addr_sprintf +* +* Description : Used by fc_info for displaying WWNN / WWPNs +* +******************************************************************************/ +_static_ char * addr_sprintf(register uchar *ap) +{ + register int i; + register char *cp = addrStr; + static char digits[] = "0123456789abcdef"; + + for (i = 0; i < 8; i++) { + *cp++ = digits[*ap >> 4]; + *cp++ = digits[*ap++ & 0xf]; + *cp++ = ':'; + } + *--cp = 0; + return(addrStr); +} /* End addr_sprintf */ + +/****************************************************************************** +* Function name : fc_info +* +* Description : Prepare host information for mid-level +* +******************************************************************************/ +const char *fc_info(struct Scsi_Host *host) +{ + static char buf[4096]; + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO * binfo; + struct pci_dev *pdev; + char *multip; + int idx, i, j, incr; + char hdw[9]; 
+ NODELIST *nlp; + + buf[0]='\0'; + p_dev_ctl = (fc_dev_ctl_t *)host->hostdata[0]; + if(!p_dev_ctl) + return buf; + binfo = &BINFO; + pdev = p_dev_ctl->pcidev ; + + for(idx=0; idx < MAX_FC_BRDS; idx++) { + if(p_dev_ctl == DD_CTL.p_dev[idx]) + break; + } + + multip = "LPFC"; + + if (!(p_dev_ctl->dev_flag & FC_FULL_INFO_CALL)) { + if(pdev != NULL) { + switch(pdev->device){ + case PCI_DEVICE_ID_CENTAUR: + if(FC_JEDEC_ID(VPD.rev.biuRev) == CENTAUR_2G_JEDEC_ID) { + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP9002 on PCI bus %02x device %02x irq %d", + p_dev_ctl->pcidev->bus->number, p_dev_ctl->pcidev->devfn, + p_dev_ctl->pcidev->irq); + } else { + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP9000 on PCI bus %02x device %02x irq %d", + p_dev_ctl->pcidev->bus->number, p_dev_ctl->pcidev->devfn, + p_dev_ctl->pcidev->irq); + } + break; + case PCI_DEVICE_ID_DRAGONFLY: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP8000 on PCI bus %02x device %02x irq %d", + p_dev_ctl->pcidev->bus->number, p_dev_ctl->pcidev->devfn, + p_dev_ctl->pcidev->irq); + break; + case PCI_DEVICE_ID_PEGASUS: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP9802 on PCI bus %02x device %02x irq %d", + p_dev_ctl->pcidev->bus->number, p_dev_ctl->pcidev->devfn, + p_dev_ctl->pcidev->irq); + break; + case PCI_DEVICE_ID_PFLY: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP982 on PCI bus %02x device %02x irq %d", + p_dev_ctl->pcidev->bus->number, p_dev_ctl->pcidev->devfn, + p_dev_ctl->pcidev->irq); + break; + case PCI_DEVICE_ID_THOR: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP10000 on PCI bus %02x device %02x irq %d", + p_dev_ctl->pcidev->bus->number, p_dev_ctl->pcidev->devfn, + p_dev_ctl->pcidev->irq); + break; + case PCI_DEVICE_ID_TFLY: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP1050 on PCI bus %02x device %02x irq %d", + p_dev_ctl->pcidev->bus->number, p_dev_ctl->pcidev->devfn, + p_dev_ctl->pcidev->irq); + break; + default: + 
sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse on PCI bus %02x device %02x irq %d", + p_dev_ctl->pcidev->bus->number, p_dev_ctl->pcidev->devfn, + p_dev_ctl->pcidev->irq); + } + } + p_dev_ctl->dev_flag |= FC_FULL_INFO_CALL; + return(buf); + } + + sprintf(buf, "Emulex LightPulse %s Driver Version: %s\n", + multip, lpfc_release_version); + + if(pdev != NULL) { + switch(pdev->device){ + case PCI_DEVICE_ID_CENTAUR: + if(FC_JEDEC_ID(VPD.rev.biuRev) == CENTAUR_2G_JEDEC_ID) { + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP9002 2 Gigabit PCI Fibre Channel Adapter\n"); + } else { + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP9000 1 Gigabit PCI Fibre Channel Adapter\n"); + } + break; + case PCI_DEVICE_ID_DRAGONFLY: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP8000 1 Gigabit PCI Fibre Channel Adapter\n"); + break; + case PCI_DEVICE_ID_PEGASUS: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP9802 2 Gigabit PCI Fibre Channel Adapter\n"); + break; + case PCI_DEVICE_ID_PFLY: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP982 2 Gigabit PCI Fibre Channel Adapter\n"); + break; + case PCI_DEVICE_ID_THOR: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP10000 2 Gigabit PCI Fibre Channel Adapter\n"); + break; + case PCI_DEVICE_ID_TFLY: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse LP1050 2 Gigabit PCI Fibre Channel Adapter\n"); + break; + default: + sprintf(&buf[strlen(buf)], + "HBA: Emulex LightPulse PCI Fibre Channel Adapter\n"); + } + } + + sprintf(&buf[strlen(buf)], "SerialNum: %s\n", binfo->fc_SerialNumber); + + decode_firmware_rev(binfo, &VPD); + sprintf(&buf[strlen(buf)], "Firmware Version: %s\n", fwrevision); + + sprintf(&buf[strlen(buf)], "Hdw: "); + /* Convert JEDEC ID to ascii for hardware version */ + incr = VPD.rev.biuRev; + for(i=0;i<8;i++) { + j = (incr & 0xf); + if(j <= 9) + hdw[7-i] = (char)((uchar)0x30 + (uchar)j); + else + hdw[7-i] = (char)((uchar)0x61 + (uchar)(j-10)); + incr = (incr >> 4); + 
} + hdw[8] = 0; + strcat(buf, hdw); + + sprintf(&buf[strlen(buf)], "\nVendorId: 0x%x\n", + ((((uint32)pdev->device) << 16) | (uint32)(pdev->vendor))); + + sprintf(&buf[strlen(buf)], "Portname: "); + strcat(buf, addr_sprintf((uchar *)&binfo->fc_portname)); + + sprintf(&buf[strlen(buf)], " Nodename: "); + strcat(buf, addr_sprintf((uchar *)&binfo->fc_nodename)); + + switch (binfo->fc_ffstate) { + case FC_INIT_START: + case FC_INIT_NVPARAMS: + case FC_INIT_REV: + case FC_INIT_PARTSLIM: + case FC_INIT_CFGRING: + case FC_INIT_INITLINK: + case FC_LINK_DOWN: + sprintf(&buf[strlen(buf)], "\n\nLink Down\n"); + break; + case FC_LINK_UP: + case FC_INIT_SPARAM: + case FC_CFG_LINK: + sprintf(&buf[strlen(buf)], "\n\nLink Up\n"); + break; + case FC_FLOGI: + case FC_LOOP_DISC: + case FC_NS_REG: + case FC_NS_QRY: + case FC_NODE_DISC: + case FC_REG_LOGIN: + case FC_CLEAR_LA: + sprintf(&buf[strlen(buf)], "\n\nLink Up - Discovery\n"); + break; + case FC_READY: + sprintf(&buf[strlen(buf)], "\n\nLink Up - Ready:\n"); + sprintf(&buf[strlen(buf)], " PortID 0x%x\n", binfo->fc_myDID); + if (binfo->fc_topology == TOPOLOGY_LOOP) { + if(binfo->fc_flag & FC_PUBLIC_LOOP) + sprintf(&buf[strlen(buf)], " Public Loop\n"); + else + sprintf(&buf[strlen(buf)], " Private Loop\n"); + } else { + if(binfo->fc_flag & FC_FABRIC) + sprintf(&buf[strlen(buf)], " Fabric\n"); + else + sprintf(&buf[strlen(buf)], " Point-2-Point\n"); + } + + if(binfo->fc_linkspeed == LA_2GHZ_LINK) + sprintf(&buf[strlen(buf)], " Current speed 2G\n"); + else + sprintf(&buf[strlen(buf)], " Current speed 1G\n"); + + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if (nlp->nlp_state == NLP_ALLOC) { + sprintf(&buf[strlen(buf)], "\nlpfc%dt%02x DID %06x WWPN ", + idx, FC_SCSID(nlp->id.nlp_pan, nlp->id.nlp_sid), nlp->nlp_DID); + strcat(buf, addr_sprintf((uchar *)&nlp->nlp_portname)); + strcat(buf, " WWNN "); + strcat(buf, addr_sprintf((uchar *)&nlp->nlp_nodename)); + } + if ((4096 - strlen(buf)) < 90) + 
break; + nlp = (NODELIST *)nlp->nlp_listp_next; + } + if(nlp != (NODELIST *)&binfo->fc_nlpmap_start) + strcat(buf,"\n....\n"); + } + + return (buf); +} + +/****************************************************************************** +* Function name : fc_data_direction +* +* Description : If we do not relay on Cmnd->sc_data_direction call this +* routine to determine if we are doing a read or write. +* +******************************************************************************/ +int fc_data_direction(struct scsi_cmnd *Cmnd) +{ + int ret_code; + + switch (Cmnd->cmnd[0]) { + case WRITE_6: + case WRITE_10: + case WRITE_12: + case CHANGE_DEFINITION: + case LOG_SELECT: + case MODE_SELECT: + case MODE_SELECT_10: + case WRITE_BUFFER: + case VERIFY: + case WRITE_VERIFY: + case WRITE_VERIFY_12: + case WRITE_LONG: + case WRITE_LONG_2: + case WRITE_SAME: + case SEND_DIAGNOSTIC: + case FORMAT_UNIT: + case REASSIGN_BLOCKS: + case FCP_SCSI_RELEASE_LUNR: + case FCP_SCSI_RELEASE_LUNV: + case HPVA_SETPASSTHROUGHMODE: + case HPVA_EXECUTEPASSTHROUGH: + case HPVA_CREATELUN: + case HPVA_SETLUNSECURITYLIST: + case HPVA_SETCLOCK: + case HPVA_RECOVER: + case HPVA_GENERICSERVICEOUT: + case DMEP_EXPORT_OUT: + ret_code = B_WRITE; + break; + case MDACIOCTL_DIRECT_CMD: + switch (Cmnd->cmnd[2]) { + case MDACIOCTL_STOREIMAGE: + case MDACIOCTL_WRITESIGNATURE: + case MDACIOCTL_SETREALTIMECLOCK: + case MDACIOCTL_PASS_THRU_CDB: + case MDACIOCTL_CREATENEWCONF: + case MDACIOCTL_ADDNEWCONF: + case MDACIOCTL_MORE: + case MDACIOCTL_SETPHYSDEVPARAMETER: + case MDACIOCTL_SETLOGDEVPARAMETER: + case MDACIOCTL_SETCONTROLLERPARAMETER: + case MDACIOCTL_WRITESANMAP: + case MDACIOCTL_SETMACADDRESS: + ret_code = B_WRITE; + break; + case MDACIOCTL_PASS_THRU_INITIATE: + if (Cmnd->cmnd[3] & 0x80) { + ret_code = B_WRITE; + } + else { + ret_code = B_READ; + } + break; + default: + ret_code = B_READ; + } + break; + default: +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + if (Cmnd->sc_data_direction == 
SCSI_DATA_WRITE) + ret_code = B_WRITE; + else +#endif + ret_code = B_READ; + } +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + if(ret_code == B_WRITE) + Cmnd->sc_data_direction = SCSI_DATA_WRITE; + else + Cmnd->sc_data_direction = SCSI_DATA_READ; +#endif + return ret_code; +} + +int +chkLun( +node_t *node_ptr, +fc_lun_t lun) +{ + uint32 rptLunLen; + uint32 *datap32; + uint32 lunvalue, i; + + if(node_ptr->virtRptLunData) { + datap32 = (uint32 *)node_ptr->virtRptLunData; + rptLunLen = SWAP_DATA(*datap32); + for(i=0; i < rptLunLen; i+=8) { + datap32 += 2; + lunvalue = (((* datap32) >> FC_LUN_SHIFT) & 0xff); + if (lunvalue == (uint32)lun) + return 1; + } + return 0; + } + else { + return 1; + } +} +/****************************************************************************** +* Function name : fc_queuecommand +* +* Description : Linux queue command entry +* +******************************************************************************/ +int fc_queuecommand(struct scsi_cmnd *Cmnd, + void (*done)(struct scsi_cmnd *)) +{ + FC_BRD_INFO * binfo; + struct Scsi_Host *host; + fc_dev_ctl_t *p_dev_ctl; + iCfgParam *clp; + struct dev_info *dev_ptr; + node_t *node_ptr; + struct sc_buf *sp; + int dev_index,target,retcod; + fc_lun_t lun; + ulong iflg; + struct lpfc_dpc *ldp; + + + host = Cmnd->device->host; + fc_bzero(Cmnd->sense_buffer, 16); + if(!host){ + retcod=DID_BAD_TARGET; + Cmnd->result = ScsiResult(retcod, 0); + done(Cmnd); + return(0); + } + Cmnd->scsi_done = done; /* Save done routine for this command */ + + p_dev_ctl = (fc_dev_ctl_t *)host->hostdata[0]; + if(p_dev_ctl == 0) { + retcod=DID_BAD_TARGET; + Cmnd->result = ScsiResult(retcod, 0); + done(Cmnd); + return(0); + } + + + retcod = 0; + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + LPFC_LOCK_DRIVER(12); + ldp = &lpfc_dpc[binfo->fc_brd_no]; + if (ldp->dpc_wait == NULL) { + retcod=DID_NO_CONNECT; +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + if(lpfc_use_removable) { + 
Cmnd->sense_buffer[0] = 0x70; + Cmnd->sense_buffer[2] = UNIT_ATTENTION; + Cmnd->device->removable = 1; + } +#endif + Cmnd->result = ScsiResult(retcod, 0); + FCSTATCTR.fcpRsvd8++; + done(Cmnd); + LPFC_UNLOCK_DRIVER; + return(0); + } + + target = (int)Cmnd->device->id; + lun = (fc_lun_t)Cmnd->device->lun; + + if(lun > MAX_FCP_LUN) { + retcod=DID_BAD_TARGET; + Cmnd->result = ScsiResult(retcod, 0); + LPFC_UNLOCK_DRIVER; + done(Cmnd); + return(0); + } + + /* + * Device for target/lun + */ + dev_index = INDEX(ZERO_PAN, target); + if (!(dev_ptr = fc_find_lun(binfo, dev_index, lun))) { + if(!(dev_ptr=fc_getDVI(p_dev_ctl, target, lun))){ + retcod=DID_NO_CONNECT; + Cmnd->result = ScsiResult(retcod, 0); + FCSTATCTR.fcpRsvd3++; + LPFC_UNLOCK_DRIVER; + done(Cmnd); + return(0); + } + } + + node_ptr = binfo->device_queue_hash[dev_index].node_ptr; + if((node_ptr) && + ((node_ptr->flags & FC_NODEV_TMO) || (lun >= node_ptr->max_lun))) { + retcod=DID_NO_CONNECT; + Cmnd->result = ScsiResult(retcod, 0); + LPFC_UNLOCK_DRIVER; + done(Cmnd); + return(0); + } + + if((node_ptr) && (Cmnd->cmnd[0] == 0x12) && (!chkLun(node_ptr, lun))) { + retcod=DID_NO_CONNECT; + Cmnd->result = ScsiResult(retcod, 0); + LPFC_UNLOCK_DRIVER; + done(Cmnd); + return(0); + } + + if(binfo->fc_flag & FC_LD_TIMEOUT) { + retcod=DID_NO_CONNECT; + Cmnd->result = ScsiResult(retcod, 0); + LPFC_UNLOCK_DRIVER; + done(Cmnd); + return(0); + } + + dev_ptr->qcmdcnt++; + + sp = dev_ptr->scp; + if(!sp){ + retcod=DID_NO_CONNECT; + Cmnd->result = ScsiResult(retcod, 0); + dev_ptr->iodonecnt++; + dev_ptr->errorcnt++; + FCSTATCTR.fcpRsvd5++; + LPFC_UNLOCK_DRIVER; + done(Cmnd); + return(0); + } + + Cmnd->host_scribble = (void *)sp; + dev_ptr->scp = sp->bufstruct.av_forw; + dev_ptr->scpcnt--; + fc_bzero(sp,sizeof(struct sc_buf)); + sp->bufstruct.cmnd = Cmnd; + sp->current_devp = dev_ptr; + FCSTATCTR.fcpRsvd0++; + lpfc_scsi_delete_timer(Cmnd); + + /* Since we delete active timers, we can use eh_timeout.data as a linked + * list ptr 
internally within the driver. + */ + if(p_dev_ctl->qcmd_head == NULL) { + p_dev_ctl->qcmd_head = (void *)Cmnd; + p_dev_ctl->qcmd_list = (void *)Cmnd; + } else { + ((struct scsi_cmnd *)(p_dev_ctl->qcmd_list))->eh_timeout.data = (ulong)Cmnd; + p_dev_ctl->qcmd_list = (void *)Cmnd; + } + Cmnd->eh_timeout.data = (unsigned long) NULL; + + if (ldp->dpc_active == 0) { + LPFC_UNLOCK_DRIVER; + up(ldp->dpc_wait); + } + else { + LPFC_UNLOCK_DRIVER; + } + return 0; +} + +/****************************************************************************** +* Function name : do_fc_queuecommand +* +* Description : +* +******************************************************************************/ +int do_fc_queuecommand(fc_dev_ctl_t *p_dev_ctl, + ulong siflg) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + struct dev_info * dev_ptr; + struct sc_buf * sp; + struct buf * bp; + struct scsi_cmnd * Cmnd; + struct scsi_cmnd * oCmnd; + int i, retcod, firstin; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + Cmnd = (struct scsi_cmnd *)p_dev_ctl->qcmd_head; + firstin = 1; + + + while(Cmnd) { + sp = (struct sc_buf *)(Cmnd->host_scribble); + dev_ptr = sp->current_devp; + + sp->flags = SC_RESUME; + + + IOcnt++; + /* + * Buffer count depends on whether scatter-gather is used or not + */ + if(!Cmnd->use_sg){ + sp->bufstruct.b_bcount = (int)Cmnd->request_bufflen; + } + else { + struct scatterlist *scatter = (struct scatterlist *)Cmnd->buffer; + sp->bufstruct.b_bcount = 0; + + for(i=0; i < Cmnd->use_sg; i++) + sp->bufstruct.b_bcount += scatter[i].length; + } + + /* + * Set read/write flag + */ +#if LINUX_VERSION_CODE > LinuxVersionCode(2,4,4) + if(lpfc_use_data_direction) { + if(Cmnd->sc_data_direction == SCSI_DATA_WRITE) + sp->bufstruct.b_flags = B_WRITE; + else + sp->bufstruct.b_flags = B_READ; + } + else { + sp->bufstruct.b_flags = fc_data_direction(Cmnd); + } +#else + sp->bufstruct.b_flags = fc_data_direction(Cmnd); +#endif + + if (Cmnd->cmnd[0] == TEST_UNIT_READY) + 
sp->bufstruct.b_bcount = 0; + + /* + * Fill in the sp struct + */ + bcopy((void *)Cmnd->cmnd, (void *)&sp->scsi_command.scsi_cmd, 16); + + sp->scsi_command.scsi_length=Cmnd->cmd_len; + sp->scsi_command.scsi_id=Cmnd->device->id; + sp->scsi_command.scsi_lun=Cmnd->device->lun; + if (Cmnd->device->tagged_supported) { + switch (Cmnd->tag) { + case HEAD_OF_QUEUE_TAG: + sp->scsi_command.flags = HEAD_OF_Q; + break; + case ORDERED_QUEUE_TAG: + sp->scsi_command.flags = ORDERED_Q; + break; + default: + sp->scsi_command.flags = SIMPLE_Q; + break; + } + } + else + sp->scsi_command.flags = 0; + + sp->timeout_value = Cmnd->timeout_per_command / fc_ticks_per_second; + sp->adap_q_status = 0; + sp->bufstruct.av_forw = NULL; + + retcod = 0; + if(p_dev_ctl->device_state == DEAD) { + retcod=DID_NO_CONNECT; + Cmnd->result = ScsiResult(retcod, 0); + FCSTATCTR.fcpRsvd8++; + goto done; + } + + /* + * Is it a valid target? + */ + if((dev_ptr == 0) || (dev_ptr->nodep == 0)) { + retcod=DID_NO_CONNECT; + Cmnd->result = ScsiResult(retcod, 0); + FCSTATCTR.fcpRsvd4++; + goto done; + } + + if(dev_ptr->nodep == 0) { + FCSTATCTR.fcpRsvd6++; + retcod=DID_SOFT_ERROR; + } + else { + if((clp[CFG_LINKDOWN_TMO].a_current == 0) || clp[CFG_HOLDIO].a_current) { + retcod=0; + } + else { + retcod=0; + if (binfo->fc_flag & FC_LD_TIMEOUT) { + if(clp[CFG_NODEV_TMO].a_current == 0) { + retcod=DID_SOFT_ERROR; + FCSTATCTR.fcpRsvd7++; + } + else { + if(dev_ptr->nodep->flags & FC_NODEV_TMO) { + retcod=DID_SOFT_ERROR; + FCSTATCTR.fcpRsvd7++; + } + } + } + } + } + if(retcod) + goto done; + retcod=DID_OK; + + + if (dev_ptr->pend_head == NULL) { + dev_ptr->pend_head = sp; + dev_ptr->pend_tail = sp; + } else { + dev_ptr->pend_tail->bufstruct.av_forw = (struct buf *)sp; + dev_ptr->pend_tail = sp; + } + dev_ptr->pend_count++; + + /* + * put on the DEVICE_WAITING_head + */ + fc_enq_wait(dev_ptr); + + /* + * Send out the SCSI REPORT LUN command before sending the very + * first SCSI command to that device. 
+ */ + if (dev_ptr->nodep->rptlunstate == REPORT_LUN_REQUIRED) { + dev_ptr->nodep->rptlunstate = REPORT_LUN_ONGOING; + issue_report_lun(p_dev_ctl, dev_ptr, 0); + } else { + if ( (dev_ptr->nodep->rptlunstate == REPORT_LUN_COMPLETE) && + !(dev_ptr->flags & CHK_SCSI_ABDR) && dev_ptr->numfcbufs) + fc_issue_cmd(p_dev_ctl); + } + + /* + * Done + */ +done: + if(retcod!=DID_OK) { + dev_ptr->iodonecnt++; + dev_ptr->errorcnt++; + bp = (struct buf *) sp; + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + sp->status_validity = SC_ADAPTER_ERROR; + sp->general_card_status = SC_SCSI_BUS_RESET; + fc_delay_iodone(p_dev_ctl, sp); + } + oCmnd = Cmnd; + Cmnd = (struct scsi_cmnd *)Cmnd->eh_timeout.data; + oCmnd->eh_timeout.data = 0; + } + p_dev_ctl->qcmd_head = 0; + p_dev_ctl->qcmd_list = 0; + + return 0; +} + +/****************************************************************************** +* Function name : fc_rtalloc +* +* Description : +* +******************************************************************************/ +_local_ int fc_rtalloc(fc_dev_ctl_t *p_dev_ctl, + struct dev_info *dev_ptr) +{ + int i; + unsigned int size; + fc_buf_t *fcptr; + struct sc_buf *sp; + dma_addr_t phys; + FC_BRD_INFO * binfo; + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + + binfo = &p_dev_ctl->info; + for (i = 0; i < dev_ptr->fcp_lun_queue_depth+1 ; i++) { + + size = fc_po2(sizeof(fc_buf_t)); + phys = (dma_addr_t)((ulong)INVALID_PHYS); + + buf_info = &bufinfo; + buf_info->size = size; + buf_info->flags = FC_MBUF_DMA; + buf_info->align = size; + buf_info->phys = 0; + buf_info->dma_handle = 0; + buf_info->data_handle = 0; + fc_malloc(p_dev_ctl, buf_info); + fcptr = buf_info->virt; + phys = (dma_addr_t)((ulong)buf_info->phys); + if (!fcptr || is_invalid_phys((void *)((ulong)phys))) { + return(0); + } + + fc_bzero(fcptr, sizeof(fc_buf_t)); + + fcptr->dev_ptr = dev_ptr; + fcptr->phys_adr = (void *)((ulong)phys); + +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + fcptr->fc_cmd_dma_handle = (ulong 
*)fcptr->phys_adr; +#endif + fc_enq_fcbuf(fcptr); + + sp = (struct sc_buf *)fc_kmem_zalloc(sizeof(struct sc_buf)); + if (!sp) { + return(0); + } + if(dev_ptr->scp) { + sp->bufstruct.av_forw = dev_ptr->scp; + dev_ptr->scp = sp; + } + else { + dev_ptr->scp = sp; + dev_ptr->scp->bufstruct.av_forw = 0; + } + dev_ptr->scpcnt++; + } /* end for loop */ + return(1); +} /* end of fc_rtalloc */ + +/****************************************************************************** +* Function name : do_fc_intr_handler +* +* Description : Local interupt handler +* +******************************************************************************/ +irqreturn_t do_fc_intr_handler(int irq, + void *dev_id, + struct pt_regs *regs) +{ + struct intr *ihs; + FC_BRD_INFO * binfo; + fc_dev_ctl_t * p_dev_ctl; + void *ioa; + volatile uint32 ha_copy; + uint32 i; + ulong siflg; + ulong iflg; + + /* + * If driver is unloading, we can stop processing interrupt. + */ + if (lpfc_driver_unloading) + return IRQ_HANDLED; + + ihs = (struct intr *)dev_id; + p_dev_ctl = (fc_dev_ctl_t * )ihs; + if(!p_dev_ctl){ + return IRQ_HANDLED; + } + + for(i=0;iinfo; + /* Ignore all interrupts during initialization. 
*/ + if(binfo->fc_ffstate < FC_LINK_DOWN) { + LPFC_UNLOCK_DRIVER; + return IRQ_HANDLED; + } + + ioa = FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + + /* Read host attention register to determine interrupt source */ + ha_copy = READ_CSR_REG(binfo, FC_HA_REG(binfo, ioa)); + + /* Clear Attention Sources, except ERROR (to preserve status) & LATT + * (ha_copy & ~HA_ERATT & ~HA_LATT); + */ + WRITE_CSR_REG(binfo, FC_HA_REG(binfo,ioa), (ha_copy & ~(HA_LATT | HA_ERATT))); + + if (ha_copy & HA_ERATT) { /* Link / board error */ + volatile uint32 status; + + /* do what needs to be done, get error from STATUS REGISTER */ + status = READ_CSR_REG(binfo, FC_STAT_REG(binfo, ioa)); + /* Clear Chip error bit */ + WRITE_CSR_REG(binfo, FC_HA_REG(binfo, ioa), HA_ERATT); + if(p_dev_ctl->dpc_hstatus == 0) + p_dev_ctl->dpc_hstatus = status; + } + + if (ha_copy & HA_LATT) { /* Link Attention interrupt */ + volatile uint32 control; + + if (binfo->fc_process_LA) { + control = READ_CSR_REG(binfo, FC_HC_REG(binfo, ioa)); + control &= ~HC_LAINT_ENA; + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), control); + /* Clear Link Attention in HA REG */ + WRITE_CSR_REG(binfo, FC_HA_REG(binfo,ioa), (volatile uint32)(HA_LATT)); + } + } + + FC_UNMAP_MEMIO(ioa); + + + p_dev_ctl->dpc_ha_copy |= ha_copy; + + { + struct lpfc_dpc *ldp; + ldp = &lpfc_dpc[binfo->fc_brd_no]; + if ((p_dev_ctl->power_up == 0) || (ldp->dpc_wait == NULL)) { + do_fc_intr((struct intr *)p_dev_ctl); + LPFC_UNLOCK_DRIVER; + fc_flush_done_cmds(p_dev_ctl, siflg); + } + else { + if (ldp->dpc_active == 0) { + LPFC_UNLOCK_DRIVER; + up(ldp->dpc_wait); + } + else { + LPFC_UNLOCK_DRIVER; + fc_flush_done_cmds(p_dev_ctl, siflg); + } + } + } + return IRQ_HANDLED; +} + +/****************************************************************************** +* Function name : do_fc_intr +* +* Description : +* p_ihs also points to device control area +******************************************************************************/ +int 
do_fc_intr(struct intr *p_ihs) +{ + fc_dev_ctl_t * p_dev_ctl = (fc_dev_ctl_t * )p_ihs; + volatile uint32 ha_copy; + FC_BRD_INFO * binfo; + iCfgParam * clp; + fcipbuf_t * mbp; + MAILBOXQ * mb; + IOCBQ * delayiocb; + IOCBQ * temp; + IOCBQ * processiocb; + IOCBQ * endiocb; + int ipri, rc; + + binfo = &BINFO; + ipri = disable_lock(FC_LVL, &CMD_LOCK); + binfo->fc_flag |= FC_INTR_THREAD; + + /* Read host attention register to determine interrupt source */ + ha_copy = p_dev_ctl->dpc_ha_copy; + p_dev_ctl->dpc_ha_copy = 0; + + if (ha_copy) { + rc = INTR_SUCC; + binfo->fc_flag |= FC_INTR_WORK; + } else { + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if (clp[CFG_INTR_ACK].a_current && (binfo->fc_flag&FC_INTR_WORK)) { + rc = INTR_SUCC; /* Just claim the first non-working interrupt */ + binfo->fc_flag &= ~FC_INTR_WORK; + } else { + if (clp[CFG_INTR_ACK].a_current == 2) + rc = INTR_SUCC; /* Always claim the interrupt */ + else + rc = INTR_FAIL; + } + } + + if (binfo->fc_flag & FC_OFFLINE_MODE) { + binfo->fc_flag &= ~FC_INTR_THREAD; + unlock_enable(ipri, &CMD_LOCK); + return(INTR_FAIL); + } + processiocb = 0; + if(binfo->fc_delayxmit) { + delayiocb = binfo->fc_delayxmit; + binfo->fc_delayxmit = 0; + endiocb = 0; + while(delayiocb) { + temp = delayiocb; + delayiocb = (IOCBQ *)temp->q; + temp->rsvd2--; + /* If retry == 0, process IOCB */ + if(temp->rsvd2 == 0) { + if(processiocb == 0) { + processiocb = temp; + } + else { + endiocb->q = (uchar *)temp; + } + endiocb = temp; + temp->q = 0; + } + else { + /* Make delayxmit point to first non-zero retry */ + if(binfo->fc_delayxmit == 0) + binfo->fc_delayxmit = temp; + } + } + if(processiocb) { + /* Handle any delayed IOCBs */ + endiocb = processiocb; + while(endiocb) { + temp = endiocb; + endiocb = (IOCBQ *)temp->q; + temp->q = 0; + issue_iocb_cmd(binfo, &binfo->fc_ring[FC_ELS_RING], temp); + } + } + } + + if (ha_copy & HA_ERATT) { /* Link / board error */ + unlock_enable(ipri, &CMD_LOCK); + handle_ff_error(p_dev_ctl); + return(rc); + } 
else { + if (ha_copy & HA_MBATT) { /* Mailbox interrupt */ + handle_mb_event(p_dev_ctl); + if(binfo->fc_flag & FC_PENDING_RING0) { + binfo->fc_flag &= ~FC_PENDING_RING0; + ha_copy |= HA_R0ATT; /* event on ring 0 */ + } + } + + if (ha_copy & HA_LATT) { /* Link Attention interrupt */ + if (binfo->fc_process_LA) { + handle_link_event(p_dev_ctl); + } + } + + if (ha_copy & HA_R0ATT) { /* event on ring 0 */ + if(binfo->fc_mbox_active == 0) + handle_ring_event(p_dev_ctl, 0, (ha_copy & 0x0000000F)); + else + binfo->fc_flag |= FC_PENDING_RING0; + } + + if (ha_copy & HA_R1ATT) { /* event on ring 1 */ + /* This ring handles IP. Defer processing anything on this ring + * till all FCP ELS traffic settles down. + */ + if (binfo->fc_ffstate <= FC_NODE_DISC) + binfo->fc_deferip |= (uchar)((ha_copy >> 4) & 0x0000000F); + else + handle_ring_event(p_dev_ctl, 1, ((ha_copy >> 4) & 0x0000000F)); + } + + if (ha_copy & HA_R2ATT) { /* event on ring 2 */ + handle_ring_event(p_dev_ctl, 2, ((ha_copy >> 8) & 0x0000000F)); + } + + if (ha_copy & HA_R3ATT) { /* event on ring 3 */ + handle_ring_event(p_dev_ctl, 3, ((ha_copy >> 12) & 0x0000000F)); + } + } + + if((processiocb == 0) && (binfo->fc_delayxmit) && + (binfo->fc_mbox_active == 0)) { + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + fc_read_rpi(binfo, (uint32)1, (MAILBOX * )mb, (uint32)0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + + binfo->fc_flag &= ~FC_INTR_THREAD; + + while (p_dev_ctl->mbufl_head != 0) { + binfo->fc_flag |= FC_INTR_WORK; + mbp = (fcipbuf_t * )p_dev_ctl->mbufl_head; + p_dev_ctl->mbufl_head = (uchar * )fcnextpkt(mbp); + fcnextpkt(mbp) = 0; + fc_xmit(p_dev_ctl, mbp); + } + p_dev_ctl->mbufl_tail = 0; + unlock_enable(ipri, &CMD_LOCK); + return(rc); +} /* End do_fc_intr */ + +/****************************************************************************** +* Function name : fc_memmap +* +* Description : Called from fc_attach to map 
shared memory (SLIM and CSRs) +* for adapter and to setup memory for SLI2 interface. +******************************************************************************/ +int fc_memmap(fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO *binfo; + struct pci_dev *pdev; + int reg; + ulong base; + + binfo = &BINFO; + + /* + * Get PCI for board + */ + pdev = p_dev_ctl->pcidev; + if(!pdev){ + panic("no dev in pcimap\n"); + return(1); + } +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,3) + /* Configure DMA attributes. */ +#if BITS_PER_LONG > 32 + if (pci_set_dma_mask(pdev, (uint64_t) 0xffffffffffffffff)) { + printk("Cannot set dma mask\n"); + return(1); + } +#else + if (pci_set_dma_mask(pdev, (uint64_t) 0xffffffff)) { + printk("Cannot set dma mask\n"); + return(1); + } +#endif +#else +#if BITS_PER_LONG > 32 + pdev->dma_mask = 0xffffffffffffffff; +#endif +#endif + + /* + * address in first register + */ + reg = 0; + reg = pci_getadd(pdev, reg, &base); + + /* + * need to mask the value to get the physical address + */ + base &= PCI_BASE_ADDRESS_MEM_MASK; + DDS.bus_mem_addr = base; + + /* + * next two registers are the control, get the first one, if doing direct io + * if i/o port is to be used get the second + * Note that pci_getadd returns the correct next register + */ + reg = pci_getadd(pdev, reg, &base); + base &= PCI_BASE_ADDRESS_MEM_MASK; + DDS.bus_io_addr = base; + /* + * Map adapter SLIM and Control Registers + */ + binfo->fc_iomap_mem = remap_pci_mem((ulong)DDS.bus_mem_addr,FF_SLIM_SIZE); + if(binfo->fc_iomap_mem == ((void *)(-1))){ + return (ENOMEM); + } + + binfo->fc_iomap_io =remap_pci_mem((ulong)DDS.bus_io_addr,FF_REG_AREA_SIZE); + if(binfo->fc_iomap_io == ((void *)(-1))){ + unmap_pci_mem((ulong)binfo->fc_iomap_mem); + return (ENOMEM); + } + + + /* + * Setup SLI2 interface + */ + if ((binfo->fc_sli == 2) && (binfo->fc_slim2.virt == 0)) { + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + + buf_info = &bufinfo; + + /* + * Allocate memory for SLI-2 structures + */ + 
buf_info->size = sizeof(SLI2_SLIM); + buf_info->flags = FC_MBUF_DMA; + buf_info->align = fcPAGESIZE; + buf_info->dma_handle = 0; + buf_info->data_handle = 0; + fc_malloc(p_dev_ctl, buf_info); + if (buf_info->virt == NULL) { + + /* + * unmap adapter SLIM and Control Registers + */ + unmap_pci_mem((ulong)binfo->fc_iomap_mem); + unmap_pci_mem((ulong)binfo->fc_iomap_io); + + return (ENOMEM); + } + + binfo->fc_slim2.virt = (uchar * )buf_info->virt; + binfo->fc_slim2.phys = (uchar * )buf_info->phys; + binfo->fc_slim2.data_handle = buf_info->data_handle; + binfo->fc_slim2.dma_handle = buf_info->dma_handle; + fc_bzero((char *)binfo->fc_slim2.virt, sizeof(SLI2_SLIM)); + } + return(0); +} + +/****************************************************************************** +* Function name : fc_unmemmap +* +* Description : Called from fc_detach to unmap shared memory (SLIM and CSRs) +* for adapter +* +******************************************************************************/ +int fc_unmemmap(fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO *binfo; + + binfo = &BINFO; + + /* + * unmap adapter SLIM and Control Registers + */ + unmap_pci_mem((ulong)binfo->fc_iomap_mem); + unmap_pci_mem((ulong)binfo->fc_iomap_io); + /* + * Free resources associated with SLI2 interface + */ + if (binfo->fc_slim2.virt) { + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + + buf_info = &bufinfo; + buf_info->phys = (uint32 * )binfo->fc_slim2.phys; + buf_info->data_handle = binfo->fc_slim2.data_handle; + buf_info->dma_handle = binfo->fc_slim2.dma_handle; + buf_info->flags = FC_MBUF_DMA; + + buf_info->virt = (uint32 * )binfo->fc_slim2.virt; + buf_info->size = sizeof(SLI2_SLIM); + fc_free(p_dev_ctl, buf_info); + binfo->fc_slim2.virt = 0; + binfo->fc_slim2.phys = 0; + binfo->fc_slim2.dma_handle = 0; + binfo->fc_slim2.data_handle = 0; + } + return(0); +} + +/****************************************************************************** +* Function name : fc_pcimap +* +* Description : Called from fc_attach to setup 
PCI configuration registers +* +******************************************************************************/ +int fc_pcimap(fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO *binfo; + iCfgParam *clp; + struct pci_dev *pdev; + u16 cmd; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* + * PCI for board + */ + pdev = p_dev_ctl->pcidev; + if(!pdev) + return(1); + + /* + * bus mastering and parity checking enabled + */ + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if(cmd & CMD_PARITY_CHK) + cmd = CMD_CFG_VALUE ; + else + cmd = (CMD_CFG_VALUE & ~(CMD_PARITY_CHK)); + + + pci_write_config_word(pdev, PCI_COMMAND, cmd); + + if(lpfc_pci_latency_clocks) + pci_write_config_byte(pdev, PCI_LATENCY_TMR_REGISTER,(uchar)lpfc_pci_latency_clocks); + + if(lpfc_pci_cache_line) + pci_write_config_byte(pdev, PCI_CACHE_LINE_REGISTER,(uchar)lpfc_pci_cache_line); + + /* + * Get the irq from the pdev structure + */ + DDS.bus_intr_lvl = (int)pdev->irq; + + return(0); +} + +/****************************************************************************** +* Function name : lpfc_cfg_init +* +* Description : Called from handle_ff_error() to bring link back up +* +******************************************************************************/ +int lpfc_cfg_init(fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + struct lpfc_dpc *ldp; + + binfo = &BINFO; + p_dev_ctl->dev_flag |= FC_SCHED_CFG_INIT; + ldp = &lpfc_dpc[binfo->fc_brd_no]; + if ((ldp->dpc_active == 0) && ldp->dpc_wait) + up(ldp->dpc_wait); + return(0); +} + +/****************************************************************************** +* Function name : lpfc_kfree_skb +* +* Description : This routine is only called by the IP portion of the driver +* and the Fabric NameServer portion of the driver. It should +* free a fcipbuf chain. 
+******************************************************************************/ +int lpfc_kfree_skb(struct sk_buff *skb) +{ + struct sk_buff *sskb; + + while(skb->next) { + sskb = skb; + skb = skb->next; + sskb->next = 0; +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + if(in_interrupt()) { + dev_kfree_skb_irq(sskb); + } + else { + dev_kfree_skb(sskb); + } +#else + dev_kfree_skb(sskb); +#endif + } +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + if(in_interrupt()) { + dev_kfree_skb_irq(skb); + } + else { + dev_kfree_skb(skb); + } +#else + dev_kfree_skb(skb); +#endif + return(0); +} + +/****************************************************************************** +* Function name : lpfc_alloc_skb +* +* Description : +* +******************************************************************************/ +struct sk_buff *lpfc_alloc_skb(unsigned int size) +{ + return(alloc_skb(size, GFP_ATOMIC)); +} + +/****************************************************************************** +* Function name : fc_malloc +* +* Description : fc_malloc environment specific routine for memory +* allocation / mapping +* The buf_info->flags field describes the memory operation requested. 
+* +* FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA +* Virtual address is supplied in buf_info->virt +* DMA mapping flag is in buf_info->align (DMA_READ, DMA_WRITE_ONLY, both) +* The mapped physical address is returned buf_info->phys +* +* FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and +* if FC_MBUF_DMA is set the memory is also mapped for DMA +* The byte alignment of the memory request is supplied in buf_info->align +* The byte size of the memory request is supplied in buf_info->size +* The virtual address is returned buf_info->virt +* The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA) +* +******************************************************************************/ +uchar *fc_malloc(fc_dev_ctl_t *p_dev_ctl, + MBUF_INFO *buf_info) +{ + FC_BRD_INFO * binfo; + unsigned int size; + + binfo = &BINFO; + buf_info->phys = (void *)((ulong)INVALID_PHYS); + buf_info->dma_handle = 0; + if (buf_info->flags & FC_MBUF_PHYSONLY) { + if(buf_info->virt == NULL) + return NULL; +#if LINUX_VERSION_CODE <= LinuxVersionCode(2,4,12) + buf_info->phys = (void *)((ulong)pci_map_single(p_dev_ctl->pcidev, + buf_info->virt, buf_info->size, PCI_DMA_BIDIRECTIONAL)); +#else + { + struct page *page = virt_to_page((ulong)(buf_info->virt)); + unsigned long offset = ((unsigned long)buf_info->virt & ~PAGE_MASK); + + buf_info->phys = (void *)((ulong)pci_map_page(p_dev_ctl->pcidev, + page, offset, buf_info->size, PCI_DMA_BIDIRECTIONAL)); + } +#endif + +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + buf_info->dma_handle = buf_info->phys; +#endif + FCSTATCTR.fcMapCnt++; + return((uchar * )buf_info->virt); + } + if((buf_info->flags & FC_MBUF_DMA)) { + size = fc_po2(buf_info->size); + buf_info->phys = (void *)((ulong)INVALID_PHYS); + buf_info->virt = lpfc_kmalloc(size, GFP_ATOMIC, &buf_info->phys, p_dev_ctl); + if (buf_info->virt) { + if(is_invalid_phys(buf_info->phys)) { + lpfc_kfree((unsigned int)buf_info->size, (void 
*)buf_info->virt, (void *)buf_info->phys, p_dev_ctl); + buf_info->virt = 0; + } + } +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + buf_info->dma_handle = buf_info->phys; +#endif + if(buf_info->virt == 0) { + buf_info->phys = (void *)((ulong)INVALID_PHYS); + buf_info->dma_handle = 0; + } + } + else { + buf_info->size = ((buf_info->size + 7) & 0xfffffff8); + buf_info->virt = (uint32 * )lpfc_kmalloc((unsigned int)buf_info->size, GFP_ATOMIC, 0, 0); + if(buf_info->virt) + fc_bzero(buf_info->virt, buf_info->size); + buf_info->phys = (void *)((ulong)INVALID_PHYS); + } + FCSTATCTR.fcMallocCnt++; + FCSTATCTR.fcMallocByte += buf_info->size; + return((uchar * )buf_info->virt); +} + +/****************************************************************************** +* Function name : fc_po2 +* +* Description : Convert size to next highest power of 2 +* +******************************************************************************/ +ulong fc_po2(ulong size) +{ + ulong order; + + for (order = 1; order < size; order <<= 1); + return(order); +} + +void *lpfc_last_dma_page = 0; +int lpfc_dma_page_offset = 0; + +/****************************************************************************** +* Function name : fc_free +* +* Description : Environment specific routine for memory de-allocation/unmapping +* The buf_info->flags field describes the memory operation requested. +* FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped +* for DMA, but not freed. +* The mapped physical address to be unmapped is in buf_info->phys +* FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA +* only if FC_MBUF_DMA is set. 
+* The mapped physical address to be unmapped is in buf_info->phys +* The virtual address to be freed is in buf_info->virt +******************************************************************************/ +void fc_free(fc_dev_ctl_t *p_dev_ctl, + MBUF_INFO *buf_info) +{ + FC_BRD_INFO * binfo; + unsigned int size; + + binfo = &BINFO; + + if (buf_info->flags & FC_MBUF_PHYSONLY) { +#if LINUX_VERSION_CODE <= LinuxVersionCode(2,4,12) + pci_unmap_single(p_dev_ctl->pcidev, + (ulong)(buf_info->phys), buf_info->size, PCI_DMA_BIDIRECTIONAL); +#else + pci_unmap_page(p_dev_ctl->pcidev, + (ulong)(buf_info->phys), buf_info->size, PCI_DMA_BIDIRECTIONAL); +#endif + FCSTATCTR.fcUnMapCnt++; + } + else { + if((buf_info->flags & FC_MBUF_DMA)) { + size = fc_po2(buf_info->size); + lpfc_kfree((unsigned int)buf_info->size, (void *)buf_info->virt, (void *)buf_info->phys, p_dev_ctl); + } + else { + buf_info->size = ((buf_info->size + 7) & 0xfffffff8); + lpfc_kfree((unsigned int)buf_info->size, (void *)buf_info->virt, (void *)((ulong)INVALID_PHYS), 0); + } + FCSTATCTR.fcFreeCnt++; + FCSTATCTR.fcFreeByte += buf_info->size; + } +} + +/****************************************************************************** +* Function name : fc_rdpci_cmd +* +******************************************************************************/ +ushort fc_rdpci_cmd(fc_dev_ctl_t *p_dev_ctl) +{ + u16 cmd; + struct pci_dev *pdev; + + /* + * PCI device + */ + pdev = p_dev_ctl->pcidev; + if(!pdev){ + panic("no dev in fc_rdpci_cmd\n"); + return((ushort)0); + } + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + return((ushort)cmd); +} + +/****************************************************************************** +* Function name : fc_rdpci_32 +* +******************************************************************************/ +uint32 fc_rdpci_32(fc_dev_ctl_t *p_dev_ctl, + uint32 offset) +{ + uint32 cmd; + struct pci_dev *pdev; + + /* + * PCI device + */ + pdev = p_dev_ctl->pcidev; + if(!pdev){ + panic("no dev in 
fc_rdpci_32\n"); + return((ushort)0); + } + pci_read_config_dword(pdev, offset, &cmd); + return(cmd); +} + +/****************************************************************************** +* Function name : fc_wrpci_cmd +* +******************************************************************************/ +void fc_wrpci_cmd(fc_dev_ctl_t *p_dev_ctl, + ushort cfg_value) +{ + struct pci_dev *pdev; + + /* + * PCI device + */ + pdev = p_dev_ctl->pcidev; + if(!pdev){ + panic("no dev in fc_wrpci_cmd\n"); + return; + } + pci_write_config_word(pdev, PCI_COMMAND, cfg_value); +} +/****************************************************************************** +* +* Function name : lpfc_fcp_error() +* +* Description : Handle an FCP response error +* +* Context : called from handle_fcp_event +* Can be called by interrupt thread. +******************************************************************************/ +_static_ void lpfc_fcp_error(fc_buf_t * fcptr, + IOCB * cmd) +{ + FCP_RSP *fcpRsp = &fcptr->fcp_rsp; + struct sc_buf *sp = fcptr->sc_bufp; + struct buf *bp; + struct scsi_cmnd *Cmnd; + + bp = (struct buf *)sp; + Cmnd = bp->cmnd; + + if (fcpRsp->rspStatus2 & RESID_UNDER) { + uint32 len, resid, brd; + + if((fcptr->dev_ptr) && (fcptr->dev_ptr->nodep)) + brd = fcptr->dev_ptr->nodep->ap->info.fc_brd_no; + else + brd = 0; + + len = SWAP_DATA(fcptr->fcp_cmd.fcpDl); + resid = SWAP_DATA(fcpRsp->rspResId); + + /* FCP residual underrun, expected , residual */ + fc_log_printf_msg_vargs( brd, + &fc_msgBlk0716, /* ptr to msg structure */ + fc_mes0716, /* ptr to msg */ + fc_msgBlk0716.msgPreambleStr, /* begin varargs */ + len, + resid, + Cmnd->cmnd[0], + Cmnd->underflow); /* end varargs */ + + switch (Cmnd->cmnd[0]) { + case TEST_UNIT_READY: + case REQUEST_SENSE: + case INQUIRY: + case RECEIVE_DIAGNOSTIC: + case READ_CAPACITY: + case FCP_SCSI_READ_DEFECT_LIST: + case MDACIOCTL_DIRECT_CMD: + break; + default: + if((!(fcpRsp->rspStatus2 & SNS_LEN_VALID)) && + (len - resid < Cmnd->underflow)) { 
+ /* FCP command residual underrun converted to error */ + fc_log_printf_msg_vargs( brd, + &fc_msgBlk0717, /* ptr to msg structure */ + fc_mes0717, /* ptr to msg */ + fc_msgBlk0717.msgPreambleStr, /* begin varargs */ + Cmnd->cmnd[0], + Cmnd->underflow, + len, + resid); /* end varargs */ + fcpRsp->rspStatus3 = SC_COMMAND_TERMINATED; + fcpRsp->rspStatus2 &= ~RESID_UNDER; + sp->scsi_status = 0; + } + break; + } + } +} +/****************************************************************************** +* Function name : fc_do_iodone +* +* Description : Return a SCSI initiated I/O to above layer +* when the I/O completes. +******************************************************************************/ +int fc_do_iodone(struct buf *bp) +{ + struct scsi_cmnd *Cmnd; + struct sc_buf * sp = (struct sc_buf *) bp; + FC_BRD_INFO * binfo; + iCfgParam * clp; + fc_dev_ctl_t * p_dev_ctl; + NODELIST * nlp; + node_t * node_ptr; + struct Scsi_Host *host; + struct dev_info * dev_ptr; + int dev_index; + int host_status = DID_OK; + + IOcnt--; + + if(!bp) { + return(1); + } + /* + * Linux command from our buffer + */ + Cmnd = bp->cmnd; + + /* + * must have Cmnd and Linux completion functions + */ + if(!Cmnd || !Cmnd->scsi_done){ + return (0); + } + + /* + * retrieve host adapter and device control + */ + host = Cmnd->device->host; + if(!host){ + return (0); + } + p_dev_ctl = (fc_dev_ctl_t *)host->hostdata[0]; + if(!p_dev_ctl){ + return (0); + } + + fc_fcp_bufunmap(p_dev_ctl, sp); + + dev_index = INDEX(ZERO_PAN, Cmnd->device->id); + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + dev_ptr = sp->current_devp; + + if (!dev_ptr) { + node_ptr = binfo->device_queue_hash[dev_index].node_ptr; + goto qf; + } + + if((node_ptr = dev_ptr->nodep) == 0) { + node_ptr = binfo->device_queue_hash[dev_index].node_ptr; + if(!node_ptr) { + dev_ptr = 0; + goto qf; + } + } + + if(node_ptr->rpi == 0xfffe) { +qf: + if ((binfo->fc_ffstate > FC_LINK_DOWN) && (binfo->fc_ffstate < FC_READY)) + goto 
force_retry; + + if(node_ptr) + nlp = node_ptr->nlp; + else + nlp = 0; + if (nlp && + (binfo->fc_flag & FC_RSCN_MODE) && (binfo->fc_ffstate == FC_READY) && + (nlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN))) + goto force_retry; + + if ((node_ptr) && (clp[CFG_NODEV_TMO].a_current)) { + if(node_ptr->flags & FC_NODEV_TMO) { +#ifdef FC_NEW_EH + Cmnd->retries = Cmnd->allowed; /* no more retries */ +#endif + host_status = DID_NO_CONNECT; +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + if(lpfc_use_removable) { + Cmnd->sense_buffer[0] = 0x70; + Cmnd->sense_buffer[2] = UNIT_ATTENTION; + Cmnd->device->removable = 1; + } +#endif + if(dev_ptr) + dev_ptr->scsi_dev = (void *)Cmnd->device; + } + else { +#ifdef FC_NEW_EH + Cmnd->retries = 0; /* Force retry */ +#endif + host_status = DID_BUS_BUSY; + } + Cmnd->result = ScsiResult(host_status, 0); + } + else { + if((clp[CFG_LINKDOWN_TMO].a_current)&&(binfo->fc_flag & FC_LD_TIMEOUT)) { +#ifdef FC_NEW_EH + Cmnd->retries = Cmnd->allowed; /* no more retries */ +#endif + host_status = DID_NO_CONNECT; +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + if(lpfc_use_removable) { + Cmnd->sense_buffer[0] = 0x70; + Cmnd->sense_buffer[2] = UNIT_ATTENTION; + Cmnd->device->removable = 1; + } +#endif + if(dev_ptr) + dev_ptr->scsi_dev = (void *)Cmnd->device; + } + else { +force_retry: +#ifdef FC_NEW_EH + Cmnd->retries = 0; /* Force retry */ +#endif + host_status = DID_BUS_BUSY; + } + Cmnd->result = ScsiResult(host_status, 0); + } + fc_queue_done_cmd(p_dev_ctl, bp); + return (0); + } + + /* + * mark it as done, no longer required, but will leave for now + */ + bp->isdone=1; +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + Cmnd->resid = bp->b_resid; +#endif + + /* + * First check if a scsi error, mid-level handles these only if DID_OK + */ + if(sp->status_validity == SC_SCSI_ERROR){ + if(sp->scsi_status==SC_CHECK_CONDITION){ + lpfc_copy_sense(dev_ptr, bp); + } + else if (sp->scsi_status == SC_RESERVATION_CONFLICT) { + 
host_status = DID_ERROR; + } + else if (sp->scsi_status == SC_BUSY_STATUS) { +#ifdef FC_NEW_EH + Cmnd->retries = 0; /* Force retry */ +#endif + host_status = DID_BUS_BUSY; + } + else { + host_status = DID_ERROR; + } + + if((bp->b_flags & B_ERROR)) { + if (bp->b_error == EBUSY){ + host_status = DID_OK; + sp->scsi_status = SC_QUEUE_FULL; + } else if (bp->b_error == EINVAL){ +#ifdef FC_NEW_EH + Cmnd->retries = 0; /* Force retry */ +#endif + host_status = DID_BUS_BUSY; + sp->scsi_status = 0; + } + } + + Cmnd->result = ScsiResult(host_status,sp->scsi_status); + fc_queue_done_cmd(p_dev_ctl, bp); + return (0); + } + + /* + * check error flag + */ + if((bp->b_flags & B_ERROR)) + { + switch(bp->b_error){ + case 0: + host_status = DID_OK; + sp->scsi_status = 0; + break; + case EBUSY: + host_status = DID_BUS_BUSY; + sp->scsi_status = 0; + break; + case EINVAL: +#ifdef FC_NEW_EH + Cmnd->retries = 0; /* Force retry */ +#endif + host_status = DID_BUS_BUSY; + sp->scsi_status = 0; + break; + default: +#ifdef FC_NEW_EH + host_status = DID_ERROR; +#else + host_status = DID_BUS_BUSY; +#endif + break; + } + } + + /* + * next hardware errors + */ + if(sp->status_validity == SC_ADAPTER_ERROR){ +#ifdef FC_NEW_EH + host_status = DID_ERROR; +#else + host_status = DID_BUS_BUSY; +#endif + Cmnd->result = ScsiResult(host_status,0); + fc_queue_done_cmd(p_dev_ctl, bp); + return (0); + } + + /* + * if lun0_missing feature is turned on and it's inquiry to a missing + * lun 0, then we will fake out LINUX scsi layer to allow scanning + * of other luns. 
+ */ + if (lpfc_lun0_missing) { + if ((Cmnd->cmnd[0] == FCP_SCSI_INQUIRY) && (Cmnd->device->lun == 0)) { + uchar *buf; + buf = (uchar *)Cmnd->request_buffer; + if( *buf == 0x7f) { + /* Make lun unassigned and wrong type */ + *buf = 0x3; + } + } + } + + if(lpfc_lun_skip) { + /* If a LINUX OS patch to support, LUN skipping / no LUN 0, is not present, + * this code will fake out the LINUX scsi layer to allow it to detect + * all LUNs if there are LUN holes on a device. + */ + if (Cmnd->cmnd[0] == FCP_SCSI_INQUIRY) { + uchar *buf; + buf = (uchar *)Cmnd->request_buffer; + if(( *buf == 0x7f) || ((*buf & 0xE0) == 0x20)) { + /* Make lun unassigned and wrong type */ + *buf = 0x3; + } + } + } + + Cmnd->result = ScsiResult(host_status,sp->scsi_status); + fc_queue_done_cmd(p_dev_ctl, bp); + + return(0); +} + +/****************************************************************************** +* Function name : fc_fcp_bufunmap +* +* Description : +* +******************************************************************************/ +int fc_fcp_bufunmap(fc_dev_ctl_t * p_dev_ctl, + struct sc_buf * sp) +{ + struct buf *bp; + struct scsi_cmnd * Cmnd; + FC_BRD_INFO * binfo; + + binfo = &BINFO; + bp = (struct buf *)sp; + Cmnd = bp->cmnd; + /* unmap DMA resources used */ + if(!(sp->flags & SC_MAPPED)) + return(0); +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + { + int rwflg; + + rwflg = Cmnd->sc_data_direction; + + if (Cmnd->use_sg) { + pci_unmap_sg(p_dev_ctl->pcidev, Cmnd->request_buffer, Cmnd->use_sg, rwflg); + } + else if ((Cmnd->request_bufflen) && (bp->av_back)) { +#if LINUX_VERSION_CODE <= LinuxVersionCode(2,4,12) + pci_unmap_single(p_dev_ctl->pcidev, (uint64_t)((ulong)(bp->av_back)), Cmnd->request_bufflen, rwflg); +#else + pci_unmap_page(p_dev_ctl->pcidev, (uint64_t)((ulong)(bp->av_back)), Cmnd->request_bufflen, rwflg); +#endif + } + } + +#endif + FCSTATCTR.fcUnMapCnt++; + sp->flags &= ~SC_MAPPED; + return(0); +} + 
+/****************************************************************************** +* Function name : fc_fcp_bufmap +* +* Description : Called from issue_fcp_cmd, used to map addresses in sbp to +* physical addresses for the I/O. +******************************************************************************/ +int fc_fcp_bufmap(fc_dev_ctl_t * p_dev_ctl, + struct sc_buf * sbp, + fc_buf_t * fcptr, + IOCBQ * temp, + ULP_BDE64 * bpl, + dvi_t * dev_ptr, + int pend) +{ + uint32 seg_cnt, cnt, num_bmps, i, num_bde; + int rwflg; + FC_BRD_INFO * binfo = &BINFO; + iCfgParam * clp; + struct buf * bp; + RING * rp; + IOCB * cmd; + struct scsi_cmnd * cmnd; + ULP_BDE64 * topbpl; + MATCHMAP * bmp; + MATCHMAP * last_bmp; + void * physaddr; + struct scatterlist *sgel_p; +#ifdef powerpc + struct scatterlist *sgel_p_t0; +#endif /* endif powerpc */ + + bp = (struct buf *)sbp; + /* + Linux command */ + cmnd = bp->cmnd; + if(!cmnd) + return(FCP_EXIT); + rp = &binfo->fc_ring[FC_FCP_RING]; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + cmd = &temp->iocb; + + last_bmp = fcptr->bmp; + num_bmps = 1; + num_bde = 0; + topbpl = 0; + sgel_p = 0; + + fcptr->flags |= DATA_MAPPED; + if (cmnd->use_sg) { + sbp->bufstruct.av_back = 0; + sgel_p = (struct scatterlist *)cmnd->request_buffer; +#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) + seg_cnt = cmnd->use_sg; + rwflg = 0; +#else + rwflg = cmnd->sc_data_direction; + #ifdef powerpc /* remap to get a different set of fysadds that xclud zro */ + remapsgl: + #endif /* end if powerpc */ + seg_cnt = pci_map_sg(p_dev_ctl->pcidev, sgel_p, cmnd->use_sg, rwflg); + #ifdef powerpc /* check 4 zro phys address, then remap to get a diff 1 */ + for (sgel_p_t0=sgel_p, i=0; ibufstruct.b_bcount = cnt; + break; + } + /* Fill in continuation entry to next bpl */ + bpl->addrHigh = (uint32)putPaddrHigh(bmp->phys); + bpl->addrHigh = PCIMEM_LONG(bpl->addrHigh); + bpl->addrLow = (uint32)putPaddrLow(bmp->phys); + bpl->addrLow = PCIMEM_LONG(bpl->addrLow); + bpl->tus.f.bdeFlags = 
BPL64_SIZE_WORD; + num_bde++; + if (num_bmps == 1) { + cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(ULP_BDE64)); + } else { + topbpl->tus.f.bdeSize = (num_bde * sizeof(ULP_BDE64)); + topbpl->tus.w = PCIMEM_LONG(topbpl->tus.w); + } + topbpl = bpl; + bpl = (ULP_BDE64 * )bmp->virt; + last_bmp->fc_mptr = (void *)bmp; + last_bmp = bmp; + num_bde = 0; + num_bmps++; + } +#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) + rwflg = 0; +#else + if(rwflg == B_WRITE) + rwflg = SCSI_DATA_WRITE; + else + rwflg = SCSI_DATA_READ; +#endif + + physaddr = (void *)((ulong)scsi_sg_dma_address(sgel_p)); + + bpl->addrLow = PCIMEM_LONG(putPaddrLow(physaddr)); + bpl->addrHigh = PCIMEM_LONG(putPaddrHigh(physaddr)); + bpl->tus.f.bdeSize = scsi_sg_dma_len(sgel_p); + cnt += bpl->tus.f.bdeSize; + if (cmd->ulpCommand == CMD_FCP_IREAD64_CR) + bpl->tus.f.bdeFlags = BUFF_USE_RCV; + else + bpl->tus.f.bdeFlags = 0; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + bpl++; + sgel_p++; + num_bde++; + } /* end for loop */ + + } + else { + +#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) + rwflg = 0; +#else + rwflg = cmnd->sc_data_direction; +#endif + +#if LINUX_VERSION_CODE <= LinuxVersionCode(2,4,12) + physaddr = (void *)((ulong)pci_map_single(p_dev_ctl->pcidev, + cmnd->request_buffer, cmnd->request_bufflen, rwflg)); +#else + { + struct page *page = virt_to_page((ulong)(cmnd->request_buffer)); + unsigned long offset = ((unsigned long)cmnd->request_buffer & ~PAGE_MASK); + + #ifdef powerpc + remapnsg: + #endif /* endif powerpc */ + physaddr = (void *)((ulong)pci_map_page(p_dev_ctl->pcidev, + page, offset, cmnd->request_bufflen, rwflg)); + #ifdef powerpc + if (!physaddr) { + goto remapnsg; + } + #endif /* endif remapnsg */ + } +#endif + FCSTATCTR.fcMapCnt++; + sbp->bufstruct.av_back = (void *)physaddr; + /* no scatter-gather list case */ + bpl->addrLow = PCIMEM_LONG(putPaddrLow(physaddr)); + bpl->addrHigh = PCIMEM_LONG(putPaddrHigh(physaddr)); + bpl->tus.f.bdeSize = sbp->bufstruct.b_bcount; + if 
(cmd->ulpCommand == CMD_FCP_IREAD64_CR) + bpl->tus.f.bdeFlags = BUFF_USE_RCV; + else + bpl->tus.f.bdeFlags = 0; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + num_bde = 1; + bpl++; + + } + + bpl->addrHigh = 0; + bpl->addrLow = 0; + bpl->tus.w = 0; + last_bmp->fc_mptr = 0; + if (num_bmps == 1) { + cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(ULP_BDE64)); + } else { + topbpl->tus.f.bdeSize = (num_bde * sizeof(ULP_BDE64)); + topbpl->tus.w = PCIMEM_LONG(topbpl->tus.w); + } + cmd->ulpBdeCount = 1; + cmd->ulpLe = 1; /* Set the LE bit in the last iocb */ + + /* Queue cmd chain to last iocb entry in xmit queue */ + if (rp->fc_tx.q_first == NULL) { + rp->fc_tx.q_first = (uchar * )temp; + } else { + ((IOCBQ * )(rp->fc_tx.q_last))->q = (uchar * )temp; + } + rp->fc_tx.q_last = (uchar * )temp; + rp->fc_tx.q_cnt++; + + sbp->flags |= SC_MAPPED; + return(0); +} + +/****************************************************************************** +* Function name : local_timeout +* +* Description : Local handler for watchdog timeouts +******************************************************************************/ +void local_timeout(unsigned long data) +{ + struct watchdog *wdt = (struct watchdog *)data; + fc_dev_ctl_t * p_dev_ctl; + FC_BRD_INFO * binfo; + int skip_intr, i; + ulong siflg; + ulong iflg; + struct lpfc_dpc *ldp; + + siflg = 0; + skip_intr = 0; + iflg = 0; + LPFC_LOCK_DRIVER0; + for (i = 0; i < MAX_FC_BRDS; i++) { + if((p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[i])) { + if(p_dev_ctl->fc_ipri != 0) { + printk("LOCK 14 failure %x %x\n",(uint32)p_dev_ctl->fc_ipri, (uint32)iflg); + } + p_dev_ctl->fc_ipri = 14; + + /* Check to see if the DPC was scheduled since the last clock interrupt */ + if(p_dev_ctl->dpc_cnt == p_dev_ctl->save_dpc_cnt) { + volatile uint32 ha_copy; + void * ioa; + + binfo = &BINFO; + ioa = FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + /* Read host attention register to determine interrupt source */ + ha_copy = READ_CSR_REG(binfo, FC_HA_REG(binfo, 
ioa)); + FC_UNMAP_MEMIO(ioa); + /* If there are any hardware interrupts to process, they better + * get done before the next clock interrupt. + */ + if(p_dev_ctl->dpc_ha_copy || (ha_copy & ~HA_LATT)) { + if(p_dev_ctl->dev_flag & FC_NEEDS_DPC) { + skip_intr = 1; + /* Local_timeout Skipping clock tick */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0756, /* ptr to msg structure */ + fc_mes0756, /* ptr to msg */ + fc_msgBlk0756.msgPreambleStr, /* begin varargs */ + p_dev_ctl->dpc_ha_copy, + ha_copy, + p_dev_ctl->dpc_cnt, + binfo->fc_ffstate); /* end varargs */ + if(wdt) + del_timer(&wdt->timer); + } + else { + p_dev_ctl->dev_flag |= FC_NEEDS_DPC; + } + } + } + p_dev_ctl->save_dpc_cnt = p_dev_ctl->dpc_cnt; + } + } + + if(skip_intr || !wdt || !wdt->timeout_id) { + fc_reset_timer(); + goto out; + } + del_timer(&wdt->timer); + + ldp = &lpfc_dpc[0]; + if (ldp->dpc_wait == NULL) { + if(wdt->func) + wdt->func(wdt); + } + else { + lpfc_timer(0); + } + +out: + for (i = 0; i < MAX_FC_BRDS; i++) { + if((p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[i])) { + p_dev_ctl->fc_ipri = 0; + } + } + LPFC_UNLOCK_DRIVER0; + + for (i = 0; i < MAX_FC_BRDS; i++) { + if((p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[i])) { + binfo = &BINFO; + ldp = &lpfc_dpc[binfo->fc_brd_no]; + if ((ldp->dpc_active == 0) && ldp->dpc_wait) + up(ldp->dpc_wait); + } + } +} + +/****************************************************************************** +* Function name : fc_reset_timer +* +* Description : +* +******************************************************************************/ +void fc_reset_timer(void) +{ + FCCLOCK_INFO * clock_info; + + clock_info = &DD_CTL.fc_clock_info; + ((struct watchdog *)(CLOCKWDT))->func = fc_timer; + ((struct watchdog *)(CLOCKWDT))->restart = 1; + ((struct watchdog *)(CLOCKWDT))->count = 0; + ((struct watchdog *)(CLOCKWDT))->stopping = 0; + /* + * add our watchdog timer routine to kernel's list + */ + ((struct watchdog *)(CLOCKWDT))->timer.expires = HZ + jiffies; + 
((struct watchdog *)(CLOCKWDT))->timer.function = local_timeout; + ((struct watchdog *)(CLOCKWDT))->timer.data = (unsigned long)(CLOCKWDT); + init_timer(&((struct watchdog *)(CLOCKWDT))->timer); + add_timer(&((struct watchdog *)(CLOCKWDT))->timer); + return; +} + +/****************************************************************************** +* Function name : curtime +* +* Description : Set memory pointed to by time, with the current time (LBOLT) +* +******************************************************************************/ +void curtime(uint32 *time) +{ + *time = jiffies; +} + +/****************************************************************************** +* Function name : fc_initpci +* +* Description : Called by driver diagnostic interface to initialize dfc_info +* +******************************************************************************/ +int fc_initpci(struct dfc_info *di, + fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; /* point to the binfo area */ + struct pci_dev *pdev; + + pdev = p_dev_ctl->pcidev; + /* + must have the pci struct + */ + if(!pdev) + return(1); + binfo = &BINFO; + + di->fc_ba.a_onmask = (ONDI_MBOX | ONDI_RMEM | ONDI_RPCI | ONDI_RCTLREG | + ONDI_IOINFO | ONDI_LNKINFO | ONDI_NODEINFO | ONDI_CFGPARAM | + ONDI_CT | ONDI_HBAAPI); + di->fc_ba.a_offmask = (OFFDI_MBOX | OFFDI_RMEM | OFFDI_WMEM | OFFDI_RPCI | + OFFDI_WPCI | OFFDI_RCTLREG | OFFDI_WCTLREG); + + if ((binfo->fc_flag & FC_SLI2) && (fc_diag_state == DDI_ONDI)) + di->fc_ba.a_onmask |= ONDI_SLI2; + else + di->fc_ba.a_onmask |= ONDI_SLI1; +#ifdef powerpc + di->fc_ba.a_onmask |= ONDI_BIG_ENDIAN; +#else + di->fc_ba.a_onmask |= ONDI_LTL_ENDIAN; +#endif + di->fc_ba.a_pci=((((uint32)pdev->device) << 16) | (uint32)(pdev->vendor)); + di->fc_ba.a_pci = SWAP_LONG(di->fc_ba.a_pci); + di->fc_ba.a_ddi = fcinstance[binfo->fc_brd_no]; + if(pdev->bus) + di->fc_ba.a_busid = (uint32)(pdev->bus->number); + else + di->fc_ba.a_busid = 0; + di->fc_ba.a_devid = (uint32)(pdev->devfn); + + 
bcopy((void *)lpfc_release_version, di->fc_ba.a_drvrid, 8); + decode_firmware_rev(binfo, &VPD); + bcopy((void *)fwrevision, di->fc_ba.a_fwname, 32); + + + /* setup structures for I/O mapping */ + di->fc_iomap_io = binfo->fc_iomap_io; + di->fc_iomap_mem = binfo->fc_iomap_mem; + di->fc_hmap = (char *)pdev; + return(0); +} + +/****************************************************************************** +* Function name : fc_readpci +* +* Description : Called by driver diagnostic interface to copy cnt bytes from +* PCI configuration registers, at offset, into buf. +******************************************************************************/ +int fc_readpci(struct dfc_info *di, + uint32 offset, + char *buf, + uint32 cnt) +{ + struct pci_dev *pdev; + int i; + uint32 *lp; + uint32 ldata; + + if(!di->fc_hmap) return(1); + pdev = (struct pci_dev *)di->fc_hmap; + for(i=0; i < cnt; i++){ + lp = (uint32 *)buf; + pci_read_config_dword(pdev,(u8)offset, (u32 *)buf); + ldata = *lp; + *lp = SWAP_LONG(ldata); + buf+=4; + offset+=4; + } + return(0); +} + +/****************************************************************************** +* Function name : fc_writepci +* +* Description : Called by driver diagnostic interface to write cnt bytes from +* buf into PCI configuration registers, starting at offset. 
+******************************************************************************/ +int fc_writepci(struct dfc_info *di, + uint32 offset, + char *buf, + uint32 cnt) +{ + struct pci_dev *pdev; + int i; + uint32 *lp; + uint32 ldata; + + if(!di->fc_hmap) return(1); + pdev = (struct pci_dev *)di->fc_hmap; + for(i=0; i < cnt; i++){ + lp = (uint32 *)buf; + ldata = *lp; + *lp = SWAP_LONG(ldata); + pci_write_config_dword(pdev,(u8)offset, *buf); + buf+=4; + offset+=4; + } + return(0); +} + +/****************************************************************************** +* Function name : copyout +* +* Description : copy from kernel-space to a user-space +* +******************************************************************************/ +int copyout(uchar *src, + uchar *dst, + unsigned long size) +{ + copy_to_user(dst,src,size); + return(0); +} + +/****************************************************************************** +* Function name : copyin +* +* Description : copy from user-space to kernel-space +* +******************************************************************************/ +int copyin(uchar *src, + uchar *dst, + unsigned long size) +{ + copy_from_user(dst,src,size); + return(0); +} + +/****************************************************************************** +* Function name : fc_getDVI +* +* Description : get a dvi ptr from a Linux scsi cmnd +* +******************************************************************************/ +_local_ dvi_t *fc_getDVI(fc_dev_ctl_t * p_dev_ctl, + int target, + fc_lun_t lun) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + struct dev_info * dev_ptr; + struct dev_info * save_ptr; + node_t * node_ptr; + NODELIST * nlp; + int dev_index; + int report_device = 1; + + binfo = &p_dev_ctl->info; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + dev_index = INDEX(ZERO_PAN, target); + + if (dev_index >= MAX_FC_TARGETS){ + return (0); + } + + if (lun < 0) { + /* LUN address out of range */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + 
&fc_msgBlk0718, /* ptr to msg structure */ + fc_mes0718, /* ptr to msg */ + fc_msgBlk0718.msgPreambleStr, /* begin varargs */ + target, + (uint32)lun); /* end varargs */ + return (0); + } + + /* + * Find the target from the nlplist based on SCSI ID + */ + if((nlp = fc_findnode_scsid(binfo, NLP_SEARCH_MAPPED, target)) == 0) { + /* + * Device SCSI ID is not a valid FCP target + */ + + return (0); + } + + /* Allocate SCSI node structure for each open FC node */ + node_ptr = binfo->device_queue_hash[dev_index].node_ptr; + if (node_ptr == NULL) { + if (!(node_ptr = (node_t * ) fc_kmem_zalloc(sizeof(node_t)))) { + return (0); + } + + /* Initialize the node ptr structure */ + node_ptr->ap = p_dev_ctl; /* point back to adapter struct */ + node_ptr->devno = target; + node_ptr->lunlist = NULL; + node_ptr->max_lun = lpfc_max_lun; + + node_ptr->last_dev = NULL; + node_ptr->num_active_io = 0; + node_ptr->virtRptLunData = 0; + node_ptr->physRptLunData = 0; + + node_ptr->tgt_queue_depth = (u_int)clp[CFG_DFT_TGT_Q_DEPTH].a_current; + + node_ptr->nlp = nlp; + node_ptr->rpi = nlp->nlp_Rpi; + node_ptr->last_good_rpi = nlp->nlp_Rpi; + node_ptr->scsi_id = target; + nlp->nlp_targetp = (uchar * )node_ptr; + binfo->device_queue_hash[dev_index].node_ptr = node_ptr; + } + + dev_ptr = fc_find_lun(binfo, dev_index, lun); + /* device queue structure doesn't exist yet */ + if ( dev_ptr == NULL ) { + if (!(dev_ptr = (dvi_t * ) fc_kmem_zalloc(sizeof(dvi_t)))) { + return (0); + } + + /* Initialize the dev_info structure */ + dev_ptr->nodep = node_ptr; + dev_ptr->lun_id = lun; + dev_ptr->flags = 0; + dev_ptr->sense_valid = FALSE; + + /* Initialize device queues */ + dev_ptr->ABORT_BDR_fwd = NULL; + dev_ptr->ABORT_BDR_bkwd = NULL; + dev_ptr->DEVICE_WAITING_fwd = NULL; + dev_ptr->pend_head = NULL; + dev_ptr->pend_tail = NULL; + dev_ptr->pend_count = 0; + dev_ptr->clear_head = NULL; + dev_ptr->clear_count = 0; + dev_ptr->active_io_count = 0; + dev_ptr->stop_send_io = 0; + dev_ptr->ioctl_wakeup = 0; + 
dev_ptr->qfull_retries = lpfc_qfull_retry; + dev_ptr->first_check = FIRST_CHECK_COND | FIRST_IO; + + dev_ptr->fcp_lun_queue_depth = (u_int)clp[CFG_DFT_LUN_Q_DEPTH].a_current; + if (dev_ptr->fcp_lun_queue_depth < 1) + dev_ptr->fcp_lun_queue_depth = 1; + + dev_ptr->fcp_cur_queue_depth = dev_ptr->fcp_lun_queue_depth; + + /* init command state flags */ + dev_ptr->queue_state = ACTIVE; + dev_ptr->opened = TRUE; + dev_ptr->ioctl_wakeup = FALSE; + dev_ptr->ioctl_event = EVENT_NULL; + dev_ptr->stop_event = FALSE; + + /* + * Create fc_bufs - allocate virtual and bus lists for use with FCP + */ + if(fc_rtalloc(p_dev_ctl, dev_ptr) == 0) { + fc_kmem_free(dev_ptr, sizeof(dvi_t)); + return (0); + } + + /* Add dev_ptr to lunlist */ + if (node_ptr->lunlist == NULL) { + node_ptr->lunlist = dev_ptr; + } else { + save_ptr = node_ptr->lunlist; + while (save_ptr->next != NULL ) { + save_ptr = save_ptr->next; + } + save_ptr->next = dev_ptr; + } + dev_ptr->next = NULL; + } + + if(clp[CFG_DEVICE_REPORT].a_current + && dev_ptr!=NULL && report_device && + (dev_ptr->nodep->nlp->nlp_type & NLP_FCP_TARGET)) { + nlp = dev_ptr->nodep->nlp; + printk(KERN_INFO"!lpfc%d: Acquired FCP/SCSI Target 0x%lx LUN 0x%lx , D_ID is (0x%lx)\n", + binfo->fc_brd_no, + (ulong)target, + (ulong)lun, + (ulong)(nlp->nlp_DID)); + } + + return (dev_ptr); +} + +dvi_t * +fc_alloc_devp( +fc_dev_ctl_t * p_dev_ctl, +int target, +fc_lun_t lun) +{ + return fc_getDVI(p_dev_ctl, target, lun); +} + +/****************************************************************************** +* Function name : deviFree +* +* Description : Free a dvi_t pointer +* +******************************************************************************/ +_local_ void deviFree(fc_dev_ctl_t *p_dev_ctl, + dvi_t *dev_ptr, + node_t *node_ptr) +{ + struct dev_info *curDev, *prevDev; + fc_buf_t *curFcBuf, *tmpFcBuf; + struct sc_buf *sp; + dma_addr_t phys; + unsigned int size; + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + + /* + * First, we free up all fcbuf for 
this device. + */ + curFcBuf = dev_ptr->fcbuf_head; + while (curFcBuf != NULL) { + tmpFcBuf = curFcBuf->fc_fwd; + phys = (dma_addr_t)((ulong)curFcBuf->phys_adr); + size = fc_po2(sizeof(fc_buf_t)); + + buf_info = &bufinfo; + buf_info->phys = (void *)((ulong)phys); + buf_info->data_handle = 0; + buf_info->dma_handle = 0; + buf_info->flags = FC_MBUF_DMA; + buf_info->virt = (uint32 * )curFcBuf; + buf_info->size = size; + fc_free(p_dev_ctl, buf_info); + curFcBuf = tmpFcBuf; + } /* end while loop */ + while (dev_ptr->scp != NULL) { + sp = dev_ptr->scp; + dev_ptr->scp = sp->bufstruct.av_forw; + dev_ptr->scpcnt--; + fc_kmem_free((void *)sp, sizeof(struct sc_buf)); + } + + /* + * Next, we are going to remove this device-lun combination. + * But we need to make sure the link list where this dev_ptr + * belongs is not broken. + */ + + curDev = node_ptr->lunlist; + prevDev = curDev; + while (curDev != NULL) { + if (curDev == dev_ptr) + break; + else { + prevDev = curDev; + curDev = curDev->next; + } + } + + if (curDev == dev_ptr) { /* This should always pass */ + if (curDev == node_ptr->lunlist) + node_ptr->lunlist = curDev->next; + else + prevDev->next = curDev->next; + } + fc_kmem_free((void *)dev_ptr, sizeof(dvi_t)); +} +/****************************************************************************** +* Function name : fc_print +* +* Description : +* +******************************************************************************/ +int fc_print( char *str, + void *a1, + void *a2) +{ + printk((const char *)str, a1, a2); + return(1); +} /* fc_print */ + +/****************************************************************************** +* Function name : log_printf_msgblk +* +* Description : Called from common code function fc_log_printf_msg_vargs +* Note : Input string 'str' is formatted (sprintf) by caller. 
+******************************************************************************/ +int log_printf_msgblk(int brdno, + msgLogDef * msg, + char * str, /* String formatted by caller */ + int log_only) +{ + int ddiinst; + char * mod; + + ddiinst = fcinstance[brdno]; + mod = "lpfc"; + if (log_only) { + /* system buffer only */ + switch(msg->msgType) { + case FC_LOG_MSG_TYPE_INFO: + case FC_LOG_MSG_TYPE_WARN: + /* These LOG messages appear in LOG file only */ + printk(KERN_INFO"!%s%d:%04d:%s\n", mod, ddiinst, msg->msgNum, str); + break; + case FC_LOG_MSG_TYPE_ERR_CFG: + case FC_LOG_MSG_TYPE_ERR: + /* These LOG messages appear on the monitor and in the LOG file */ + printk(KERN_WARNING"!%s%d:%04d:%s\n", mod, ddiinst, msg->msgNum, str); + break; + case FC_LOG_MSG_TYPE_PANIC: + panic("!%s%d:%04d:%s\n", mod, ddiinst, msg->msgNum, str); + break; + default: + return(0); + } + } + else { + switch(msg->msgType) { + case FC_LOG_MSG_TYPE_INFO: + case FC_LOG_MSG_TYPE_WARN: + printk(KERN_INFO"!%s%d:%04d:%s\n", mod, ddiinst, msg->msgNum, str); + break; + case FC_LOG_MSG_TYPE_ERR_CFG: + case FC_LOG_MSG_TYPE_ERR: + printk(KERN_WARNING"!%s%d:%04d:%s\n", mod, ddiinst, msg->msgNum, str); + break; + case FC_LOG_MSG_TYPE_PANIC: + panic("!%s%d:%04d:%s\n", mod, ddiinst, msg->msgNum, str); + break; + default: + return(0); + } + } + return(1); +} /* log_printf_msgblk */ + +/****************************************************************************** +* Function name : fc_write_toio +* +******************************************************************************/ +_static_ void fc_write_toio(uint32 *src, + uint32 *dest_io, + uint32 cnt) +{ + uint32 ldata; + int i; + + for (i = 0; i < (int)cnt; i += sizeof(uint32)) { + ldata = *src++; + writel(ldata, dest_io); + dest_io++; + } + return; +} + +/****************************************************************************** +* Function name : fc_read_fromio +* +* Description : +* 
+******************************************************************************/ +_static_ void fc_read_fromio( uint32 *src_io, + uint32 *dest, + uint32 cnt) +{ + uint32 ldata; + int i; + + for (i = 0; i < (int)cnt; i += sizeof(uint32)) { + ldata = readl(src_io); + src_io++; + *dest++ = ldata; + } + return; +} + +/****************************************************************************** +* Function name : fc_writel +* +* Description : +* +******************************************************************************/ +_static_ void fc_writel(uint32 * src_io, + uint32 ldata) +{ + writel(ldata, src_io); + return; +} + +/****************************************************************************** +* Function name : fc_readl +* +* Description : +* +******************************************************************************/ +_static_ uint32 fc_readl(uint32 *src_io) +{ + uint32 ldata; + + ldata = readl(src_io); + return(ldata); +} + + + +#ifdef MODULE +/* + * XXX: patman I don't know why this is needed. Maybe for out of tree + * builds? 
+#include "lpfc.ver" + */ +#endif /* MODULE */ + +#endif /* __GENKSYMS__ */ + +#ifdef MODULE + +#ifndef EXPORT_SYMTAB +#define EXPORT_SYMTAB +#endif + +int lpfc_xmit(fc_dev_ctl_t *p_dev_ctl, struct sk_buff *skb); +int lpfc_ioctl(int cmd, void *s); + +EXPORT_SYMBOL(lpfc_xmit); +EXPORT_SYMBOL(lpfc_ioctl); + +#endif /* MODULE */ + +/****************************************************************************** +* Function name : fc_ioctl +* +* Description : ioctl interface to diagnostic utilities +* called by a special character device driver (fcLINUXdiag.c) +* fd is the board number (instance), and s is a cmninfo pointer +* +******************************************************************************/ +int fc_ioctl(int cmd, + void *s) +{ + int rc, fd; + fc_dev_ctl_t *p_dev_ctl; + struct dfccmdinfo *cp = (struct dfccmdinfo *)s; + struct cmd_input *ci = (struct cmd_input *)cp->c_datain; + + if(!cp || !ci) + return EINVAL; + fd = ci->c_brd; + if(fd > DD_CTL.num_devs) + return EINVAL; + if(!(p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[fd])) + return EINVAL; + + rc = dfc_ioctl(cp, ci); + + return(rc); +} + +/****************************************************************************** +* Function name : dfc_sleep +* +* Description : +* +******************************************************************************/ +int dfc_sleep(fc_dev_ctl_t *p_dev_ctl, + fcEvent_header *ep) +{ + switch(ep->e_mask) { + case FC_REG_LINK_EVENT: + ep->e_mode |= E_SLEEPING_MODE; + interruptible_sleep_on(&p_dev_ctl->linkwq); + if (signal_pending (current)) + return(1); + break; + case FC_REG_RSCN_EVENT: + ep->e_mode |= E_SLEEPING_MODE; + interruptible_sleep_on(&p_dev_ctl->rscnwq); + if (signal_pending (current)) + return(1); + break; + case FC_REG_CT_EVENT: + ep->e_mode |= E_SLEEPING_MODE; + interruptible_sleep_on(&p_dev_ctl->ctwq); + if (signal_pending (current)) + return(1); + break; + } + return(0); +} + +/****************************************************************************** +* 
Function name : dfc_wakeup +* +* Description : +* +******************************************************************************/ +int dfc_wakeup(fc_dev_ctl_t *p_dev_ctl, + fcEvent_header *ep) +{ + switch(ep->e_mask) { + case FC_REG_LINK_EVENT: + ep->e_mode &= ~E_SLEEPING_MODE; + wake_up_interruptible(&p_dev_ctl->linkwq); + break; + case FC_REG_RSCN_EVENT: + ep->e_mode &= ~E_SLEEPING_MODE; + wake_up_interruptible(&p_dev_ctl->rscnwq); + break; + case FC_REG_CT_EVENT: + ep->e_mode &= ~E_SLEEPING_MODE; + wake_up_interruptible(&p_dev_ctl->ctwq); + break; + } + return(0); +} + +/****************************************************************************** +* Function name : lpfc_xmit +* +* Description : +* +******************************************************************************/ +int lpfc_xmit(fc_dev_ctl_t *p_dev_ctl, + struct sk_buff *skb) +{ + int rc; + ulong siflg, iflg; + + siflg = 0; + LPFC_LOCK_SCSI_DONE(p_dev_ctl->host); + iflg = 0; + LPFC_LOCK_DRIVER(15); + rc = fc_xmit(p_dev_ctl, skb); + LPFC_UNLOCK_DRIVER; + LPFC_UNLOCK_SCSI_DONE(p_dev_ctl->host); + return(rc); +} + +/****************************************************************************** +* Function name : lpfc_ioctl +* +* Description : +* +******************************************************************************/ +int lpfc_ioctl(int cmd, + void *s) +{ + int i, cnt, ipri; + NETDEVICE *dev; + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO * binfo; + iCfgParam * clp; + struct lpfn_probe *lp; + ndd_t * p_ndd; + + cnt = 0; + switch(cmd) { + case LPFN_PROBE: +#ifndef MODULE + if(lpfc_detect_called != 1) { + lpfc_detect_called = 2; /* defer calling this till after fc_detect */ + return(1); + } +#endif /* MODULE */ + + for (i = 0; i < MAX_FC_BRDS; i++) { + if((p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[i])) { + clp = DD_CTL.p_config[i]; + binfo = &BINFO; + if(clp[CFG_NETWORK_ON].a_current == 0) + continue; + ipri = disable_lock(FC_LVL, &CMD_LOCK); + if(p_dev_ctl->ihs.lpfn_dev == 0) { + unsigned int 
alloc_size; + + /* ensure 32-byte alignment of the private area */ + alloc_size = sizeof(NETDEVICE) + 31; + + dev = (NETDEVICE *) lpfc_kmalloc (alloc_size, GFP_KERNEL, 0, 0); + if (dev == NULL) { + unlock_enable(ipri, &CMD_LOCK); + continue; + } + memset(dev, 0, alloc_size); +#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) + dev->name = (char *)(dev + 1); + sprintf(dev->name, "lpfn%d", binfo->fc_brd_no); + +#else + rtnl_lock(); + strcpy(dev->name, "lpfn%d"); + if (dev_alloc_name(dev, "lpfn%d")<0) { + rtnl_unlock(); + lpfc_kfree(alloc_size, (void *)dev, (void *)((ulong)INVALID_PHYS), 0); + unlock_enable(ipri, &CMD_LOCK); + continue; + } + +#endif + dev->priv = (void *)p_dev_ctl; + p_dev_ctl->ihs.lpfn_dev = dev; + + lp = (struct lpfn_probe *)s; + p_ndd = (ndd_t * ) & (NDD); + /* Initialize the device structure. */ + dev->hard_start_xmit = lp->hard_start_xmit; + dev->get_stats = lp->get_stats; + dev->open = lp->open; + dev->stop = lp->stop; + dev->hard_header = lp->hard_header; + dev->rebuild_header = lp->rebuild_header; + dev->change_mtu = lp->change_mtu; + p_ndd->nd_receive = + (void (*)(void *, struct sk_buff *, void *))(lp->receive); + + /* Assume fc header + LLC/SNAP 24 bytes */ + dev->hard_header_len = 24; + dev->type = ARPHRD_ETHER; + dev->mtu = p_dev_ctl->ihs.lpfn_mtu; + dev->addr_len = 6; + dev->tx_queue_len = 100; + + memset(dev->broadcast, 0xFF, 6); + bcopy(p_dev_ctl->phys_addr, dev->dev_addr, 6); + + /* New-style flags */ + dev->flags = IFF_BROADCAST; + register_netdevice(dev); +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + rtnl_unlock(); +#endif + + cnt++; + } + unlock_enable(ipri, &CMD_LOCK); + } + } + break; + case LPFN_DETACH: + for (i = 0; i < MAX_FC_BRDS; i++) { + if((p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[i])) { + clp = DD_CTL.p_config[i]; + if(clp[CFG_NETWORK_ON].a_current == 0) + continue; + ipri = disable_lock(FC_LVL, &CMD_LOCK); + if((dev=p_dev_ctl->ihs.lpfn_dev)) { + unregister_netdev(dev); + dev->priv = NULL; + p_dev_ctl->ihs.lpfn_dev 
= 0; + cnt++; + } + unlock_enable(ipri, &CMD_LOCK); + } + } + break; + case LPFN_DFC: + break; + default: + return(0); + } + return(cnt); +} + + +/****************************************************************************** +* Function name : lpfcdfc_init +* +* Description : Register your major, and accept a dynamic number +* +******************************************************************************/ +int lpfcdfc_init(void) +{ + int result; +#ifdef powerpc + fc_dev_ctl_t *p_dev_ctl; + MBUF_INFO *buf_info; + MBUF_INFO bufinfo; + int i; +#endif + + result = register_chrdev(lpfc_major, "lpfcdfc", &lpfc_fops); + if (result < 0) { + printk(KERN_WARNING "lpfcdfc: can't get major %d\n",lpfc_major); + return result; + } + if (lpfc_major == 0) lpfc_major = result; /* dynamic */ + +#ifdef powerpc + for(i=0; i < MAX_FC_BRDS; i++) { + p_dev_ctl = (fc_dev_ctl_t *)DD_CTL.p_dev[i]; + if(p_dev_ctl) { + buf_info = &bufinfo; + buf_info->virt = 0; + buf_info->phys = 0; + buf_info->flags = (FC_MBUF_IOCTL | FC_MBUF_UNLOCK); + buf_info->align = sizeof(void *); + buf_info->size = 64 * 1024; + buf_info->dma_handle = 0; + + fc_malloc(p_dev_ctl, buf_info); + p_dev_ctl->dfc_kernel_buf = buf_info->virt; + } + } +#endif + + return 0; +} + +/****************************************************************************** +* Function name : lpfcdiag_ioctl +* +* Description : caller must insure that cmd is the board number and arg is +* the cmdinfo pointer +* +******************************************************************************/ +int lpfcdiag_ioctl(struct inode * inode, + struct file * file, + unsigned int cmd, + unsigned long arg) +{ + return -fc_ioctl(cmd, (void *)arg); +} + +/****************************************************************************** +* Function name : lpfcdiag_open +* +* Description : +* +******************************************************************************/ +int lpfcdiag_open(struct inode * inode, + struct file * file) +{ + fc_dev_ctl_t *p_dev_ctl; 
+ struct Scsi_Host *host; + + if(((p_dev_ctl = DD_CTL.p_dev[0])) && + ((host = p_dev_ctl->host))) { + lpfcdiag_cnt++; + } + return(0); +} + +/****************************************************************************** +* Function name : lpfcdiag_release +* +* Description : +* +******************************************************************************/ +int lpfcdiag_release(struct inode * inode, + struct file * file) +{ + fc_dev_ctl_t *p_dev_ctl; + struct Scsi_Host *host; + + if(((p_dev_ctl = DD_CTL.p_dev[0])) && + ((host = p_dev_ctl->host))) { + lpfcdiag_cnt--; + } + return(0); +} + +/****************************************************************************** +* Function name : fc_get_dds_bind +* +* Description : Called from fc_attach to setup binding parameters for adapter +******************************************************************************/ +int fc_get_dds_bind(fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO *binfo; + iCfgParam *clp; + char **arrayp; + u_int cnt; + + /* + * Check if there are any WWNN / scsid bindings + */ + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + cnt = lpfc_bind_entries; + arrayp = lpfc_fcp_bind_WWNN; + if(cnt && (*arrayp != 0)) { + fc_bind_wwnn(p_dev_ctl, arrayp, cnt); + } else { + + /* + * Check if there are any WWPN / scsid bindings + */ + arrayp = lpfc_fcp_bind_WWPN; + if(cnt && (*arrayp != 0)) { + fc_bind_wwpn(p_dev_ctl, arrayp, cnt); + } else { + + /* + * Check if there are any NPortID / scsid bindings + */ + arrayp = lpfc_fcp_bind_DID; + if(cnt && (*arrayp != 0)) { + fc_bind_did(p_dev_ctl, arrayp, cnt); + } else { + switch(clp[CFG_AUTOMAP].a_current) { + case 2: + p_dev_ctl->fcp_mapping = FCP_SEED_WWPN; + break; + case 3: + p_dev_ctl->fcp_mapping = FCP_SEED_DID; + break; + default: + p_dev_ctl->fcp_mapping = FCP_SEED_WWNN; + } + } + } + } + + clp[CFG_SCAN_DOWN].a_current = (uint32)lpfc_scandown; + if(cnt && (*arrayp != 0) && (clp[CFG_SCAN_DOWN].a_current == 2)) { + /* Scan-down is 2 with Persistent 
binding - ignoring scan-down */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0411, /* ptr to msg structure */ + fc_mes0411, /* ptr to msg */ + fc_msgBlk0411.msgPreambleStr, /* begin varargs */ + clp[CFG_SCAN_DOWN].a_current, + p_dev_ctl->fcp_mapping); /* end varargs */ + clp[CFG_SCAN_DOWN].a_current = 0; + } + if(clp[CFG_SCAN_DOWN].a_current > 2) { + /* Scan-down is out of range - ignoring scan-down */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0412, /* ptr to msg structure */ + fc_mes0412, /* ptr to msg */ + fc_msgBlk0412.msgPreambleStr, /* begin varargs */ + clp[CFG_SCAN_DOWN].a_current, + p_dev_ctl->fcp_mapping); /* end varargs */ + clp[CFG_SCAN_DOWN].a_current = 0; + } + return(0); +} + +/****************************************************************************** +* Function name : fc_get_dds +* +* Description : Called from fc_attach to setup configuration parameters for +* adapter +* The goal of this routine is to fill in all the a_current +* members of the CfgParam structure for all configuration +* parameters. +* Example: +* clp[CFG_XXX].a_current = (uint32)value; +* value might be a define, a global variable, clp[CFG_XXX].a_default, +* or some other enviroment specific way of initializing config parameters. 
+******************************************************************************/ +int fc_get_dds(fc_dev_ctl_t *p_dev_ctl, + uint32 *pio) +{ + FC_BRD_INFO *binfo; + iCfgParam *clp; + int i; + int brd; + + binfo = &BINFO; + brd = binfo->fc_brd_no; + clp = DD_CTL.p_config[brd]; + + p_dev_ctl->open_state = NORMAL_OPEN; + + /* + * Set everything to the defaults + */ + for(i=0; i < NUM_CFG_PARAM; i++) + clp[i].a_current = clp[i].a_default; + + /* Default values for I/O colaesing */ + clp[CFG_CR_DELAY].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_CR_DELAY)); + clp[CFG_CR_COUNT].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_CR_COUNT)); + + clp[CFG_AUTOMAP].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_AUTOMAP)); + + clp[CFG_LINK_SPEED].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_LINK_SPEED)); + + bcopy((uchar * )"lpfc0", (uchar *)DDS.logical_name, 6); + DDS.logical_name[4] += binfo->fc_brd_no; + DDS.logical_name[5] = 0; + + clp[CFG_INTR_ACK].a_current = (uint32)lpfc_intr_ack; + clp[CFG_IDENTIFY_SELF].a_current = 0; + clp[CFG_DEVICE_REPORT].a_current = 0; + + clp[CFG_LOG_VERBOSE].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_LOG_VERBOSE)); + clp[CFG_LOG_ONLY].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_LOG_ONLY)); + + /* Can NOT log verbose messages until you read VERBOSE config param */ + if((binfo->fc_flag & FC_2G_CAPABLE) == 0) { + /* This HBA is NOT 2G_CAPABLE */ + if( clp[CFG_LINK_SPEED].a_current > 1) { + /* Reset link speed to auto. 1G HBA cfg'd for 2G. 
*/ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1303, /* ptr to msg structure */ + fc_mes1303, /* ptr to msg */ + fc_msgBlk1303.msgPreambleStr); /* begin & end varargs */ + clp[CFG_LINK_SPEED].a_current = LINK_SPEED_AUTO; + } + } + + clp[CFG_NUM_IOCBS].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_NUM_IOCBS)); + + if (clp[CFG_NUM_IOCBS].a_current < LPFC_MIN_NUM_IOCBS) { + /* Num-iocbs too low, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0413, /* ptr to msg structure */ + fc_mes0413, /* ptr to msg */ + fc_msgBlk0413.msgPreambleStr, /* begin varargs */ + clp[CFG_NUM_IOCBS].a_current, + LPFC_MIN_NUM_IOCBS); /* end varargs */ + clp[CFG_NUM_IOCBS].a_current = LPFC_MIN_NUM_IOCBS; + } + if (clp[CFG_NUM_IOCBS].a_current > LPFC_MAX_NUM_IOCBS) { + /* Num-iocbs too high, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0414, /* ptr to msg structure */ + fc_mes0414, /* ptr to msg */ + fc_msgBlk0414.msgPreambleStr, /* begin varargs */ + clp[CFG_NUM_IOCBS].a_current, + LPFC_MAX_NUM_IOCBS); /* end varargs */ + clp[CFG_NUM_IOCBS].a_current = LPFC_MAX_NUM_IOCBS; + } + + clp[CFG_NUM_BUFS].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_NUM_BUFS)); + + if (clp[CFG_NUM_BUFS].a_current < LPFC_MIN_NUM_BUFS) { + /* Num-bufs too low, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0415, /* ptr to msg structure */ + fc_mes0415, /* ptr to msg */ + fc_msgBlk0415.msgPreambleStr, /* begin varargs */ + clp[CFG_NUM_BUFS].a_current, + LPFC_MIN_NUM_BUFS); /* end varargs */ + clp[CFG_NUM_BUFS].a_current = LPFC_MIN_NUM_BUFS; + } + if (clp[CFG_NUM_BUFS].a_current > LPFC_MAX_NUM_BUFS) { + /* Num-bufs too high, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0416, /* ptr to msg structure */ + fc_mes0416, /* ptr to msg */ + fc_msgBlk0416.msgPreambleStr, /* begin varargs */ + clp[CFG_NUM_BUFS].a_current, + LPFC_MAX_NUM_BUFS); /* end varargs */ + clp[CFG_NUM_BUFS].a_current = 
LPFC_MAX_NUM_BUFS; + } + + clp[CFG_FCP_ON].a_current = 1; + clp[CFG_DFT_TGT_Q_DEPTH].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_DFT_TGT_Q_DEPTH)); + clp[CFG_DFT_LUN_Q_DEPTH].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_DFT_LUN_Q_DEPTH)); + if (clp[CFG_DFT_TGT_Q_DEPTH].a_current > LPFC_MAX_TGT_Q_DEPTH ) { + /* Target qdepth too high, resetting to max */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0417, /* ptr to msg structure */ + fc_mes0417, /* ptr to msg */ + fc_msgBlk0417.msgPreambleStr, /* begin varargs */ + clp[CFG_DFT_TGT_Q_DEPTH].a_current, + LPFC_MAX_TGT_Q_DEPTH); /* end varargs */ + clp[CFG_DFT_TGT_Q_DEPTH].a_current = LPFC_MAX_TGT_Q_DEPTH; + } + if (clp[CFG_DFT_LUN_Q_DEPTH].a_current > LPFC_MAX_LUN_Q_DEPTH ) { + /* Lun qdepth too high, resetting to max */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0418, /* ptr to msg structure */ + fc_mes0418, /* ptr to msg */ + fc_msgBlk0418.msgPreambleStr, /* begin varargs */ + clp[CFG_DFT_LUN_Q_DEPTH].a_current, + LPFC_MAX_LUN_Q_DEPTH); /* end varargs */ + clp[CFG_DFT_LUN_Q_DEPTH].a_current = LPFC_MAX_LUN_Q_DEPTH; + } + if (clp[CFG_DFT_LUN_Q_DEPTH].a_current == 0 ) { + /* Lun qdepth cannot be , resetting to 1 */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0419, /* ptr to msg structure */ + fc_mes0419, /* ptr to msg */ + fc_msgBlk0419.msgPreambleStr, /* begin varargs */ + clp[CFG_DFT_LUN_Q_DEPTH].a_current ); /* end varargs */ + clp[CFG_DFT_LUN_Q_DEPTH].a_current = 1; + } + + clp[CFG_DQFULL_THROTTLE_UP_TIME].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_DQFULL_THROTTLE_UP_TIME)); + clp[CFG_DQFULL_THROTTLE_UP_INC].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_DQFULL_THROTTLE_UP_INC)); + + clp[CFG_FIRST_CHECK].a_current = (uint32)lpfc_first_check; + clp[CFG_FCPFABRIC_TMO].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_FCPFABRIC_TMO)); + if (clp[CFG_FCPFABRIC_TMO].a_current > LPFC_MAX_FABRIC_TIMEOUT) { + /* Fcpfabric_tmo too high, 
resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0420, /* ptr to msg structure */ + fc_mes0420, /* ptr to msg */ + fc_msgBlk0420.msgPreambleStr, /* begin varargs */ + clp[CFG_FCPFABRIC_TMO].a_current, + LPFC_MAX_FABRIC_TIMEOUT); /* end varargs */ + clp[CFG_FCPFABRIC_TMO].a_current = LPFC_MAX_FABRIC_TIMEOUT; + } + + clp[CFG_FCP_CLASS].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_FCP_CLASS)); + switch (clp[CFG_FCP_CLASS].a_current) { + case 2: + clp[CFG_FCP_CLASS].a_current = CLASS2; + break; + case 3: + clp[CFG_FCP_CLASS].a_current = CLASS3; + break; + default: + /* Fcp-class is illegal, resetting to default */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0421, /* ptr to msg structure */ + fc_mes0421, /* ptr to msg */ + fc_msgBlk0421.msgPreambleStr, /* begin varargs */ + clp[CFG_FCP_CLASS].a_current, + CLASS3); /* end varargs */ + clp[CFG_FCP_CLASS].a_current = CLASS3; + break; + } + + clp[CFG_USE_ADISC].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_USE_ADISC)); + + clp[CFG_NO_DEVICE_DELAY].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_NO_DEVICE_DELAY)); + if (clp[CFG_NO_DEVICE_DELAY].a_current > LPFC_MAX_NO_DEVICE_DELAY) { + /* No-device-delay too high, resetting to max */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0422, /* ptr to msg structure */ + fc_mes0422, /* ptr to msg */ + fc_msgBlk0422.msgPreambleStr, /* begin varargs */ + clp[CFG_NO_DEVICE_DELAY].a_current, + LPFC_MAX_NO_DEVICE_DELAY); /* end varargs */ + clp[CFG_NO_DEVICE_DELAY].a_current = LPFC_MAX_NO_DEVICE_DELAY; + } + + clp[CFG_NETWORK_ON].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_NETWORK_ON)); + clp[CFG_POST_IP_BUF].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_POST_IP_BUF)); + + if (clp[CFG_POST_IP_BUF].a_current < LPFC_MIN_POST_IP_BUF) { + /* Post_ip_buf too low, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0423, /* ptr to msg structure */ + fc_mes0423, /* ptr to msg */ + 
fc_msgBlk0423.msgPreambleStr, /* begin varargs */ + clp[CFG_POST_IP_BUF].a_current, + LPFC_MIN_POST_IP_BUF); /* end varargs */ + clp[CFG_POST_IP_BUF].a_current = LPFC_MIN_POST_IP_BUF; + } + if (clp[CFG_POST_IP_BUF].a_current > LPFC_MAX_POST_IP_BUF) { + /* Post_ip_buf too high, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0424, /* ptr to msg structure */ + fc_mes0424, /* ptr to msg */ + fc_msgBlk0424.msgPreambleStr, /* begin varargs */ + clp[CFG_POST_IP_BUF].a_current, + LPFC_MAX_POST_IP_BUF); /* end varargs */ + clp[CFG_POST_IP_BUF].a_current = LPFC_MAX_POST_IP_BUF; + } + + clp[CFG_XMT_Q_SIZE].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_XMT_Q_SIZE)); + if (clp[CFG_XMT_Q_SIZE].a_current < LPFC_MIN_XMT_QUE_SIZE) { + /* Xmt-que_size too low, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0425, /* ptr to msg structure */ + fc_mes0425, /* ptr to msg */ + fc_msgBlk0425.msgPreambleStr, /* begin varargs */ + clp[CFG_XMT_Q_SIZE].a_current, + LPFC_MIN_XMT_QUE_SIZE); /* end varargs */ + clp[CFG_XMT_Q_SIZE].a_current = LPFC_MIN_XMT_QUE_SIZE; + } + if (clp[CFG_XMT_Q_SIZE].a_current > LPFC_MAX_XMT_QUE_SIZE) { + /* Xmt-que_size too high, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0426, /* ptr to msg structure */ + fc_mes0426, /* ptr to msg */ + fc_msgBlk0426.msgPreambleStr, /* begin varargs */ + clp[CFG_XMT_Q_SIZE].a_current, + LPFC_MAX_XMT_QUE_SIZE); /* end varargs */ + clp[CFG_XMT_Q_SIZE].a_current = LPFC_MAX_XMT_QUE_SIZE; + } + binfo->fc_ring[FC_IP_RING].fc_tx.q_max = clp[CFG_XMT_Q_SIZE].a_current; + + clp[CFG_IP_CLASS].a_current = (uint32)((ulong)fc_get_cfg_param(brd, CFG_IP_CLASS)); + switch (clp[CFG_IP_CLASS].a_current) { + case 2: + clp[CFG_IP_CLASS].a_current = CLASS2; + break; + case 3: + clp[CFG_IP_CLASS].a_current = CLASS3; + break; + default: + /* Ip-class is illegal, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0427, /* ptr to msg structure */ + fc_mes0427, 
/* ptr to msg */ + fc_msgBlk0427.msgPreambleStr, /* begin varargs */ + clp[CFG_IP_CLASS].a_current, + CLASS3); /* end varargs */ + clp[CFG_IP_CLASS].a_current = CLASS3; + break; + } + + clp[CFG_ZONE_RSCN].a_current = (uint32)lpfc_zone_rscn; + p_dev_ctl->vendor_flag = (uint32)lpfc_vendor; + + clp[CFG_HOLDIO].a_current = (uint32)((ulong)fc_get_cfg_param(brd, CFG_HOLDIO)); + clp[CFG_ACK0].a_current = (uint32)((ulong)fc_get_cfg_param(brd, CFG_ACK0)); + clp[CFG_TOPOLOGY].a_current = (uint32)((ulong)fc_get_cfg_param(brd, CFG_TOPOLOGY)); + + switch (clp[CFG_TOPOLOGY].a_current) { + case 0: + case 1: + case 2: + case 4: + case 6: + /* topology is a valid choice */ + break; + default: + /* Topology is illegal, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0428, /* ptr to msg structure */ + fc_mes0428, /* ptr to msg */ + fc_msgBlk0428.msgPreambleStr, /* begin varargs */ + clp[CFG_TOPOLOGY].a_current, + LPFC_DFT_TOPOLOGY); /* end varargs */ + clp[CFG_TOPOLOGY].a_current = LPFC_DFT_TOPOLOGY; + break; + } + + clp[CFG_NODEV_TMO].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_NODEV_TMO)); + clp[CFG_DELAY_RSP_ERR].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_DELAY_RSP_ERR)); + clp[CFG_CHK_COND_ERR].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_CHK_COND_ERR)); + + clp[CFG_LINKDOWN_TMO].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_LINKDOWN_TMO)); + if (clp[CFG_LINKDOWN_TMO].a_current > LPFC_MAX_LNKDWN_TIMEOUT) { + /* Linkdown_tmo too high, resetting */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0429, /* ptr to msg structure */ + fc_mes0429, /* ptr to msg */ + fc_msgBlk0429.msgPreambleStr, /* begin varargs */ + clp[CFG_LINKDOWN_TMO].a_current, + LPFC_MAX_LNKDWN_TIMEOUT); /* end varargs */ + clp[CFG_LINKDOWN_TMO].a_current = LPFC_MAX_LNKDWN_TIMEOUT; + } + + clp[CFG_LINK_SPEED].a_current = + (uint32)((ulong)fc_get_cfg_param(brd, CFG_LINK_SPEED)); + + p_dev_ctl->ihs.lpfn_mtu = lpfc_mtu; + if((lpfc_mtu % 
PAGE_SIZE) == 0) + p_dev_ctl->ihs.lpfn_rcv_buf_size = lpfc_mtu; + else { + p_dev_ctl->ihs.lpfn_rcv_buf_size = ((lpfc_mtu + PAGE_SIZE) & (PAGE_MASK)); + p_dev_ctl->ihs.lpfn_rcv_buf_size -= 16; + } + clp[CFG_NUM_NODES].a_current = clp[CFG_NUM_NODES].a_default; + + return(0); +} /* fc_get_dds */ + +/****************************************************************************** +* Function name : fc_bind_wwpn +* +******************************************************************************/ +_local_ int fc_bind_wwpn(fc_dev_ctl_t *p_dev_ctl, + char **arrayp, + u_int cnt) +{ + uchar *datap, *np; + NODELIST *nlp; + nodeh_t *hp; + NAME_TYPE pn; + int i, dev_index, entry, lpfc_num, rstatus; + unsigned int sum; + + FC_BRD_INFO * binfo = &BINFO; + + p_dev_ctl->fcp_mapping = FCP_SEED_WWPN; + np = (uchar *)&pn; + + for(entry = 0; entry < cnt; entry++) { + datap = (uchar *)arrayp[entry]; + if(datap == 0) + break; + /* Determined the number of ASC hex chars in WWNN & WWPN */ + for( i = 0; i < FC_MAX_WW_NN_PN_STRING; i++) { + if( fc_asc_to_hex( datap[i]) < 0) + break; + } + if((rstatus = fc_parse_binding_entry( p_dev_ctl, datap, np, + i, sizeof( NAME_TYPE), + FC_BIND_WW_NN_PN, &sum, entry, &lpfc_num)) > 0) { + if( rstatus == FC_SYNTAX_OK_BUT_NOT_THIS_BRD) + continue; + + /* WWPN binding entry : Syntax error code */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0430, /* ptr to msg structure */ + fc_mes0430, /* ptr to msg */ + fc_msgBlk0430.msgPreambleStr, /* begin varargs */ + entry, + rstatus); /* end varargs */ + goto out; + } + + /* Loop through all NODELIST entries and find + * the next available entry. 
+ */ + if((nlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP)) == 0) { + /* WWPN binding entry: node table full */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0432, /* ptr to msg structure */ + fc_mes0432, /* ptr to msg */ + fc_msgBlk0432.msgPreambleStr); /* begin & end varargs */ + goto out; + } + fc_bzero((void *)nlp, sizeof(NODELIST)); + nlp->sync = binfo->fc_sync; + nlp->capabilities = binfo->fc_capabilities; + + nlp->nlp_state = NLP_SEED; + nlp->nlp_type = NLP_SEED_WWPN | NLP_FCP_TARGET; + + nlp->id.nlp_sid = DEV_SID(sum); + nlp->id.nlp_pan = DEV_PAN(sum); + bcopy((uchar * )&pn, &nlp->nlp_portname, sizeof(NAME_TYPE)); + + dev_index = INDEX(nlp->id.nlp_pan, nlp->id.nlp_sid); + hp = &binfo->device_queue_hash[dev_index]; + + /* Claim SCSI ID by copying bind parameter to + * proper index in device_queue_hash. + */ + bcopy(&nlp->nlp_portname, &hp->un.dev_portname, sizeof(NAME_TYPE)); + hp->node_flag = FCP_SEED_WWPN; + + fc_nlp_bind(binfo, nlp); + + out: + np = (uchar *)&pn; + } + return (0); +} /* fc_bind_wwpn */ + +/****************************************************************************** +* Function name : fc_bind_wwnn +* +* Description : p_dev_ctl, pointer to the device control area +* +******************************************************************************/ +_local_ int fc_bind_wwnn(fc_dev_ctl_t *p_dev_ctl, + char **arrayp, + u_int cnt) +{ + uchar *datap, *np; + NODELIST *nlp; + nodeh_t *hp; + NAME_TYPE pn; + int i, dev_index, entry, lpfc_num, rstatus; + unsigned int sum; + + FC_BRD_INFO * binfo = &BINFO; + + p_dev_ctl->fcp_mapping = FCP_SEED_WWNN; + np = (uchar *)&pn; + + for(entry = 0; entry < cnt; entry++) { + datap = (uchar *)arrayp[entry]; + if(datap == 0) + break; + /* Determined the number of ASC hex chars in WWNN & WWPN */ + for( i = 0; i < FC_MAX_WW_NN_PN_STRING; i++) { + if( fc_asc_to_hex( datap[i]) < 0) + break; + } + if((rstatus = fc_parse_binding_entry( p_dev_ctl, datap, np, + i, sizeof( NAME_TYPE), + FC_BIND_WW_NN_PN, &sum, 
entry, &lpfc_num)) > 0) { + if( rstatus == FC_SYNTAX_OK_BUT_NOT_THIS_BRD) + continue; + + /* WWNN binding entry : Syntax error code */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0431, /* ptr to msg structure */ + fc_mes0431, /* ptr to msg */ + fc_msgBlk0431.msgPreambleStr, /* begin varargs */ + entry, + rstatus); /* end varargs */ + goto out; + } + + /* Loop through all NODELIST entries and find + * the next available entry. + */ + if((nlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP)) == 0) { + /* WWNN binding entry: node table full */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0433, /* ptr to msg structure */ + fc_mes0433, /* ptr to msg */ + fc_msgBlk0433.msgPreambleStr); /* begin & end varargs */ + goto out; + } + fc_bzero((void *)nlp, sizeof(NODELIST)); + nlp->sync = binfo->fc_sync; + nlp->capabilities = binfo->fc_capabilities; + + nlp->nlp_state = NLP_SEED; + nlp->nlp_type = NLP_SEED_WWNN | NLP_FCP_TARGET; + nlp->id.nlp_sid = DEV_SID(sum); + nlp->id.nlp_pan = DEV_PAN(sum); + bcopy((uchar * )&pn, &nlp->nlp_nodename, sizeof(NAME_TYPE)); + + dev_index = INDEX(nlp->id.nlp_pan, nlp->id.nlp_sid); + hp = &binfo->device_queue_hash[dev_index]; + + /* Claim SCSI ID by copying bind parameter to + * proper index in device_queue_hash. 
+ */ + bcopy(&nlp->nlp_nodename, &hp->un.dev_nodename, sizeof(NAME_TYPE)); + hp->node_flag = FCP_SEED_WWNN; + + fc_nlp_bind(binfo, nlp); + + out: + np = (uchar *)&pn; + } /* for loop */ + return (0); +} /* fc_bind_wwnn */ + +/****************************************************************************** +* Function name : fc_bind_did +* +* Description : p_dev_ctl, pointer to the device control area +* +******************************************************************************/ +_local_ int fc_bind_did(fc_dev_ctl_t *p_dev_ctl, + char **arrayp, + u_int cnt) +{ + uchar *datap, *np; + NODELIST *nlp; + nodeh_t *hp; + FC_BRD_INFO *binfo = &BINFO; + D_ID ndid; + int i, dev_index, entry, lpfc_num, rstatus; + unsigned int sum; + + p_dev_ctl->fcp_mapping = FCP_SEED_DID; + ndid.un.word = 0; + np = (uchar *)&ndid.un.word; + + for(entry = 0; entry < cnt; entry++) { + datap = (uchar *)arrayp[entry]; + if(datap == 0) + break; + /* Determined the number of ASC hex chars in DID */ + for( i = 0; i < FC_MAX_DID_STRING; i++) { + if( fc_asc_to_hex( datap[i]) < 0) + break; + } + if((rstatus = fc_parse_binding_entry( p_dev_ctl, datap, np, + i, sizeof(D_ID), + FC_BIND_DID, &sum, entry, &lpfc_num)) > 0) { + if( rstatus == FC_SYNTAX_OK_BUT_NOT_THIS_BRD) + continue; + + /* DID binding entry : Syntax error code */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0434, /* ptr to msg structure */ + fc_mes0434, /* ptr to msg */ + fc_msgBlk0434.msgPreambleStr, /* begin varargs */ + entry, + rstatus); /* end varargs */ + goto out; + } + + /* Loop through all NODELIST entries and find + * the next available entry. 
+ */ + if((nlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP)) == 0) { + /* DID binding entry: node table full */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0435, /* ptr to msg structure */ + fc_mes0435, /* ptr to msg */ + fc_msgBlk0435.msgPreambleStr); /* begin & end varargs */ + goto out; + } + fc_bzero((void *)nlp, sizeof(NODELIST)); + nlp->sync = binfo->fc_sync; + nlp->capabilities = binfo->fc_capabilities; + + nlp->nlp_state = NLP_SEED; + nlp->nlp_type = NLP_SEED_DID | NLP_FCP_TARGET; + nlp->id.nlp_sid = DEV_SID(sum); + nlp->id.nlp_pan = DEV_PAN(sum); + nlp->nlp_DID = SWAP_DATA(ndid.un.word); + + dev_index = INDEX(nlp->id.nlp_pan, nlp->id.nlp_sid); + hp = &binfo->device_queue_hash[dev_index]; + + /* Claim SCSI ID by copying bind parameter to + * proper index in device_queue_hash. + */ + hp->un.dev_did = nlp->nlp_DID; + hp->node_flag = FCP_SEED_DID; + + fc_nlp_bind(binfo, nlp); + + out: + + np = (uchar *)&ndid.un.word; + } + return (0); +} + +/****************************************************************************** +* Function name : fc_bufmap +* +* Description : Maps in the specified chunk of memory, bp + len, and returns +* the number of mapped segments in the scatter list. Upon return +* phys will point to a list of physical addresses and cnt will +* point to a corresponding list of sizes. Handle will point to a +* dma handle for the I/O, if needed. +* This routine is only called by the IP portion of the driver to +* get a scatter / gather list for a specific IP packet before +* starting the I/O. 
+******************************************************************************/ +int fc_bufmap(fc_dev_ctl_t *p_dev_ctl, + uchar *bp, + uint32 len, + void **phys, + uint32 *cnt, + void **handle) +{ + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + + buf_info = &bufinfo; + *handle = 0; + buf_info->phys = (void *)((ulong)INVALID_PHYS); + buf_info->virt = bp; + buf_info->size = len; + buf_info->flags = (FC_MBUF_PHYSONLY | FC_MBUF_DMA); + fc_malloc(p_dev_ctl, buf_info); + + if(is_invalid_phys(buf_info->phys)) + return(0); + + phys[0] = (void *) buf_info->phys; + cnt[0] = (uint32) len; + return(1); +} + +/****************************************************************************** +* Function name : fc_bufunmap +* +* Description : This is called to unmap a piece of memory, mapped by fc_bufmap, +* and to free the associated DMA handle, if needed. +******************************************************************************/ +void fc_bufunmap(fc_dev_ctl_t *p_dev_ctl, + uchar *addr, + uchar *dmahandle, + uint32 len) +{ + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + + buf_info = &bufinfo; + buf_info->phys = (uint32 * )addr; + buf_info->flags = (FC_MBUF_PHYSONLY | FC_MBUF_DMA); + buf_info->size = len; + fc_free(p_dev_ctl, buf_info); +} + +/****************************************************************************** +* Function name : lpfc_scsi_selto_timeout +* +* Description : call back function for scsi timeout +******************************************************************************/ +void lpfc_scsi_selto_timeout(fc_dev_ctl_t *p_dev_ctl, + void *l1, + void *l2) +{ + struct buf *bp; + + bp = (struct buf *)l1; + /* Set any necessary flags for buf error */ + if((bp->b_error != EBUSY) && (bp->b_error != EINVAL)) + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + fc_do_iodone(bp); +} + +/****************************************************************************** +* Function name : lpfc_copy_sense +* +* Description : copy sense data from the device structure into the SCSI command's sense buffer 
+******************************************************************************/ +int lpfc_copy_sense(dvi_t * dev_ptr, + struct buf * bp) +{ + struct scsi_cmnd *cmnd; + int sense_cnt; + + cmnd = bp->cmnd; + if (dev_ptr->sense_length > SCSI_SENSE_BUFFERSIZE) { + sense_cnt = SCSI_SENSE_BUFFERSIZE; + } + else { + sense_cnt = dev_ptr->sense_length; + } + /* Copy sense data into SCSI buffer */ + bcopy(dev_ptr->sense, cmnd->sense_buffer, sense_cnt); + dev_ptr->sense_valid = 0; + return(0); +} + +/****************************************************************************** +* Function name : get_cmd_off_txq +* +* Description : +* +******************************************************************************/ +IOCBQ *get_cmd_off_txq(fc_dev_ctl_t *p_dev_ctl, + ushort iotag) +{ + FC_BRD_INFO * binfo = &BINFO; + IOCBQ * iocbq, *save; + RING * rp; + unsigned long iflag; + + iflag = lpfc_q_disable_lock(p_dev_ctl); + rp = &binfo->fc_ring[FC_FCP_RING]; + iocbq = (IOCBQ * )(rp->fc_tx.q_first); + save = 0; + while (iocbq) { + if(iocbq->iocb.ulpIoTag == iotag) { + if(save) + save->q = iocbq->q; + else + rp->fc_tx.q_first = (uchar *)iocbq->q; + + if(rp->fc_tx.q_last == (uchar *)iocbq) + rp->fc_tx.q_last = (uchar *)save; + + rp->fc_tx.q_cnt--; + lpfc_q_unlock_enable(p_dev_ctl, iflag); + return iocbq; + } + save = iocbq; + iocbq = (IOCBQ * )iocbq->q; + } + + lpfc_q_unlock_enable(p_dev_ctl, iflag); + return 0; +} + +/****************************************************************************** +* Function name : find_cmd_in_txpq +* +* Description : +* +******************************************************************************/ +int find_cmd_in_txpq(fc_dev_ctl_t *p_dev_ctl, + struct scsi_cmnd *cmnd) +{ + FC_BRD_INFO * binfo = &BINFO; + struct fc_buf *fcptr, *savefc; + dvi_t * dev_ptr; + IOCBQ *iocb_cmd, *iocb_cn_cmd; + struct buf *bp; + RING * rp; + struct sc_buf *sp; + int abort_stat; + unsigned long iflag; + + iflag = lpfc_q_disable_lock(p_dev_ctl); + rp = 
&binfo->fc_ring[FC_FCP_RING]; + fcptr = (struct fc_buf *)(rp->fc_txp.q_first); + savefc = 0; + while (fcptr) { + if(((struct buf *)(fcptr->sc_bufp))->cmnd == cmnd) { + dev_ptr = fcptr->dev_ptr; + lpfc_q_unlock_enable(p_dev_ctl, iflag); + iocb_cmd = get_cmd_off_txq(p_dev_ctl, fcptr->iotag); + iflag = lpfc_q_disable_lock(p_dev_ctl); + if (iocb_cmd) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocb_cmd); + + lpfc_q_unlock_enable(p_dev_ctl, iflag); + while ((iocb_cn_cmd = get_cmd_off_txq(p_dev_ctl, fcptr->iotag))) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocb_cn_cmd); + } + iflag = lpfc_q_disable_lock(p_dev_ctl); + + bp = (struct buf *)fcptr->sc_bufp; + bp->b_error = ETIMEDOUT; + bp->b_flags |= B_ERROR; + lpfc_q_unlock_enable(p_dev_ctl, iflag); + fc_do_iodone(bp); + iflag = lpfc_q_disable_lock(p_dev_ctl); + + if(savefc) { + savefc->fc_fwd = fcptr->fc_fwd; + if (fcptr->fc_fwd) + fcptr->fc_fwd->fc_bkwd = savefc; + } else { + rp->fc_txp.q_first = (uchar *)(fcptr->fc_fwd); + if (fcptr->fc_fwd) + fcptr->fc_fwd->fc_bkwd = 0; + } + + if(rp->fc_txp.q_last == (uchar *)fcptr) { + rp->fc_txp.q_last = (uchar *)savefc; + } + + rp->fc_txp.q_cnt--; + lpfc_q_unlock_enable(p_dev_ctl, iflag); + fc_enq_fcbuf(fcptr); + iflag = lpfc_q_disable_lock(p_dev_ctl); + dev_ptr->active_io_count--; + if (dev_ptr->nodep) + dev_ptr->nodep->num_active_io--; + else + panic ("abort in txp: dev_ptr->nodep is NULL\n"); + } else { + sp = (struct sc_buf *)(fcptr->sc_bufp); + sp->cmd_flag |= FLAG_ABORT; + lpfc_q_unlock_enable(p_dev_ctl, iflag); + abort_stat = fc_abort_xri(binfo, fcptr->dev_ptr, + fcptr->iotag, ABORT_TYPE_ABTS); + iflag = lpfc_q_disable_lock(p_dev_ctl); + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + return 1; + } else { + savefc = fcptr; + fcptr = (struct fc_buf *)fcptr->fc_fwd; + } + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + return 0; +} + +/****************************************************************************** +* Function name : find_cmd_in_tmolist +* +* Description : +* 
+******************************************************************************/ +int find_cmd_in_tmolist(fc_dev_ctl_t *p_dev_ctl, + struct scsi_cmnd *cmnd) +{ + struct buf *bp, *savebp; + + savebp = 0; + for (bp = p_dev_ctl->timeout_head; bp != NULL; ) { + if (bp->cmnd == cmnd) { + if(savebp) + savebp->av_forw = bp->av_forw; + else + p_dev_ctl->timeout_head = bp->av_forw; + + p_dev_ctl->timeout_count--; + bp->b_error = ETIMEDOUT; + bp->b_flags |= B_ERROR; + fc_do_iodone(bp); + return 1; + } else { + savebp = bp; + bp = bp->av_forw; + } + } + + return 0; +} + +/****************************************************************************** +* Function name : find_cmd_in_pendlist +* +* Description : +* +******************************************************************************/ +int find_cmd_in_pendlist(dvi_t *dev_ptr, + struct scsi_cmnd *cmnd) +{ + struct buf *bp, *savebp; + node_t * nodep; + + bp = (struct buf *)dev_ptr->pend_head; + savebp = 0; + while (bp) { + if (bp->cmnd == cmnd) { + nodep = dev_ptr->nodep; + if(savebp) + savebp->av_forw = bp->av_forw; + else + dev_ptr->pend_head = (struct sc_buf *)(bp->av_forw); + + if (dev_ptr->pend_tail == (struct sc_buf *)bp) + dev_ptr->pend_tail = (struct sc_buf *)savebp; + + dev_ptr->pend_count--; + bp->b_error = ETIMEDOUT; + bp->b_flags |= B_ERROR; + fc_do_iodone(bp); + return 1; + } else { + savebp = bp; + bp = bp->av_forw; + } + } + + return 0; +} + +/****************************************************************************** +* Function name : lpfc_find_cmd +* +* Description : +* +******************************************************************************/ +_local_ int lpfc_find_cmd(fc_dev_ctl_t *p_dev_ctl, + struct scsi_cmnd *cmnd) +{ + dvi_t * dev_ptr; + struct sc_buf * sp; + + sp = (struct sc_buf *)cmnd->host_scribble; + if(sp == 0) + return 1; + dev_ptr = sp->current_devp; + if(dev_ptr) { + if (find_cmd_in_pendlist(dev_ptr, cmnd)) + goto err1; + } + + if (find_cmd_in_txpq(p_dev_ctl, cmnd)) + goto err1; + 
if (find_cmd_in_tmolist(p_dev_ctl, cmnd)) + goto err1; + + return 0; + +err1: + return 1; +} + +/****************************************************************************** +* Function name : lpfc_nodev +* +* Description : +* +******************************************************************************/ +void lpfc_nodev(unsigned long l) +{ + return; +} + +/****************************************************************************** +* Function name : lpfc_scsi_add_timer +* +* Description : Copied from scsi_add_timer +******************************************************************************/ +void lpfc_scsi_add_timer(struct scsi_cmnd * SCset, + int timeout) +{ + + if( SCset->eh_timeout.function != NULL ) + { + del_timer(&SCset->eh_timeout); + } + + if(SCset->eh_timeout.data != (unsigned long) SCset) { + SCset->eh_timeout.data = (unsigned long) SCset; + SCset->eh_timeout.function = (void (*)(unsigned long))lpfc_nodev; + } + SCset->eh_timeout.expires = jiffies + timeout; + + add_timer(&SCset->eh_timeout); +} + +/****************************************************************************** +* Function name : lpfc_scsi_delete_timer() +* +* Purpose: Delete/cancel timer for a given function. +* Copied from scsi_delete_timer() +* +* Arguments: SCset - command that we are canceling timer for. +* +* Returns: Amount of time remaining before command would have timed out. 
+******************************************************************************/ +int lpfc_scsi_delete_timer(struct scsi_cmnd * SCset) +{ + int rtn; + + rtn = jiffies - SCset->eh_timeout.expires; + del_timer(&SCset->eh_timeout); + SCset->eh_timeout.data = (unsigned long) NULL; + SCset->eh_timeout.function = NULL; + return rtn; +} + +/****************************************************************************** +* Function name : fc_device_changed +* +* Description : +* +******************************************************************************/ +int fc_device_changed(fc_dev_ctl_t *p_dev_ctl, + struct dev_info *dev_ptr) +{ + struct scsi_device *sd; + + if(lpfc_use_removable) { + sd = (struct scsi_device *)dev_ptr->scsi_dev; + if(sd) { +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + sd->changed = 0; + sd->removable = 0; +#else + sd->online = 1; +#endif + } + } + return(0); +} +/****************************************************************************** +* Function name : fc_bcopy +* +* Description : +* +******************************************************************************/ +void fc_bcopy(void *from, + void *to, + ulong cnt) +{ + bcopy(from, to, cnt); +} + +/****************************************************************************** +* Function name : fc_bzero +* +* Description : +* +******************************************************************************/ +void fc_bzero(void *from, + ulong cnt) +{ + memset(from,0,(size_t)cnt); +} + +/****************************************************************************** +* Function name : fc_copyout +* +* Description : +* +******************************************************************************/ +int fc_copyout(uchar *from, + uchar *to, + ulong cnt) +{ + return(copyout(from, to, cnt)); +} + +/****************************************************************************** +* Function name : fc_copyin +* +* Description : +* 
+******************************************************************************/ +int fc_copyin(uchar *from, + uchar *to, + ulong cnt) +{ + return(copyin(from, to, cnt)); +} + +/****************************************************************************** +* Function name : lpfc_mpdata_sync +* +* Description : +* +******************************************************************************/ +void lpfc_mpdata_sync(fc_dev_ctl_t *p_dev_ctl, + void *h, + int a, + int b, + int c) +{ +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) + if(c == 1) + c = PCI_DMA_FROMDEVICE; + else + c = PCI_DMA_TODEVICE; + if(b) + fc_pci_dma_sync_single(p_dev_ctl->pcidev, (dma_addr_t)((ulong)h), b, c); + else + fc_pci_dma_sync_single(p_dev_ctl->pcidev, (dma_addr_t)((ulong)h), 4096, c); +#endif +} + +/****************************************************************************** +* Function name : lpfc_ip_rcvsz +* +* Description : +* +******************************************************************************/ +int lpfc_ip_rcvsz(fc_dev_ctl_t *p_dev_ctl) +{ + return(p_dev_ctl->ihs.lpfn_rcv_buf_size); +} + +/****************************************************************************** +* Function name : fc_dpc_lstchk +* +* Description : Since abort, device reset, bus reset, and host reset dpc lists +* all use SCp.ptr for linking, double check to make sure +* LINUX doesn't use the same Cmnd for multiple resets / aborts. 
+* +* XXX patman check that this still works +******************************************************************************/ +int fc_dpc_lstchk(fc_dev_ctl_t * p_dev_ctl, + struct scsi_cmnd * Cmnd) +{ + struct scsi_cmnd * aCmnd; + struct scsi_cmnd * bCmnd; + + aCmnd = (struct scsi_cmnd *)p_dev_ctl->abort_head; + bCmnd = 0; + while(aCmnd) { + /* Check for duplicate on abort list */ + if(aCmnd == Cmnd) { + if(bCmnd == 0) { + p_dev_ctl->abort_head = (void *)SCMD_NEXT(Cmnd); + } + else { + SCMD_NEXT(bCmnd) = SCMD_NEXT(Cmnd); + } + if(Cmnd == (struct scsi_cmnd *)p_dev_ctl->abort_list) + p_dev_ctl->abort_list = (void *)bCmnd; + SCMD_NEXT(Cmnd) = 0; + return(1); + } + bCmnd = aCmnd; + aCmnd = SCMD_NEXT(aCmnd); + } + aCmnd = (struct scsi_cmnd *)p_dev_ctl->rdev_head; + bCmnd = 0; + while(aCmnd) { + /* Check for duplicate on device reset list */ + if(aCmnd == Cmnd) { + if(bCmnd == 0) { + p_dev_ctl->rdev_head = (void *)SCMD_NEXT(Cmnd); + } + else { + SCMD_NEXT(bCmnd) = SCMD_NEXT(Cmnd); + } + if(Cmnd == (struct scsi_cmnd *)p_dev_ctl->rdev_list) + p_dev_ctl->rdev_list = (void *)bCmnd; + SCMD_NEXT(Cmnd) = 0; + return(2); + } + bCmnd = aCmnd; + aCmnd = SCMD_NEXT(aCmnd); + } + aCmnd = (struct scsi_cmnd *)p_dev_ctl->rbus_head; + bCmnd = 0; + while(aCmnd) { + /* Check for duplicate on bus reset list */ + if(aCmnd == Cmnd) { + if(bCmnd == 0) { + p_dev_ctl->rbus_head = (void *)SCMD_NEXT(Cmnd); + } + else { + SCMD_NEXT(bCmnd) = SCMD_NEXT(Cmnd); + } + if(Cmnd == (struct scsi_cmnd *)p_dev_ctl->rbus_list) + p_dev_ctl->rbus_list = (void *)bCmnd; + SCMD_NEXT(Cmnd) = 0; + return(3); + } + bCmnd = aCmnd; + aCmnd = SCMD_NEXT(aCmnd); + } + aCmnd = (struct scsi_cmnd *)p_dev_ctl->rhst_head; + bCmnd = 0; + while(aCmnd) { + /* Check for duplicate on host reset list */ + if(aCmnd == Cmnd) { + if(bCmnd == 0) { + p_dev_ctl->rhst_head = (void *)SCMD_NEXT(Cmnd); + } + else { + SCMD_NEXT(bCmnd) = SCMD_NEXT(Cmnd); + } + if(Cmnd == (struct scsi_cmnd *)p_dev_ctl->rhst_list) + p_dev_ctl->rhst_list = (void 
*)bCmnd; + SCMD_NEXT(Cmnd) = 0; + return(4); + } + bCmnd = aCmnd; + aCmnd = SCMD_NEXT(aCmnd); + } + return(0); +} + +/****************************************************************************** +* Function name : fc_timer +* +* Description : This function will be called by the driver every second. +******************************************************************************/ +_static_ void lpfc_timer(void *p) +{ + fc_dev_ctl_t * p_dev_ctl; + FCCLOCK_INFO * clock_info; + ulong tix; + FCCLOCK * x; + int ipri; + + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + /* + *** Increment time_sample value + */ + clock_info->ticks++; + + x = clock_info->fc_clkhdr.cl_f; + + /* counter for propagating negative values */ + tix = 0; + /* If there are expired clocks */ + if (x != (FCCLOCK * ) & clock_info->fc_clkhdr) { + x->cl_tix = x->cl_tix - 1; + if (x->cl_tix <= 0) { + /* Loop thru all clock blocks */ + while (x != (FCCLOCK * ) & clock_info->fc_clkhdr) { + x->cl_tix += tix; + /* If # of ticks left > 0, break out of loop */ + if (x->cl_tix > 0) + break; + tix = x->cl_tix; + + fc_deque(x); + /* Decrement count of unexpired clocks */ + clock_info->fc_clkhdr.count--; + + unlock_enable(ipri, &CLOCK_LOCK); + + p_dev_ctl = x->cl_p_dev_ctl; + + if(p_dev_ctl) { + /* Queue clock blk to appropriate dpc to be processed */ + if(p_dev_ctl->qclk_head == NULL) { + p_dev_ctl->qclk_head = (void *)x; + p_dev_ctl->qclk_list = (void *)x; + } else { + ((FCCLOCK *)(p_dev_ctl->qclk_list))->cl_fw = x; + p_dev_ctl->qclk_list = (void *)x; + } + x->cl_fw = NULL; + } + else { + /* Call timeout routine */ + (*x->cl_func) (p_dev_ctl, x->cl_arg1, x->cl_arg2); + /* Release clock block */ + fc_clkrelb(p_dev_ctl, x); + } + + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + x = clock_info->fc_clkhdr.cl_f; + } + } + } + unlock_enable(ipri, &CLOCK_LOCK); + fc_reset_timer(); +} + +/****************************************************************************** +* Function name 
: do_fc_timer +* +* Description : +* +******************************************************************************/ +int do_fc_timer(fc_dev_ctl_t *p_dev_ctl) +{ + FCCLOCK *x; + FCCLOCK *cb; + + cb = (FCCLOCK *)p_dev_ctl->qclk_head; + while(cb) { + x = cb; + cb = cb->cl_fw; + /* Call timeout routine */ + (*x->cl_func) (p_dev_ctl, x->cl_arg1, x->cl_arg2); + /* Release clock block */ + fc_clkrelb(p_dev_ctl, x); + } + p_dev_ctl->qclk_head = 0; + p_dev_ctl->qclk_list = 0; + return(0); +} + +/****************************************************************************** +* Function name : lpfc_kmalloc +* +* Description : +* +******************************************************************************/ +void * lpfc_kmalloc(unsigned int size, + unsigned int type, + void **pphys, + fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + void * pcidev; + void * virt; + struct fc_mem_pool * fmp; + dma_addr_t phys; + int i, instance; + +/* printk("lpfc_kmalloc: %d %d %lx %lx\n", size, type, pphys, p_dev_ctl); +*/ + if(pphys == 0) { + virt = (void *)kmalloc(size, type); + return(virt); + } + if(p_dev_ctl == 0) { + /* lpfc_kmalloc: Bad p_dev_ctl */ + fc_log_printf_msg_vargs( 0, /* force brd 0, no p_dev_ctl */ + &fc_msgBlk1201, /* ptr to msg structure */ + fc_mes1201, /* ptr to msg */ + fc_msgBlk1201.msgPreambleStr, /* begin varargs */ + size, + type, + fc_idx_dmapool[0]); /* end varargs */ + return(0); + } + instance = p_dev_ctl->info.fc_brd_no; + pcidev = p_dev_ctl->pcidev; + binfo = &BINFO; + + if(size > FC_MAX_SEGSZ) { + /* lpfc_kmalloc: Bad size */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1202, /* ptr to msg structure */ + fc_mes1202, /* ptr to msg */ + fc_msgBlk1202.msgPreambleStr, /* begin varargs */ + size, + type, + fc_idx_dmapool[instance]); /* end varargs */ + return(0); + } +top: + fmp = fc_mem_dmapool[instance]; + for(i=0;i<=fc_idx_dmapool[instance];i++) { + fmp = (fc_mem_dmapool[instance] + i); + if((fmp->p_virt == 0) || (fmp->p_left >= size)) + 
break; + } + if(i == (fc_size_dmapool[instance] - 2)) { + /* Lets make it bigger */ + fc_size_dmapool[instance] += FC_MAX_POOL; + fmp = kmalloc((sizeof(struct fc_mem_pool) * fc_size_dmapool[instance]), + GFP_ATOMIC); + if(fmp) { + fc_bzero((void *)fmp, + (sizeof(struct fc_mem_pool) * fc_size_dmapool[instance])); + fc_bcopy((void *)fc_mem_dmapool[instance], fmp, + (sizeof(struct fc_mem_pool) * (fc_size_dmapool[instance]-FC_MAX_POOL))); + kfree(fc_mem_dmapool[instance]); + fc_mem_dmapool[instance] = fmp; + goto top; + } + goto out; + } + + if(fmp->p_virt == 0) { + virt = pci_alloc_consistent(pcidev, FC_MAX_SEGSZ, &phys); + if(virt) { + fmp->p_phys = (void *)((ulong)phys); + fmp->p_virt = virt; + fmp->p_refcnt = 0; + fmp->p_left = (ushort)FC_MAX_SEGSZ; + if(i == fc_idx_dmapool[instance]) + if(i < (fc_size_dmapool[instance] - 2)) + fc_idx_dmapool[instance]++; + } + else { + /* lpfc_kmalloc: Bad virtual addr */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1204, /* ptr to msg structure */ + fc_mes1204, /* ptr to msg */ + fc_msgBlk1204.msgPreambleStr, /* begin varargs */ + i, + size, + type, + fc_idx_dmapool[instance]); /* end varargs */ + return(0); + } + } + + if(fmp->p_left >= size) { + fmp->p_refcnt++; + virt = (void *)((uchar *)fmp->p_virt + FC_MAX_SEGSZ - fmp->p_left); + phys = (dma_addr_t)(ulong)((uchar *)fmp->p_phys + FC_MAX_SEGSZ - fmp->p_left); + *pphys = (void *)((ulong)phys); + fmp->p_left -= size; + return(virt); + } +out: + /* lpfc_kmalloc: dmapool FULL */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1205, /* ptr to msg structure */ + fc_mes1205, /* ptr to msg */ + fc_msgBlk1205.msgPreambleStr, /* begin varargs */ + i, + size, + type, + fc_idx_dmapool[instance]); /* end varargs */ + return(0); +} + +/****************************************************************************** +* Function name : lpfc_kfree +* +* Description : +* +******************************************************************************/ +void lpfc_kfree(unsigned 
int size, + void *virt, + void *phys, + fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + struct fc_mem_pool * fmp; + void * pcidev; + int i, instance; + + if(is_invalid_phys(phys)) { + kfree(virt); + return; + } + + if(p_dev_ctl == 0) { + /* lpfc_kfree: Bad p_dev_ctl */ + fc_log_printf_msg_vargs( 0, /* force brd 0, no p_dev_ctl */ + &fc_msgBlk1206, /* ptr to msg structure */ + fc_mes1206, /* ptr to msg */ + fc_msgBlk1206.msgPreambleStr, /* begin varargs */ + size, + fc_idx_dmapool[0]); /* end varargs */ + return; + } + + instance = p_dev_ctl->info.fc_brd_no; + pcidev = p_dev_ctl->pcidev; + binfo = &BINFO; + + + for(i=0;i= fmp->p_virt) && + (virt < (void *)((uchar *)fmp->p_virt + FC_MAX_SEGSZ))) { + fmp->p_refcnt--; + if(fmp->p_refcnt == 0) { + pci_free_consistent(pcidev, FC_MAX_SEGSZ, + fmp->p_virt, (dma_addr_t)((ulong)fmp->p_phys)); + fc_bzero((void *)fmp, sizeof(struct fc_mem_pool)); + } + return; + } + } + /* lpfc_kfree: NOT in dmapool */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1207, /* ptr to msg structure */ + fc_mes1207, /* ptr to msg */ + fc_msgBlk1207.msgPreambleStr, /* begin varargs */ + (uint32)((ulong)virt), + size, + fc_idx_dmapool[instance]); /* end varargs */ + return; +} /* lpfc_kfree */ + +MODULE_LICENSE("GPL"); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcLINUXlan.c current/drivers/scsi/lpfc/fcLINUXlan.c --- reference/drivers/scsi/lpfc/fcLINUXlan.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcLINUXlan.c 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,376 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. 
* + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,4) +#include +#else +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s)) +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,17) +#include +#else +#include +#endif +#include +#include +#include + +#ifdef MODULE +#include +#include "lpfc.ver" +#else +extern int lpfn_probe(void); +#endif /* MODULE */ + +/* fcLINUXlan.c IP interface network driver */ +#include "fc_os.h" +#include "fc_hw.h" +#include "fc.h" + +static int lpfn_xmit(struct sk_buff *skb, NETDEVICE *dev); +static struct enet_statistics *lpfn_get_stats(NETDEVICE *dev); + +extern int arp_find(unsigned char *haddr, struct sk_buff *skb); +extern int lpfc_xmit(fc_dev_ctl_t *p_dev_ctl, struct sk_buff *skb); +extern int lpfc_ioctl(int cmd, void *s); + +/****************************************************************************** +* Function name : lpfn_open +* +* Description : +* 
+******************************************************************************/ +static int lpfn_open(NETDEVICE *dev) +{ + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO * binfo; + + if((p_dev_ctl = (fc_dev_ctl_t *)(dev->priv)) == 0) + return(-ENODEV); + binfo = &BINFO; + p_dev_ctl->device_state = OPENED; + binfo->fc_open_count |= FC_LAN_OPEN; + + netdevice_start(dev); + netif_start_queue(dev); +#ifdef MODULE + MOD_INC_USE_COUNT; +#endif /* MODULE */ + return 0; +} + +/****************************************************************************** +* Function name : lpfn_close +* +* Description : +* +******************************************************************************/ +static int lpfn_close(NETDEVICE *dev) +{ + fc_dev_ctl_t *p_dev_ctl; + FC_BRD_INFO * binfo; + + if((p_dev_ctl = (fc_dev_ctl_t *)(dev->priv)) == 0) + return(-ENODEV); + binfo = &BINFO; + p_dev_ctl->device_state = 0; + binfo->fc_open_count &= ~FC_LAN_OPEN; + + netdevice_stop(dev); + netif_stop_queue(dev); +#ifdef MODULE + MOD_DEC_USE_COUNT; +#endif /* MODULE */ + return 0; +} + +/****************************************************************************** +* Function name : lpfn_change_mtu +* +* Description : +* +******************************************************************************/ +int lpfn_change_mtu(NETDEVICE *dev, + int new_mtu) +{ + fc_dev_ctl_t *p_dev_ctl; + + if((p_dev_ctl = (fc_dev_ctl_t *)(dev->priv)) == 0) + return(-ENODEV); + if ((new_mtu < FC_MIN_MTU) || (new_mtu > p_dev_ctl->ihs.lpfn_mtu)) + return -EINVAL; + dev->mtu = new_mtu; + return(0); +} + + +/****************************************************************************** +* Function name : lpfn_header +* +* Description : Create the FC MAC/LLC/SNAP header for an arbitrary protocol +* layer +* saddr=NULL means use device source address +* daddr=NULL means leave destination address (eg unresolved arp) +* +******************************************************************************/ +int lpfn_header(struct sk_buff *skb, 
+ NETDEVICE *dev, + unsigned short type, + void *daddr, + void *saddr, + unsigned len) +{ + struct fc_nethdr *fchdr; + int rc; + + if (type == ETH_P_IP || type == ETH_P_ARP) { + fchdr = (struct fc_nethdr *)skb_push(skb, sizeof(struct fc_nethdr)); + + fchdr->llc.dsap = FC_LLC_DSAP; /* DSAP */ + fchdr->llc.ssap = FC_LLC_SSAP; /* SSAP */ + fchdr->llc.ctrl = FC_LLC_CTRL; /* control field */ + fchdr->llc.prot_id[0] = 0; /* protocol id */ + fchdr->llc.prot_id[1] = 0; /* protocol id */ + fchdr->llc.prot_id[2] = 0; /* protocol id */ + fchdr->llc.type = htons(type); /* type field */ + rc = sizeof(struct fc_nethdr); + } + else { +printk("lpfn_header:Not IP or ARP: %x\n",type); + + fchdr = (struct fc_nethdr *)skb_push(skb, sizeof(struct fc_nethdr)); + rc = sizeof(struct fc_nethdr); + } + + /* Set the source and destination hardware addresses */ + if (saddr != NULL) + memcpy(fchdr->fcnet.fc_srcname.IEEE, saddr, dev->addr_len); + else + memcpy(fchdr->fcnet.fc_srcname.IEEE, dev->dev_addr, dev->addr_len); + + fchdr->fcnet.fc_srcname.nameType = NAME_IEEE; /* IEEE name */ + fchdr->fcnet.fc_srcname.IEEEextMsn = 0; + fchdr->fcnet.fc_srcname.IEEEextLsb = 0; + + + if (daddr != NULL) + { + memcpy(fchdr->fcnet.fc_destname.IEEE, daddr, dev->addr_len); + fchdr->fcnet.fc_destname.nameType = NAME_IEEE; /* IEEE name */ + fchdr->fcnet.fc_destname.IEEEextMsn = 0; + fchdr->fcnet.fc_destname.IEEEextLsb = 0; + return(rc); + } + + return(-rc); +} + +/****************************************************************************** +* Function name : lpfn_rebuild_header +* +* Description : Rebuild the FC MAC/LLC/SNAP header. +* This is called after an ARP (or in future other address +* resolution) has completed on this sk_buff. +* We now let ARP fill in the other fields. 
+******************************************************************************/ +int lpfn_rebuild_header(struct sk_buff *skb) +{ + struct fc_nethdr *fchdr = (struct fc_nethdr *)skb->data; + NETDEVICE *dev = skb->dev; + + if (fchdr->llc.type == htons(ETH_P_IP)) { + return arp_find(fchdr->fcnet.fc_destname.IEEE, skb); + } + + printk("%s: unable to resolve type %X addresses.\n", + dev->name, (int)fchdr->llc.type); + + memcpy(fchdr->fcnet.fc_srcname.IEEE, dev->dev_addr, dev->addr_len); + fchdr->fcnet.fc_srcname.nameType = NAME_IEEE; /* IEEE name */ + fchdr->fcnet.fc_srcname.IEEEextMsn = 0; + fchdr->fcnet.fc_srcname.IEEEextLsb = 0; + + return 0; +} + +/****************************************************************************** +* Function name : lpfn_xmit +* +* Description : +* +******************************************************************************/ +static int lpfn_xmit(struct sk_buff *skb, + NETDEVICE *dev) +{ + fc_dev_ctl_t *p_dev_ctl; + int rc; + + + p_dev_ctl = (fc_dev_ctl_t *)dev->priv; + rc=lpfc_xmit(p_dev_ctl, skb); + return rc; +} + +/****************************************************************************** +* Function name : lpfn_receive +* +* Description : +* +******************************************************************************/ +_static_ void lpfn_receive(ndd_t *p_ndd, + struct sk_buff *skb, + void *p) +{ + fc_dev_ctl_t *p_dev_ctl; + NETDEVICE *dev; + struct fc_nethdr *fchdr = (struct fc_nethdr *)skb->data; + struct ethhdr *eth; + unsigned short *sp; + + p_dev_ctl = (fc_dev_ctl_t *)p; + dev = p_dev_ctl->ihs.lpfn_dev; + skb->dev = dev; + + skb->mac.raw=fchdr->fcnet.fc_destname.IEEE; + sp = (unsigned short *)fchdr->fcnet.fc_srcname.IEEE; + *(sp - 1) = *sp; + sp++; + *(sp - 1) = *sp; + sp++; + *(sp - 1) = *sp; + + skb_pull(skb, dev->hard_header_len); + eth= skb->mac.ethernet; + + if(*eth->h_dest&1) { + if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0) + skb->pkt_type=PACKET_BROADCAST; + else + skb->pkt_type=PACKET_MULTICAST; + } + + 
else if(dev->flags&(IFF_PROMISC)) { + if(memcmp(eth->h_dest,dev->dev_addr, ETH_ALEN)) + skb->pkt_type=PACKET_OTHERHOST; + } + + skb->protocol = fchdr->llc.type; + + if (skb->protocol == ntohs(ETH_P_ARP)) + skb->data[1] = 0x06; + + + netif_rx(skb); +} + +/****************************************************************************** +* Function name : lpfn_get_stats +* +* Description : +* +******************************************************************************/ +static struct enet_statistics *lpfn_get_stats(NETDEVICE *dev) +{ + fc_dev_ctl_t *p_dev_ctl; + struct enet_statistics *stats; + + p_dev_ctl = (fc_dev_ctl_t *)dev->priv; + stats = &NDDSTAT.ndd_enet; + return stats; +} + +#ifdef MODULE +/****************************************************************************** +* Function name : init_module +* +* Description : +* +******************************************************************************/ +int init_module(void) +#else +/****************************************************************************** +* Function name : lpfn_probe +* +* Description : +* +******************************************************************************/ +int lpfn_probe(void) +#endif /* MODULE */ +{ + struct lpfn_probe lp; + + lp.hard_start_xmit = &lpfn_xmit; + lp.receive = &lpfn_receive; + lp.get_stats = &lpfn_get_stats; + lp.open = &lpfn_open; + lp.stop = &lpfn_close; + lp.hard_header = &lpfn_header; + lp.rebuild_header = &lpfn_rebuild_header; + lp.change_mtu = &lpfn_change_mtu; + if(lpfc_ioctl(LPFN_PROBE,(void *)&lp) == 0) + return -ENODEV; + + return 0; +} + +#ifdef MODULE +/****************************************************************************** +* Function name : cleanup_module +* +* Description : +* +******************************************************************************/ +void cleanup_module(void) +{ + lpfc_ioctl(LPFN_DETACH,0); +} +MODULE_LICENSE("GPL"); +#endif /* MODULE */ diff -purN -X /home/mbligh/.diff.exclude 
reference/drivers/scsi/lpfc/fc_crtn.h current/drivers/scsi/lpfc/fc_crtn.h --- reference/drivers/scsi/lpfc/fc_crtn.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fc_crtn.h 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,254 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +/* Module fcxmitb.c External Routine Declarations */ +_forward_ int fc_create_xri(FC_BRD_INFO *binfo, RING *rp, NODELIST *nlp); +_forward_ void fc_restartio(fc_dev_ctl_t *p_dev_ctl, NODELIST *nlp); +_forward_ IOCBQ *fc_ringtx_drain(RING *rp); +_forward_ IOCBQ *fc_ringtx_get(RING *rp); +_forward_ void fc_ringtx_put(RING *rp, IOCBQ *iocbq); +_forward_ IOCBQ *fc_ringtxp_get(RING *rp, ushort iotag); +_forward_ void fc_ringtxp_put(RING *rp, IOCBQ *iocbq); +_forward_ int fc_xmit(fc_dev_ctl_t *p_dev_ctl, fcipbuf_t *p_mbuf); +_forward_ int handle_create_xri(fc_dev_ctl_t *p_dev_ctl, RING *rp,IOCBQ *tmp); +_forward_ int handle_xmit_cmpl(fc_dev_ctl_t *p_dev_ctl, RING *rp, IOCBQ *tmp); + + + +/* Module fcelsb.c External Routine Declarations */ +_forward_ int fc_chkpadisc(FC_BRD_INFO *binfo, NODELIST *nlp, + volatile NAME_TYPE *nn, volatile NAME_TYPE *pn); +_forward_ int fc_els_cmd(FC_BRD_INFO *binfo, uint32 type, void *arg, + uint32 class, ushort iotag, NODELIST *nlp); +_forward_ int fc_els_rsp(FC_BRD_INFO *binfo, uint32 type, uint32 Xri, + uint32 class, void *iocbp, uint32 flag, NODELIST *nlp); +_forward_ void fc_snd_flogi(fc_dev_ctl_t *p_dev_ctl, void *a1, void *a2); +_forward_ int fc_initial_flogi(fc_dev_ctl_t * p_dev_ctl); +_forward_ int handle_els_event(fc_dev_ctl_t *p_dev_ctl, RING *rp, IOCBQ *temp); +_forward_ int handle_rcv_els_req(fc_dev_ctl_t *p_dev_ctl, RING *rp, IOCBQ *temp); +_forward_ int fc_process_rscn(fc_dev_ctl_t *p_dev_ctl, IOCBQ *temp, MATCHMAP *mp); +_forward_ int fc_handle_rscn(fc_dev_ctl_t *p_dev_ctl, D_ID *didp); +_forward_ int fc_issue_ct_req(FC_BRD_INFO *binfo, uint32 portid, MATCHMAP *bmp, DMATCHMAP *inmp, DMATCHMAP *outmp, uint32 tmo); +_forward_ int fc_gen_req(FC_BRD_INFO *binfo, MATCHMAP *bmp, MATCHMAP *inmp, MATCHMAP *outmp, uint32 rpi, uint32 flag, uint32 cnt, uint32 tmo); +_forward_ int fc_issue_ct_rsp(FC_BRD_INFO *binfo, uint32 tag, MATCHMAP *bmp, DMATCHMAP *inp); 
+_forward_ int fc_rnid_req(FC_BRD_INFO *binfo, DMATCHMAP *inp, DMATCHMAP *outp, + MATCHMAP **bmp, uint32 rpi); +_forward_ void fc_issue_ns_query(fc_dev_ctl_t *p, void *a1, void *a2); +_forward_ int fc_flush_rscn_defer(fc_dev_ctl_t *p_dev_ctl); +_forward_ int fc_abort_discovery( fc_dev_ctl_t *p_dev_ctl); +/* FDMI */ +_forward_ int fc_fdmi_cmd(fc_dev_ctl_t *p_dev_ctl, NODELIST *ndlp, int cmdcode); +_forward_ void fc_fdmi_rsp(fc_dev_ctl_t *p_dev_ctl, MATCHMAP *mp, MATCHMAP *rsp_mp); +_forward_ void fc_plogi_put(FC_BRD_INFO *binfo, IOCBQ *iocbq); +_forward_ IOCBQ * fc_plogi_get(FC_BRD_INFO *binfo); + + + +/* Module fcmboxb.c External Routine Declarations */ +_forward_ void fc_clear_la(FC_BRD_INFO *binfo, MAILBOX *mb); +_forward_ void fc_read_status(FC_BRD_INFO *binfo, MAILBOX *mb); +_forward_ void fc_read_lnk_stat(FC_BRD_INFO *binfo, MAILBOX *mb); +_forward_ void fc_config_link(fc_dev_ctl_t *p_dev_ctl, MAILBOX *mb); +_forward_ int fc_config_port(FC_BRD_INFO *binfo, MAILBOX *mb, uint32 *hbainit); +_forward_ void fc_config_ring(FC_BRD_INFO *binfo, int ring, int profile, + MAILBOX *mb); +_forward_ void fc_init_link(FC_BRD_INFO *binfo, MAILBOX *mb, + uint32 topology, uint32 linkspeed); +_forward_ MAILBOXQ *fc_mbox_get(FC_BRD_INFO *binfo); +_forward_ int fc_read_la(fc_dev_ctl_t *p_dev_ctl, MAILBOX *mb); +_forward_ void fc_mbox_put(FC_BRD_INFO *binfo, MAILBOXQ *mbq); +_forward_ void fc_read_rev(FC_BRD_INFO *binfo, MAILBOX *mb); +_forward_ int fc_read_rpi(FC_BRD_INFO *binfo, uint32 rpi,MAILBOX *mb,uint32 flg); +_forward_ int fc_read_sparam(fc_dev_ctl_t *p_dev_ctl, MAILBOX *mb); +_forward_ int fc_reg_login(FC_BRD_INFO *binfo, uint32 did, uchar *param, + MAILBOX *mb, uint32 flag); +_forward_ void fc_set_slim(FC_BRD_INFO *binfo, MAILBOX *mb, uint32 addr, + uint32 value); +_forward_ void fc_unreg_login(FC_BRD_INFO *binfo, uint32 rpi, MAILBOX *mb); +_forward_ void fc_unreg_did(FC_BRD_INFO *binfo, uint32 did, MAILBOX *mb); +_forward_ void fc_dump_mem(FC_BRD_INFO *binfo, MAILBOX 
*mb); + +_forward_ void fc_config_farp(FC_BRD_INFO *binfo, MAILBOX *mb); +_forward_ void fc_read_config(FC_BRD_INFO *binfo, MAILBOX *mb); + +/* Module fcmemb.c External Routine Declarations */ +_forward_ void fc_disable_tc(FC_BRD_INFO *binfo, MAILBOX *mb); +_forward_ MATCHMAP *fc_getvaddr(fc_dev_ctl_t *p_dev_ctl, RING *rp, uchar *mapbp); +_forward_ uchar *fc_mem_get(FC_BRD_INFO *binfo, uint32 seg); +_forward_ uchar *fc_mem_put(FC_BRD_INFO *binfo, uint32 seg, uchar *bp); +_forward_ int fc_free_buffer(fc_dev_ctl_t *p_dev_ctl); +_forward_ int fc_malloc_buffer(fc_dev_ctl_t *p_dev_ctl); +_forward_ void fc_mapvaddr(FC_BRD_INFO *binfo, RING *rp, MATCHMAP *mp, + uint32 *haddr, uint32 *laddr); +_forward_ int fc_runBIUdiag(FC_BRD_INFO *binfo, MAILBOX *mb, uchar *in, + uchar *out); + + +/* Module fcclockb.c External Routine Declarations */ +_forward_ void fc_clkrelb(fc_dev_ctl_t *p_dev_ctl, FCCLOCK *cb); +_forward_ int fc_clk_can(fc_dev_ctl_t *p_dev_ctl, FCCLOCK *cb); +_forward_ FCCLOCK *fc_clk_set(fc_dev_ctl_t *p_dev_ctl, ulong tix, + void (*func)(fc_dev_ctl_t*, void*, void*), void *arg1, void *arg2); +_forward_ ulong fc_clk_res(fc_dev_ctl_t *p_dev_ctl, ulong tix, FCCLOCK *cb); +_forward_ void fc_timer(void *); +_forward_ void fc_clock_deque(FCCLOCK *cb); +_forward_ void fc_clock_init(void); +_forward_ void fc_flush_clk_set(fc_dev_ctl_t *p_dev_ctl, + void (*func)(fc_dev_ctl_t*, void*, void*)); +_forward_ int fc_abort_clk_blk(fc_dev_ctl_t *p_dev_ctl, + void (*func)(fc_dev_ctl_t*, void*, void*), void *a1, void *a2); +_forward_ int fc_abort_delay_els_cmd( fc_dev_ctl_t *p_dev_ctl, uint32 did); +_forward_ void fc_q_depth_up(fc_dev_ctl_t *p_dev_ctl, void *, void *); +_forward_ void fc_establish_link_tmo(fc_dev_ctl_t *p_dev_ctl, void *, void *); +/* QFULL_RETRY */ +_forward_ void fc_qfull_retry(void *); +_forward_ void fc_reset_timer(void); + +/* Module fcrpib.c External Routine Declarations */ +_forward_ int fc_discovery(fc_dev_ctl_t *p_dev_ctl); +_forward_ ushort 
fc_emac_lookup(FC_BRD_INFO *binfo, uchar *addr, + NODELIST **nlpp); +_forward_ int fc_fanovery(fc_dev_ctl_t *p_dev_ctl); +_forward_ NODELIST *fc_findnode_rpi(FC_BRD_INFO *binfo, uint32 rpi); +_forward_ int fc_free_rpilist(fc_dev_ctl_t *p_dev_ctl, int keeprpi); +_forward_ void fc_freebufq(fc_dev_ctl_t *p_dev_ctl, RING *rp, IOCBQ *xmitiq); +_forward_ int fc_freenode(FC_BRD_INFO *binfo, NODELIST *nlp, int rm); +_forward_ int fc_freenode_did(FC_BRD_INFO *binfo, uint32 did, int rm); +_forward_ int fc_nlpadjust(FC_BRD_INFO *binfo); +_forward_ int fc_rpi_abortxri(FC_BRD_INFO *binfo, ushort xri); +_forward_ int fc_nlp_bind(FC_BRD_INFO *binfo, NODELIST *nlp); +_forward_ int fc_nlp_unmap(FC_BRD_INFO *binfo, NODELIST *nlp); +_forward_ int fc_nlp_map(FC_BRD_INFO *binfo, NODELIST *nlp); +_forward_ NODELIST *fc_findnode_odid(FC_BRD_INFO *binfo, uint32 order, uint32 did); +_forward_ NODELIST *fc_findnode_scsid(FC_BRD_INFO *binfo, uint32 order, uint32 scid); +_forward_ NODELIST *fc_findnode_wwpn(FC_BRD_INFO *binfo, uint32 odr, NAME_TYPE *wwp); +_forward_ NODELIST *fc_findnode_wwnn(FC_BRD_INFO *binfo, uint32 odr, NAME_TYPE *wwn); +_forward_ NODELIST *fc_findnode_oxri(FC_BRD_INFO *binfo, uint32 order, uint32 xri); +_forward_ int fc_nlp_logi(FC_BRD_INFO *binfo, NODELIST *nlp, NAME_TYPE *wwpnp, + NAME_TYPE *wwnnp); +_forward_ int fc_nlp_swapinfo(FC_BRD_INFO *binfo, NODELIST *onlp, NODELIST *nnlp); + + +/* Module fcstratb.c External Routine Declarations */ +_forward_ dvi_t *fc_fcp_abort(fc_dev_ctl_t *p, int flg, int tgt, int lun); +_forward_ int fc_assign_scsid(fc_dev_ctl_t *ap, NODELIST *nlp); +_forward_ fc_buf_t *fc_deq_fcbuf_active(RING *rp, ushort iotag); +_forward_ fc_buf_t *fc_deq_fcbuf(dvi_t *di); +_forward_ void fc_enq_abort_bdr(dvi_t *dev_ptr); +_forward_ void fc_enq_fcbuf(fc_buf_t *fcptr); +_forward_ void fc_enq_fcbuf_active(RING *rp, fc_buf_t *fcptr); +_forward_ int issue_fcp_cmd(fc_dev_ctl_t *p_dev_ctl, dvi_t *dev_ptr, + T_SCSIBUF *sbp, int pend); +_forward_ void 
fc_enq_wait(dvi_t *dev_ptr); +_forward_ void fc_fail_cmd(dvi_t *dev_ptr, char error, uint32 statistic); +_forward_ void fc_fail_pendq(dvi_t *dev_ptr, char error, uint32 statistic); +_forward_ int fc_failio(fc_dev_ctl_t * p_dev_ctl); +_forward_ dvi_t *fc_find_lun( FC_BRD_INFO * binfo, int hash_index, fc_lun_t lun); +_forward_ void fc_issue_cmd(fc_dev_ctl_t *ap); +_forward_ int fc_reset_dev_q_depth( fc_dev_ctl_t * p_dev_ctl); +_forward_ int fc_restart_all_devices(fc_dev_ctl_t * p_dev_ctl); +_forward_ int fc_restart_device(dvi_t * dev_ptr); +_forward_ void fc_return_standby_queue(dvi_t *dev_ptr, uchar status, + uint32 statistic); +_forward_ void re_issue_fcp_cmd(dvi_t *dev_ptr); +_forward_ void fc_polling(FC_BRD_INFO *binfo, uint32 att_bit); +_forward_ void fc_fcp_fix_txq(fc_dev_ctl_t *p_dev_ctl); + + + +/* Module fcscsib.c External Routine Declarations */ +_forward_ int fc_abort_fcp_txpq(FC_BRD_INFO *binfo, dvi_t *dev_ptr); +_forward_ int fc_abort_xri(FC_BRD_INFO *binfo, dvi_t *dev_ptr, ushort iotag, int flag); +_forward_ int fc_abort_ixri_cx(FC_BRD_INFO *binfo, ushort xri, uint32 cmd, RING *rp); +_forward_ int fc_attach(int index, uint32 *p_uio); +_forward_ int fc_cfg_init(fc_dev_ctl_t *p_dev_ctl); +_forward_ void fc_cfg_remove(fc_dev_ctl_t *p_dev_ctl); +_forward_ void fc_cmdring_timeout(fc_dev_ctl_t *p, void *a1, void *a2); +_forward_ int fc_delay_iodone(fc_dev_ctl_t *p_dev_ctl, + T_SCSIBUF * sbp); +_forward_ void fc_delay_timeout(fc_dev_ctl_t *p, void *l1, void *l2); +_forward_ void fc_nodev_timeout(fc_dev_ctl_t *p, void *l1, void *l2); +_forward_ int fc_detach(int index); +_forward_ void fc_ffcleanup(fc_dev_ctl_t *p_dev_ctl); +_forward_ void fc_free_clearq(dvi_t *dev_ptr); +_forward_ int fc_geportname(NAME_TYPE *pn1, NAME_TYPE *pn2); +_forward_ int fc_linkdown(fc_dev_ctl_t *p_dev_ctl); +_forward_ void fc_linkdown_timeout(fc_dev_ctl_t *p, void *a1, void *a2); +_forward_ void fc_mbox_timeout(fc_dev_ctl_t *p, void *a1, void *a2); +_forward_ void 
fc_fabric_timeout(fc_dev_ctl_t *p, void *a1, void *a2); +_forward_ int fc_nextauth(fc_dev_ctl_t *p_dev_ctl, int sndcnt); +_forward_ int fc_nextdisc(fc_dev_ctl_t *p_dev_ctl, int sndcnt); +_forward_ int fc_nextnode(fc_dev_ctl_t *p_dev_ctl, NODELIST *nlp); +_forward_ int fc_nextrscn(fc_dev_ctl_t *p_dev_ctl, int sndcnt); +_forward_ int fc_free_ct_rsp(fc_dev_ctl_t *p_dev_ctl, MATCHMAP *mlist); +_forward_ int fc_ns_cmd(fc_dev_ctl_t *p_dev_ctl, NODELIST *nlp, int cc); +_forward_ int fc_ns_rsp(fc_dev_ctl_t *p_dev_ctl, NODELIST *nslp, MATCHMAP *mp, uint32 sz); +_forward_ int fc_ct_cmd(fc_dev_ctl_t *p_dev_ctl, MATCHMAP *mp, + MATCHMAP *bmp, NODELIST *nlp); +_forward_ int fc_offline(fc_dev_ctl_t *p_dev_ctl); +_forward_ int fc_online(fc_dev_ctl_t *p_dev_ctl); +_forward_ void fc_pcimem_bcopy(uint32 *src, uint32 *dest, uint32 cnt); +_forward_ int fc_post_buffer(fc_dev_ctl_t *p_dev_ctl, RING *rp, int cnt); +_forward_ int fc_post_mbuf(fc_dev_ctl_t *p_dev_ctl, RING *rp, int cnt); +_forward_ int fc_rlip(fc_dev_ctl_t *p_dev_ctl); +_forward_ void fc_scsi_timeout(fc_dev_ctl_t *p, void *l1, void *l2); +_forward_ void fc_start(fc_dev_ctl_t *p_dev_ctl); +_forward_ void handle_fcp_event(fc_dev_ctl_t *p_dev_ctl, RING *rp,IOCBQ *temp); +_forward_ int handle_mb_cmd(fc_dev_ctl_t *p_dev_ctl, MAILBOX *mb, uint32 cmd); +_forward_ int fc_free_iocb_buf(fc_dev_ctl_t *p_dev_ctl, RING *rp, IOCBQ *tmp); +_forward_ int handle_iprcv_seq(fc_dev_ctl_t *p_dev_ctl, RING *rp, IOCBQ *temp); +_forward_ int handle_elsrcv_seq(fc_dev_ctl_t *p_dev_ctl, RING *rp, IOCBQ *temp); +_forward_ void fc_process_reglogin(fc_dev_ctl_t *p_dev_ctl, NODELIST *nlp); +_forward_ int fc_snd_scsi_req(fc_dev_ctl_t *p_dev_ctl, NAME_TYPE *wwn, + MATCHMAP *bmp, DMATCHMAP *fcpmp, DMATCHMAP *omatp, + uint32 cnt, struct dev_info *devp); +_forward_ void issue_report_lun(fc_dev_ctl_t *p_dev_ctl, void *l1, void *l2); +_forward_ int fc_parse_binding_entry( fc_dev_ctl_t *p_dev_ctl, uchar *inbuf, + uchar *outbuf, int in_size, int out_size, int 
bind_type, + unsigned int *sum, int entry, int *lpfc_num); + +/* + * External Routine Declarations for local print statement formatting + */ + +_forward_ int fc_asc_seq_to_hex( fc_dev_ctl_t *p_dev_ctl, + int input_bc, int output_bc, char *inp, char *outp); +_forward_ int fc_asc_to_hex( uchar c); +_forward_ int fc_is_digit( int chr); +_forward_ int fc_log_printf_msg_vargs( + int brdno, msgLogDef *msg, + void *control, ...); +_forward_ int fc_check_this_log_msg_disabled( + int brdno, msgLogDef *msg, int *log_only); + +_forward_ void fc_brdreset(fc_dev_ctl_t *p_dev_ctl); +_forward_ int fc_ffinit(fc_dev_ctl_t *p_dev_ctl); +_forward_ int issue_mb_cmd(FC_BRD_INFO *binfo, MAILBOX *mb, int flag); +_forward_ uint32 issue_iocb_cmd(FC_BRD_INFO *binfo, RING *rp, IOCBQ *iocb_cmd); +_forward_ char *decode_firmware_rev(FC_BRD_INFO *binfo, fc_vpd_t *vp); +_forward_ int dfc_fmw_rev( fc_dev_ctl_t * p_dev_ctl); +_forward_ int dfc_hba_put_event( fc_dev_ctl_t * p_dev_ctl, uint32 evcode, + uint32 evdata1, uint32 evdata2, uint32 evdata3, uint32 evdata4); +_forward_ int dfc_put_event( fc_dev_ctl_t * p_dev_ctl, uint32 evcode, + uint32 evdata0, void *evdata1, void *evdata2); +_forward_ void handle_ff_error(fc_dev_ctl_t *p_dev_ctl); +_forward_ int handle_mb_event(fc_dev_ctl_t *p_dev_ctl); +_forward_ void handle_link_event(fc_dev_ctl_t *p_dev_ctl); +_forward_ void handle_ring_event(fc_dev_ctl_t *p_dev_ctl, int ring,uint32 reg); diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fc_ertn.h current/drivers/scsi/lpfc/fc_ertn.h --- reference/drivers/scsi/lpfc/fc_ertn.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fc_ertn.h 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,89 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. 
* + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +/* + * Begin Global Function Definitions + */ +_forward_ void fc_bcopy(void *src, void *dest, unsigned long n); +_forward_ void fc_bzero(void *src, unsigned long size ); +_forward_ int fc_copyin(uchar *src, uchar *dst, unsigned long); +_forward_ int fc_copyout(uchar *, uchar *, unsigned long); +_forward_ void lpfc_mpdata_sync(fc_dev_ctl_t *p_dev_ctl, void *h, int a, int b, int c); +_forward_ void *fc_kmem_alloc(unsigned int size); +_forward_ void fc_kmem_free(void *obj, unsigned int size); +_forward_ void curtime(uint32 *time); +_forward_ ulong dfc_disable_lock(ulong p1, Simple_lock *p2); +_forward_ void dfc_unlock_enable(ulong p1, Simple_lock *p2); +_forward_ ulong lpfc_q_disable_lock(fc_dev_ctl_t *p_dev_ctl); +_forward_ void lpfc_q_unlock_enable(fc_dev_ctl_t *p_dev_ctl, ulong p1); +_forward_ ulong lpfc_mempool_disable_lock(fc_dev_ctl_t *p_dev_ctl); +_forward_ void lpfc_mempool_unlock_enable(fc_dev_ctl_t *p_dev_ctl, ulong p1); +_forward_ int dfc_sleep(fc_dev_ctl_t *p_dev_ctl, fcEvent_header *ep); +_forward_ int dfc_wakeup(fc_dev_ctl_t *p_dev_ctl, fcEvent_header *ep); +_forward_ int lpfc_DELAYMS(fc_dev_ctl_t *p_dev_ctl, int cnt); +_forward_ int fc_fcp_bufunmap(fc_dev_ctl_t *pdev, struct sc_buf *sp); +_forward_ int 
fc_bufmap(fc_dev_ctl_t *p_dev_ctl, uchar *bp, uint32 len, + void **phys, uint32 *cnt, void **handle); +_forward_ void fc_bufunmap(fc_dev_ctl_t *p_dev_ctl, uchar *addr, + uchar *dmahandle, uint32 size); +_forward_ int fc_fcp_bufmap(fc_dev_ctl_t *p_dev_ctl, struct sc_buf *sbp, + fc_buf_t *fcptr, IOCBQ *temp, ULP_BDE64 *bpl, + dvi_t * dev_ptr, int pend); +_forward_ void fc_free(fc_dev_ctl_t *p_dev_ctl, MBUF_INFO *buf_info); +_forward_ int fc_get_dds(fc_dev_ctl_t *p_dev_ctl, uint32 *p_uio); +_forward_ int fc_get_dds_bind(fc_dev_ctl_t *p_dev_ctl); +_forward_ int fc_get_dds(fc_dev_ctl_t *p_dev_ctl, uint32 *p_uio); +_forward_ void lpfc_scsi_selto_timeout(fc_dev_ctl_t *p, void *l1, void *l2); +_forward_ int lpfc_copy_sense(dvi_t * dev_ptr, struct buf * bp); +_forward_ int fc_intr(struct intr *p_ihs); +_forward_ int fc_pcimap(fc_dev_ctl_t *p_dev_ctl); +_forward_ ushort fc_rdpci_cmd( fc_dev_ctl_t *p_dev_ctl); +_forward_ uint32 fc_rdpci_32( fc_dev_ctl_t *p_dev_ctl, uint32 offset); +_forward_ int fc_initpci(struct dfc_info *di, fc_dev_ctl_t *p_dev_ctl); +_forward_ int fc_readpci(struct dfc_info *di, uint32 offset, char *buf, uint32 cnt); +_forward_ int fc_writepci(struct dfc_info *di, uint32 offset, char *buf, uint32 cnt); +_forward_ uchar *fc_malloc(fc_dev_ctl_t *p_dev_ctl, MBUF_INFO *buf_info); +_forward_ int fc_memmap(fc_dev_ctl_t *p_dev_ctl); +_forward_ int fc_unmemmap(fc_dev_ctl_t *p_dev_ctl); +_forward_ int lpfc_cfg_init(fc_dev_ctl_t *p_dev_ctl); +_forward_ void fc_wrpci_cmd( fc_dev_ctl_t *p_dev_ctl, ushort cfg_value); +_forward_ int i_clear(struct intr *ihs); +_forward_ int i_init(struct intr *ihs); +_forward_ void lpfc_fcp_error( fc_buf_t * fcptr, IOCB * cmd); +_forward_ dvi_t *fc_alloc_devp(fc_dev_ctl_t *, int target, fc_lun_t lun); +_forward_ int fc_do_iodone( struct buf *bp); +_forward_ int fc_device_changed(fc_dev_ctl_t *p, struct dev_info *dp); +_forward_ int log_printf(int f, int type, int num, char *str, int brdno, + uint32 a1, uint32 a2, uint32 a3, uint32 a4); 
+_forward_ int log_printf_msgblk( int brdno, msgLogDef * msg, char *str, int log_only); + + +_forward_ uint32 timeout(void (*func)(ulong), struct timer_list * , uint32 ); +_forward_ int lpfc_ip_rcvsz(fc_dev_ctl_t *p_dev_ctl); +_forward_ int lpfc_kfree_skb(struct sk_buff *skb); +_forward_ struct sk_buff * lpfc_alloc_skb(unsigned int sz); +_forward_ void fc_pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t h, + size_t size, int c); +_forward_ void fc_write_toio(uint32 *src, uint32 *dest_io, uint32 cnt); +_forward_ void fc_read_fromio(uint32 *src_io, uint32 *dest, uint32 cnt); +_forward_ uint32 fc_readl(uint32 *src); +_forward_ void fc_writel(uint32 *src, uint32 value); +_forward_ int fc_print( char * str, void * arg1, void * arg2); + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fc_hw.h current/drivers/scsi/lpfc/fc_hw.h --- reference/drivers/scsi/lpfc/fc_hw.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fc_hw.h 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,3073 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#ifndef _H_FC_HW +#define _H_FC_HW + +typedef unsigned u32bit; +typedef unsigned u16bit; +typedef unsigned u8bit; + +#define FC_MAX_TRANSFER 0x40000 /* Maximum transfer size per operation */ + +#define MAX_CONFIGURED_RINGS 4 /* # rings currently used */ + +#define IOCB_CMD_R0_ENTRIES 5 /* ELS command ring entries */ +#define IOCB_RSP_R0_ENTRIES 5 /* ELS response ring entries */ +#define IOCB_CMD_R1_ENTRIES 27 /* IP command ring entries */ +#define IOCB_RSP_R1_ENTRIES 28 /* IP response ring entries */ +#define IOCB_CMD_R2_ENTRIES 45 /* FCP command ring entries */ +#define IOCB_RSP_R2_ENTRIES 10 /* FCP response ring entries */ +#define MAX_BIOCB 120 /* max# of BIU IOCBs in shared memory */ + +#define SLI2_IOCB_CMD_R0_ENTRIES 6 /* SLI-2 ELS command ring entries */ +#define SLI2_IOCB_RSP_R0_ENTRIES 6 /* SLI-2 ELS response ring entries */ +#define SLI2_IOCB_CMD_R1_ENTRIES 24 /* SLI-2 IP command ring entries */ +#define SLI2_IOCB_RSP_R1_ENTRIES 30 /* SLI-2 IP response ring entries */ +#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 18 /* SLI-2 extra FCP cmd ring entries */ +#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 24 /* SLI-2 extra FCP rsp ring entries */ +#define SLI2_IOCB_CMD_R2_ENTRIES 30 /* SLI-2 FCP command ring entries */ +#define SLI2_IOCB_RSP_R2_ENTRIES 20 /* SLI-2 FCP response ring entries */ +#define SLI2_IOCB_CMD_R2XTRA_ENTRIES 22 /* SLI-2 extra FCP cmd ring entries */ +#define SLI2_IOCB_RSP_R2XTRA_ENTRIES 20 /* SLI-2 extra FCP rsp ring entries */ +#define SLI2_IOCB_CMD_R3_ENTRIES 0 /* SLI-2 FCP command ring entries */ +#define SLI2_IOCB_RSP_R3_ENTRIES 0 /* SLI-2 FCP response ring entries */ +#define MAX_SLI2_IOCB SLI2_IOCB_CMD_R0_ENTRIES + \ + SLI2_IOCB_RSP_R0_ENTRIES + \ + SLI2_IOCB_CMD_R1_ENTRIES + \ + SLI2_IOCB_RSP_R1_ENTRIES + \ + SLI2_IOCB_CMD_R2_ENTRIES + \ + SLI2_IOCB_RSP_R2_ENTRIES + \ + SLI2_IOCB_CMD_R3_ENTRIES + \ + SLI2_IOCB_RSP_R3_ENTRIES + +#define FCELSSIZE 1024 /* maximum ELS transfer 
size */ + +#define FC_MAXRETRY 3 /* max retries for ELS commands */ +#define FC_ELS_RING 0 /* use ring 0 for ELS commands */ +#define FC_IP_RING 1 /* use ring 1 for IP commands */ +#define FC_FCP_RING 2 /* use ring 2 for FCP initiator commands */ + +#define FF_DEF_EDTOV 2000 /* Default E_D_TOV (2000ms) */ +#define FF_DEF_ALTOV 15 /* Default AL_TIME (15ms) */ +#define FF_DEF_RATOV 2 /* Default RA_TOV (2s) */ +#define FF_DEF_ARBTOV 1900 /* Default ARB_TOV (1900ms) */ +#define MB_WAIT_PERIOD 500 /* Wait period in usec in between MB polls */ +#define MAX_MB_COMPLETION 1000 /* # MB_WAIT_PERIODs to wait for MB cmplt */ +#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG iocb */ + +#define FF_REG_AREA_SIZE 256 /* size, in bytes, of i/o register area */ +#define FF_SLIM_SIZE 4096 /* size, in bytes, of SLIM */ + +/* + * Miscellaneous stuff.... + */ +/* HBA Mgmt */ +#define FDMI_DID ((uint32)0xfffffa) +#define NameServer_DID ((uint32)0xfffffc) +#define SCR_DID ((uint32)0xfffffd) +#define Fabric_DID ((uint32)0xfffffe) +#define Bcast_DID ((uint32)0xffffff) +#define Mask_DID ((uint32)0xffffff) +#define CT_DID_MASK ((uint32)0xffff00) +#define Fabric_DID_MASK ((uint32)0xfff000) + +#define PT2PT_LocalID ((uint32)1) +#define PT2PT_RemoteID ((uint32)2) + +#define OWN_CHIP 1 /* IOCB / Mailbox is owned by Hba */ +#define OWN_HOST 0 /* IOCB / Mailbox is owned by Host */ +#define END_OF_CHAIN 0 +#define IOCB_WORD_SZ 8 /* # of words in ULP BIU XCB */ +#define MAX_RINGS 3 /* Max # of supported rings */ + +/* defines for type field in fc header */ +#define FC_ELS_DATA 0x1 +#define FC_LLC_SNAP 0x5 +#define FC_FCP_DATA 0x8 +#define FC_COMMON_TRANSPORT_ULP 0x20 + +/* defines for rctl field in fc header */ +#define FC_DEV_DATA 0x0 +#define FC_UNSOL_CTL 0x2 +#define FC_SOL_CTL 0x3 +#define FC_UNSOL_DATA 0x4 +#define FC_FCP_CMND 0x6 +#define FC_ELS_REQ 0x22 +#define FC_ELS_RSP 0x23 +#define FC_NET_HDR 0x20 /* network headers for Dfctl field */ + +/* + * Common Transport structures and 
definitions + * + */ + +union CtRevisionId { + /* Structure is in Big Endian format */ + struct { + u32bit Revision: 8; + u32bit InId: 24; + } bits; + uint32 word; +}; + +union CtCommandResponse { + /* Structure is in Big Endian format */ + struct { + u32bit CmdRsp: 16; + u32bit Size: 16; + } bits; + uint32 word; +}; + +typedef struct SliCtRequest { + /* Structure is in Big Endian format */ + union CtRevisionId RevisionId; + uchar FsType; + uchar FsSubType; + uchar Options; + uchar Rsrvd1; + union CtCommandResponse CommandResponse; + uchar Rsrvd2; + uchar ReasonCode; + uchar Explanation; + uchar VendorUnique; + + union { + uint32 PortID; + struct gid { + uchar PortType; /* for GID_PT requests */ + uchar DomainScope; + uchar AreaScope; + uchar Fc4Type; /* for GID_FT requests */ + } gid; + struct rft { + uint32 PortId; /* For RFT_ID requests */ +#if BIG_ENDIAN_HW + u32bit rsvd0: 16; + u32bit rsvd1: 7; + u32bit fcpReg: 1; /* Type 8 */ + u32bit rsvd2: 2; + u32bit ipReg: 1; /* Type 5 */ + u32bit rsvd3: 5; +#endif +#if LITTLE_ENDIAN_HW + u32bit rsvd0: 16; + u32bit fcpReg: 1; /* Type 8 */ + u32bit rsvd1: 7; + u32bit rsvd3: 5; + u32bit ipReg: 1; /* Type 5 */ + u32bit rsvd2: 2; +#endif + uint32 rsvd[7]; + } rft; + } un; +} SLI_CT_REQUEST, *PSLI_CT_REQUEST; + +#define SLI_CT_REVISION 1 +#define GID_REQUEST_SZ (sizeof(SLI_CT_REQUEST) - 32) +#define RFT_REQUEST_SZ (sizeof(SLI_CT_REQUEST)) + + +/* + * FsType Definitions + */ + +#define SLI_CT_MANAGEMENT_SERVICE 0xFA +#define SLI_CT_TIME_SERVICE 0xFB +#define SLI_CT_DIRECTORY_SERVICE 0xFC +#define SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD + +/* + * Directory Service Subtypes + */ + +#define SLI_CT_DIRECTORY_NAME_SERVER 0x02 + +/* + * Response Codes + */ + +#define SLI_CT_RESPONSE_FS_RJT 0x8001 +#define SLI_CT_RESPONSE_FS_ACC 0x8002 + +/* + * Reason Codes + */ + +#define SLI_CT_NO_ADDITIONAL_EXPL 0x0 +#define SLI_CT_INVALID_COMMAND 0x01 +#define SLI_CT_INVALID_VERSION 0x02 +#define SLI_CT_LOGICAL_ERROR 0x03 +#define 
SLI_CT_INVALID_IU_SIZE 0x04 +#define SLI_CT_LOGICAL_BUSY 0x05 +#define SLI_CT_PROTOCOL_ERROR 0x07 +#define SLI_CT_UNABLE_TO_PERFORM_REQ 0x09 +#define SLI_CT_REQ_NOT_SUPPORTED 0x0b +#define SLI_CT_HBA_INFO_NOT_REGISTERED 0x10 +#define SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE 0x11 +#define SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN 0x12 +#define SLI_CT_HBA_ATTR_NOT_PRESENT 0x13 +#define SLI_CT_PORT_INFO_NOT_REGISTERED 0x20 +#define SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21 +#define SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN 0x22 +#define SLI_CT_VENDOR_UNIQUE 0xff + +/* + * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations + */ + +#define SLI_CT_NO_PORT_ID 0x01 +#define SLI_CT_NO_PORT_NAME 0x02 +#define SLI_CT_NO_NODE_NAME 0x03 +#define SLI_CT_NO_CLASS_OF_SERVICE 0x04 +#define SLI_CT_NO_IP_ADDRESS 0x05 +#define SLI_CT_NO_IPA 0x06 +#define SLI_CT_NO_FC4_TYPES 0x07 +#define SLI_CT_NO_SYMBOLIC_PORT_NAME 0x08 +#define SLI_CT_NO_SYMBOLIC_NODE_NAME 0x09 +#define SLI_CT_NO_PORT_TYPE 0x0A +#define SLI_CT_ACCESS_DENIED 0x10 +#define SLI_CT_INVALID_PORT_ID 0x11 +#define SLI_CT_DATABASE_EMPTY 0x12 + + + +/* + * Name Server Command Codes + */ + +#define SLI_CTNS_GA_NXT 0x0100 +#define SLI_CTNS_GPN_ID 0x0112 +#define SLI_CTNS_GNN_ID 0x0113 +#define SLI_CTNS_GCS_ID 0x0114 +#define SLI_CTNS_GFT_ID 0x0117 +#define SLI_CTNS_GSPN_ID 0x0118 +#define SLI_CTNS_GPT_ID 0x011A +#define SLI_CTNS_GID_PN 0x0121 +#define SLI_CTNS_GID_NN 0x0131 +#define SLI_CTNS_GIP_NN 0x0135 +#define SLI_CTNS_GIPA_NN 0x0136 +#define SLI_CTNS_GSNN_NN 0x0139 +#define SLI_CTNS_GNN_IP 0x0153 +#define SLI_CTNS_GIPA_IP 0x0156 +#define SLI_CTNS_GID_FT 0x0171 +#define SLI_CTNS_GID_PT 0x01A1 +#define SLI_CTNS_RPN_ID 0x0212 +#define SLI_CTNS_RNN_ID 0x0213 +#define SLI_CTNS_RCS_ID 0x0214 +#define SLI_CTNS_RFT_ID 0x0217 +#define SLI_CTNS_RSPN_ID 0x0218 +#define SLI_CTNS_RPT_ID 0x021A +#define SLI_CTNS_RIP_NN 0x0235 +#define SLI_CTNS_RIPA_NN 0x0236 +#define SLI_CTNS_RSNN_NN 0x0239 +#define SLI_CTNS_DA_ID 0x0300 + +/* + * Port Types + */ + 
+#define SLI_CTPT_N_PORT 0x01 +#define SLI_CTPT_NL_PORT 0x02 +#define SLI_CTPT_FNL_PORT 0x03 +#define SLI_CTPT_IP 0x04 +#define SLI_CTPT_FCP 0x08 +#define SLI_CTPT_NX_PORT 0x7F +#define SLI_CTPT_F_PORT 0x81 +#define SLI_CTPT_FL_PORT 0x82 +#define SLI_CTPT_E_PORT 0x84 + +#define SLI_CT_LAST_ENTRY 0x80000000 + +/*=====================================================================*/ + +#ifdef LP6000 +/* PCI register offsets */ +#define MEM_ADDR_OFFSET 0x10 /* SLIM base memory address */ +#define MEMH_OFFSET 0x14 /* SLIM base memory high address */ +#define REG_ADDR_OFFSET 0x18 /* REGISTER base memory address */ +#define REGH_OFFSET 0x1c /* REGISTER base memory high address */ +#define IO_ADDR_OFFSET 0x20 /* BIU I/O registers */ +#define REGIOH_OFFSET 0x24 /* REGISTER base io high address */ +#endif + +#define CMD_REG_OFFSET 0x4 /* PCI command configuration */ + +/* General PCI Register Definitions */ +/* Refer To The PCI Specification For Detailed Explanations */ + +/* Register Offsets in little endian format */ +#define PCI_VENDOR_ID_REGISTER 0x00 /* PCI Vendor ID Register*/ +#define PCI_DEVICE_ID_REGISTER 0x02 /* PCI Device ID Register*/ +#define PCI_CONFIG_ID_REGISTER 0x00 /* PCI Configuration ID Register*/ +#define PCI_COMMAND_REGISTER 0x04 /* PCI Command Register*/ +#define PCI_STATUS_REGISTER 0x06 /* PCI Status Register*/ +#define PCI_REV_ID_REGISTER 0x08 /* PCI Revision ID Register*/ +#define PCI_CLASS_CODE_REGISTER 0x09 /* PCI Class Code Register*/ +#define PCI_CACHE_LINE_REGISTER 0x0C /* PCI Cache Line Register*/ +#define PCI_LATENCY_TMR_REGISTER 0x0D /* PCI Latency Timer Register*/ +#define PCI_HEADER_TYPE_REGISTER 0x0E /* PCI Header Type Register*/ +#define PCI_BIST_REGISTER 0x0F /* PCI Built-In SelfTest Register*/ +#define PCI_BAR_0_REGISTER 0x10 /* PCI Base Address Register 0*/ +#define PCI_BAR_1_REGISTER 0x14 /* PCI Base Address Register 1*/ +#define PCI_BAR_2_REGISTER 0x18 /* PCI Base Address Register 2*/ +#define PCI_BAR_3_REGISTER 0x1C /* PCI Base 
Address Register 3*/ +#define PCI_BAR_4_REGISTER 0x20 /* PCI Base Address Register 4*/ +#define PCI_BAR_5_REGISTER 0x24 /* PCI Base Address Register 5*/ +#define PCI_EXPANSION_ROM 0x30 /* PCI Expansion ROM Base Register*/ +#define PCI_INTR_LINE_REGISTER 0x3C /* PCI Interrupt Line Register*/ +#define PCI_INTR_PIN_REGISTER 0x3D /* PCI Interrupt Pin Register*/ +#define PCI_MIN_GNT_REGISTER 0x3E /* PCI Min-Gnt Register*/ +#define PCI_MAX_LAT_REGISTER 0x3F /* PCI Max_Lat Register*/ +#define PCI_NODE_ADDR_REGISTER 0x40 /* PCI Node Address Register*/ + +/* PCI access methods */ +#define P_CONF_T1 1 +#define P_CONF_T2 2 + +/* max number of pci buses */ +#define MAX_PCI_BUSES 0xFF + +/* number of PCI config bytes to access */ +#define PCI_BYTE 1 +#define PCI_WORD 2 +#define PCI_DWORD 4 + +/* PCI related constants */ +#define CMD_IO_ENBL 0x0001 +#define CMD_MEM_ENBL 0x0002 +#define CMD_BUS_MASTER 0x0004 +#define CMD_MWI 0x0010 +#define CMD_PARITY_CHK 0x0040 +#define CMD_SERR_ENBL 0x0100 + +#define CMD_CFG_VALUE 0x156 /* mem enable, master, MWI, SERR, PERR */ + +/* PCI addresses */ +#define PCI_SPACE_ENABLE 0x0CF8 +#define CF1_CONFIG_ADDR_REGISTER 0x0CF8 +#define CF1_CONFIG_DATA_REGISTER 0x0CFC +#define CF2_FORWARD_REGISTER 0x0CFA +#define CF2_BASE_ADDRESS 0xC000 + +#define PCI_VENDOR_ID_EMULEX 0x10df + +#define PCI_DEVICE_ID_SUPERFLY 0xf700 +#define PCI_DEVICE_ID_DRAGONFLY 0xf800 +#define PCI_DEVICE_ID_CENTAUR 0xf900 +#define PCI_DEVICE_ID_PFLY 0xf098 +#define PCI_DEVICE_ID_PEGASUS 0xf980 +#define PCI_DEVICE_ID_TFLY 0xf0a5 +#define PCI_DEVICE_ID_THOR 0xfa00 + +#define JEDEC_ID_ADDRESS 0x0080001c +#define SUPERFLY_JEDEC_ID 0x0020 +#define DRAGONFLY_JEDEC_ID 0x0021 +#define DRAGONFLY_V2_JEDEC_ID 0x0025 +#define CENTAUR_2G_JEDEC_ID 0x0026 +#define CENTAUR_1G_JEDEC_ID 0x0028 +#define JEDEC_ID_MASK 0x0FFFF000 +#define JEDEC_ID_SHIFT 12 +#define FC_JEDEC_ID(id) ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT) + +#define DEFAULT_PCI_LATENCY_CLOCKS 0xf8 /* 0xF8 is a special value for + * 
FF11.1N6 firmware. Use + * 0x80 for pre-FF11.1N6 &N7, etc + */ +#define PCI_LATENCY_VALUE 0xf8 + +#ifdef LP6000 +typedef struct { /* BIU registers */ + uint32 hostAtt; /* See definitions for Host Attention register */ + uint32 chipAtt; /* See definitions for Chip Attention register */ + uint32 hostStatus; /* See definitions for Host Status register */ + uint32 hostControl; /* See definitions for Host Control register */ + uint32 buiConfig; /* See definitions for BIU configuration register*/ +} FF_REGS, *PFF_REGS; + +/* Host Attention Register */ + +#define HA_REG_OFFSET 0 /* Word offset from register base address */ + +#define HA_R0RE_REQ 0x00000001 /* Bit 0 */ +#define HA_R0CE_RSP 0x00000002 /* Bit 1 */ +#define HA_R0ATT 0x00000008 /* Bit 3 */ +#define HA_R1RE_REQ 0x00000010 /* Bit 4 */ +#define HA_R1CE_RSP 0x00000020 /* Bit 5 */ +#define HA_R1ATT 0x00000080 /* Bit 7 */ +#define HA_R2RE_REQ 0x00000100 /* Bit 8 */ +#define HA_R2CE_RSP 0x00000200 /* Bit 9 */ +#define HA_R2ATT 0x00000800 /* Bit 11 */ +#define HA_R3RE_REQ 0x00001000 /* Bit 12 */ +#define HA_R3CE_RSP 0x00002000 /* Bit 13 */ +#define HA_R3ATT 0x00008000 /* Bit 15 */ +#define HA_LATT 0x20000000 /* Bit 29 */ +#define HA_MBATT 0x40000000 /* Bit 30 */ +#define HA_ERATT 0x80000000 /* Bit 31 */ + + +/* Chip Attention Register */ + +#define CA_REG_OFFSET 1 /* Word offset from register base address */ + +#define CA_R0CE_REQ 0x00000001 /* Bit 0 */ +#define CA_R0RE_RSP 0x00000002 /* Bit 1 */ +#define CA_R0ATT 0x00000008 /* Bit 3 */ +#define CA_R1CE_REQ 0x00000010 /* Bit 4 */ +#define CA_R1RE_RSP 0x00000020 /* Bit 5 */ +#define CA_R1ATT 0x00000080 /* Bit 7 */ +#define CA_R2CE_REQ 0x00000100 /* Bit 8 */ +#define CA_R2RE_RSP 0x00000200 /* Bit 9 */ +#define CA_R2ATT 0x00000800 /* Bit 11 */ +#define CA_R3CE_REQ 0x00001000 /* Bit 12 */ +#define CA_R3RE_RSP 0x00002000 /* Bit 13 */ +#define CA_R3ATT 0x00008000 /* Bit 15 */ +#define CA_MBATT 0x40000000 /* Bit 30 */ + + +/* Host Status Register */ + +#define HS_REG_OFFSET 
2 /* Word offset from register base address */ + +#define HS_MBRDY 0x00400000 /* Bit 22 */ +#define HS_FFRDY 0x00800000 /* Bit 23 */ +#define HS_FFER8 0x01000000 /* Bit 24 */ +#define HS_FFER7 0x02000000 /* Bit 25 */ +#define HS_FFER6 0x04000000 /* Bit 26 */ +#define HS_FFER5 0x08000000 /* Bit 27 */ +#define HS_FFER4 0x10000000 /* Bit 28 */ +#define HS_FFER3 0x20000000 /* Bit 29 */ +#define HS_FFER2 0x40000000 /* Bit 30 */ +#define HS_FFER1 0x80000000 /* Bit 31 */ +#define HS_FFERM 0xFF000000 /* Mask for error bits 31:24 */ + + +/* Host Control Register */ + +#define HC_REG_OFFSET 3 /* Word offset from register base address */ + +#define HC_MBINT_ENA 0x00000001 /* Bit 0 */ +#define HC_R0INT_ENA 0x00000002 /* Bit 1 */ +#define HC_R1INT_ENA 0x00000004 /* Bit 2 */ +#define HC_R2INT_ENA 0x00000008 /* Bit 3 */ +#define HC_R3INT_ENA 0x00000010 /* Bit 4 */ +#define HC_INITHBI 0x02000000 /* Bit 25 */ +#define HC_INITMB 0x04000000 /* Bit 26 */ +#define HC_INITFF 0x08000000 /* Bit 27 */ +#define HC_LAINT_ENA 0x20000000 /* Bit 29 */ +#define HC_ERINT_ENA 0x80000000 /* Bit 31 */ + +/* BIU Configuration Register */ + +#define BC_REG_OFFSET 4 /* Word offset from register base address */ + +#define BC_BSE 0x00000001 /* Bit 0 */ +#define BC_BSE_SWAP 0x01000000 /* Bit 0 - swapped */ + +#endif /* LP6000 */ + +/*=====================================================================*/ + +/* + * Start of FCP specific structures + */ + +/* + * Definition of FCP_RSP Packet + */ + +typedef struct _FCP_RSP { + uint32 rspRsvd1; /* FC Word 0, byte 0:3 */ + uint32 rspRsvd2; /* FC Word 1, byte 0:3 */ + + uchar rspStatus0; /* FCP_STATUS byte 0 (reserved) */ + uchar rspStatus1; /* FCP_STATUS byte 1 (reserved) */ + uchar rspStatus2; /* FCP_STATUS byte 2 field validity */ +#define RSP_LEN_VALID 0x01 /* bit 0 */ +#define SNS_LEN_VALID 0x02 /* bit 1 */ +#define RESID_OVER 0x04 /* bit 2 */ +#define RESID_UNDER 0x08 /* bit 3 */ + uchar rspStatus3; /* FCP_STATUS byte 3 SCSI status byte */ +#define 
SCSI_STAT_GOOD 0x00 +#define SCSI_STAT_CHECK_COND 0x02 +#define SCSI_STAT_COND_MET 0x04 +#define SCSI_STAT_BUSY 0x08 +#define SCSI_STAT_INTERMED 0x10 +#define SCSI_STAT_INTERMED_CM 0x14 +#define SCSI_STAT_RES_CNFLCT 0x18 +#define SCSI_STAT_CMD_TERM 0x22 +#define SCSI_STAT_QUE_FULL 0x28 + + uint32 rspResId; /* Residual xfer if RESID_xxxx set in fcpStatus2 */ + /* Received in Big Endian format */ + uint32 rspSnsLen; /* Length of sense data in fcpSnsInfo */ + /* Received in Big Endian format */ + uint32 rspRspLen; /* Length of FCP response data in fcpRspInfo */ + /* Received in Big Endian format */ + + uchar rspInfo0; /* FCP_RSP_INFO byte 0 (reserved) */ + uchar rspInfo1; /* FCP_RSP_INFO byte 1 (reserved) */ + uchar rspInfo2; /* FCP_RSP_INFO byte 2 (reserved) */ + uchar rspInfo3; /* FCP_RSP_INFO RSP_CODE byte 3 */ + +#define RSP_NO_FAILURE 0x00 +#define RSP_DATA_BURST_ERR 0x01 +#define RSP_CMD_FIELD_ERR 0x02 +#define RSP_RO_MISMATCH_ERR 0x03 +#define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */ +#define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */ + + uint32 rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */ + +#define MAX_FCP_SNS 128 + uchar rspSnsInfo[MAX_FCP_SNS]; +} FCP_RSP, *PFCP_RSP; + +/* + * Definition of FCP_CMND Packet + */ + +typedef struct _FCP_CMND { + uint32 fcpLunMsl; /* most significant lun word (32 bits) */ + uint32 fcpLunLsl; /* least significant lun word (32 bits) */ + /* # of bits to shift lun id to end up in right + * payload word, little endian = 8, big = 16. 
+ */ +#if LITTLE_ENDIAN_HW +#define FC_LUN_SHIFT 8 +#define FC_ADDR_MODE_SHIFT 0 +#endif +#if BIG_ENDIAN_HW +#define FC_LUN_SHIFT 16 +#define FC_ADDR_MODE_SHIFT 24 +#endif + + uchar fcpCntl0; /* FCP_CNTL byte 0 (reserved) */ + uchar fcpCntl1; /* FCP_CNTL byte 1 task codes */ +#define SIMPLE_Q 0x00 +#define HEAD_OF_Q 0x01 +#define ORDERED_Q 0x02 +#define ACA_Q 0x04 +#define UNTAGGED 0x05 + uchar fcpCntl2; /* FCP_CTL byte 2 task management codes */ +#define ABORT_TASK_SET 0x02 /* Bit 1 */ +#define CLEAR_TASK_SET 0x04 /* bit 2 */ +#define LUN_RESET 0x10 /* bit 4 */ +#define TARGET_RESET 0x20 /* bit 5 */ +#define CLEAR_ACA 0x40 /* bit 6 */ +#define TERMINATE_TASK 0x80 /* bit 7 */ + uchar fcpCntl3; +#define WRITE_DATA 0x01 /* Bit 0 */ +#define READ_DATA 0x02 /* Bit 1 */ + + uchar fcpCdb[16]; /* SRB cdb field is copied here */ + uint32 fcpDl; /* Total transfer length */ + +} FCP_CMND, *PFCP_CMND; + +/* SCSI INQUIRY Command Structure */ + +typedef struct inquiryDataType { + u8bit DeviceType : 5; + u8bit DeviceTypeQualifier : 3; + + u8bit DeviceTypeModifier : 7; + u8bit RemovableMedia : 1; + + uchar Versions; + uchar ResponseDataFormat; + uchar AdditionalLength; + uchar Reserved[2]; + + u8bit SoftReset : 1; + u8bit CommandQueue : 1; + u8bit Reserved2 : 1; + u8bit LinkedCommands : 1; + u8bit Synchronous : 1; + u8bit Wide16Bit : 1; + u8bit Wide32Bit : 1; + u8bit RelativeAddressing : 1; + + uchar VendorId[8]; + uchar ProductId[16]; + uchar ProductRevisionLevel[4]; + uchar VendorSpecific[20]; + uchar Reserved3[40]; +} INQUIRY_DATA_DEF; + +typedef struct _READ_CAPACITY_DATA { + ulong LogicalBlockAddress; + ulong BytesPerBlock; +} READ_CAPACITY_DATA_DEF; + +typedef struct _REPORT_LUNS_DATA { + union { + uchar cB[8]; + uint32 cL[2]; + } control; + union { + uchar eB[8]; + uint32 eL[2]; + } entry [1]; +} REPORT_LUNS_DATA_DEF; + +/* SCSI CDB command codes */ +#define FCP_SCSI_FORMAT_UNIT 0x04 +#define FCP_SCSI_INQUIRY 0x12 +#define FCP_SCSI_MODE_SELECT 0x15 +#define 
FCP_SCSI_MODE_SENSE 0x1A +#define FCP_SCSI_PAUSE_RESUME 0x4B +#define FCP_SCSI_PLAY_AUDIO 0x45 +#define FCP_SCSI_PLAY_AUDIO_EXT 0xA5 +#define FCP_SCSI_PLAY_AUDIO_MSF 0x47 +#define FCP_SCSI_PLAY_AUDIO_TRK_INDX 0x48 +#define FCP_SCSI_PREVENT_ALLOW_REMOVAL 0x1E +#define FCP_SCSI_READ 0x08 +#define FCP_SCSI_READ_BUFFER 0x3C +#define FCP_SCSI_READ_CAPACITY 0x25 +#define FCP_SCSI_READ_DEFECT_LIST 0x37 +#define FCP_SCSI_READ_EXTENDED 0x28 +#define FCP_SCSI_READ_HEADER 0x44 +#define FCP_SCSI_READ_LONG 0xE8 +#define FCP_SCSI_READ_SUB_CHANNEL 0x42 +#define FCP_SCSI_READ_TOC 0x43 +#define FCP_SCSI_REASSIGN_BLOCK 0x07 +#define FCP_SCSI_RECEIVE_DIAGNOSTIC_RESULTS 0x1C +#define FCP_SCSI_RELEASE_UNIT 0x17 +#define FCP_SCSI_REPORT_LUNS 0xa0 +#define FCP_SCSI_REQUEST_SENSE 0x03 +#define FCP_SCSI_RESERVE_UNIT 0x16 +#define FCP_SCSI_REZERO_UNIT 0x01 +#define FCP_SCSI_SEEK 0x0B +#define FCP_SCSI_SEEK_EXTENDED 0x2B +#define FCP_SCSI_SEND_DIAGNOSTIC 0x1D +#define FCP_SCSI_START_STOP_UNIT 0x1B +#define FCP_SCSI_TEST_UNIT_READY 0x00 +#define FCP_SCSI_VERIFY 0x2F +#define FCP_SCSI_WRITE 0x0A +#define FCP_SCSI_WRITE_AND_VERIFY 0x2E +#define FCP_SCSI_WRITE_BUFFER 0x3B +#define FCP_SCSI_WRITE_EXTENDED 0x2A +#define FCP_SCSI_WRITE_LONG 0xEA +#define FCP_SCSI_RELEASE_LUNR 0xBB +#define FCP_SCSI_RELEASE_LUNV 0xBF + +#define HPVA_SETPASSTHROUGHMODE 0x27 +#define HPVA_EXECUTEPASSTHROUGH 0x29 +#define HPVA_CREATELUN 0xE2 +#define HPVA_SETLUNSECURITYLIST 0xED +#define HPVA_SETCLOCK 0xF9 +#define HPVA_RECOVER 0xFA +#define HPVA_GENERICSERVICEOUT 0xFD + +#define DMEP_EXPORT_IN 0x85 +#define DMEP_EXPORT_OUT 0x89 + +#define MDACIOCTL_DIRECT_CMD 0x22 +#define MDACIOCTL_STOREIMAGE 0x2C +#define MDACIOCTL_WRITESIGNATURE 0xA6 +#define MDACIOCTL_SETREALTIMECLOCK 0xAC +#define MDACIOCTL_PASS_THRU_CDB 0xAD +#define MDACIOCTL_PASS_THRU_INITIATE 0xAE +#define MDACIOCTL_CREATENEWCONF 0xC0 +#define MDACIOCTL_ADDNEWCONF 0xC4 +#define MDACIOCTL_MORE 0xC6 +#define MDACIOCTL_SETPHYSDEVPARAMETER 0xC8 +#define 
MDACIOCTL_SETLOGDEVPARAMETER 0xCF +#define MDACIOCTL_SETCONTROLLERPARAMETER 0xD1 +#define MDACIOCTL_WRITESANMAP 0xD4 +#define MDACIOCTL_SETMACADDRESS 0xD5 + +/* + * End of FCP specific structures + */ + +#define FL_ALPA 0x00 /* AL_PA of FL_Port */ + +/* Fibre Channel Service Parameter definitions */ + +#define FC_PH_4_0 6 /* FC-PH version 4.0 */ +#define FC_PH_4_1 7 /* FC-PH version 4.1 */ +#define FC_PH_4_2 8 /* FC-PH version 4.2 */ +#define FC_PH_4_3 9 /* FC-PH version 4.3 */ + +#define FC_PH_LOW 8 /* Lowest supported FC-PH version */ +#define FC_PH_HIGH 9 /* Highest supported FC-PH version */ +#define FC_PH3 0x20 /* FC-PH-3 version */ + +#define FF_FRAME_SIZE 2048 + + +/* ==== Mailbox Commands ==== */ +#define MBX_SHUTDOWN 0x00 /* terminate testing */ +#define MBX_LOAD_SM 0x01 +#define MBX_READ_NV 0x02 +#define MBX_WRITE_NV 0x03 +#define MBX_RUN_BIU_DIAG 0x04 +#define MBX_INIT_LINK 0x05 +#define MBX_DOWN_LINK 0x06 +#define MBX_CONFIG_LINK 0x07 +#define MBX_PART_SLIM 0x08 +#define MBX_CONFIG_RING 0x09 +#define MBX_RESET_RING 0x0A +#define MBX_READ_CONFIG 0x0B +#define MBX_READ_RCONFIG 0x0C +#define MBX_READ_SPARM 0x0D +#define MBX_READ_STATUS 0x0E +#define MBX_READ_RPI 0x0F +#define MBX_READ_XRI 0x10 +#define MBX_READ_REV 0x11 +#define MBX_READ_LNK_STAT 0x12 +#define MBX_REG_LOGIN 0x13 +#define MBX_UNREG_LOGIN 0x14 +#define MBX_READ_LA 0x15 +#define MBX_CLEAR_LA 0x16 +#define MBX_DUMP_MEMORY 0x17 +#define MBX_DUMP_CONTEXT 0x18 +#define MBX_RUN_DIAGS 0x19 +#define MBX_RESTART 0x1A +#define MBX_UPDATE_CFG 0x1B +#define MBX_DOWN_LOAD 0x1C +#define MBX_DEL_LD_ENTRY 0x1D +#define MBX_RUN_PROGRAM 0x1E +#define MBX_SET_MASK 0x20 +#define MBX_SET_SLIM 0x21 +#define MBX_UNREG_D_ID 0x23 +#define MBX_CONFIG_FARP 0x25 + +#define MBX_LOAD_AREA 0x81 +#define MBX_RUN_BIU_DIAG64 0x84 +#define MBX_CONFIG_PORT 0x88 +#define MBX_READ_SPARM64 0x8D +#define MBX_READ_RPI64 0x8F +#define MBX_REG_LOGIN64 0x93 +#define MBX_READ_LA64 0x95 + +#define MBX_FLASH_WR_ULA 0x98 +#define 
MBX_SET_DEBUG 0x99 +#define MBX_LOAD_EXP_ROM 0x9C + +#define MBX_MAX_CMDS 0x9D +#define MBX_SLI2_CMD_MASK 0x80 + + +/* ==== IOCB Commands ==== */ + +#define CMD_RCV_SEQUENCE_CX 0x01 +#define CMD_XMIT_SEQUENCE_CR 0x02 +#define CMD_XMIT_SEQUENCE_CX 0x03 +#define CMD_XMIT_BCAST_CN 0x04 +#define CMD_XMIT_BCAST_CX 0x05 +#define CMD_QUE_RING_BUF_CN 0x06 +#define CMD_QUE_XRI_BUF_CX 0x07 +#define CMD_IOCB_CONTINUE_CN 0x08 +#define CMD_RET_XRI_BUF_CX 0x09 +#define CMD_ELS_REQUEST_CR 0x0A +#define CMD_ELS_REQUEST_CX 0x0B +#define CMD_RCV_ELS_REQ_CX 0x0D +#define CMD_ABORT_XRI_CN 0x0E +#define CMD_ABORT_XRI_CX 0x0F +#define CMD_CLOSE_XRI_CR 0x10 +#define CMD_CLOSE_XRI_CX 0x11 +#define CMD_CREATE_XRI_CR 0x12 +#define CMD_CREATE_XRI_CX 0x13 +#define CMD_GET_RPI_CN 0x14 +#define CMD_XMIT_ELS_RSP_CX 0x15 +#define CMD_GET_RPI_CR 0x16 +#define CMD_XRI_ABORTED_CX 0x17 +#define CMD_FCP_IWRITE_CR 0x18 +#define CMD_FCP_IWRITE_CX 0x19 +#define CMD_FCP_IREAD_CR 0x1A +#define CMD_FCP_IREAD_CX 0x1B +#define CMD_FCP_ICMND_CR 0x1C +#define CMD_FCP_ICMND_CX 0x1D +#define CMD_ADAPTER_MSG 0x20 +#define CMD_ADAPTER_DUMP 0x22 +#define CMD_BPL_IWRITE_CR 0x48 +#define CMD_BPL_IWRITE_CX 0x49 +#define CMD_BPL_IREAD_CR 0x4A +#define CMD_BPL_IREAD_CX 0x4B +#define CMD_BPL_ICMND_CR 0x4C +#define CMD_BPL_ICMND_CX 0x4D + +/* SLI_2 IOCB Command Set */ + +#define CMD_RCV_SEQUENCE64_CX 0x81 +#define CMD_XMIT_SEQUENCE64_CR 0x82 +#define CMD_XMIT_SEQUENCE64_CX 0x83 +#define CMD_XMIT_BCAST64_CN 0x84 +#define CMD_XMIT_BCAST64_CX 0x85 +#define CMD_QUE_RING_BUF64_CN 0x86 +#define CMD_QUE_XRI_BUF64_CX 0x87 +#define CMD_IOCB_CONTINUE64_CN 0x88 +#define CMD_RET_XRI_BUF64_CX 0x89 +#define CMD_ELS_REQUEST64_CR 0x8A +#define CMD_ELS_REQUEST64_CX 0x8B +#define CMD_RCV_ELS_REQ64_CX 0x8D +#define CMD_XMIT_ELS_RSP64_CX 0x95 +#define CMD_FCP_IWRITE64_CR 0x98 +#define CMD_FCP_IWRITE64_CX 0x99 +#define CMD_FCP_IREAD64_CR 0x9A +#define CMD_FCP_IREAD64_CX 0x9B +#define CMD_FCP_ICMND64_CR 0x9C +#define CMD_FCP_ICMND64_CX 0x9D 
+#define CMD_GEN_REQUEST64_CR 0xC2 +#define CMD_GEN_REQUEST64_CX 0xC3 + + +/* + * Define Status + */ +#define MBX_SUCCESS 0 +#define MBXERR_NUM_RINGS 1 +#define MBXERR_NUM_IOCBS 2 +#define MBXERR_IOCBS_EXCEEDED 3 +#define MBXERR_BAD_RING_NUMBER 4 +#define MBXERR_MASK_ENTRIES_RANGE 5 +#define MBXERR_MASKS_EXCEEDED 6 +#define MBXERR_BAD_PROFILE 7 +#define MBXERR_BAD_DEF_CLASS 8 +#define MBXERR_BAD_MAX_RESPONDER 9 +#define MBXERR_BAD_MAX_ORIGINATOR 10 +#define MBXERR_RPI_REGISTERED 11 +#define MBXERR_RPI_FULL 12 +#define MBXERR_NO_RESOURCES 13 +#define MBXERR_BAD_RCV_LENGTH 14 +#define MBXERR_DMA_ERROR 15 +#define MBXERR_ERROR 16 +#define MBX_NOT_FINISHED 255 +/* + * Error codes returned by issue_mb_cmd() + */ +#define MBX_BUSY 0xffffff /* Attempted cmd to a busy Mailbox */ +#define MBX_TIMEOUT 0xfffffe /* Max time-out expired waiting for */ +/* synch. Mailbox operation */ +/* + * flags for issue_mb_cmd() + */ +#define MBX_POLL 1 /* poll mailbox till command done, then return */ +#define MBX_SLEEP 2 /* sleep till mailbox intr cmpl wakes thread up */ +#define MBX_NOWAIT 3 /* issue command then return immediately */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit crReserved :16; + u32bit crBegin : 8; + u32bit crEnd : 8; /* Low order bit first word */ + u32bit rrReserved :16; + u32bit rrBegin : 8; + u32bit rrEnd : 8; /* Low order bit second word */ +#endif +#if LITTLE_ENDIAN_HW + u32bit crEnd : 8; /* Low order bit first word */ + u32bit crBegin : 8; + u32bit crReserved :16; + u32bit rrEnd : 8; /* Low order bit second word */ + u32bit rrBegin : 8; + u32bit rrReserved :16; +#endif +} RINGS; + + +typedef struct { +#if BIG_ENDIAN_HW + ushort offCiocb; + ushort numCiocb; + ushort offRiocb; + ushort numRiocb; +#endif +#if LITTLE_ENDIAN_HW + ushort numCiocb; + ushort offCiocb; + ushort numRiocb; + ushort offRiocb; +#endif +} RING_DEF; + + +/* + * The following F.C. frame structures are defined in Big Endian format. 
+ */ + +typedef struct _NAME_TYPE { +#if BIG_ENDIAN_HW + u8bit nameType : 4; /* FC Word 0, bit 28:31 */ + u8bit IEEEextMsn : 4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */ +#endif +#if LITTLE_ENDIAN_HW + u8bit IEEEextMsn : 4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */ + u8bit nameType : 4; /* FC Word 0, bit 28:31 */ +#endif +#define NAME_IEEE 0x1 /* IEEE name - nameType */ +#define NAME_IEEE_EXT 0x2 /* IEEE extended name */ +#define NAME_FC_TYPE 0x3 /* FC native name type */ +#define NAME_IP_TYPE 0x4 /* IP address */ +#define NAME_CCITT_TYPE 0xC +#define NAME_CCITT_GR_TYPE 0xE + uchar IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */ + uchar IEEE[6]; /* FC IEEE address */ +} NAME_TYPE; + + +typedef struct _CSP { + uchar fcphHigh; /* FC Word 0, byte 0 */ + uchar fcphLow; + uchar bbCreditMsb; + uchar bbCreditlsb; /* FC Word 0, byte 3 */ +#if BIG_ENDIAN_HW + u16bit increasingOffset : 1; /* FC Word 1, bit 31 */ + u16bit randomOffset : 1; /* FC Word 1, bit 30 */ + u16bit word1Reserved2 : 1; /* FC Word 1, bit 29 */ + u16bit fPort : 1; /* FC Word 1, bit 28 */ + u16bit altBbCredit : 1; /* FC Word 1, bit 27 */ + u16bit edtovResolution : 1; /* FC Word 1, bit 26 */ + u16bit multicast : 1; /* FC Word 1, bit 25 */ + u16bit broadcast : 1; /* FC Word 1, bit 24 */ + + u16bit huntgroup : 1; /* FC Word 1, bit 23 */ + u16bit simplex : 1; /* FC Word 1, bit 22 */ + u16bit word1Reserved1 : 3; /* FC Word 1, bit 21:19 */ + u16bit dhd : 1; /* FC Word 1, bit 18 */ + u16bit contIncSeqCnt : 1; /* FC Word 1, bit 17 */ + u16bit payloadlength : 1; /* FC Word 1, bit 16 */ +#endif +#if LITTLE_ENDIAN_HW + u16bit broadcast : 1; /* FC Word 1, bit 24 */ + u16bit multicast : 1; /* FC Word 1, bit 25 */ + u16bit edtovResolution : 1; /* FC Word 1, bit 26 */ + u16bit altBbCredit : 1; /* FC Word 1, bit 27 */ + u16bit fPort : 1; /* FC Word 1, bit 28 */ + u16bit word1Reserved2 : 1; /* FC Word 1, bit 29 */ + u16bit randomOffset : 1; /* FC Word 1, bit 30 */ + u16bit increasingOffset : 1; /* 
FC Word 1, bit 31 */ + + u16bit payloadlength : 1; /* FC Word 1, bit 16 */ + u16bit contIncSeqCnt : 1; /* FC Word 1, bit 17 */ + u16bit dhd : 1; /* FC Word 1, bit 18 */ + u16bit word1Reserved1 : 3; /* FC Word 1, bit 21:19 */ + u16bit simplex : 1; /* FC Word 1, bit 22 */ + u16bit huntgroup : 1; /* FC Word 1, bit 23 */ +#endif + uchar bbRcvSizeMsb; /* Upper nibble is reserved */ + + uchar bbRcvSizeLsb; /* FC Word 1, byte 3 */ + union { + struct { + uchar word2Reserved1; /* FC Word 2 byte 0 */ + + uchar totalConcurrSeq; /* FC Word 2 byte 1 */ + uchar roByCategoryMsb; /* FC Word 2 byte 2 */ + + uchar roByCategoryLsb; /* FC Word 2 byte 3 */ + } nPort; + uint32 r_a_tov; /* R_A_TOV must be in B.E. format */ + } w2; + + uint32 e_d_tov; /* E_D_TOV must be in B.E. format */ +} CSP; + + +typedef struct _CLASS_PARMS { +#if BIG_ENDIAN_HW + u8bit classValid : 1; /* FC Word 0, bit 31 */ + u8bit intermix : 1; /* FC Word 0, bit 30 */ + u8bit stackedXparent : 1; /* FC Word 0, bit 29 */ + u8bit stackedLockDown : 1; /* FC Word 0, bit 28 */ + u8bit seqDelivery : 1; /* FC Word 0, bit 27 */ + u8bit word0Reserved1 : 3; /* FC Word 0, bit 24:26 */ +#endif +#if LITTLE_ENDIAN_HW + u8bit word0Reserved1 : 3; /* FC Word 0, bit 24:26 */ + u8bit seqDelivery : 1; /* FC Word 0, bit 27 */ + u8bit stackedLockDown : 1; /* FC Word 0, bit 28 */ + u8bit stackedXparent : 1; /* FC Word 0, bit 29 */ + u8bit intermix : 1; /* FC Word 0, bit 30 */ + u8bit classValid : 1; /* FC Word 0, bit 31 */ + +#endif + uchar word0Reserved2; /* FC Word 0, bit 16:23 */ +#if BIG_ENDIAN_HW + u8bit iCtlXidReAssgn : 2; /* FC Word 0, Bit 14:15 */ + u8bit iCtlInitialPa : 2; /* FC Word 0, bit 12:13 */ + u8bit iCtlAck0capable : 1; /* FC Word 0, bit 11 */ + u8bit iCtlAckNcapable : 1; /* FC Word 0, bit 10 */ + u8bit word0Reserved3 : 2; /* FC Word 0, bit 8: 9 */ +#endif +#if LITTLE_ENDIAN_HW + u8bit word0Reserved3 : 2; /* FC Word 0, bit 8: 9 */ + u8bit iCtlAckNcapable : 1; /* FC Word 0, bit 10 */ + u8bit iCtlAck0capable : 1; /* FC Word 
0, bit 11 */ + u8bit iCtlInitialPa : 2; /* FC Word 0, bit 12:13 */ + u8bit iCtlXidReAssgn : 2; /* FC Word 0, Bit 14:15 */ +#endif + uchar word0Reserved4; /* FC Word 0, bit 0: 7 */ +#if BIG_ENDIAN_HW + u8bit rCtlAck0capable : 1; /* FC Word 1, bit 31 */ + u8bit rCtlAckNcapable : 1; /* FC Word 1, bit 30 */ + u8bit rCtlXidInterlck : 1; /* FC Word 1, bit 29 */ + u8bit rCtlErrorPolicy : 2; /* FC Word 1, bit 27:28 */ + u8bit word1Reserved1 : 1; /* FC Word 1, bit 26 */ + u8bit rCtlCatPerSeq : 2; /* FC Word 1, bit 24:25 */ +#endif +#if LITTLE_ENDIAN_HW + u8bit rCtlCatPerSeq : 2; /* FC Word 1, bit 24:25 */ + u8bit word1Reserved1 : 1; /* FC Word 1, bit 26 */ + u8bit rCtlErrorPolicy : 2; /* FC Word 1, bit 27:28 */ + u8bit rCtlXidInterlck : 1; /* FC Word 1, bit 29 */ + u8bit rCtlAckNcapable : 1; /* FC Word 1, bit 30 */ + u8bit rCtlAck0capable : 1; /* FC Word 1, bit 31 */ +#endif + uchar word1Reserved2; /* FC Word 1, bit 16:23 */ + uchar rcvDataSizeMsb; /* FC Word 1, bit 8:15 */ + uchar rcvDataSizeLsb; /* FC Word 1, bit 0: 7 */ + + uchar concurrentSeqMsb; /* FC Word 2, bit 24:31 */ + uchar concurrentSeqLsb; /* FC Word 2, bit 16:23 */ + uchar EeCreditSeqMsb; /* FC Word 2, bit 8:15 */ + uchar EeCreditSeqLsb; /* FC Word 2, bit 0: 7 */ + + uchar openSeqPerXchgMsb; /* FC Word 3, bit 24:31 */ + uchar openSeqPerXchgLsb; /* FC Word 3, bit 16:23 */ + uchar word3Reserved1; /* Fc Word 3, bit 8:15 */ + uchar word3Reserved2; /* Fc Word 3, bit 0: 7 */ +} CLASS_PARMS; + + +typedef struct _SERV_PARM { /* Structure is in Big Endian format */ + CSP cmn; + NAME_TYPE portName; + NAME_TYPE nodeName; + CLASS_PARMS cls1; + CLASS_PARMS cls2; + CLASS_PARMS cls3; + CLASS_PARMS cls4; + uchar vendorVersion[16]; +} SERV_PARM, *PSERV_PARM; + + +/* + * Extended Link Service LS_COMMAND codes (Payload Word 0) + */ +#if BIG_ENDIAN_HW +#define ELS_CMD_MASK 0xffff0000 +#define ELS_RSP_MASK 0xff000000 +#define ELS_CMD_LS_RJT 0x01000000 +#define ELS_CMD_ACC 0x02000000 +#define ELS_CMD_PLOGI 0x03000000 +#define 
ELS_CMD_FLOGI 0x04000000 +#define ELS_CMD_LOGO 0x05000000 +#define ELS_CMD_ABTX 0x06000000 +#define ELS_CMD_RCS 0x07000000 +#define ELS_CMD_RES 0x08000000 +#define ELS_CMD_RSS 0x09000000 +#define ELS_CMD_RSI 0x0A000000 +#define ELS_CMD_ESTS 0x0B000000 +#define ELS_CMD_ESTC 0x0C000000 +#define ELS_CMD_ADVC 0x0D000000 +#define ELS_CMD_RTV 0x0E000000 +#define ELS_CMD_RLS 0x0F000000 +#define ELS_CMD_ECHO 0x10000000 +#define ELS_CMD_TEST 0x11000000 +#define ELS_CMD_RRQ 0x12000000 +#define ELS_CMD_PRLI 0x20100014 +#define ELS_CMD_PRLO 0x21100014 +#define ELS_CMD_PDISC 0x50000000 +#define ELS_CMD_FDISC 0x51000000 +#define ELS_CMD_ADISC 0x52000000 +#define ELS_CMD_FARP 0x54000000 +#define ELS_CMD_FARPR 0x55000000 +#define ELS_CMD_FAN 0x60000000 +#define ELS_CMD_RSCN 0x61040000 +#define ELS_CMD_SCR 0x62000000 +#define ELS_CMD_RNID 0x78000000 +#endif +#if LITTLE_ENDIAN_HW +#define ELS_CMD_MASK 0xffff +#define ELS_RSP_MASK 0xff +#define ELS_CMD_LS_RJT 0x01 +#define ELS_CMD_ACC 0x02 +#define ELS_CMD_PLOGI 0x03 +#define ELS_CMD_FLOGI 0x04 +#define ELS_CMD_LOGO 0x05 +#define ELS_CMD_ABTX 0x06 +#define ELS_CMD_RCS 0x07 +#define ELS_CMD_RES 0x08 +#define ELS_CMD_RSS 0x09 +#define ELS_CMD_RSI 0x0A +#define ELS_CMD_ESTS 0x0B +#define ELS_CMD_ESTC 0x0C +#define ELS_CMD_ADVC 0x0D +#define ELS_CMD_RTV 0x0E +#define ELS_CMD_RLS 0x0F +#define ELS_CMD_ECHO 0x10 +#define ELS_CMD_TEST 0x11 +#define ELS_CMD_RRQ 0x12 +#define ELS_CMD_PRLI 0x14001020 +#define ELS_CMD_PRLO 0x14001021 +#define ELS_CMD_PDISC 0x50 +#define ELS_CMD_FDISC 0x51 +#define ELS_CMD_ADISC 0x52 +#define ELS_CMD_FARP 0x54 +#define ELS_CMD_FARPR 0x55 +#define ELS_CMD_FAN 0x60 +#define ELS_CMD_RSCN 0x0461 +#define ELS_CMD_SCR 0x62 +#define ELS_CMD_RNID 0x78 +#endif + + +/* + * LS_RJT Payload Definition + */ + +typedef struct _LS_RJT { /* Structure is in Big Endian format */ + union { + uint32 lsRjtError; + struct { + uchar lsRjtRsvd0; /* FC Word 0, bit 24:31 */ + + uchar lsRjtRsnCode; /* FC Word 0, bit 16:23 */ + /* LS_RJT 
reason codes */ +#define LSRJT_INVALID_CMD 0x01 +#define LSRJT_LOGICAL_ERR 0x03 +#define LSRJT_LOGICAL_BSY 0x05 +#define LSRJT_PROTOCOL_ERR 0x07 +#define LSRJT_UNABLE_TPC 0x09 /* Unable to perform command */ +#define LSRJT_CMD_UNSUPPORTED 0x0B +#define LSRJT_VENDOR_UNIQUE 0xFF /* See Byte 3 */ + + uchar lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */ + /* LS_RJT reason explanation */ +#define LSEXP_NOTHING_MORE 0x00 +#define LSEXP_SPARM_OPTIONS 0x01 +#define LSEXP_SPARM_ICTL 0x03 +#define LSEXP_SPARM_RCTL 0x05 +#define LSEXP_SPARM_RCV_SIZE 0x07 +#define LSEXP_SPARM_CONCUR_SEQ 0x09 +#define LSEXP_SPARM_CREDIT 0x0B +#define LSEXP_INVALID_PNAME 0x0D +#define LSEXP_INVALID_NNAME 0x0E +#define LSEXP_INVALID_CSP 0x0F +#define LSEXP_INVALID_ASSOC_HDR 0x11 +#define LSEXP_ASSOC_HDR_REQ 0x13 +#define LSEXP_INVALID_O_SID 0x15 +#define LSEXP_INVALID_OX_RX 0x17 +#define LSEXP_CMD_IN_PROGRESS 0x19 +#define LSEXP_INVALID_NPORT_ID 0x1F +#define LSEXP_INVALID_SEQ_ID 0x21 +#define LSEXP_INVALID_XCHG 0x23 +#define LSEXP_INACTIVE_XCHG 0x25 +#define LSEXP_RQ_REQUIRED 0x27 +#define LSEXP_OUT_OF_RESOURCE 0x29 +#define LSEXP_CANT_GIVE_DATA 0x2A +#define LSEXP_REQ_UNSUPPORTED 0x2C + uchar vendorUnique; /* FC Word 0, bit 0: 7 */ + } b; + } un; +} LS_RJT; + + +/* + * N_Port Login (FLOGO/PLOGO Request) Payload Definition + */ + +typedef struct _LOGO { /* Structure is in Big Endian format */ + union { + uint32 nPortId32; /* Access nPortId as a word */ + struct { + uchar word1Reserved1; /* FC Word 1, bit 31:24 */ + uchar nPortIdByte0; /* N_port ID bit 16:23 */ + uchar nPortIdByte1; /* N_port ID bit 8:15 */ + uchar nPortIdByte2; /* N_port ID bit 0: 7 */ + } b; + } un; + NAME_TYPE portName; /* N_port name field */ +} LOGO; + + +/* + * FCP Login (PRLI Request / ACC) Payload Definition + */ + +#define PRLX_PAGE_LEN 0x10 +#define TPRLO_PAGE_LEN 0x14 + +typedef struct _PRLI { /* Structure is in Big Endian format */ + uchar prliType; /* FC Parm Word 0, bit 24:31 */ + +#define PRLI_FCP_TYPE 0x08 + uchar 
word0Reserved1; /* FC Parm Word 0, bit 16:23 */ + +#if BIG_ENDIAN_HW + u8bit origProcAssocV : 1; /* FC Parm Word 0, bit 15 */ + u8bit respProcAssocV : 1; /* FC Parm Word 0, bit 14 */ + u8bit estabImagePair : 1; /* FC Parm Word 0, bit 13 */ + + u8bit word0Reserved2 : 1; /* FC Parm Word 0, bit 12 */ + u8bit acceptRspCode : 4; /* FC Parm Word 0, bit 8:11, ACC ONLY */ +#endif +#if LITTLE_ENDIAN_HW + u8bit acceptRspCode : 4; /* FC Parm Word 0, bit 8:11, ACC ONLY */ + u8bit word0Reserved2 : 1; /* FC Parm Word 0, bit 12 */ + u8bit estabImagePair : 1; /* FC Parm Word 0, bit 13 */ + u8bit respProcAssocV : 1; /* FC Parm Word 0, bit 14 */ + u8bit origProcAssocV : 1; /* FC Parm Word 0, bit 15 */ +#endif +#define PRLI_REQ_EXECUTED 0x1 /* acceptRspCode */ +#define PRLI_NO_RESOURCES 0x2 +#define PRLI_INIT_INCOMPLETE 0x3 +#define PRLI_NO_SUCH_PA 0x4 +#define PRLI_PREDEF_CONFIG 0x5 +#define PRLI_PARTIAL_SUCCESS 0x6 +#define PRLI_INVALID_PAGE_CNT 0x7 + uchar word0Reserved3; /* FC Parm Word 0, bit 0:7 */ + + uint32 origProcAssoc; /* FC Parm Word 1, bit 0:31 */ + + uint32 respProcAssoc; /* FC Parm Word 2, bit 0:31 */ + + uchar word3Reserved1; /* FC Parm Word 3, bit 24:31 */ + uchar word3Reserved2; /* FC Parm Word 3, bit 16:23 */ +#if BIG_ENDIAN_HW + u16bit Word3bit15Resved : 1; /* FC Parm Word 3, bit 15 */ + u16bit Word3bit14Resved : 1; /* FC Parm Word 3, bit 14 */ + u16bit Word3bit13Resved : 1; /* FC Parm Word 3, bit 13 */ + u16bit Word3bit12Resved : 1; /* FC Parm Word 3, bit 12 */ + u16bit Word3bit11Resved : 1; /* FC Parm Word 3, bit 11 */ + u16bit Word3bit10Resved : 1; /* FC Parm Word 3, bit 10 */ + u16bit TaskRetryIdReq : 1; /* FC Parm Word 3, bit 9 */ + u16bit Retry : 1; /* FC Parm Word 3, bit 8 */ + u16bit ConfmComplAllowed : 1; /* FC Parm Word 3, bit 7 */ + u16bit dataOverLay : 1; /* FC Parm Word 3, bit 6 */ + u16bit initiatorFunc : 1; /* FC Parm Word 3, bit 5 */ + u16bit targetFunc : 1; /* FC Parm Word 3, bit 4 */ + u16bit cmdDataMixEna : 1; /* FC Parm Word 3, bit 3 */ + 
u16bit dataRspMixEna : 1; /* FC Parm Word 3, bit 2 */ + u16bit readXferRdyDis : 1; /* FC Parm Word 3, bit 1 */ + u16bit writeXferRdyDis : 1; /* FC Parm Word 3, bit 0 */ +#endif +#if LITTLE_ENDIAN_HW + u16bit Retry : 1; /* FC Parm Word 3, bit 8 */ + u16bit TaskRetryIdReq : 1; /* FC Parm Word 3, bit 9 */ + u16bit Word3bit10Resved : 1; /* FC Parm Word 3, bit 10 */ + u16bit Word3bit11Resved : 1; /* FC Parm Word 3, bit 11 */ + u16bit Word3bit12Resved : 1; /* FC Parm Word 3, bit 12 */ + u16bit Word3bit13Resved : 1; /* FC Parm Word 3, bit 13 */ + u16bit Word3bit14Resved : 1; /* FC Parm Word 3, bit 14 */ + u16bit Word3bit15Resved : 1; /* FC Parm Word 3, bit 15 */ + u16bit writeXferRdyDis : 1; /* FC Parm Word 3, bit 0 */ + u16bit readXferRdyDis : 1; /* FC Parm Word 3, bit 1 */ + u16bit dataRspMixEna : 1; /* FC Parm Word 3, bit 2 */ + u16bit cmdDataMixEna : 1; /* FC Parm Word 3, bit 3 */ + u16bit targetFunc : 1; /* FC Parm Word 3, bit 4 */ + u16bit initiatorFunc : 1; /* FC Parm Word 3, bit 5 */ + u16bit dataOverLay : 1; /* FC Parm Word 3, bit 6 */ + u16bit ConfmComplAllowed : 1; /* FC Parm Word 3, bit 7 */ +#endif +} PRLI; + +/* + * FCP Logout (PRLO Request / ACC) Payload Definition + */ + +typedef struct _PRLO { /* Structure is in Big Endian format */ + uchar prloType; /* FC Parm Word 0, bit 24:31 */ + +#define PRLO_FCP_TYPE 0x08 + uchar word0Reserved1; /* FC Parm Word 0, bit 16:23 */ + +#if BIG_ENDIAN_HW + u8bit origProcAssocV : 1; /* FC Parm Word 0, bit 15 */ + u8bit respProcAssocV : 1; /* FC Parm Word 0, bit 14 */ + u8bit word0Reserved2 : 2; /* FC Parm Word 0, bit 12:13 */ + u8bit acceptRspCode : 4; /* FC Parm Word 0, bit 8:11, ACC ONLY */ +#endif +#if LITTLE_ENDIAN_HW + u8bit acceptRspCode : 4; /* FC Parm Word 0, bit 8:11, ACC ONLY */ + u8bit word0Reserved2 : 2; /* FC Parm Word 0, bit 12:13 */ + u8bit respProcAssocV : 1; /* FC Parm Word 0, bit 14 */ + u8bit origProcAssocV : 1; /* FC Parm Word 0, bit 15 */ +#endif +#define PRLO_REQ_EXECUTED 0x1 /* acceptRspCode */ 
+#define PRLO_NO_SUCH_IMAGE 0x4 +#define PRLO_INVALID_PAGE_CNT 0x7 + + uchar word0Reserved3; /* FC Parm Word 0, bit 0:7 */ + + uint32 origProcAssoc; /* FC Parm Word 1, bit 0:31 */ + + uint32 respProcAssoc; /* FC Parm Word 2, bit 0:31 */ + + uint32 word3Reserved1; /* FC Parm Word 3, bit 0:31 */ +} PRLO; + + +typedef struct _ADISC { /* Structure is in Big Endian format */ + uint32 hardAL_PA; + NAME_TYPE portName; + NAME_TYPE nodeName; + uint32 DID; +} ADISC; + + +typedef struct _FARP { /* Structure is in Big Endian format */ + u32bit Mflags : 8; + u32bit Odid : 24; +#define FARP_NO_ACTION 0 /* FARP information enclosed, no action */ +#define FARP_MATCH_PORT 0x1 /* Match on Responder Port Name */ +#define FARP_MATCH_NODE 0x2 /* Match on Responder Node Name */ +#define FARP_MATCH_IP 0x4 /* Match on IP address, not supported */ +#define FARP_MATCH_IPV4 0x5 /* Match on IPV4 address, not supported */ +#define FARP_MATCH_IPV6 0x6 /* Match on IPV6 address, not supported */ + u32bit Rflags : 8; + u32bit Rdid : 24; +#define FARP_REQUEST_PLOGI 0x1 /* Request for PLOGI */ +#define FARP_REQUEST_FARPR 0x2 /* Request for FARP Response */ + NAME_TYPE OportName; + NAME_TYPE OnodeName; + NAME_TYPE RportName; + NAME_TYPE RnodeName; + uchar Oipaddr[16]; + uchar Ripaddr[16]; +} FARP; + +typedef struct _FAN { /* Structure is in Big Endian format */ + uint32 Fdid; + NAME_TYPE FportName; + NAME_TYPE FnodeName; +} FAN; + +typedef struct _SCR { /* Structure is in Big Endian format */ + uchar resvd1; + uchar resvd2; + uchar resvd3; + uchar Function; +#define SCR_FUNC_FABRIC 0x01 +#define SCR_FUNC_NPORT 0x02 +#define SCR_FUNC_FULL 0x03 +#define SCR_CLEAR 0xff +} SCR; + +typedef struct _RNID_TOP_DISC { + NAME_TYPE portName; + uchar resvd[8]; + uint32 unitType; +#define RNID_HBA 0x7 +#define RNID_HOST 0xa +#define RNID_DRIVER 0xd + uint32 physPort; + uint32 attachedNodes; + ushort ipVersion; +#define RNID_IPV4 0x1 +#define RNID_IPV6 0x2 + ushort UDPport; + uchar ipAddr[16]; + ushort resvd1; + 
ushort flags; +#define RNID_TD_SUPPORT 0x1 +#define RNID_LP_VALID 0x2 +} RNID_TOP_DISC; + +typedef struct _RNID { /* Structure is in Big Endian format */ + uchar Format; +#define RNID_TOPOLOGY_DISC 0xdf + uchar CommonLen; + uchar resvd1; + uchar SpecificLen; + NAME_TYPE portName; + NAME_TYPE nodeName; + union { + RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */ + } un; +} RNID; + +typedef struct _RRQ { /* Structure is in Big Endian format */ + uint32 SID; + ushort Oxid; + ushort Rxid; + uchar resv[32]; /* optional association hdr */ +} RRQ; + + +/* This is used for RSCN command */ +typedef struct _D_ID { /* Structure is in Big Endian format */ + union { + uint32 word; + struct { +#if BIG_ENDIAN_HW + uchar resv; + uchar domain; + uchar area; + uchar id; +#endif +#if LITTLE_ENDIAN_HW + uchar id; + uchar area; + uchar domain; + uchar resv; +#endif + } b; + } un; +} D_ID; + +/* + * Structure to define all ELS Payload types + */ + +typedef struct _ELS_PKT { /* Structure is in Big Endian format */ + uchar elsCode; /* FC Word 0, bit 24:31 */ + uchar elsByte1; + uchar elsByte2; + uchar elsByte3; + union { + LS_RJT lsRjt; /* Payload for LS_RJT ELS response */ + SERV_PARM logi; /* Payload for PLOGI/FLOGI/PDISC/ACC */ + LOGO logo; /* Payload for PLOGO/FLOGO/ACC */ + PRLI prli; /* Payload for PRLI/ACC */ + PRLO prlo; /* Payload for PRLO/ACC */ + ADISC adisc; /* Payload for ADISC/ACC */ + FARP farp; /* Payload for FARP/ACC */ + FAN fan; /* Payload for FAN */ + SCR scr; /* Payload for SCR/ACC */ + RRQ rrq; /* Payload for RRQ */ + RNID rnid; /* Payload for RNID */ + uchar pad[128-4]; /* Pad out to payload of 128 bytes */ + } un; +} ELS_PKT; + + +/* + * Begin Structure Definitions for Mailbox Commands + */ + +typedef struct { +#if BIG_ENDIAN_HW + uchar tval; + uchar tmask; + uchar rval; + uchar rmask; +#endif +#if LITTLE_ENDIAN_HW + uchar rmask; + uchar rval; + uchar tmask; + uchar tval; +#endif +} RR_REG; + +typedef struct { + uint32 bdeAddress; +#if BIG_ENDIAN_HW + u32bit 
bdeReserved : 4; + u32bit bdeAddrHigh : 4; + u32bit bdeSize : 24; +#endif +#if LITTLE_ENDIAN_HW + u32bit bdeSize : 24; + u32bit bdeAddrHigh : 4; + u32bit bdeReserved : 4; +#endif +} ULP_BDE; + +typedef struct ULP_BDE_64 { /* SLI-2 */ + union ULP_BDE_TUS { + uint32 w; + struct { +#if BIG_ENDIAN_HW + u32bit bdeFlags : 8; + u32bit bdeSize : 24; /* Size of buffer (in bytes) */ +#endif +#if LITTLE_ENDIAN_HW + u32bit bdeSize : 24; /* Size of buffer (in bytes) */ + u32bit bdeFlags : 8; +#endif +#define BUFF_USE_RSVD 0x01 /* bdeFlags */ +#define BUFF_USE_INTRPT 0x02 +#define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */ +#define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit buffer */ +#define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit addr */ +#define BUFF_TYPE_SPECIAL 0x20 +#define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */ +#define BUFF_TYPE_INVALID 0x80 /* "" "" */ + } f; + } tus; + uint32 addrLow; + uint32 addrHigh; +} ULP_BDE64; +#define BDE64_SIZE_WORD 0 +#define BPL64_SIZE_WORD 0x40 + +typedef struct ULP_BDL { /* SLI-2 */ +#if BIG_ENDIAN_HW + u32bit bdeFlags : 8; /* BDL Flags */ + u32bit bdeSize : 24; /* Size of BDL array in host memory (bytes) */ +#endif +#if LITTLE_ENDIAN_HW + u32bit bdeSize : 24; /* Size of BDL array in host memory (bytes) */ + u32bit bdeFlags : 8; /* BDL Flags */ +#endif + uint32 addrLow; /* Address 0:31 */ + uint32 addrHigh; /* Address 32:63 */ + uint32 ulpIoTag32; /* Can be used for 32 bit I/O Tag */ +} ULP_BDL; + + +/* Structure for MB Command LOAD_SM and DOWN_LOAD */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit rsvd2 :25; + u32bit acknowledgment : 1; + u32bit version : 1; + u32bit erase_or_prog : 1; + u32bit update_flash : 1; + u32bit update_ram : 1; + u32bit method : 1; + u32bit load_cmplt : 1; +#endif +#if LITTLE_ENDIAN_HW + u32bit load_cmplt : 1; + u32bit method : 1; + u32bit update_ram : 1; + u32bit update_flash : 1; + u32bit erase_or_prog : 1; + u32bit version : 1; + u32bit acknowledgment : 1; + 
u32bit rsvd2 :25; +#endif + +#define DL_FROM_BDE 0 /* method */ +#define DL_FROM_SLIM 1 + + uint32 dl_to_adr_low; + uint32 dl_to_adr_high; + uint32 dl_len; + union { + uint32 dl_from_mbx_offset; + ULP_BDE dl_from_bde; + ULP_BDE64 dl_from_bde64; + } un; + +} LOAD_SM_VAR; + + +/* Structure for MB Command READ_NVPARM (02) */ + +typedef struct { + uint32 rsvd1[3]; /* Read as all one's */ + uint32 rsvd2; /* Read as all zero's */ + uint32 portname[2]; /* N_PORT name */ + uint32 nodename[2]; /* NODE name */ +#if BIG_ENDIAN_HW + u32bit pref_DID : 24; + u32bit hardAL_PA : 8; +#endif +#if LITTLE_ENDIAN_HW + u32bit hardAL_PA : 8; + u32bit pref_DID : 24; +#endif + uint32 rsvd3[21]; /* Read as all one's */ +} READ_NV_VAR; + + +/* Structure for MB Command WRITE_NVPARMS (03) */ + +typedef struct { + uint32 rsvd1[3]; /* Must be all one's */ + uint32 rsvd2; /* Must be all zero's */ + uint32 portname[2]; /* N_PORT name */ + uint32 nodename[2]; /* NODE name */ +#if BIG_ENDIAN_HW + u32bit pref_DID : 24; + u32bit hardAL_PA : 8; +#endif +#if LITTLE_ENDIAN_HW + u32bit hardAL_PA : 8; + u32bit pref_DID : 24; +#endif + uint32 rsvd3[21]; /* Must be all one's */ +} WRITE_NV_VAR; + + +/* Structure for MB Command RUN_BIU_DIAG (04) */ +/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */ + +typedef struct { + uint32 rsvd1; + union { + struct { + ULP_BDE xmit_bde; + ULP_BDE rcv_bde; + } s1; + struct { + ULP_BDE64 xmit_bde64; + ULP_BDE64 rcv_bde64; + } s2; + } un; +} BIU_DIAG_VAR; + + +/* Structure for MB Command INIT_LINK (05) */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit rsvd1 : 24; + u32bit lipsr_AL_PA : 8; /* AL_PA to issue Lip Selective Reset to */ +#endif +#if LITTLE_ENDIAN_HW + u32bit lipsr_AL_PA : 8; /* AL_PA to issue Lip Selective Reset to */ + u32bit rsvd1 : 24; +#endif + +#if BIG_ENDIAN_HW + uchar fabric_AL_PA; /* If using a Fabric Assigned AL_PA */ + uchar rsvd2; + ushort link_flags; +#endif +#if LITTLE_ENDIAN_HW + ushort link_flags; + uchar rsvd2; + uchar fabric_AL_PA; /* If 
using a Fabric Assigned AL_PA */ +#endif +#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */ +#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */ +#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */ +#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */ +#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */ +#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */ + +#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ +#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */ + + uint32 link_speed; +#define LINK_SPEED_AUTO 0 /* Auto selection */ +#define LINK_SPEED_1G 1 /* 1 Gigabaud */ +#define LINK_SPEED_2G 2 /* 2 Gigabaud */ + +} INIT_LINK_VAR; + + +/* Structure for MB Command DOWN_LINK (06) */ + +typedef struct { + uint32 rsvd1; +} DOWN_LINK_VAR; + + +/* Structure for MB Command CONFIG_LINK (07) */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit cr : 1; + u32bit ci : 1; + u32bit cr_delay : 6; + u32bit cr_count : 8; + u32bit rsvd1 : 8; + u32bit MaxBBC : 8; +#endif +#if LITTLE_ENDIAN_HW + u32bit MaxBBC : 8; + u32bit rsvd1 : 8; + u32bit cr_count : 8; + u32bit cr_delay : 6; + u32bit ci : 1; + u32bit cr : 1; +#endif + uint32 myId; + uint32 rsvd2; + uint32 edtov; + uint32 arbtov; + uint32 ratov; + uint32 rttov; + uint32 altov; + uint32 crtov; + uint32 citov; +#if BIG_ENDIAN_HW + u32bit rrq_enable : 1; + u32bit rrq_immed : 1; + u32bit rsvd4 : 29; + u32bit ack0_enable : 1; +#endif +#if LITTLE_ENDIAN_HW + u32bit ack0_enable : 1; + u32bit rsvd4 : 29; + u32bit rrq_immed : 1; + u32bit rrq_enable : 1; +#endif +} CONFIG_LINK; + + +/* Structure for MB Command PART_SLIM (08) */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit unused1 : 24; + u32bit numRing : 8; +#endif +#if LITTLE_ENDIAN_HW + u32bit numRing : 8; + u32bit unused1 : 24; +#endif + RING_DEF ringdef[4]; + u32bit hbainit; +} PART_SLIM_VAR; + + +/* Structure for MB Command CONFIG_RING (09) */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit unused2 : 6; + 
u32bit recvSeq : 1; + u32bit recvNotify: 1; + u32bit numMask : 8; + u32bit profile : 8; + u32bit unused1 : 4; + u32bit ring : 4; +#endif +#if LITTLE_ENDIAN_HW + u32bit ring : 4; + u32bit unused1 : 4; + u32bit profile : 8; + u32bit numMask : 8; + u32bit recvNotify: 1; + u32bit recvSeq : 1; + u32bit unused2 : 6; +#endif +#if BIG_ENDIAN_HW + ushort maxRespXchg; + ushort maxOrigXchg; +#endif +#if LITTLE_ENDIAN_HW + ushort maxOrigXchg; + ushort maxRespXchg; +#endif + RR_REG rrRegs[6]; +} CONFIG_RING_VAR; + + +/* Structure for MB Command RESET_RING (10) */ + +typedef struct { + uint32 ring_no; +} RESET_RING_VAR; + + +/* Structure for MB Command READ_CONFIG (11) */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit cr : 1; + u32bit ci : 1; + u32bit cr_delay : 6; + u32bit cr_count : 8; + u32bit InitBBC : 8; + u32bit MaxBBC : 8; +#endif +#if LITTLE_ENDIAN_HW + u32bit MaxBBC : 8; + u32bit InitBBC : 8; + u32bit cr_count : 8; + u32bit cr_delay : 6; + u32bit ci : 1; + u32bit cr : 1; +#endif +#if BIG_ENDIAN_HW + u32bit topology : 8; + u32bit myDid : 24; +#endif +#if LITTLE_ENDIAN_HW + u32bit myDid : 24; + u32bit topology : 8; +#endif + /* Defines for topology (defined previously) */ +#if BIG_ENDIAN_HW + u32bit AR : 1; + u32bit IR : 1; + u32bit rsvd1 : 29; + u32bit ack0 : 1; +#endif +#if LITTLE_ENDIAN_HW + u32bit ack0 : 1; + u32bit rsvd1 : 29; + u32bit IR : 1; + u32bit AR : 1; +#endif + uint32 edtov; + uint32 arbtov; + uint32 ratov; + uint32 rttov; + uint32 altov; + uint32 lmt; +#define LMT_RESERVED 0x0 /* Not used */ +#define LMT_266_10bit 0x1 /* 265.625 Mbaud 10 bit iface */ +#define LMT_532_10bit 0x2 /* 531.25 Mbaud 10 bit iface */ +#define LMT_1063_10bit 0x3 /* 1062.5 Mbaud 20 bit iface */ +#define LMT_2125_10bit 0x8 /* 2125 Mbaud 10 bit iface */ + + uint32 rsvd2; + uint32 rsvd3; + uint32 max_xri; + uint32 max_iocb; + uint32 max_rpi; + uint32 avail_xri; + uint32 avail_iocb; + uint32 avail_rpi; + uint32 default_rpi; +} READ_CONFIG_VAR; + + +/* Structure for MB Command 
READ_RCONFIG (12) */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit rsvd2 : 7; + u32bit recvNotify : 1; + u32bit numMask : 8; + u32bit profile : 8; + u32bit rsvd1 : 4; + u32bit ring : 4; +#endif +#if LITTLE_ENDIAN_HW + u32bit ring : 4; + u32bit rsvd1 : 4; + u32bit profile : 8; + u32bit numMask : 8; + u32bit recvNotify : 1; + u32bit rsvd2 : 7; +#endif +#if BIG_ENDIAN_HW + ushort maxResp; + ushort maxOrig; +#endif +#if LITTLE_ENDIAN_HW + ushort maxOrig; + ushort maxResp; +#endif + RR_REG rrRegs[6]; +#if BIG_ENDIAN_HW + ushort cmdRingOffset; + ushort cmdEntryCnt; + ushort rspRingOffset; + ushort rspEntryCnt; + ushort nextCmdOffset; + ushort rsvd3; + ushort nextRspOffset; + ushort rsvd4; +#endif +#if LITTLE_ENDIAN_HW + ushort cmdEntryCnt; + ushort cmdRingOffset; + ushort rspEntryCnt; + ushort rspRingOffset; + ushort rsvd3; + ushort nextCmdOffset; + ushort rsvd4; + ushort nextRspOffset; +#endif +} READ_RCONF_VAR; + + +/* Structure for MB Command READ_SPARM (13) */ +/* Structure for MB Command READ_SPARM64 (0x8D) */ + +typedef struct { + uint32 rsvd1; + uint32 rsvd2; + union { + ULP_BDE sp; /* This BDE points to SERV_PARM structure */ + ULP_BDE64 sp64; + } un; +} READ_SPARM_VAR; + + +/* Structure for MB Command READ_STATUS (14) */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit rsvd1 : 31; + u32bit clrCounters : 1; + ushort activeXriCnt; + ushort activeRpiCnt; +#endif +#if LITTLE_ENDIAN_HW + u32bit clrCounters : 1; + u32bit rsvd1 : 31; + ushort activeRpiCnt; + ushort activeXriCnt; +#endif + uint32 xmitByteCnt; + uint32 rcvbyteCnt; + uint32 xmitFrameCnt; + uint32 rcvFrameCnt; + uint32 xmitSeqCnt; + uint32 rcvSeqCnt; + uint32 totalOrigExchanges; + uint32 totalRespExchanges; + uint32 rcvPbsyCnt; + uint32 rcvFbsyCnt; +} READ_STATUS_VAR; + + +/* Structure for MB Command READ_RPI (15) */ +/* Structure for MB Command READ_RPI64 (0x8F) */ + +typedef struct { +#if BIG_ENDIAN_HW + ushort nextRpi; + ushort reqRpi; + u32bit rsvd2 : 8; + u32bit DID : 24; +#endif +#if 
LITTLE_ENDIAN_HW + ushort reqRpi; + ushort nextRpi; + u32bit DID : 24; + u32bit rsvd2 : 8; +#endif + union { + ULP_BDE sp; + ULP_BDE64 sp64; + } un; + +} READ_RPI_VAR; + + +/* Structure for MB Command READ_XRI (16) */ + +typedef struct { +#if BIG_ENDIAN_HW + ushort nextXri; + ushort reqXri; + ushort rsvd1; + ushort rpi; + u32bit rsvd2 : 8; + u32bit DID : 24; + u32bit rsvd3 : 8; + u32bit SID : 24; + uint32 rsvd4; + uchar seqId; + uchar rsvd5; + ushort seqCount; + ushort oxId; + ushort rxId; + u32bit rsvd6 : 30; + u32bit si : 1; + u32bit exchOrig : 1; +#endif +#if LITTLE_ENDIAN_HW + ushort reqXri; + ushort nextXri; + ushort rpi; + ushort rsvd1; + u32bit DID : 24; + u32bit rsvd2 : 8; + u32bit SID : 24; + u32bit rsvd3 : 8; + uint32 rsvd4; + ushort seqCount; + uchar rsvd5; + uchar seqId; + ushort rxId; + ushort oxId; + u32bit exchOrig : 1; + u32bit si : 1; + u32bit rsvd6 : 30; +#endif +} READ_XRI_VAR; + + +/* Structure for MB Command READ_REV (17) */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit cv : 1; + u32bit rr : 1; + u32bit rsvd1 : 29; + u32bit rv : 1; +#endif +#if LITTLE_ENDIAN_HW + u32bit rv : 1; + u32bit rsvd1 : 29; + u32bit rr : 1; + u32bit cv : 1; +#endif + uint32 biuRev; + uint32 smRev; + union { + uint32 smFwRev; + struct { +#if BIG_ENDIAN_HW + uchar ProgType; + uchar ProgId; + u16bit ProgVer : 4; + u16bit ProgRev : 4; + u16bit ProgFixLvl : 2; + u16bit ProgDistType : 2; + u16bit DistCnt : 4; +#endif +#if LITTLE_ENDIAN_HW + u16bit DistCnt : 4; + u16bit ProgDistType : 2; + u16bit ProgFixLvl : 2; + u16bit ProgRev : 4; + u16bit ProgVer : 4; + uchar ProgId; + uchar ProgType; +#endif + } b; + } un; + uint32 endecRev; +#if BIG_ENDIAN_HW + uchar feaLevelHigh; + uchar feaLevelLow; + uchar fcphHigh; + uchar fcphLow; +#endif +#if LITTLE_ENDIAN_HW + uchar fcphLow; + uchar fcphHigh; + uchar feaLevelLow; + uchar feaLevelHigh; +#endif + uint32 postKernRev; + uint32 opFwRev; + uchar opFwName[16]; + uint32 sli1FwRev; + uchar sli1FwName[16]; + uint32 sli2FwRev; + uchar 
sli2FwName[16]; + uint32 rsvd2; + uint32 RandomData[7]; +} READ_REV_VAR; + +#define rxSeqRev postKernRev +#define txSeqRev opFwRev + +/* Structure for MB Command READ_LINK_STAT (18) */ + +typedef struct { + uint32 rsvd1; + uint32 linkFailureCnt; + uint32 lossSyncCnt; + + uint32 lossSignalCnt; + uint32 primSeqErrCnt; + uint32 invalidXmitWord; + uint32 crcCnt; + uint32 primSeqTimeout; + uint32 elasticOverrun; + uint32 arbTimeout; +} READ_LNK_VAR; + + +/* Structure for MB Command REG_LOGIN (19) */ +/* Structure for MB Command REG_LOGIN64 (0x93) */ + +typedef struct { +#if BIG_ENDIAN_HW + ushort rsvd1; + ushort rpi; + u32bit rsvd2 : 8; + u32bit did : 24; +#endif +#if LITTLE_ENDIAN_HW + ushort rpi; + ushort rsvd1; + u32bit did : 24; + u32bit rsvd2 : 8; +#endif + union { + ULP_BDE sp; + ULP_BDE64 sp64; + } un; + +} REG_LOGIN_VAR; + +/* Word 30 contents for REG_LOGIN */ +typedef union { + struct { +#if BIG_ENDIAN_HW + u16bit rsvd1 : 12; + u16bit class : 4; + ushort xri; +#endif +#if LITTLE_ENDIAN_HW + ushort xri; + u16bit class : 4; + u16bit rsvd1 : 12; +#endif + } f; + uint32 word; +} REG_WD30; + + +/* Structure for MB Command UNREG_LOGIN (20) */ + +typedef struct { +#if BIG_ENDIAN_HW + ushort rsvd1; + ushort rpi; +#endif +#if LITTLE_ENDIAN_HW + ushort rpi; + ushort rsvd1; +#endif +} UNREG_LOGIN_VAR; + + +/* Structure for MB Command UNREG_D_ID (0x23) */ + +typedef struct { + uint32 did; +} UNREG_D_ID_VAR; + + +/* Structure for MB Command READ_LA (21) */ +/* Structure for MB Command READ_LA64 (0x95) */ + +typedef struct { + uint32 eventTag; /* Event tag */ +#if BIG_ENDIAN_HW + u32bit rsvd1 : 22; + u32bit pb : 1; + u32bit il : 1; + u32bit attType : 8; +#endif +#if LITTLE_ENDIAN_HW + u32bit attType : 8; + u32bit il : 1; + u32bit pb : 1; + u32bit rsvd1 : 22; +#endif +#define AT_RESERVED 0x00 /* Reserved - attType */ +#define AT_LINK_UP 0x01 /* Link is up */ +#define AT_LINK_DOWN 0x02 /* Link is down */ +#if BIG_ENDIAN_HW + uchar granted_AL_PA; + uchar lipAlPs; + uchar 
lipType; + uchar topology; +#endif +#if LITTLE_ENDIAN_HW + uchar topology; + uchar lipType; + uchar lipAlPs; + uchar granted_AL_PA; +#endif +#define LT_PORT_INIT 0x00 /* An L_PORT initing (F7, AL_PS) - lipType */ +#define LT_PORT_ERR 0x01 /* Err @L_PORT rcv'er (F8, AL_PS) */ +#define LT_RESET_APORT 0x02 /* Lip Reset of some other port */ +#define LT_RESET_MYPORT 0x03 /* Lip Reset of my port */ +#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */ +#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */ + + union { + ULP_BDE lilpBde; /* This BDE points to a 128 byte buffer to */ + /* store the LILP AL_PA position map into */ + ULP_BDE64 lilpBde64; + } un; +#if BIG_ENDIAN_HW + u32bit Dlu : 1; + u32bit Dtf : 1; + u32bit Drsvd2 : 14; + u32bit DlnkSpeed : 8; + u32bit DnlPort : 4; + u32bit Dtx : 2; + u32bit Drx : 2; +#endif +#if LITTLE_ENDIAN_HW + u32bit Drx : 2; + u32bit Dtx : 2; + u32bit DnlPort : 4; + u32bit DlnkSpeed : 8; + u32bit Drsvd2 : 14; + u32bit Dtf : 1; + u32bit Dlu : 1; +#endif +#if BIG_ENDIAN_HW + u32bit Ulu : 1; + u32bit Utf : 1; + u32bit Ursvd2 : 14; + u32bit UlnkSpeed : 8; + u32bit UnlPort : 4; + u32bit Utx : 2; + u32bit Urx : 2; +#endif +#if LITTLE_ENDIAN_HW + u32bit Urx : 2; + u32bit Utx : 2; + u32bit UnlPort : 4; + u32bit UlnkSpeed : 8; + u32bit Ursvd2 : 14; + u32bit Utf : 1; + u32bit Ulu : 1; +#endif +#define LA_1GHZ_LINK 4 /* lnkSpeed */ +#define LA_2GHZ_LINK 8 /* lnkSpeed */ + +} READ_LA_VAR; + + +/* Structure for MB Command CLEAR_LA (22) */ + +typedef struct { + uint32 eventTag; /* Event tag */ + uint32 rsvd1; +} CLEAR_LA_VAR; + +/* Structure for MB Command DUMP */ + +typedef struct { +#if BIG_ENDIAN_HW + u32bit rsvd : 25 ; + u32bit ra : 1 ; + u32bit co : 1 ; + u32bit cv : 1 ; + u32bit type : 4 ; + u32bit entry_index : 16 ; + u32bit region_id : 16 ; +#endif +#if LITTLE_ENDIAN_HW + u32bit type : 4 ; + u32bit cv : 1 ; + u32bit co : 1 ; + u32bit ra : 1 ; + u32bit rsvd : 25 ; + u32bit region_id : 16 ; + u32bit entry_index : 16 ; +#endif + uint32 
rsvd1; + uint32 word_cnt ; + uint32 resp_offset ; +} DUMP_VAR ; + +#define DMP_MEM_REG 0x1 +#define DMP_NV_PARAMS 0x2 + +#define DMP_REGION_VPD 0xe +#define DMP_VPD_SIZE 0x100 + +/* Structure for MB Command CONFIG_PORT (0x88) */ + +typedef struct { + uint32 pcbLen; + uint32 pcbLow; /* bit 31:0 of memory based port config block */ + uint32 pcbHigh; /* bit 63:32 of memory based port config block */ + uint32 hbainit[5]; +} CONFIG_PORT_VAR; + + +/* SLI-2 Port Control Block */ + +/* SLIM POINTER */ +#define SLIMOFF 0x30 /* WORD */ + +typedef struct _SLI2_RDSC { + uint32 cmdEntries; + uint32 cmdAddrLow; + uint32 cmdAddrHigh; + + uint32 rspEntries; + uint32 rspAddrLow; + uint32 rspAddrHigh; +} SLI2_RDSC; + +typedef struct _PCB { +#if BIG_ENDIAN_HW + u32bit type : 8; +#define TYPE_NATIVE_SLI2 0x01; + u32bit feature : 8; +#define FEATURE_INITIAL_SLI2 0x01; + u32bit rsvd : 12; + u32bit maxRing : 4; +#endif +#if LITTLE_ENDIAN_HW + u32bit maxRing : 4; + u32bit rsvd : 12; + u32bit feature : 8; +#define FEATURE_INITIAL_SLI2 0x01; + u32bit type : 8; +#define TYPE_NATIVE_SLI2 0x01; +#endif + + uint32 mailBoxSize; + uint32 mbAddrLow; + uint32 mbAddrHigh; + + uint32 hgpAddrLow; + uint32 hgpAddrHigh; + + uint32 pgpAddrLow; + uint32 pgpAddrHigh; + SLI2_RDSC rdsc[ MAX_RINGS]; +} PCB; + +typedef struct { +#if BIG_ENDIAN_HW + u32bit rsvd0 : 27; + u32bit discardFarp : 1; + u32bit IPEnable : 1; + u32bit nodeName : 1; + u32bit portName : 1; + u32bit filterEnable : 1; +#endif +#if LITTLE_ENDIAN_HW + u32bit filterEnable : 1; + u32bit portName : 1; + u32bit nodeName : 1; + u32bit IPEnable : 1; + u32bit discardFarp : 1; + u32bit rsvd : 27; +#endif + NAME_TYPE portname; + NAME_TYPE nodename; + uint32 rsvd1; + uint32 rsvd2; + uint32 rsvd3; + uint32 IPAddress; +} CONFIG_FARP_VAR; + + +/* Union of all Mailbox Command types */ + +typedef union { + uint32 varWords[31]; + LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */ + READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */ + WRITE_NV_VAR varWTnvp; /* cmd = 
3 (WRITE_NVPARMS) */ + BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */ + INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */ + DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */ + CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */ + PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */ + CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */ + RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */ + READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */ + READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */ + READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */ + READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */ + READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */ + READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */ + READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */ + READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */ + REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */ + UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */ + READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */ + CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */ + DUMP_VAR varDmp ; /* Warm Start DUMP mbx cmd */ + UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */ + CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ + CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) */ +} MAILVARIANTS; + +#define MAILBOX_CMD_WSIZE 32 + +/* + * SLI-2 specific structures + */ + +typedef struct _SLI1_DESC { + RINGS mbxCring[ 4]; + uint32 mbxUnused[ 24]; +} SLI1_DESC; + +typedef struct { + uint32 cmdPutInx; + uint32 rspGetInx; +} HGP; + +typedef struct { + uint32 cmdGetInx; + uint32 rspPutInx; +} PGP; + +typedef struct _SLI2_DESC { + HGP host[ MAX_RINGS]; + uint32 unused[ 16]; + PGP port[ MAX_RINGS]; +} SLI2_DESC; + +typedef union { + SLI1_DESC s1; + SLI2_DESC s2; +} SLI_VAR; + +typedef volatile struct { +#if BIG_ENDIAN_HW + ushort mbxStatus; + uchar mbxCommand; + u8bit mbxReserved : 6; + u8bit mbxHc : 1; + u8bit mbxOwner : 1; /* Low order bit first word */ 
+#endif +#if LITTLE_ENDIAN_HW + u8bit mbxOwner : 1; /* Low order bit first word */ + u8bit mbxHc : 1; + u8bit mbxReserved : 6; + uchar mbxCommand; + ushort mbxStatus; +#endif + MAILVARIANTS un; + SLI_VAR us; +} MAILBOX, *PMAILBOX; + +/* + * End Structure Definitions for Mailbox Commands + */ + + +/* + * Begin Structure Definitions for IOCB Commands + */ + +typedef struct { +#if BIG_ENDIAN_HW + uchar statAction; + uchar statRsn; + uchar statBaExp; + uchar statLocalError; +#endif +#if LITTLE_ENDIAN_HW + uchar statLocalError; + uchar statBaExp; + uchar statRsn; + uchar statAction; +#endif + /* statAction FBSY reason codes */ +#define FBSY_RSN_MASK 0xF0 /* Rsn stored in upper nibble */ +#define FBSY_FABRIC_BSY 0x10 /* F_bsy due to Fabric BSY */ +#define FBSY_NPORT_BSY 0x30 /* F_bsy due to N_port BSY */ + + /* statAction PBSY action codes */ +#define PBSY_ACTION1 0x01 /* Sequence terminated - retry */ +#define PBSY_ACTION2 0x02 /* Sequence active - retry */ + + /* statAction P/FRJT action codes */ +#define RJT_RETRYABLE 0x01 /* Retryable class of error */ +#define RJT_NO_RETRY 0x02 /* Non-Retryable class of error */ + + /* statRsn LS_RJT reason codes defined in LS_RJT structure */ + + /* statRsn P_BSY reason codes */ +#define PBSY_NPORT_BSY 0x01 /* Physical N_port BSY */ +#define PBSY_RESRCE_BSY 0x03 /* N_port resource BSY */ +#define PBSY_VU_BSY 0xFF /* See VU field for rsn */ + + /* statRsn P/F_RJT reason codes */ +#define RJT_BAD_D_ID 0x01 /* Invalid D_ID field */ +#define RJT_BAD_S_ID 0x02 /* Invalid S_ID field */ +#define RJT_UNAVAIL_TEMP 0x03 /* N_Port unavailable temp. */ +#define RJT_UNAVAIL_PERM 0x04 /* N_Port unavailable perm. 
*/ +#define RJT_UNSUP_CLASS 0x05 /* Class not supported */ +#define RJT_DELIM_ERR 0x06 /* Delimiter usage error */ +#define RJT_UNSUP_TYPE 0x07 /* Type not supported */ +#define RJT_BAD_CONTROL 0x08 /* Invalid link conrtol */ +#define RJT_BAD_RCTL 0x09 /* R_CTL invalid */ +#define RJT_BAD_FCTL 0x0A /* F_CTL invalid */ +#define RJT_BAD_OXID 0x0B /* OX_ID invalid */ +#define RJT_BAD_RXID 0x0C /* RX_ID invalid */ +#define RJT_BAD_SEQID 0x0D /* SEQ_ID invalid */ +#define RJT_BAD_DFCTL 0x0E /* DF_CTL invalid */ +#define RJT_BAD_SEQCNT 0x0F /* SEQ_CNT invalid */ +#define RJT_BAD_PARM 0x10 /* Param. field invalid */ +#define RJT_XCHG_ERR 0x11 /* Exchange error */ +#define RJT_PROT_ERR 0x12 /* Protocol error */ +#define RJT_BAD_LENGTH 0x13 /* Invalid Length */ +#define RJT_UNEXPECTED_ACK 0x14 /* Unexpected ACK */ +#define RJT_LOGIN_REQUIRED 0x16 /* Login required */ +#define RJT_TOO_MANY_SEQ 0x17 /* Excessive sequences */ +#define RJT_XCHG_NOT_STRT 0x18 /* Exchange not started */ +#define RJT_UNSUP_SEC_HDR 0x19 /* Security hdr not supported */ +#define RJT_UNAVAIL_PATH 0x1A /* Fabric Path not available */ +#define RJT_VENDOR_UNIQUE 0xFF /* Vendor unique error */ + + /* statRsn BA_RJT reason codes */ +#define BARJT_BAD_CMD_CODE 0x01 /* Invalid command code */ +#define BARJT_LOGICAL_ERR 0x03 /* Logical error */ +#define BARJT_LOGICAL_BSY 0x05 /* Logical busy */ +#define BARJT_PROTOCOL_ERR 0x07 /* Protocol error */ +#define BARJT_VU_ERR 0xFF /* Vendor unique error */ + + /* LS_RJT reason explanation defined in LS_RJT structure */ + + /* BA_RJT reason explanation */ +#define BARJT_EXP_INVALID_ID 0x01 /* Invalid OX_ID/RX_ID */ +#define BARJT_EXP_ABORT_SEQ 0x05 /* Abort SEQ, no more info */ + + /* Localy detected errors */ +#define IOERR_SUCCESS 0x00 /* statLocalError */ +#define IOERR_MISSING_CONTINUE 0x01 +#define IOERR_SEQUENCE_TIMEOUT 0x02 +#define IOERR_INTERNAL_ERROR 0x03 +#define IOERR_INVALID_RPI 0x04 +#define IOERR_NO_XRI 0x05 +#define IOERR_ILLEGAL_COMMAND 0x06 
+#define IOERR_XCHG_DROPPED 0x07 +#define IOERR_ILLEGAL_FIELD 0x08 +#define IOERR_BAD_CONTINUE 0x09 +#define IOERR_TOO_MANY_BUFFERS 0x0A +#define IOERR_RCV_BUFFER_WAITING 0x0B +#define IOERR_NO_CONNECTION 0x0C +#define IOERR_TX_DMA_FAILED 0x0D +#define IOERR_RX_DMA_FAILED 0x0E +#define IOERR_ILLEGAL_FRAME 0x0F +#define IOERR_EXTRA_DATA 0x10 +#define IOERR_NO_RESOURCES 0x11 +#define IOERR_RESERVED 0x12 +#define IOERR_ILLEGAL_LENGTH 0x13 +#define IOERR_UNSUPPORTED_FEATURE 0x14 +#define IOERR_ABORT_IN_PROGRESS 0x15 +#define IOERR_ABORT_REQUESTED 0x16 +#define IOERR_RECEIVE_BUFFER_TIMEOUT 0x17 +#define IOERR_LOOP_OPEN_FAILURE 0x18 +#define IOERR_RING_RESET 0x19 +#define IOERR_LINK_DOWN 0x1A +#define IOERR_CORRUPTED_DATA 0x1B +#define IOERR_CORRUPTED_RPI 0x1C +#define IOERR_OUT_OF_ORDER_DATA 0x1D +#define IOERR_OUT_OF_ORDER_ACK 0x1E +#define IOERR_DUP_FRAME 0x1F +#define IOERR_LINK_CONTROL_FRAME 0x20 /* ACK_N received */ +#define IOERR_BAD_HOST_ADDRESS 0x21 +#define IOERR_RCV_HDRBUF_WAITING 0x22 +#define IOERR_MISSING_HDR_BUFFER 0x23 +#define IOERR_MSEQ_CHAIN_CORRUPTED 0x24 +#define IOERR_ABORTMULT_REQUESTED 0x25 +#define IOERR_BUFFER_SHORTAGE 0x28 +} PARM_ERR; + +typedef union { + struct { +#if BIG_ENDIAN_HW + uchar Rctl; /* R_CTL field */ + uchar Type; /* TYPE field */ + uchar Dfctl; /* DF_CTL field */ + uchar Fctl; /* Bits 0-7 of IOCB word 5 */ +#endif +#if LITTLE_ENDIAN_HW + uchar Fctl; /* Bits 0-7 of IOCB word 5 */ + uchar Dfctl; /* DF_CTL field */ + uchar Type; /* TYPE field */ + uchar Rctl; /* R_CTL field */ +#endif + +#define BC 0x02 /* Broadcast Received - Fctl */ +#define SI 0x04 /* Sequence Initiative */ +#define LA 0x08 /* Ignore Link Attention state */ +#define LS 0x80 /* Last Sequence */ + } hcsw; + uint32 reserved; +} WORD5; + + +/* IOCB Command template for a generic response */ +typedef struct { + uint32 reserved[4]; + PARM_ERR perr; +} GENERIC_RSP; + + +/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / XMIT_ELS */ +typedef struct { + 
ULP_BDE xrsqbde[2]; + uint32 xrsqRo; /* Starting Relative Offset */ + WORD5 w5; /* Header control/status word */ +} XR_SEQ_FIELDS; + +/* IOCB Command template for ELS_REQUEST */ +typedef struct { + ULP_BDE elsReq; + ULP_BDE elsRsp; +#if BIG_ENDIAN_HW + u32bit word4Rsvd : 7; + u32bit fl : 1; + u32bit myID : 24; + u32bit word5Rsvd : 8; + u32bit remoteID : 24; +#endif +#if LITTLE_ENDIAN_HW + u32bit myID : 24; + u32bit fl : 1; + u32bit word4Rsvd : 7; + u32bit remoteID : 24; + u32bit word5Rsvd : 8; +#endif +} ELS_REQUEST; + +/* IOCB Command template for RCV_ELS_REQ */ +typedef struct { + ULP_BDE elsReq[2]; + uint32 parmRo; +#if BIG_ENDIAN_HW + u32bit word5Rsvd : 8; + u32bit remoteID : 24; +#endif +#if LITTLE_ENDIAN_HW + u32bit remoteID : 24; + u32bit word5Rsvd : 8; +#endif +} RCV_ELS_REQ; + +/* IOCB Command template for ABORT / CLOSE_XRI */ +typedef struct { + uint32 rsvd[3]; + uint32 abortType; +#define ABORT_TYPE_ABTX 0x00000000 +#define ABORT_TYPE_ABTS 0x00000001 + uint32 parm; +#if BIG_ENDIAN_HW + ushort abortContextTag; /* ulpContext from command to abort/close */ + ushort abortIoTag; /* ulpIoTag from command to abort/close */ +#endif +#if LITTLE_ENDIAN_HW + ushort abortIoTag; /* ulpIoTag from command to abort/close */ + ushort abortContextTag; /* ulpContext from command to abort/close */ +#endif +} AC_XRI; + +/* IOCB Command template for GET_RPI */ +typedef struct { + uint32 rsvd[4]; + uint32 parmRo; +#if BIG_ENDIAN_HW + u32bit word5Rsvd : 8; + u32bit remoteID : 24; +#endif +#if LITTLE_ENDIAN_HW + u32bit remoteID : 24; + u32bit word5Rsvd : 8; +#endif +} GET_RPI; + +/* IOCB Command template for all FCPI commands */ +typedef struct { + ULP_BDE fcpi_cmnd; /* FCP_CMND payload descriptor */ + ULP_BDE fcpi_rsp; /* Rcv buffer */ + uint32 fcpi_parm; + uint32 fcpi_XRdy; /* transfer ready for IWRITE */ +} FCPI_FIELDS; + +/* IOCB Command template for all FCPT commands */ +typedef struct { + ULP_BDE fcpt_Buffer[2]; /* FCP_CMND payload descriptor */ + uint32 fcpt_Offset; + 
uint32 fcpt_Length; /* transfer ready for IWRITE */ +} FCPT_FIELDS; + +/* SLI-2 IOCB structure definitions */ + +/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */ +typedef struct { + ULP_BDL bdl; + uint32 xrsqRo; /* Starting Relative Offset */ + WORD5 w5; /* Header control/status word */ +} XMT_SEQ_FIELDS64; + +/* IOCB Command template for 64 bit RCV_SEQUENCE64 */ +typedef struct { + ULP_BDE64 rcvBde; + uint32 rsvd1; + uint32 xrsqRo; /* Starting Relative Offset */ + WORD5 w5; /* Header control/status word */ +} RCV_SEQ_FIELDS64; + +/* IOCB Command template for ELS_REQUEST64 */ +typedef struct { + ULP_BDL bdl; +#if BIG_ENDIAN_HW + u32bit word4Rsvd : 7; + u32bit fl : 1; + u32bit myID : 24; + u32bit word5Rsvd : 8; + u32bit remoteID : 24; +#endif +#if LITTLE_ENDIAN_HW + u32bit myID : 24; + u32bit fl : 1; + u32bit word4Rsvd : 7; + u32bit remoteID : 24; + u32bit word5Rsvd : 8; +#endif +} ELS_REQUEST64; + +/* IOCB Command template for GEN_REQUEST64 */ +typedef struct { + ULP_BDL bdl; + uint32 xrsqRo; /* Starting Relative Offset */ + WORD5 w5; /* Header control/status word */ +} GEN_REQUEST64; + +/* IOCB Command template for RCV_ELS_REQ64 */ +typedef struct { + ULP_BDE64 elsReq; + uint32 rcvd1; + uint32 parmRo; +#if BIG_ENDIAN_HW + u32bit word5Rsvd : 8; + u32bit remoteID : 24; +#endif +#if LITTLE_ENDIAN_HW + u32bit remoteID : 24; + u32bit word5Rsvd : 8; +#endif +} RCV_ELS_REQ64; + +/* IOCB Command template for all 64 bit FCPI commands */ +typedef struct { + ULP_BDL bdl; + uint32 fcpi_parm; + uint32 fcpi_XRdy; /* transfer ready for IWRITE */ +} FCPI_FIELDS64; + +/* IOCB Command template for all 64 bit FCPT commands */ +typedef struct { + ULP_BDL bdl; + uint32 fcpt_Offset; + uint32 fcpt_Length; /* transfer ready for IWRITE */ +} FCPT_FIELDS64; + +typedef volatile struct _IOCB { /* IOCB structure */ + union { + GENERIC_RSP grsp; /* Generic response */ + XR_SEQ_FIELDS xrseq; /* XMIT / BCAST / RCV_SEQUENCE cmd */ + ULP_BDE cont[3]; /* up to 3 continuation bdes 
*/ + ELS_REQUEST elsreq; /* ELS_REQUEST template */ + RCV_ELS_REQ rcvels; /* RCV_ELS_REQ template */ + AC_XRI acxri; /* ABORT / CLOSE_XRI template */ + GET_RPI getrpi; /* GET_RPI template */ + FCPI_FIELDS fcpi; /* FCPI template */ + FCPT_FIELDS fcpt; /* FCPT template */ + + /* SLI-2 structures */ + + ULP_BDE64 cont64[ 2]; /* up to 2 64 bit continuation bde_64s */ + ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */ + GEN_REQUEST64 genreq64; /* GEN_REQUEST template */ + RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */ + XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */ + FCPI_FIELDS64 fcpi64; /* FCPI 64 bit template */ + FCPT_FIELDS64 fcpt64; /* FCPT 64 bit template */ + + uint32 ulpWord[IOCB_WORD_SZ-2]; /* generic 6 'words' */ + } un; + union { + struct { +#if BIG_ENDIAN_HW + ushort ulpContext; /* High order bits word 6 */ + ushort ulpIoTag; /* Low order bits word 6 */ +#endif +#if LITTLE_ENDIAN_HW + ushort ulpIoTag; /* Low order bits word 6 */ + ushort ulpContext; /* High order bits word 6 */ +#endif + } t1; + struct { +#if BIG_ENDIAN_HW + ushort ulpContext; /* High order bits word 6 */ + u16bit ulpIoTag1 : 2; /* Low order bits word 6 */ + u16bit ulpIoTag0 : 14; /* Low order bits word 6 */ +#endif +#if LITTLE_ENDIAN_HW + u16bit ulpIoTag0 : 14; /* Low order bits word 6 */ + u16bit ulpIoTag1 : 2; /* Low order bits word 6 */ + ushort ulpContext; /* High order bits word 6 */ +#endif + } t2; + } un1; +#define ulpContext un1.t1.ulpContext +#define ulpIoTag un1.t1.ulpIoTag +#define ulpIoTag0 un1.t2.ulpIoTag0 +#define ulpDelayXmit un1.t2.ulpIoTag1 +#define IOCB_DELAYXMIT_MSK 0x3000 +#if BIG_ENDIAN_HW + u32bit ulpRsvdByte : 8; + u32bit ulpXS : 1; + u32bit ulpFCP2Rcvy : 1; + u32bit ulpPU : 2; + u32bit ulpIr : 1; + u32bit ulpClass : 3; + u32bit ulpCommand : 8; + u32bit ulpStatus : 4; + u32bit ulpBdeCount : 2; + u32bit ulpLe : 1; + u32bit ulpOwner : 1; /* Low order bit word 7 */ +#endif +#if LITTLE_ENDIAN_HW + u32bit ulpOwner : 1; /* Low order bit word 7 */ + u32bit ulpLe : 
1; + u32bit ulpBdeCount : 2; + u32bit ulpStatus : 4; + u32bit ulpCommand : 8; + u32bit ulpClass : 3; + u32bit ulpIr : 1; + u32bit ulpPU : 2; + u32bit ulpFCP2Rcvy : 1; + u32bit ulpXS : 1; + u32bit ulpRsvdByte : 8; +#endif + +#define ulpTimeout ulpRsvdByte + +#define IOCB_FCP 1 /* IOCB is used for FCP ELS cmds - ulpRsvByte */ +#define IOCB_IP 2 /* IOCB is used for IP ELS cmds */ +#define PARM_UNUSED 0 /* PU field (Word 4) not used */ +#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */ +#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */ +#define CLASS1 0 /* Class 1 */ +#define CLASS2 1 /* Class 2 */ +#define CLASS3 2 /* Class 3 */ +#define CLASS_FCP_INTERMIX 7 /* FCP Data->Cls 1, all else->Cls 2 */ + +#define IOSTAT_SUCCESS 0x0 /* ulpStatus */ +#define IOSTAT_FCP_RSP_ERROR 0x1 +#define IOSTAT_REMOTE_STOP 0x2 +#define IOSTAT_LOCAL_REJECT 0x3 +#define IOSTAT_NPORT_RJT 0x4 +#define IOSTAT_FABRIC_RJT 0x5 +#define IOSTAT_NPORT_BSY 0x6 +#define IOSTAT_FABRIC_BSY 0x7 +#define IOSTAT_INTERMED_RSP 0x8 +#define IOSTAT_LS_RJT 0x9 +#define IOSTAT_BA_RJT 0xA + +} IOCB, *PIOCB; + +typedef struct { + IOCB iocb; /* iocb entry */ + uchar * q; /* ptr to next iocb entry */ + uchar * bp; /* ptr to data buffer structure */ + uchar * info; /* ptr to data information structure */ + uchar * bpl; /* ptr to data BPL structure */ + uchar * ndlp; /* ptr to the ndlp structure */ + uchar retry; /* retry counter for IOCB cmd - if needed */ + uchar rsvd1; + ushort rsvd2; +} IOCBQ; + +typedef struct { + volatile uint32 mb[MAILBOX_CMD_WSIZE]; + uchar * q; + uchar * bp; /* ptr to data buffer structure */ +} MAILBOXQ; + +/* Given a pointer to the start of the ring, and the slot number of + * the desired iocb entry, calc a pointer to that entry. 
+ */ +#define IOCB_ENTRY(ring,slot) ((IOCB *)(((uchar *)((ulong)ring)) + (((uint32)((ulong)slot))<< 5))) + +/* + * End Structure Definitions for IOCB Commands + */ + +typedef struct { + MAILBOX mbx; + IOCB IOCBs[MAX_BIOCB]; +} SLIM; + +typedef struct { + MAILBOX mbx; + PCB pcb; + IOCB IOCBs[MAX_SLI2_IOCB]; +} SLI2_SLIM; + +/* +* FDMI +* HBA MAnagement Operations Command Codes +*/ +#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */ +#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */ +#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */ +#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */ +#define SLI_MGMT_RHBA 0x200 /* Register HBA */ +#define SLI_MGMT_RHAT 0x201 /* Register HBA atttributes */ +#define SLI_MGMT_RPRT 0x210 /* Register Port */ +#define SLI_MGMT_RPA 0x211 /* Register Port attributes */ +#define SLI_MGMT_DHBA 0x300 /* De-register HBA */ +#define SLI_MGMT_DPRT 0x310 /* De-register Port */ + +/* + * Management Service Subtypes + */ +#define SLI_CT_FDMI_Subtypes 0x10 + +/* + * HBA Management Service Reject Code + */ +#define REJECT_CODE 0x9 /* Unable to perform command request */ +/* + * HBA Management Service Reject Reason Code + * Please refer to the Reason Codes above + */ + +/* + * HBA Attribute Types + */ +#define NODE_NAME 0x1 +#define MANUFACTURER 0x2 +#define SERIAL_NUMBER 0x3 +#define MODEL 0x4 +#define MODEL_DESCRIPTION 0x5 +#define HARDWARE_VERSION 0x6 +#define DRIVER_VERSION 0x7 +#define OPTION_ROM_VERSION 0x8 +#define FIRMWARE_VERSION 0x9 +#define VENDOR_SPECIFIC 0xa +#define DRIVER_NAME 0xb +#define OS_NAME_VERSION 0xc +#define MAX_CT_PAYLOAD_LEN 0xd + +/* + * Port Attrubute Types + */ +#define SUPPORTED_FC4_TYPES 0x1 +#define SUPPORTED_SPEED 0x2 +#define PORT_SPEED 0x3 +#define MAX_FRAME_SIZE 0x4 +#define OS_DEVICE_NAME 0x5 + +union AttributesDef { + /* Structure is in Big Endian format */ + struct { + u32bit AttrType: 16; + u32bit AttrLen: 16; + } bits; + uint32 word; +}; + +/* + * HBA Attribute Entry (8 - 260 bytes) + */ 
+typedef struct +{ + union AttributesDef ad; + union { + uint32 VendorSpecific; + uint32 SupportSpeed; + uint32 PortSpeed; + uint32 MaxFrameSize; + uint32 MaxCTPayloadLen; + uchar SupportFC4Types[32]; + uchar OsDeviceName[256]; + uchar Manufacturer[64]; + uchar SerialNumber[64]; + uchar Model[256]; + uchar ModelDescription[256]; + uchar HardwareVersion[256]; + uchar DriverVersion[256]; + uchar OptionROMVersion[256]; + uchar FirmwareVersion[256]; + uchar DriverName[256]; + NAME_TYPE NodeName; + } un; +} ATTRIBUTE_ENTRY, *PATTRIBUTE_ENTRY; + + +/* + * HBA Attribute Block + */ +typedef struct +{ + uint32 EntryCnt; /* Number of HBA attribute entries */ + ATTRIBUTE_ENTRY Entry; /* Variable-length array */ +} ATTRIBUTE_BLOCK, *PATTRIBUTE_BLOCK; + + +/* + * Port Entry + */ +typedef struct +{ + NAME_TYPE PortName; +} PORT_ENTRY, *PPORT_ENTRY; + +/* + * HBA Identifier + */ +typedef struct +{ + NAME_TYPE PortName; +} HBA_IDENTIFIER, *PHBA_IDENTIFIER; + +/* + * Registered Port List Format + */ +typedef struct +{ + uint32 EntryCnt; + PORT_ENTRY pe; /* Variable-length array */ +} REG_PORT_LIST, *PREG_PORT_LIST; + +/* + * Register HBA(RHBA) + */ +typedef struct +{ + HBA_IDENTIFIER hi; + REG_PORT_LIST rpl; /* variable-length array */ +} REG_HBA, *PREG_HBA; + +/* + * Register HBA Attributes (RHAT) + */ +typedef struct +{ + NAME_TYPE HBA_PortName; + ATTRIBUTE_BLOCK ab; +} REG_HBA_ATTRIBUTE, *PREG_HBA_ATTRIBUTE; + +/* + * Register Port Attributes (RPA) + */ +typedef struct +{ + NAME_TYPE HBA_PortName; + NAME_TYPE PortName; + ATTRIBUTE_BLOCK ab; +} REG_PORT_ATTRIBUTE, *PREG_PORT_ATTRIBUTE; + +/* + * Get Registered HBA List (GRHL) Accept Payload Format + */ +typedef struct +{ + uint32 HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */ + NAME_TYPE HBA_PortName; /* Variable-length array */ +} GRHL_ACC_PAYLOAD, *PGRHL_ACC_PAYLOAD; + +/* + * Get Registered Port List (GRPL) Accept Payload Format + */ +typedef struct +{ + uint32 RPL_Entry_Cnt; /* Number of Registered Port Entries */ 
+ PORT_ENTRY Reg_Port_Entry[1]; /* Variable-length array */ +} GRPL_ACC_PAYLOAD, *PGRPL_ACC_PAYLOAD; + +/* + * Get Port Attributes (GPAT) Accept Payload Format + */ + +typedef struct +{ + ATTRIBUTE_BLOCK pab; +} GPAT_ACC_PAYLOAD, *PGPAT_ACC_PAYLOAD; +#endif /* _H_FC_HW */ diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fc_os.h current/drivers/scsi/lpfc/fc_os.h --- reference/drivers/scsi/lpfc/fc_os.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fc_os.h 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,633 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#ifndef _H_FCOS +#define _H_FCOS + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s)) +#endif /* __KERNEL__ */ + + +#ifdef LP6000 +#ifdef __KERNEL__ +/* From drivers/scsi */ +#include "hosts.h" + +/* The driver is comditionally compiled to utilize the old scsi error + * handling logic, or the make use of the new scsi logic (use_new_eh_code). + * To use the old error handling logic, delete the line "#define FC_NEW_EH 1". + * To use the new error handling logic, add the line "#define FC_NEW_EH 1". + * + * #define FC_NEW_EH 1 + */ + +/* Turn on new error handling for 2.4 kernel base and on */ +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,43) +#define FC_NEW_EH 1 +#endif + +#endif /* __KERNEL__ */ + +#ifndef __KERNEL__ +struct net_device_stats +{ + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* no space in linux buffers */ + unsigned long tx_dropped; /* no space available in linux */ + unsigned long multicast; /* multicast packets received */ + unsigned long collisions; + + /* detailed rx_errors: */ + unsigned long rx_length_errors; + unsigned long rx_over_errors; /* receiver ring buff overflow */ + unsigned long rx_crc_errors; /* recved pkt with crc error */ + unsigned long rx_frame_errors; /* recv'd frame alignment error */ + unsigned long rx_fifo_errors; /* recv'r fifo overrun */ + unsigned long rx_missed_errors; /* receiver missed packet */ + + /* detailed tx_errors */ + unsigned long tx_aborted_errors; + unsigned long tx_carrier_errors; + unsigned long 
tx_fifo_errors; + unsigned long tx_heartbeat_errors; + unsigned long tx_window_errors; + + /* for cslip etc */ + unsigned long rx_compressed; + unsigned long tx_compressed; +}; +#define enet_statistics net_device_stats +#endif /* __KERNEL__ */ + +typedef unsigned char uchar; +/* both ushort and ulong may be defined*/ + +#ifndef __KERNEL__ +#ifndef _SYS_TYPES_H +typedef unsigned short ushort; +typedef unsigned long ulong; +#endif +#endif /* __KERNEL__ */ + + +#define SELTO_TIMEOUT p_dev_ctl->selto_timeout + +#define _local_ static +#define _static_ +#define _forward_ extern + +typedef unsigned short uint16; +typedef unsigned int uint32; +typedef long long uint64; +#ifdef __KERNEL__ +#if LINUX_VERSION_CODE < LinuxVersionCode(2,2,18) +typedef unsigned long dma_addr_t; +#endif +#endif + +#if BITS_PER_LONG > 32 +/* These macros are for 64 bit support */ +#define putPaddrLow(addr) ((uint32) \ +(0xffffffff & (unsigned long)(addr))) +#define putPaddrHigh(addr) ((uint32) \ + (0xffffffff & (((unsigned long)(addr))>>32))) +#define getPaddr(high, low) ((unsigned long) \ + ((((unsigned long) (high)) << 32)|((unsigned long)(low)))) + +#else +/* Macro's to support 32 bit addressing */ +#define putPaddrLow(addr) ((uint32)(addr)) +#define putPaddrHigh(addr) 0 +#define getPaddr(high, low) ((uint32)(low)) +#endif + +/* Macro to get from adapter number to ddi instance */ +#define fc_brd_to_inst(brd) fcinstance[brd] + +#define DELAYMS(ms) lpfc_DELAYMS(p_dev_ctl, ms) +#define DELAYMSctx(ms) lpfc_DELAYMS(p_dev_ctl, ms) + +#define EXPORT_LINUX 1 + +#ifdef CONFIG_PPC64 +#define powerpc +#endif + +#ifdef powerpc +#define LITTLE_ENDIAN_HOST 0 /* For fc.h */ +#define BIG_ENDIAN_HW 1 /* For fc_hw.h */ +#else +#define LITTLE_ENDIAN_HOST 1 /* For fc.h */ +#define LITTLE_ENDIAN_HW 1 /* For fc_hw.h */ +#endif /* powerpc */ + +#define MACADDR_LEN 6 /* MAC network address length */ +#define FC_LVL 0 +#define CLK_LVL 0 +#define EVENT_NULL (-1) +#define DMA_READ 1 /* flag argument to D_MAP_LIST */ 
+#ifndef NULL /* define NULL if not defined*/ +#define NULL (0) +#endif +#define FALSE 0 +#define TRUE 1 +#define DFC_IOCTL 1 + +/* Return value for PCI interrupt routine */ +#define INTR_SUCC 1 /* Claimed interrupt, detected work to do */ +#define INTR_FAIL 0 /* Doesn't claim interrupt */ + + +#define con_print(s, a, b) \ + fc_print(s, (void *)((ulong)a), (void *)((ulong)b)) + + +/* These calls are used before, and after, access to a shared memory + * access to the adapter. + */ +#define FC_MAP_MEM(p1) (void *) (*(p1)) /* sigh */ +#define FC_MAP_IO(p1) (void *) (*(p1)) /* sigh */ +#define FC_UNMAP_MEMIO(p1) /* groan */ + +#define fc_mpdata_outcopy(p, m, d, c) fc_bcopy((m)->virt, d, c) +#define fc_mpdata_incopy(p, m, s, c) fc_bcopy(s, (m)->virt, c) +#define fc_mpdata_sync(h, a, b, c) lpfc_mpdata_sync(p_dev_ctl, h, a, b, c) + +#define DDI_DMA_SYNC_FORKERNEL 1 +#define DDI_DMA_SYNC_FORCPU 1 +#define DDI_DMA_SYNC_FORDEV 2 + +/* This call is used to wakeup someone waiting to send a SCSI + * administrative command to the drive, only one outstanding + * command can be sent to each device. 
+ */ +#define fc_admin_wakeup(p, d, bp) + +#define lpfc_restart_device(dev_ptr) +#define lpfc_handle_fcp_error(p_pkt, p_fcptr, p_cmd) \ + lpfc_fcp_error(p_fcptr, p_cmd) +#define STAT_ABORTED 0 + +struct watchdog { + void (*func)(void *); /* completion handler */ + uint32 restart; /* restart time (in seconds) */ + uint32 count; /* time remaining */ + ulong timeout_id; + struct timer_list timer; + int stopping; +}; + +#define ntimerisset(p1) (*(p1)) +#define ntimerclear(p1) (*(p1) = 0) +#define ntimercmp(p1, p2, cmp) ((p1) cmp (p2)) + + +/* This is the dio and d_iovec structures for the d_map_* services */ +typedef struct d_iovec { + void *stub; +} *d_iovec_t; + +struct dio { + void *stub; +}; +typedef struct dio * dio_t; + +#ifdef __KERNEL__ +#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,43) +#define pci_map_single(dev, address, size, direction) virt_to_bus(address) +#define pci_unmap_single(dev, address, size, direction) +#define pci_alloc_consistent(dev, s, h) fc_pci_alloc_consistent(dev, s, h) +#define pci_free_consistent(dev, s, v, h) fc_pci_free_consistent(dev, s, v, h) +#define scsi_sg_dma_address(sc) virt_to_bus((sc)->address) +#define scsi_sg_dma_len(sc) ((sc)->length) +typedef struct wait_queue *WAIT_QUEUE; +#else +#define scsi_sg_dma_address(sc) sg_dma_address(sc) +#define scsi_sg_dma_len(sc) sg_dma_len(sc) +typedef wait_queue_head_t WAIT_QUEUE; +#endif + +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,17) +#define NETDEVICE struct net_device +#else +#define NETDEVICE struct device +#endif + +#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,43) +#define netif_start_queue(dev) clear_bit(0, (void*)&dev->tbusy) +#define netif_stop_queue(dev) set_bit(0, (void*)&dev->tbusy) +#define netdevice_start(dev) dev->start = 1 +#define netdevice_stop(dev) dev->start = 0 +#define dev_kfree_skb_irq(a) dev_kfree_skb(a) +#else +#define netdevice_start(dev) +#define netdevice_stop(dev) +#endif + +#else +#define NETDEVICE void +#endif + +struct intr { + int (*handler) (struct 
intr *); + NETDEVICE * lpfn_dev; + int (*lpfn_handler) (void); + int lpfn_mtu; + int lpfn_rcv_buf_size; +}; +typedef struct sk_buff fcipbuf_t; + +#define fcnextpkt(x) ((x)->prev) /* FOR Now */ +#define fcnextdata(x) ((x)->next) +#define fcpktlen(x) ((x)->len) /* Assume 1 skbuff per packet */ +#define fcdata(x) ((x)->data) +#define fcdatalen(x) ((x)->len) +#define fcgethandle(x) 0 + +#define fcsetdatalen(x, l) (((x)->len) = l) +#define fcincdatalen(x, l) (((x)->len) += l) +#define fcsethandle(x, h) +#define fcfreehandle(p,x) + +#define m_getclust(a,b) lpfc_alloc_skb(p_dev_ctl->ihs.lpfn_rcv_buf_size) +#define m_getclustm(a,b,c) lpfc_alloc_skb(c) +#define m_freem(x) lpfc_kfree_skb(x); + +#define FC_RCV_BUF_SIZE lpfc_ip_rcvsz(p_dev_ctl) /* rcv buf size for IP */ + +#define enet_statistics net_device_stats +/* Structure for generic statistics */ +typedef struct ndd_genstats { + struct enet_statistics ndd_enet; + uint32 ndd_elapsed_time; /* time in seconds since last reset */ + uint32 ndd_ipackets_msw; /* packets received on interface(msw) */ + uint32 ndd_ibytes_msw; /* total # of octets received(msw) */ + uint32 ndd_recvintr_msw; /* number of receive interrupts(msw) */ + uint32 ndd_recvintr_lsw; /* number of receive interrupts(lsw) */ + uint32 ndd_opackets_msw; /* packets sent on interface(msw) */ + uint32 ndd_obytes_msw; /* total number of octets sent(msw) */ + uint32 ndd_xmitintr_msw; /* number of transmit interrupts(msw) */ + uint32 ndd_xmitintr_lsw; /* number of transmit interrupts(lsw) */ + uint32 ndd_nobufs; /* no buffers available */ + uint32 ndd_xmitque_max; /* max transmits ever queued */ + uint32 ndd_xmitque_ovf; /* number of transmit queue overflows */ + uint32 ndd_ibadpackets; /* # of bad pkts recv'd from adapter */ + uint32 ndd_xmitque_cur; /* sum of driver+adapter xmit queues */ + uint32 ndd_ifOutUcastPkts_msw; /* outbound unicast pkts requested */ + uint32 ndd_ifOutUcastPkts_lsw; /* on interface (msw and lsw) */ + uint32 ndd_ifOutMcastPkts_msw; /* 
outbound multicast pkts requested */ + uint32 ndd_ifOutMcastPkts_lsw; /* on interface (msw and lsw) */ + uint32 ndd_ifOutBcastPkts_msw; /* outbound broadcast pkts requested */ + uint32 ndd_ifOutBcastPkts_lsw; /* on interface (msw and lsw) */ + uint32 ndd_ifInBcastPkts_msw; /* rcv'ed broadcast pkts requested */ + uint32 ndd_ifInBcastPkts_lsw; /* on interface (msw and lsw) */ +} ndd_genstats_t; + +#define ndd_ipackets_lsw ndd_enet.rx_packets +#define ndd_opackets_lsw ndd_enet.tx_packets +#define ndd_ibytes_lsw ndd_enet.rx_bytes +#define ndd_obytes_lsw ndd_enet.tx_bytes +#define ndd_ipackets_drop ndd_enet.rx_dropped +#define ndd_opackets_drop ndd_enet.tx_dropped +#define ndd_ierrors ndd_enet.rx_errors +#define ndd_oerrors ndd_enet.tx_errors + +typedef struct ndd { + char *ndd_name; /* name, e.g. ``en0'' or ``tr0'' */ + char *ndd_alias; /* alternate name */ + uint32 ndd_flags; /* up/down, broadcast, etc. */ +#define NDD_UP (0x00000001) /* NDD is opened */ +#define NDD_BROADCAST (0x00000002) /* broadcast address valid */ +#define NDD_RUNNING (0x00000008) /* NDD is operational */ +#define NDD_SIMPLEX (0x00000010) /* can't hear own transmissions */ +#define NDD_MULTICAST (0x00000200) /* receiving all multicasts */ + void (*nd_receive)(void *, struct sk_buff *, void *); /* DLPI streams receive function */ + struct ndd_genstats ndd_genstats; /* generic network stats */ +} ndd_t; + +struct lpfn_probe { + int (*open)(NETDEVICE *dev); + int (*stop)(NETDEVICE *dev); + int (*hard_start_xmit) (struct sk_buff *skb, NETDEVICE *dev); + int (*hard_header) (struct sk_buff *skb, + NETDEVICE *dev, + unsigned short type, + void *daddr, + void *saddr, + unsigned len); + int (*rebuild_header)(struct sk_buff *skb); + void (*receive)(ndd_t *p_ndd, struct sk_buff *skb, void *p_dev_ctl); + struct net_device_stats* (*get_stats)(NETDEVICE *dev); + int (*change_mtu)(NETDEVICE *dev, int new_mtu); +}; +#define LPFN_PROBE 1 +#define LPFN_DETACH 2 +#define LPFN_DFC 3 + +struct buf { + void *av_forw; 
+ void *av_back; + int b_bcount; /* transfer count */ + int b_error; /* expanded error field */ + int b_resid; /* words not transferred after error */ + int b_flags; /* see defines below */ +#define B_ERROR 0x0004 /* transaction aborted */ +#define B_READ 0x0040 /* read when I/O occurs */ +#define B_WRITE 0x0100 /* non-read pseudo-flag */ + struct scsi_cmnd *cmnd; + int isdone; +}; + +/* refer to the SCSI ANSI X3.131-1986 standard for information */ +struct sc_cmd { /* structure of the SCSI cmd block */ + uchar scsi_op_code; /* first byte of SCSI cmd block */ + uchar lun; /* second byte of SCSI cmd block */ + uchar scsi_bytes[14]; /* other bytes of SCSI cmd block */ +}; +#define SCSI_RELEASE_UNIT 0x17 +#define SCSI_REQUEST_SENSE 0x03 +#define SCSI_RESERVE_UNIT 0x16 + +struct scsi { + uchar scsi_length; /* byte length of scsi cmd (6,10, or 12) */ + uchar scsi_id; /* the target SCSI ID */ + uchar scsi_lun; /* which LUN on the target */ + uchar flags; /* flags for use with the physical scsi command */ +#define SC_NODISC 0x80 /* don't allow disconnections */ +#define SC_ASYNC 0x08 /* asynchronous data xfer */ + struct sc_cmd scsi_cmd; /* the actual SCSI cmd */ +}; + +struct sc_buf { + struct buf bufstruct; /* buffer structure containing request + for device -- MUST BE FIRST! 
*/ + struct scsi scsi_command; /* the information relating strictly + to the scsi command itself */ + uint32 timeout_value; /* timeout value for the command, + in units of seconds */ + uint32 cmd_flag; +#define FLAG_ABORT 0x01 + + uchar status_validity; /* least significant bit - scsi_status + * valid, next least significant bit - + * card status valid */ + +#define SC_SCSI_ERROR 1 /* scsi status reflects error */ +#define SC_ADAPTER_ERROR 2 /* general card status reflects err */ + uchar scsi_status; /* returned SCSI Bus status */ +#define SCSI_STATUS_MASK 0x3e /* mask for useful bits */ +#define SC_GOOD_STATUS 0x00 /* target completed successfully */ +#define SC_CHECK_CONDITION 0x02 /* target is reporting an error, + * exception, or abnormal condition */ +#define SC_BUSY_STATUS 0x08 /* target is busy and cannot accept + * a command from initiator */ +#define SC_INTMD_GOOD 0x10 /* intermediate status good when using + * linked commands */ +#define SC_RESERVATION_CONFLICT 0x18 /* attempted to access a LUN which is + * reserved by another initiator */ +#define SC_COMMAND_TERMINATED 0x22 /* Command has been terminated by + * the device. */ +#define SC_QUEUE_FULL 0x28 /* Device's command queue is full */ + + uchar general_card_status; /* SCSI adapter card status byte */ +#define SC_HOST_IO_BUS_ERR 0x01 /* Host I/O Bus error condition */ +#define SC_SCSI_BUS_FAULT 0x02 /* failure of the SCSI Bus */ +#define SC_CMD_TIMEOUT 0x04 /* cmd didn't complete before timeout */ +#define SC_NO_DEVICE_RESPONSE 0x08 /* target device did not respond */ +#define SC_ADAPTER_HDW_FAILURE 0x10 /* indicating a hardware failure */ +#define SC_ADAPTER_SFW_FAILURE 0x20 /* indicating a microcode failure */ +#define SC_FUSE_OR_TERMINAL_PWR 0x40 /* indicating bad fuse or termination */ +#define SC_SCSI_BUS_RESET 0x80 /* detected external SCSI bus reset */ + + uchar adap_q_status; /* adapter's device queue status. 
This*/ +#define SC_DID_NOT_CLEAR_Q 0x1 /* SCSI adapter device driver has not */ + + uchar flags; /* flags to SCSI adapter driver */ +#define SC_RESUME 0x01 /* resume transaction queueing for this + * id/lun beginning with this sc_buf */ +#define SC_MAPPED 0x02 /* buffer is mapped */ + + uint32 qfull_retry_count; +struct dev_info *current_devp; +}; +#define STAT_DEV_RESET 0x0 + +#define MAX_FCP_TARGET 0xff /* max num of FCP targets supported */ +#define MAX_FCP_LUN 0xff /* max num of FCP LUNs supported */ +/* When on, if a lun is detected to be not present, or + * not ready ... device structures related to that lun + * will be freed to save memory. Remove this define + * to turn off the feature */ +#define FREE_LUN 1 + +#define INDEX(pan, target) (ushort)(((pan)<<8) | ((target) & 0x1ff)) +#define DEV_SID(x) (uchar)(x & 0xff) /* extract sid from device id */ +#define DEV_PAN(x) (uchar)((x>>8) & 0x01) /* extract pan from device id */ + +#define GET_PAYLOAD_PHYS_ADDR(x) (x->phys_adr) + +#define MAX_FCP_CMDS 4096 /* Max # of outstanding FCP cmds */ +#define MAX_FC_BRDS 16 /* Max # boards per system */ +#define MAX_FC_TARGETS 512 /* Max scsi target # per adapter */ +#define MAX_FC_BINDINGS 64 /* Max # of persistent bindings */ + +#define LPFC_LOCK_UNOWNED ((void *) -1) +#ifdef __KERNEL__ +#define cpuid smp_processor_id() +#define maxCPU NR_CPUS +#else +#define cpuid 0 +#define maxCPU 1 +#endif /* __KERNEL__ */ + +typedef struct Simple_lock { + spinlock_t *sl_lock; + int owner; +} Simple_lock; + +#define disable_lock(p1, p2) 0 +#define unlock_enable(p1, p2) + +#define LPFC_INIT_LOCK_DRIVER spin_lock_init(&lpfc_smp_lock) +#define LPFC_INIT_LOCK_DPCQ spin_lock_init(&lpfc_dpc_request_lock) + +#define LPFC_LOCK_DRIVER0 spin_lock_irqsave(&lpfc_smp_lock, iflg) +#define LPFC_LOCK_DRIVER(num) spin_lock_irqsave(&lpfc_smp_lock, iflg); \ + if(p_dev_ctl->fc_ipri != 0) { \ + printk("LOCK %d failure %x %x\n",num, \ + (uint32)p_dev_ctl->fc_ipri, (uint32)iflg); \ + } \ + 
p_dev_ctl->fc_ipri = num + +#define LPFC_UNLOCK_DRIVER0 spin_unlock_irqrestore(&lpfc_smp_lock, iflg) +#define LPFC_UNLOCK_DRIVER if(p_dev_ctl) p_dev_ctl->fc_ipri = 0; \ + spin_unlock_irqrestore(&lpfc_smp_lock, iflg) + +#define LPFC_LOCK_SCSI_DONE(shost) \ + spin_lock_irqsave(shost->host_lock, siflg) +#define LPFC_UNLOCK_SCSI_DONE(shost) \ + spin_unlock_irqrestore(shost->host_lock, siflg) + +#define LPFC_DPC_LOCK_Q spin_lock_irqsave(&lpfc_dpc_request_lock, siflg) +#define LPFC_DPC_UNLOCK_Q spin_unlock_irqrestore(&lpfc_dpc_request_lock, siflg) + +#define EPERM 1 /* Not super-user */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Arg list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No children */ +#ifndef EAGAIN +#define EAGAIN 11 /* Resource temporarily unavailable */ +#endif +#define ENOMEM 12 /* Not enough core */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Mount device busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Inappropriate ioctl for device */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math arg out of domain of func */ +#define 
ERANGE 34 /* Math result not representable */ +#ifndef ECONNABORTED +#define ECONNABORTED 103 /* Software caused connection abort */ +#endif +#ifndef ETIMEDOUT +#define ETIMEDOUT 110 /* Connection timed out */ +#endif + +#endif /* LP6000 */ + +#ifdef __KERNEL__ +#define EMULEX_REQ_QUEUE_LEN 2048 +#define EMULEX_MAX_SG(ql) (4 + ((ql) > 0) ? 7*((ql) - 1) : 0) + +#define SCMD_NEXT(scmd) ((struct scsi_cmnd *)(scmd)->SCp.ptr) + +int fc_detect(struct scsi_host_template *); +int fc_release(struct Scsi_Host *); +const char * fc_info(struct Scsi_Host *); +int fc_queuecommand(struct scsi_cmnd *, void (* done)(struct scsi_cmnd *)); +#define FC_EXTEND_TRANS_A 1 +int fc_abort(struct scsi_cmnd *); +#ifdef FC_NEW_EH +int fc_reset_bus(struct scsi_cmnd *); +int fc_reset_host(struct scsi_cmnd *); +int fc_reset_device(struct scsi_cmnd *); +#else +int lpfc_reset(struct scsi_cmnd *, unsigned int); +int fc_proc_info( char *, char **, off_t, int, int, int); +#endif + +#ifdef USE_HIMEM +#define HIGHMEM_ENTRY highmem_io:1 +#else +#define HIGHMEM_ENTRY +#endif + +#ifdef FC_NEW_EH +#define LPFC_SG_SEGMENT 64 +#define EMULEXFC { \ + name: "lpfc", \ + detect: fc_detect, \ + release: fc_release, \ + info: fc_info, \ + queuecommand: fc_queuecommand, \ + eh_abort_handler: fc_abort, \ + eh_device_reset_handler: fc_reset_device, \ + eh_bus_reset_handler: fc_reset_bus, \ + eh_host_reset_handler: fc_reset_host, \ + can_queue: EMULEX_REQ_QUEUE_LEN, \ + this_id: -1, \ + sg_tablesize: LPFC_SG_SEGMENT, \ + cmd_per_lun: 30, \ + use_clustering: DISABLE_CLUSTERING, \ + HIGHMEM_ENTRY \ +} + +#else +#define LPFC_SG_SEGMENT 32 +#define EMULEXFC { \ + next: NULL, \ + module: NULL, \ + proc_dir: NULL, \ + proc_info: fc_proc_info, \ + name: "lpfc", \ + detect: fc_detect, \ + release: fc_release, \ + info: fc_info, \ + ioctl: NULL, \ + command: NULL, \ + queuecommand: fc_queuecommand, \ + eh_strategy_handler: NULL, \ + eh_abort_handler: NULL, \ + eh_device_reset_handler:NULL, \ + eh_bus_reset_handler: NULL, \ + 
eh_host_reset_handler: NULL, \ + abort: fc_abort, \ + reset: lpfc_reset, \ + slave_attach: NULL, \ + can_queue: EMULEX_REQ_QUEUE_LEN, \ + sg_tablesize: LPFC_SG_SEGMENT, \ + cmd_per_lun: 30, \ + present: 0, \ + unchecked_isa_dma: 0, \ + use_clustering: DISABLE_CLUSTERING, \ + use_new_eh_code: 0, \ + emulated: 0 \ +} +#endif +#endif /* __KERNEL */ + +#endif /* _H_FCOS */ + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcclockb.c current/drivers/scsi/lpfc/fcclockb.c --- reference/drivers/scsi/lpfc/fcclockb.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcclockb.c 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,832 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#include "fc_os.h" + +#include +#include +#include "fc_hw.h" +#include "fc.h" + +#include "fcdiag.h" +#include "hbaapi.h" + +#include "fc_hw.h" +#include "fc.h" + +#include "fcdiag.h" +#include "fcfgparm.h" +#include "fcmsg.h" +#include "fc_crtn.h" +#include "fc_ertn.h" + +extern fc_dd_ctl_t DD_CTL; +extern iCfgParam icfgparam[]; +/* Can be used to map driver instance number and hardware adapter number */ +extern int fcinstance[MAX_FC_BRDS]; +extern int fcinstcnt; + +_static_ FCCLOCK *fc_clkgetb(fc_dev_ctl_t *p_dev_ctl); +_static_ ulong fc_clk_rem(fc_dev_ctl_t *p_dev_ctl, FCCLOCK *cb); +_static_ int que_tin(FCLINK *blk, FCLINK *hdr); + +#include "lp6000.c" +#include "dfcdd.c" +/* +*** boolean to test if block is linked into specific queue +*** (intended for assertions) +*/ +#define inque(x,hdr) que_tin( (FCLINK *)(x), (FCLINK *)(hdr) ) + +#define FC_MAX_CLK_TIMEOUT 0xfffffff + +/***************************************************************** +*** fc_clkgetb() Get a clock block +*****************************************************************/ +_static_ FCCLOCK * +fc_clkgetb(fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + FCCLOCK_INFO * clock_info; + FCCLOCK * cb; + int i; + + clock_info = &DD_CTL.fc_clock_info; + + if(p_dev_ctl) { + binfo = &BINFO; + cb = (FCCLOCK * ) fc_mem_get(binfo, MEM_CLOCK); + } + else { + for(i=0;iclk_block[i]; + if(cb->cl_tix == -1) + break; + cb = 0; + } + } + + if(cb) + cb->cl_p_dev_ctl = (void *)p_dev_ctl; + + return (cb); +} + + +/***************************************************************** +*** fc_clkrelb() Release a clock block +*****************************************************************/ +_static_ void +fc_clkrelb(fc_dev_ctl_t *p_dev_ctl, FCCLOCK *cb) +{ + FC_BRD_INFO * binfo; + FCCLOCK_INFO * clock_info; + + clock_info = &DD_CTL.fc_clock_info; + + if(p_dev_ctl) { + binfo = &BINFO; + fc_mem_put(binfo, MEM_CLOCK, (uchar * )cb); + } + else { 
+ cb->cl_tix = (uint32)-1; + } +} + + +/***************************************************************** +*** fc_clk_can() Cancel a clock request +*** fc_clk_can will cancel a previous request to fc_clk_set or +*** fc_clk_res. +*** The request must not have expired so far. A request that has been +*** cancelled cannot be reset. +*****************************************************************/ +_static_ int +fc_clk_can(fc_dev_ctl_t *p_dev_ctl, FCCLOCK *cb) +{ + FCCLOCK_INFO * clock_info; + int ipri; + + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + /* Make sure timer has not expired */ + if (!inque(cb, &clock_info->fc_clkhdr)) { + unlock_enable(ipri, &CLOCK_LOCK); + return(0); + } + + fc_clock_deque(cb); + + /* Release clock block */ + fc_clkrelb(p_dev_ctl, cb); + unlock_enable(ipri, &CLOCK_LOCK); + + return(1); +} + + +/***************************************************************** +*** fc_clk_rem() get amount of time remaining in a clock request +*** fc_clk_rem() returns the number of tix remaining in +*** a clock request generated by fc_clk_set or fc_clk_res. The timer must +*** not have expired or be cancelled. 
+*****************************************************************/ +_static_ ulong +fc_clk_rem(fc_dev_ctl_t *p_dev_ctl, FCCLOCK *cb) +{ + FCCLOCK_INFO * clock_info; + FCCLOCK * x; + ulong tix; + int ipri; + + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + tix = 0; + /* get top of clock queue */ + x = (FCCLOCK * ) & clock_info->fc_clkhdr; + /* + *** Add up ticks in blocks up to specified request + */ + do { + x = x->cl_fw; + if (x == (FCCLOCK * ) & clock_info->fc_clkhdr) { + unlock_enable(ipri, &CLOCK_LOCK); + return(0); + } + tix += x->cl_tix; + } while (x != cb); + + unlock_enable(ipri, &CLOCK_LOCK); + return(tix); +} + + +/***************************************************************** +*** fc_clk_res() clock reset +*** fc_clk_res() resets a clock previously assigned by fc_clk_set(). +*** That clock must not have expired. The new sec time is +*** used, measured from now. The original function/argument +*** are not changed. +*** Note: code parallels fc_clk_can() and fc_clk_set(). +*****************************************************************/ +_static_ ulong +fc_clk_res(fc_dev_ctl_t *p_dev_ctl, ulong tix, FCCLOCK *cb) +{ + FCCLOCK_INFO * clock_info; + FCCLOCK * x; + int ipri; + + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + /* Make sure timer has not expired */ + if (!inque(cb, &clock_info->fc_clkhdr)) { + unlock_enable(ipri, &CLOCK_LOCK); + return(0); + } + if (tix <= 0) { + unlock_enable(ipri, &CLOCK_LOCK); + return(0); + } + tix++; /* round up 1 sec to account for partial first tick */ + + fc_clock_deque(cb); + + /* + *** Insert block into queue by order of amount of clock ticks, + *** each block contains difference in ticks between itself and + *** its predecessor. 
+ */ + + /* get top of list */ + x = clock_info->fc_clkhdr.cl_f; + while (x != (FCCLOCK * ) & clock_info->fc_clkhdr) { + if (x->cl_tix >= tix) { + /* if inserting in middle of que, adjust next tix */ + x->cl_tix -= tix; + break; + } + tix -= x->cl_tix; + x = x->cl_fw; + } + + /* back up one in que */ + x = x->cl_bw; + fc_enque(cb, x); + clock_info->fc_clkhdr.count++; + cb->cl_tix = tix; + + unlock_enable(ipri, &CLOCK_LOCK); + return((ulong)1); +} + + +/***************************************************************** +*** fc_clk_set() request a clock service +*** fc_clk_set will cause specific functions to be executed at a fixed +*** time into the future. At a duration guaranteed to not be less +*** than, but potentially is longer than the given number of secs, +*** the given function is called with the given single argument. +*** Interlock is performed at a processor status level not lower +*** than the given value. The returned value is needed if the request +*** is to be cancelled or reset. +*****************************************************************/ +_static_ FCCLOCK * +fc_clk_set(fc_dev_ctl_t *p_dev_ctl, ulong tix, +void (*func)(fc_dev_ctl_t*, void*, void*), void *arg1, void *arg2) +{ + FCCLOCK_INFO * clock_info; + FCCLOCK * x; + FCCLOCK * cb; + int ipri; + + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + if (tix > FC_MAX_CLK_TIMEOUT) { + return(0); + } + tix++; /* round up 1 sec to account for partial first tick */ + + /* + *** Allocate a CLOCK block + */ + if ((cb = fc_clkgetb(p_dev_ctl)) == 0) { + unlock_enable(ipri, &CLOCK_LOCK); + return(0); + } + + /* + *** Insert block into queue by order of amount of clock ticks, + *** each block contains difference in ticks between itself and + *** its predecessor. 
+ */ + + /* get top of list */ + x = clock_info->fc_clkhdr.cl_f; + while (x != (FCCLOCK * ) & clock_info->fc_clkhdr) { + if (x->cl_tix >= tix) { + /* if inserting in middle of que, adjust next tix */ + if (x->cl_tix > tix) { + x->cl_tix -= tix; + break; + } + /* + *** Another clock expires at same time. + *** Maintain the order of requests. + */ + for (x = x->cl_fw; + x != (FCCLOCK * ) & clock_info->fc_clkhdr; + x = x->cl_fw) { + if (x->cl_tix != 0) + break; + } + + /* I'm at end of list */ + tix = 0; + break; + } + + tix -= x->cl_tix; + x = x->cl_fw; + } + + /* back up one in que */ + x = x->cl_bw; + + /* Count the current number of unexpired clocks */ + clock_info->fc_clkhdr.count++; + fc_enque(cb, x); + cb->cl_func = (void(*)(void*, void*, void*))func; + cb->cl_arg1 = arg1; + cb->cl_arg2 = arg2; + cb->cl_tix = tix; + unlock_enable(ipri, &CLOCK_LOCK); + + return((FCCLOCK * ) cb); +} + + +/***************************************************************** +*** fc_timer +*** This function will be called by the driver every second. 
+*****************************************************************/ +_static_ void +fc_timer(void *p) +{ + fc_dev_ctl_t * p_dev_ctl; + FCCLOCK_INFO * clock_info; + ulong tix; + FCCLOCK * x; + int ipri; + + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + /* + *** Increment time_sample value + */ + clock_info->ticks++; + + x = clock_info->fc_clkhdr.cl_f; + + /* counter for propagating negative values */ + tix = 0; + /* If there are expired clocks */ + if (x != (FCCLOCK * ) & clock_info->fc_clkhdr) { + x->cl_tix = x->cl_tix - 1; + if (x->cl_tix <= 0) { + /* Loop thru all clock blocks */ + while (x != (FCCLOCK * ) & clock_info->fc_clkhdr) { + x->cl_tix += tix; + /* If # of ticks left > 0, break out of loop */ + if (x->cl_tix > 0) + break; + tix = x->cl_tix; + + /* Deque expired clock */ + fc_deque(x); + /* Decrement count of unexpired clocks */ + clock_info->fc_clkhdr.count--; + + unlock_enable(ipri, &CLOCK_LOCK); + + p_dev_ctl = x->cl_p_dev_ctl; + + if(p_dev_ctl) { + ipri = disable_lock(FC_LVL, &CMD_LOCK); + + /* Call timeout routine */ + (*x->cl_func) (p_dev_ctl, x->cl_arg1, x->cl_arg2); + /* Release clock block */ + fc_clkrelb(p_dev_ctl, x); + + unlock_enable(ipri, &CMD_LOCK); + } + else { + /* Call timeout routine */ + (*x->cl_func) (p_dev_ctl, x->cl_arg1, x->cl_arg2); + /* Release clock block */ + fc_clkrelb(p_dev_ctl, x); + } + + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + /* start over */ + x = clock_info->fc_clkhdr.cl_f; + } + } + } + unlock_enable(ipri, &CLOCK_LOCK); + fc_reset_timer(); +} + + +/***************************************************************** +*** fc_clock_deque() +*****************************************************************/ +_static_ void +fc_clock_deque(FCCLOCK *cb) +{ + FCCLOCK_INFO * clock_info; + FCCLOCK * x; + + clock_info = &DD_CTL.fc_clock_info; + /* + *** Remove the block from its present spot, but first adjust + *** tix field of any successor. 
+ */ + if (cb->cl_fw != (FCCLOCK * ) & clock_info->fc_clkhdr) { + x = cb->cl_fw; + x->cl_tix += cb->cl_tix; + } + + /* Decrement count of unexpired clocks */ + clock_info->fc_clkhdr.count--; + + fc_deque(cb); +} + + +/***************************************************************** +*** fc_clock_init() +*****************************************************************/ +_static_ void +fc_clock_init() +{ + FCCLOCK_INFO * clock_info; + FCCLOCK * cb; + int i; + + clock_info = &DD_CTL.fc_clock_info; + + /* Initialize clock queue */ + clock_info->fc_clkhdr.cl_f = + clock_info->fc_clkhdr.cl_b = (FCCLOCK * ) & clock_info->fc_clkhdr; + clock_info->fc_clkhdr.count = 0; + + /* Initialize clock globals */ + clock_info->ticks = 0; + clock_info->Tmr_ct = 0; + + for(i=0;iclk_block[i]; + cb->cl_tix = (uint32)-1; + } +} + + +_static_ int +que_tin(FCLINK *blk, FCLINK *hdr) +{ + FCLINK * x; + + x = hdr->_f; + while (x != hdr) { + if (x == blk) { + return (1); + } + x = x->_f; + } + return(0); +} + + +_static_ void +fc_flush_clk_set( +fc_dev_ctl_t *p_dev_ctl, +void (*func)(fc_dev_ctl_t*, void*, void*)) +{ + FC_BRD_INFO * binfo; + FCCLOCK_INFO * clock_info; + FCCLOCK * x, * xmatch; + IOCBQ *iocbq; + int ipri; + + binfo = &BINFO; + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + x = clock_info->fc_clkhdr.cl_f; + + /* If there are clocks */ + while (x != (FCCLOCK * ) & clock_info->fc_clkhdr) { + if((p_dev_ctl == x->cl_p_dev_ctl) && ((void *)func == (void *)(*x->cl_func))) { + xmatch = x; + x = x->cl_fw; + + /* + *** Remove the block from its present spot, but first adjust + *** tix field of any successor. 
+ */ + if (xmatch->cl_fw != (FCCLOCK * ) & clock_info->fc_clkhdr) { + x->cl_tix += xmatch->cl_tix; + } + + clock_info->fc_clkhdr.count--; + fc_deque(xmatch); + + if((void *)func == (void *)lpfc_scsi_selto_timeout) { + (*xmatch->cl_func) (p_dev_ctl, xmatch->cl_arg1, xmatch->cl_arg2); + } + if(func == fc_delay_timeout) { + iocbq = (IOCBQ *)xmatch->cl_arg1; + if(iocbq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->bp); + } + if(iocbq->info) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->info); + } + if(iocbq->bpl) { + fc_mem_put(binfo, MEM_BPL, (uchar * )iocbq->bpl); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocbq); + } + fc_clkrelb(p_dev_ctl, xmatch); + } + else { + x = x->cl_fw; + } + } + unlock_enable(ipri, &CLOCK_LOCK); + return; +} + +_static_ int +fc_abort_clk_blk( +fc_dev_ctl_t *p_dev_ctl, +void (*func)(fc_dev_ctl_t*, void*, void*), +void *arg1, +void *arg2) +{ + FC_BRD_INFO * binfo; + FCCLOCK_INFO * clock_info; + FCCLOCK * x, * xmatch; + IOCBQ *iocbq; + int ipri; + + binfo = &BINFO; + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + x = clock_info->fc_clkhdr.cl_f; + + /* If there are clocks */ + while (x != (FCCLOCK * ) & clock_info->fc_clkhdr) { + if((p_dev_ctl == x->cl_p_dev_ctl) && + ((void *)func == (void *)(*x->cl_func)) && + (arg1 == x->cl_arg1) && + (arg2 == x->cl_arg2)) { + xmatch = x; + x = x->cl_fw; + + /* + *** Remove the block from its present spot, but first adjust + *** tix field of any successor. 
+ */ + if (xmatch->cl_fw != (FCCLOCK * ) & clock_info->fc_clkhdr) { + x->cl_tix += xmatch->cl_tix; + } + + clock_info->fc_clkhdr.count--; + fc_deque(xmatch); + if((void *)func == (void *)lpfc_scsi_selto_timeout) { + (*xmatch->cl_func) (p_dev_ctl, xmatch->cl_arg1, xmatch->cl_arg2); + } + if(func == fc_delay_timeout) { + iocbq = (IOCBQ *)xmatch->cl_arg1; + if(iocbq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->bp); + } + if(iocbq->info) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->info); + } + if(iocbq->bpl) { + fc_mem_put(binfo, MEM_BPL, (uchar * )iocbq->bpl); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocbq); + } + fc_clkrelb(p_dev_ctl, xmatch); + unlock_enable(ipri, &CLOCK_LOCK); + return(1); + } + else { + x = x->cl_fw; + } + } + unlock_enable(ipri, &CLOCK_LOCK); + return(0); +} + +_static_ int +fc_abort_delay_els_cmd( +fc_dev_ctl_t *p_dev_ctl, +uint32 did) +{ + FC_BRD_INFO * binfo; + FCCLOCK_INFO * clock_info; + FCCLOCK * x, * xmatch; + IOCBQ *iocbq, *saveiocbq, *next_iocbq; + int ipri; + + binfo = &BINFO; + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(CLK_LVL, &CLOCK_LOCK); + + x = clock_info->fc_clkhdr.cl_f; + + /* If there are clocks */ + while (x != (FCCLOCK * ) & clock_info->fc_clkhdr) { + if((p_dev_ctl == x->cl_p_dev_ctl) && + ((void *)(x->cl_func) == (void *)fc_delay_timeout)) { + xmatch = x; + x = x->cl_fw; + iocbq = (IOCBQ *)xmatch->cl_arg1; + + if((iocbq->iocb.un.elsreq.remoteID != did) && + (did != 0xffffffff)) + continue; + /* Abort delay xmit clock */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0100, /* ptr to msg structure */ + fc_mes0100, /* ptr to msg */ + fc_msgBlk0100.msgPreambleStr, /* begin varargs */ + did, + iocbq->iocb.un.elsreq.remoteID, + iocbq->iocb.ulpIoTag); /* end varargs */ + /* + *** Remove the block from its present spot, but first adjust + *** tix field of any successor. 
+ */ + if (xmatch->cl_fw != (FCCLOCK * ) & clock_info->fc_clkhdr) { + x->cl_tix += xmatch->cl_tix; + } + + clock_info->fc_clkhdr.count--; + fc_deque(xmatch); + if(iocbq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->bp); + } + if(iocbq->info) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->info); + } + if(iocbq->bpl) { + fc_mem_put(binfo, MEM_BPL, (uchar * )iocbq->bpl); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocbq); + + fc_clkrelb(p_dev_ctl, xmatch); + + if(did != 0xffffffff) + break; + } + else { + x = x->cl_fw; + } + } + unlock_enable(ipri, &CLOCK_LOCK); + + if(binfo->fc_delayxmit) { + iocbq = binfo->fc_delayxmit; + saveiocbq = 0; + while(iocbq) { + + if((iocbq->iocb.un.elsreq.remoteID == did) || + (did == 0xffffffff)) { + + next_iocbq = (IOCBQ *)iocbq->q; + /* Abort delay xmit context */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0101, /* ptr to msg structure */ + fc_mes0101, /* ptr to msg */ + fc_msgBlk0101.msgPreambleStr, /* begin varargs */ + did, + iocbq->iocb.un.elsreq.remoteID, + iocbq->iocb.ulpIoTag); /* end varargs */ + if(saveiocbq) { + saveiocbq->q = iocbq->q; + } + else { + binfo->fc_delayxmit = (IOCBQ *)iocbq->q; + } + if(iocbq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->bp); + } + if(iocbq->info) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->info); + } + if(iocbq->bpl) { + fc_mem_put(binfo, MEM_BPL, (uchar * )iocbq->bpl); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocbq); + + if(did != 0xffffffff) + break; + iocbq = next_iocbq; + } + else { + saveiocbq = iocbq; + iocbq = (IOCBQ *)iocbq->q; + } + } + } + return(0); +} +/* DQFULL */ +/***************************************************************************** + * + * NAME: fc_q_depth_up + * FUNCTION: Increment current Q depth for LUNs + * + *****************************************************************************/ + +_static_ void +fc_q_depth_up( +fc_dev_ctl_t * p_dev_ctl, +void *n1, +void *n2) +{ + node_t *nodep; + NODELIST * ndlp; + iCfgParam * clp; + 
FC_BRD_INFO *binfo; + struct dev_info * dev_ptr; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + if (clp[CFG_DFT_LUN_Q_DEPTH].a_current <= FC_MIN_QFULL) { + return; + } + + if(binfo->fc_ffstate != FC_READY) + goto out; + + /* + * Find the target from the nlplist based on SCSI ID + */ + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + nodep = (node_t *)ndlp->nlp_targetp; + if (nodep) { + for (dev_ptr = nodep->lunlist; dev_ptr != NULL; + dev_ptr = dev_ptr->next) { + if ((dev_ptr->stop_send_io == 0) && + (dev_ptr->fcp_cur_queue_depth < clp[CFG_DFT_LUN_Q_DEPTH].a_current)) { + dev_ptr->fcp_cur_queue_depth += (ushort)clp[CFG_DQFULL_THROTTLE_UP_INC].a_current; + if (dev_ptr->fcp_cur_queue_depth > clp[CFG_DFT_LUN_Q_DEPTH].a_current) + dev_ptr->fcp_cur_queue_depth = clp[CFG_DFT_LUN_Q_DEPTH].a_current; + } else { + /* + * Try to reset stop_send_io + */ + if (dev_ptr->stop_send_io) + dev_ptr->stop_send_io--; + } + } + } + ndlp = (NODELIST *)ndlp->nlp_listp_next; + } + +out: + fc_clk_set(p_dev_ctl, clp[CFG_DQFULL_THROTTLE_UP_TIME].a_current, fc_q_depth_up, + 0, 0); + + return; +} + +/* QFULL_RETRY */ +_static_ void +fc_qfull_retry( +void *n1) +{ + fc_buf_t * fcptr; + dvi_t * dev_ptr; + T_SCSIBUF * sbp; + struct buf * bp; + fc_dev_ctl_t * p_dev_ctl; + + fcptr = (fc_buf_t *)n1; + sbp = fcptr->sc_bufp; + dev_ptr = fcptr->dev_ptr; + bp = (struct buf *) sbp; + + if(dev_ptr->nodep) { + p_dev_ctl = dev_ptr->nodep->ap; + fc_fcp_bufunmap(p_dev_ctl, sbp); + } + /* + * Queue this command to the head of the device's + * pending queue for processing by fc_issue_cmd. + */ + if (dev_ptr->pend_head == NULL) { /* Is queue empty? 
*/ + dev_ptr->pend_head = sbp; + dev_ptr->pend_tail = sbp; + bp->av_forw = NULL; + fc_enq_wait(dev_ptr); + } else { /* Queue not empty */ + bp->av_forw = (struct buf *) dev_ptr->pend_head; + dev_ptr->pend_head = sbp; + } + dev_ptr->pend_count++; +} + +_static_ void +fc_establish_link_tmo( +fc_dev_ctl_t * p_dev_ctl, +void *n1, +void *n2) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + /* Re-establishing Link, timer expired */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1300, /* ptr to msg structure */ + fc_mes1300, /* ptr to msg */ + fc_msgBlk1300.msgPreambleStr, /* begin varargs */ + binfo->fc_flag, + binfo->fc_ffstate); /* end varargs */ + binfo->fc_flag &= ~FC_ESTABLISH_LINK; +} diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcdds.h current/drivers/scsi/lpfc/fcdds.h --- reference/drivers/scsi/lpfc/fcdds.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcdds.h 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,175 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +#ifndef _H_FCDDS +#define _H_FCDDS + +#include "fc_hw.h" + +#define LNAMESIZE 16 + +#ifndef DMA_MAXMIN_16M +#define DMA_MAXMIN_16M 0x8 +#define DMA_MAXMIN_32M 0x9 +#define DMA_MAXMIN_64M 0xa +#define DMA_MAXMIN_128M 0xb +#define DMA_MAXMIN_256M 0xc +#define DMA_MAXMIN_512M 0xd +#define DMA_MAXMIN_1G 0xe +#define DMA_MAXMIN_2G 0xf +#endif + +/****************************************************************************/ +/* This is the DDS structure for the FC device */ +/****************************************************************************/ + +typedef struct { + char logical_name[LNAMESIZE]; /* logical name in ASCII characters */ + char dev_alias[LNAMESIZE]; /* logical name in ASCII characters */ + uint32 devno; /* major/minor device number */ + + /* PCI parameters */ + int bus_id; /* for use with i_init and bus io */ + int sla; /* for use with pci_cfgrw and bus io */ + int bus_intr_lvl; /* interrupt level */ + uint64 bus_mem_addr; /* bus memory base address */ + uint64 bus_io_addr; /* I/O reg base address */ + + uint32 xmt_que_size; /* size of xmit queue for mbufs */ + uint32 num_iocbs; /* number of iocb buffers to allocate */ + uint32 num_bufs; /* number of ELS/IP buffers to allocate */ + uint32 fcpfabric_tmo; /* Extra FCP timeout for fabrics (in secs) */ + + ushort topology; /* link topology for init link */ + ushort post_ip_buf; /* number of IP buffers to post to ring 1 */ + + ushort rsvd1; + uchar ipclass; /* class to use for transmitting IP data */ + uchar fcpclass; /* class to use for transmitting FCP data */ + + uchar network_on; /* true if networking is enabled */ + uchar fcp_on; /* true if FCP access is enabled */ + uchar frame_512; /* true if 512 byte framesize is required */ + uchar use_adisc; /* Use ADISC results in FCP rediscovery 
*/ + + uchar first_check; /* Ignore 0x2900 check condition after PLOGI */ + uchar sli; /* Service Level Interface supported */ + uchar ffnumrings; /* number of FF rings being used */ + + uchar scandown; + uchar linkdown_tmo; /* linkdown timer, seconds */ + uchar nodev_tmo; + uchar fabric_reg; /* perform RFT_ID with NameServer */ + + uchar nummask[4]; /* number of masks/rings being used */ + uchar rval[6]; /* rctl for ring, assume mask is 0xff */ + uchar tval[6]; /* type for ring, assume mask is 0xff */ + + uchar verbose; /* how much to hurl onto the console */ + uchar ack0support; /* Run with ACK0 for CLASS2 sequences */ + uchar log_only; /* console messages just logged to log file */ + uchar automap; /* assign scsi ids to all FCP devices */ + + uint32 default_tgt_queue_depth; /* max # cmds outstanding to a target */ + + uchar dds1_os; /* system dependent variable */ + uchar default_lun_queue_depth; /* max # cmds outstanding to a lun */ + uchar nodev_holdio; /* Hold I/O errors if device disappears */ + uchar zone_rscn; /* system dependent variable */ + + uchar check_cond_err; + uchar delay_rsp_err; + uchar rscn_adisc; + uchar filler1; + uchar filler2; + uchar filler3; + + uint32 dds5_os; /* system dependent variable */ +} fc_dds_t; + +/* Values for seed_base and fcp_mapping */ +#define FCP_SEED_WWPN 0x1 /* Entry scsi id is seeded for WWPN */ +#define FCP_SEED_WWNN 0x2 /* Entry scsi id is seeded for WWNN */ +#define FCP_SEED_DID 0x4 /* Entry scsi id is seeded for DID */ +#define FCP_SEED_MASK 0x7 /* mask for seeded flags */ + + + +/* Allocate space for any environment specific dds parameters */ + + + + + +/****************************************************************************/ +/* Device VPD save area */ +/****************************************************************************/ + +typedef struct fc_vpd { + uint32 status; /* vpd status value */ + uint32 length; /* number of bytes actually returned */ + struct { + uint32 rsvd1; /* Revision numbers */ + 
uint32 biuRev; + uint32 smRev; + uint32 smFwRev; + uint32 endecRev; + ushort rBit; + uchar fcphHigh; + uchar fcphLow; + uchar feaLevelHigh; + uchar feaLevelLow; + uint32 postKernRev; + uint32 opFwRev; + uchar opFwName[16]; + uint32 sli1FwRev; + uchar sli1FwName[16]; + uint32 sli2FwRev; + uchar sli2FwName[16]; + } rev; +} fc_vpd_t; + +/****************************************************************************/ +/* Node table information that the config routine needs */ +/****************************************************************************/ + + +/****************************************************************************/ +/* SCIOCOMPLETE results buffer structure */ +/****************************************************************************/ + +struct iorslt { + struct buf *buf_struct_ptr; + uint32 b_flags; + uint32 b_resid; + char b_error; +}; + +/****************************************************************************/ +/* Special ioctl calls for the Fibre Channel SCSI LAN device driver */ +/****************************************************************************/ + +#define SCIONODES 0x47 /* ioctl to get node table */ +#define SCIOSTRAT 0x48 /* strategy ioctl */ +#define SCIOCOMPLETE 0x49 /* I/O completion ioctl */ +#define SCIORESUMEQ 0x4a /* device resume Q ioctl */ + + +#endif /* _H_FCDDS */ diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcdiag.h current/drivers/scsi/lpfc/fcdiag.h --- reference/drivers/scsi/lpfc/fcdiag.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcdiag.h 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,353 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. 
* + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +#ifndef _H_FCDIAG +#define _H_FCDIAG + +#ifndef LP6000 +/* Applications using this header file should typedef the following */ +typedef unsigned int uint32; +typedef unsigned char uchar; +typedef unsigned short ushort; +typedef void* MAILBOX; +#endif + +/* the brdinfo structure */ +typedef struct BRDINFO { + uint32 a_mem_hi; /* memory identifier for adapter access */ + uint32 a_mem_low; /* memory identifier for adapter access */ + uint32 a_flash_hi; /* memory identifier for adapter access */ + uint32 a_flash_low; /* memory identifier for adapter access */ + uint32 a_ctlreg_hi; /* memory identifier for adapter access */ + uint32 a_ctlreg_low; /* memory identifier for adapter access */ + uint32 a_intrlvl; /* interrupt level for adapter */ + uint32 a_pci; /* PCI identifier (device / vendor id) */ + uint32 a_busid; /* identifier of PCI bus adapter is on */ + uint32 a_devid; /* identifier of PCI device number */ + uchar a_rsvd1; /* reserved for future use */ + uchar a_rsvd2; /* reserved for future use */ + uchar a_siglvl; /* signal handler used by library */ + uchar a_ddi; /* identifier device driver instance number */ + uint32 a_onmask; /* mask of ONDI primatives supported */ + uint32 a_offmask; /* mask of OFFDI primatives supported */ + uchar a_drvrid[16]; /* driver 
version */ + uchar a_fwname[32]; /* firmware version */ +} brdinfo; + +/* bits in a_onmask */ +#define ONDI_MBOX 0x1 /* allows non-destructive mailbox commands */ +#define ONDI_IOINFO 0x2 /* supports retrieval of I/O info */ +#define ONDI_LNKINFO 0x4 /* supports retrieval of link info */ +#define ONDI_NODEINFO 0x8 /* supports retrieval of node info */ +#define ONDI_TRACEINFO 0x10 /* supports retrieval of trace info */ +#define ONDI_SETTRACE 0x20 /* supports configuration of trace info */ +#define ONDI_SLI1 0x40 /* hardware supports SLI-1 interface */ +#define ONDI_SLI2 0x80 /* hardware supports SLI-2 interface */ +#define ONDI_BIG_ENDIAN 0x100 /* DDI interface is BIG Endian */ +#define ONDI_LTL_ENDIAN 0x200 /* DDI interface is LITTLE Endian */ +#define ONDI_RMEM 0x400 /* allows reading of adapter shared memory */ +#define ONDI_RFLASH 0x800 /* allows reading of adapter flash */ +#define ONDI_RPCI 0x1000 /* allows reading of adapter pci registers */ +#define ONDI_RCTLREG 0x2000 /* allows reading of adapter cntrol registers */ +#define ONDI_CFGPARAM 0x4000 /* supports get/set configuration parameters */ +#define ONDI_CT 0x8000 /* supports passthru CT interface */ +#define ONDI_HBAAPI 0x10000 /* supports HBA API interface */ + +/* bits in a_offmask */ +#define OFFDI_MBOX 0x1 /* allows all mailbox commands */ +#define OFFDI_RMEM 0x2 /* allows reading of adapter shared memory */ +#define OFFDI_WMEM 0x4 /* allows writing of adapter shared memory */ +#define OFFDI_RFLASH 0x8 /* allows reading of adapter flash */ +#define OFFDI_WFLASH 0x10 /* allows writing of adapter flash */ +#define OFFDI_RPCI 0x20 /* allows reading of adapter pci registers */ +#define OFFDI_WPCI 0x40 /* allows writing of adapter pci registers */ +#define OFFDI_RCTLREG 0x80 /* allows reading of adapter cntrol registers */ +#define OFFDI_WCTLREG 0x100 /* allows writing of adapter cntrol registers */ +#define OFFDI_OFFLINE 0x80000000 /* if set, adapter is in offline state */ + +/* values for flag in 
SetDiagEnv */ +#define DDI_SHOW 0x0 +#define DDI_ONDI 0x1 +#define DDI_OFFDI 0x2 + +#define DDI_BRD_SHOW 0x10 +#define DDI_BRD_ONDI 0x11 +#define DDI_BRD_OFFDI 0x12 + +/* unused field */ +#define DDI_UNUSED 0xFFFFFFFFL /* indicate unused field of brdinfo */ + +/* the ioinfo structure */ +typedef struct IOINFO { + uint32 a_mbxCmd; /* mailbox commands issued */ + uint32 a_mboxCmpl; /* mailbox commands completed */ + uint32 a_mboxErr; /* mailbox commands completed, error status */ + uint32 a_iocbCmd; /* iocb command ring issued */ + uint32 a_iocbRsp; /* iocb rsp ring recieved */ + uint32 a_adapterIntr; /* adapter interrupt events */ + uint32 a_fcpCmd; /* FCP commands issued */ + uint32 a_fcpCmpl; /* FCP command completions recieved */ + uint32 a_fcpErr; /* FCP command completions errors */ + uint32 a_seqXmit; /* IP xmit sequences sent */ + uint32 a_seqRcv; /* IP sequences recieved */ + uint32 a_bcastXmit; /* cnt of successful xmit broadcast commands issued */ + uint32 a_bcastRcv; /* cnt of receive broadcast commands received */ + uint32 a_elsXmit; /* cnt of successful ELS request commands issued */ + uint32 a_elsRcv; /* cnt of ELS request commands received */ + uint32 a_RSCNRcv; /* cnt of RSCN commands recieved */ + uint32 a_seqXmitErr; /* cnt of unsuccessful xmit broadcast cmds issued */ + uint32 a_elsXmitErr; /* cnt of unsuccessful ELS request commands issued */ + uint32 a_elsBufPost; /* cnt of ELS buffers posted to adapter */ + uint32 a_ipBufPost; /* cnt of IP buffers posted to adapter */ + uint32 a_cnt1; /* generic counter */ + uint32 a_cnt2; /* generic counter */ + uint32 a_cnt3; /* generic counter */ + uint32 a_cnt4; /* generic counter */ + +} IOinfo; + +/* the linkinfo structure */ +typedef struct LINKINFO { + uint32 a_linkEventTag; + uint32 a_linkUp; + uint32 a_linkDown; + uint32 a_linkMulti; + uint32 a_DID; + uchar a_topology; + uchar a_linkState; + uchar a_alpa; + uchar a_alpaCnt; + uchar a_alpaMap[128]; + uchar a_wwpName[8]; + uchar a_wwnName[8]; +} 
LinkInfo; + +/* values for a_topology */ +#define LNK_LOOP 0x1 +#define LNK_PUBLIC_LOOP 0x2 +#define LNK_FABRIC 0x3 +#define LNK_PT2PT 0x4 + +/* values for a_linkState */ +#define LNK_DOWN 0x1 +#define LNK_UP 0x2 +#define LNK_FLOGI 0x3 +#define LNK_DISCOVERY 0x4 +#define LNK_REDISCOVERY 0x5 +#define LNK_READY 0x6 + +/* the traceinfo structure */ +typedef struct TRACEINFO { + uchar a_event; + uchar a_cmd; + ushort a_status; + uint32 a_information; +} TraceInfo; + +/* values for flag */ +#define TRC_SHOW 0x0 +#define TRC_MBOX 0x1 +#define TRC_IOCB 0x2 +#define TRC_INTR 0x4 +#define TRC_EVENT 0x8 + +/* values for a_event */ +#define TRC_MBOX_CMD 0x1 +#define TRC_MBOX_CMPL 0x2 +#define TRC_IOCB_CMD 0x3 +#define TRC_IOCB_RSP 0x4 +#define TRC_INTR_RCV 0x5 +#define TRC_EVENT1 0x6 +#define TRC_EVENT2 0x7 +#define TRC_EVENT_MASK 0x7 +#define TRC_RING0 0x0 +#define TRC_RING1 0x40 +#define TRC_RING2 0x80 +#define TRC_RING3 0xC0 +#define TRC_RING_MASK 0xC0 + +/* the cfgparam structure */ +typedef struct CFGPARAM { + uchar a_string[32]; + uint32 a_low; + uint32 a_hi; + uint32 a_default; + uint32 a_current; + ushort a_flag; + ushort a_changestate; + uchar a_help[80]; +} CfgParam; + +#define MAX_CFG_PARAM 64 + +/* values for a_flag */ +#define CFG_EXPORT 0x1 /* Export this parameter to the end user */ +#define CFG_IGNORE 0x2 /* Ignore this parameter */ +#define CFG_DEFAULT 0x8000 /* Reestablishing Link */ + +/* values for a_changestate */ +#define CFG_REBOOT 0x0 /* Changes effective after ystem reboot */ +#define CFG_DYNAMIC 0x1 /* Changes effective immediately */ +#define CFG_RESTART 0x2 /* Changes effective after driver restart */ + +/* the icfgparam structure - internal use only */ +typedef struct ICFGPARAM { + char *a_string; + uint32 a_low; + uint32 a_hi; + uint32 a_default; + uint32 a_current; + ushort a_flag; + ushort a_changestate; + char *a_help; +} iCfgParam; + + +/* the nodeinfo structure */ +typedef struct NODEINFO { + ushort a_flag; + ushort a_state; + uint32 a_did; 
+ uchar a_wwpn[8]; + uchar a_wwnn[8]; + uint32 a_targetid; +} NodeInfo; + +#define MAX_NODES 512 + +/* Defines for a_state */ +#define NODE_UNUSED 0 /* unused NL_PORT entry */ +#define NODE_LIMBO 0x1 /* entry needs to hang around for wwpn / sid */ +#define NODE_LOGOUT 0x2 /* NL_PORT is not logged in - entry is cached */ +#define NODE_PLOGI 0x3 /* PLOGI was sent to NL_PORT */ +#define NODE_LOGIN 0x4 /* NL_PORT is logged in / login REG_LOGINed */ +#define NODE_PRLI 0x5 /* PRLI was sent to NL_PORT */ +#define NODE_ALLOC 0x6 /* NL_PORT is ready to initiate adapter I/O */ +#define NODE_SEED 0x7 /* seed scsi id bind in table */ + +/* Defines for a_flag */ +#define NODE_RPI_XRI 0x1 /* creating xri for entry */ +#define NODE_REQ_SND 0x2 /* sent ELS request for this entry */ +#define NODE_ADDR_AUTH 0x4 /* Authenticating addr for this entry */ +#define NODE_RM_ENTRY 0x8 /* Remove this entry */ +#define NODE_FARP_SND 0x10 /* sent FARP request for this entry */ +#define NODE_FABRIC 0x20 /* this entry represents the Fabric */ +#define NODE_FCP_TARGET 0x40 /* this entry is an FCP target */ +#define NODE_IP_NODE 0x80 /* this entry is an IP node */ +#define NODE_DISC_START 0x100 /* start discovery on this entry */ +#define NODE_SEED_WWPN 0x200 /* Entry scsi id is seeded for WWPN */ +#define NODE_SEED_WWNN 0x400 /* Entry scsi id is seeded for WWNN */ +#define NODE_SEED_DID 0x800 /* Entry scsi id is seeded for DID */ +#define NODE_SEED_MASK 0xe00 /* mask for seeded flags */ +#define NODE_AUTOMAP 0x1000 /* This entry was automap'ed */ +#define NODE_NS_REMOVED 0x2000 /* This entry removed from NameServer */ + +/* Defines for RegisterForEvent mask */ +#define FC_REG_LINK_EVENT 0x1 /* Register for link up / down events */ +#define FC_REG_RSCN_EVENT 0x2 /* Register for RSCN events */ +#define FC_REG_CT_EVENT 0x4 /* Register for CT request events */ +#define FC_REG_EVENT_MASK 0x3f /* event mask */ +#define FC_REG_ALL_PORTS 0x80 /* Register for all ports */ + +#define MAX_FC_EVENTS 8 /* 
max events user process can wait for per HBA */ +#define FC_FSTYPE_ALL 0xffff /* match on all fsTypes */ + +/* Defines for error codes */ +#define FC_ERROR_BUFFER_OVERFLOW 0xff +#define FC_ERROR_RESPONSE_TIMEOUT 0xfe +#define FC_ERROR_LINK_UNAVAILABLE 0xfd +#define FC_ERROR_INSUFFICIENT_RESOURCES 0xfc +#define FC_ERROR_EXISTING_REGISTRATION 0xfb +#define FC_ERROR_INVALID_TAG 0xfa + +/* User Library level Event structure */ +typedef struct reg_evt { + uint32 e_mask; + uint32 e_gstype; + uint32 e_pid; + uint32 e_outsz; + void (*e_func)(uint32, ...); + void * e_ctx; + void * e_out; +} RegEvent; + +/* Defines for portid for CT interface */ +#define CT_FabricCntlServer ((uint32)0xfffffd) +#define CT_NameServer ((uint32)0xfffffc) +#define CT_TimeServer ((uint32)0xfffffb) +#define CT_MgmtServer ((uint32)0xfffffa) + + +/* functions from diagnostic specification */ +uint32 InitDiagEnv(brdinfo *bi); +uint32 FreeDiagEnv(void); +uint32 SetDiagEnv(uint32 flag); +uint32 SetBrdEnv(uint32 board, uint32 flag); +uint32 GetIOinfo(uint32 board, IOinfo *ioinfo); +uint32 GetLinkInfo(uint32 board, LinkInfo *linkinfo); +uint32 GetCfgParam(uint32 board, CfgParam *cfgparam); +uint32 SetCfgParam(uint32 board, uint32 index, uint32 value); +uint32 GetNodeInfo(uint32 board, NodeInfo *nodeinfo); +int GetCTInfo(uint32 board, uint32 portid, uchar *inbuf, uint32 incnt, + uchar *outbuf, uint32 outcnt); +uint32 GetTraceInfo(uint32 board, TraceInfo *traceinfo); +uint32 SetTraceInfo(uint32 board, uint32 flag, uint32 depth); +uint32 IssueMbox(uint32 board, MAILBOX *mb, uint32 insize, uint32 outsize); +uint32 ReadMem(uint32 board, uchar *buffer, uint32 offset, uint32 count); +uint32 WriteMem(uint32 board, uchar *buffer, uint32 offset, uint32 count); +uint32 ReadFlash(uint32 board, uchar *buffer, uint32 offset, uint32 count); +uint32 WriteFlash(uint32 board, uchar *buffer, uint32 offset, uint32 count); +uint32 ReadCtlReg(uint32 board, uint32 *buffer, uint32 offset); +uint32 WriteCtlReg(uint32 board, 
uint32 *buffer, uint32 offset); +uint32 ReadPciCfg(uint32 board, uchar *buffer, uint32 offset, uint32 count); +uint32 WritePciCfg(uint32 board, uchar *buffer, uint32 offset, uint32 count); +uint32 ReadFcodeFlash(uint32 board, uchar *buffer, uint32 offset, uint32 count); +uint32 WriteFcodeFlash(uint32 board, uchar *buffer, uint32 offset, uint32 count); +uint32 SendElsCmd(uint32 board, uint32 opcode, uint32 did); +uint32 SendScsiCmd(uint32 board, void *wwn, void *req, uint32 sz, void *rsp, + uint32 *rsz, void *sns, uint32 *snssz); +uint32 SendScsiRead(uint32 board, void *PortWWN, uint64 l, uint32 s, + void *rsp, uint32 *rspCount, void *sns, uint32 *snsCount); +uint32 SendScsiWrite(uint32 board, void *PortWWN, uint64 l, uint32 s, + void *rsp, uint32 *rspCount, void *sns, uint32 *snsCount); +uint32 SendFcpCmd(uint32 board, void *wwn, void *req, uint32 sz, void *data, + uint32 *datasz, void *fcpRsp, uint32 *fcpRspsz); +void * RegisterForCTEvents(uint32 board, ushort type, void (*func)(uint32, ...), void *ctx, uint32 *pstat); +uint32 unRegisterForCTEvent(uint32 board, void *eventid); +uint32 RegisterForEvent(uint32 board, uint32 mask, void *type, uint32 outsz, void (*func)(uint32, ...), void *ctx); +uint32 unRegisterForEvent(uint32 board, uint32 eventid); + +#if defined(_KERNEL) || defined(__KERNEL__) +struct dfc_info { + brdinfo fc_ba; + char * fc_iomap_io; /* starting address for registers */ + char * fc_iomap_mem; /* starting address for SLIM */ + uchar * fc_hmap; /* handle for mapping memory */ + uint32 fc_refcnt; + uint32 fc_flag; +}; + +/* Define for fc_flag */ +#define DFC_STOP_IOCTL 1 /* Stop processing dfc ioctls */ +#define DFC_MBOX_ACTIVE 2 /* mailbox is active thru dfc */ + +#endif + +#endif /* _H_FCDIAG */ diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcelsb.c current/drivers/scsi/lpfc/fcelsb.c --- reference/drivers/scsi/lpfc/fcelsb.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcelsb.c 2004-04-09 
11:53:03.000000000 -0700 @@ -0,0 +1,4792 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +#include "fc_os.h" + +#include "fc_hw.h" +#include "fc.h" + +#include "fcdiag.h" +#include "hbaapi.h" +#include "fcfgparm.h" +#include "fcmsg.h" +#include "fc_crtn.h" +#include "fc_ertn.h" /* Environment - external routine definitions */ + +extern fc_dd_ctl_t DD_CTL; +extern iCfgParam icfgparam[]; +extern char *lpfc_release_version; +extern int fc_max_els_sent; + +/* Routine Declaration - Local */ +_local_ int fc_chksparm(FC_BRD_INFO *binfo,volatile SERV_PARM *sp, uint32 cls); +_local_ int fc_els_retry(FC_BRD_INFO *binfo, RING *rp, IOCBQ *iocb, uint32 cmd, + NODELIST *nlp); +_local_ int fc_status_action(FC_BRD_INFO *binfo, IOCBQ *iocb, uint32 cmd, + NODELIST *nlp); +/* End Routine Declaration - Local */ + +/******************************************************/ +/** handle_els_event **/ +/** **/ +/** Description: Process an ELS Response Ring cmpl. 
**/ +/** **/ +/******************************************************/ +_static_ int +handle_els_event( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *temp) +{ + IOCB * cmd; + FC_BRD_INFO * binfo; + IOCBQ * xmitiq; + volatile SERV_PARM * sp; + uint32 * lp0, * lp1; + MATCHMAP * mp, * rmp; + DMATCHMAP * drmp; + NODELIST * ndlp; + MAILBOXQ * mb; + ELS_PKT * ep; + iCfgParam * clp; + ADISC * ap; + void * ioa; + int rc; + uint32 command; + uint32 did, bumpcnt; + volatile uint32 ha_copy; + + /* Called from host_interrupt() to process ELS R0ATT */ + rc = 0; + ndlp = 0; + binfo = &BINFO; + cmd = &temp->iocb; + + /* look up xmit complete by IoTag */ + if ((xmitiq = fc_ringtxp_get(rp, cmd->ulpIoTag)) == 0) { + /* completion with missing xmit command */ + FCSTATCTR.elsStrayXmitCmpl++; + + /* Stray ELS completion */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0102, /* ptr to msg structure */ + fc_mes0102, /* ptr to msg */ + fc_msgBlk0102.msgPreambleStr, /* begin varargs */ + cmd->ulpCommand, + cmd->ulpIoTag); /* end varargs */ + + return(EIO); + } + temp->retry = xmitiq->retry; + + if(binfo->fc_ffstate < FC_READY) { + /* If we are in discovery, and a Link Event is pending, abandon + * discovery, clean up pending actions, and take the Link Event. 
+ */ + ioa = FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + /* Read host attention register to determine interrupt source */ + ha_copy = READ_CSR_REG(binfo, FC_HA_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + + if(ha_copy & HA_LATT) { + /* Pending Link Event during Discovery */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0250, /* ptr to msg structure */ + fc_mes0250, /* ptr to msg */ + fc_msgBlk0250.msgPreambleStr, /* begin varargs */ + (uint32)cmd->ulpCommand, + (uint32)cmd->ulpIoTag, + (uint32)cmd->ulpStatus, + cmd->un.ulpWord[4]); /* end varargs */ + fc_abort_discovery(p_dev_ctl); + temp->retry = 0xff; + } + } + + /* Check for aborted ELS command */ + if(temp->retry == 0xff) { + + /* Aborted ELS IOCB */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0104, /* ptr to msg structure */ + fc_mes0104, /* ptr to msg */ + fc_msgBlk0104.msgPreambleStr, /* begin varargs */ + cmd->ulpCommand, + cmd->ulpIoTag); /* end varargs */ + switch (cmd->ulpCommand) { + case CMD_ELS_REQUEST_CR: + case CMD_ELS_REQUEST64_CR: + case CMD_ELS_REQUEST_CX: + case CMD_ELS_REQUEST64_CX: + mp = (MATCHMAP * )xmitiq->bp; + rmp = (MATCHMAP * )xmitiq->info; + lp0 = (uint32 * )mp->virt; + ndlp = 0; + command = *lp0; + switch (command) { + case ELS_CMD_PLOGI: + case ELS_CMD_LOGO: + case ELS_CMD_PRLI: + case ELS_CMD_PDISC: + case ELS_CMD_ADISC: + rmp->fc_mptr = (uchar *)0; + break; + } + break; + case CMD_XMIT_ELS_RSP_CX: /* Normal ELS response completion */ + case CMD_XMIT_ELS_RSP64_CX: /* Normal ELS response completion */ + mp = (MATCHMAP * )xmitiq->bp; + ndlp = (NODELIST * )xmitiq->ndlp; + break; + case CMD_GEN_REQUEST64_CX: + case CMD_GEN_REQUEST64_CR: + if(xmitiq->bpl == 0) { + /* User initiated request */ + drmp = (DMATCHMAP * )xmitiq->info; + drmp->dfc_flag = -1; + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + } + else { + /* Driver initiated request */ + /* Just free resources and let timer timeout */ + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + 
if(xmitiq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->bp); + } + if(xmitiq->info) + fc_free_ct_rsp(p_dev_ctl, (MATCHMAP *)xmitiq->info); + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + } + return(0); + } + goto out2; + } + + /* ELS completion */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0105, /* ptr to msg structure */ + fc_mes0105, /* ptr to msg */ + fc_msgBlk0105.msgPreambleStr, /* begin varargs */ + (uint32)cmd->ulpCommand, + (uint32)cmd->ulpIoTag, + (uint32)cmd->ulpStatus, + cmd->un.ulpWord[4]); /* end varargs */ + + switch (cmd->ulpCommand) { + + case CMD_GEN_REQUEST64_CR: + if(xmitiq->bpl == 0) { + /* User initiated request */ + drmp = (DMATCHMAP * )xmitiq->info; + drmp->dfc_flag = -2; + } + else { + /* Driver initiated request */ + /* Just free resources and let timer timeout */ + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + if(xmitiq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->bp); + } + if(xmitiq->info) + fc_free_ct_rsp(p_dev_ctl, (MATCHMAP *)xmitiq->info); + } + + break; + + case CMD_ELS_REQUEST_CR: /* Local error in ELS command */ + case CMD_ELS_REQUEST64_CR: /* Local error in ELS command */ + + FCSTATCTR.elsXmitErr++; + + /* Find out which command failed */ + mp = (MATCHMAP * )xmitiq->bp; + rmp = (MATCHMAP * )xmitiq->info; + ndlp = (NODELIST *)rmp->fc_mptr; + rmp->fc_mptr = (uchar *)0; + + lp0 = (uint32 * )mp->virt; + command = *lp0; + + if (fc_els_retry(binfo, rp, temp, command, ndlp) == 0) { + /* retry of ELS command failed */ + switch (command) { + case ELS_CMD_FLOGI: /* Fabric login */ + if (ndlp) + ndlp->nlp_flag &= ~NLP_REQ_SND; + fc_freenode_did(binfo, Fabric_DID, 1); + if (binfo->fc_ffstate == FC_FLOGI) { + binfo->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + if (binfo->fc_topology == TOPOLOGY_LOOP) { + binfo->fc_edtov = FF_DEF_EDTOV; + binfo->fc_ratov = FF_DEF_RATOV; + if ((mb=(MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, 
(MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + binfo->fc_flag |= FC_DELAY_DISC; + } else { + /* Device Discovery completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0206, /* ptr to msg structure */ + fc_mes0206, /* ptr to msg */ + fc_msgBlk0206.msgPreambleStr, /* begin varargs */ + cmd->ulpStatus, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5]); /* end varargs */ + binfo->fc_ffstate = FC_ERROR; + if ((mb=(MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_clear_la(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + } + break; + + case ELS_CMD_PLOGI: /* NPort login */ + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + + ndlp->nlp_action &= ~NLP_DO_RNID; + ndlp->nlp_flag &= ~NLP_REQ_SND; + + if((ndlp->nlp_type & (NLP_AUTOMAP | NLP_SEED_MASK)) == 0) { + fc_freenode(binfo, ndlp, 1); + } + else { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + break; + + case ELS_CMD_PRLI: /* Process Log In */ + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & (NLP_DO_DISC_START | NLP_DO_RSCN)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + ndlp->nlp_flag &= ~NLP_REQ_SND; + ndlp->nlp_state = NLP_LOGIN; + fc_nlp_unmap(binfo, ndlp); + break; + + case ELS_CMD_PDISC: /* Pdisc */ + case ELS_CMD_ADISC: /* Adisc */ + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + + 
fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + + ndlp->nlp_action |= NLP_DO_DISC_START; + binfo->fc_nlp_cnt++; + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)((ulong)cmd->un.elsreq.remoteID), (uint32)0, (ushort)0, ndlp); + break; + + case ELS_CMD_LOGO: /* Logout */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)cmd->un.elsreq.remoteID)) == 0) { + break; + } + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + break; + + case ELS_CMD_FARPR: /* Farp-res */ + ep = (ELS_PKT * )lp0; + if((ndlp = fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, + &ep->un.farp.RportName)) == 0) + break; + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & NLP_DO_DISC_START) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + break; + + case ELS_CMD_FARP: /* Farp-req */ + ep = (ELS_PKT * )lp0; + + if((ndlp = fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, + &ep->un.farp.RportName)) == 0) + break; + + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + break; + + case ELS_CMD_SCR: /* State Change Registration */ + break; + + case ELS_CMD_RNID: /* Receive Node Identification */ + break; + + default: + /* Unknown ELS command */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0106, /* ptr to msg structure */ + fc_mes0106, /* ptr to msg */ + fc_msgBlk0106.msgPreambleStr, /* begin varargs */ + command); /* end varargs */ + FCSTATCTR.elsCmdPktInval++; + break; + } + /* ELS command completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0107, /* ptr to msg structure */ + fc_mes0107, /* ptr to msg */ + fc_msgBlk0107.msgPreambleStr, /* begin varargs */ + 
cmd->ulpCommand, + cmd->ulpStatus, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5]); /* end varargs */ + } + else { + /* Retry in progress */ + if ((command == ELS_CMD_PLOGI) && + ((cmd->un.ulpWord[4] & 0xff) == IOERR_LOOP_OPEN_FAILURE)) { + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + } + } + + if (xmitiq->bpl) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->info); + break; + + case CMD_XMIT_ELS_RSP_CX: /* Normal ELS response completion */ + case CMD_XMIT_ELS_RSP64_CX: /* Normal ELS response completion */ + + ndlp = (NODELIST * )xmitiq->ndlp; + did = 0; + bumpcnt = 0; + if ((ndlp) && (ndlp->nlp_flag & NLP_SND_PLOGI)) { + ndlp->nlp_flag &= ~NLP_SND_PLOGI; + did = ndlp->nlp_DID; + if((binfo->fc_flag & FC_RSCN_MODE) || + (binfo->fc_ffstate < FC_READY)) { + binfo->fc_nlp_cnt++; + bumpcnt = 1; + } + } + mp = (MATCHMAP * )xmitiq->bp; + lp0 = (uint32 * )mp->virt; + /* get command that errored */ + command = *lp0++; + sp = (volatile SERV_PARM * )lp0; + if (cmd->ulpStatus) { + /* Error occurred sending ELS response */ + /* check to see if we should retry */ + /* ELS response completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0108, /* ptr to msg structure */ + fc_mes0108, /* ptr to msg */ + fc_msgBlk0108.msgPreambleStr, /* begin varargs */ + cmd->ulpCommand, + cmd->ulpStatus, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5]); /* end varargs */ + FCSTATCTR.elsXmitErr++; + + if (fc_els_retry(binfo, rp, temp, command, ndlp) == 0) { + /* No retries */ + if ((ndlp) && (ndlp->nlp_flag & NLP_RM_ENTRY) && + !(ndlp->nlp_flag & NLP_REQ_SND)) { + if (ndlp->nlp_type & NLP_FCP_TARGET) { + ndlp->nlp_flag &= ~NLP_RM_ENTRY; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + if(binfo->fc_ffstate == FC_READY) { + if(!(binfo->fc_flag & FC_RSCN_MODE)) { + binfo->fc_flag |= FC_RSCN_MODE; + 
ndlp->nlp_action |= NLP_DO_RSCN; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + fc_nextrscn(p_dev_ctl, 1); + } + } + else { + ndlp->nlp_action |= NLP_DO_DISC_START; + fc_nextdisc(p_dev_ctl, 1); + } + } else { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + } + else { + if (ndlp) { + if(!(ndlp->nlp_flag & NLP_REQ_SND)) { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + } + } + } + } else { + FCSTATCTR.elsXmitCmpl++; + if(ndlp) { + /* ELS response completion */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0109, /* ptr to msg structure */ + fc_mes0109, /* ptr to msg */ + fc_msgBlk0109.msgPreambleStr, /* begin varargs */ + ndlp->nlp_DID, + ndlp->nlp_type, + ndlp->nlp_flag, + ndlp->nlp_state); /* end varargs */ + if ((ndlp->nlp_flag & NLP_REG_INP)) { + uint32 size; + MATCHMAP * bmp; + ULP_BDE64 * bpl; + + bmp = (MATCHMAP *)(xmitiq->bpl); + bpl = (ULP_BDE64 * )bmp->virt; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + size = (uint32)bpl->tus.f.bdeSize; + if(size == (sizeof(SERV_PARM) + sizeof(uint32))) { + fc_process_reglogin(p_dev_ctl, ndlp); + } + } + + if ((ndlp->nlp_flag & NLP_RM_ENTRY) && + !(ndlp->nlp_flag & NLP_REQ_SND)) { + if (ndlp->nlp_type & NLP_FCP_TARGET) { + ndlp->nlp_flag &= ~NLP_RM_ENTRY; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + if(binfo->fc_ffstate == FC_READY) { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + if(!(binfo->fc_flag & FC_RSCN_MODE)) { + did = 0; + if(bumpcnt) + binfo->fc_nlp_cnt--; + + binfo->fc_flag |= FC_RSCN_MODE; + ndlp->nlp_action |= NLP_DO_RSCN; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + fc_nextrscn(p_dev_ctl, 1); + } + } + else { + did = 0; + if(bumpcnt) + binfo->fc_nlp_cnt--; + + ndlp->nlp_action |= NLP_DO_DISC_START; + fc_nextdisc(p_dev_ctl, 1); + } + } else { + if (ndlp->nlp_type & NLP_IP_NODE) { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + else { + 
fc_freenode(binfo, ndlp, 1); + } + } + } + } + } + + if (xmitiq->bpl) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + if(did && (!(ndlp->nlp_flag & NLP_NS_REMOVED))) { + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)((ulong)did), + (uint32)0, (ushort)0, ndlp); + } + break; + + case CMD_GEN_REQUEST64_CX: + if(xmitiq->bpl == 0) { + /* User initiated request */ + drmp = (DMATCHMAP * )xmitiq->info; + fc_mpdata_sync(drmp->dfc.dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + + if (cmd->ulpStatus) { + /* Error occurred sending ELS command */ + if ((cmd->un.ulpWord[4] & 0xff) == IOERR_SEQUENCE_TIMEOUT) + drmp->dfc_flag = -1; + else + drmp->dfc_flag = -2; + } + else { + drmp->dfc_flag = (int)(cmd->un.genreq64.bdl.bdeSize); + } + } + else { + /* Driver initiated request */ + if (cmd->ulpStatus == 0) { + mp = (MATCHMAP * )xmitiq->bp; + ndlp = (NODELIST *)mp->fc_mptr; + if(ndlp && (ndlp->nlp_DID == NameServer_DID)) { + fc_ns_rsp(p_dev_ctl, (NODELIST *)mp->fc_mptr, + (MATCHMAP *)xmitiq->info, + (uint32)(cmd->un.genreq64.bdl.bdeSize)); + } + /* FDMI */ + if(ndlp && (ndlp->nlp_DID == FDMI_DID)) { + fc_fdmi_rsp(p_dev_ctl, (MATCHMAP *)mp, (MATCHMAP *)xmitiq->info); + } + } + if(xmitiq->info) + fc_free_ct_rsp(p_dev_ctl, (MATCHMAP *)xmitiq->info); + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + if(xmitiq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->bp); + } + } + break; + + + case CMD_ELS_REQUEST_CX: /* Normal ELS command completion */ + case CMD_ELS_REQUEST64_CX: /* Normal ELS command completion */ + + /* get command that was accepted */ + mp = (MATCHMAP * )xmitiq->bp; + lp0 = (uint32 * )mp->virt; + command = *lp0; + + /* ELS command successful, get ptr to service params */ + rmp = (MATCHMAP * )xmitiq->info; + ndlp = (NODELIST *)rmp->fc_mptr; + rmp->fc_mptr = (uchar *)0; + + lp1 = (uint32 * )rmp->virt; + fc_mpdata_sync(rmp->dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + + sp = (volatile SERV_PARM * )((char *)lp1 + 
sizeof(uint32)); + + if (cmd->ulpStatus) { + /* Error occurred sending ELS command */ + FCSTATCTR.elsXmitErr++; + /* ELS command completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0110, /* ptr to msg structure */ + fc_mes0110, /* ptr to msg */ + fc_msgBlk0110.msgPreambleStr, /* begin varargs */ + command, + cmd->ulpStatus, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5]); /* end varargs */ + if ((command == ELS_CMD_FARP) || + (command == ELS_CMD_FARPR)) { + ep = (ELS_PKT * )lp0; + if((ndlp=fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, &ep->un.farp.RportName))) { + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + } + } + + /* If error occurred on ADISC/PDISC, check to see if address + * still needs to be authenticated. + */ + if ((command == ELS_CMD_ADISC) || (command == ELS_CMD_PDISC)) { + if(ndlp == 0) { + ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID); + } + } + else { + if (command == ELS_CMD_PLOGI) { + if(ndlp == 0) { + ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID); + } + } + } + + /* check to see if we should retry */ + if (fc_els_retry(binfo, rp, temp, command, ndlp) == 0) { + /* retry of ELS command failed */ + switch (command) { + case ELS_CMD_FLOGI: + if (ndlp) + ndlp->nlp_flag &= ~NLP_REQ_SND; + if (binfo->fc_ffstate == FC_FLOGI) { + binfo->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + fc_freenode_did(binfo, Fabric_DID, 1); + if (binfo->fc_topology == TOPOLOGY_LOOP) { + binfo->fc_edtov = FF_DEF_EDTOV; + binfo->fc_ratov = FF_DEF_RATOV; + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + binfo->fc_flag |= FC_DELAY_DISC; + } else { + /* Device Discovery completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0207, /* ptr to msg structure */ + 
fc_mes0207, /* ptr to msg */ + fc_msgBlk0207.msgPreambleStr); /* begin & end varargs */ + binfo->fc_ffstate = FC_ERROR; + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, + MEM_MBOX | MEM_PRI))) { + fc_clear_la(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + } + break; + + case ELS_CMD_PLOGI: + /* Cache entry in case we are in a + * LOGI collision. + */ + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN)) { + if((ndlp->nlp_state < NLP_LOGIN) && + !(ndlp->nlp_flag & NLP_REG_INP)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + } + + ndlp->nlp_action &= ~NLP_DO_RNID; + ndlp->nlp_flag &= ~NLP_REQ_SND; + + if((ndlp->nlp_type & (NLP_AUTOMAP | NLP_SEED_MASK)) == 0) { + fc_freenode(binfo, ndlp, 1); + } + else { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + break; + + case ELS_CMD_PRLI: + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & (NLP_DO_DISC_START | NLP_DO_RSCN)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + ndlp->nlp_flag &= ~NLP_REQ_SND; + ndlp->nlp_state = NLP_LOGIN; + fc_nlp_unmap(binfo, ndlp); + break; + + case ELS_CMD_PDISC: + case ELS_CMD_ADISC: + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + + ndlp->nlp_action |= NLP_DO_DISC_START; + binfo->fc_nlp_cnt++; + fc_els_cmd(binfo, ELS_CMD_PLOGI, + (void *)((ulong)cmd->un.elsreq.remoteID), + (uint32)0, (ushort)0, ndlp); + 
break; + + case ELS_CMD_LOGO: + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + break; + + case ELS_CMD_FARP: /* Farp-req */ + if (ndlp == 0) + break; + + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + break; + + case ELS_CMD_FARPR: /* Farp-res */ + if (ndlp == 0) + break; + + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + break; + + case ELS_CMD_SCR: /* State Change Registration */ + break; + + case ELS_CMD_RNID: /* Node Identification */ + break; + + default: + FCSTATCTR.elsCmdPktInval++; + + /* Unknown ELS command */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0111, /* ptr to msg structure */ + fc_mes0111, /* ptr to msg */ + fc_msgBlk0111.msgPreambleStr, /* begin varargs */ + command); /* end varargs */ + break; + } + } + else { + /* Retry in progress */ + if ((command == ELS_CMD_PLOGI) && + ((cmd->un.ulpWord[4] & 0xff) == IOERR_LOOP_OPEN_FAILURE)) { + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + } + } + } else { + FCSTATCTR.elsXmitCmpl++; + + /* Process successful command completion */ + switch (command) { + case ELS_CMD_FLOGI: + /* FLOGI completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0208, /* ptr to msg structure */ + fc_mes0208, /* ptr to msg */ + fc_msgBlk0208.msgPreambleStr, /* begin varargs */ + cmd->un.ulpWord[4], + sp->cmn.e_d_tov, + sp->cmn.w2.r_a_tov, + sp->cmn.edtovResolution); /* end varargs */ + if (ndlp) + 
ndlp->nlp_flag &= ~NLP_REQ_SND; + + /* register the login, REG_LOGIN */ + if (binfo->fc_ffstate == FC_FLOGI) { + /* If Common Service Parameters indicate Nport + * we are point to point, if Fport we are Fabric. + */ + if (sp->cmn.fPort) { + binfo->fc_flag |= FC_FABRIC; + if (sp->cmn.edtovResolution) { + /* E_D_TOV ticks are in nanoseconds */ + binfo->fc_edtov = (SWAP_DATA(sp->cmn.e_d_tov) + 999999) / 1000000; + } else { + /* E_D_TOV ticks are in milliseconds */ + binfo->fc_edtov = SWAP_DATA(sp->cmn.e_d_tov); + } + binfo->fc_ratov = (SWAP_DATA(sp->cmn.w2.r_a_tov) + 999) / 1000; + + if (binfo->fc_topology == TOPOLOGY_LOOP) { + binfo->fc_flag |= FC_PUBLIC_LOOP; + } else { + /* If we are a N-port connected to a Fabric, + * fixup sparam's so logins to devices on + * remote loops work. + */ + binfo->fc_sparam.cmn.altBbCredit = 1; + } + + binfo->fc_myDID = cmd->un.ulpWord[4] & Mask_DID; + + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + ndlp->nlp_type |= NLP_FABRIC; + ndlp->nlp_flag &= ~NLP_REQ_SND; + + fc_nlp_logi(binfo, ndlp, + (NAME_TYPE *)&sp->portName, (NAME_TYPE *)&sp->nodeName); + + if ((mb=(MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + + /* register the login with adapter */ + if (ndlp->nlp_Rpi == 0) { + fc_bcopy((void *)sp, (void *) & binfo->fc_fabparam, + sizeof(SERV_PARM)); + + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, + MEM_MBOX | MEM_PRI))) { + fc_reg_login(binfo, cmd->un.elsreq.remoteID, + (uchar * )sp, (MAILBOX * )mb, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + + binfo->fc_flag |= FC_DELAY_DISC; + } else { + binfo->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + binfo->fc_edtov = FF_DEF_EDTOV; + binfo->fc_ratov = 
FF_DEF_RATOV; + if ((rc = fc_geportname((NAME_TYPE * ) & binfo->fc_portname, + (NAME_TYPE * ) & sp->portName))) { + /* This side will initiate the PLOGI */ + binfo->fc_flag |= FC_PT2PT_PLOGI; + + /* N_Port ID cannot be 0, set our to LocalID the + * other side will be RemoteID. + */ + fc_freenode_did(binfo, 0, 1); + + /* not equal */ + if (rc == 1) + binfo->fc_myDID = PT2PT_LocalID; + rc = 0; + + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, + MEM_MBOX | MEM_PRI))) { + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)binfo->fc_myDID)) == 0) { + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = binfo->fc_myDID; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + else + break; + } + ndlp->nlp_DID = binfo->fc_myDID; + fc_nlp_logi(binfo, ndlp, + (NAME_TYPE *)&sp->portName, (NAME_TYPE *)&sp->nodeName); + } else { + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, + MEM_MBOX | MEM_PRI))) { + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + binfo->fc_flag |= FC_PT2PT; + /* Use Fabric timer as pt2pt link up timer */ + binfo->fc_fabrictmo = (2 * binfo->fc_ratov) + + ((4 * binfo->fc_edtov) / 1000) + 1; + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + else { + FABRICTMO = fc_clk_set(p_dev_ctl, binfo->fc_fabrictmo, + fc_fabric_timeout, 0, 0); + } + fc_freenode_did(binfo, Fabric_DID, 1); + + /* This is Login at init, clear la */ + if ((mb=(MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + binfo->fc_ffstate = FC_CLEAR_LA; + fc_clear_la(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, 
MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + } else { + FCSTATCTR.elsRcvDrop++; + fc_freenode_did(binfo, Fabric_DID, 1); + } + break; + + case ELS_CMD_PLOGI: + /* PLOGI completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0112, /* ptr to msg structure */ + fc_mes0112, /* ptr to msg */ + fc_msgBlk0112.msgPreambleStr, /* begin varargs */ + cmd->un.elsreq.remoteID, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5], + binfo->fc_ffstate); /* end varargs */ + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + fc_nlp_logi(binfo, ndlp, + (NAME_TYPE *)&sp->portName, (NAME_TYPE *)&sp->nodeName); + + if (ndlp->nlp_DID != NameServer_DID) + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + + + ndlp->nlp_action &= ~NLP_DO_RNID; + + if (binfo->fc_flag & FC_PT2PT) { + /* Device Discovery completes */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0209, /* ptr to msg structure */ + fc_mes0209, /* ptr to msg */ + fc_msgBlk0209.msgPreambleStr); /* begin & end varargs */ + /* Fix up any changed RPIs in FCP IOCBs queued up a txq */ + fc_fcp_fix_txq(p_dev_ctl); + + binfo->fc_ffstate = FC_READY; + + binfo->fc_firstopen = 0; + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, FABRICTMO); + FABRICTMO = 0; + } + ndlp->nlp_Rpi = 0; /* Keep the same rpi */ + } + + if (ndlp->nlp_Rpi) { + /* must explicitly unregister the login, UREG_LOGIN */ + /* This is so pending I/Os get returned with NO_RPI */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_unreg_login(binfo, ndlp->nlp_Rpi, (MAILBOX * )mb); + if (issue_mb_cmd(binfo,(MAILBOX * )mb,MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + binfo->fc_nlplookup[ndlp->nlp_Rpi] = 0; + ndlp->nlp_Rpi = 0; + } + + /* register the login, REG_LOGIN */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_reg_login(binfo, cmd->un.elsreq.remoteID, (uchar * 
)sp, + (MAILBOX * )mb, 0); + if (issue_mb_cmd(binfo,(MAILBOX * )mb,MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + + /* Fill in the FCP/IP class */ + { + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if ( (clp[CFG_FCP_CLASS].a_current == CLASS2) && + (sp->cls2.classValid) ) { + ndlp->id.nlp_fcp_info |= CLASS2; + } else { + ndlp->id.nlp_fcp_info |= CLASS3; + } + + if ( (clp[CFG_IP_CLASS].a_current == CLASS2) && + (sp->cls2.classValid) ) { + ndlp->id.nlp_ip_info = CLASS2; + } else { + ndlp->id.nlp_ip_info = CLASS3; + } + } + + /* REG_LOGIN cmpl will goto nextnode */ + ndlp->nlp_flag &= ~NLP_REQ_SND; + ndlp->nlp_flag |= NLP_REG_INP; + break; + + case ELS_CMD_PRLI: + /* PRLI completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0113, /* ptr to msg structure */ + fc_mes0113, /* ptr to msg */ + fc_msgBlk0113.msgPreambleStr, /* begin varargs */ + cmd->un.elsreq.remoteID, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5], + binfo->fc_ffstate); /* end varargs */ + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + cmd->un.elsreq.remoteID)) == 0)) + break; + + ndlp->nlp_flag &= ~NLP_REQ_SND; + + /* If we are in the middle of Discovery or in pt2pt mode */ + if ((ndlp->nlp_action & (NLP_DO_DISC_START | NLP_DO_RSCN)) || + (binfo->fc_flag & FC_PT2PT)) { + int index; + node_t * node_ptr; + PRLI * npr; + + npr = (PRLI * )sp; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && + (npr->prliType == PRLI_FCP_TYPE) && + (npr->targetFunc == 1)) { + + if(clp[CFG_FCP_ON].a_current) { + if (!(fc_assign_scsid(p_dev_ctl, ndlp))) { + /* No more SCSI ids available */ + fc_nextnode(p_dev_ctl, ndlp); + ndlp->nlp_state = NLP_PRLI; + fc_nlp_unmap(binfo, ndlp); + ndlp->nlp_action &= ~NLP_DO_SCSICMD; + break; + } + + if ((node_ptr = (node_t * )ndlp->nlp_targetp) == NULL) { + index = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + node_ptr = binfo->device_queue_hash[index].node_ptr; + } + /* 
PRLI target assigned */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0210, /* ptr to msg structure */ + fc_mes0210, /* ptr to msg */ + fc_msgBlk0210.msgPreambleStr, /* begin varargs */ + cmd->un.ulpWord[5], /* did */ + ndlp->id.nlp_pan, + ndlp->id.nlp_sid); /* end varargs */ + /* Now check for FCP-2 support */ + if(node_ptr) { + if(npr->Retry && npr->TaskRetryIdReq) + node_ptr->flags |= FC_FCP2_RECOVERY; + else + node_ptr->flags &= ~FC_FCP2_RECOVERY; + } + + } + else { + goto prlierr; + } + + /* If PRLI is successful, we have a FCP target device */ + if (((PRLI * )sp)->Retry == 1) { + ndlp->id.nlp_fcp_info |= NLP_FCP_2_DEVICE; + } + ndlp->nlp_type |= NLP_FCP_TARGET; + if((ndlp->nlp_type & NLP_SEED_MASK) == 0) { + switch(p_dev_ctl->fcp_mapping) { + case FCP_SEED_DID: + ndlp->nlp_type |= NLP_SEED_DID; + break; + case FCP_SEED_WWPN: + ndlp->nlp_type |= NLP_SEED_WWPN; + break; + case FCP_SEED_WWNN: + default: + ndlp->nlp_type |= NLP_SEED_WWNN; + break; + } + if(clp[CFG_AUTOMAP].a_current) + ndlp->nlp_type |= NLP_AUTOMAP; + } + ndlp->nlp_state = NLP_ALLOC; + fc_nlp_map(binfo, ndlp); + + /* Fix up any changed RPIs in FCP IOCBs queued up a txq */ + fc_fcp_fix_txq(p_dev_ctl); + + fc_nextnode(p_dev_ctl, ndlp); + } else { +prlierr: + ndlp->nlp_state = NLP_LOGIN; + fc_nlp_unmap(binfo, ndlp); + fc_nextnode(p_dev_ctl, ndlp); + } + } + else { + PRLI * npr; + + npr = (PRLI * )sp; + if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && + (npr->prliType == PRLI_FCP_TYPE) && + (npr->targetFunc == 1)) { + if(ndlp->nlp_type & NLP_FCP_TARGET) { + ndlp->nlp_state = NLP_ALLOC; + fc_nlp_map(binfo, ndlp); + } + else { + ndlp->nlp_state = NLP_PRLI; + fc_nlp_unmap(binfo, ndlp); + } + } + else { + ndlp->nlp_state = NLP_LOGIN; + fc_nlp_unmap(binfo, ndlp); + } + } + + ndlp->nlp_action &= ~NLP_DO_SCSICMD; + break; + + case ELS_CMD_PRLO: + /* PRLO completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0114, /* ptr to msg structure */ + fc_mes0114, /* ptr to msg 
*/ + fc_msgBlk0114.msgPreambleStr, /* begin varargs */ + cmd->un.elsreq.remoteID, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5], + binfo->fc_ffstate); /* end varargs */ + break; + + case ELS_CMD_LOGO: + /* LOGO completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0115, /* ptr to msg structure */ + fc_mes0115, /* ptr to msg */ + fc_msgBlk0115.msgPreambleStr, /* begin varargs */ + cmd->un.elsreq.remoteID, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5], + binfo->fc_ffstate); /* end varargs */ + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + break; + + case ELS_CMD_PDISC: + /* PDISC completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0116, /* ptr to msg structure */ + fc_mes0116, /* ptr to msg */ + fc_msgBlk0116.msgPreambleStr, /* begin varargs */ + cmd->un.elsreq.remoteID, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5], + binfo->fc_ffstate); /* end varargs */ + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + + /* If we are in the middle of Address Authentication */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH)) { + if (fc_chkpadisc(binfo, ndlp, &sp->nodeName, + &sp->portName) == 0) { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + ndlp->nlp_action |= NLP_DO_DISC_START; + } + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + ndlp->nlp_flag &= ~NLP_REQ_SND_PDISC; + break; + + case ELS_CMD_ADISC: + /* ADISC completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0117, /* ptr to msg structure */ + fc_mes0117, /* 
ptr to msg */ + fc_msgBlk0117.msgPreambleStr, /* begin varargs */ + cmd->un.elsreq.remoteID, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5], + binfo->fc_ffstate); /* end varargs */ + if ((ndlp == 0) && ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)cmd->un.elsreq.remoteID)) == 0)) { + break; + } + ndlp->nlp_flag &= ~NLP_REQ_SND_ADISC; + + /* If we are in the middle of Address Authentication */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_RSCN)) { + + ap = (ADISC * )sp; + if(fc_chkpadisc(binfo, ndlp, &ap->nodeName,&ap->portName) == 0) { + ndlp->nlp_action &= ~(NLP_DO_ADDR_AUTH | NLP_DO_DISC_START); + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + + + if((binfo->fc_flag & FC_RSCN_MODE) || + (binfo->fc_ffstate < FC_READY)) { + ndlp->nlp_DID = (uint32)cmd->un.elsreq.remoteID; + + + if(binfo->fc_flag & FC_RSCN_MODE) { + ndlp->nlp_action |= NLP_DO_RSCN; + binfo->fc_nlp_cnt--; + if (binfo->fc_nlp_cnt <= 0) { + binfo->fc_nlp_cnt = 0; + fc_nextrscn(p_dev_ctl, fc_max_els_sent); + } + } + else { + ndlp->nlp_action |= NLP_DO_DISC_START; + binfo->fc_nlp_cnt--; + if (binfo->fc_nlp_cnt <= 0) { + binfo->fc_nlp_cnt = 0; + fc_nextdisc(p_dev_ctl, fc_max_els_sent); + } + } + } + } + else { + fc_nextnode(p_dev_ctl, ndlp); + } + } + break; + + case ELS_CMD_FARP: + case ELS_CMD_FARPR: + /* FARP completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0118, /* ptr to msg structure */ + fc_mes0118, /* ptr to msg */ + fc_msgBlk0118.msgPreambleStr, /* begin varargs */ + cmd->un.elsreq.remoteID, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5], + command ); /* end varargs */ + ep = (ELS_PKT * )lp0; + if((ndlp = fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, + &ep->un.farp.RportName)) == 0) + break; + + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + break; + + case ELS_CMD_SCR: /* State Change Registration */ + /* SCR completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + 
&fc_msgBlk0119, /* ptr to msg structure */ + fc_mes0119, /* ptr to msg */ + fc_msgBlk0119.msgPreambleStr, /* begin varargs */ + cmd->un.elsreq.remoteID, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5], + binfo->fc_ffstate); /* end varargs */ + break; + + case ELS_CMD_RNID: /* Node Identification */ + /* RNID completes successfully */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0120, /* ptr to msg structure */ + fc_mes0120, /* ptr to msg */ + fc_msgBlk0120.msgPreambleStr, /* begin varargs */ + cmd->un.elsreq.remoteID, + cmd->un.ulpWord[4], + cmd->un.ulpWord[5], + binfo->fc_ffstate); /* end varargs */ + break; + + default: + FCSTATCTR.elsCmdPktInval++; + + /* Unknown ELS command completed */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0121, /* ptr to msg structure */ + fc_mes0121, /* ptr to msg */ + fc_msgBlk0121.msgPreambleStr, /* begin varargs */ + command); /* end varargs */ + break; + } + } + if (xmitiq->bpl) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )rmp); + break; + + default: + FCSTATCTR.elsCmdIocbInval++; + + /* Unknown ELS IOCB */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0122, /* ptr to msg structure */ + fc_mes0122, /* ptr to msg */ + fc_msgBlk0122.msgPreambleStr, /* begin varargs */ + cmd->ulpCommand ); /* end varargs */ + +out2: + rc = EINVAL; + + if(xmitiq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->bp); + } + if(xmitiq->info) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->info); + } + if(xmitiq->bpl) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + + break; + } /* switch(cmd->ulpCommand) */ + + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + + return(rc); +} /* End handle_els_event */ + + +/**********************************************/ +/** handle_rcv_els_req **/ +/** **/ +/** Process an incoming ELS request **/ +/**********************************************/ +_static_ int 
+handle_rcv_els_req( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *temp) +{ + IOCB * iocb; + FC_BRD_INFO * binfo; + uint32 * lp; + NODELIST * ndlp; + volatile SERV_PARM * sp; + NAME_TYPE * np; + ELS_PKT * ep; + MAILBOXQ * mb; + MATCHMAP * mp; + IOCBQ * saveq; + IOCBQ * iocbq; + uchar * bp; + uchar * bdeAddr; + iCfgParam * clp; + int i, cnt; + uint32 cmd; + uint32 did; + LS_RJT stat; + REG_WD30 wd30; + + iocb = &temp->iocb; + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + if (binfo->fc_flag & FC_SLI2) { + /* type of ELS cmd is first 32bit word in packet */ + mp = fc_getvaddr(p_dev_ctl, rp, + (uchar * )getPaddr(iocb->un.cont64[0].addrHigh, + iocb->un.cont64[0].addrLow)); + } else { + /* type of ELS cmd is first 32bit word in packet */ + mp = fc_getvaddr(p_dev_ctl, rp, (uchar * )((ulong)iocb->un.cont[0].bdeAddress)); + } + + if (mp == 0) { + + if (binfo->fc_flag & FC_SLI2) { + bdeAddr = (uchar *)getPaddr(iocb->un.cont64[0].addrHigh, + iocb->un.cont64[0].addrLow); + } + else { + bdeAddr = (uchar *)((ulong)iocb->un.cont[0].bdeAddress); + } + FCSTATCTR.elsRcvDrop++; + + goto out; + } + + bp = mp->virt; + lp = (uint32 * )bp; + cmd = *lp++; + + /* Received ELS command */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0123, /* ptr to msg structure */ + fc_mes0123, /* ptr to msg */ + fc_msgBlk0123.msgPreambleStr, /* begin varargs */ + cmd, + iocb->un.ulpWord[5], + iocb->ulpStatus, + binfo->fc_ffstate); /* end varargs */ + if ((iocb->ulpStatus) || + ((binfo->fc_ffstate <= FC_FLOGI) && + ((cmd != ELS_CMD_FLOGI) && (cmd != ELS_CMD_ADISC) && + (cmd != ELS_CMD_FAN)))) { + if ((iocb->ulpStatus == 0) && (cmd == ELS_CMD_PLOGI)) { + /* Do this for pt2pt as well, testing with miniport driver */ + + /* Reject this request because we are in process of discovery */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, 
(uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, 0); + } + + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + + i = 1; + /* free resources associated with iocb and repost the ring buffers */ + if (!(binfo->fc_flag & FC_SLI2)) { + for (i = 1; i < (int)iocb->ulpBdeCount; i++) { + mp = fc_getvaddr(p_dev_ctl, rp, (uchar * )((ulong)iocb->un.cont[i].bdeAddress)); + if (mp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + } + } + } + fc_post_buffer(p_dev_ctl, rp, i); + /* Drop frame if there is an error */ + FCSTATCTR.elsRcvDrop++; + return(0); + } + + /* Special case RSCN cause 2 byte payload length field is variable */ + if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { + /* Received RSCN command */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0211, /* ptr to msg structure */ + fc_mes0211, /* ptr to msg */ + fc_msgBlk0211.msgPreambleStr, /* begin varargs */ + binfo->fc_flag, + binfo->fc_defer_rscn.q_cnt, + binfo->fc_rscn.q_cnt, + binfo->fc_mbox_active ); /* end varargs */ + /* ACCEPT the rscn request */ + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(uint32)), 0); + + if ((binfo->fc_flag & FC_RSCN_DISCOVERY) || + ((binfo->fc_flag & FC_RSCN_MODE) && !(binfo->fc_flag & FC_NSLOGI_TMR)) || + (binfo->fc_ffstate != FC_READY)) { + if(binfo->fc_defer_rscn.q_cnt > FC_MAX_HOLD_RSCN) { + binfo->fc_flag |= (FC_RSCN_DISCOVERY | FC_RSCN_MODE); + fc_flush_rscn_defer(p_dev_ctl); + goto dropit; + } + if(binfo->fc_flag & FC_RSCN_DISCOVERY) { + goto dropit; + } + else { + /* get an iocb buffer to copy entry into */ + if ((saveq = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB | MEM_PRI)) != NULL) { + fc_bcopy((uchar *)temp, (uchar *)saveq, sizeof(IOCBQ)); + if (binfo->fc_defer_rscn.q_first) { + /* queue command to end of list */ + ((IOCBQ * )binfo->fc_defer_rscn.q_last)->q = (uchar * )saveq; + binfo->fc_defer_rscn.q_last = (uchar * )saveq; + } else { + /* add command to empty list */ + 
binfo->fc_defer_rscn.q_first = (uchar * )saveq; + binfo->fc_defer_rscn.q_last = (uchar * )saveq; + } + saveq->q = NULL; + *((MATCHMAP **)&saveq->iocb) = mp; + binfo->fc_defer_rscn.q_cnt++; + binfo->fc_flag |= FC_RSCN_MODE; + if (!(binfo->fc_flag & FC_SLI2)) { + i = (int)iocb->ulpBdeCount; + } + else { + i = 1; + } + fc_post_buffer(p_dev_ctl, rp, i); + return(0); + } + else + goto dropit; + } + } + + /* Make sure all outstanding Mailbox cmds (reg/unreg login) are processed + * before processing RSCN. + */ + if (binfo->fc_mbox_active) { + /* get an iocb buffer to copy entry into */ + if ((saveq = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB | MEM_PRI)) != NULL) { + fc_bcopy((uchar *)temp, (uchar *)saveq, sizeof(IOCBQ)); + binfo->fc_flag |= (FC_DELAY_RSCN | FC_RSCN_MODE); + if (binfo->fc_rscn.q_first) { + /* queue command to end of list */ + ((IOCBQ * )binfo->fc_rscn.q_last)->q = (uchar * )saveq; + binfo->fc_rscn.q_last = (uchar * )saveq; + } else { + /* add command to empty list */ + binfo->fc_rscn.q_first = (uchar * )saveq; + binfo->fc_rscn.q_last = (uchar * )saveq; + } + + saveq->q = NULL; + *((MATCHMAP **)&saveq->iocb) = mp; + binfo->fc_rscn.q_cnt++; + if (!(binfo->fc_flag & FC_SLI2)) { + i = (int)iocb->ulpBdeCount; + } + else { + i = 1; + } + fc_post_buffer(p_dev_ctl, rp, i); + return(0); + } + else + goto dropit; + } + cmd &= ELS_CMD_MASK; + } + + switch (cmd) { + case ELS_CMD_PLOGI: + case ELS_CMD_FLOGI: + sp = (volatile SERV_PARM * )lp; + did = iocb->un.elsreq.remoteID; + if (cmd == ELS_CMD_FLOGI) { + FCSTATCTR.elsRcvFLOGI++; + if (binfo->fc_topology == TOPOLOGY_LOOP) { + /* We should never recieve a FLOGI in loop mode, ignore it */ + /* An FLOGI ELS command was received from DID in Loop Mode */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0124, /* ptr to msg structure */ + fc_mes0124, /* ptr to msg */ + fc_msgBlk0124.msgPreambleStr, /* begin varargs */ + cmd, + did); /* end varargs */ + break; + } + did = Fabric_DID; + if (fc_chksparm(binfo, sp, CLASS3)) 
{ + /* For a FLOGI we accept, then if our portname is greater + * then the remote portname we initiate Nport login. + */ + int rc; + MAILBOX *tmpmb; + + rc = fc_geportname((NAME_TYPE * ) & binfo->fc_portname, + (NAME_TYPE * ) & sp->portName); + + if (rc == 2) { /* ourselves */ + /* It's ourselves, so we will just reset link */ + if ((tmpmb = (MAILBOX * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI)) == NULL) { + binfo->fc_ffstate = FC_ERROR; + return(1); + } + + binfo->fc_flag |= FC_SCSI_RLIP; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* Setup and issue mailbox INITIALIZE LINK command */ + fc_linkdown(p_dev_ctl); + fc_init_link(binfo, (MAILBOX * )tmpmb, clp[CFG_TOPOLOGY].a_current, clp[CFG_LINK_SPEED].a_current); + tmpmb->un.varInitLnk.lipsr_AL_PA = 0; + if (issue_mb_cmd(binfo, (MAILBOX * )tmpmb, MBX_NOWAIT) != MBX_BUSY) + fc_mem_put(binfo, MEM_MBOX, (uchar * )tmpmb); + break; + } + + if (p_dev_ctl->fc_waitflogi) { + if (p_dev_ctl->fc_waitflogi != (FCCLOCK *)1) + fc_clk_can(p_dev_ctl, p_dev_ctl->fc_waitflogi); + p_dev_ctl->fc_waitflogi = 0; + p_dev_ctl->power_up = 1; + fc_snd_flogi(p_dev_ctl, 0, 0); + } + + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)sizeof(SERV_PARM), 0); + if (rc == 1) { /* greater than */ + binfo->fc_flag |= FC_PT2PT_PLOGI; + } + binfo->fc_flag |= FC_PT2PT; + /* Use Fabric timer as pt2pt link up timer */ + binfo->fc_fabrictmo = (2 * binfo->fc_ratov) + + ((4 * binfo->fc_edtov) / 1000) + 1; + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + else { + FABRICTMO = fc_clk_set(p_dev_ctl, binfo->fc_fabrictmo, + fc_fabric_timeout, 0, 0); + } + binfo->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + } else { + /* Reject this request because invalid parameters */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + 
(uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, + 0); + } + break; + } + FCSTATCTR.elsRcvPLOGI++; + + if (!(binfo->fc_flag & FC_PT2PT) && (binfo->fc_ffstate <= FC_FLOGI)) { + /* Reject this PLOGI because we are in rediscovery */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, 0); + break; + } + + if(did == NameServer_DID) + break; + + if((did & Fabric_DID_MASK) != Fabric_DID_MASK) { + /* Check to see if an existing cached entry is bad */ + ndlp = fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, (NAME_TYPE *)&sp->portName); + if (ndlp && ndlp->nlp_DID && (ndlp->nlp_DID != did)) { + /* Check for a FARP generated nlplist entry */ + if (ndlp->nlp_DID == Bcast_DID) + ndlp->nlp_DID = did; + else { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + } + } + + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, did)) == 0) { + /* This is a new node so allocate an nlplist entry and accept + * the LOGI request. 
+ */ + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = did; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + fc_nlp_logi(binfo, ndlp, + (NAME_TYPE *)&sp->portName, (NAME_TYPE *)&sp->nodeName); + } + else + break; + } + /* Received PLOGI command */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0125, /* ptr to msg structure */ + fc_mes0125, /* ptr to msg */ + fc_msgBlk0125.msgPreambleStr, /* begin varargs */ + ndlp->nlp_DID, + ndlp->nlp_state, + ndlp->nlp_flag, + ndlp->nlp_Rpi); /* end varargs */ + /* If we are pt2pt and this is the first PLOGI rcv'ed */ + if ((binfo->fc_flag & FC_PT2PT) && (binfo->fc_myDID == 0)) { + if(!(fc_chksparm(binfo, sp, CLASS3))) { + /* Reject this request because invalid parameters */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, + ndlp); + break; + } + wd30.word = 0; + wd30.f.xri = iocb->ulpContext; + wd30.f.class = iocb->ulpClass; + + fc_freenode_did(binfo, 0, 1); + binfo->fc_myDID = iocb->un.ulpWord[4] & Mask_DID; + + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, binfo->fc_myDID)) == 0) { + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = binfo->fc_myDID; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + } + if(ndlp) { + ndlp->nlp_DID = 
binfo->fc_myDID; + fc_nlp_logi(binfo, ndlp, &binfo->fc_portname, &binfo->fc_nodename); + if ((mb=(MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_reg_login(binfo, binfo->fc_myDID, + (uchar * ) & binfo->fc_sparam, (MAILBOX * )mb, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + /* Device Discovery completes */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0212, /* ptr to msg structure */ + fc_mes0212, /* ptr to msg */ + fc_msgBlk0212.msgPreambleStr); /* begin & end varargs */ + binfo->fc_ffstate = FC_READY; + + binfo->fc_firstopen = 0; + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, FABRICTMO); + FABRICTMO = 0; + } + + /* issue mailbox command to register login with the adapter */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_reg_login(binfo,did,(uchar * )sp, (MAILBOX * )mb, wd30.word); + if (issue_mb_cmd(binfo,(MAILBOX * )mb,MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + break; + } + + cnt = 1; + switch(ndlp->nlp_state) { + case NLP_PLOGI: + cnt = 0; + break; + + case NLP_LIMBO: + if (ndlp->nlp_flag & NLP_REQ_SND) { + cnt = 0; + break; + } + + case NLP_LOGOUT: + fc_nlp_logi(binfo, ndlp, + (NAME_TYPE *)&sp->portName, (NAME_TYPE *)&sp->nodeName); + + case NLP_LOGIN: + case NLP_PRLI: + case NLP_ALLOC: + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + /* Keep the rpi we have and send ACC / LS_RJT */ + if (fc_chksparm(binfo, sp, CLASS3)) { + + if (ndlp->nlp_Rpi) { + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(SERV_PARM)), ndlp); + break; + } + /* no rpi so we must reglogin */ + ndlp->nlp_flag |= NLP_RCV_PLOGI; + wd30.word = 0; + wd30.f.xri = iocb->ulpContext; + wd30.f.class = iocb->ulpClass; + /* issue mailbox command to register login with the adapter */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) 
{ + fc_reg_login(binfo,did,(uchar * )sp, (MAILBOX * )mb, wd30.word); + if (issue_mb_cmd(binfo,(MAILBOX * )mb,MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } else { + /* Reject this request because invalid parameters */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, + ndlp); + + if ((ndlp->nlp_state == NLP_ALLOC) && (binfo->fc_ffstate == FC_READY)) { + /* unregister existing login first */ + ndlp->nlp_flag |= NLP_UNREG_LOGO; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + } + break; + } + + if(cnt) + break; + + + did = iocb->un.elsreq.remoteID; + + /* If a nlplist entry already exists, we potentially have + * a PLOGI collision. + */ + + if (!(ndlp->nlp_flag & NLP_REQ_SND)) { + /* In this case we are already logged in */ + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + goto chkparm; + } + + FCSTATCTR.elsLogiCol++; + + /* For a PLOGI, we only accept if our portname is less + * than the remote portname. 
+ */ + if (!(fc_geportname((NAME_TYPE * ) & binfo->fc_portname, + (NAME_TYPE * ) & sp->portName))) { +chkparm: + fc_nlp_logi(binfo, ndlp, + (NAME_TYPE *)&sp->portName, (NAME_TYPE *)&sp->nodeName); + if (fc_chksparm(binfo, sp, CLASS3)) { + /* PLOGI chkparm OK */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0126, /* ptr to msg structure */ + fc_mes0126, /* ptr to msg */ + fc_msgBlk0126.msgPreambleStr, /* begin varargs */ + ndlp->nlp_DID, + ndlp->nlp_state, + ndlp->nlp_flag, + ndlp->nlp_Rpi ); /* end varargs */ + if (ndlp->nlp_Rpi == 0) { + if (ndlp->nlp_flag & NLP_REQ_SND) { + /* Abort the current outstanding PLOGI */ + unsigned long iflag; + + iflag = lpfc_q_disable_lock(p_dev_ctl); + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + if(iocbq->iocb.un.elsreq.remoteID == ndlp->nlp_DID) { + iocbq->retry = 0xff; + } + iocbq = (IOCBQ * )iocbq->q; + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + /* In case its on fc_delay_timeout list */ + fc_abort_delay_els_cmd(p_dev_ctl, ndlp->nlp_DID); + + ndlp->nlp_flag &= ~NLP_REQ_SND; + /* The following reg_login acts as if original PLOGI cmpl */ + } + else + ndlp->nlp_flag |= NLP_RCV_PLOGI; + + wd30.word = 0; + wd30.f.xri = iocb->ulpContext; + wd30.f.class = iocb->ulpClass; + ndlp->nlp_flag |= NLP_REG_INP; + + /* issue mailbox command to register login with the adapter */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_reg_login(binfo, did, (uchar * )sp, (MAILBOX * )mb, + wd30.word); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } else { + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(SERV_PARM)), ndlp); + } + } else { + /* Reject this request because invalid parameters */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, 
ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, + ndlp); + + if (binfo->fc_ffstate == FC_READY) { + /* unregister existing login first */ + ndlp->nlp_flag |= NLP_UNREG_LOGO; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + } + } else { + /* Reject this request because the remote node will accept ours */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, ndlp); + } + break; + + case ELS_CMD_LOGO: + FCSTATCTR.elsRcvLOGO++; + goto skip1; + case ELS_CMD_PRLO: + FCSTATCTR.elsRcvPRLO++; +skip1: + lp++; /* lp now points to portname */ + np = (NAME_TYPE * )lp; + did = iocb->un.elsreq.remoteID; + + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, did)) == 0) { + if(((ndlp = fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, np)) == 0) || + (ndlp->nlp_DID == 0)) + /* ACCEPT the logout request */ + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(uint32)), 0); + break; + } + + + if (ndlp) { + if((ndlp->nlp_state >= NLP_LOGIN) || + ((!(ndlp->nlp_flag & (NLP_FARP_SND | NLP_RM_ENTRY))) && + (!(ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN))))) { + /* ACCEPT the logout request */ + unsigned long iflag; + + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(uint32)), ndlp); + ndlp->nlp_flag &= ~NLP_REQ_SND; + ndlp->nlp_flag |= NLP_RM_ENTRY; + + iflag = lpfc_q_disable_lock(p_dev_ctl); + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + if(iocbq->iocb.un.elsreq.remoteID == ndlp->nlp_DID) { + if(ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN)) { + ndlp->nlp_flag &= ~(NLP_REQ_SND_ADISC 
| NLP_REQ_SND_PDISC | NLP_REQ_SND); + } + iocbq->retry = 0xff; + if((binfo->fc_flag & FC_RSCN_MODE) || + (binfo->fc_ffstate < FC_READY)) { + if((ndlp->nlp_state >= NLP_PLOGI) && + (ndlp->nlp_state <= NLP_ALLOC)) { + binfo->fc_nlp_cnt--; + } + if (binfo->fc_nlp_cnt <= 0) { + binfo->fc_nlp_cnt = 0; + } + } + } + iocbq = (IOCBQ * )iocbq->q; + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + /* In case its on fc_delay_timeout list */ + fc_abort_delay_els_cmd(p_dev_ctl, ndlp->nlp_DID); + + if ((ndlp->nlp_type & NLP_FCP_TARGET) && + (ndlp->nlp_state >= NLP_LOGIN)) { + ndlp->nlp_flag |= NLP_SND_PLOGI; + } + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN)) { + fc_nextnode(p_dev_ctl, ndlp); + } + } + else { + /* ACCEPT the logout request */ + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(uint32)), 0); + } + } + else { + /* ACCEPT the logout request */ + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(uint32)), 0); + } + break; + + case ELS_CMD_FAN: + FCSTATCTR.elsRcvFAN++; + /* FAN received */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0213, /* ptr to msg structure */ + fc_mes0213, /* ptr to msg */ + fc_msgBlk0213.msgPreambleStr, /* begin varargs */ + iocb->un.ulpWord[4], + binfo->fc_ffstate); /* end varargs */ + /* Check to see if we were waiting for FAN */ + if ((binfo->fc_ffstate != FC_FLOGI) || + (binfo->fc_topology != TOPOLOGY_LOOP) || + (!(binfo->fc_flag & FC_PUBLIC_LOOP))) + break; + + ep = (ELS_PKT * )bp; + + /* Check to make sure we haven't switched fabrics */ + if ((fc_geportname((NAME_TYPE * ) & ep->un.fan.FportName, + (NAME_TYPE * ) & binfo->fc_fabparam.portName) != 2) || + (fc_geportname((NAME_TYPE * ) & ep->un.fan.FnodeName, + (NAME_TYPE * ) & binfo->fc_fabparam.nodeName) != 2)) { + /* We switched, so we need to FLOGI again after timeout */ + break; + } + + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, 
FABRICTMO); + FABRICTMO = 0; + } + + binfo->fc_myDID = iocb->un.ulpWord[4] & Mask_DID; + + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, Fabric_DID)) == 0) { + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)iocb->un.elsreq.remoteID)) == 0) { + break; + } + fc_nlp_logi(binfo, ndlp, &ep->un.fan.FportName, &ep->un.fan.FnodeName); + } + ndlp->nlp_type |= NLP_FABRIC; + + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + + /* register the login with adapter */ + if (ndlp->nlp_Rpi == 0) { + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_reg_login(binfo, iocb->un.elsreq.remoteID, + (uchar * ) & binfo->fc_fabparam, (MAILBOX * )mb, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + + /* Since this is a FAN, we don't need to do any discovery stuff */ + fc_fanovery(p_dev_ctl); + break; + + case ELS_CMD_RSCN: + FCSTATCTR.elsRcvRSCN++; + fc_process_rscn(p_dev_ctl, temp, mp); + break; + + case ELS_CMD_ADISC: + FCSTATCTR.elsRcvADISC++; + ep = (ELS_PKT * )bp; + did = iocb->un.elsreq.remoteID; + if (((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, did)) != 0) && + (ndlp->nlp_state >= NLP_LOGIN)) { + if (fc_chkpadisc(binfo, ndlp, &ep->un.adisc.nodeName, + &ep->un.adisc.portName)) { + fc_els_rsp(binfo, ELS_CMD_ADISC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)sizeof(SERV_PARM), + ndlp); + } else { + /* Reject this request because invalid parameters */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, + ndlp); + if 
(!(ndlp->nlp_flag & NLP_REQ_SND)) { + ndlp->nlp_flag |= NLP_UNREG_LOGO; + fc_freenode_did(binfo, did, 0); + } + } + } else { + /* Reject this request because not logged in */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, ndlp); + if ((ndlp == 0) || (!(ndlp->nlp_flag & NLP_REQ_SND))) + fc_freenode_did(binfo, did, 0); + } + break; + + case ELS_CMD_PDISC: + FCSTATCTR.elsRcvPDISC++; + sp = (volatile SERV_PARM * )lp; + did = iocb->un.elsreq.remoteID; + if (((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, did)) != 0) && + (ndlp->nlp_state >= NLP_LOGIN)) { + if (fc_chkpadisc(binfo, ndlp, &sp->nodeName, &sp->portName)) { + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)sizeof(SERV_PARM), + ndlp); + } else { + /* Reject this request because invalid parameters */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, + ndlp); + if (!(ndlp->nlp_flag & NLP_REQ_SND)) { + ndlp->nlp_flag |= NLP_UNREG_LOGO; + fc_freenode_did(binfo, did, 0); + } + } + } else { + /* Reject this request because not logged in */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, ndlp); + if ((ndlp == 0) || (!(ndlp->nlp_flag & NLP_REQ_SND))) + fc_freenode_did(binfo, did, 0); + } + break; + + case ELS_CMD_FARPR: + FCSTATCTR.elsRcvFARPR++; + ep = (ELS_PKT * )bp; + did = 
iocb->un.elsreq.remoteID; + /* FARP-RSP received from DID */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0600, /* ptr to msg structure */ + fc_mes0600, /* ptr to msg */ + fc_msgBlk0600.msgPreambleStr, /* begin varargs */ + did ); /* end varargs */ + /* ACCEPT the Farp resp request */ + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(uint32)), 0); + break; + + case ELS_CMD_FARP: + FCSTATCTR.elsRcvFARP++; + ep = (ELS_PKT * )bp; + did = iocb->un.elsreq.remoteID; + /* FARP-REQ received from DID */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0601, /* ptr to msg structure */ + fc_mes0601, /* ptr to msg */ + fc_msgBlk0601.msgPreambleStr, /* begin varargs */ + did ); /* end varargs */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, did)) == 0) { + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = did; + fc_nlp_logi(binfo,ndlp, &ep->un.farp.OportName, &ep->un.farp.OnodeName); + ndlp->nlp_state = NLP_LIMBO; + } + else + break; + } + + /* We will only support match on WWPN or WWNN */ + if (ep->un.farp.Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) + break; + + cnt = 0; + /* If this FARP command is searching for my portname */ + if (ep->un.farp.Mflags & FARP_MATCH_PORT) { + if (fc_geportname(&ep->un.farp.RportName, &binfo->fc_portname) == 2) + cnt = 1; + else + cnt = 0; + } + + /* If this FARP command is searching for my nodename */ + if (ep->un.farp.Mflags & FARP_MATCH_NODE) { + if (fc_geportname(&ep->un.farp.RnodeName, &binfo->fc_nodename) == 2) + cnt = 1; + else + cnt = 0; + } + + if (cnt) { + if (!(binfo->fc_flag & FC_LNK_DOWN) && + (binfo->fc_ffstate >= rp->fc_xmitstate) && + !(ndlp->nlp_flag & NLP_REQ_SND) && + !(ndlp->nlp_action & NLP_DO_ADDR_AUTH)) { + /* We need to re-login to that node */ + if ((ep->un.farp.Rflags & 
FARP_REQUEST_PLOGI) && + !(ndlp->nlp_flag & NLP_REQ_SND)) { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)((ulong)ndlp->nlp_DID), + (uint32)0, (ushort)0, ndlp); + } + + /* We need to send FARP response to that node */ + if (ep->un.farp.Rflags & FARP_REQUEST_FARPR) { + fc_els_cmd(binfo, ELS_CMD_FARPR, (void *)((ulong)ndlp->nlp_DID), + (uint32)0, (ushort)0, ndlp); + } + } + } + break; + + case ELS_CMD_RRQ: + FCSTATCTR.elsRcvRRQ++; + ep = (ELS_PKT * )bp; + /* Get oxid / rxid from payload and internally abort it */ + if ((ep->un.rrq.SID == SWAP_DATA(binfo->fc_myDID))) { + fc_abort_ixri_cx(binfo, ep->un.rrq.Oxid, CMD_CLOSE_XRI_CX, rp); + } else { + fc_abort_ixri_cx(binfo, ep->un.rrq.Rxid, CMD_CLOSE_XRI_CX, rp); + } + /* ACCEPT the rrq request */ + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(uint32)), 0); + break; + + case ELS_CMD_PRLI: + /* ACCEPT the prli request */ + did = iocb->un.elsreq.remoteID; + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, did))) { + fc_els_rsp(binfo, ELS_CMD_PRLI, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)(sizeof(uint32)), ndlp); + } + else { + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, 0); + } + break; + + case ELS_CMD_RNID: + did = iocb->un.elsreq.remoteID; + ep = (ELS_PKT * )bp; + switch(ep->un.rnid.Format) { + case 0: + case RNID_TOPOLOGY_DISC: + fc_els_rsp(binfo, ELS_CMD_RNID, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)ep->un.rnid.Format, 0); + break; + default: + /* Reject this request because format not supported */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + 
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, 0); + } + break; + + default: + /* Unsupported ELS command, reject */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + stat.un.b.vendorUnique = 0; + fc_els_rsp(binfo, ELS_CMD_LS_RJT, (uint32)iocb->ulpContext, + (uint32)iocb->ulpClass, (void *)0, (uint32)stat.un.lsRjtError, 0); + FCSTATCTR.elsCmdPktInval++; + + did = iocb->un.elsreq.remoteID; + /* Unknown ELS command received from NPORT */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0127, /* ptr to msg structure */ + fc_mes0127, /* ptr to msg */ + fc_msgBlk0127.msgPreambleStr, /* begin varargs */ + cmd, + did); /* end varargs */ + break; + } + +dropit: + + FCSTATCTR.elsRcvFrame++; + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + +out: + + i = 1; + /* free resources associated with this iocb and repost the ring buffers */ + if (!(binfo->fc_flag & FC_SLI2)) { + for (i = 1; i < (int)iocb->ulpBdeCount; i++) { + mp = fc_getvaddr(p_dev_ctl, rp, (uchar * )((ulong)iocb->un.cont[i].bdeAddress)); + if (mp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + } + } + } + + fc_post_buffer(p_dev_ctl, rp, i); + + return(1); +} /* End handle_rcv_els_req */ + + +/***************************************/ +/** fc_process_rscn Process ELS **/ +/** RSCN command **/ +/***************************************/ +_static_ int +fc_process_rscn( +fc_dev_ctl_t *p_dev_ctl, +IOCBQ *temp, +MATCHMAP *mp) +{ + FC_BRD_INFO * binfo; + IOCB * iocb; + uchar * bp; + uint32 * lp; + D_ID rdid; + uint32 cmd; + int i, j, cnt; + + binfo = &BINFO; + iocb = &temp->iocb; + bp = mp->virt; + lp = (uint32 * )bp; + cmd = *lp++; + i = SWAP_DATA(cmd) & 0xffff; /* payload length */ + i -= sizeof(uint32); /* take off word 0 */ + cmd &= ELS_CMD_MASK; + + /* RSCN received */ + 
fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0214, /* ptr to msg structure */ + fc_mes0214, /* ptr to msg */ + fc_msgBlk0214.msgPreambleStr, /* begin varargs */ + binfo->fc_flag, + i, + *lp, + binfo->fc_rscn_id_cnt); /* end varargs */ + cnt = 0; /* cnt will determine if we need to access NameServer */ + + /* Loop through all DIDs in the payload */ + binfo->fc_flag |= FC_RSCN_MODE; + + while (i) { + rdid.un.word = *lp++; + rdid.un.word = SWAP_DATA(rdid.un.word); + if(binfo->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) { + for(j=0;j<binfo->fc_rscn_id_cnt;j++) { + if(binfo->fc_rscn_id_list[j] == rdid.un.word) { + goto skip_id; + } + } + binfo->fc_rscn_id_list[binfo->fc_rscn_id_cnt++] = rdid.un.word; + } + else { + binfo->fc_flag |= FC_RSCN_DISCOVERY; + fc_flush_rscn_defer(p_dev_ctl); + cnt = 0; + break; + } +skip_id: + cnt += (fc_handle_rscn(p_dev_ctl, &rdid)); + i -= sizeof(uint32); + } + + /* RSCN processed */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0215, /* ptr to msg structure */ + fc_mes0215, /* ptr to msg */ + fc_msgBlk0215.msgPreambleStr, /* begin varargs */ + binfo->fc_flag, + cnt, + binfo->fc_rscn_id_cnt, + binfo->fc_ffstate ); /* end varargs */ + if (cnt == 0) { + /* no need for nameserver login */ + fc_nextrscn(p_dev_ctl, fc_max_els_sent); + } + else { + if(!(binfo->fc_flag & FC_NSLOGI_TMR)) + fc_clk_set(p_dev_ctl, 1, fc_issue_ns_query, 0, 0); + binfo->fc_flag |= FC_NSLOGI_TMR; + } + return(0); +} + + +/***************************************/ +/** fc_handle_rscn Handle ELS **/ +/** RSCN command **/ +/***************************************/ +_static_ int +fc_handle_rscn( +fc_dev_ctl_t *p_dev_ctl, +D_ID *didp) +{ + FC_BRD_INFO * binfo; + NODELIST * ndlp; + NODELIST * new_ndlp; + NODELIST * callnextnode; + iCfgParam * clp; + D_ID did; + int change; + int numchange; + int ns; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + callnextnode = 0; + + dfc_hba_put_event(p_dev_ctl, HBA_EVENT_RSCN, binfo->fc_myDID, didp->un.word, 0, 0); + 
dfc_put_event(p_dev_ctl, FC_REG_RSCN_EVENT, didp->un.word, 0, 0); + + /* Is this an RSCN for me? */ + if (didp->un.word == binfo->fc_myDID) + return(0); + + /* Always query nameserver on RSCN (zoning) if CFG_ZONE_RSCN it set */ + ns = (int)clp[CFG_ZONE_RSCN].a_current; + numchange = 0; + + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + new_ndlp = 0; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + /* Skip over FABRIC nodes and myself */ + if ((ndlp->nlp_DID == binfo->fc_myDID) || + (ndlp->nlp_type & NLP_FABRIC)) { + + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + continue; + } + + did.un.word = ndlp->nlp_DID; + change = 0; + + switch (didp->un.b.resv) { + case 0: /* Single N_Port ID effected */ + if (did.un.word == didp->un.word) { + change = 1; + } + break; + + case 1: /* Whole N_Port Area effected */ + if ((did.un.b.domain == didp->un.b.domain) && + (did.un.b.area == didp->un.b.area)) { + ns = 1; + change = 1; + } + break; + + case 2: /* Whole N_Port Domain effected */ + if (did.un.b.domain == didp->un.b.domain) { + ns = 1; + change = 1; + } + break; + + case 3: /* Whole Fabric effected */ + binfo->fc_flag |= FC_RSCN_DISCOVERY; + fc_flush_rscn_defer(p_dev_ctl); + return(0); + + default: + /* Unknown Identifier in RSCN payload */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0216, /* ptr to msg structure */ + fc_mes0216, /* ptr to msg */ + fc_msgBlk0216.msgPreambleStr, /* begin varargs */ + didp->un.word ); /* end varargs */ + break; + + } + + if (change) { + numchange++; + if((ndlp->nlp_state == NLP_ALLOC) || + (ndlp->nlp_state == NLP_LOGIN)) { + + if (ndlp->nlp_flag & NLP_REQ_SND) { + RING * rp; + IOCBQ * iocbq; + unsigned long iflag; + + /* Look through ELS ring and remove any ELS 
cmds in progress */ + iflag = lpfc_q_disable_lock(p_dev_ctl); + rp = &binfo->fc_ring[FC_ELS_RING]; + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + if (iocbq->iocb.un.elsreq.remoteID == ndlp->nlp_DID) { + iocbq->retry = 0xff; /* Mark for abort */ + } + iocbq = (IOCBQ * )iocbq->q; + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + /* In case its on fc_delay_timeout list */ + fc_abort_delay_els_cmd(p_dev_ctl, ndlp->nlp_DID); + + ndlp->nlp_flag &= ~NLP_REQ_SND; + } + + /* We are always using ADISC for RSCN validation */ + /* IF we are using ADISC, leave ndlp on mapped or unmapped q */ + + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + + /* Mark node for authentication */ + ndlp->nlp_action |= NLP_DO_RSCN; + + } else { + + if (ndlp->nlp_flag & NLP_REQ_SND) { + if((callnextnode == 0) && (ndlp->nlp_action & NLP_DO_RSCN)) + callnextnode = ndlp; + } + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + + /* Mark node for authentication */ + ndlp->nlp_action |= NLP_DO_RSCN; + } + } + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + /* If nothing in our node table is effected, + * we need to goto the Nameserver. 
+ */ + if (numchange == 0) { + /* Is this a single N_Port that wasn't in our table */ + if (didp->un.b.resv == 0) { + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, didp->un.word)) == 0) { + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = didp->un.word; + } + else + return(ns); + } + ndlp->nlp_action |= NLP_DO_RSCN; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + else { + ns = 1; + } + } + + /* Is this an area or domain N_Port */ + if (didp->un.b.resv != 0) { + ns = 1; + } + + if((ns == 0) && (callnextnode)) + fc_nextnode(p_dev_ctl, callnextnode); + + /* Tell calling routine if NameServer access is required + * and return number of nodes presently being authenticated. + */ + return(ns); +} /* End fc_handle_rscn */ + + +/*************************************************/ +/** fc_chksparm Check service parameters **/ +/*************************************************/ +_local_ int +fc_chksparm( +FC_BRD_INFO *binfo, +volatile SERV_PARM *sp, +uint32 class) +{ + volatile SERV_PARM *hsp; + + hsp = &binfo->fc_sparam; + /* First check for supported version */ + + /* Next check for class validity */ + if (sp->cls1.classValid) { + if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb) + sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; + if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb) + sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; + } else if (class == CLASS1) { + return(0); + } + + if (sp->cls2.classValid) { + if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb) + sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; + if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb) + sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; + } else if (class == CLASS2) { + return(0); + } + + if (sp->cls3.classValid) { + if (sp->cls3.rcvDataSizeMsb > 
hsp->cls3.rcvDataSizeMsb) + sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; + if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb) + sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; + } else if (class == CLASS3) { + return(0); + } + + if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb) + sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb; + if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb) + sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb; + + return(1); +} /* End fc_chksparm */ + + +/***************************************/ +/** fc_chkpadisc Check **/ +/** P/ADISC parameters **/ +/***************************************/ +_static_ int +fc_chkpadisc( +FC_BRD_INFO *binfo, +NODELIST *ndlp, +volatile NAME_TYPE *nn, +volatile NAME_TYPE *pn) +{ + if (fc_geportname((NAME_TYPE * )nn, &ndlp->nlp_nodename) != 2) { + return(0); + } + + if (fc_geportname((NAME_TYPE * )pn, &ndlp->nlp_portname) != 2) { + return(0); + } + + return(1); +} /* End fc_chkpadisc */ + + +/***************************************/ +/** fc_els_cmd Issue an **/ +/** ELS command **/ +/***************************************/ +_static_ int +fc_els_cmd( +FC_BRD_INFO *binfo, +uint32 elscmd, +void *arg, +uint32 retry, +ushort iotag, +NODELIST *ndlp) +{ + IOCB * icmd; + IOCBQ * temp; + RING * rp; + uchar * bp; + ULP_BDE64 * bpl; + MATCHMAP * mp, * rmp, * bmp; + MAILBOXQ * mb; + iCfgParam * clp; + union { + SERV_PARM * sp; + ADISC * ap; + FARP * fp; + fc_vpd_t * vpd; + PRLI * npr; + } un; + uint32 * lp; + ushort size; + ulong setdelay; + fc_dev_ctl_t * p_dev_ctl; + + clp = DD_CTL.p_config[binfo->fc_brd_no]; + rp = &binfo->fc_ring[FC_ELS_RING]; + p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl); + + if ((elscmd == ELS_CMD_LOGO) && (iotag == 0)) { + /* First do unreglogin for did before sending ELS LOGO request */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)((ulong)arg))) && ndlp->nlp_Rpi) { + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | 
NLP_DO_DISC_START | NLP_DO_RSCN)) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + ndlp->nlp_flag |= NLP_UNREG_LOGO; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + return(0); + } + } + /* Allocate buffer for command iocb */ + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB | MEM_PRI)) == 0) { + return(1); + } + fc_bzero((void *)temp, sizeof(IOCBQ)); + icmd = &temp->iocb; + setdelay = 0; + + /* fill in BDEs for command */ + /* Allocate buffer for command payload */ + if ((mp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF | MEM_PRI)) == 0) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + return(1); + } + + /* Allocate buffer for response payload */ + if ((rmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF | MEM_PRI)) == 0) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + return(1); + } + fc_bzero((void *)rmp->virt, sizeof(ELS_PKT)); + + if (binfo->fc_flag & FC_SLI2) { + /* Allocate buffer for Buffer ptr list */ + if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL | MEM_PRI)) == 0) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )rmp); + return(1); + } + bpl = (ULP_BDE64 * )bmp->virt; + bpl->addrLow = PCIMEM_LONG(putPaddrLow((ulong)mp->phys)); + bpl->addrHigh = PCIMEM_LONG(putPaddrHigh((ulong)mp->phys)); + bpl->tus.f.bdeFlags = 0; + bpl++; + bpl->addrLow = PCIMEM_LONG(putPaddrLow((ulong)rmp->phys)); + bpl->addrHigh = PCIMEM_LONG(putPaddrHigh((ulong)rmp->phys)); + bpl->tus.f.bdeSize = FCELSSIZE; + bpl->tus.f.bdeFlags = BUFF_USE_RCV; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + + bpl--; /* so we can fill in size later */ + + icmd->un.elsreq64.bdl.ulpIoTag32 = (uint32)0; + icmd->un.elsreq64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys); + icmd->un.elsreq64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys); + icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(ULP_BDE64)); + icmd->un.elsreq64.bdl.bdeFlags = 
BUFF_TYPE_BDL; + temp->bpl = (uchar *)bmp; + } else { + bpl = 0; + bmp = 0; + icmd->un.cont[0].bdeAddress = (uint32)putPaddrLow(mp->phys); + icmd->un.cont[1].bdeAddress = (uint32)putPaddrLow(rmp->phys); + icmd->un.cont[1].bdeSize = FCELSSIZE; + temp->bpl = 0; + } + + bp = mp->virt; + /* Save for completion so we can release these resources */ + temp->bp = (uchar * )mp; + temp->info = (uchar * )rmp; + + /* Fill in command field in payload */ + *((uint32 * )(bp)) = elscmd; /* FLOGI, PLOGI or LOGO */ + bp += sizeof(uint32); + + switch (elscmd) { + case ELS_CMD_PLOGI: /* NPort login */ + case ELS_CMD_PDISC: /* exchange parameters */ + if(ndlp && (ndlp->nlp_DID == 0)) { + ndlp->nlp_DID = (uint32)((ulong)arg); + } + case ELS_CMD_FLOGI: /* Fabric login */ + /* For LOGI request, remainder of payload is service parameters */ + fc_bcopy((void *) & binfo->fc_sparam, (void *)bp, sizeof(SERV_PARM)); + un.sp = (SERV_PARM * )bp; + + if (elscmd == ELS_CMD_FLOGI) { + un.sp->cmn.e_d_tov = 0; + un.sp->cmn.w2.r_a_tov = 0; + un.sp->cls1.classValid = 0; + un.sp->cls2.seqDelivery = 1; + un.sp->cls3.seqDelivery = 1; + if (un.sp->cmn.fcphLow < FC_PH3) + un.sp->cmn.fcphLow = FC_PH3; + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + } else { + /* Seagate drives can't handle FC_PH3 value! 
*/ + if (un.sp->cmn.fcphLow < FC_PH_4_3) + un.sp->cmn.fcphLow = FC_PH_4_3; + } + + if (un.sp->cmn.fcphHigh < FC_PH3) + un.sp->cmn.fcphHigh = FC_PH3; + + icmd->un.elsreq.remoteID = (uint32)((ulong)arg); /* DID */ + size = (sizeof(uint32) + sizeof(SERV_PARM)); + + if (elscmd != ELS_CMD_PDISC) { + /* Allocate a nlplist entry, ELS cmpl will fill it in */ + if ((ndlp == 0) && + ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)((ulong)arg))) == 0)) { + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = (uint32)((ulong)arg); + } + else { + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )rmp); + if (bmp) { + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + } + return(1); + } + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + ndlp->nlp_flag &= ~NLP_RM_ENTRY; + ndlp->nlp_flag |= NLP_REQ_SND; + + if (elscmd == ELS_CMD_PLOGI) { + + ndlp->nlp_flag &= ~NLP_SND_PLOGI; + if (ndlp->nlp_Rpi) { + /* must explicitly unregister the login, UREG_LOGIN */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_unreg_login(binfo, ndlp->nlp_Rpi, (MAILBOX * )mb); + if (issue_mb_cmd(binfo,(MAILBOX * )mb,MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + binfo->fc_nlplookup[ndlp->nlp_Rpi] = 0; + ndlp->nlp_Rpi = 0; + } + + /* For PLOGI requests, must make sure all outstanding Mailbox + * commands have been processed. This is to ensure UNREG_LOGINs + * complete before we try to login. 
+ */ + if (binfo->fc_mbox_active) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )rmp); + if (bmp) { + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + } + temp->info = (uchar *)0; + temp->bp = (uchar *)0; + temp->bpl = (uchar *)0; + fc_plogi_put(binfo, temp); + return(1); + } + + if ((ulong)arg == NameServer_DID) { + if (binfo->fc_ffstate == FC_READY) { + if(binfo->fc_flag & FC_RSCN_MODE) + ndlp->nlp_action |= NLP_DO_RSCN; + else + ndlp->nlp_action |= NLP_DO_ADDR_AUTH; + } + else + ndlp->nlp_action |= NLP_DO_ADDR_AUTH; + } + } + } + break; + + case ELS_CMD_LOGO: /* Logout */ + icmd->un.elsreq.remoteID = (uint32)((ulong)arg); /* DID */ + + *((uint32 * )(bp)) = SWAP_DATA(binfo->fc_myDID); + bp += sizeof(uint32); + + /* Last field in payload is our portname */ + fc_bcopy((void *) & binfo->fc_portname, (void *)bp, sizeof(NAME_TYPE)); + size = sizeof(uint32) + sizeof(uint32) + sizeof(NAME_TYPE); + break; + + case ELS_CMD_ADISC: + icmd->un.elsreq.remoteID = (uint32)((ulong)arg); /* DID */ + + if ((ndlp == 0) && + ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)((ulong)arg))) == 0)) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )rmp); + if (bmp) { + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + } + return(1); + } + ndlp->nlp_DID = (uint32)((ulong)arg); + ndlp->nlp_flag |= NLP_REQ_SND_ADISC; + un.ap = (ADISC * )(bp); + un.ap->hardAL_PA = binfo->fc_pref_ALPA; + fc_bcopy((void *) & binfo->fc_portname, (void *) & un.ap->portName, + sizeof(NAME_TYPE)); + fc_bcopy((void *) & binfo->fc_nodename, (void *) & un.ap->nodeName, + sizeof(NAME_TYPE)); + un.ap->DID = SWAP_DATA(binfo->fc_myDID); + + size = sizeof(uint32) + sizeof(ADISC); + break; + + case ELS_CMD_PRLI: /* Process login */ + icmd->un.elsreq.remoteID = (uint32)((ulong)arg); /* DID */ + if ((ndlp == 0) && + ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)((ulong)arg))) == 0)) { + 
fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )rmp); + if (bmp) { + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + } + return(1); + } + ndlp->nlp_flag |= NLP_REQ_SND; + + /* For PRLI, remainder of payload is PRLI parameter page */ + fc_bzero((void *)bp, sizeof(PRLI)); + un.npr = (PRLI *)bp; + + /* + * If our firmware version is 3.20 or later, + * set the following bits for FC-TAPE support. + */ + if ( p_dev_ctl->vpd.rev.feaLevelHigh >= 0x02 ) { + un.npr->ConfmComplAllowed = 1; + un.npr->Retry = 1; + un.npr->TaskRetryIdReq = 1; + } + + un.npr->estabImagePair = 1; + un.npr->readXferRdyDis = 1; + if(clp[CFG_FCP_ON].a_current) { + un.npr->prliType = PRLI_FCP_TYPE; + un.npr->initiatorFunc = 1; + } + + size = sizeof(uint32) + sizeof(PRLI); + break; + + case ELS_CMD_PRLO: /* Process logout */ + /* For PRLO, remainder of payload is PRLO parameter page */ + fc_bzero((void *)bp, sizeof(PRLO)); + + icmd->un.elsreq.remoteID = (uint32)((ulong)arg); /* DID */ + size = sizeof(uint32) + sizeof(PRLO); + break; + + case ELS_CMD_SCR: /* State Change Registration */ + /* For SCR, remainder of payload is SCR parameter page */ + fc_bzero((void *)bp, sizeof(SCR)); + ((SCR * )bp)->Function = SCR_FUNC_FULL; + + icmd->un.elsreq.remoteID = (uint32)((ulong)arg); /* DID */ + size = sizeof(uint32) + sizeof(SCR); + break; + + case ELS_CMD_RNID: /* Node Identification */ + fc_bzero((void *)bp, sizeof(RNID)); + ((RNID * )bp)->Format = 0; + + icmd->un.elsreq.remoteID = (uint32)((ulong)arg); /* DID */ + size = sizeof(uint32) + sizeof(uint32); + break; + + case ELS_CMD_FARP: /* Farp */ + { + un.fp = (FARP * )(bp); + fc_bzero((void *)un.fp, sizeof(FARP)); + lp = (uint32 *)bp; + *lp++ = SWAP_DATA(binfo->fc_myDID); + un.fp->Mflags = FARP_MATCH_PORT; + un.fp->Rflags = FARP_REQUEST_PLOGI; + fc_bcopy((void *) & binfo->fc_portname, (void *) & un.fp->OportName, + sizeof(NAME_TYPE)); + fc_bcopy((void *) & binfo->fc_nodename, (void 
*) & un.fp->OnodeName, + sizeof(NAME_TYPE)); + switch(retry) { + case 0: + un.fp->Mflags = FARP_MATCH_PORT; + un.fp->RportName.nameType = NAME_IEEE; /* IEEE name */ + un.fp->RportName.IEEEextMsn = 0; + un.fp->RportName.IEEEextLsb = 0; + fc_bcopy(arg, (void *)un.fp->RportName.IEEE, 6); + un.fp->RnodeName.nameType = NAME_IEEE; /* IEEE name */ + un.fp->RnodeName.IEEEextMsn = 0; + un.fp->RnodeName.IEEEextLsb = 0; + fc_bcopy(arg, (void *)un.fp->RnodeName.IEEE, 6); + break; + case 1: + un.fp->Mflags = FARP_MATCH_PORT; + fc_bcopy(arg, (void *)&un.fp->RportName, sizeof(NAME_TYPE)); + retry = 0; + break; + case 2: + un.fp->Mflags = FARP_MATCH_NODE; + fc_bcopy(arg, (void *)&un.fp->RnodeName, sizeof(NAME_TYPE)); + retry = 0; + break; + } + + if((ndlp = fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, &un.fp->RportName))) { + ndlp->nlp_flag |= NLP_FARP_SND; + ndlp->nlp_flag &= ~NLP_RM_ENTRY; + } + size = sizeof(uint32) + sizeof(FARP); + iotag = 0; + } + break; + + case ELS_CMD_FARPR: /* Farp response */ + { + icmd->un.elsreq.remoteID = (uint32)((ulong)arg); /* DID */ + un.fp = (FARP * )(bp); + lp = (uint32 *)bp; + *lp++ = SWAP_DATA((uint32)((ulong)arg)); + *lp++ = SWAP_DATA(binfo->fc_myDID); + un.fp->Rflags = 0; + un.fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); + + fc_bcopy((void *) & binfo->fc_portname, (void *) & un.fp->RportName, + sizeof(NAME_TYPE)); + fc_bcopy((void *) & binfo->fc_nodename, (void *) & un.fp->RnodeName, + sizeof(NAME_TYPE)); + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, (uint32)((ulong)arg)))) { + fc_bcopy((void *) & ndlp->nlp_portname, (void *) & un.fp->OportName, + sizeof(NAME_TYPE)); + fc_bcopy((void *) & ndlp->nlp_nodename, (void *) & un.fp->OnodeName, + sizeof(NAME_TYPE)); + } + + size = sizeof(uint32) + sizeof(FARP); + iotag = 0; + } + break; + + default: + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )rmp); + if (bmp) { + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); 
+ } + /* Xmit unknown ELS command */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0128, /* ptr to msg structure */ + fc_mes0128, /* ptr to msg */ + fc_msgBlk0128.msgPreambleStr, /* begin varargs */ + elscmd); /* end varargs */ + return(1); + } + + if (binfo->fc_flag & FC_SLI2) { + icmd->ulpCommand = CMD_ELS_REQUEST64_CR; + bpl->tus.f.bdeSize = size; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + + fc_mpdata_sync(bmp->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV); + } else { + icmd->ulpCommand = CMD_ELS_REQUEST_CR; + icmd->un.cont[0].bdeSize = size; + } + + fc_mpdata_sync(mp->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV); + + if (iotag) { + icmd->ulpIoTag = iotag; + } + icmd->ulpIoTag0 = (unsigned)rp->fc_iotag++; + if ((rp->fc_iotag & 0x3fff) == 0) { + rp->fc_iotag = 1; + } + + /* Fill in rest of iocb */ + icmd->ulpBdeCount = 1; + icmd->ulpLe = 1; + icmd->ulpClass = CLASS3; + icmd->ulpOwner = OWN_CHIP; + temp->retry = (uchar)retry; /* retry = uint32 */ + rmp->fc_mptr = (uchar *)ndlp; + /* Xmit ELS command to remote NPORT */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0129, /* ptr to msg structure */ + fc_mes0129, /* ptr to msg */ + fc_msgBlk0129.msgPreambleStr, /* begin varargs */ + elscmd, + icmd->un.ulpWord[5], /* did */ + icmd->ulpIoTag, + binfo->fc_ffstate); /* end varargs */ + /* + * For handleing Dump command when system panic, + * the FC_BUS_RESET needs to be checked. If FC_BUS_RESET is set, + * there is no delay for issuing ELS command. + * FC_BUS_RESET is set by the lpfc_scsi_reset(). 
*/
  /* Honor any transmit-delay request recorded in the iocb (set by
   * fc_status_action()) before handing the ELS command to the adapter.
   */
  if (icmd->ulpDelayXmit) {
    if (icmd->ulpDelayXmit == 2) {
      /* Delay issue of iocb 2048 interrupt latencies: append the iocb
       * to the tail of the board's delayed-transmit list.
       */
      if (binfo->fc_delayxmit) {
        IOCBQ *iop;

        iop = binfo->fc_delayxmit;
        while (iop->q)
          iop = (IOCBQ *)iop->q;
        iop->q = (uchar *)temp;
      }
      else {
        binfo->fc_delayxmit = temp;
      }
      temp->q = 0;
      temp->rsvd2 = 2048;   /* remaining interrupt latencies to wait */
    }
    else {
      /* Delay issue of iocb for 1 to 2 seconds */
      temp->q = 0;

      setdelay = 1;   /* seconds */
      fc_clk_set(p_dev_ctl, setdelay, fc_delay_timeout, (void *)temp, ndlp);
    }
  }
  else {
    issue_iocb_cmd(binfo, rp, temp);
  }

  FCSTATCTR.elsXmitFrame++;
  return(0);
} /* End fc_els_cmd */


/***************************************/
/**  fc_els_rsp        Issue an       **/
/**                    ELS response   **/
/***************************************/
/* Build and transmit an ELS response (ACC, LS_RJT, ADISC, PRLI or RNID
 * accept) on exchange Xri.  Returns 0 on success, 1 if a required resource
 * could not be allocated or elscmd is not a supported response code.
 */
_static_ int
fc_els_rsp(
FC_BRD_INFO *binfo,
uint32 elscmd,
uint32 Xri,
uint32 class,
void *iocbp,
uint32 flag,
NODELIST *ndlp)
{
  IOCB         *icmd;
  IOCBQ        *temp;
  RING         *rp;
  uchar        *bp;
  MATCHMAP     *mp, *bmp;
  ULP_BDE64    *bpl;
  ADISC        *ap;
  RNID         *rn;
  fc_vpd_t     *vpd;
  PRLI         *npr;
  iCfgParam    *clp;
  fc_dev_ctl_t *p_dev_ctl;
  ushort        size;

  rp = &binfo->fc_ring[FC_ELS_RING];
  clp = DD_CTL.p_config[binfo->fc_brd_no];
  p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl);

  /* Allocate buffer for command iocb */
  if ((temp = (IOCBQ *)fc_mem_get(binfo, MEM_IOCB)) == 0) {
    return(1);
  }
  fc_bzero((void *)temp, sizeof(IOCBQ));
  icmd = &temp->iocb;

  /* fill in BDEs for command */
  /* Allocate buffer for response payload */
  if ((mp = (MATCHMAP *)fc_mem_get(binfo, MEM_BUF)) == 0) {
    fc_mem_put(binfo, MEM_IOCB, (uchar *)temp);
    return(1);
  }

  if (binfo->fc_flag & FC_SLI2) {
    /* Allocate buffer for Buffer ptr list */
    if ((bmp = (MATCHMAP *)fc_mem_get(binfo, MEM_BPL)) == 0) {
      fc_mem_put(binfo, MEM_IOCB, (uchar *)temp);
      fc_mem_put(binfo, MEM_BUF, (uchar *)mp);
      return(1);
    }
    bpl = (ULP_BDE64 *)bmp->virt;
    bpl->addrLow =
PCIMEM_LONG(putPaddrLow((ulong)mp->phys));
    bpl->addrHigh = PCIMEM_LONG(putPaddrHigh((ulong)mp->phys));
    bpl->tus.f.bdeFlags = 0;

    icmd->un.elsreq64.bdl.ulpIoTag32 = (uint32)0;
    icmd->un.elsreq64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys);
    icmd->un.elsreq64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys);
    icmd->un.elsreq64.bdl.bdeSize = sizeof(ULP_BDE64);
    icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
    temp->bpl = (uchar *)bmp;
  } else {
    bpl = 0;
    bmp = 0;
    icmd->un.cont[0].bdeAddress = (uint32)putPaddrLow(mp->phys);
    temp->bpl = 0;
  }

  bp = mp->virt;

  /* Save for completion so we can release these resources */
  temp->bp = (uchar *)mp;
  temp->ndlp = (uchar *)ndlp;

  /* Fill in command field in payload */
  *((uint32 *)(bp)) = elscmd;   /* ACC or LS_RJT */

  switch (elscmd) {
  case ELS_CMD_ACC:   /* Accept Response */
    /* ACCEPT will optionally contain service parameters,
     * depending on flag.
     */
    bp += sizeof(uint32);
    if (flag >= sizeof(SERV_PARM)) {
      fc_bcopy((void *)&binfo->fc_sparam, (void *)bp, sizeof(SERV_PARM));
      size = (sizeof(SERV_PARM) + sizeof(uint32));
    } else {
      size = sizeof(uint32);
    }
    break;

  case ELS_CMD_LS_RJT:   /* reject response */
    bp += sizeof(uint32);
    *((uint32 *)(bp)) = flag;   /* fill in error code */
    size = sizeof(uint32) + sizeof(uint32);
    break;

  case ELS_CMD_ADISC:
    *((uint32 *)(bp)) = ELS_CMD_ACC;
    bp += sizeof(uint32);
    if (ndlp)
      icmd->un.elsreq.remoteID = ndlp->nlp_DID;   /* DID */

    /* Payload is our port/node names, hard ALPA and DID */
    ap = (ADISC *)(bp);
    ap->hardAL_PA = binfo->fc_pref_ALPA;
    fc_bcopy((void *)&binfo->fc_portname, (void *)&ap->portName,
             sizeof(NAME_TYPE));
    fc_bcopy((void *)&binfo->fc_nodename, (void *)&ap->nodeName,
             sizeof(NAME_TYPE));
    ap->DID = SWAP_DATA(binfo->fc_myDID);

    size = sizeof(uint32) + sizeof(ADISC);
    break;

  case ELS_CMD_PRLI:
    *((uint32 *)(bp)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
    bp += sizeof(uint32);
    npr = (PRLI *)bp;
    if (ndlp)
      icmd->un.elsreq.remoteID = ndlp->nlp_DID;   /* DID */

    /* For PRLI, remainder of payload is PRLI parameter page */
    fc_bzero((void *)bp, sizeof(PRLI));

    vpd = &p_dev_ctl->vpd;
    /*
     * If our firmware version is 3.20 or later,
     * set the following bits for FC-TAPE support.
     */
    if (vpd->rev.feaLevelHigh >= 0x02) {
      npr->ConfmComplAllowed = 1;
      npr->Retry = 1;
      npr->TaskRetryIdReq = 1;
    }

    npr->acceptRspCode = PRLI_REQ_EXECUTED;
    npr->estabImagePair = 1;
    npr->readXferRdyDis = 1;
    npr->ConfmComplAllowed = 1;
    if (clp[CFG_FCP_ON].a_current) {
      npr->prliType = PRLI_FCP_TYPE;
      npr->initiatorFunc = 1;
    }

    size = sizeof(uint32) + sizeof(PRLI);
    break;

  case ELS_CMD_RNID:
    *((uint32 *)(bp)) = ELS_CMD_ACC;
    bp += sizeof(uint32);

    rn = (RNID *)(bp);
    fc_bzero((void *)bp, sizeof(RNID));
    rn->Format = (uchar)flag;
    rn->CommonLen = (2 * sizeof(NAME_TYPE));
    fc_bcopy((void *)&binfo->fc_portname, (void *)&rn->portName,
             sizeof(NAME_TYPE));
    fc_bcopy((void *)&binfo->fc_nodename, (void *)&rn->nodeName,
             sizeof(NAME_TYPE));
    switch (flag) {
    case 0:
      rn->SpecificLen = 0;
      break;
    case RNID_TOPOLOGY_DISC:
      /* Fill in the topology-discovery specific page */
      rn->SpecificLen = sizeof(RNID_TOP_DISC);
      fc_bcopy((void *)&binfo->fc_portname,
               (void *)&rn->un.topologyDisc.portName, sizeof(NAME_TYPE));
      rn->un.topologyDisc.unitType = RNID_HBA;
      rn->un.topologyDisc.physPort = 0;
      rn->un.topologyDisc.attachedNodes = 0;
      if (clp[CFG_NETWORK_ON].a_current) {
        rn->un.topologyDisc.ipVersion = binfo->ipVersion;
        rn->un.topologyDisc.UDPport = binfo->UDPport;
        fc_bcopy((void *)&binfo->ipAddr[0],
                 (void *)&rn->un.topologyDisc.ipAddr[0], 16);
      }
      break;
    default:
      /* Unknown format: return an empty RNID accept */
      rn->CommonLen = 0;
      rn->SpecificLen = 0;
      break;
    }
    size = sizeof(uint32) + sizeof(uint32) + rn->CommonLen + rn->SpecificLen;
    break;

  default:
    fc_mem_put(binfo, MEM_IOCB, (uchar *)temp);
    fc_mem_put(binfo, MEM_BUF, (uchar *)mp);
    if (bmp) {
      fc_mem_put(binfo, MEM_BPL, (uchar *)bmp);
    }
    /* Xmit unknown ELS response (elsCmd> */
    fc_log_printf_msg_vargs(binfo->fc_brd_no,
        &fc_msgBlk0130,                  /* ptr to msg structure */
        fc_mes0130,                      /* ptr to msg */
        fc_msgBlk0130.msgPreambleStr,    /* begin varargs */
        elscmd);                         /* end varargs */
    return(1);
  }

  if (binfo->fc_flag & FC_SLI2) {
    icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
    bpl->tus.f.bdeSize = size;
    bpl->tus.w = PCIMEM_LONG(bpl->tus.w);

    fc_mpdata_sync(bmp->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
  } else {
    icmd->ulpCommand = CMD_XMIT_ELS_RSP_CX;
    icmd->un.cont[0].bdeSize = size;
  }

  fc_mpdata_sync(mp->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

  /* If iotag is zero, assign one from global counter for board */
  if (iocbp == 0) {
    temp->retry = 0;
  } else {
    icmd->ulpIoTag = ((IOCB *)iocbp)->ulpIoTag;
    temp->retry = ((IOCBQ *)iocbp)->retry;
  }
  icmd->ulpIoTag0 = (unsigned)rp->fc_iotag++;
  if ((rp->fc_iotag & 0x3fff) == 0) {
    rp->fc_iotag = 1;
  }

  /* fill in rest of iocb */
  icmd->ulpContext = (volatile ushort)Xri;
  icmd->ulpBdeCount = 1;
  icmd->ulpLe = 1;
  icmd->ulpClass = class;
  icmd->ulpOwner = OWN_CHIP;
  /* Xmit ELS response to remote NPORT */
  fc_log_printf_msg_vargs(binfo->fc_brd_no,
      &fc_msgBlk0131,                  /* ptr to msg structure */
      fc_mes0131,                      /* ptr to msg */
      fc_msgBlk0131.msgPreambleStr,    /* begin varargs */
      elscmd,
      icmd->un.ulpWord[5],             /* did */
      icmd->ulpIoTag,
      size);                           /* end varargs */
  issue_iocb_cmd(binfo, rp, temp);

  FCSTATCTR.elsXmitFrame++;
  return(0);
} /* End fc_els_rsp */


/* Retries the appropriate ELS command if necessary */
_local_ int
fc_els_retry(
FC_BRD_INFO *binfo,
RING *rp,
IOCBQ *iocbq,
uint32 cmd,
NODELIST *ndlp)
{
  IOCB     *iocb;
  MATCHMAP *bmp;

  /* While discovery or RSCN processing is active, refresh the fabric
   * watchdog timeout before deciding whether to retry.
   */
  if (((binfo->fc_flag & FC_RSCN_MODE) && (binfo->fc_ffstate == FC_READY)) ||
      (binfo->fc_ffstate == FC_LOOP_DISC) ||
      (binfo->fc_ffstate == FC_NODE_DISC)) {
    binfo->fc_fabrictmo = (2 * binfo->fc_ratov) +
        ((4 * binfo->fc_edtov) / 1000) + 1;
    if (FABRICTMO) {
fc_clk_res((fc_dev_ctl_t *)(binfo->fc_p_dev_ctl),
                 binfo->fc_fabrictmo, FABRICTMO);
    }
    else {
      FABRICTMO = fc_clk_set((fc_dev_ctl_t *)(binfo->fc_p_dev_ctl),
                             binfo->fc_fabrictmo, fc_fabric_timeout, 0, 0);
    }
  }

  iocb = &iocbq->iocb;
  /* Do not retry FARP/ADISC/PDISC */
  if ((cmd == ELS_CMD_FARP) ||
      (cmd == ELS_CMD_FARPR) ||
      (cmd == ELS_CMD_ADISC) ||
      (cmd == ELS_CMD_PDISC)) {
    goto out;
  }

  if (fc_status_action(binfo, iocbq, cmd, ndlp)) {
    /* Indicates iocb should be retried */
    /* Retry ELS response/command */
    FCSTATCTR.elsXmitRetry++;
    switch (iocb->ulpCommand) {
    case CMD_ELS_REQUEST_CR:
    case CMD_ELS_REQUEST64_CR:
    case CMD_ELS_REQUEST_CX:
    case CMD_ELS_REQUEST64_CX:
      /* Re-issue the original ELS request to the same DID */
      fc_els_cmd(binfo, cmd, (void *)((ulong)iocb->un.elsreq.remoteID),
                 (uint32)iocbq->retry, (ushort)iocb->ulpIoTag, ndlp);
      break;
    case CMD_XMIT_ELS_RSP_CX:
      fc_els_rsp(binfo, cmd, (uint32)iocb->ulpContext, (uint32)iocb->ulpClass,
                 (void *)iocbq, (uint32)iocb->un.cont[0].bdeSize, ndlp);
      break;
    case CMD_XMIT_ELS_RSP64_CX:
      /* 64-bit response: size comes from the first BPL entry */
      bmp = (MATCHMAP *)iocbq->bpl;
      if (bmp && bmp->virt) {
        fc_els_rsp(binfo, cmd, (uint32)iocb->ulpContext,
                   (uint32)iocb->ulpClass, (void *)iocbq,
                   (uint32)(((ULP_BDE64 *)bmp->virt)->tus.f.bdeSize), ndlp);
      }
      break;
    default:
      goto out;
    }
    return(1);
  }

out:
  /* ELS Retry failed */
  fc_log_printf_msg_vargs(binfo->fc_brd_no,
      &fc_msgBlk0132,                  /* ptr to msg structure */
      fc_mes0132,                      /* ptr to msg */
      fc_msgBlk0132.msgPreambleStr,    /* begin varargs */
      cmd,
      iocb->un.ulpWord[4]);            /* end varargs */
  return(0);
} /* End fc_els_retry */


/* Determines what action to take as result of the status
 * field in the iocb.  If the status indicates a retry, the iocb
 * will be setup for retry and a 1 will be returned.  If the status
 * indicates error with no action, a 0 will be returned.
 * The retry count is kept in the ls byte of the iotag.
*/
_local_ int
fc_status_action(
FC_BRD_INFO *binfo,
IOCBQ *iocbq,
uint32 cmd,
NODELIST *ndlp)
{
  uint32  class;
  uchar   tag;
  int     maxretry;
  LS_RJT  stat;
  IOCB   *iocb;

  maxretry = FC_MAXRETRY;
  iocb = &iocbq->iocb;
  iocb->ulpDelayXmit = 0;   /* cleared; may be set to 1 or 2 below */

  if (ndlp) {
    /* Never retry while an RNID action is pending, or for a node with
     * neither a DID nor a type.
     */
    if (ndlp->nlp_action & NLP_DO_RNID)
      return(0);
    if ((ndlp->nlp_DID == 0) && (ndlp->nlp_type == 0))
      return(0);
  }

  switch (iocb->ulpStatus) {
  case IOSTAT_FCP_RSP_ERROR:
  case IOSTAT_REMOTE_STOP:
    break;

  case IOSTAT_LOCAL_REJECT:
    /* Low byte of ulpWord[4] holds the local IOERR reason code */
    if ((iocb->un.ulpWord[4] & 0xff) == IOERR_LINK_DOWN)
      return(0);

    if ((iocb->un.ulpWord[4] & 0xff) == IOERR_LOOP_OPEN_FAILURE) {
      if (cmd == ELS_CMD_PLOGI) {
        if (iocbq->retry == 0)
          iocb->ulpDelayXmit = 2;   /* delay 2048 interrupt latencies */
      }
      goto elsretry;
    }
    if ((iocb->un.ulpWord[4] & 0xff) == IOERR_SEQUENCE_TIMEOUT) {
      goto elsretry;
    }
    if ((iocb->un.ulpWord[4] & 0xff) == IOERR_NO_RESOURCES) {
      if (cmd == ELS_CMD_PLOGI)
        iocb->ulpDelayXmit = 1;   /* delay 1-2 seconds */
      goto elsretry;
    }
    if ((iocb->un.ulpWord[4] & 0xff) == IOERR_INVALID_RPI) {
      goto elsretry;
    }
    break;

  case IOSTAT_NPORT_RJT:
  case IOSTAT_FABRIC_RJT:
    /* iotag is retry count */
    if ((tag = (iocbq->retry + 1)) >= maxretry) {
      FCSTATCTR.elsRetryExceeded++;
      break;
    }

    iocbq->retry = tag;
    if (iocb->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
      /* not avail temporary */
      /* Retry ELS command */
      return(1);
    }
    if (iocb->un.ulpWord[4] & RJT_UNSUP_CLASS) {
      /* class not supported */
      if (cmd == ELS_CMD_FARP)
        return(0);
      if (binfo->fc_topology == TOPOLOGY_LOOP) {
        /* for FC-AL retry logic goes class 3 - 2 - 1 */
        if (iocb->ulpClass == CLASS3) {
          class = CLASS2;
        } else {
          break;
        }
      } else {
        /* for non FC-AL retry logic goes class 1 - 2 */
        if (iocb->ulpClass == CLASS1) {
          class = CLASS2;
        } else {
          break;
        }
      }
      iocb->ulpClass = class;
      /* Retry ELS command */
      return(1);
    }
    break;

  case IOSTAT_NPORT_BSY:
  case IOSTAT_FABRIC_BSY:
elsretry:
    tag = (iocbq->retry + 1);
    /* iotag is retry count */
    if (ndlp) {
      if (cmd == ELS_CMD_PLOGI) {
        /* Already logged in (or login registration in progress) */
        if ((ndlp->nlp_state >= NLP_LOGIN) ||
            (ndlp->nlp_flag & NLP_REG_INP)) {
          return(0);   /* Don't retry */
        }
      }
      if (ndlp->nlp_flag & NLP_NODEV_TMO) {
        /* nodev timer running: keep retrying without a cap */
        iocbq->retry = tag;
        /* Retry ELS command */
        return(1);
      }
    }
    if (tag >= maxretry) {
      FCSTATCTR.elsRetryExceeded++;
      break;
    }
    iocbq->retry = tag;
    /* Retry ELS command */
    return(1);

  case IOSTAT_LS_RJT:
    stat.un.lsRjtError = SWAP_DATA(iocb->un.ulpWord[4]);
    switch (stat.un.b.lsRjtRsnCode) {
    case LSRJT_UNABLE_TPC:
      if (stat.un.b.lsRjtRsnCodeExp == LSEXP_CMD_IN_PROGRESS) {
        if (cmd == ELS_CMD_PLOGI) {
          iocb->ulpDelayXmit = 1;
          maxretry = 48;
        }
        goto elsretry;
      }
      if (cmd == ELS_CMD_PLOGI) {
        iocb->ulpDelayXmit = 1;

        /* allow for 1sec FLOGI delay */
        maxretry = FC_MAXRETRY + 1;
        goto elsretry;
      }
      break;

    case LSRJT_LOGICAL_BSY:
      if (cmd == ELS_CMD_PLOGI) {
        iocb->ulpDelayXmit = 1;
        maxretry = 48;
      }
      goto elsretry;
    }

    break;

  case IOSTAT_INTERMED_RSP:
  case IOSTAT_BA_RJT:
    break;

  default:
    break;
  }

  /* FLOGI on non-loop topologies always gets a delayed retry (up to 48) */
  if ((cmd == ELS_CMD_FLOGI) && (binfo->fc_topology != TOPOLOGY_LOOP)) {
    iocb->ulpDelayXmit = 1;
    maxretry = 48;
    if ((tag = (iocbq->retry + 1)) >= maxretry)
      return(0);
    iocbq->retry = tag;
    return(1);
  }
  return(0);
} /* End fc_status_action */


/* Timer/callback entry: send the initial FLOGI to start discovery.
 * p1/p2 are unused callback arguments.
 */
_static_ void
fc_snd_flogi(
fc_dev_ctl_t *p_dev_ctl,
void *p1,
void *p2)
{
  FC_BRD_INFO *binfo;
  RING        *rp;

  binfo = &BINFO;
  /* Stop the link down watchdog timer */
  rp = &binfo->fc_ring[FC_FCP_RING];
  if (RINGTMO) {
    fc_clk_can(p_dev_ctl, RINGTMO);
    RINGTMO = 0;
  }
  binfo->fc_flag &= ~(FC_LD_TIMEOUT | FC_LD_TIMER);

  /* We are either private or public loop topology */
  /* We are either Fabric or point-to-point topology */
  /* Now build FLOGI payload and issue ELS command to find out */
  fc_els_cmd(binfo, ELS_CMD_FLOGI, (void *)Fabric_DID,
             (uint32)0, (ushort)0, (NODELIST *)0);

  /*
   * Cancel the establish reset timer
   *
If we come to this point, we don't need that timer to
   * clear the FC_ESTABLISH_LINK flag.
   */
  if (p_dev_ctl->fc_estabtmo) {
    fc_clk_can(p_dev_ctl, p_dev_ctl->fc_estabtmo);
    p_dev_ctl->fc_estabtmo = 0;
  }
  return;
}

/* Wait < a second before sending initial FLOGI to start discovery */
int
fc_initial_flogi(
fc_dev_ctl_t *p_dev_ctl)   /* point to dev_ctl area */
{
  /* If the short timer cannot be started, send the FLOGI immediately */
  if ((p_dev_ctl->fc_waitflogi = fc_clk_set(p_dev_ctl, 0, fc_snd_flogi, 0, 0)) == 0)
    fc_snd_flogi(p_dev_ctl, 0, 0);
  return(0);
}

/***************************************/
/**  fc_issue_ct_rsp   Issue an       **/
/**                    CT rsp         **/
/***************************************/
/* Transmit a Common Transport response sequence on exchange 'tag'.
 * bmp holds the BPL describing the payload; inp->dfc_flag supplies the
 * BPL entry count and is consumed (reset to 0) here.
 * Returns 0 on success, 1 if the iocb could not be allocated.
 */
_static_ int
fc_issue_ct_rsp(
FC_BRD_INFO *binfo,
uint32 tag,
MATCHMAP *bmp,
DMATCHMAP *inp)
{
  IOCB         *icmd;
  IOCBQ        *temp;
  RING         *rp;
  fc_dev_ctl_t *p_dev_ctl;
  uint32        num_entry;

  rp = &binfo->fc_ring[FC_ELS_RING];
  num_entry = (uint32)inp->dfc_flag;
  inp->dfc_flag = 0;

  /* Allocate buffer for command iocb */
  if ((temp = (IOCBQ *)fc_mem_get(binfo, MEM_IOCB)) == 0) {
    return(1);
  }
  fc_bzero((void *)temp, sizeof(IOCBQ));
  icmd = &temp->iocb;

  icmd->un.xseq64.bdl.ulpIoTag32 = (uint32)0;
  icmd->un.xseq64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys);
  icmd->un.xseq64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys);
  icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDL;
  icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(ULP_BDE64));

  /* Save for completion so we can release these resources */
  temp->bp = (uchar *)inp;

  icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
  icmd->un.xseq64.w5.hcsw.Dfctl = 0;
  icmd->un.xseq64.w5.hcsw.Rctl = FC_SOL_CTL;
  icmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;

  p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl);
  fc_mpdata_sync(bmp->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

  /* If iotag is zero, assign one from global counter for board */
  icmd->ulpIoTag0 = (unsigned)rp->fc_iotag++;
  if ((rp->fc_iotag & 0x3fff) == 0) {
    rp->fc_iotag = 1;
  }

  /*
Fill in rest of iocb */
  icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
  icmd->ulpBdeCount = 1;
  icmd->ulpLe = 1;
  icmd->ulpClass = CLASS3;
  icmd->ulpContext = (ushort)tag;
  icmd->ulpOwner = OWN_CHIP;
  /* Xmit CT response on exchange */
  fc_log_printf_msg_vargs(binfo->fc_brd_no,
      &fc_msgBlk0133,                  /* ptr to msg structure */
      fc_mes0133,                      /* ptr to msg */
      fc_msgBlk0133.msgPreambleStr,    /* begin varargs */
      icmd->ulpContext,                /* xid */
      icmd->ulpIoTag,
      binfo->fc_ffstate);              /* end varargs */
  issue_iocb_cmd(binfo, rp, temp);
  return(0);
} /* fc_issue_ct_rsp */

/***************************************/
/**  fc_gen_req        Issue an       **/
/**                    GEN_REQUEST cmd**/
/***************************************/
/* Issue a GEN_REQUEST64 iocb (CT traffic) to the NPORT with login rpi.
 * bmp is the BPL (num_entry entries) covering inp/outp; when usr_flg is
 * set the caller keeps ownership of the BPL.  tmo of 0 selects the
 * default of twice R_A_TOV.  Returns 0 on success, 1 on allocation failure.
 */
_static_ int
fc_gen_req(
FC_BRD_INFO *binfo,
MATCHMAP *bmp,
MATCHMAP *inp,
MATCHMAP *outp,
uint32 rpi,
uint32 usr_flg,
uint32 num_entry,
uint32 tmo)
{
  IOCB         *icmd;
  IOCBQ        *temp;
  RING         *rp;
  fc_dev_ctl_t *p_dev_ctl;

  rp = &binfo->fc_ring[FC_ELS_RING];

  /* Allocate buffer for command iocb */
  if ((temp = (IOCBQ *)fc_mem_get(binfo, MEM_IOCB | MEM_PRI)) == 0) {
    return(1);
  }
  fc_bzero((void *)temp, sizeof(IOCBQ));
  icmd = &temp->iocb;

  icmd->un.genreq64.bdl.ulpIoTag32 = (uint32)0;
  icmd->un.genreq64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys);
  icmd->un.genreq64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys);
  icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
  icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(ULP_BDE64));

  if (usr_flg)
    temp->bpl = 0;
  else
    temp->bpl = (uchar *)bmp;

  /* Save for completion so we can release these resources */
  temp->bp = (uchar *)inp;
  temp->info = (uchar *)outp;

  /* Fill in payload, bp points to frame payload */
  icmd->ulpCommand = CMD_GEN_REQUEST64_CR;

  p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl);
  fc_mpdata_sync(bmp->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

  /* If iotag is zero, assign one from global counter for board */
  icmd->ulpIoTag0 =
(unsigned)rp->fc_iotag++;
  if ((rp->fc_iotag & 0x3fff) == 0) {
    rp->fc_iotag = 1;
  }

  /* Fill in rest of iocb */
  icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
  icmd->un.genreq64.w5.hcsw.Dfctl = 0;
  icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
  icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;

  /* Default timeout is twice R_A_TOV when the caller passed 0 */
  if (tmo == 0)
    tmo = (2 * binfo->fc_ratov);
  icmd->ulpTimeout = tmo;
  icmd->ulpBdeCount = 1;
  icmd->ulpLe = 1;
  icmd->ulpClass = CLASS3;
  icmd->ulpContext = (volatile ushort)rpi;
  icmd->ulpOwner = OWN_CHIP;
  /* Issue GEN REQ IOCB for NPORT */
  fc_log_printf_msg_vargs(binfo->fc_brd_no,
      &fc_msgBlk0134,                  /* ptr to msg structure */
      fc_mes0134,                      /* ptr to msg */
      fc_msgBlk0134.msgPreambleStr,    /* begin varargs */
      icmd->un.ulpWord[5],             /* did */
      icmd->ulpIoTag,
      binfo->fc_ffstate);              /* end varargs */
  issue_iocb_cmd(binfo, rp, temp);

  FCSTATCTR.elsXmitFrame++;
  return(0);
} /* End fc_gen_req */


/***************************************/
/**  fc_rnid_req       Issue an       **/
/**                  RNID REQUEST cmd **/
/***************************************/
/* Issue an RNID ELS request via GEN_REQUEST64 using caller-supplied DMA
 * buffers: inp (request, length in inp->dfc.fc_mptr), outp (response,
 * length in outp->dfc.fc_mptr).  *bmpp returns the allocated BPL so the
 * completion path can free it.  SLI2 only.  Returns 0 on success, 1 on
 * allocation failure or non-SLI2 mode.
 */
_static_ int
fc_rnid_req(
FC_BRD_INFO *binfo,
DMATCHMAP *inp,
DMATCHMAP *outp,
MATCHMAP **bmpp,
uint32 rpi)
{
  IOCB         *icmd;
  IOCBQ        *temp;
  RING         *rp;
  ULP_BDE64    *bpl;
  MATCHMAP     *bmp;
  fc_dev_ctl_t *p_dev_ctl;

  rp = &binfo->fc_ring[FC_ELS_RING];

  /* Allocate buffer for command iocb */
  if ((temp = (IOCBQ *)fc_mem_get(binfo, MEM_IOCB | MEM_PRI)) == 0) {
    return(1);
  }
  fc_bzero((void *)temp, sizeof(IOCBQ));
  icmd = &temp->iocb;

  if (binfo->fc_flag & FC_SLI2) {
    /* Allocate buffer for Buffer ptr list */
    if ((bmp = (MATCHMAP *)fc_mem_get(binfo, MEM_BPL | MEM_PRI)) == 0) {
      fc_mem_put(binfo, MEM_IOCB, (uchar *)temp);
      return(1);
    }
    *bmpp = bmp;   /* to free BPL on compl */
    bpl = (ULP_BDE64 *)bmp->virt;
    bpl->addrLow = PCIMEM_LONG(putPaddrLow((ulong)inp->dfc.phys));
    bpl->addrHigh = PCIMEM_LONG(putPaddrHigh((ulong)inp->dfc.phys));
    bpl->tus.f.bdeFlags = 0;
    bpl++;
    /* Second BPL entry: receive buffer for the RNID response */
    bpl->addrLow = PCIMEM_LONG(putPaddrLow((ulong)outp->dfc.phys));
    bpl->addrHigh = PCIMEM_LONG(putPaddrHigh((ulong)outp->dfc.phys));
    bpl->tus.f.bdeSize = (ushort)((ulong)(outp->dfc.fc_mptr));
    bpl->tus.f.bdeFlags = BUFF_USE_RCV;
    bpl->tus.w = PCIMEM_LONG(bpl->tus.w);

    bpl--;   /* so we can fill in size later */

    icmd->un.genreq64.bdl.ulpIoTag32 = (uint32)0;
    icmd->un.genreq64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys);
    icmd->un.genreq64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys);
    icmd->un.genreq64.bdl.bdeSize = (2 * sizeof(ULP_BDE64));
    icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
    temp->bpl = 0;
  } else {
    /* Not supported outside SLI2 mode */
    fc_mem_put(binfo, MEM_IOCB, (uchar *)temp);
    return(1);
  }

  /* Save for completion so we can release these resources */
  temp->info = (uchar *)outp;

  /* Fill in payload, bp points to frame payload */
  icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
  bpl->tus.f.bdeSize = (ushort)((ulong)(inp->dfc.fc_mptr));
  bpl->tus.w = PCIMEM_LONG(bpl->tus.w);

  p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl);
  fc_mpdata_sync(bmp->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
  fc_mpdata_sync(inp->dfc.dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

  /* If iotag is zero, assign one from global counter for board */
  icmd->ulpIoTag0 = (unsigned)rp->fc_iotag++;
  if ((rp->fc_iotag & 0x3fff) == 0) {
    rp->fc_iotag = 1;
  }

  /* Fill in rest of iocb */
  icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
  icmd->un.genreq64.w5.hcsw.Dfctl = 0;
  icmd->un.genreq64.w5.hcsw.Rctl = FC_ELS_REQ;
  icmd->un.genreq64.w5.hcsw.Type = FC_ELS_DATA;

  icmd->ulpBdeCount = 1;
  icmd->ulpLe = 1;
  icmd->ulpClass = CLASS3;
  icmd->ulpTimeout = (uchar)(rp->fc_ringtmo - 2);
  icmd->ulpContext = (volatile ushort)rpi;
  icmd->ulpOwner = OWN_CHIP;
  /* Issue GEN REQ IOCB for RNID */
  fc_log_printf_msg_vargs(binfo->fc_brd_no,
      &fc_msgBlk0135,                  /* ptr to msg structure */
      fc_mes0135,                      /* ptr to msg */
      fc_msgBlk0135.msgPreambleStr,    /* begin varargs */
      icmd->un.ulpWord[5],             /* did */
      icmd->ulpIoTag,
      binfo->fc_ffstate);              /* end varargs */
  issue_iocb_cmd(binfo, rp, temp);
  outp->dfc.fc_mptr = 0;

  FCSTATCTR.elsXmitFrame++;
  return(0);
} /* End fc_rnid_req */


/***************************************/
/**  fc_issue_ct_req   Issue a        **/
/**    CT request to nameserver       **/
/***************************************/
/* Send a CT request to the node with address portid (normally the
 * NameServer).  If the node is not logged in (or RSCN processing is
 * active), a PLOGI may be kicked off and ENODEV/EACCES returned so the
 * caller can retry later.  Returns 0 on success.
 */
_static_ int
fc_issue_ct_req(
FC_BRD_INFO *binfo,
uint32 portid,
MATCHMAP *bmp,
DMATCHMAP *inmp,
DMATCHMAP *outmp,
uint32 tmo)
{
  uint32    size;
  NODELIST *ndlp;

  /* NOTE(review): 'size' is captured but not referenced below —
   * candidate for removal.
   */
  size = (uint32)outmp->dfc_flag;
  /* Find nameserver entry */
  if ((((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, portid))) == 0) ||
      (ndlp->nlp_Rpi == 0) ||
      (binfo->fc_flag & FC_RSCN_MODE)) {

    if ((binfo->fc_flag & FC_FABRIC) && (binfo->fc_ffstate == FC_READY)) {
      if ((ndlp == 0) ||
          ((ndlp->nlp_state < NLP_PLOGI) && !(ndlp->nlp_flag & NLP_NS_REMOVED))) {
        /* We can LOGIN to the port first */
        fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)((ulong)portid),
                   (uint32)0, (ushort)0, ndlp);
      }
      return(ENODEV);
    }
    return(EACCES);
  }

  if ((fc_gen_req(binfo, bmp, (MATCHMAP *)inmp, (MATCHMAP *)outmp,
                  ndlp->nlp_Rpi, 1, (inmp->dfc_flag + outmp->dfc_flag), tmo)))
    return(ENOMEM);

  outmp->dfc_flag = 0;
  return(0);
}

/**************************************************/
/**                                              **/
/**   Free any deferred RSCNs                    **/
/**                                              **/
/**************************************************/
/* Drain the deferred-RSCN queue, freeing each queued iocb and every
 * receive buffer attached to it.  Always returns 0.
 */
_static_ int
fc_flush_rscn_defer(
fc_dev_ctl_t *p_dev_ctl)
{
  FC_BRD_INFO *binfo;
  RING        *rp;
  IOCBQ       *xmitiq;
  IOCB        *iocb;
  MATCHMAP    *mp;
  int          i;

  binfo = &BINFO;
  rp = &binfo->fc_ring[FC_ELS_RING];
  while (binfo->fc_defer_rscn.q_first) {
    /* Unlink the head entry from the deferred queue */
    xmitiq = (IOCBQ *)binfo->fc_defer_rscn.q_first;
    if ((binfo->fc_defer_rscn.q_first = xmitiq->q) == 0) {
      binfo->fc_defer_rscn.q_last = 0;
    }
    binfo->fc_defer_rscn.q_cnt--;
    iocb = &xmitiq->iocb;
    /* First pointer stashed in the iocb is the primary receive buffer */
    mp = *((MATCHMAP **)iocb);
    *((MATCHMAP **)iocb) = 0;
    xmitiq->q = NULL;
    fc_mem_put(binfo, MEM_BUF, (uchar *)mp);

    i = 1;
    /* free resources associated with this iocb and repost the ring buffers */
    if (!(binfo->fc_flag & FC_SLI2)) {
      for (i = 1; i < (int)iocb->ulpBdeCount; i++) {
        mp = fc_getvaddr(p_dev_ctl, rp, (uchar *)((ulong)iocb->un.cont[i].bdeAddress));
        if (mp) {
          fc_mem_put(binfo, MEM_BUF, (uchar *)mp);
        }
      }
    }
    fc_mem_put(binfo, MEM_IOCB, (uchar *)xmitiq);
  }
  return(0);
}

/**************************************************/
/**                                              **/
/** Issue a NameServer query for RSCN processing **/
/**                                              **/
/**************************************************/
/* Timer/callback entry (a1/a2 unused).  Logs in to the NameServer if
 * needed, otherwise issues GID_FT; on GID_FT failure falls back to
 * driving discovery directly.
 */
_static_ void
fc_issue_ns_query(
fc_dev_ctl_t *p_dev_ctl,
void *a1,
void *a2)
{
  FC_BRD_INFO *binfo;
  NODELIST    *ndlp;

  binfo = &BINFO;
  binfo->fc_flag &= ~FC_NSLOGI_TMR;
  /* Now check with NameServer */
  if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, NameServer_DID)) == 0) {
    /* We can LOGIN to the NameServer now */
    fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)NameServer_DID,
               (uint32)0, (ushort)0, ndlp);
  }
  else {
    /* Issue GID_FT to Nameserver */
    if (fc_ns_cmd(p_dev_ctl, ndlp, SLI_CTNS_GID_FT)) {
      /* error so start discovery */
      /* Done with NameServer for now, but keep logged in */
      ndlp->nlp_action &= ~NLP_DO_RSCN;

      /* Fire out PLOGIs on nodes marked for discovery */
      if ((binfo->fc_nlp_cnt <= 0) &&
          !(binfo->fc_flag & FC_NLP_MORE)) {
        binfo->fc_nlp_cnt = 0;
        fc_nextrscn(p_dev_ctl, fc_max_els_sent);
      }
      else {
        fc_nextnode(p_dev_ctl, ndlp);
      }
      ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH;
    }
  }
  return;
}

/* Abort an in-progress discovery: force link-down cleanup, turn off
 * delayed ABTS for ELS timeouts and re-issue CLEAR_LA.  Returns 0.
 */
_static_ int
fc_abort_discovery(
fc_dev_ctl_t *p_dev_ctl)
{
  FC_BRD_INFO *binfo;
  MAILBOXQ    *mb;

  binfo = &BINFO;

  fc_linkdown(p_dev_ctl);

  /* This should turn off DELAYED ABTS for ELS timeouts */
  /* NOTE(review): 0x052198 is a magic SLIM address/value — confirm
   * against the adapter SLIM map.
   */
  if ((mb = (MAILBOXQ *)fc_mem_get(binfo, MEM_MBOX))) {
    fc_set_slim(binfo, (MAILBOX *)mb, 0x052198, 0);
    if (issue_mb_cmd(binfo, (MAILBOX *)mb, MBX_NOWAIT) != MBX_BUSY) {
      fc_mem_put(binfo,
MEM_MBOX, (uchar *)mb);
    }
  }

  /* This is at init, clear la */
  if ((mb = (MAILBOXQ *)fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) {
    fc_clear_la(binfo, (MAILBOX *)mb);
    if (issue_mb_cmd(binfo, (MAILBOX *)mb, MBX_NOWAIT) != MBX_BUSY) {
      fc_mem_put(binfo, MEM_MBOX, (uchar *)mb);
    }
  } else {
    binfo->fc_ffstate = FC_ERROR;
    /* Device Discovery completion error */
    fc_log_printf_msg_vargs(binfo->fc_brd_no,
        &fc_msgBlk0217,                  /* ptr to msg structure */
        fc_mes0217,                      /* ptr to msg */
        fc_msgBlk0217.msgPreambleStr);   /* begin & end varargs */
  }
  /* Discovery is over: stop the fabric watchdog */
  if (FABRICTMO) {
    fc_clk_can(p_dev_ctl, FABRICTMO);
    FABRICTMO = 0;
  }
  return(0);
}

#define FOURBYTES 4

/**************************************************/
/**  fc_fdmi_cmd                                 **/
/**                                              **/
/**  Description:                                **/
/**    Issue Cmd to HBA Management Server        **/
/**       SLI_MGMT_RHBA                          **/
/**       SLI_MGMT_RPRT                          **/
/**       SLI_MGMT_DHBA                          **/
/**       SLI_MGMT_DPRT                          **/
/**                                              **/
/**  Accept Payload for those 4 commands         **/
/**  is 0                                        **/
/**                                              **/
/**  Returns:                                    **/
/**                                              **/
/**************************************************/
_static_ int
fc_fdmi_cmd(
fc_dev_ctl_t *p_dev_ctl,
NODELIST *ndlp,
int cmdcode)
{
  FC_BRD_INFO         *binfo;
  MATCHMAP            *mp, *bmp;
  SLI_CT_REQUEST      *CtReq;
  ULP_BDE64           *bpl;
  u32bit               size;
  PREG_HBA             rh;
  PPORT_ENTRY          pe;
  PREG_PORT_ATTRIBUTE  pab;
  PATTRIBUTE_BLOCK     ab;
  PATTRIBUTE_ENTRY     ae;
  uint32               id;

  binfo = &BINFO;

  /* fill in BDEs for command */
  /* Allocate buffer for command payload */
  if ((mp = (MATCHMAP *)fc_mem_get(binfo, MEM_BUF)) == 0) {
    return(1);
  }

  bmp = 0;

  /* Allocate buffer for Buffer ptr list */
  if ((bmp = (MATCHMAP *)fc_mem_get(binfo, MEM_BPL)) == 0) {
    fc_mem_put(binfo, MEM_BUF, (uchar *)mp);
    return(1);
  }
  /* FDMI Req */
  fc_log_printf_msg_vargs(binfo->fc_brd_no,
      &fc_msgBlk0218,                  /* ptr to msg structure */
      fc_mes0218,                      /* ptr to msg */
      fc_msgBlk0218.msgPreambleStr,    /* begin varargs */
      cmdcode,
      binfo->fc_flag);                 /* end varargs */
  CtReq =
(SLI_CT_REQUEST * )mp->virt; + /* + * Initialize mp, 1024 bytes + */ + fc_bzero((void *)CtReq, FCELSSIZE); + + CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; + CtReq->RevisionId.bits.InId = 0; + + CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE; + CtReq->FsSubType = SLI_CT_FDMI_Subtypes; + size = 0; + + switch (cmdcode) { + case SLI_MGMT_RHBA : + { + fc_vpd_t * vp; + char * str; + uint32 i, j, incr; + uchar HWrev[8]; + + vp = &VPD; + + CtReq->CommandResponse.bits.CmdRsp = SWAP_DATA16(SLI_MGMT_RHBA); + CtReq->CommandResponse.bits.Size = 0; + rh = (PREG_HBA)&CtReq->un.PortID; + fc_bcopy((uchar * )&binfo->fc_sparam.portName, (uchar * )&rh->hi.PortName, + sizeof(NAME_TYPE)); + rh->rpl.EntryCnt = SWAP_DATA(1); /* One entry (port) per adapter */ + fc_bcopy((uchar * )&binfo->fc_sparam.portName, (uchar * )&rh->rpl.pe, + sizeof(NAME_TYPE)); + + /* point to the HBA attribute block */ + size = sizeof(NAME_TYPE) + FOURBYTES + sizeof(NAME_TYPE); + ab = (PATTRIBUTE_BLOCK)((uchar *)rh + size); + ab->EntryCnt = 0; + + /* Point to the begin of the first HBA attribute entry */ + /* #1 HBA attribute entry */ + size += FOURBYTES; + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(NODE_NAME); + ae->ad.bits.AttrLen = SWAP_DATA16(sizeof(NAME_TYPE)); + fc_bcopy((uchar * )&binfo->fc_sparam.nodeName, (uchar * )&ae->un.NodeName, + sizeof(NAME_TYPE)); + ab->EntryCnt++; + size += FOURBYTES + sizeof(NAME_TYPE); + + /* #2 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MANUFACTURER); + ae->ad.bits.AttrLen = SWAP_DATA16(24); + fc_bcopy("Emulex Network Systems", ae->un.Manufacturer, 22); + ab->EntryCnt++; + size += FOURBYTES + 24; + + /* #3 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(SERIAL_NUMBER); + ae->ad.bits.AttrLen = SWAP_DATA16(32); + fc_bcopy(binfo->fc_SerialNumber, ae->un.SerialNumber, 32); + ab->EntryCnt++; + size += FOURBYTES + 32; + + 
/* #4 HBA attribute entry */ + id = fc_rdpci_32(p_dev_ctl, PCI_VENDOR_ID_REGISTER); + switch((id >> 16) & 0xffff) { + case PCI_DEVICE_ID_SUPERFLY: + if((vp->rev.biuRev == 1) || (vp->rev.biuRev == 2) || + (vp->rev.biuRev == 3)) { + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL); + ae->ad.bits.AttrLen = SWAP_DATA16(8); + fc_bcopy("LP7000", ae->un.Model, 6); + ab->EntryCnt++; + size += FOURBYTES + 8; + + /* #5 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL_DESCRIPTION); + ae->ad.bits.AttrLen = SWAP_DATA16(64); + fc_bcopy("Emulex LightPulse LP7000 1 Gigabit PCI Fibre Channel Adapter", + ae->un.ModelDescription, 62); + ab->EntryCnt++; + size += FOURBYTES + 64; + } else { + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL); + ae->ad.bits.AttrLen = SWAP_DATA16(8); + fc_bcopy("LP7000E", ae->un.Model, 7); + ab->EntryCnt++; + size += FOURBYTES + 8; + + /* #5 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL_DESCRIPTION); + ae->ad.bits.AttrLen = SWAP_DATA16(64); + fc_bcopy("Emulex LightPulse LP7000E 1 Gigabit PCI Fibre Channel Adapter", + ae->un.ModelDescription, 62); + ab->EntryCnt++; + size += FOURBYTES + 64; + } + break; + case PCI_DEVICE_ID_DRAGONFLY: + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL); + ae->ad.bits.AttrLen = SWAP_DATA16(8); + fc_bcopy("LP8000", ae->un.Model, 6); + ab->EntryCnt++; + size += FOURBYTES + 8; + + /* #5 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL_DESCRIPTION); + ae->ad.bits.AttrLen = SWAP_DATA16(64); + fc_bcopy("Emulex LightPulse LP8000 1 Gigabit PCI Fibre Channel Adapter", + ae->un.ModelDescription, 62); + ab->EntryCnt++; + size += FOURBYTES + 64; + break; + case PCI_DEVICE_ID_CENTAUR: + if(FC_JEDEC_ID(vp->rev.biuRev) == 
CENTAUR_2G_JEDEC_ID) { + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL); + ae->ad.bits.AttrLen = SWAP_DATA16(8); + fc_bcopy("LP9002", ae->un.Model, 6); + ab->EntryCnt++; + size += FOURBYTES + 8; + + /* #5 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL_DESCRIPTION); + ae->ad.bits.AttrLen = SWAP_DATA16(64); + fc_bcopy("Emulex LightPulse LP9002 2 Gigabit PCI Fibre Channel Adapter", + ae->un.ModelDescription, 62); + ab->EntryCnt++; + size += FOURBYTES + 64; + } else { + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL); + ae->ad.bits.AttrLen = SWAP_DATA16(8); + fc_bcopy("LP9000", ae->un.Model, 6); + ab->EntryCnt++; + size += FOURBYTES + 8; + + /* #5 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL_DESCRIPTION); + ae->ad.bits.AttrLen = SWAP_DATA16(64); + fc_bcopy("Emulex LightPulse LP9000 1 Gigabit PCI Fibre Channel Adapter", + ae->un.ModelDescription, 62); + ab->EntryCnt++; + size += FOURBYTES + 64; + } + break; + case PCI_DEVICE_ID_PEGASUS: + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL); + ae->ad.bits.AttrLen = SWAP_DATA16(8); + fc_bcopy("LP9802", ae->un.Model, 6); + ab->EntryCnt++; + size += FOURBYTES + 8; + + /* #5 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL_DESCRIPTION); + ae->ad.bits.AttrLen = SWAP_DATA16(64); + fc_bcopy("Emulex LightPulse LP9802 2 Gigabit PCI Fibre Channel Adapter", + ae->un.ModelDescription, 62); + ab->EntryCnt++; + size += FOURBYTES + 64; + break; + case PCI_DEVICE_ID_PFLY: + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL); + ae->ad.bits.AttrLen = SWAP_DATA16(8); + fc_bcopy("LP982", ae->un.Model, 5); + ab->EntryCnt++; + size += FOURBYTES + 8; + + /* #5 HBA attribute entry */ + ae = 
(PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(MODEL_DESCRIPTION); + ae->ad.bits.AttrLen = SWAP_DATA16(64); + fc_bcopy("Emulex LightPulse LP982 2 Gigabit PCI Fibre Channel Adapter", + ae->un.ModelDescription, 62); + ab->EntryCnt++; + size += FOURBYTES + 64; + break; + } + + /* #6 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(HARDWARE_VERSION); + ae->ad.bits.AttrLen = SWAP_DATA16(8); + /* Convert JEDEC ID to ascii for hardware version */ + incr = vp->rev.biuRev; + for(i=0;i<8;i++) { + j = (incr & 0xf); + if(j <= 9) + HWrev[7-i] = (char)((uchar)0x30 + (uchar)j); + else + HWrev[7-i] = (char)((uchar)0x61 + (uchar)(j-10)); + incr = (incr >> 4); + } + fc_bcopy((uchar *)HWrev, ae->un.HardwareVersion, 8); + ab->EntryCnt++; + size += FOURBYTES + 8; + + /* #7 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(DRIVER_VERSION); + ae->ad.bits.AttrLen = SWAP_DATA16(16); + for (i=0; lpfc_release_version[i]; i++); + fc_bcopy((uchar *)lpfc_release_version, ae->un.DriverVersion, i); + ab->EntryCnt++; + size += FOURBYTES + 16; + + /* #8 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(OPTION_ROM_VERSION); + ae->ad.bits.AttrLen = SWAP_DATA16(32); + fc_bcopy(binfo->fc_OptionROMVersion, ae->un.OptionROMVersion, 32); + ab->EntryCnt++; + size += FOURBYTES + 32; + + /* #9 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(FIRMWARE_VERSION); + ae->ad.bits.AttrLen = SWAP_DATA16(32); + str = decode_firmware_rev(binfo, vp); + fc_bcopy((uchar *)str, ae->un.FirmwareVersion, 32); + ab->EntryCnt++; + size += FOURBYTES + 32; + + /* #10 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(VENDOR_SPECIFIC); + ae->ad.bits.AttrLen = SWAP_DATA16(4); + id = SWAP_LONG(id); + id = 
(((SWAP_ALWAYS16(id >> 16)) << 16) | SWAP_ALWAYS16(id)); + ae->un.VendorSpecific = id; + ab->EntryCnt++; + size += FOURBYTES + 4; + + /* #11 HBA attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)rh + size); + ae->ad.bits.AttrType = SWAP_DATA16(DRIVER_NAME); + ae->ad.bits.AttrLen = SWAP_DATA16(4); + fc_bcopy("lpfc", ae->un.DriverName, 4); + ab->EntryCnt++; + size += FOURBYTES + 4; + + + + ab->EntryCnt = SWAP_DATA(ab->EntryCnt); + /* Total size */ + size = GID_REQUEST_SZ - 4 + size; + } + break; + + case SLI_MGMT_RPRT : + { + fc_vpd_t * vp; + SERV_PARM * hsp; + + vp = &VPD; + + CtReq->CommandResponse.bits.CmdRsp = SWAP_DATA16(SLI_MGMT_RPRT); + CtReq->CommandResponse.bits.Size = 0; + pab = (PREG_PORT_ATTRIBUTE)&CtReq->un.PortID; + size = sizeof(NAME_TYPE) + sizeof(NAME_TYPE) + FOURBYTES; + fc_bcopy((uchar * )&binfo->fc_sparam.portName, (uchar * )&pab->HBA_PortName, + sizeof(NAME_TYPE)); + fc_bcopy((uchar * )&binfo->fc_sparam.portName, (uchar * )&pab->PortName, + sizeof(NAME_TYPE)); + pab->ab.EntryCnt = 0; + + /* #1 Port attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)pab + size); + ae->ad.bits.AttrType = SWAP_DATA16(SUPPORTED_FC4_TYPES); + ae->ad.bits.AttrLen = SWAP_DATA16(8); + ae->un.SupportFC4Types[4] = 1; + pab->ab.EntryCnt++; + size += FOURBYTES + 8; + + /* #2 Port attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)pab + size); + ae->ad.bits.AttrType = SWAP_DATA16(SUPPORTED_SPEED); + ae->ad.bits.AttrLen = SWAP_DATA16(4); + if(FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) + ae->un.SupportSpeed = HBA_PORTSPEED_2GBIT; + else + ae->un.SupportSpeed = HBA_PORTSPEED_1GBIT; + pab->ab.EntryCnt++; + size += FOURBYTES + 4; + + /* #3 Port attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)pab + size); + ae->ad.bits.AttrType = SWAP_DATA16(PORT_SPEED); + ae->ad.bits.AttrLen = SWAP_DATA16(4); + if( binfo->fc_linkspeed == LA_2GHZ_LINK) + ae->un.PortSpeed = HBA_PORTSPEED_2GBIT; + else + ae->un.PortSpeed = HBA_PORTSPEED_1GBIT; + pab->ab.EntryCnt++; + size += 
FOURBYTES + 4; + + /* #4 Port attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)pab + size); + ae->ad.bits.AttrType = SWAP_DATA16(MAX_FRAME_SIZE); + ae->ad.bits.AttrLen = SWAP_DATA16(4); + hsp = (SERV_PARM *)&binfo->fc_sparam; + ae->un.MaxFrameSize = (((uint32)hsp->cmn.bbRcvSizeMsb) << 8) | + (uint32)hsp->cmn.bbRcvSizeLsb; + pab->ab.EntryCnt++; + size += FOURBYTES + 4; + + /* #5 Port attribute entry */ + ae = (PATTRIBUTE_ENTRY)((uchar *)pab + size); + ae->ad.bits.AttrType = SWAP_DATA16(OS_DEVICE_NAME); + ae->ad.bits.AttrLen = SWAP_DATA16(4); + fc_bcopy("lpfc", (uchar * )&ae->un.DriverName, 4); + pab->ab.EntryCnt++; + size += FOURBYTES + 4; + + pab->ab.EntryCnt = SWAP_DATA(pab->ab.EntryCnt); + /* Total size */ + size = GID_REQUEST_SZ - 4 + size; + } + break; + + case SLI_MGMT_DHBA : + CtReq->CommandResponse.bits.CmdRsp = SWAP_DATA16(SLI_MGMT_DHBA); + CtReq->CommandResponse.bits.Size = 0; + pe = (PPORT_ENTRY)&CtReq->un.PortID; + fc_bcopy((uchar * )&binfo->fc_sparam.portName, (uchar * )&pe->PortName, + sizeof(NAME_TYPE)); + size = GID_REQUEST_SZ - 4 + sizeof(NAME_TYPE); + break; + + case SLI_MGMT_DPRT : + CtReq->CommandResponse.bits.CmdRsp = SWAP_DATA16(SLI_MGMT_DPRT); + CtReq->CommandResponse.bits.Size = 0; + pe = (PPORT_ENTRY)&CtReq->un.PortID; + fc_bcopy((uchar * )&binfo->fc_sparam.portName, (uchar * )&pe->PortName, + sizeof(NAME_TYPE)); + size = GID_REQUEST_SZ - 4 + sizeof(NAME_TYPE); + break; + } + + bpl = (ULP_BDE64 * )bmp->virt; + bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(mp->phys)); + bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(mp->phys)); + bpl->tus.f.bdeFlags = 0; + bpl->tus.f.bdeSize = size; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + + + if(fc_ct_cmd(p_dev_ctl, mp, bmp, ndlp)) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + } + return(0); +} /* End fc_ns_cmd */ + +/**************************************************/ +/** fc_fdmi_rsp **/ +/** **/ +/** Description: **/ +/** Process Rsp from HBA 
Management Server **/ +/** SLI_MGMT_RHBA **/ +/** SLI_MGMT_RPRT **/ +/** SLI_MGMT_DHBA **/ +/** SLI_MGMT_DPRT **/ +/** **/ +/** Returns: **/ +/** **/ +/**************************************************/ +_static_ void +fc_fdmi_rsp( +fc_dev_ctl_t *p_dev_ctl, +MATCHMAP *mp, +MATCHMAP *rsp_mp) + +{ + FC_BRD_INFO * binfo; + SLI_CT_REQUEST * Cmd; + SLI_CT_REQUEST * Rsp; + NODELIST * ndlp; + ushort fdmi_cmd; + ushort fdmi_rsp; + int rc; + + binfo = &BINFO; + + ndlp = (NODELIST *)mp->fc_mptr; + Cmd = (SLI_CT_REQUEST *)mp->virt; + Rsp = (SLI_CT_REQUEST *)rsp_mp->virt; + + fdmi_rsp = Rsp->CommandResponse.bits.CmdRsp; + + fdmi_cmd = Cmd->CommandResponse.bits.CmdRsp; + rc = 1; + + switch (SWAP_DATA16(fdmi_cmd)) { + case SLI_MGMT_RHBA : + rc = fc_fdmi_cmd(p_dev_ctl, ndlp, SLI_MGMT_RPRT); + break; + + case SLI_MGMT_RPRT : + break; + + case SLI_MGMT_DHBA : + rc = fc_fdmi_cmd(p_dev_ctl, ndlp, SLI_MGMT_RHBA); + break; + + case SLI_MGMT_DPRT : + rc = fc_fdmi_cmd(p_dev_ctl, ndlp, SLI_MGMT_DHBA); + break; + } + + if (rc) { + /* FDMI rsp failed */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0251, /* ptr to msg structure */ + fc_mes0251, /* ptr to msg */ + fc_msgBlk0251.msgPreambleStr, /* begin varargs */ + SWAP_DATA16(fdmi_cmd) ); /* end varargs */ + } +} /* fc_fdmi_rsp */ + + +/*****************************************************************************/ +/* + * NAME: fc_plogi_put + * + * FUNCTION: put iocb cmd onto the iocb plogi queue. + * + * EXECUTION ENVIRONMENT: process and interrupt level. 
+ * + * NOTES: + * + * CALLED FROM: + * issue_els_cmd + * + * INPUT: + * binfo - pointer to the device info area + * iocbq - pointer to iocb queue entry + * + * RETURNS: + * NULL - command queued + */ +/*****************************************************************************/ +_static_ void +fc_plogi_put( +FC_BRD_INFO *binfo, +IOCBQ *iocbq) /* pointer to iocbq entry */ +{ + if (binfo->fc_plogi.q_first) { + /* queue command to end of list */ + ((IOCBQ * )binfo->fc_plogi.q_last)->q = (uchar * )iocbq; + binfo->fc_plogi.q_last = (uchar * )iocbq; + } else { + /* add command to empty list */ + binfo->fc_plogi.q_first = (uchar * )iocbq; + binfo->fc_plogi.q_last = (uchar * )iocbq; + } + + iocbq->q = NULL; + binfo->fc_plogi.q_cnt++; + binfo->fc_flag |= FC_DELAY_NSLOGI; + return; + +} /* End fc_plogi_put */ + + +/*****************************************************************************/ +/* + * NAME: fc_plogi_get + * + * FUNCTION: get a iocb command from iocb plogi command queue + * + * EXECUTION ENVIRONMENT: interrupt level. 
+ * + * NOTES: + * + * CALLED FROM: + * handle_mb_event + * + * INPUT: + * binfo - pointer to the device info area + * + * RETURNS: + * NULL - no match found + * iocb pointer - pointer to a iocb command + */ +/*****************************************************************************/ +_static_ IOCBQ * +fc_plogi_get( +FC_BRD_INFO *binfo) +{ + IOCBQ * p_first = NULL; + + if (binfo->fc_plogi.q_first) { + p_first = (IOCBQ * )binfo->fc_plogi.q_first; + if ((binfo->fc_plogi.q_first = p_first->q) == 0) { + binfo->fc_plogi.q_last = 0; + binfo->fc_flag &= ~FC_DELAY_NSLOGI; + } + p_first->q = NULL; + binfo->fc_plogi.q_cnt--; + } + return(p_first); + +} /* End fc_plogi_get */ + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcfgparm.h current/drivers/scsi/lpfc/fcfgparm.h --- reference/drivers/scsi/lpfc/fcfgparm.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcfgparm.h 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,341 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#ifndef _H_CFGPARAM +#define _H_CFGPARAM + +#define LPFC_DFT_POST_IP_BUF 128 +#define LPFC_MIN_POST_IP_BUF 64 +#define LPFC_MAX_POST_IP_BUF 1024 +#define LPFC_DFT_XMT_QUE_SIZE 256 +#define LPFC_MIN_XMT_QUE_SIZE 128 +#define LPFC_MAX_XMT_QUE_SIZE 10240 +#define LPFC_DFT_NUM_IOCBS 1024 +#define LPFC_MIN_NUM_IOCBS 128 +#define LPFC_MAX_NUM_IOCBS 10240 +#define LPFC_DFT_NUM_BUFS 1024 +#define LPFC_MIN_NUM_BUFS 64 +#define LPFC_MAX_NUM_BUFS 4096 +#define LPFC_DFT_NUM_NODES 510 +#define LPFC_MIN_NUM_NODES 64 +#define LPFC_MAX_NUM_NODES 4096 +#define LPFC_DFT_TOPOLOGY 0 +#define LPFC_DFT_FC_CLASS 3 + +#define LPFC_DFT_NO_DEVICE_DELAY 1 /* 1 sec */ +#define LPFC_MAX_NO_DEVICE_DELAY 30 /* 30 sec */ +#define LPFC_DFT_FABRIC_TIMEOUT 0 +#define LPFC_MAX_FABRIC_TIMEOUT 255 /* 255 sec */ +#define LPFC_DFT_LNKDWN_TIMEOUT 30 +#define LPFC_MAX_LNKDWN_TIMEOUT 255 /* 255 sec */ +#define LPFC_DFT_NODEV_TIMEOUT 0 +#define LPFC_MAX_NODEV_TIMEOUT 255 /* 255 sec */ +#define LPFC_DFT_RSCN_NS_DELAY 0 +#define LPFC_MAX_RSCN_NS_DELAY 255 /* 255 sec */ + +#define LPFC_MAX_TGT_Q_DEPTH 10240 /* max cmds allowed per tgt */ +#define LPFC_DFT_TGT_Q_DEPTH 0 /* default max cmds per tgt */ + +#define LPFC_MAX_LUN_Q_DEPTH 128 /* max cmds to allow per lun */ +#define LPFC_DFT_LUN_Q_DEPTH 30 /* default max cmds per lun */ + +#define LPFC_MAX_DQFULL_THROTTLE 1 /* Boolean (max value) */ + +#define CFG_INTR_ACK 0 /* intr-ack */ +#define CFG_LOG_VERBOSE 1 /* log-verbose */ +#define CFG_LOG_ONLY 2 /* log-only */ +#define CFG_IDENTIFY_SELF 3 /* identify-self */ +#define CFG_NUM_IOCBS 4 /* num-iocbs */ +#define CFG_NUM_BUFS 5 /* num-bufs */ +#define CFG_FCP_ON 6 /* fcp-on */ +#define CFG_DEVICE_REPORT 7 /* device-report */ +#define CFG_AUTOMAP 8 /* automap */ +#define CFG_DFT_TGT_Q_DEPTH 9 /* tgt_queue_depth */ +#define CFG_DFT_LUN_Q_DEPTH 10 /* lun_queue_depth */ +#define CFG_FIRST_CHECK 11 /* first-check */ +#define CFG_FCPFABRIC_TMO 
12 /* fcpfabric-tmo */ +#define CFG_FCP_CLASS 13 /* fcp-class */ +#define CFG_USE_ADISC 14 /* use-adisc */ +#define CFG_NO_DEVICE_DELAY 15 /* no-device-delay */ +#define CFG_NETWORK_ON 16 /* network-on */ +#define CFG_POST_IP_BUF 17 /* post-ip-buf */ +#define CFG_XMT_Q_SIZE 18 /* xmt-que-size */ +#define CFG_IP_CLASS 19 /* ip-class */ +#define CFG_ACK0 20 /* ack0 */ +#define CFG_TOPOLOGY 21 /* topology */ +#define CFG_SCAN_DOWN 22 /* scan-down */ +#define CFG_LINKDOWN_TMO 23 /* linkdown-tmo */ +#define CFG_USE_LOMEM 24 /* use-lomempages */ +#define CFG_ZONE_RSCN 25 /* zone-rscn */ +#define CFG_HOLDIO 26 /* nodev-holdio */ +#define CFG_DELAY_RSP_ERR 27 /* delay-rsp-err */ +#define CFG_CHK_COND_ERR 28 /* check-cond-err */ +#define CFG_NODEV_TMO 29 /* nodev-tmo */ +#define CFG_DQFULL_THROTTLE 30 /* dqfull-throttle */ +#define CFG_LINK_SPEED 31 /* link-speed NEW_FETURE */ +#define CFG_QFULL_RSP_ERR 32 /* qfull-rsp-err */ +#define CFG_DQFULL_THROTTLE_UP_TIME 33 /* dqfull-throttle-up-time */ +#define CFG_DQFULL_THROTTLE_UP_INC 34 /* dqfull-throttle-up-inc */ +#define CFG_NUM_NODES 35 /* num-nodes */ +#define CFG_CR_DELAY 36 /* cr-delay */ +#define CFG_CR_COUNT 37 /* cr-count */ +#define NUM_CFG_PARAM 38 + +#ifdef DEF_ICFG +_static_ iCfgParam icfgparam[NUM_CFG_PARAM] = { + + /* general driver parameters */ + { "intr-ack", + 0, 1, TRUE, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Claim interrupt even if no work discovered" }, + + { "log-verbose", + 0, 0xffff, FALSE, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Verbose logging mask" }, + + { "log-only", + 0, 2, TRUE, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Log messages only go to system logger, not console" }, + + { "identify-self", + 0, 2, TRUE, 0, + (ushort)0, + (ushort)CFG_REBOOT, + "Driver startup will report driver version and release information" }, + + { "num-iocbs", + LPFC_MIN_NUM_IOCBS, LPFC_MAX_NUM_IOCBS, LPFC_DFT_NUM_IOCBS, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Number of outstanding IOCBs driver can queue to 
adapter" }, + + { "num-bufs", + LPFC_MIN_NUM_BUFS, LPFC_MAX_NUM_BUFS, LPFC_DFT_NUM_BUFS, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Number of buffers driver uses for ELS commands and Buffer Pointer Lists." }, + + /* FCP specific parameters */ + { "fcp-on", + 0, 1, TRUE, 0, + (ushort)0, + (ushort)CFG_REBOOT, + "Enable FCP processing" }, + + { "device-report", + 0, 1, TRUE, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Driver will report FCP devices as it finds them" }, + + { "automap", + 0, 3, 2, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Automatically bind FCP devices as they are discovered" }, + + { "tgt-queue-depth", + 0, LPFC_MAX_TGT_Q_DEPTH, LPFC_DFT_TGT_Q_DEPTH, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Max number of FCP commands we can queue to a specific target" }, + + { "lun-queue-depth", + 0, LPFC_MAX_LUN_Q_DEPTH, LPFC_DFT_LUN_Q_DEPTH, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Max number of FCP commands we can queue to a specific LUN" }, + + { "first-check", + 0, 1, + FALSE, + 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Retry the first 29xx check condition for FCP devices during discovery" }, + + { "fcpfabric-tmo", + 0, LPFC_MAX_FABRIC_TIMEOUT, LPFC_DFT_FABRIC_TIMEOUT, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Extra FCP command timeout when connected to a fabric" }, + + { "fcp-class", + 2, 3, LPFC_DFT_FC_CLASS, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Select Fibre Channel class of service for FCP sequences" }, + + { "use-adisc", + 0, 1, FALSE, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Use ADISC on rediscovery to authenticate FCP devices" }, + + { "no-device-delay", + 0, LPFC_MAX_NO_DEVICE_DELAY, LPFC_DFT_NO_DEVICE_DELAY, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "No FCP device failed I/O sec delay" }, + + /* IP specific parameters */ + { "network-on", + 0, 1, FALSE, 0, + (ushort)0, + (ushort)CFG_REBOOT, + "Enable IP processing" }, + + { "post-ip-buf", + LPFC_MIN_POST_IP_BUF, LPFC_MAX_POST_IP_BUF, LPFC_DFT_POST_IP_BUF, 0, + (ushort)0, + (ushort)CFG_RESTART, + 
"Number of IP buffers to post to adapter" }, + + { "xmt-que-size", + LPFC_MIN_XMT_QUE_SIZE, LPFC_MAX_XMT_QUE_SIZE, LPFC_DFT_XMT_QUE_SIZE, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Number of outstanding IP cmds for an adapter" }, + + { "ip-class", + 2, 3, LPFC_DFT_FC_CLASS, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Select Fibre Channel class of service for IP sequences" }, + + /* Fibre Channel specific parameters */ + { "ack0", + 0, 1, FALSE, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Enable ACK0 support" }, + + { "topology", + 0, 6, LPFC_DFT_TOPOLOGY, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Select Fibre Channel topology" }, + + { "scan-down", + 0, 2, 2, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Start scanning for devices from highest ALPA to lowest" }, + + { "linkdown-tmo", + 0, LPFC_MAX_LNKDWN_TIMEOUT, LPFC_DFT_LNKDWN_TIMEOUT, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Seconds driver will wait before deciding link is really down" }, + + { "use-lomempages", + 0, 1, FALSE, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Use low memory for adapter DMA buffers" }, + + { "zone-rscn", + 0, 1, FALSE, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Force RSCNs to always check NameServer for N_Port IDs" }, + + { "nodev-holdio", + 0, 1, FALSE, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Hold I/O errors if device disappears " }, + + { "delay-rsp-err", + 0, 1, FALSE, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Delay FCP error return for FCP RSP error and Check Condition" }, + + { "check-cond-err", + 0, 1, FALSE, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Treat special Check Conditions as a FCP error" }, + + { "nodev-tmo", + 0, LPFC_MAX_NODEV_TIMEOUT, LPFC_DFT_NODEV_TIMEOUT, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Seconds driver will hold I/O waiting for a device to come back" }, + + { "dqfull-throttle", + 0, 1, 1, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Decrement LUN throttle on a queue full condition" }, + + { "link-speed", + 0, 2, 0, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Select link 
speed" }, + + { "qfull-rsp-err", + 0, 1, FALSE, 0, + (ushort)0, + (ushort)CFG_DYNAMIC, + "Return BUSY (default) or TERMINATED as SCSI status on a queue full condition" }, + + { "dqfull-throttle-up-time", + 0, 30, 1, 0, + (ushort)0, + (ushort)CFG_RESTART, + "When to increment the current Q depth " }, + + { "dqfull-throttle-up-inc", + 0, LPFC_MAX_LUN_Q_DEPTH, 1, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Increment the current Q depth by dqfull-throttle-up-inc" }, + + { "num-nodes", + LPFC_MIN_NUM_NODES, LPFC_MAX_NUM_NODES, LPFC_DFT_NUM_NODES, 0, + (ushort)0, + (ushort)CFG_RESTART, + "Number of fibre channel nodes (NPorts) the driver will support." }, + + { "cr-delay", + 0, 63, 0, 0, + (ushort)0, + (ushort)CFG_RESTART, + "A count of milliseconds after which an interrupt response is generated" }, + + { "cr-count", + 1, 255, 0, 0, + (ushort)0, + (ushort)CFG_RESTART, + "A count of I/O completions after which an interrupt response is generated" }, + + }; +#endif /* DEF_ICFG */ + +#endif /* _H_CFGPARAM */ diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcmboxb.c current/drivers/scsi/lpfc/fcmboxb.c --- reference/drivers/scsi/lpfc/fcmboxb.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcmboxb.c 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,1013 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. 
* + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +#include "fc_os.h" + +#include "fc_hw.h" +#include "fc.h" + +#include "fcdiag.h" +#include "fcfgparm.h" +#include "fcmsg.h" +#include "fc_crtn.h" +#include "fc_ertn.h" + +extern fc_dd_ctl_t DD_CTL; +extern iCfgParam icfgparam[]; + +/* Routine Declaration - Local */ +/* There currently are no local routine declarations */ +/* End Routine Declaration - Local */ + + +/**********************************************/ +/** fc_restart Issue a RESTART **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_restart( +FC_BRD_INFO *binfo, +MAILBOX *mb, +int doit) +{ + void *ioa; + MAILBOX * mbox; + fc_dev_ctl_t *p_dev_ctl; + + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->mbxCommand = MBX_RESTART; + mb->mbxHc = OWN_CHIP; + mb->mbxOwner = OWN_HOST; + + if (doit) { + /* use REAL SLIM !!! 
*/ + p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl); + binfo->fc_mboxaddr = 0; + binfo->fc_flag &= ~FC_SLI2; + + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mbox = FC_MAILBOX(binfo, ioa); + WRITE_SLIM_COPY(binfo, (uint32 *)&mb->un.varWords, (uint32 *)&mbox->un.varWords, + (MAILBOX_CMD_WSIZE - 1)); + FC_UNMAP_MEMIO(ioa); + } + return; +} /* End fc_restart */ + + +/**********************************************/ +/** fc_dump_mem Issue a DUMP MEMORY **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_dump_mem( +FC_BRD_INFO *binfo, +MAILBOX *mb) +{ + /* Setup to dump VPD region */ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->mbxCommand = MBX_DUMP_MEMORY; + mb->un.varDmp.cv = 1; + mb->un.varDmp.type = DMP_NV_PARAMS; + mb->un.varDmp.region_id = DMP_REGION_VPD; + mb->un.varDmp.word_cnt = (DMP_VPD_SIZE / sizeof(uint32)); + + mb->un.varDmp.co = 0; + mb->un.varDmp.resp_offset = 0; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_dump_mem */ + + +/**********************************************/ +/** fc_read_nv Issue a READ NVPARAM **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_read_nv( +FC_BRD_INFO *binfo, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->mbxCommand = MBX_READ_NV; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_read_nv */ + +/**********************************************/ +/** fc_read_rev Issue a READ REV **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_read_rev( +FC_BRD_INFO *binfo, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->un.varRdRev.cv = 1; + mb->mbxCommand = MBX_READ_REV; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_read_rev */ + +/**********************************************/ +/** fc_runBIUdiag Issue a RUN_BIU_DIAG **/ +/** mailbox command **/ +/**********************************************/ +_static_ int +fc_runBIUdiag( 
+FC_BRD_INFO *binfo, +MAILBOX *mb, +uchar *in, +uchar *out) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + if (binfo->fc_flag & FC_SLI2) { + mb->mbxCommand = MBX_RUN_BIU_DIAG64; + mb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize = FCELSSIZE; + mb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = (uint32)putPaddrHigh(in); + mb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = (uint32)putPaddrLow(in); + mb->un.varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize = FCELSSIZE; + mb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = (uint32)putPaddrHigh(out); + mb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = (uint32)putPaddrLow(out); + } else { + mb->mbxCommand = MBX_RUN_BIU_DIAG; + mb->un.varBIUdiag.un.s1.xmit_bde.bdeSize = FCELSSIZE; + mb->un.varBIUdiag.un.s1.xmit_bde.bdeAddress = (uint32)putPaddrLow(in); + mb->un.varBIUdiag.un.s1.rcv_bde.bdeSize = FCELSSIZE; + mb->un.varBIUdiag.un.s1.rcv_bde.bdeAddress = (uint32)putPaddrLow(out); + } + + mb->mbxOwner = OWN_HOST; + return(0); +} /* End fc_runBIUdiag */ + + +/**********************************************/ +/** fc_read_la Issue a READ LA **/ +/** mailbox command **/ +/**********************************************/ +_static_ int +fc_read_la( +fc_dev_ctl_t *p_dev_ctl, +MAILBOX *mb) +{ + FC_BRD_INFO * binfo; + MATCHMAP * mp; + + binfo = &BINFO; + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + if ((mp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF)) == 0) { + + if (binfo->fc_flag & FC_SLI2) + mb->mbxCommand = MBX_READ_LA64; + else + mb->mbxCommand = MBX_READ_LA; + /* READ_LA: no buffers */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0300, /* ptr to msg structure */ + fc_mes0300, /* ptr to msg */ + fc_msgBlk0300.msgPreambleStr); /* begin & end varargs */ + return(1); + } + + if (binfo->fc_flag & FC_SLI2) { + mb->mbxCommand = MBX_READ_LA64; + mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128; + mb->un.varReadLA.un.lilpBde64.addrHigh = (uint32)putPaddrHigh(mp->phys); + mb->un.varReadLA.un.lilpBde64.addrLow = (uint32)putPaddrLow(mp->phys); + } else { + 
mb->mbxCommand = MBX_READ_LA; + mb->un.varReadLA.un.lilpBde.bdeSize = 128; + mb->un.varReadLA.un.lilpBde.bdeAddress = (uint32)putPaddrLow(mp->phys); + } + + /* save address for completion */ + ((MAILBOXQ * )mb)->bp = (uchar * )mp; + + mb->mbxOwner = OWN_HOST; + return(0); +} /* End fc_read_la */ + + +/**********************************************/ +/** fc_clear_la Issue a CLEAR LA **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_clear_la( +FC_BRD_INFO *binfo, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->un.varClearLA.eventTag = binfo->fc_eventTag; + mb->mbxCommand = MBX_CLEAR_LA; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_clear_la */ + + +/**********************************************/ +/** fc_read_status Issue a READ STATUS **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_read_status( +FC_BRD_INFO *binfo, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->mbxCommand = MBX_READ_STATUS; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_read_status */ + +/**********************************************/ +/** fc_read_lnk_stat Issue a LINK STATUS **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_read_lnk_stat( +FC_BRD_INFO *binfo, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->mbxCommand = MBX_READ_LNK_STAT; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_read_lnk_stat */ + + +/**************************************************/ +/** fc_config_ring Issue a CONFIG RING **/ +/** mailbox command **/ +/**************************************************/ +_static_ void +fc_config_ring( +FC_BRD_INFO *binfo, +int ring, +int profile, +MAILBOX *mb) +{ + int i; + int j; + + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->un.varCfgRing.ring = ring; + mb->un.varCfgRing.profile = profile; + mb->un.varCfgRing.maxOrigXchg = 0; + mb->un.varCfgRing.maxRespXchg = 0; + 
mb->un.varCfgRing.recvNotify = 1; + mb->un.varCfgRing.numMask = binfo->fc_nummask[ring]; + + j = 0; + for (i = 0; i < ring; i++) + j += binfo->fc_nummask[i]; + + for (i = 0; i < binfo->fc_nummask[ring]; i++) { + mb->un.varCfgRing.rrRegs[i].rval = binfo->fc_rval[j + i]; + if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ) /* ELS request */ + mb->un.varCfgRing.rrRegs[i].rmask = 0xff; + else + mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; + mb->un.varCfgRing.rrRegs[i].tval = binfo->fc_tval[j + i]; + mb->un.varCfgRing.rrRegs[i].tmask = 0xff; + } + + mb->mbxCommand = MBX_CONFIG_RING; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_config_ring */ + + +/**************************************************/ +/** fc_config_link Issue a CONFIG LINK **/ +/** mailbox command **/ +/**************************************************/ +_static_ void +fc_config_link( +fc_dev_ctl_t *p_dev_ctl, +MAILBOX *mb) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + if(clp[CFG_CR_DELAY].a_current) { + mb->un.varCfgLnk.cr = 1; + mb->un.varCfgLnk.ci = 1; + mb->un.varCfgLnk.cr_delay = clp[CFG_CR_DELAY].a_current; + mb->un.varCfgLnk.cr_count = clp[CFG_CR_COUNT].a_current; + } + + mb->un.varCfgLnk.myId = binfo->fc_myDID; + mb->un.varCfgLnk.edtov = binfo->fc_edtov; + mb->un.varCfgLnk.arbtov = binfo->fc_arbtov; + mb->un.varCfgLnk.ratov = binfo->fc_ratov; + mb->un.varCfgLnk.rttov = binfo->fc_rttov; + mb->un.varCfgLnk.altov = binfo->fc_altov; + mb->un.varCfgLnk.crtov = binfo->fc_crtov; + mb->un.varCfgLnk.citov = binfo->fc_citov; + if(clp[CFG_ACK0].a_current) + mb->un.varCfgLnk.ack0_enable = 1; + + mb->mbxCommand = MBX_CONFIG_LINK; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_config_link */ + + +/**********************************************/ +/** fc_init_link Issue an INIT LINK **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_init_link( 
+FC_BRD_INFO *binfo, +MAILBOX *mb, +uint32 topology, +uint32 linkspeed) +{ + iCfgParam * clp; + fc_vpd_t * vpd; + + clp = DD_CTL.p_config[binfo->fc_brd_no]; + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + switch (topology) { + case FLAGS_TOPOLOGY_MODE_LOOP_PT: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; + mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; + break; + case FLAGS_TOPOLOGY_MODE_PT_PT: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; + break; + case FLAGS_TOPOLOGY_MODE_LOOP: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; + break; + case FLAGS_TOPOLOGY_MODE_PT_LOOP: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; + mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; + break; + } + + vpd = &((fc_dev_ctl_t *)(binfo->fc_p_dev_ctl))->vpd; + if (binfo->fc_flag & FC_2G_CAPABLE) { + if ((vpd->rev.feaLevelHigh >= 0x02) && (linkspeed > 0)) { + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = linkspeed; + } + } + + mb->mbxCommand = (volatile uchar)MBX_INIT_LINK; + mb->mbxOwner = OWN_HOST; + mb->un.varInitLnk.fabric_AL_PA = binfo->fc_pref_ALPA; + return; +} /* End fc_init_link */ + + +/**********************************************/ +/** fc_down_link Issue a DOWN LINK **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_down_link( +FC_BRD_INFO *binfo, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + mb->mbxCommand = MBX_DOWN_LINK; + mb->mbxOwner = OWN_HOST; + return; +} + + +/**********************************************/ +/** fc_read_sparam Issue a READ SPARAM **/ +/** mailbox command **/ +/**********************************************/ +_static_ int +fc_read_sparam( +fc_dev_ctl_t *p_dev_ctl, +MAILBOX *mb) +{ + FC_BRD_INFO * binfo; + MATCHMAP * mp; + + binfo = &BINFO; + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + mb->mbxOwner = OWN_HOST; + + if ((mp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF)) == 0) { + + if 
(binfo->fc_flag & FC_SLI2) + mb->mbxCommand = MBX_READ_SPARM64; + else + mb->mbxCommand = MBX_READ_SPARM; + /* READ_SPARAM: no buffers */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0301, /* ptr to msg structure */ + fc_mes0301, /* ptr to msg */ + fc_msgBlk0301.msgPreambleStr); /* begin & end varargs */ + return(1); + } + + if (binfo->fc_flag & FC_SLI2) { + mb->mbxCommand = MBX_READ_SPARM64; + mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof(SERV_PARM); + mb->un.varRdSparm.un.sp64.addrHigh = (uint32)putPaddrHigh(mp->phys); + mb->un.varRdSparm.un.sp64.addrLow = (uint32)putPaddrLow(mp->phys); + } else { + mb->mbxCommand = MBX_READ_SPARM; + mb->un.varRdSparm.un.sp.bdeSize = sizeof(SERV_PARM); + mb->un.varRdSparm.un.sp.bdeAddress = (uint32)putPaddrLow(mp->phys); + } + + /* save address for completion */ + ((MAILBOXQ * )mb)->bp = (uchar * )mp; + + return(0); +} /* End fc_read_sparam */ + + +/**********************************************/ +/** fc_read_rpi Issue a READ RPI **/ +/** mailbox command **/ +/**********************************************/ +_static_ int +fc_read_rpi( +FC_BRD_INFO *binfo, +uint32 rpi, +MAILBOX *mb, +uint32 flag) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + mb->un.varRdRPI.reqRpi = (volatile ushort)rpi; + + if (binfo->fc_flag & FC_SLI2) { + mb->mbxCommand = MBX_READ_RPI64; + } else { + mb->mbxCommand = MBX_READ_RPI; + } + + mb->mbxOwner = OWN_HOST; + + mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ + + return(0); +} /* End fc_read_rpi */ + + +/**********************************************/ +/** fc_read_xri Issue a READ XRI **/ +/** mailbox command **/ +/**********************************************/ +_static_ int +fc_read_xri( +FC_BRD_INFO *binfo, +uint32 xri, +MAILBOX *mb, +uint32 flag) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + mb->un.varRdXRI.reqXri = (volatile ushort)xri; + + mb->mbxCommand = MBX_READ_XRI; + mb->mbxOwner = OWN_HOST; + + mb->un.varWords[30] = flag; /* Set flag to issue action on 
cmpl */ + + return(0); +} /* End fc_read_xri */ + + +/**********************************************/ +/** fc_reg_login Issue a REG_LOGIN **/ +/** mailbox command **/ +/**********************************************/ +_static_ int +fc_reg_login( +FC_BRD_INFO *binfo, +uint32 did, +uchar *param, +MAILBOX *mb, +uint32 flag) +{ + uchar * sparam; + MATCHMAP * mp; + fc_dev_ctl_t *p_dev_ctl; + + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + mb->un.varRegLogin.rpi = 0; + mb->un.varRegLogin.did = did; + mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ + + mb->mbxOwner = OWN_HOST; + + if ((mp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF)) == 0) { + + if (binfo->fc_flag & FC_SLI2) + mb->mbxCommand = MBX_REG_LOGIN64; + else + mb->mbxCommand = MBX_REG_LOGIN; + /* REG_LOGIN: no buffers */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0302, /* ptr to msg structure */ + fc_mes0302, /* ptr to msg */ + fc_msgBlk0302.msgPreambleStr, /* begin varargs */ + (uint32)did, + (uint32)flag); /* end varargs */ + return(1); + } + + sparam = mp->virt; + + /* Copy param's into a new buffer */ + fc_bcopy((void *)param, (void *)sparam, sizeof(SERV_PARM)); + + p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl); + fc_mpdata_sync(mp->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV); + + /* save address for completion */ + ((MAILBOXQ * )mb)->bp = (uchar * )mp; + + if (binfo->fc_flag & FC_SLI2) { + mb->mbxCommand = MBX_REG_LOGIN64; + mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof(SERV_PARM); + mb->un.varRegLogin.un.sp64.addrHigh = (uint32)putPaddrHigh(mp->phys); + mb->un.varRegLogin.un.sp64.addrLow = (uint32)putPaddrLow(mp->phys); + } else { + mb->mbxCommand = MBX_REG_LOGIN; + mb->un.varRegLogin.un.sp.bdeSize = sizeof(SERV_PARM); + mb->un.varRegLogin.un.sp.bdeAddress = (uint32)putPaddrLow(mp->phys); + } + + return(0); +} /* End fc_reg_login */ + + +/**********************************************/ +/** fc_unreg_login Issue a UNREG_LOGIN **/ +/** mailbox command **/ 
+/**********************************************/ +_static_ void +fc_unreg_login( +FC_BRD_INFO *binfo, +uint32 rpi, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + mb->un.varUnregLogin.rpi = (ushort)rpi; + mb->un.varUnregLogin.rsvd1 = 0; + + mb->mbxCommand = MBX_UNREG_LOGIN; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_unreg_login */ + + +/**********************************************/ +/** fc_unreg_did Issue a UNREG_DID **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_unreg_did( +FC_BRD_INFO *binfo, +uint32 did, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + mb->un.varUnregDID.did = did; + + mb->mbxCommand = MBX_UNREG_D_ID; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_unreg_did */ + + +/**********************************************/ +/** fc_set_slim Issue a special debug mbox */ +/** command to write slim */ +/**********************************************/ +_static_ void +fc_set_slim( +FC_BRD_INFO *binfo, +MAILBOX *mb, +uint32 addr, +uint32 value) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + /* addr = 0x090597 is AUTO ABTS disable for ELS commands */ + /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */ + + /* + * Always turn on DELAYED ABTS for ELS timeouts + */ + if ((addr == 0x052198) && (value == 0)) + value = 1; + + mb->un.varWords[0] = addr; + mb->un.varWords[1] = value; + + mb->mbxCommand = MBX_SET_SLIM; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_set_slim */ + + +/* Disable Traffic Cop */ +_static_ void +fc_disable_tc( +FC_BRD_INFO *binfo, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->un.varWords[0] = 0x50797; + mb->un.varWords[1] = 0; + mb->un.varWords[2] = 0xfffffffe; + + mb->mbxCommand = MBX_SET_SLIM; + mb->mbxOwner = OWN_HOST; +} /* End fc_disable_tc */ + + +/**********************************************/ +/** fc_config_port Issue a CONFIG_PORT **/ +/** mailbox command **/ +/**********************************************/
+_static_ int +fc_config_port( +FC_BRD_INFO *binfo, +MAILBOX *mb, +uint32 *hbainit) +{ + RING * rp; + fc_dev_ctl_t * p_dev_ctl; + iCfgParam * clp; + int ring_3_active; /* 4th ring */ + struct pci_dev *pdev; + + p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl); + pdev = p_dev_ctl->pcidev ; + + clp = DD_CTL.p_config[binfo->fc_brd_no]; + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + mb->mbxCommand = MBX_CONFIG_PORT; + mb->mbxOwner = OWN_HOST; + + ring_3_active = 0; /* Preset to inactive */ + + mb->un.varCfgPort.pcbLen = sizeof(PCB); + mb->un.varCfgPort.pcbLow = + (uint32)putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->pcb); + mb->un.varCfgPort.pcbHigh = + (uint32)putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->pcb); + if((pdev->device == PCI_DEVICE_ID_TFLY)|| + (pdev->device == PCI_DEVICE_ID_PFLY)) + fc_bcopy((uchar*) hbainit, (uchar*) mb->un.varCfgPort.hbainit, 20); + else + fc_bcopy((uchar*) hbainit, (uchar*) mb->un.varCfgPort.hbainit, 4); + + /* Now setup pcb */ + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.type = TYPE_NATIVE_SLI2; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.maxRing = (binfo->fc_ffnumrings-1); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.mailBoxSize = sizeof(MAILBOX); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.mbAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->mbx); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.mbAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->mbx); + + /* SLIM POINTER */ + if (binfo->fc_busflag & FC_HOSTPTR) { + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.hgpAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->mbx.us.s2.host); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.hgpAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->mbx.us.s2.host); + } else { + uint32 Laddr; + + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.hgpAddrHigh = (uint32) + 
fc_rdpci_32((fc_dev_ctl_t *)binfo->fc_p_dev_ctl, PCI_BAR_1_REGISTER); + Laddr = fc_rdpci_32((fc_dev_ctl_t *)binfo->fc_p_dev_ctl, PCI_BAR_0_REGISTER); + Laddr &= ~0x4; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.hgpAddrLow = (uint32)(Laddr + (SLIMOFF*4)); + } + + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.pgpAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->mbx.us.s2.port); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.pgpAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->mbx.us.s2.port); + + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[0].cmdEntries = SLI2_IOCB_CMD_R0_ENTRIES; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[0].rspEntries = SLI2_IOCB_RSP_R0_ENTRIES; + if(clp[CFG_NETWORK_ON].a_current == 0) { + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[1].cmdEntries = + (SLI2_IOCB_CMD_R1_ENTRIES - SLI2_IOCB_CMD_R1XTRA_ENTRIES); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[1].rspEntries = + (SLI2_IOCB_RSP_R1_ENTRIES - SLI2_IOCB_RSP_R1XTRA_ENTRIES); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].cmdEntries = + (SLI2_IOCB_CMD_R2_ENTRIES + SLI2_IOCB_CMD_R2XTRA_ENTRIES); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].rspEntries = + (SLI2_IOCB_RSP_R2_ENTRIES + SLI2_IOCB_RSP_R2XTRA_ENTRIES); + } + else { + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[1].cmdEntries = SLI2_IOCB_CMD_R1_ENTRIES; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[1].rspEntries = SLI2_IOCB_RSP_R1_ENTRIES; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].cmdEntries = SLI2_IOCB_CMD_R2_ENTRIES; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].rspEntries = SLI2_IOCB_RSP_R2_ENTRIES; + } + if( ring_3_active == 0) { + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[3].cmdEntries = 0; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[3].rspEntries = 0; + } + + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[0].cmdAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[0]); + ((SLI2_SLIM * 
)binfo->fc_slim2.virt)->pcb.rdsc[0].cmdAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[0]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[0].rspAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[0].rspAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES]); + rp = &binfo->fc_ring[0]; + rp->fc_cmdringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[0]; + rp->fc_rspringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES]; + + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[1].cmdAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[1].cmdAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES]); + if(clp[CFG_NETWORK_ON].a_current == 0) { + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[1].rspAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES - SLI2_IOCB_CMD_R1XTRA_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[1].rspAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES - SLI2_IOCB_CMD_R1XTRA_ENTRIES]); + rp = &binfo->fc_ring[1]; + rp->fc_cmdringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES]; + rp->fc_rspringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES - SLI2_IOCB_CMD_R1XTRA_ENTRIES]; + + ((SLI2_SLIM * 
)binfo->fc_slim2.virt)->pcb.rdsc[2].cmdAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES - + SLI2_IOCB_CMD_R1XTRA_ENTRIES - SLI2_IOCB_RSP_R1XTRA_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].cmdAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES - + SLI2_IOCB_CMD_R1XTRA_ENTRIES - SLI2_IOCB_RSP_R1XTRA_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].rspAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES + + SLI2_IOCB_CMD_R2_ENTRIES + SLI2_IOCB_CMD_R2XTRA_ENTRIES - + SLI2_IOCB_CMD_R1XTRA_ENTRIES - SLI2_IOCB_RSP_R1XTRA_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].rspAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES + + SLI2_IOCB_CMD_R2_ENTRIES + SLI2_IOCB_CMD_R2XTRA_ENTRIES - + SLI2_IOCB_CMD_R1XTRA_ENTRIES - SLI2_IOCB_RSP_R1XTRA_ENTRIES]); + rp = &binfo->fc_ring[2]; + rp->fc_cmdringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES - + SLI2_IOCB_CMD_R1XTRA_ENTRIES - SLI2_IOCB_RSP_R1XTRA_ENTRIES]; + rp->fc_rspringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES + + SLI2_IOCB_CMD_R2_ENTRIES + SLI2_IOCB_CMD_R2XTRA_ENTRIES - + SLI2_IOCB_CMD_R1XTRA_ENTRIES - SLI2_IOCB_RSP_R1XTRA_ENTRIES]; + } + else { + ((SLI2_SLIM * 
)binfo->fc_slim2.virt)->pcb.rdsc[1].rspAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[1].rspAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES]); + rp = &binfo->fc_ring[1]; + rp->fc_cmdringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES]; + rp->fc_rspringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES]; + + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].cmdAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].cmdAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].rspAddrHigh = (uint32) + putPaddrHigh( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES + + SLI2_IOCB_CMD_R2_ENTRIES]); + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[2].rspAddrLow = (uint32) + putPaddrLow( & ((SLI2_SLIM * )binfo->fc_slim2.phys)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES + + SLI2_IOCB_CMD_R2_ENTRIES]); + rp = &binfo->fc_ring[2]; + rp->fc_cmdringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + 
SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES]; + rp->fc_rspringaddr = (void *) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->IOCBs[ + SLI2_IOCB_CMD_R0_ENTRIES + SLI2_IOCB_RSP_R0_ENTRIES + + SLI2_IOCB_CMD_R1_ENTRIES + SLI2_IOCB_RSP_R1_ENTRIES + + SLI2_IOCB_CMD_R2_ENTRIES]; + } + + if( ring_3_active == 0) { + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[3].cmdAddrHigh = 0; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[3].rspAddrHigh = 0; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[3].cmdAddrLow = 0; + ((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb.rdsc[3].rspAddrLow = 0; + } + + fc_pcimem_bcopy((uint32 * )(&((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb), + (uint32 * )(&((SLI2_SLIM * )binfo->fc_slim2.virt)->pcb), sizeof(PCB)); + + fc_mpdata_sync(binfo->fc_slim2.dma_handle, (off_t)0, (size_t)0, + DDI_DMA_SYNC_FORDEV); + /* Service Level Interface (SLI) 2 selected */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0405, /* ptr to msg structure */ + fc_mes0405, /* ptr to msg */ + fc_msgBlk0405.msgPreambleStr); /* begin & end varargs */ + return(0); +} /* End fc_config_port */ + +/**********************************************/ +/** fc_config_farp Issue a CONFIG FARP **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_config_farp( +FC_BRD_INFO *binfo, +MAILBOX *mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + + mb->un.varCfgFarp.filterEnable = 1; + mb->un.varCfgFarp.portName = 1; + mb->un.varCfgFarp.nodeName = 1; + + fc_bcopy((uchar * )&binfo->fc_portname, (uchar *)&mb->un.varCfgFarp.portname, sizeof(NAME_TYPE)); + fc_bcopy((uchar * )&binfo->fc_portname, (uchar *)&mb->un.varCfgFarp.nodename, sizeof(NAME_TYPE)); + + mb->mbxCommand = MBX_CONFIG_FARP; + mb->mbxOwner = OWN_HOST; + return; +} + + +/**********************************************/ +/** fc_read_config Issue a READ CONFIG **/ +/** mailbox command **/ +/**********************************************/ +_static_ void +fc_read_config( +FC_BRD_INFO *binfo, +MAILBOX
*mb) +{ + fc_bzero((void *)mb, sizeof(MAILBOXQ)); + mb->mbxCommand = MBX_READ_CONFIG; + mb->mbxOwner = OWN_HOST; + return; +} /* End fc_read_config */ + +/*****************************************************************************/ +/* + * NAME: fc_mbox_put + * + * FUNCTION: put mailbox cmd onto the mailbox queue. + * + * EXECUTION ENVIRONMENT: process and interrupt level. + * + * NOTES: + * + * CALLED FROM: + * issue_mb_cmd + * + * INPUT: + * binfo - pointer to the device info area + * mbp - pointer to mailbox queue entry of mailbox cmd + * + * RETURNS: + * NULL - command queued + */ +/*****************************************************************************/ +_static_ void +fc_mbox_put( +FC_BRD_INFO *binfo, +MAILBOXQ *mbq) /* pointer to mbq entry */ +{ + if (binfo->fc_mbox.q_first) { + /* queue command to end of list */ + ((MAILBOXQ * )binfo->fc_mbox.q_last)->q = (uchar * )mbq; + binfo->fc_mbox.q_last = (uchar * )mbq; + } else { + /* add command to empty list */ + binfo->fc_mbox.q_first = (uchar * )mbq; + binfo->fc_mbox.q_last = (uchar * )mbq; + } + + mbq->q = NULL; + binfo->fc_mbox.q_cnt++; + + return; + +} /* End fc_mbox_put */ + + +/*****************************************************************************/ +/* + * NAME: fc_mbox_get + * + * FUNCTION: get a mailbox command from mailbox command queue + * + * EXECUTION ENVIRONMENT: interrupt level. 
+ * + * NOTES: + * + * CALLED FROM: + * handle_mb_event + * + * INPUT: + * binfo - pointer to the device info area + * + * RETURNS: + * NULL - no match found + * mb pointer - pointer to a mailbox command + */ +/*****************************************************************************/ +_static_ MAILBOXQ * +fc_mbox_get( +FC_BRD_INFO *binfo) +{ + MAILBOXQ * p_first = NULL; + + if (binfo->fc_mbox.q_first) { + p_first = (MAILBOXQ * )binfo->fc_mbox.q_first; + if ((binfo->fc_mbox.q_first = p_first->q) == 0) { + binfo->fc_mbox.q_last = 0; + } + p_first->q = NULL; + binfo->fc_mbox.q_cnt--; + } + + return(p_first); + +} /* End fc_mbox_get */ + + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcmemb.c current/drivers/scsi/lpfc/fcmemb.c --- reference/drivers/scsi/lpfc/fcmemb.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcmemb.c 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,810 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#include "fc_os.h" + +#include "fc_hw.h" +#include "fc.h" + +#include "fcdiag.h" +#include "fcfgparm.h" +#include "fcmsg.h" +#include "fc_crtn.h" /* Core - external routine definitions */ +#include "fc_ertn.h" /* Environment - external routine definitions */ + +extern uint32 fcPAGESIZE; +extern fc_dd_ctl_t DD_CTL; +extern iCfgParam icfgparam[]; +extern int fc_max_els_sent; + + +/* + * Define the following to Enable SANITY check logic in routines + * fc_getvaddr() and fc_mapvaddr(). + * #define FC_DBG_VADDR_SANITY_CHK + */ + +/* Routine Declaration - Local */ +/* There currently are no local routine declarations */ +/* End Routine Declaration - Local */ + +/***************************************************/ +/** fc_malloc_buffer **/ +/** **/ +/** This routine will allocate iocb/data buffer **/ +/** space and setup the buffers for all rings on **/ +/** the specified board to use. The data buffers **/ +/** can be posted to the ring with the **/ +/** fc_post_buffer routine. The iocb buffers **/ +/** are used to make a temp copy of the response **/ +/** ring iocbs. Returns 0 if not enough memory, **/ +/** Returns 1 if successful. 
**/ +/***************************************************/ +_static_ int +fc_malloc_buffer( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo = &BINFO; + iCfgParam * clp; + int i, j; + uchar * bp; + uchar * oldbp; + RING * rp; + MEMSEG * mp; + MATCHMAP * matp; + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + unsigned long iflag; + + buf_info = &bufinfo; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + + for (i = 0; i < binfo->fc_ffnumrings; i++) { + rp = &binfo->fc_ring[i]; + rp->fc_mpon = 0; + rp->fc_mpoff = 0; + } + + if(clp[CFG_FCP_ON].a_current) { + buf_info->size = (MAX_FCP_CMDS * sizeof(void *)); + buf_info->flags = 0; + buf_info->align = sizeof(void *); + buf_info->dma_handle = 0; + + /* Create a table to relate FCP iotags to fc_buf addresses */ + fc_malloc(p_dev_ctl, buf_info); + if (buf_info->virt == NULL) { + fc_free_buffer(p_dev_ctl); + return(0); + } + binfo->fc_table = (FCPTBL * )buf_info->virt; + fc_bzero((char *)binfo->fc_table, MAX_FCP_CMDS * sizeof(void *)); + } + + /* Initialize xmit/receive buffer structure */ + /* Three buffers per response entry will initially be posted to ELS ring */ + iflag = lpfc_mempool_disable_lock(p_dev_ctl); + mp = &binfo->fc_memseg[MEM_BUF]; + mp->fc_memsize = FCELSSIZE; + if(clp[CFG_NUM_BUFS].a_current < 50) + mp->fc_numblks = 50; + else + mp->fc_numblks = (ushort)clp[CFG_NUM_BUFS].a_current; + + /* MEM_BUF is same pool as MEM_BPL */ + if (binfo->fc_sli == 2) + mp->fc_numblks += MAX_SLI2_IOCB; + + mp->fc_memflag = FC_MEM_DMA; + mp->fc_lowmem = (3 * fc_max_els_sent) + 8; + + if((2*mp->fc_lowmem) > mp->fc_numblks) + mp->fc_lowmem = (mp->fc_numblks / 2); + + /* Initialize mailbox cmd buffer structure */ + mp = &binfo->fc_memseg[MEM_MBOX]; + mp->fc_memsize = sizeof(MAILBOXQ); + mp->fc_numblks = (short)clp[CFG_NUM_NODES].a_current + 32; + mp->fc_memflag = 0; + mp->fc_lowmem = (2 * fc_max_els_sent) + 8; + + /* Initialize iocb buffer structure */ + mp = &binfo->fc_memseg[MEM_IOCB]; + mp->fc_memsize = sizeof(IOCBQ); + 
mp->fc_numblks = (ushort)clp[CFG_NUM_IOCBS].a_current + MIN_CLK_BLKS; + mp->fc_memflag = 0; + mp->fc_lowmem = (2 * fc_max_els_sent) + 8; + + /* Initialize iocb buffer structure */ + mp = &binfo->fc_memseg[MEM_NLP]; + mp->fc_memsize = sizeof(NODELIST); + mp->fc_numblks = (short)clp[CFG_NUM_NODES].a_current + 2; + mp->fc_memflag = 0; + mp->fc_lowmem = 0; + + + /* Allocate buffer pools for above buffer structures */ + for (j = 0; j < FC_MAX_SEG; j++) { + mp = &binfo->fc_memseg[j]; + + + mp->fc_memptr = 0; + mp->fc_endmemptr = 0; + mp->fc_memhi = 0; + mp->fc_memlo = 0; + + for (i = 0; i < mp->fc_numblks; i++) { + /* If this is a DMA buffer we need alignment on a page so we don't + * want to worry about buffers spanning page boundries when mapping + * memory for the adapter. + */ + if (mp->fc_memflag & FC_MEM_DMA) { + buf_info->size = sizeof(MATCHMAP); + buf_info->flags = 0; + buf_info->align = sizeof(void *); + buf_info->dma_handle = 0; + + fc_malloc(p_dev_ctl, buf_info); + if (buf_info->virt == NULL) { + lpfc_mempool_unlock_enable(p_dev_ctl, iflag); + fc_free_buffer(p_dev_ctl); + return(0); + } + + matp = (MATCHMAP * )buf_info->virt; + fc_bzero(matp, sizeof(MATCHMAP)); + if((ulong)matp > (ulong)(mp->fc_memhi)) + mp->fc_memhi = (uchar *)matp; + if(mp->fc_memlo == 0) + mp->fc_memlo = (uchar *)matp; + else { + if((ulong)matp < (ulong)(mp->fc_memlo)) + mp->fc_memlo = (uchar *)matp; + } + + buf_info->size = mp->fc_memsize; + buf_info->flags = FC_MBUF_DMA; + buf_info->dma_handle = 0; + + switch (mp->fc_memsize) { + case sizeof(FCP_CMND): + buf_info->align = sizeof(FCP_CMND); + break; + + case 1024: + buf_info->align = 1024; + break; + + case 2048: + buf_info->align = 2048; + break; + + case 4096: + buf_info->align = 4096; + break; + + default: + buf_info->align = sizeof(void *); + break; + } + + fc_malloc(p_dev_ctl, buf_info); + if (buf_info->virt == NULL) { + fc_free_buffer(p_dev_ctl); + return(0); + } + bp = (uchar * )buf_info->virt; + fc_bzero(bp, mp->fc_memsize); + + /* 
Link buffer into beginning of list. The first pointer in + * each buffer is a forward pointer to the next buffer. + */ + oldbp = mp->fc_memptr; + if(oldbp == 0) + mp->fc_endmemptr = (uchar *)matp; + mp->fc_memptr = (uchar * )matp; + matp->fc_mptr = oldbp; + matp->virt = bp; + if (buf_info->dma_handle) { + matp->dma_handle = buf_info->dma_handle; + matp->data_handle = buf_info->data_handle; + } + matp->phys = (uchar * )buf_info->phys; + } else { + buf_info->size = mp->fc_memsize; + buf_info->flags = 0; + buf_info->align = sizeof(void *); + buf_info->dma_handle = 0; + + fc_malloc(p_dev_ctl, buf_info); + if (buf_info->virt == NULL) { + lpfc_mempool_unlock_enable(p_dev_ctl, iflag); + fc_free_buffer(p_dev_ctl); + return(0); + } + bp = (uchar * )buf_info->virt; + fc_bzero(bp, mp->fc_memsize); + if((ulong)bp > (ulong)(mp->fc_memhi)) + mp->fc_memhi = (uchar *)bp; + if(mp->fc_memlo == 0) + mp->fc_memlo = (uchar *)bp; + else { + if((ulong)bp < (ulong)(mp->fc_memlo)) + mp->fc_memlo = (uchar *)bp; + } + + /* Link buffer into beginning of list. The first pointer in + * each buffer is a forward pointer to the next buffer. 
+ */ + oldbp = mp->fc_memptr; + if(oldbp == 0) + mp->fc_endmemptr = bp; + mp->fc_memptr = bp; + *((uchar * *)bp) = oldbp; + } + } + + /* free blocks = total blocks right now */ + mp->fc_free = i; + } + + lpfc_mempool_unlock_enable(p_dev_ctl, iflag); + return(1); +} /* End fc_malloc_buffer */ + + +/***************************************************/ +/** fc_free_buffer **/ +/** **/ +/** This routine will free iocb/data buffer space */ +/***************************************************/ +_static_ int +fc_free_buffer( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo = &BINFO; + int j, ipri; + uchar * bp; + MEMSEG * mp; + MATCHMAP * mm; + NODELIST * ndlp; + NODELIST * ondlp; + RING * rp; + IOCBQ * iocbq, *save; + MAILBOXQ * mbox, *mbsave; + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + FCCLOCK_INFO * clock_info; + FCCLOCK * cb; + FCCLOCK * nextcb; + unsigned long iflag; + + buf_info = &bufinfo; + + /* free the mapped address match area for each ring */ + for (j = 0; j < binfo->fc_ffnumrings; j++) { + rp = &binfo->fc_ring[j]; + + /* Free everything on tx queue */ + iocbq = (IOCBQ * )(rp->fc_tx.q_first); + while (iocbq) { + save = iocbq; + iocbq = (IOCBQ * )iocbq->q; + fc_mem_put(binfo, MEM_IOCB, (uchar * )save); + } + + if (j != FC_FCP_RING) { + /* Free everything on txp queue */ + unsigned long iflag; + + iflag = lpfc_q_disable_lock(p_dev_ctl); + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + save = iocbq; + iocbq = (IOCBQ * )iocbq->q; + fc_mem_put(binfo, MEM_IOCB, (uchar * )save); + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + + while (rp->fc_mpoff) { + uchar * addr; + + addr = 0; + mm = (MATCHMAP * )(rp->fc_mpoff); + if (j == FC_IP_RING) + addr = (uchar * )(fcnextpkt((fcipbuf_t * )mm)); + else if (j == FC_ELS_RING) + addr = mm->phys; + if ((mm = fc_getvaddr(p_dev_ctl, rp, addr))) { + if (j == FC_ELS_RING) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mm); + } + else if (j == FC_IP_RING) { + fcipbuf_t * mbuf; + + mbuf = (fcipbuf_t * )mm; + 
fcnextdata(mbuf) = 0; + fcnextpkt(mbuf) = 0; + m_freem(mbuf); + } + } + } + } + } + + /* Free any delayed ELS xmits */ + if(binfo->fc_delayxmit) { + iocbq = binfo->fc_delayxmit; + binfo->fc_delayxmit = 0; + while(iocbq) { + mm = (MATCHMAP * )iocbq->bp; + if (binfo->fc_flag & FC_SLI2) { + fc_mem_put(binfo, MEM_BPL, (uchar * )iocbq->bpl); + } + fc_mem_put(binfo, MEM_BUF, (uchar * )mm); + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->info); + save = iocbq; + iocbq = (IOCBQ *)save->q; + fc_mem_put(binfo, MEM_IOCB, (uchar * )save); + } + } + + if (binfo->fc_table) { + buf_info->size = (MAX_FCP_CMDS * sizeof(void *)); + buf_info->virt = (uint32 * )binfo->fc_table; + buf_info->phys = 0; + buf_info->flags = 0; + buf_info->dma_handle = 0; + fc_free(p_dev_ctl, buf_info); + binfo->fc_table = 0; + } + + /* Free everything on mbox queue */ + mbox = (MAILBOXQ * )(binfo->fc_mbox.q_first); + while (mbox) { + mbsave = mbox; + mbox = (MAILBOXQ * )mbox->q; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbsave); + } + binfo->fc_mbox.q_first = 0; + binfo->fc_mbox.q_last = 0; + binfo->fc_mbox_active = 0; + + /* Free everything on iocb plogi queue */ + iocbq = (IOCBQ * )(binfo->fc_plogi.q_first); + while (iocbq) { + save = iocbq; + iocbq = (IOCBQ * )iocbq->q; + fc_mem_put(binfo, MEM_IOCB, (uchar * )save); + } + binfo->fc_plogi.q_first = 0; + binfo->fc_plogi.q_last = 0; + + /* Now cleanup unexpired clock blocks */ + clock_info = &DD_CTL.fc_clock_info; + ipri = disable_lock(FC_LVL, &CLOCK_LOCK); + + cb = clock_info->fc_clkhdr.cl_f; + while (cb != (FCCLOCK * ) & clock_info->fc_clkhdr) { + nextcb = cb->cl_fw; + if(cb->cl_p_dev_ctl == (void *)p_dev_ctl) { + fc_clock_deque(cb); + /* Release clock block */ + fc_clkrelb(p_dev_ctl, cb); + /* start over */ + } + cb = nextcb; + } + unlock_enable(ipri, &CLOCK_LOCK); + + /* Free all node table entries */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST 
*)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + ondlp = ndlp; + ndlp = (NODELIST *)ndlp->nlp_listp_next; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + fc_mem_put(binfo, MEM_NLP, (uchar * )ondlp); + } + binfo->fc_nlpbind_start = (NODELIST *)&binfo->fc_nlpbind_start; + binfo->fc_nlpbind_end = (NODELIST *)&binfo->fc_nlpbind_start; + binfo->fc_nlpmap_start = (NODELIST *)&binfo->fc_nlpmap_start; + binfo->fc_nlpmap_end = (NODELIST *)&binfo->fc_nlpmap_start; + binfo->fc_nlpunmap_start = (NODELIST *)&binfo->fc_nlpunmap_start; + binfo->fc_nlpunmap_end = (NODELIST *)&binfo->fc_nlpunmap_start; + + iflag = lpfc_mempool_disable_lock(p_dev_ctl); + /* Loop through all memory buffer pools */ + for (j = 0; j < FC_MAX_SEG; j++) { + mp = &binfo->fc_memseg[j]; + /* Free memory associated with all buffers on free buffer pool */ + while ((bp = mp->fc_memptr) != NULL) { + mp->fc_memptr = *((uchar * *)bp); + if (mp->fc_memflag & FC_MEM_DMA) { + mm = (MATCHMAP * )bp; + bp = mm->virt; + buf_info->size = mp->fc_memsize; + buf_info->virt = (uint32 * )bp; + buf_info->phys = (uint32 * )mm->phys; + buf_info->flags = FC_MBUF_DMA; + if (mm->dma_handle) { + buf_info->dma_handle = mm->dma_handle; + buf_info->data_handle = mm->data_handle; + } + fc_free(p_dev_ctl, buf_info); + + buf_info->size = sizeof(MATCHMAP); + buf_info->virt = (uint32 * )mm; + buf_info->phys = 0; + buf_info->flags = 0; + buf_info->dma_handle = 0; + fc_free(p_dev_ctl, buf_info); + } else { + buf_info->size = mp->fc_memsize; + buf_info->virt = (uint32 * )bp; + buf_info->phys = 0; + buf_info->flags = 0; + buf_info->dma_handle = 0; + fc_free(p_dev_ctl, buf_info); + } + } + mp->fc_endmemptr = NULL; + mp->fc_free = 0; + } + lpfc_mempool_unlock_enable(p_dev_ctl, iflag); + return(0); +} /* End fc_free_buffer */ + + + 
+/**************************************************/ +/** fc_mem_get **/ +/** **/ +/** This routine will get a free memory buffer. **/ +/** seg identifies which buffer pool to use. **/ +/** Returns the free buffer ptr or 0 for no buf **/ +/**************************************************/ +_static_ uchar * +fc_mem_get( +FC_BRD_INFO *binfo, +uint32 arg) +{ + uchar * bp; + MEMSEG * mp; + uint32 seg = arg & MEM_SEG_MASK; + int low; + fc_dev_ctl_t *p_dev_ctl; + unsigned long iflag; + + /* range check on seg argument */ + if (seg >= FC_MAX_SEG) + return((uchar * )0); + + p_dev_ctl = (fc_dev_ctl_t *)binfo->fc_p_dev_ctl; + iflag = lpfc_mempool_disable_lock(p_dev_ctl); + mp = &binfo->fc_memseg[seg]; + + if ((low = (!(arg & MEM_PRI) && (mp->fc_free <= mp->fc_lowmem)))) { + /* Memory Buffer Pool is below low water mark */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0406, /* ptr to msg structure */ + fc_mes0406, /* ptr to msg */ + fc_msgBlk0406.msgPreambleStr, /* begin varargs */ + seg, + mp->fc_lowmem, + low); /* end varargs */ + /* Low priority request and not enough buffers, so fail */ + lpfc_mempool_unlock_enable(p_dev_ctl, iflag); + return((uchar * )0); + } + + bp = mp->fc_memptr; + + if (bp) { + if(((ulong)bp > (ulong)(mp->fc_memhi)) || ((ulong)bp < (ulong)(mp->fc_memlo))) { + /* Memory Buffer Pool is corrupted */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0407, /* ptr to msg structure */ + fc_mes0407, /* ptr to msg */ + fc_msgBlk0407.msgPreambleStr, /* begin varargs */ + seg, + (ulong)bp, + (ulong)mp->fc_memhi, + (ulong)mp->fc_memlo); /* end varargs */ + mp->fc_memptr = 0; + lpfc_mempool_unlock_enable(p_dev_ctl, iflag); + return((uchar * )0); + } + + /* If a memory block exists, take it off freelist + * and return it to the user. 
+ */ + if(mp->fc_endmemptr == bp) { + mp->fc_endmemptr = 0; + } + mp->fc_memptr = *((uchar * *)bp); + *((uchar * *)bp) = 0; + mp->fc_free--; + } else { + /* Memory Buffer Pool is out of buffers */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0409, /* ptr to msg structure */ + fc_mes0409, /* ptr to msg */ + fc_msgBlk0409.msgPreambleStr, /* begin varargs */ + seg, + mp->fc_free, + binfo->fc_mbox.q_cnt, + (ulong)(mp->fc_memhi)); /* end varargs */ + FCSTATCTR.memAllocErr++; + } + + if (seg == MEM_NLP) { + /* GET nodelist */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0927, /* ptr to msg structure */ + fc_mes0927, /* ptr to msg */ + fc_msgBlk0927.msgPreambleStr, /* begin varargs */ + (ulong)bp, + mp->fc_free); /* end varargs */ + } + + lpfc_mempool_unlock_enable(p_dev_ctl, iflag); + return(bp); +} /* End fc_mem_get */ + + +/**************************************************/ +/** fc_mem_put **/ +/** **/ +/** This routine will put a memory buffer back **/ +/** on the freelist. **/ +/** seg identifies which buffer pool to use. 
**/ +/**************************************************/ +_static_ uchar * +fc_mem_put( +FC_BRD_INFO *binfo, +uint32 seg, +uchar *bp) +{ + MEMSEG * mp; + uchar * oldbp; + fc_dev_ctl_t *p_dev_ctl; + unsigned long iflag; + /* range check on seg argument */ + if (seg >= FC_MAX_SEG) + return((uchar * )0); + + p_dev_ctl = (fc_dev_ctl_t *)binfo->fc_p_dev_ctl; + iflag = lpfc_mempool_disable_lock(p_dev_ctl); + mp = &binfo->fc_memseg[seg]; + + if (bp) { + + if(((ulong)bp > (ulong)(mp->fc_memhi)) || ((ulong)bp < (ulong)(mp->fc_memlo))) { + /* Memory Buffer Pool is corrupted */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0408, /* ptr to msg structure */ + fc_mes0408, /* ptr to msg */ + fc_msgBlk0408.msgPreambleStr, /* begin varargs */ + seg, + (ulong)bp, + (ulong)mp->fc_memhi, + (ulong)mp->fc_memlo); /* end varargs */ + lpfc_mempool_unlock_enable(p_dev_ctl, iflag); + return((uchar * )0); + } + /* If a memory block exists, put it on freelist + * and return it to the user. + */ + oldbp = mp->fc_memptr; + mp->fc_memptr = bp; + *((uchar * *)bp) = oldbp; + if(oldbp == 0) + mp->fc_endmemptr = bp; + mp->fc_free++; + } + + if (seg == MEM_NLP) { + /* PUT nodelist */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0928, /* ptr to msg structure */ + fc_mes0928, /* ptr to msg */ + fc_msgBlk0928.msgPreambleStr, /* begin varargs */ + (ulong)bp, + mp->fc_free); /* end varargs */ + } + + lpfc_mempool_unlock_enable(p_dev_ctl, iflag); + return(bp); +} /* End fc_mem_put */ + + +/* Look up the virtual address given a mapped address */ +_static_ MATCHMAP * +fc_getvaddr( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +uchar *mapbp) +{ + FC_BRD_INFO * binfo; + + binfo = &BINFO; + /* While there are available slots in the list */ + if (rp->fc_ringno == FC_ELS_RING) { + MATCHMAP * mp; + MATCHMAP * mpoff; + + mpoff = (MATCHMAP * )rp->fc_mpoff; + mp = 0; + + while (mpoff) { + /* Check for a match */ + if (mpoff->phys == mapbp) { + /* If we matched on the first slot */ + if (mp == 0) { + 
rp->fc_mpoff = mpoff->fc_mptr; + } else { + mp->fc_mptr = mpoff->fc_mptr; + } + + if (rp->fc_mpon == (uchar * )mpoff) { + rp->fc_mpon = (uchar * )mp; + } + rp->fc_bufcnt--; + mpoff->fc_mptr = 0; + + fc_mpdata_sync(mpoff->dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + + /* Return entry */ + return(mpoff); + } + mp = mpoff; + mpoff = (MATCHMAP * )mpoff->fc_mptr; + } + } + + if (rp->fc_ringno == FC_IP_RING) { + fcipbuf_t * mb; + fcipbuf_t * mboff; + + mboff = (fcipbuf_t * )rp->fc_mpoff; + mb = 0; + + while (mboff) { + /* Check for a match */ + if (fcnextpkt(mboff) == (fcipbuf_t * )mapbp) { + /* If we matched on the first slot */ + if (mb == 0) { + rp->fc_mpoff = (uchar * )fcnextdata(mboff); + } else { + fcnextdata(mb) = fcnextdata(mboff); + } + + if (rp->fc_mpon == (uchar * )mboff) { + rp->fc_mpon = (uchar * )mb; + } + rp->fc_bufcnt--; + + if (fcnextpkt(mboff)) { + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + + buf_info = &bufinfo; + buf_info->dma_handle = (ulong * )fcgethandle(mboff); + if (buf_info->dma_handle) { + fc_mpdata_sync(buf_info->dma_handle, 0, 0, + DDI_DMA_SYNC_FORKERNEL); + } + buf_info->virt = 0; + buf_info->flags = (FC_MBUF_PHYSONLY | FC_MBUF_DMA); + buf_info->phys = (uint32 * )fcnextpkt(mboff); + buf_info->size = fcPAGESIZE; + fc_free(p_dev_ctl, buf_info); + } + + fcsethandle(mboff, 0); + fcnextpkt(mboff) = 0; + fcnextdata(mboff) = 0; + /* Return entry */ + return((MATCHMAP * )mboff); + } + mb = mboff; + mboff = (fcipbuf_t * )fcnextdata(mboff); + } + } + /* Cannot find virtual addr for mapped buf on ring (num) */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0410, /* ptr to msg structure */ + fc_mes0410, /* ptr to msg */ + fc_msgBlk0410.msgPreambleStr, /* begin varargs */ + rp->fc_ringno, + (ulong)mapbp, + (ulong)rp->fc_mpoff, + (ulong)rp->fc_mpon); /* end varargs */ + FCSTATCTR.noVirtPtr++; + return(0); +} /* End fc_getvaddr */ + + +/* Given a virtual address, bp, generate the physical mapped address + * and place it where addr points to. 
Save the address pair for lookup later. + */ +_static_ void +fc_mapvaddr( +FC_BRD_INFO *binfo, +RING *rp, +MATCHMAP *mp, +uint32 *haddr, +uint32 *laddr) +{ + fcipbuf_t * mbuf; + + switch (rp->fc_ringno) { + case FC_ELS_RING: + mp->fc_mptr = 0; + /* Update slot fc_mpon points to then bump it */ + if (rp->fc_mpoff == 0) { + rp->fc_mpoff = (uchar * )mp; + rp->fc_mpon = (uchar * )mp; + } else { + ((MATCHMAP * )(rp->fc_mpon))->fc_mptr = (uchar * )mp; + rp->fc_mpon = (uchar * )mp; + } + if (binfo->fc_flag & FC_SLI2) { + *haddr = (uint32)putPaddrHigh(mp->phys); /* return mapped address */ + *laddr = (uint32)putPaddrLow(mp->phys); /* return mapped address */ + } else { + *laddr = (uint32)putPaddrLow(mp->phys); /* return mapped address */ + } + break; + + case FC_IP_RING: + mbuf = (fcipbuf_t * )mp; + fcnextdata(mbuf) = 0; + /* Update slot fc_mpon points to then bump it */ + if (rp->fc_mpoff == 0) { + rp->fc_mpoff = (uchar * )mbuf; + rp->fc_mpon = (uchar * )mbuf; + } else { + fcnextdata((fcipbuf_t * )(rp->fc_mpon)) = mbuf; + rp->fc_mpon = (uchar * )mbuf; + } + if (binfo->fc_flag & FC_SLI2) { + *haddr = (uint32)putPaddrHigh(fcnextpkt(mbuf)); + *laddr = (uint32)putPaddrLow(fcnextpkt(mbuf)); + } else { + *laddr = (uint32)putPaddrLow(fcnextpkt(mbuf)); + } + break; + } + + rp->fc_bufcnt++; + return; +} /* End fc_mapvaddr */ + + + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcmsg.h current/drivers/scsi/lpfc/fcmsg.h --- reference/drivers/scsi/lpfc/fcmsg.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcmsg.h 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,1082 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. 
* + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +#ifndef _H_FCMSG +#define _H_FCMSG + +/* + * LOG Message Group Numbering Sequence + * + * Message Group Preamble From To + * String + * + * ELS ELx 100 199 + * DISCOVERY DIx 200 299 + * MBOX MBx 300 399 + * INIT INx 400 499 + * Unused 500 599 + * IP IPx 600 699 + * FCP FPx 700 799 + * Unused 800 899 + * NODE NDx 900 999 + * MISC MIx 1200 1299 + * LINK LKx 1300 1399 + * SLI SLx 1400 1499 + * CHK_CONDITION CKx 1500 1599 + */ + +/* + * Log Message Structure + * + * The following structure supports LOG messages only. + * Every LOG message is associated with a msgBlkLogDef structure of the + * following type. + */ + +typedef struct msgLogType { + int msgNum; /* Message number */ + char * msgStr; /* Ptr to log message */ + char * msgPreambleStr; /* Ptr to log message preamble */ + int msgOutput; /* Message output target - bitmap */ + /* + * This member controls message OUTPUT. + * + * The phrase 'global controls' refers to user configurable parameters + * such as LOG_VERBOSE that control message output on a global basis. 
+ */ +#define FC_MSG_OPUT_GLOB_CTRL 0x0 /* Use global control */ +#define FC_MSG_OPUT_DISA 0x1 /* Override global control */ +#define FC_MSG_OPUT_FORCE 0x2 /* Override global control */ + int msgType; /* Message LOG type - bitmap */ +#define FC_LOG_MSG_TYPE_INFO 0x1 /* Maskable */ +#define FC_LOG_MSG_TYPE_WARN 0x2 /* Non-Maskable */ +#define FC_LOG_MSG_TYPE_ERR_CFG 0x4 /* Non-Maskable */ +#define FC_LOG_MSG_TYPE_ERR 0x8 /* Non-Maskable */ +#define FC_LOG_MSG_TYPE_PANIC 0x10 /* Non-Maskable */ + int msgMask; /* Message LOG mask - bitmap */ + /* + * NOTE: Only LOG messages of types MSG_TYPE_WARN & MSG_TYPE_INFO are + * maskable at the GLOBAL level. + * + * Any LOG message regardless of message type can be disabled (override verbose) + * at the msgBlkLogDef struct level by setting member msgOutput = FC_MSG_OPUT_DISA. + * The message will never be displayed regardless of verbose mask. + * + * Any LOG message regardless of message type can be enabled (override verbose) + * at the msgBlkLogDef struct level by setting member msgOutput = FC_MSG_OPUT_FORCE. + * The message will always be displayed regardless of verbose mask. 
+ */ +#define LOG_ELS 0x1 /* ELS events */ +#define LOG_DISCOVERY 0x2 /* Link discovery events */ +#define LOG_MBOX 0x4 /* Mailbox events */ +#define LOG_INIT 0x8 /* Initialization events */ +#define LOG_LINK_EVENT 0x10 /* Link events */ +#define LOG_IP 0x20 /* IP traffic history */ +#define LOG_FCP 0x40 /* FCP traffic history */ +#define LOG_NODE 0x80 /* Node table events */ +#define LOG_MISC 0x400 /* Miscellaneous events */ +#define LOG_SLI 0x800 /* SLI events */ +#define LOG_CHK_COND 0x1000 /* FCP Check condition flag */ +#define LOG_ALL_MSG 0x1fff /* LOG all messages */ + unsigned int msgLogID; /* Message LOG ID */ +#define ERRID_LOG_TIMEOUT 0xfdefefa7 /* Fibre Channel timeout */ +#define ERRID_LOG_HDW_ERR 0x1ae4fffc /* Fibre Channel hardware failure */ +#define ERRID_LOG_UNEXPECT_EVENT 0xbdb7e728 /* Fibre Channel unexpected event */ +#define ERRID_LOG_INIT 0xbe1043b8 /* Fibre Channel init failure */ +#define ERRID_LOG_NO_RESOURCE 0x474c1775 /* Fibre Channel no resources */ + } msgLogDef; + +/* + * External Declarations for LOG Messages + */ + +/* ELS LOG Messages */ +extern char fc_mes0100[]; +extern char fc_mes0101[]; +extern char fc_mes0102[]; +extern char fc_mes0103[]; +extern char fc_mes0104[]; +extern char fc_mes0105[]; +extern char fc_mes0106[]; +extern char fc_mes0107[]; +extern char fc_mes0108[]; +extern char fc_mes0109[]; +extern char fc_mes0110[]; +extern char fc_mes0111[]; +extern char fc_mes0112[]; +extern char fc_mes0113[]; +extern char fc_mes0114[]; +extern char fc_mes0115[]; +extern char fc_mes0116[]; +extern char fc_mes0117[]; +extern char fc_mes0118[]; +extern char fc_mes0119[]; +extern char fc_mes0120[]; +extern char fc_mes0121[]; +extern char fc_mes0122[]; +extern char fc_mes0123[]; +extern char fc_mes0124[]; +extern char fc_mes0125[]; +extern char fc_mes0126[]; +extern char fc_mes0127[]; +extern char fc_mes0128[]; +extern char fc_mes0129[]; +extern char fc_mes0130[]; +extern char fc_mes0131[]; +extern char fc_mes0132[]; +extern char 
fc_mes0133[]; +extern char fc_mes0134[]; +extern char fc_mes0135[]; +extern char fc_mes0136[]; + +/* DISCOVERY LOG Messages */ +extern char fc_mes0200[]; +extern char fc_mes0201[]; +extern char fc_mes0202[]; +extern char fc_mes0203[]; +extern char fc_mes0204[]; +extern char fc_mes0205[]; +extern char fc_mes0206[]; +extern char fc_mes0207[]; +extern char fc_mes0208[]; +extern char fc_mes0209[]; +extern char fc_mes0210[]; +extern char fc_mes0211[]; +extern char fc_mes0212[]; +extern char fc_mes0213[]; +extern char fc_mes0214[]; +extern char fc_mes0215[]; +extern char fc_mes0216[]; +extern char fc_mes0217[]; +extern char fc_mes0218[]; +extern char fc_mes0219[]; +extern char fc_mes0220[]; +extern char fc_mes0221[]; +extern char fc_mes0222[]; +extern char fc_mes0223[]; +extern char fc_mes0224[]; +extern char fc_mes0225[]; +extern char fc_mes0226[]; +extern char fc_mes0227[]; +extern char fc_mes0228[]; +extern char fc_mes0229[]; +extern char fc_mes0230[]; +extern char fc_mes0231[]; +extern char fc_mes0232[]; +extern char fc_mes0233[]; +extern char fc_mes0234[]; +extern char fc_mes0235[]; +extern char fc_mes0236[]; +extern char fc_mes0237[]; +extern char fc_mes0238[]; +extern char fc_mes0239[]; +extern char fc_mes0240[]; +extern char fc_mes0241[]; +extern char fc_mes0242[]; +extern char fc_mes0243[]; +extern char fc_mes0244[]; +extern char fc_mes0245[]; +extern char fc_mes0246[]; +extern char fc_mes0247[]; +extern char fc_mes0248[]; +extern char fc_mes0249[]; +extern char fc_mes0250[]; +extern char fc_mes0251[]; +extern char fc_mes0252[]; + +/* MAILBOX LOG Messages */ +extern char fc_mes0300[]; +extern char fc_mes0301[]; +extern char fc_mes0302[]; +extern char fc_mes0303[]; +extern char fc_mes0304[]; +extern char fc_mes0305[]; +extern char fc_mes0306[]; +extern char fc_mes0307[]; +extern char fc_mes0308[]; +extern char fc_mes0309[]; +extern char fc_mes0310[]; +extern char fc_mes0311[]; +extern char fc_mes0312[]; + +/* INIT LOG Messages */ +extern char fc_mes0400[]; 
+extern char fc_mes0401[]; +extern char fc_mes0402[]; +extern char fc_mes0403[]; +extern char fc_mes0404[]; +extern char fc_mes0405[]; +extern char fc_mes0406[]; +extern char fc_mes0407[]; +extern char fc_mes0408[]; +extern char fc_mes0409[]; +extern char fc_mes0410[]; +extern char fc_mes0411[]; +extern char fc_mes0412[]; +extern char fc_mes0413[]; +extern char fc_mes0414[]; +extern char fc_mes0415[]; +extern char fc_mes0416[]; +extern char fc_mes0417[]; +extern char fc_mes0418[]; +extern char fc_mes0419[]; +extern char fc_mes0420[]; +extern char fc_mes0421[]; +extern char fc_mes0422[]; +extern char fc_mes0423[]; +extern char fc_mes0424[]; +extern char fc_mes0425[]; +extern char fc_mes0426[]; +extern char fc_mes0427[]; +extern char fc_mes0428[]; +extern char fc_mes0429[]; +extern char fc_mes0430[]; +extern char fc_mes0431[]; +extern char fc_mes0432[]; +extern char fc_mes0433[]; +extern char fc_mes0434[]; +extern char fc_mes0435[]; +extern char fc_mes0436[]; +extern char fc_mes0437[]; +extern char fc_mes0438[]; +extern char fc_mes0439[]; +extern char fc_mes0440[]; +extern char fc_mes0440[]; +extern char fc_mes0441[]; +extern char fc_mes0442[]; +extern char fc_mes0443[]; +extern char fc_mes0444[]; +extern char fc_mes0445[]; +extern char fc_mes0446[]; +extern char fc_mes0447[]; +extern char fc_mes0448[]; +extern char fc_mes0449[]; +extern char fc_mes0450[]; +extern char fc_mes0451[]; +extern char fc_mes0452[]; +extern char fc_mes0453[]; +extern char fc_mes0454[]; +extern char fc_mes0455[]; +extern char fc_mes0456[]; +extern char fc_mes0457[]; +extern char fc_mes0458[]; +extern char fc_mes0459[]; +extern char fc_mes0460[]; +extern char fc_mes0461[]; + +/* UNUSED */ +/* +extern char fc_mes0500[]; +*/ + +/* IP LOG Messages */ +extern char fc_mes0600[]; +extern char fc_mes0601[]; +extern char fc_mes0602[]; +extern char fc_mes0603[]; +extern char fc_mes0604[]; +extern char fc_mes0605[]; +extern char fc_mes0606[]; +extern char fc_mes0607[]; +extern char fc_mes0608[]; + +/* 
FCP LOG Messages */ +extern char fc_mes0700[]; +extern char fc_mes0701[]; +extern char fc_mes0702[]; +extern char fc_mes0703[]; +extern char fc_mes0704[]; +extern char fc_mes0705[]; +extern char fc_mes0706[]; +extern char fc_mes0707[]; +extern char fc_mes0708[]; +extern char fc_mes0709[]; +extern char fc_mes0710[]; +extern char fc_mes0711[]; +extern char fc_mes0712[]; +extern char fc_mes0713[]; +extern char fc_mes0714[]; +extern char fc_mes0715[]; +extern char fc_mes0716[]; +extern char fc_mes0717[]; +extern char fc_mes0718[]; +extern char fc_mes0719[]; +extern char fc_mes0720[]; +extern char fc_mes0721[]; +extern char fc_mes0722[]; +extern char fc_mes0723[]; +extern char fc_mes0724[]; +extern char fc_mes0725[]; +extern char fc_mes0726[]; +extern char fc_mes0727[]; +extern char fc_mes0728[]; +extern char fc_mes0729[]; +extern char fc_mes0730[]; +extern char fc_mes0731[]; +extern char fc_mes0732[]; +extern char fc_mes0733[]; +extern char fc_mes0734[]; +extern char fc_mes0735[]; +extern char fc_mes0736[]; +extern char fc_mes0737[]; +extern char fc_mes0738[]; +extern char fc_mes0739[]; +extern char fc_mes0740[]; +extern char fc_mes0741[]; +extern char fc_mes0742[]; +extern char fc_mes0743[]; +extern char fc_mes0744[]; +extern char fc_mes0745[]; +extern char fc_mes0746[]; +extern char fc_mes0747[]; +extern char fc_mes0748[]; +extern char fc_mes0749[]; +extern char fc_mes0750[]; +extern char fc_mes0751[]; +extern char fc_mes0752[]; +extern char fc_mes0753[]; +extern char fc_mes0754[]; +extern char fc_mes0756[]; + +/* UNUSED */ +/* +extern char fc_mes0800[]; +*/ + +/* NODE LOG Messages */ +extern char fc_mes0900[]; +extern char fc_mes0901[]; +extern char fc_mes0902[]; +extern char fc_mes0903[]; +extern char fc_mes0904[]; +extern char fc_mes0905[]; +extern char fc_mes0906[]; +extern char fc_mes0907[]; +extern char fc_mes0908[]; +extern char fc_mes0909[]; +extern char fc_mes0910[]; +extern char fc_mes0911[]; +extern char fc_mes0912[]; +extern char fc_mes0913[]; +extern 
char fc_mes0914[]; +extern char fc_mes0915[]; +extern char fc_mes0916[]; +extern char fc_mes0917[]; +extern char fc_mes0918[]; +extern char fc_mes0919[]; +extern char fc_mes0920[]; +extern char fc_mes0921[]; +extern char fc_mes0922[]; +extern char fc_mes0923[]; +extern char fc_mes0924[]; +extern char fc_mes0925[]; +extern char fc_mes0926[]; +extern char fc_mes0927[]; +extern char fc_mes0928[]; + + + +/* MISC LOG messages */ +extern char fc_mes1200[]; +extern char fc_mes1201[]; +extern char fc_mes1202[]; +extern char fc_mes1203[]; +extern char fc_mes1204[]; +extern char fc_mes1205[]; +extern char fc_mes1206[]; +extern char fc_mes1207[]; +extern char fc_mes1208[]; +extern char fc_mes1209[]; +extern char fc_mes1210[]; +extern char fc_mes1211[]; +extern char fc_mes1212[]; +extern char fc_mes1213[]; + +/* LINK LOG Messages */ +extern char fc_mes1300[]; +extern char fc_mes1301[]; +extern char fc_mes1302[]; +extern char fc_mes1303[]; +extern char fc_mes1304[]; +extern char fc_mes1305[]; +extern char fc_mes1306[]; +extern char fc_mes1307[]; + +/* SLI LOG Messages */ +extern char fc_mes1400[]; +extern char fc_mes1401[]; +extern char fc_mes1402[]; + +/* CHK CONDITION LOG Messages */ +/* +extern char fc_mes1500[]; +*/ + +/* + * External Declarations for LOG Message Structure msgBlkLogDef + */ + +/* ELS LOG Message Structures */ +extern msgLogDef fc_msgBlk0100; +extern msgLogDef fc_msgBlk0101; +extern msgLogDef fc_msgBlk0102; +extern msgLogDef fc_msgBlk0103; +extern msgLogDef fc_msgBlk0104; +extern msgLogDef fc_msgBlk0105; +extern msgLogDef fc_msgBlk0106; +extern msgLogDef fc_msgBlk0107; +extern msgLogDef fc_msgBlk0108; +extern msgLogDef fc_msgBlk0109; +extern msgLogDef fc_msgBlk0110; +extern msgLogDef fc_msgBlk0111; +extern msgLogDef fc_msgBlk0112; +extern msgLogDef fc_msgBlk0113; +extern msgLogDef fc_msgBlk0114; +extern msgLogDef fc_msgBlk0115; +extern msgLogDef fc_msgBlk0116; +extern msgLogDef fc_msgBlk0117; +extern msgLogDef fc_msgBlk0118; +extern msgLogDef fc_msgBlk0119; 
+extern msgLogDef fc_msgBlk0120; +extern msgLogDef fc_msgBlk0121; +extern msgLogDef fc_msgBlk0122; +extern msgLogDef fc_msgBlk0123; +extern msgLogDef fc_msgBlk0124; +extern msgLogDef fc_msgBlk0125; +extern msgLogDef fc_msgBlk0126; +extern msgLogDef fc_msgBlk0127; +extern msgLogDef fc_msgBlk0128; +extern msgLogDef fc_msgBlk0129; +extern msgLogDef fc_msgBlk0130; +extern msgLogDef fc_msgBlk0131; +extern msgLogDef fc_msgBlk0132; +extern msgLogDef fc_msgBlk0133; +extern msgLogDef fc_msgBlk0134; +extern msgLogDef fc_msgBlk0135; +extern msgLogDef fc_msgBlk0136; + +/* DISCOVERY LOG Message Structures */ +extern msgLogDef fc_msgBlk0200; +extern msgLogDef fc_msgBlk0201; +extern msgLogDef fc_msgBlk0202; +extern msgLogDef fc_msgBlk0203; +extern msgLogDef fc_msgBlk0204; +extern msgLogDef fc_msgBlk0205; +extern msgLogDef fc_msgBlk0206; +extern msgLogDef fc_msgBlk0207; +extern msgLogDef fc_msgBlk0208; +extern msgLogDef fc_msgBlk0209; +extern msgLogDef fc_msgBlk0210; +extern msgLogDef fc_msgBlk0211; +extern msgLogDef fc_msgBlk0212; +extern msgLogDef fc_msgBlk0213; +extern msgLogDef fc_msgBlk0214; +extern msgLogDef fc_msgBlk0215; +extern msgLogDef fc_msgBlk0216; +extern msgLogDef fc_msgBlk0217; +extern msgLogDef fc_msgBlk0218; +extern msgLogDef fc_msgBlk0219; +extern msgLogDef fc_msgBlk0220; +extern msgLogDef fc_msgBlk0221; +extern msgLogDef fc_msgBlk0222; +extern msgLogDef fc_msgBlk0223; +extern msgLogDef fc_msgBlk0224; +extern msgLogDef fc_msgBlk0225; +extern msgLogDef fc_msgBlk0226; +extern msgLogDef fc_msgBlk0227; +extern msgLogDef fc_msgBlk0228; +extern msgLogDef fc_msgBlk0229; +extern msgLogDef fc_msgBlk0230; +extern msgLogDef fc_msgBlk0231; +extern msgLogDef fc_msgBlk0232; +extern msgLogDef fc_msgBlk0233; +extern msgLogDef fc_msgBlk0234; +extern msgLogDef fc_msgBlk0235; +extern msgLogDef fc_msgBlk0236; +extern msgLogDef fc_msgBlk0237; +extern msgLogDef fc_msgBlk0238; +extern msgLogDef fc_msgBlk0239; +extern msgLogDef fc_msgBlk0240; +extern msgLogDef fc_msgBlk0241; +extern 
msgLogDef fc_msgBlk0242; +extern msgLogDef fc_msgBlk0243; +extern msgLogDef fc_msgBlk0244; +extern msgLogDef fc_msgBlk0245; +extern msgLogDef fc_msgBlk0246; +extern msgLogDef fc_msgBlk0247; +extern msgLogDef fc_msgBlk0248; +extern msgLogDef fc_msgBlk0249; +extern msgLogDef fc_msgBlk0250; +extern msgLogDef fc_msgBlk0251; +extern msgLogDef fc_msgBlk0252; + +/* MAILBOX LOG Message Structures */ +extern msgLogDef fc_msgBlk0300; +extern msgLogDef fc_msgBlk0301; +extern msgLogDef fc_msgBlk0302; +extern msgLogDef fc_msgBlk0303; +extern msgLogDef fc_msgBlk0304; +extern msgLogDef fc_msgBlk0305; +extern msgLogDef fc_msgBlk0306; +extern msgLogDef fc_msgBlk0307; +extern msgLogDef fc_msgBlk0308; +extern msgLogDef fc_msgBlk0309; +extern msgLogDef fc_msgBlk0310; +extern msgLogDef fc_msgBlk0311; +extern msgLogDef fc_msgBlk0312; + +/* INIT LOG Message Structures */ +extern msgLogDef fc_msgBlk0400; +extern msgLogDef fc_msgBlk0401; +extern msgLogDef fc_msgBlk0402; +extern msgLogDef fc_msgBlk0403; +extern msgLogDef fc_msgBlk0404; +extern msgLogDef fc_msgBlk0405; +extern msgLogDef fc_msgBlk0406; +extern msgLogDef fc_msgBlk0407; +extern msgLogDef fc_msgBlk0408; +extern msgLogDef fc_msgBlk0409; +extern msgLogDef fc_msgBlk0410; +extern msgLogDef fc_msgBlk0411; +extern msgLogDef fc_msgBlk0412; +extern msgLogDef fc_msgBlk0413; +extern msgLogDef fc_msgBlk0414; +extern msgLogDef fc_msgBlk0415; +extern msgLogDef fc_msgBlk0416; +extern msgLogDef fc_msgBlk0417; +extern msgLogDef fc_msgBlk0418; +extern msgLogDef fc_msgBlk0419; +extern msgLogDef fc_msgBlk0420; +extern msgLogDef fc_msgBlk0421; +extern msgLogDef fc_msgBlk0422; +extern msgLogDef fc_msgBlk0423; +extern msgLogDef fc_msgBlk0424; +extern msgLogDef fc_msgBlk0425; +extern msgLogDef fc_msgBlk0426; +extern msgLogDef fc_msgBlk0427; +extern msgLogDef fc_msgBlk0428; +extern msgLogDef fc_msgBlk0429; +extern msgLogDef fc_msgBlk0430; +extern msgLogDef fc_msgBlk0431; +extern msgLogDef fc_msgBlk0432; +extern msgLogDef fc_msgBlk0433; +extern 
msgLogDef fc_msgBlk0434; +extern msgLogDef fc_msgBlk0435; +extern msgLogDef fc_msgBlk0436; +extern msgLogDef fc_msgBlk0437; +extern msgLogDef fc_msgBlk0438; +extern msgLogDef fc_msgBlk0439; +extern msgLogDef fc_msgBlk0440; +extern msgLogDef fc_msgBlk0441; +extern msgLogDef fc_msgBlk0442; +extern msgLogDef fc_msgBlk0443; +extern msgLogDef fc_msgBlk0444; +extern msgLogDef fc_msgBlk0445; +extern msgLogDef fc_msgBlk0446; +extern msgLogDef fc_msgBlk0447; +extern msgLogDef fc_msgBlk0448; +extern msgLogDef fc_msgBlk0449; +extern msgLogDef fc_msgBlk0450; +extern msgLogDef fc_msgBlk0451; +extern msgLogDef fc_msgBlk0452; +extern msgLogDef fc_msgBlk0453; +extern msgLogDef fc_msgBlk0454; +extern msgLogDef fc_msgBlk0455; +extern msgLogDef fc_msgBlk0456; +extern msgLogDef fc_msgBlk0457; +extern msgLogDef fc_msgBlk0458; +extern msgLogDef fc_msgBlk0459; +extern msgLogDef fc_msgBlk0460; +extern msgLogDef fc_msgBlk0461; + +/* UNUSED */ +/* +extern msgLogDef fc_msgBlk0500; +*/ + +/* IP LOG Message Structures */ +extern msgLogDef fc_msgBlk0600; +extern msgLogDef fc_msgBlk0601; +extern msgLogDef fc_msgBlk0602; +extern msgLogDef fc_msgBlk0603; +extern msgLogDef fc_msgBlk0604; +extern msgLogDef fc_msgBlk0605; +extern msgLogDef fc_msgBlk0606; +extern msgLogDef fc_msgBlk0607; +extern msgLogDef fc_msgBlk0608; + +/* FCP LOG Message Structures */ +extern msgLogDef fc_msgBlk0700; +extern msgLogDef fc_msgBlk0701; +extern msgLogDef fc_msgBlk0702; +extern msgLogDef fc_msgBlk0703; +extern msgLogDef fc_msgBlk0704; +extern msgLogDef fc_msgBlk0705; +extern msgLogDef fc_msgBlk0706; +extern msgLogDef fc_msgBlk0707; +extern msgLogDef fc_msgBlk0708; +extern msgLogDef fc_msgBlk0709; +extern msgLogDef fc_msgBlk0710; +extern msgLogDef fc_msgBlk0711; +extern msgLogDef fc_msgBlk0712; +extern msgLogDef fc_msgBlk0713; +extern msgLogDef fc_msgBlk0714; +extern msgLogDef fc_msgBlk0715; +extern msgLogDef fc_msgBlk0716; +extern msgLogDef fc_msgBlk0717; +extern msgLogDef fc_msgBlk0718; +extern msgLogDef 
fc_msgBlk0719; +extern msgLogDef fc_msgBlk0720; +extern msgLogDef fc_msgBlk0721; +extern msgLogDef fc_msgBlk0722; +extern msgLogDef fc_msgBlk0723; +extern msgLogDef fc_msgBlk0724; +extern msgLogDef fc_msgBlk0725; +extern msgLogDef fc_msgBlk0726; +extern msgLogDef fc_msgBlk0727; +extern msgLogDef fc_msgBlk0728; +extern msgLogDef fc_msgBlk0729; +extern msgLogDef fc_msgBlk0730; +extern msgLogDef fc_msgBlk0731; +extern msgLogDef fc_msgBlk0732; +extern msgLogDef fc_msgBlk0733; +extern msgLogDef fc_msgBlk0734; +extern msgLogDef fc_msgBlk0735; +extern msgLogDef fc_msgBlk0736; +extern msgLogDef fc_msgBlk0737; +extern msgLogDef fc_msgBlk0738; +extern msgLogDef fc_msgBlk0739; +extern msgLogDef fc_msgBlk0740; +extern msgLogDef fc_msgBlk0741; +extern msgLogDef fc_msgBlk0742; +extern msgLogDef fc_msgBlk0743; +extern msgLogDef fc_msgBlk0744; +extern msgLogDef fc_msgBlk0745; +extern msgLogDef fc_msgBlk0746; +extern msgLogDef fc_msgBlk0747; +extern msgLogDef fc_msgBlk0748; +extern msgLogDef fc_msgBlk0749; +extern msgLogDef fc_msgBlk0750; +extern msgLogDef fc_msgBlk0751; +extern msgLogDef fc_msgBlk0752; +extern msgLogDef fc_msgBlk0753; +extern msgLogDef fc_msgBlk0754; +extern msgLogDef fc_msgBlk0756; + +/* UNUSED */ +/* +extern msgLogDef fc_msgBlk0800; +*/ + +/* NODE LOG Message Structures */ +extern msgLogDef fc_msgBlk0900; +extern msgLogDef fc_msgBlk0901; +extern msgLogDef fc_msgBlk0902; +extern msgLogDef fc_msgBlk0903; +extern msgLogDef fc_msgBlk0904; +extern msgLogDef fc_msgBlk0905; +extern msgLogDef fc_msgBlk0906; +extern msgLogDef fc_msgBlk0907; +extern msgLogDef fc_msgBlk0908; +extern msgLogDef fc_msgBlk0909; +extern msgLogDef fc_msgBlk0910; +extern msgLogDef fc_msgBlk0911; +extern msgLogDef fc_msgBlk0912; +extern msgLogDef fc_msgBlk0913; +extern msgLogDef fc_msgBlk0914; +extern msgLogDef fc_msgBlk0915; +extern msgLogDef fc_msgBlk0916; +extern msgLogDef fc_msgBlk0917; +extern msgLogDef fc_msgBlk0918; +extern msgLogDef fc_msgBlk0919; +extern msgLogDef fc_msgBlk0920; +extern 
msgLogDef fc_msgBlk0921; +extern msgLogDef fc_msgBlk0922; +extern msgLogDef fc_msgBlk0923; +extern msgLogDef fc_msgBlk0924; +extern msgLogDef fc_msgBlk0925; +extern msgLogDef fc_msgBlk0926; +extern msgLogDef fc_msgBlk0927; +extern msgLogDef fc_msgBlk0928; + + + +/* MISC LOG Message Structures */ +extern msgLogDef fc_msgBlk1200; +extern msgLogDef fc_msgBlk1201; +extern msgLogDef fc_msgBlk1202; +extern msgLogDef fc_msgBlk1203; +extern msgLogDef fc_msgBlk1204; +extern msgLogDef fc_msgBlk1205; +extern msgLogDef fc_msgBlk1206; +extern msgLogDef fc_msgBlk1207; +extern msgLogDef fc_msgBlk1208; +extern msgLogDef fc_msgBlk1209; +extern msgLogDef fc_msgBlk1210; +extern msgLogDef fc_msgBlk1211; +extern msgLogDef fc_msgBlk1212; +extern msgLogDef fc_msgBlk1213; + +/* LINK LOG Message Structures */ +extern msgLogDef fc_msgBlk1300; +extern msgLogDef fc_msgBlk1301; +extern msgLogDef fc_msgBlk1302; +extern msgLogDef fc_msgBlk1303; +extern msgLogDef fc_msgBlk1304; +extern msgLogDef fc_msgBlk1305; +extern msgLogDef fc_msgBlk1306; +extern msgLogDef fc_msgBlk1307; + +/* SLI LOG Message Structures */ +extern msgLogDef fc_msgBlk1400; +extern msgLogDef fc_msgBlk1401; +extern msgLogDef fc_msgBlk1402; + +/* CHK CONDITION LOG Message Structures */ +/* +extern msgLogDef fc_msgBlk1500; +*/ + +/* + * LOG Messages Numbers + */ + +/* ELS LOG Message Numbers */ +#define FC_LOG_MSG_EL_0100 100 +#define FC_LOG_MSG_EL_0101 101 +#define FC_LOG_MSG_EL_0102 102 +#define FC_LOG_MSG_EL_0103 103 +#define FC_LOG_MSG_EL_0104 104 +#define FC_LOG_MSG_EL_0105 105 +#define FC_LOG_MSG_EL_0106 106 +#define FC_LOG_MSG_EL_0107 107 +#define FC_LOG_MSG_EL_0108 108 +#define FC_LOG_MSG_EL_0109 109 +#define FC_LOG_MSG_EL_0110 110 +#define FC_LOG_MSG_EL_0111 111 +#define FC_LOG_MSG_EL_0112 112 +#define FC_LOG_MSG_EL_0113 113 +#define FC_LOG_MSG_EL_0114 114 +#define FC_LOG_MSG_EL_0115 115 +#define FC_LOG_MSG_EL_0116 116 +#define FC_LOG_MSG_EL_0117 117 +#define FC_LOG_MSG_EL_0118 118 +#define FC_LOG_MSG_EL_0119 119 +#define 
FC_LOG_MSG_EL_0120 120 +#define FC_LOG_MSG_EL_0121 121 +#define FC_LOG_MSG_EL_0122 122 +#define FC_LOG_MSG_EL_0123 123 +#define FC_LOG_MSG_EL_0124 124 +#define FC_LOG_MSG_EL_0125 125 +#define FC_LOG_MSG_EL_0126 126 +#define FC_LOG_MSG_EL_0127 127 +#define FC_LOG_MSG_EL_0128 128 +#define FC_LOG_MSG_EL_0129 129 +#define FC_LOG_MSG_EL_0130 130 +#define FC_LOG_MSG_EL_0131 131 +#define FC_LOG_MSG_EL_0132 132 +#define FC_LOG_MSG_EL_0133 133 +#define FC_LOG_MSG_EL_0134 134 +#define FC_LOG_MSG_EL_0135 135 +#define FC_LOG_MSG_EL_0136 136 + +/* DISCOVERY LOG Message Numbers */ +#define FC_LOG_MSG_DI_0200 200 +#define FC_LOG_MSG_DI_0201 201 +#define FC_LOG_MSG_DI_0202 202 +#define FC_LOG_MSG_DI_0203 203 +#define FC_LOG_MSG_DI_0204 204 +#define FC_LOG_MSG_DI_0205 205 +#define FC_LOG_MSG_DI_0206 206 +#define FC_LOG_MSG_DI_0207 207 +#define FC_LOG_MSG_DI_0208 208 +#define FC_LOG_MSG_DI_0209 209 +#define FC_LOG_MSG_DI_0210 210 +#define FC_LOG_MSG_DI_0211 211 +#define FC_LOG_MSG_DI_0212 212 +#define FC_LOG_MSG_DI_0213 213 +#define FC_LOG_MSG_DI_0214 214 +#define FC_LOG_MSG_DI_0215 215 +#define FC_LOG_MSG_DI_0216 216 +#define FC_LOG_MSG_DI_0217 217 +#define FC_LOG_MSG_DI_0218 218 +#define FC_LOG_MSG_DI_0219 219 +#define FC_LOG_MSG_DI_0220 220 +#define FC_LOG_MSG_DI_0221 221 +#define FC_LOG_MSG_DI_0222 222 +#define FC_LOG_MSG_DI_0223 223 +#define FC_LOG_MSG_DI_0224 224 +#define FC_LOG_MSG_DI_0225 225 +#define FC_LOG_MSG_DI_0226 226 +#define FC_LOG_MSG_DI_0227 227 +#define FC_LOG_MSG_DI_0228 228 +#define FC_LOG_MSG_DI_0229 229 +#define FC_LOG_MSG_DI_0230 230 +#define FC_LOG_MSG_DI_0231 231 +#define FC_LOG_MSG_DI_0232 232 +#define FC_LOG_MSG_DI_0233 233 +#define FC_LOG_MSG_DI_0234 234 +#define FC_LOG_MSG_DI_0235 235 +#define FC_LOG_MSG_DI_0236 236 +#define FC_LOG_MSG_DI_0237 237 +#define FC_LOG_MSG_DI_0238 238 +#define FC_LOG_MSG_DI_0239 239 +#define FC_LOG_MSG_DI_0240 240 +#define FC_LOG_MSG_DI_0241 241 +#define FC_LOG_MSG_DI_0242 242 +#define FC_LOG_MSG_DI_0243 243 +#define 
FC_LOG_MSG_DI_0244 244 +#define FC_LOG_MSG_DI_0245 245 +#define FC_LOG_MSG_DI_0246 246 +#define FC_LOG_MSG_DI_0247 247 +#define FC_LOG_MSG_DI_0248 248 +#define FC_LOG_MSG_DI_0249 249 +#define FC_LOG_MSG_DI_0250 250 +#define FC_LOG_MSG_DI_0251 251 +#define FC_LOG_MSG_DI_0252 252 + +/* MAILBOX LOG Message Numbers */ +#define FC_LOG_MSG_MB_0300 300 +#define FC_LOG_MSG_MB_0301 301 +#define FC_LOG_MSG_MB_0302 302 +#define FC_LOG_MSG_MB_0303 303 +#define FC_LOG_MSG_MB_0304 304 +#define FC_LOG_MSG_MB_0305 305 +#define FC_LOG_MSG_MB_0306 306 +#define FC_LOG_MSG_MB_0307 307 +#define FC_LOG_MSG_MB_0308 308 +#define FC_LOG_MSG_MB_0309 309 +#define FC_LOG_MSG_MB_0310 310 +#define FC_LOG_MSG_MB_0311 311 +#define FC_LOG_MSG_MB_0312 312 + +/* INIT LOG Message Numbers */ +#define FC_LOG_MSG_IN_0400 400 +#define FC_LOG_MSG_IN_0401 401 +#define FC_LOG_MSG_IN_0402 402 +#define FC_LOG_MSG_IN_0403 403 +#define FC_LOG_MSG_IN_0404 404 +#define FC_LOG_MSG_IN_0405 405 +#define FC_LOG_MSG_IN_0406 406 +#define FC_LOG_MSG_IN_0407 407 +#define FC_LOG_MSG_IN_0408 408 +#define FC_LOG_MSG_IN_0409 409 +#define FC_LOG_MSG_IN_0410 410 +#define FC_LOG_MSG_IN_0411 411 +#define FC_LOG_MSG_IN_0412 412 +#define FC_LOG_MSG_IN_0413 413 +#define FC_LOG_MSG_IN_0414 414 +#define FC_LOG_MSG_IN_0415 415 +#define FC_LOG_MSG_IN_0416 416 +#define FC_LOG_MSG_IN_0417 417 +#define FC_LOG_MSG_IN_0418 418 +#define FC_LOG_MSG_IN_0419 419 +#define FC_LOG_MSG_IN_0420 420 +#define FC_LOG_MSG_IN_0421 421 +#define FC_LOG_MSG_IN_0422 422 +#define FC_LOG_MSG_IN_0423 423 +#define FC_LOG_MSG_IN_0424 424 +#define FC_LOG_MSG_IN_0425 425 +#define FC_LOG_MSG_IN_0426 426 +#define FC_LOG_MSG_IN_0427 427 +#define FC_LOG_MSG_IN_0428 428 +#define FC_LOG_MSG_IN_0429 429 +#define FC_LOG_MSG_IN_0430 430 +#define FC_LOG_MSG_IN_0431 431 +#define FC_LOG_MSG_IN_0432 432 +#define FC_LOG_MSG_IN_0433 433 +#define FC_LOG_MSG_IN_0434 434 +#define FC_LOG_MSG_IN_0435 435 +#define FC_LOG_MSG_IN_0436 436 +#define FC_LOG_MSG_IN_0437 437 +#define 
FC_LOG_MSG_IN_0438 438 +#define FC_LOG_MSG_IN_0439 439 +#define FC_LOG_MSG_IN_0440 440 +#define FC_LOG_MSG_IN_0441 441 +#define FC_LOG_MSG_IN_0442 442 +#define FC_LOG_MSG_IN_0443 443 +#define FC_LOG_MSG_IN_0444 444 +#define FC_LOG_MSG_IN_0445 445 +#define FC_LOG_MSG_IN_0446 446 +#define FC_LOG_MSG_IN_0447 447 +#define FC_LOG_MSG_IN_0448 448 +#define FC_LOG_MSG_IN_0449 449 +#define FC_LOG_MSG_IN_0450 450 +#define FC_LOG_MSG_IN_0451 451 +#define FC_LOG_MSG_IN_0452 452 +#define FC_LOG_MSG_IN_0453 453 +#define FC_LOG_MSG_IN_0454 454 +#define FC_LOG_MSG_IN_0455 455 +#define FC_LOG_MSG_IN_0456 456 +#define FC_LOG_MSG_IN_0457 457 +#define FC_LOG_MSG_IN_0458 458 +#define FC_LOG_MSG_IN_0459 459 +#define FC_LOG_MSG_IN_0460 460 +#define FC_LOG_MSG_IN_0461 461 + +/* UNUSED */ +/* +#define FC_LOG_MSG_IN_0500 500 +*/ + +/* IP LOG Message Numbers */ +#define FC_LOG_MSG_IP_0600 600 +#define FC_LOG_MSG_IP_0601 601 +#define FC_LOG_MSG_IP_0602 602 +#define FC_LOG_MSG_IP_0603 603 +#define FC_LOG_MSG_IP_0604 604 +#define FC_LOG_MSG_IP_0605 605 +#define FC_LOG_MSG_IP_0606 606 +#define FC_LOG_MSG_IP_0607 607 +#define FC_LOG_MSG_IP_0608 608 + +/* FCP LOG Message Numbers */ +#define FC_LOG_MSG_FP_0700 700 +#define FC_LOG_MSG_FP_0701 701 +#define FC_LOG_MSG_FP_0702 702 +#define FC_LOG_MSG_FP_0703 703 +#define FC_LOG_MSG_FP_0704 704 +#define FC_LOG_MSG_FP_0705 705 +#define FC_LOG_MSG_FP_0706 706 +#define FC_LOG_MSG_FP_0707 707 +#define FC_LOG_MSG_FP_0708 708 +#define FC_LOG_MSG_FP_0709 709 +#define FC_LOG_MSG_FP_0710 710 +#define FC_LOG_MSG_FP_0711 711 +#define FC_LOG_MSG_FP_0712 712 +#define FC_LOG_MSG_FP_0713 713 +#define FC_LOG_MSG_FP_0714 714 +#define FC_LOG_MSG_FP_0715 715 +#define FC_LOG_MSG_FP_0716 716 +#define FC_LOG_MSG_FP_0717 717 +#define FC_LOG_MSG_FP_0718 718 +#define FC_LOG_MSG_FP_0719 719 +#define FC_LOG_MSG_FP_0720 720 +#define FC_LOG_MSG_FP_0721 721 +#define FC_LOG_MSG_FP_0722 722 +#define FC_LOG_MSG_FP_0723 723 +#define FC_LOG_MSG_FP_0724 724 +#define FC_LOG_MSG_FP_0725 725 
+#define FC_LOG_MSG_FP_0726 726 +#define FC_LOG_MSG_FP_0727 727 +#define FC_LOG_MSG_FP_0728 728 +#define FC_LOG_MSG_FP_0729 729 +#define FC_LOG_MSG_FP_0730 730 +#define FC_LOG_MSG_FP_0731 731 +#define FC_LOG_MSG_FP_0732 732 +#define FC_LOG_MSG_FP_0733 733 +#define FC_LOG_MSG_FP_0734 734 +#define FC_LOG_MSG_FP_0735 735 +#define FC_LOG_MSG_FP_0736 736 +#define FC_LOG_MSG_FP_0737 737 +#define FC_LOG_MSG_FP_0738 738 +#define FC_LOG_MSG_FP_0739 739 +#define FC_LOG_MSG_FP_0740 740 +#define FC_LOG_MSG_FP_0741 741 +#define FC_LOG_MSG_FP_0742 742 +#define FC_LOG_MSG_FP_0743 743 +#define FC_LOG_MSG_FP_0744 744 +#define FC_LOG_MSG_FP_0745 745 +#define FC_LOG_MSG_FP_0746 746 +#define FC_LOG_MSG_FP_0747 747 +#define FC_LOG_MSG_FP_0748 748 +#define FC_LOG_MSG_FP_0749 749 +#define FC_LOG_MSG_FP_0750 750 +#define FC_LOG_MSG_FP_0751 751 +#define FC_LOG_MSG_FP_0752 752 +#define FC_LOG_MSG_FP_0753 753 +#define FC_LOG_MSG_FP_0754 754 +#define FC_LOG_MSG_FP_0756 756 + +/* UNUSED */ +/* +#define FC_LOG_MSG_FP_0800 800 +*/ + +/* NODE LOG Message Numbers */ +#define FC_LOG_MSG_ND_0900 900 +#define FC_LOG_MSG_ND_0901 901 +#define FC_LOG_MSG_ND_0902 902 +#define FC_LOG_MSG_ND_0903 903 +#define FC_LOG_MSG_ND_0904 904 +#define FC_LOG_MSG_ND_0905 905 +#define FC_LOG_MSG_ND_0906 906 +#define FC_LOG_MSG_ND_0907 907 +#define FC_LOG_MSG_ND_0908 908 +#define FC_LOG_MSG_ND_0909 909 +#define FC_LOG_MSG_ND_0910 910 +#define FC_LOG_MSG_ND_0911 911 +#define FC_LOG_MSG_ND_0912 912 +#define FC_LOG_MSG_ND_0913 913 +#define FC_LOG_MSG_ND_0914 914 +#define FC_LOG_MSG_ND_0915 915 +#define FC_LOG_MSG_ND_0916 916 +#define FC_LOG_MSG_ND_0917 917 +#define FC_LOG_MSG_ND_0918 918 +#define FC_LOG_MSG_ND_0919 919 +#define FC_LOG_MSG_ND_0920 920 +#define FC_LOG_MSG_ND_0921 921 +#define FC_LOG_MSG_ND_0922 922 +#define FC_LOG_MSG_ND_0923 923 +#define FC_LOG_MSG_ND_0924 924 +#define FC_LOG_MSG_ND_0925 925 +#define FC_LOG_MSG_ND_0926 926 +#define FC_LOG_MSG_ND_0927 927 +#define FC_LOG_MSG_ND_0928 928 + + + +/* MISC LOG 
Message Numbers */ +#define FC_LOG_MSG_MI_1200 1200 +#define FC_LOG_MSG_MI_1201 1201 +#define FC_LOG_MSG_MI_1202 1202 +#define FC_LOG_MSG_MI_1203 1203 +#define FC_LOG_MSG_MI_1204 1204 +#define FC_LOG_MSG_MI_1205 1205 +#define FC_LOG_MSG_MI_1206 1206 +#define FC_LOG_MSG_MI_1207 1207 +#define FC_LOG_MSG_MI_1208 1208 +#define FC_LOG_MSG_MI_1209 1209 +#define FC_LOG_MSG_MI_1210 1210 +#define FC_LOG_MSG_MI_1211 1211 +#define FC_LOG_MSG_MI_1212 1212 +#define FC_LOG_MSG_MI_1213 1213 + +/* LINK LOG Message Numbers */ +#define FC_LOG_MSG_LK_1300 1300 +#define FC_LOG_MSG_LK_1301 1301 +#define FC_LOG_MSG_LK_1302 1302 +#define FC_LOG_MSG_LK_1303 1303 +#define FC_LOG_MSG_LK_1304 1304 +#define FC_LOG_MSG_LK_1305 1305 +#define FC_LOG_MSG_LK_1306 1306 +#define FC_LOG_MSG_LK_1307 1307 + +/* SLI LOG Message Numbers */ +#define FC_LOG_MSG_LK_1400 1400 +#define FC_LOG_MSG_LK_1401 1401 +#define FC_LOG_MSG_LK_1402 1402 + +/* CHK CONDITION LOG Message Numbers */ +/* +#define FC_LOG_MSG_LK_1500 1500 +*/ +#endif /* _H_FCMSG */ diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcmsgcom.c current/drivers/scsi/lpfc/fcmsgcom.c --- reference/drivers/scsi/lpfc/fcmsgcom.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcmsgcom.c 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,6231 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. 
* + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +/* +LOG Message Preamble Strings + +Preamble strings are displayed at the start of LOG messages. +The 3rd letter of the preamble string identifies the +message type as follows: + +i = Information fc_msgPreamble??i where i = Information +w = Warning fc_msgPreamble??w where w = Warning +c = Error config fc_msgPreamble??c where c = Error config +e = Error fc_msgPreamble??e where e = Error +p = Panic fc_msgPreamble??p where p = Panic +*/ + +/* ELS Log Message Preamble Strings - 100 */ +char fc_msgPreambleELi[] = "ELi:"; /* ELS Information */ +char fc_msgPreambleELw[] = "ELw:"; /* ELS Warning */ +char fc_msgPreambleELe[] = "ELe:"; /* ELS Error */ +char fc_msgPreambleELp[] = "ELp:"; /* ELS Panic */ + +/* DISCOVERY Log Message Preamble Strings - 200 */ +char fc_msgPreambleDIi[] = "DIi:"; /* Discovery Information */ +char fc_msgPreambleDIw[] = "DIw:"; /* Discovery Warning */ +char fc_msgPreambleDIe[] = "DIe:"; /* Discovery Error */ +char fc_msgPreambleDIp[] = "DIp:"; /* Discovery Panic */ + +/* MAILBOX Log Message Preamble Strings - 300 */ +char fc_msgPreambleMBi[] = "MBi:"; /* Mailbox Information */ +char fc_msgPreambleMBw[] = "MBw:"; /* Mailbox Warning */ +char fc_msgPreambleMBe[] = "MBe:"; /* Mailbox Error */ +char fc_msgPreambleMBp[] = "MBp:"; /* Mailbox Panic */ + +/* INIT Log Message Preamble Strings - 400, 500 */ +char fc_msgPreambleINi[] = "INi:"; /* INIT Information */ +char fc_msgPreambleINw[] = "INw:"; /* INIT Warning */ +char fc_msgPreambleINc[] = "INc:"; /* INIT Error Config*/ +char fc_msgPreambleINe[] = "INe:"; /* INIT Error */ +char 
fc_msgPreambleINp[] = "INp:"; /* INIT Panic */ + +/* IP Log Message Preamble Strings - 600 */ +char fc_msgPreambleIPi[] = "IPi:"; /* IP Information */ +char fc_msgPreambleIPw[] = "IPw:"; /* IP Warning */ +char fc_msgPreambleIPe[] = "IPe:"; /* IP Error */ +char fc_msgPreambleIPp[] = "IPp:"; /* IP Panic */ + +/* FCP Log Message Preamble Strings - 700, 800 */ +char fc_msgPreambleFPi[] = "FPi:"; /* FP Information */ +char fc_msgPreambleFPw[] = "FPw:"; /* FP Warning */ +char fc_msgPreambleFPe[] = "FPe:"; /* FP Error */ +char fc_msgPreambleFPp[] = "FPp:"; /* FP Panic */ + +/* NODE Log Message Preamble Strings - 900 */ +char fc_msgPreambleNDi[] = "NDi:"; /* Node Information */ +char fc_msgPreambleNDe[] = "NDe:"; /* Node Error */ +char fc_msgPreambleNDp[] = "NDp:"; /* Node Panic */ + + + +/* MISC Log Message Preamble Strings - 1200 */ +char fc_msgPreambleMIi[] = "MIi:"; /* MISC Information */ +char fc_msgPreambleMIw[] = "MIw:"; /* MISC Warning */ +char fc_msgPreambleMIc[] = "MIc:"; /* MISC Error Config */ +char fc_msgPreambleMIe[] = "MIe:"; /* MISC Error */ +char fc_msgPreambleMIp[] = "MIp:"; /* MISC Panic */ + +/* Link Log Message Preamble Strings - 1300 */ +char fc_msgPreambleLKi[] = "LKi:"; /* Link Information */ +char fc_msgPreambleLKw[] = "LKw:"; /* Link Warning */ +char fc_msgPreambleLKe[] = "LKe:"; /* Link Error */ +char fc_msgPreambleLKp[] = "Lkp:"; /* Link Panic */ + +/* SLI Log Message Preamble Strings - 1400 */ +char fc_msgPreambleSLe[] = "SLe:"; /* SLI Error */ + +/* CHECK CONDITION Log Message Preamble Strings - 1500 */ +char fc_msgPreambleCKi[] = "CKi:"; /* Check Condition Information */ +char fc_msgPreambleCKe[] = "CKe:"; /* Check Condition Error */ +char fc_msgPreambleCKp[] = "CKp:"; /* Check Condition Panic */ + + +/* + * Begin ELS LOG message structures + */ + +/* +msgName: fc_mes0100 +message: Abort delay xmit clock +descript: The driver is canceling the delay timer for sending an ELS + command. 
+data: (1) did (2) remoteID (3) ulpIoTag +severity: Warning +log: LOG_ELS verbose +module: fcclockb.c +action: None required +*/ +char fc_mes0100[] = "%sAbort delay xmit clock Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0100 = { + FC_LOG_MSG_EL_0100, /* LOG message number */ + fc_mes0100, /* LOG message pointer */ + fc_msgPreambleELw, /* LOG message preamble pointer */ + FC_MSG_OPUT_GLOB_CTRL, /* LOG message output control */ + FC_LOG_MSG_TYPE_WARN, /* LOG message type */ + LOG_ELS, /* LOG message mask & group */ + ERRID_LOG_UNEXPECT_EVENT }; /* LOG message error ID */ + +/* +msgName: fc_mes0101 +message: Abort delay xmit context +descript: The driver is canceling the delay timer for sending an ELS + command. +data: (1) did (2) remoteID (3) ulpIoTag +severity: Warning +log: LOG_ELS verbose +module: fcclockb.c +action: None required +*/ +char fc_mes0101[] = "%sAbort delay xmit context Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0101 = { + FC_LOG_MSG_EL_0101, + fc_mes0101, + fc_msgPreambleELw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0102 +message: Stray ELS completion +descript: Received an ELS command completion without issuing a + corresponding ELS Command (based on the IOTAG field + in the CMD_ELS_REQUEST_CR IOCB). +data: (1) ulpCommand (2) ulpIoTag +severity: Error +log: Always +module: fcelsb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0102[] = "%sStray ELS completion Data: x%x x%x"; +msgLogDef fc_msgBlk0102 = { + FC_LOG_MSG_EL_0102, + fc_mes0102, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0103 +message: Dropping ELS rsp +descript: Dropping ELS response because there is no node table entry. 
+data: (1) ldata (2) ldid +severity: Error +log: Always +module: fcscsib.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0103[] = "%sDropping ELS rsp Data: x%x x%x"; +msgLogDef fc_msgBlk0103 = { + FC_LOG_MSG_EL_0103, + fc_mes0103, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0104 +message: Aborted ELS IOCB +descript: Driver decided to abort any action taken as a result of this ELS + command completing. +data: (1) ulpCommand (2) ulpIoTag +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0104[] = "%sAborted ELS IOCB Data: x%x x%x"; +msgLogDef fc_msgBlk0104 = { + FC_LOG_MSG_EL_0104, + fc_mes0104, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0105 +message: ELS completion +descript: Adapter has notified the driver of ELS command completion. +data: (1) ulpCommand (2) ulpIoTag (3) ulpStatus (4) ulpWord[4] +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0105[] = "%sELS completion Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0105 = { + FC_LOG_MSG_EL_0105, + fc_mes0105, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0106 +message: Unknown ELS command +descript: Received an unknown ELS command completion. +data: None +severity: Error +log: Always +module: fcelsb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. 
+*/ +char fc_mes0106[] = "%sUnknown ELS command x%x"; +msgLogDef fc_msgBlk0106 = { + FC_LOG_MSG_EL_0106, + fc_mes0106, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0107 +message: ELS command completion error +descript: A driver initiated ELS command completed with an error status. +data: (1) ulpCommand (2) ulpStatus (3) ulpWord[4] (4) ulpWord[5] +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0107[] = "%sELS command completion error Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0107 = { + FC_LOG_MSG_EL_0107, + fc_mes0107, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0108 +message: ELS response completion error +descript: An ELS response sent in response to a received ELS command + completed with an error status. +data: (1) ulpCommand (2) ulpStatus (3) ulpWord[4] (4) ulpWord[5] +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0108[] = "%sELS response completion error Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0108 = { + FC_LOG_MSG_EL_0108, + fc_mes0108, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0109 +message: ELS response completion +descript: An ELS response sent in response to a received ELS command + completed successfully. 
+data: (1) nlp_DID (2) nlp_type (3) nlp_flag (4) nlp_state +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0109[] = "%sELS response completion Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0109 = { + FC_LOG_MSG_EL_0109, + fc_mes0109, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0110 +message: ELS command completion error +descript: Adapter has notified the driver of ELS command completion. +data: (1) command (2) ulpStatus (3) ulpWord[4] (4) ulpWord[5] +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0110[] = "%sELS command completion error Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0110 = { + FC_LOG_MSG_EL_0110, + fc_mes0110, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0111 +message: Unknown ELS command +descript: Received an unknown ELS command completion. +data: None +severity: Error +log: Always +module: fcelsb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. 
+*/ +char fc_mes0111[] = "%sUnknown ELS command x%x"; +msgLogDef fc_msgBlk0111 = { + FC_LOG_MSG_EL_0111, + fc_mes0111, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0112 +message: PLOGI completes successfully +descript: A PLOGI to a Fibre Channel NPORT completed successfully +data: (1) remoteID (2) ulpWord[4] (3) ulpWord[5] (4) fc_ffstate +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0112[] = "%sPLOGI completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0112 = { + FC_LOG_MSG_EL_0112, + fc_mes0112, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0113 +message: PRLI completes successfully +descript: A PRLI to a FCP target completed successfully. +data: (1) remoteID (2) ulpWord[4] (3) ulpWord[5] (4) fc_ffstate +severity: Information +log: LOG_ELS & LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0113[] = "%sPRLI completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0113 = { + FC_LOG_MSG_EL_0113, + fc_mes0113, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS | LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0114 +message: PRLO completes successfully +descript: A PRLO to a FCP target completed successfully. +data: (1) remoteID (2) ulpWord[4] (3) ulpWord[5] (4) fc_ffstate +severity: Information +log: LOG_ELS & LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. 
+*/ +char fc_mes0114[] = "%sPRLO completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0114 = { + FC_LOG_MSG_EL_0114, + fc_mes0114, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0115 +message: LOGO completes successfully +descript: A LOGO to a FCP target completed successfully. +data: (1) remoteID (2) ulpWord[4] (3) ulpWord[5] (4) fc_ffstate +severity: Information +log: LOG_ELS & LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0115[] = "%sLOGO completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0115 = { + FC_LOG_MSG_EL_0115, + fc_mes0115, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0116 +message: PDISC completes successfully +descript: A PDISC to a FCP target completed successfully. +data: (1) remoteID (2) ulpWord[4] (3) ulpWord[5] (4) fc_ffstate +severity: Information +log: LOG_ELS & LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0116[] = "%sPDISC completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0116 = { + FC_LOG_MSG_EL_0116, + fc_mes0116, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0117 +message: ADISC completes successfully +descript: An ADISC to a FCP target completed successfully. +data: (1) remoteID (2) ulpWord[4] (3) ulpWord[5] (4) fc_ffstate +severity: Information +log: LOG_ELS or LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. 
+*/ +char fc_mes0117[] = "%sADISC completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0117 = { + FC_LOG_MSG_EL_0117, + fc_mes0117, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0118 +message: FARP completes successfully +descript: A FARP completed successfully. +data: (1) remoteID (2) ulpWord[4] (3) ulpWord[5] (4) command +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0118[] = "%sFARP completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0118 = { + FC_LOG_MSG_EL_0118, + fc_mes0118, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0119 +message: SCR completes successfully +descript: A SCR completed successfully. +data: (1) remoteID (2) ulpWord[4] (3) ulpWord[5] (4) fc_ffstate +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0119[] = "%sSCR completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0119 = { + FC_LOG_MSG_EL_0119, + fc_mes0119, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0120 +message: RNID completes successfully +descript: A RNID completed successfully. +data: (1) remoteID (2) ulpWord[4] (3) ulpWord[5] (4) fc_ffstate +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0120[] = "%sRNID completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0120 = { + FC_LOG_MSG_EL_0120, + fc_mes0120, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0121 +message: Unknown ELS command completed +descript: Received an unknown ELS command completion. 
+data: None +severity: Error +log: Always +module: fcelsb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0121[] = "%sUnknown ELS command x%x completed"; +msgLogDef fc_msgBlk0121 = { + FC_LOG_MSG_EL_0121, + fc_mes0121, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0122 +message: Unknown ELS IOCB +descript: An unknown IOCB command completed in the ELS ring +data: (1) ulpCommand +severity: Error +log: Always +module: fcelsb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0122[] = "%sUnknown ELS IOCB Data: x%x"; +msgLogDef fc_msgBlk0122 = { + FC_LOG_MSG_EL_0122, + fc_mes0122, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0123 +message: Received ELS command +descript: An ELS command was received. +data: (1) ulpWord[5] (2) ulpStatus (3) fc_ffstate +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0123[] = "%sReceived ELS command x%x Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0123 = { + FC_LOG_MSG_EL_0123, + fc_mes0123, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0124 +message: An FLOGI ELS command was received from DID in Loop Mode +descript: While in Loop Mode an unknown or unsupported ELS command + was received. 
+data: None +severity: Error +log: Always +module: fcelsb.c +action: Check device DID +*/ +char fc_mes0124[] = "%sAn FLOGI ELS command x%x was received from DID x%x in Loop Mode"; +msgLogDef fc_msgBlk0124 = { + FC_LOG_MSG_EL_0124, + fc_mes0124, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0125 +message: Received PLOGI command +descript: A PLOGI command was received. +data: (1) nlp_DID (2) nlp_state (3) nlp_flag (4) nlp_Rpi +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0125[] = "%sReceived PLOGI command Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0125 = { + FC_LOG_MSG_EL_0125, + fc_mes0125, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0126 +message: PLOGI chkparm OK +descript: Received a PLOGI from a remote NPORT and its Fibre Channel service + parameters match this HBA. Request can be accepted. +data: (1) nlp_DID (2) nlp_state (3) nlp_flag (4) nlp_Rpi +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0126[] = "%sPLOGI chkparm OK Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0126 = { + FC_LOG_MSG_EL_0126, + fc_mes0126, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0127 +message: Unknown ELS command received from NPORT +descript: Received an unsupported ELS command from a remote NPORT. +data: None +severity: Error +log: Always +module: fcelsb.c +action: Check remote NPORT for potential problem. 
+*/ +char fc_mes0127[] = "%sUnknown ELS command x%x received from NPORT x%x"; +msgLogDef fc_msgBlk0127 = { + FC_LOG_MSG_EL_0127, + fc_mes0127, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0128 +message: Xmit unknown ELS command +descript: The Fibre Channel driver is attempting to send an + unsupported or unknown ELS command. +data: None +severity: Error +log: Always +module: fcelsb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0128[] = "%sXmit unknown ELS command x%x"; +msgLogDef fc_msgBlk0128 = { + FC_LOG_MSG_EL_0128, + fc_mes0128, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0129 +message: Xmit ELS command to remote NPORT +descript: Xmit ELS command to remote NPORT +data: (1) icmd->ulpIoTag (2) binfo->fc_ffstate +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0129[] = "%sXmit ELS command x%x to remote NPORT x%x Data: x%x x%x"; +msgLogDef fc_msgBlk0129 = { + FC_LOG_MSG_EL_0129, + fc_mes0129, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0130 +message: Xmit unknown ELS response (elsCmd) +descript: The Fibre Channel driver is attempting to send an + unsupported or unknown ELS response. +data: None +severity: Error +log: Always +module: fcelsb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. 
+*/ +char fc_mes0130[] = "%sXmit unknown ELS response x%x"; +msgLogDef fc_msgBlk0130 = { + FC_LOG_MSG_EL_0130, + fc_mes0130, + fc_msgPreambleELe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0131 +message: Xmit ELS response to remote NPORT +descript: Xmit ELS response to remote NPORT +data: (1) icmd->ulpIoTag (2) size +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0131[] = "%sXmit ELS response x%x to remote NPORT x%x Data: x%x x%x"; +msgLogDef fc_msgBlk0131 = { + FC_LOG_MSG_EL_0131, + fc_mes0131, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0132 +message: ELS Retry failed +descript: If an ELS command fails, it may be retried up + to 3 times. This message will be recorded if + the driver gives up retrying a specific ELS + command. +data: (1) ELS command, (2) remote PortID +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: If the ELS command is a PRLI, and the destination + PortID is not an FCP Target, no action is required. + Otherwise, check physical connections to Fibre + Channel network and the state of the remote PortID. +*/ +char fc_mes0132[] = "%sELS Retry failed Data: x%x x%x"; +msgLogDef fc_msgBlk0132 = { + FC_LOG_MSG_EL_0132, + fc_mes0132, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0133 +message: Xmit CT response on exchange +descript: Xmit a CT response on the appropriate exchange. 
+data: (1) ulpIoTag (2) fc_ffstate +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0133[] = "%sXmit CT response on exchange x%x Data: x%x x%x"; +msgLogDef fc_msgBlk0133 = { + FC_LOG_MSG_EL_0133, + fc_mes0133, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0134 +message: Issue GEN REQ IOCB for NPORT +descript: Issue a GEN REQ IOCB for remote NPORT. These are typically + used for CT request. +data: (1) ulpIoTag (2) fc_ffstate +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0134[] = "%sIssue GEN REQ IOCB for NPORT x%x Data: x%x x%x"; +msgLogDef fc_msgBlk0134 = { + FC_LOG_MSG_EL_0134, + fc_mes0134, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0135 +message: Issue GEN REQ IOCB for RNID +descript: Issue a GEN REQ IOCB to support an ELS RNID command +data: (1) ulpWord[5] (2) ulpIoTag (3) fc_ffstate +severity: Information +log: LOG_ELS verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0135[] = "%sIssue GEN REQ IOCB for RNID Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0135 = { + FC_LOG_MSG_EL_0135, + fc_mes0135, + fc_msgPreambleELi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0136 +message: Delayxmit ELS command timeout +descript: The delay for issuing an ELS command has expired. The ELS + command is queued to HBA to be xmitted. 
+data: (1) ulpIoTag (2) retry (3) remoteID
+severity: Information
+log: LOG_ELS verbose
+module: fcscsib.c
+action: None required
+*/
+char fc_mes0136[] = "%sDelayxmit ELS command x%x timeout Data: x%x x%x x%x";
+msgLogDef fc_msgBlk0136 = {
+ FC_LOG_MSG_EL_0136,
+ fc_mes0136,
+ fc_msgPreambleELi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_ELS,
+ ERRID_LOG_TIMEOUT };
+
+/*
+ * Begin DISCOVERY LOG Message Structures
+ */
+
+/*
+msgName: fc_mes0200
+message: Device Discovery Started
+descript: Device discovery / rediscovery after FLOGI or FAN has started.
+data: None
+severity: Information
+log: LOG_DISCOVERY verbose
+module: fcrpib.c
+action: None required
+*/
+char fc_mes0200[] = "%sDevice Discovery Started";
+msgLogDef fc_msgBlk0200 = {
+ FC_LOG_MSG_DI_0200,
+ fc_mes0200,
+ fc_msgPreambleDIi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_DISCOVERY,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0201
+message: Device Discovery completion error
+descript: This indicates an uncorrectable error was encountered
+ during device (re)discovery after a link up. Fibre
+ Channel devices will not be accessible if this message
+ is displayed.
+data: None
+severity: Error
+log: Always
+module: fcrpib.c
+action: Reboot system. If problem persists, contact Technical
+ Support. Run with verbose mode on for more details.
+*/
+char fc_mes0201[] = "%sDevice Discovery completion error";
+msgLogDef fc_msgBlk0201 = {
+ FC_LOG_MSG_DI_0201,
+ fc_mes0201,
+ fc_msgPreambleDIe,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_ERR,
+ LOG_DISCOVERY,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0202
+message: Device Discovery Started
+descript: Device discovery / rediscovery after FLOGI or FAN has started.
+data: None
+severity: Information
+log: LOG_DISCOVERY verbose
+module: fcrpib.c
+action: None required
+*/
+char fc_mes0202[] = "%sDevice Discovery Started";
+msgLogDef fc_msgBlk0202 = {
+ FC_LOG_MSG_DI_0202,
+ fc_mes0202,
+ fc_msgPreambleDIi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_DISCOVERY,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0203
+message: Device Discovery continues
+descript: Device discovery in process
+data: (1) firstndlp (2) fc_ffstate
+severity: Information
+log: LOG_DISCOVERY verbose
+module: fcrpib.c
+action: None required
+*/
+char fc_mes0203[] = "%sDevice Discovery continues Data: x%x x%x";
+msgLogDef fc_msgBlk0203 = {
+ FC_LOG_MSG_DI_0203,
+ fc_mes0203,
+ fc_msgPreambleDIi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_DISCOVERY,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0204
+message: Device Discovery completion error
+descript: This indicates an uncorrectable error was encountered
+ during device (re)discovery after a link up. Fibre
+ Channel devices will not be accessible if this message
+ is displayed.
+data: None
+severity: Error
+log: Always
+module: fcrpib.c
+action: Reboot system. If problem persists, contact Technical
+ Support. Run with verbose mode on for more details.
+*/
+char fc_mes0204[] = "%sDevice Discovery completion error";
+msgLogDef fc_msgBlk0204 = {
+ FC_LOG_MSG_DI_0204,
+ fc_mes0204,
+ fc_msgPreambleDIe,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_ERR,
+ LOG_DISCOVERY,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0205
+message: Device Discovery authentication
+descript: The driver has marked NPORTs in its node table that require ADISC
+ for authentication.
+data: (1) cnt (2) cnt1 (3) cnt2 +severity: Information +log: LOG_DISCOVERY verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0205[] = "%sDevice Discovery authentication Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0205 = { + FC_LOG_MSG_DI_0205, + fc_mes0205, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0206 +message: Device Discovery completion error +descript: This indicates an uncorrectable error was encountered + during device (re)discovery after a link up. Fibre + Channel devices will not be accessible if this message + is displayed. +data: (1) ulpStatus (2) ulpWord[4] (3) ulpWord[5] +severity: Error +log: Always +module: fcelsb.c +action: Reboot system. If problem persists, contact Technical + Support. Run with verbose mode on for more details. +*/ +char fc_mes0206[] = "%sDevice Discovery completion error Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0206 = { + FC_LOG_MSG_DI_0206, + fc_mes0206, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0207 +message: Device Discovery completion error +descript: This indicates an uncorrectable error was encountered + during device (re)discovery after a link up. Fibre + Channel devices will not be accessible if this message + is displayed. +data: None +severity: Error +log: Always +module: fcelsb.c +action: Reboot system. If problem persists, contact Technical + Support. Run with verbose mode on for more details. +*/ +char fc_mes0207[] = "%sDevice Discovery completion error"; +msgLogDef fc_msgBlk0207 = { + FC_LOG_MSG_DI_0207, + fc_mes0207, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0208 +message: FLOGI completes successfully +descript: Fabric Login completed successfully. 
+data: (1) ulpWord[4] (2) e_d_tov (3) r_a_tov (4) edtovResolution +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0208[] = "%sFLOGI completes successfully Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0208 = { + FC_LOG_MSG_DI_0208, + fc_mes0208, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0209 +message: Device Discovery completes +descript: This indicates successful completion of device + (re)discovery after a link up. +data: None +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0209[] = "%sDevice Discovery completes"; +msgLogDef fc_msgBlk0209 = { + FC_LOG_MSG_DI_0209, + fc_mes0209, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0210 +message: PRLI target assigned +descript: The driver has assigned a SCSI ID to the FCP target. +data: (1) ulpWord[5] (2) nlp_pan (3) nlp_sid +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0210[] = "%sPRLI target assigned Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0210 = { + FC_LOG_MSG_DI_0210, + fc_mes0210, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0211 +message: Received RSCN command +descript: The driver has received an RSCN command from the fabric. This + indicates a device was potentially added or removed from the + Fibre Channel network. 
+data: (1) fc_flag (2) defer_rscn.q_cnt (3) fc_rscn.q_cnt (4) fc_mbox_active +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0211[] = "%sReceived RSCN command Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0211 = { + FC_LOG_MSG_DI_0211, + fc_mes0211, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0212 +message: Device Discovery completes +descript: This indicates successful completion of device + (re)discovery after a link up. +data: None +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0212[] = "%sDevice Discovery completes"; +msgLogDef fc_msgBlk0212 = { + FC_LOG_MSG_DI_0212, + fc_mes0212, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0213 +message: FAN received +descript: A FAN ELS command was received from a Fabric. +data: (1) ulpWord[4] (2) fc_ffstate +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0213[] = "%sFAN received Data: x%x x%x"; +msgLogDef fc_msgBlk0213 = { + FC_LOG_MSG_DI_0213, + fc_mes0213, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0214 +message: RSCN received +descript: A RSCN ELS command was received from a Fabric. +data: (1) fc_flag (2) i (3) *lp (4) fc_rscn_id_cnt +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. 
+*/ +char fc_mes0214[] = "%sRSCN received Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0214 = { + FC_LOG_MSG_DI_0214, + fc_mes0214, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0215 +message: RSCN processed +descript: A RSCN ELS command was received from a Fabric and processed. +data: (1) fc_flag (2) cnt (3) fc_rscn_id_cnt (4) fc_ffstate +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0215[] = "%sRSCN processed Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0215 = { + FC_LOG_MSG_DI_0215, + fc_mes0215, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0216 +message: Unknown Identifier in RSCN payload +descript: Typically the identifier in the RSCN payload specifies + a domain, area or a specific NportID. If neither of + these are specified, a warning will be recorded. +data: (1) didp->un.word +severity: Error +log: Always +module: fcelsb.c +action: Potential problem with Fabric. Check with Fabric vendor. +*/ +char fc_mes0216[] = "%sUnknown Identifier in RSCN payload Data: x%x"; +msgLogDef fc_msgBlk0216 = { + FC_LOG_MSG_DI_0216, + fc_mes0216, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0217 +message: Device Discovery completion error +descript: This indicates an uncorrectable error was encountered + during device (re)discovery after a link up. Fibre + Channel devices will not be accessible if this message + is displayed. +data: None +severity: Error +log: Always +module: fcelsb.c +action: Reboot system. If problem persists, contact Technical + Support. Run with verbose mode on for more details. 
+*/ +char fc_mes0217[] = "%sDevice Discovery completion error"; +msgLogDef fc_msgBlk0217 = { + FC_LOG_MSG_DI_0217, + fc_mes0217, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0218 +message: FDMI Request +descript: The driver is sending an FDMI request to the fabric. +data: (1) cmdcode (2) fc_flag +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required. +*/ +char fc_mes0218[] = "%sFDMI Req Data: x%x x%x"; +msgLogDef fc_msgBlk0218 = { + FC_LOG_MSG_DI_0218, + fc_mes0218, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + + +/* +msgName: fc_mes0219 +message: Issue FDMI request failed +descript: Cannot issue FDMI request to HBA. +data: (1) SLI_MGMT_DPRT +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0219[] = "%sIssue FDMI request failed Data: x%x"; +msgLogDef fc_msgBlk0219 = { + FC_LOG_MSG_DI_0219, + fc_mes0219, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0220 +message: Device Discovery completion error +descript: This indicates an uncorrectable error was encountered + during device (re)discovery after a link up. Fibre + Channel devices will not be accessible if this message + is displayed. +data: None +severity: Error +log: Always +module: fcscsib.c +action: Reboot system. If problem persists, contact Technical + Support. Run with verbose mode on for more details. 
+*/
+char fc_mes0220[] = "%sDevice Discovery completion error";
+msgLogDef fc_msgBlk0220 = {
+ FC_LOG_MSG_DI_0220,
+ fc_mes0220,
+ fc_msgPreambleDIe,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_ERR,
+ LOG_DISCOVERY,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0221
+message: FAN timeout
+descript: A link up event was received without the login bit set,
+ so the driver waits E_D_TOV for the Fabric to send a FAN.
+ If no FAN is received, a FLOGI will be sent after the timeout.
+data: None
+severity: Warning
+log: LOG_DISCOVERY verbose
+module: fcscsib.c
+action: None required. The driver recovers from this condition by
+ issuing a FLOGI to the Fabric.
+*/
+char fc_mes0221[] = "%sFAN timeout";
+msgLogDef fc_msgBlk0221 = {
+ FC_LOG_MSG_DI_0221,
+ fc_mes0221,
+ fc_msgPreambleDIw,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_WARN,
+ LOG_DISCOVERY,
+ ERRID_LOG_TIMEOUT };
+
+/*
+msgName: fc_mes0222
+message: Initial FLOGI timeout
+descript: The driver is sending initial FLOGI to fabric.
+data: None
+severity: Error
+log: Always
+module: fcscsib.c
+action: Check Fabric configuration. The driver recovers from this and
+ continues with device discovery.
+*/
+char fc_mes0222[] = "%sInitial FLOGI timeout";
+msgLogDef fc_msgBlk0222 = {
+ FC_LOG_MSG_DI_0222,
+ fc_mes0222,
+ fc_msgPreambleDIe,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_ERR,
+ LOG_DISCOVERY,
+ ERRID_LOG_TIMEOUT };
+
+/*
+msgName: fc_mes0223
+message: NameServer Registration timeout
+descript: Our registration request to the Fabric was not acknowledged
+ within RATOV.
+data: (1) fc_ns_retry (2) fc_max_ns_retry
+severity: Error
+log: Always
+module: fcscsib.c
+action: Check Fabric configuration. The driver recovers from this and
+ continues with device discovery.
+*/ +char fc_mes0223[] = "%sNameServer Registration timeout Data: x%x x%x"; +msgLogDef fc_msgBlk0223 = { + FC_LOG_MSG_DI_0223, + fc_mes0223, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0224 +message: NameServer Query timeout +descript: Node authentication timeout, node Discovery timeout. A NameServer + Query to the Fabric or discovery of reported remote NPorts is not + acknowledged within R_A_TOV. +data: (1) fc_ns_retry (2) fc_max_ns_retry +severity: Error +log: Always +module: fcscsib.c +action: Check Fabric configuration. The driver recovers from this and + continues with device discovery. +*/ +char fc_mes0224[] = "%sNameServer Query timeout Data: x%x x%x"; +msgLogDef fc_msgBlk0224 = { + FC_LOG_MSG_DI_0224, + fc_mes0224, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0225 +message: Device Discovery completes +descript: This indicates successful completion of device + (re)discovery after a link up. +data: None +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0225[] = "%sDevice Discovery completes"; +msgLogDef fc_msgBlk0225 = { + FC_LOG_MSG_DI_0225, + fc_mes0225, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0226 +message: Device Discovery completion error +descript: This indicates an uncorrectable error was encountered + during device (re)discovery after a link up. Fibre + Channel devices will not be accessible if this message + is displayed. +data: None +severity: Error +log: Always +module: fcscsib.c +action: Reboot system. If problem persists, contact Technical + Support. Run with verbose mode on for more details. 
+*/ +char fc_mes0226[] = "%sDevice Discovery completion error"; +msgLogDef fc_msgBlk0226 = { + FC_LOG_MSG_DI_0226, + fc_mes0226, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0227 +message: Node Authentication timeout +descript: The driver has lost track of what NPORTs are being authenticated. +data: None +severity: Error +log: Always +module: fcscsib.c +action: None required. Driver should recover from this event. +*/ +char fc_mes0227[] = "%sNode Authentication timeout"; +msgLogDef fc_msgBlk0227 = { + FC_LOG_MSG_DI_0227, + fc_mes0227, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0228 +message: Node Discovery timeout +descript: The driver has lost track of what NPORTs are being discovered. +data: None +severity: Error +log: Always +module: fcscsib.c +action: None required. Driver should recover from this event. +*/ +char fc_mes0228[] = "%sNode Discovery timeout"; +msgLogDef fc_msgBlk0228 = { + FC_LOG_MSG_DI_0228, + fc_mes0228, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0229 +message: Node Discovery timeout +descript: The driver has lost track of what NPORTs are being discovered. +data: (1) nlp_DID (2) nlp_flag (3) nlp_state (4) nlp_type +severity: Error +log: Always +module: fcscsib.c +action: None required. Driver should recover from this event. +*/ +char fc_mes0229[] = "%sNode Discovery timeout Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0229 = { + FC_LOG_MSG_DI_0229, + fc_mes0229, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0230 +message: Device Discovery completion error +descript: This indicates an uncorrectable error was encountered + during device (re)discovery after a link up. 
Fibre + Channel devices will not be accessible if this message + is displayed. +data: None +severity: Error +log: Always +module: fcscsib.c +action: Reboot system. If problem persists, contact Technical + Support. Run with verbose mode on for more details. +*/ +char fc_mes0230[] = "%sDevice Discovery completion error"; +msgLogDef fc_msgBlk0230 = { + FC_LOG_MSG_DI_0230, + fc_mes0230, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0231 +message: RSCN timeout +descript: The driver has lost track of what NPORTs have RSCNs pending. +data: (1) fc_ns_retry (2) fc_max_ns_retry +severity: Error +log: Always +module: fcscsib.c +action: None required. Driver should recover from this event. +*/ +char fc_mes0231[] = "%sRSCN timeout Data: x%x x%x"; +msgLogDef fc_msgBlk0231 = { + FC_LOG_MSG_DI_0231, + fc_mes0231, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0232 +message: Node RSCN timeout +descript: The driver is cleaning up the node table entry for a node + that had a pending RSCN. +data: (1) nlp_DID (2) nlp_flag (3) nlp_state (4) nlp_type +severity: Error +log: Always +module: fcscsib.c +action: None required. Driver should recover from this event. +*/ +char fc_mes0232[] = "%sNode RSCN timeout Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0232 = { + FC_LOG_MSG_DI_0232, + fc_mes0232, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0233 +message: PT2PT link up timeout +descript: A PLOGI has not been received, within R_A_TOV, after a + successful FLOGI, which indicates our topology is + point-to-point with another NPort. Typically this PLOGI + is used to assign a NPortID. +data: None +severity: Warning +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required. Driver will recover by configuring NPortID as 0. 
+*/ +char fc_mes0233[] = "%sPT2PT link up timeout"; +msgLogDef fc_msgBlk0233 = { + FC_LOG_MSG_DI_0233, + fc_mes0233, + fc_msgPreambleDIw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_DISCOVERY, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0234 +message: Device Discovery completes +descript: This indicates successful completion of device + (re)discovery after a link up. +data: None +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0234[] = "%sDevice Discovery completes"; +msgLogDef fc_msgBlk0234 = { + FC_LOG_MSG_DI_0234, + fc_mes0234, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0235 +message: Device Discovery completion error +descript: This indicates an uncorrectable error was encountered + during device (re)discovery after a link up. Fibre + Channel devices will not be accessible if this message + is displayed. +data: None +severity: Error +log: Always +module: fcscsib.c +action: Reboot system. If problem persists, contact Technical + Support. Run with verbose mode on for more details. +*/ +char fc_mes0235[] = "%sDevice Discovery completion error"; +msgLogDef fc_msgBlk0235 = { + FC_LOG_MSG_DI_0235, + fc_mes0235, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0236 +message: NameServer Req +descript: The driver is issuing a nameserver request to the fabric. 
+data: (1) cmdcode (2) fc_flag (3) fc_rscn_id_cnt +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0236[] = "%sNameServer Req Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0236 = { + FC_LOG_MSG_DI_0236, + fc_mes0236, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0237 +message: Unknown Identifier in RSCN list +descript: A RSCN list entry contains an unknown identifier. +data: (1) rscn_did.un.word +severity: Error +log: Always +module: fcscsib.c +action: Potential problem with Fabric. Check with Fabric vendor. +*/ +char fc_mes0237[] = "%sUnknown Identifier in RSCN list Data: x%x"; +msgLogDef fc_msgBlk0237 = { + FC_LOG_MSG_DI_0237, + fc_mes0237, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0238 +message: NameServer Rsp +descript: The driver received a nameserver response. +data: (1) Did (2) nlp_flag (3) fc_flag (4) fc_rscn_id_cnt +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0238[] = "%sNameServer Rsp Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0238 = { + FC_LOG_MSG_DI_0238, + fc_mes0238, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0239 +message: NameServer Rsp +descript: The driver received a nameserver response. 
+data: (1) Did (2) ndlp (3) fc_flag (4) fc_rscn_id_cnt
+severity: Information
+log: LOG_DISCOVERY verbose
+module: fcscsib.c
+action: None required
+*/
+char fc_mes0239[] = "%sNameServer Rsp Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0239 = {
+ FC_LOG_MSG_DI_0239,
+ fc_mes0239,
+ fc_msgPreambleDIi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_DISCOVERY,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0240
+message: NameServer Rsp Error
+descript: The driver received a nameserver response containing a status error.
+data: (1) CommandResponse.bits.CmdRsp (2) ReasonCode (3) Explanation
+ (4) fc_flag
+severity: Information
+log: LOG_DISCOVERY verbose
+module: fcscsib.c
+action: Check Fabric configuration. The driver recovers from this and
+ continues with device discovery.
+*/
+char fc_mes0240[] = "%sNameServer Rsp Error Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0240 = {
+ FC_LOG_MSG_DI_0240,
+ fc_mes0240,
+ fc_msgPreambleDIi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_DISCOVERY,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0241
+message: NameServer Rsp Error
+descript: The driver received a nameserver response containing a status error.
+data: (1) CommandResponse.bits.CmdRsp (2) ReasonCode (3) Explanation
+ (4) fc_flag
+severity: Information
+log: LOG_DISCOVERY verbose
+module: fcscsib.c
+action: Check Fabric configuration. The driver recovers from this and
+ continues with device discovery.
+*/
+char fc_mes0241[] = "%sNameServer Rsp Error Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0241 = {
+ FC_LOG_MSG_DI_0241,
+ fc_mes0241,
+ fc_msgPreambleDIi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_DISCOVERY,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0242
+message: Device Discovery nextnode
+descript: The driver continuing with discovery.
+data: (1) nlp_state (2) nlp_DID (3) nlp_flag (4) fc_ffstate +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0242[] = "%sDevice Discovery nextnode Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0242 = { + FC_LOG_MSG_DI_0242, + fc_mes0242, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0243 +message: Device Discovery nextdisc +descript: The driver continuing with NPORT discovery. +data: (1) fc_nlp_cnt (2) sndcnt (3) fc_mbox_active +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0243[] = "%sDevice Discovery nextdisc Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0243 = { + FC_LOG_MSG_DI_0243, + fc_mes0243, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0244 +message: Device Discovery completion error +descript: This indicates an uncorrectable error was encountered + during device (re)discovery after a link up. Fibre + Channel devices will not be accessible if this message + is displayed. +data: None +severity: Error +log: Always +module: fcscsib.c +action: Reboot system. If problem persists, contact Technical + Support. Run with verbose mode on for more details. +*/ +char fc_mes0244[] = "%sDevice Discovery completion error"; +msgLogDef fc_msgBlk0244 = { + FC_LOG_MSG_DI_0244, + fc_mes0244, + fc_msgPreambleDIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0245 +message: Device Discovery next authentication +descript: The driver is continuing with NPORT authentication. 
+data: (1) fc_nlp_cnt (2) sndcnt (3) fc_mbox_active +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0245[] = "%sDevice Discovery next authentication Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0245 = { + FC_LOG_MSG_DI_0245, + fc_mes0245, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0246 +message: Device Discovery next RSCN +descript: The driver is continuing with RSCN processing. +data: (1) fc_nlp_cnt (2) sndcnt (3) fc_mbox_active (4) fc_flag +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0246[] = "%sDevice Discovery next RSCN Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0246 = { + FC_LOG_MSG_DI_0246, + fc_mes0246, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0247 +message: Discovery RSCN +descript: The number / type of RSCNs has forced the driver to go to + the nameserver and re-discover all NPORTs. +data: (1) fc_defer_rscn.q_cnt (2) fc_flag (3) fc_rscn_disc_wdt +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0247[] = "%sDiscovery RSCN Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0247 = { + FC_LOG_MSG_DI_0247, + fc_mes0247, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0248 +message: Deferred RSCN +descript: The driver has received multiple RSCNs and has deferred the + processing of the most recent RSCN. 
+data: (1) fc_defer_rscn.q_cnt (2) fc_flag +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0248[] = "%sDeferred RSCN Data: x%x x%x"; +msgLogDef fc_msgBlk0248 = { + FC_LOG_MSG_DI_0248, + fc_mes0248, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0249 +message: Device Discovery completes +descript: This indicates successful completion of device + (re)discovery after a link up. +data: (1) fc_flag +severity: Information +log: LOG_DISCOVERY verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0249[] = "%sDevice Discovery completes Data: x%x"; +msgLogDef fc_msgBlk0249 = { + FC_LOG_MSG_DI_0249, + fc_mes0249, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0250 +message: Pending Link Event during Discovery +descript: Received link event during discovery. Causes discovery restart. +data: (1) ulpCommand (2) ulpIoTag (3) ulpStatus (4) ulpWord[4] +severity: Warning +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: None required unless problem persist. If problems persist, check + cabling. +*/ +char fc_mes0250[] = "%sPending Link Event during Discovery Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0250 = { + FC_LOG_MSG_DI_0250, + fc_mes0250, + fc_msgPreambleDIw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0251 +message: FDMI rsp failed +descript: An error response was received to FDMI request +data: (1) SWAP_DATA16(fdmi_cmd) +severity: Information +log: LOG_DISCOVERY verbose +module: fcelsb.c +action: The fabric does not support FDMI, check fabric configuration. 
+*/ +char fc_mes0251[] = "%sFDMI rsp failed Data: x%x"; +msgLogDef fc_msgBlk0251 = { + FC_LOG_MSG_DI_0251, + fc_mes0251, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0252 +message: EXPIRED RSCN disc timer +descript: The driver timed out when processing an RSCN command from the + fabric. +data: (1) fc_flag +severity: Information +log: LOG_DISCOVERY | LOG_ELS verbose +module: fcscsib.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0252[] = "%sEXPIRED RSCN disc timer Data: x%x"; +msgLogDef fc_msgBlk0252 = { + FC_LOG_MSG_DI_0252, + fc_mes0252, + fc_msgPreambleDIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_DISCOVERY | LOG_ELS, + ERRID_LOG_UNEXPECT_EVENT }; + +/* + * Begin MAILBOX LOG Message Structures + */ + +/* +msgName: fc_mes0300 +message: READ_LA: no buffers +descript: The driver attempted to issue READ_LA mailbox command to the HBA + but there were no buffer available. +data: None +severity: Warning +log: LOG_MBOX verbose +module: fcmboxb.c +action: This message indicates (1) a possible lack of memory resources. Try + increasing the lpfc 'num_bufs' configuration parameter to allocate + more buffers. (2) A possble driver buffer management problem. If + this problem persists, report these errors to Technical Support. +*/ +char fc_mes0300[] = "%sREAD_LA: no buffers"; +msgLogDef fc_msgBlk0300 = { + FC_LOG_MSG_MB_0300, + fc_mes0300, + fc_msgPreambleMBw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0301 +message: READ_SPARAM: no buffers +descript: The driver attempted to issue READ_SPARAM mailbox command to the + HBA but there were no buffer available. 
+data: None +severity: Warning +log: LOG_MBOX verbose +module: fcmboxb.c +action: This message indicates (1) a possible lack of memory resources. Try + increasing the lpfc 'num_bufs' configuration parameter to allocate + more buffers. (2) A possble driver buffer management problem. If + this problem persists, report these errors to Technical Support. +*/ +char fc_mes0301[] = "%sREAD_SPARAM: no buffers"; +msgLogDef fc_msgBlk0301 = { + FC_LOG_MSG_MB_0301, + fc_mes0301, + fc_msgPreambleMBw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0302 +message: REG_LOGIN: no buffers +descript: The driver attempted to issue REG_LOGIN mailbox command to the HBA + but there were no buffer available. +data: None +severity: Warning +log: LOG_MBOX verbose +module: fcmboxb.c +action: This message indicates (1) a possible lack of memory resources. Try + increasing the lpfc 'num_bufs' configuration parameter to allocate + more buffers. (2) A possble driver buffer management problem. If + this problem persists, report these errors to Technical Support. +*/ +char fc_mes0302[] = "%sREG_LOGIN: no buffers Data x%x x%x"; +msgLogDef fc_msgBlk0302 = { + FC_LOG_MSG_MB_0302, + fc_mes0302, + fc_msgPreambleMBw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0303 +message: Adapter initialization error, mbxCmd READ_NVPARM, + mbxStatus +descript: A mailbox command failed during initialization. +data: None +severity: Error +log: Always +module: fcLINUX.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. 
+*/ +char fc_mes0303[] = "%sAdapter init error, mbxCmd x%x READ_NVPARM, mbxStatus x%x"; +msgLogDef fc_msgBlk0303 = { + FC_LOG_MSG_MB_0303, + fc_mes0303, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0304 +message: Stray Mailbox Interrupt, mbxCommand mbxStatus . +descript: Received a mailbox completion interrupt and there are no + outstanding mailbox commands. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0304[] = "%sStray Mailbox Interrupt mbxCommand x%x mbxStatus x%x"; +msgLogDef fc_msgBlk0304 = { + FC_LOG_MSG_MB_0304, + fc_mes0304, + fc_msgPreambleMBe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0305 +message: Mbox cmd cmpl error - RETRYing +descript: A mailbox command completed with an error status that causes the + driver to reissue the mailbox command. +data: (1) mbxCommand (2) word0 (3) fc_ffstate (4) fc_flag +severity: Information +log: LOG_MBOX verbose +module: lp6000.c +action: None required +*/ +char fc_mes0305[] = "%sMbox cmd cmpl error - RETRYing Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0305 = { + FC_LOG_MSG_MB_0305, + fc_mes0305, + fc_msgPreambleMBi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0306 +message: Mbox cmd cmpl error +descript: A mailbox command completed with an error status. 
+data: (1) mbxCommand (2) word0 (3) ff_state (4) fc_flag +severity: Information +log: LOG_MBOX verbose +module: lp6000.c +action: None required +*/ +char fc_mes0306[] = "%sMbox cmd cmpl error Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0306 = { + FC_LOG_MSG_MB_0306, + fc_mes0306, + fc_msgPreambleMBi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0307 +message: Mbox cmd cmpl +descript: A mailbox command completed.. +data: (1) mbxCommand (2) word0 (3) ff_state (4) fc_flag +severity: Information +log: LOG_MBOX verbose +module: lp6000.c +action: None required +*/ +char fc_mes0307[] = "%sMbox cmd cmpl Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0307 = { + FC_LOG_MSG_MB_0307, + fc_mes0307, + fc_msgPreambleMBi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0308 +message: Mbox cmd issue - BUSY +descript: The driver attempted to issue a mailbox command while the mailbox + was busy processing the previous command. The processing of the + new command will be deferred until the mailbox becomes available. +data: (1) mbxCommand (2) ff_state (3) fc_flag (4) flag +severity: Information +log: LOG_MBOX verbose +module: lp6000.c +action: None required +*/ +char fc_mes0308[] = "%sMbox cmd issue - BUSY Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0308 = { + FC_LOG_MSG_MB_0308, + fc_mes0308, + fc_msgPreambleMBi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0309 +message: Mailbox cmd issue +descript: The driver is in the process of issuing a mailbox command. 
+data: (1) ff_state (2) fc_flag (3) flag +severity: Information +log: LOG_MBOX verbose +module: lp6000.c +action: None required +*/ +char fc_mes0309[] = "%sMailbox cmd x%x issue Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0309 = { + FC_LOG_MSG_MB_0309, + fc_mes0309, + fc_msgPreambleMBi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0310 +message: Mailbox command timeout, status +descript: A Mailbox command was posted to the adapter and did + not complete within 30 seconds. +data: None +severity: Error +log: Always +module: fcscsib.c +action: This error could indicate a software driver or firmware + problem. If no I/O is going through the adapter, reboot + the system. If these problems persist, report these + errors to Technical Support. +*/ +char fc_mes0310[] = "%sMailbox command x%x timeout, status x%x"; +msgLogDef fc_msgBlk0310 = { + FC_LOG_MSG_MB_0310, + fc_mes0310, + fc_msgPreambleMBe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MBOX, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0311 +message: REG_LOGIN cmpl +descript: REG LOGIN mailbox command completed successfully. +data: (1) nlp_DID (2) nlp_state (3) nlp_flag (4) nlp_Rpi +severity: Information +log: LOG_MBOX verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0311[] = "%sREG_LOGIN cmpl Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0311 = { + FC_LOG_MSG_MB_0311, + fc_mes0311, + fc_msgPreambleMBi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0312 +message: Unknown Mailbox command completion +descript: An unsupported or illegal Mailbox command completed. +data: None +severity: Error +log: Always +module: fcscsib.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. 
+*/ +char fc_mes0312[] = "%sUnknown Mailbox command x%x completion"; +msgLogDef fc_msgBlk0312 = { + FC_LOG_MSG_MB_0312, + fc_mes0312, + fc_msgPreambleMBe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MBOX, + ERRID_LOG_UNEXPECT_EVENT }; + +/* + * Begin INIT LOG Message Structures + */ + +/* +msgName: fc_mes0400 +message: dfc_ioctl entry +descript: Entry point for processing diagnostic ioctl. +data: (1) c_cmd (2) c_arg1 (3) c_arg2 (4) c_outsz +severity: Information +log: LOG_INIT verbose +module: dfcdd.c +action: None required +*/ +char fc_mes0400[] = "%sdfc_ioctl entry Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0400 = { + FC_LOG_MSG_IN_0400, + fc_mes0400, + fc_msgPreambleINi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0401 +message: dfc_ioctl exit +descript: Exit point for processing diagnostic ioctl. +data: (1) rc (2) c_outsz (3) c_dataout +severity: Information +log: LOG_INIT verbose +module: dfcdd.c +action: None required +*/ +char fc_mes0401[] = "%sdfc_ioctl exit Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0401 = { + FC_LOG_MSG_IN_0401, + fc_mes0401, + fc_msgPreambleINi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0402 +message: dfc_data_alloc +descript: Allocating data buffer to process dfc ioct. +data: (1) fc_dataout (2) fc_outsz +severity: Iniformation +log: LOG_INIT verbose +module: dfcdd.c +action: None required +*/ +char fc_mes0402[] = "%sdfc_data_alloc Data: x%x x%x"; +msgLogDef fc_msgBlk0402 = { + FC_LOG_MSG_IN_0402, + fc_mes0402, + fc_msgPreambleINi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0403 +message: dfc_data_free +descript: Freeing data buffer to process dfc ioct. 
+data: (1) fc_dataout (2) fc_outsz +severity: Information +log: LOG_INIT verbose +module: dfcdd.c +action: None required +*/ +char fc_mes0403[] = "%sdfc_data_free Data: x%x x%x"; +msgLogDef fc_msgBlk0403 = { + FC_LOG_MSG_IN_0403, + fc_mes0403, + fc_msgPreambleINi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0404 +message: Service Level Interface (SLI) 1 selected +descript: A PART_SLIM (SLI1) mailbox command was issued. +data: None +severity: Information +log: LOG_INIT verbose +module: fcmboxb.c +action: None required. +*/ +char fc_mes0404[] = "%sService Level Interface (SLI) 1 selected"; +msgLogDef fc_msgBlk0404 = { + FC_LOG_MSG_IN_0404, + fc_mes0404, + fc_msgPreambleINi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0405 +message: Service Level Interface (SLI) 2 selected +descript: A CONFIG_PORT (SLI2) mailbox command was issued. +data: None +severity: Information +log: LOG_INIT verbose +module: fcmboxb.c +action: None required. +*/ +char fc_mes0405[] = "%sService Level Interface (SLI) 2 selected"; +msgLogDef fc_msgBlk0405 = { + FC_LOG_MSG_IN_0405, + fc_mes0405, + fc_msgPreambleINi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0406 +message: Memory Buffer Pool is below low water mark +descript: A driver memory buffer pool is low on buffers. +data: (1) seg (2) fc_lowmem (3) low +severity: Warning +log: LOG_INIT verbose +module: fcmemb.c +action: None required. Driver will recover as buffers are returned to pool. 
+*/ +char fc_mes0406[] = "%sMem Buf Pool is below low water mark Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0406 = { + FC_LOG_MSG_IN_0406, + fc_mes0406, + fc_msgPreambleINw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_INIT, + ERRID_LOG_NO_RESOURCE }; + +/* +msgName: fc_mes0407 +message: Memory Buffer Pool is corrupted +descript: The buffer address received from the pool is outside + the range of the pool and is therefore corrupt. +data: (1) seg (2) bp (3) fc_memhi (4) fc_memlo +severity: Error +log: Always +module: fcmemb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0407[] = "%sMemory Buffer Pool is corrupted Data x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0407 = { + FC_LOG_MSG_IN_0407, + fc_mes0407, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_NO_RESOURCE }; + +/* +msgName: fc_mes0408 +message: Memory Buffer Pool is corrupted +descript: The buffer address returned to the pool is outside + the range of the pool and is therefore corrupt. +data: (1) seg (2) bp (3) fc_memhi (4) fc_memlo +severity: Error +log: Always +module: fcmemb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0408[] = "%sMemory Buffer Pool is corrupted Data x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0408 = { + FC_LOG_MSG_IN_0408, + fc_mes0408, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_NO_RESOURCE }; + +/* +msgName: fc_mes0409 +message: Memory Buffer Pool is out of buffers +descript: A driver memory buffer pool is exhausted. +data: (1) seg (2) fc_free (3) fc_mbox.q_cnt (4) fc_memhi +severity: Error +log: Always +module: fcmemb.c +action: Configure more resources for that buffer pool. If + problems persist report these errors to Technical + Support. 
+*/ +char fc_mes0409[] = "%sMemory Buffer Pool is out of buffers Data x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0409 = { + FC_LOG_MSG_IN_0409, + fc_mes0409, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_NO_RESOURCE }; + +/* +msgName: fc_mes0410 +message: Cannot find virtual addr for mapped buf on ring +descript: The driver cannot find the specified buffer in its + mapping table. Thus it cannot find the virtual address + needed to access the data. +data: (1) mapbp (2) fc_mpoff (3) fc_mpon +severity: Error +log: Always +module: fcmemb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0410[] = "%sCannot find virtual addr for mapped buf on ring %d Data x%x x%x x%x"; +msgLogDef fc_msgBlk0410 = { + FC_LOG_MSG_IN_0410, + fc_mes0410, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_NO_RESOURCE }; + +/* +msgName: fc_mes0411 +message: Scan-down is 2 with Persistent binding - ignoring scan-down +descript: The configuration parameter for Scan-down conflicts with + Persistent binding parameter. +data: (1) a_current (2) fcp_mapping +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0411[] = "%sScan-down is 2 with Persistent binding - ignoring scan-down Data: x%x x%x"; +msgLogDef fc_msgBlk0411 = { + FC_LOG_MSG_IN_0411, + fc_mes0411, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0412 +message: Scan-down is out of range - ignoring scan-down +descript: The configuration parameter for Scan-down is out of range. +data: (1) clp[CFG_SCAN_DOWN].a_current (2) fcp_mapping +severity: Error +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. 
+*/ +char fc_mes0412[] = "%sScan-down is out of range - ignoring scan-down Data: x%x x%x"; +msgLogDef fc_msgBlk0412 = { + FC_LOG_MSG_IN_0412, + fc_mes0412, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0413 +message: Num-iocbs too low, resetting +descript: The configuration parameter for Num-iocs is too low, resetting + parameter to default value. +data: (1) a_current (2) LPFC_MIN_NUM_IOCBS +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0413[] = "%sNum-iocbs too low, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0413 = { + FC_LOG_MSG_IN_0413, + fc_mes0413, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0414 +message: Num-iocbs too high, resetting +descript: The configuration parameter for Num-iocs is too high, resetting + parameter to default value. +data: (1) clp[CFG_NUM_IOCBS].a_current (2) LPFC_MAX_NUM_IOCBS +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0414[] = "%sNum-iocbs too high, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0414 = { + FC_LOG_MSG_IN_0414, + fc_mes0414, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0415 +message: Num-bufs too low, resetting +descript: The configuration parameter for Num-bufs is too low, resetting + parameter to default value. +data: (1) a_current (2) LPFC_MIN_NUM_BUFS +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. 
+*/ +char fc_mes0415[] = "%sNum-bufs too low, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0415 = { + FC_LOG_MSG_IN_0415, + fc_mes0415, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0416 +message: Num-bufs too high, resetting +descript: The configuration parameter for Num-bufs is too high, resetting + parameter to default value. +data: (1) a_current (2) LPFC_MAX_NUM_BUFS +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0416[] = "%sNum-bufs too high, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0416 = { + FC_LOG_MSG_IN_0416, + fc_mes0416, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0417 +message: Target qdepth too high, resetting to max +descript: The configuration parameter for Target queue depth is too high, + resetting parameter to default value. +data: (1) a_current (2) LPFC_MAX_TGT_Q_DEPTH +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0417[] = "%sTarget qdepth too high, resetting to max Data: x%x x%x"; +msgLogDef fc_msgBlk0417 = { + FC_LOG_MSG_IN_0417, + fc_mes0417, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0418 +message: LUN qdepth too high, resetting to max +descript: The configuration parameter for LUN queue depth is too high, + resetting parameter to maximum default value. +data: (1) a_current (2) LPFC_MAX_LUN_Q_DEPTH +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. 
+*/ +char fc_mes0418[] = "%sLUN qdepth too high, resetting to max Data: x%x x%x"; +msgLogDef fc_msgBlk0418 = { + FC_LOG_MSG_IN_0418, + fc_mes0418, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0419 +message: LUN qdepth cannot be , resetting to 1 +descript: The configuration parameter for LUN queue depth is set to 0. + Resetting parameter to default value of 1. +data: (1) a_current +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0419[] = "%sLUN qdepth cannot be %d, resetting to 1"; +msgLogDef fc_msgBlk0419 = { + FC_LOG_MSG_IN_0419, + fc_mes0419, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0420 +message: Fcpfabric_tmo too high, resetting +descript: The configuration parameter for Fcpfabric_tmo is too high, + resetting parameter to default value. +data: (1) a_current (2) LPFC_MAX_FABRIC_TIMEOUT +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0420[] = "%sFcpfabric_tmo too high, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0420 = { + FC_LOG_MSG_IN_0420, + fc_mes0420, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0421 +message: Fcp-class is illegal, resetting to default +descript: The configuration parameter for Fcp-class is illegal, resetting + parameter to default value. +data: (1) a_current (2) CLASS3 +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. 
+*/ +char fc_mes0421[] = "%sFcp-class is illegal, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0421 = { + FC_LOG_MSG_IN_0421, + fc_mes0421, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0422 +message: No-device-delay too high, resetting to max +descript: The configuration parameter for No-device-delay is too high, + resetting parameter to maximum default value. +data: (1) a_current (2) LPFC_MAX_NO_DEVICE_DELAY +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0422[] = "%sNo-device-delay too high, resetting to max Data: x%x x%x"; +msgLogDef fc_msgBlk0422 = { + FC_LOG_MSG_IN_0422, + fc_mes0422, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0423 +message: Post_ip_buf too low, resetting +descript: The configuration parameter for Post_ip_buf is too low, resetting + parameter to default value. +data: (1) a_current (2) LPFC_MIN_POST_IP_BUF +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0423[] = "%sPost_ip_buf too low, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0423 = { + FC_LOG_MSG_IN_0423, + fc_mes0423, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0424 +message: Post_ip_buf too high, resetting +descript: The configuration parameter for Post_ip_buf is too high, resetting + parameter to default value. +data: (1) a_current (2) LPFC_MAX_POST_IP_BUF +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. 
+*/ +char fc_mes0424[] = "%sPost_ip_buf too high, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0424 = { + FC_LOG_MSG_IN_0424, + fc_mes0424, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0425 +message: Xmt-que_size too low, resetting +descript: The configuration parameter for Xmt-que_size is too low, resetting + parameter to default value. +data: (1) a_current (2) LPFC_MIN_XMT_QUE_SIZE +severity: Error config +log: Always +module: fcLINUXcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0425[] = "%sXmt-que_size too low, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0425 = { + FC_LOG_MSG_IN_0425, + fc_mes0425, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0426 +message: Xmt-que_size too high, resetting +descript: The configuration parameter for Xmt-que_size is too high, resetting + parameter to default value. +data: (1) a_current (2) LPFC_MAX_XMT_QUE_SIZE +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0426[] = "%sXmt-que_size too high, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0426 = { + FC_LOG_MSG_IN_0426, + fc_mes0426, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0427 +message: Ip-class is illegal, resetting +descript: The configuration parameter for Ip-class is illegal, resetting + parameter to default value. +data: (1) a_current (2) CLASS3 +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. 
+*/ +char fc_mes0427[] = "%sIp-class is illegal, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0427 = { + FC_LOG_MSG_IN_0427, + fc_mes0427, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0428 +message: Topology is illegal, resetting +descript: The configuration parameter for Topology is illegal, resetting + parameter to default value. +data: (1) a_current (2) LPFC_DFT_TOPOLOGY +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0428[] = "%sTopology is illegal, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0428 = { + FC_LOG_MSG_IN_0428, + fc_mes0428, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0429 +message: Linkdown_tmo too high, resetting +descript: The configuration parameter for Linkdown_tmo is too high, resetting + parameter to default value. +data: (1) a_current (2) LPFC_MAX_LNKDWN_TIMEOUT +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0429[] = "%sLinkdown_tmo too high, resetting Data: x%x x%x"; +msgLogDef fc_msgBlk0429 = { + FC_LOG_MSG_IN_0429, + fc_mes0429, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0430 +message: WWPN binding entry : Syntax error code +descript: A syntax error occured while parsing WWPN binding + configuraion information. 
+data: None +detail: Binding syntax error codes + 0 FC_SYNTAX_OK + 1 FC_SYNTAX_OK_BUT_NOT_THIS_BRD + 2 FC_SYNTAX_ERR_ASC_CONVERT + 3 FC_SYNTAX_ERR_EXP_COLON + 4 FC_SYNTAX_ERR_EXP_LPFC + 5 FC_SYNTAX_ERR_INV_LPFC_NUM + 6 FC_SYNTAX_ERR_EXP_T + 7 FC_SYNTAX_ERR_INV_TARGET_NUM + 8 FC_SYNTAX_ERR_EXP_D + 9 FC_SYNTAX_ERR_INV_DEVICE_NUM + 10 FC_SYNTAX_ERR_INV_RRATIO_NUM + 11 FC_SYNTAX_ERR_EXP_NULL_TERM +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0430[] = "%sWWPN binding entry %d: Syntax error code %d"; +msgLogDef fc_msgBlk0430 = { + FC_LOG_MSG_IN_0430, + fc_mes0430, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0431 +message: WWNN binding entry : Syntax error code +descript: A syntax error occured while parsing WWNN binding + configuraion information. +data: None +detail: Binding syntax error codes + 0 FC_SYNTAX_OK + 1 FC_SYNTAX_OK_BUT_NOT_THIS_BRD + 2 FC_SYNTAX_ERR_ASC_CONVERT + 3 FC_SYNTAX_ERR_EXP_COLON + 4 FC_SYNTAX_ERR_EXP_LPFC + 5 FC_SYNTAX_ERR_INV_LPFC_NUM + 6 FC_SYNTAX_ERR_EXP_T + 7 FC_SYNTAX_ERR_INV_TARGET_NUM + 8 FC_SYNTAX_ERR_EXP_D + 9 FC_SYNTAX_ERR_INV_DEVICE_NUM + 10 FC_SYNTAX_ERR_INV_RRATIO_NUM + 11 FC_SYNTAX_ERR_EXP_NULL_TERM +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. +*/ +char fc_mes0431[] = "%sWWNN binding entry %d: Syntax error code %d"; +msgLogDef fc_msgBlk0431 = { + FC_LOG_MSG_IN_0431, + fc_mes0431, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0432 +message: WWPN binding entry: node table full +descript: More bindings entries were configured than the driver can handle. 
+data: None +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file such that + fewer bindings are configured. +*/ +char fc_mes0432[] = "%sWWPN binding entry: node table full"; +msgLogDef fc_msgBlk0432 = { + FC_LOG_MSG_IN_0432, + fc_mes0432, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0433 +message: WWNN binding entry: node table full +descript: More bindings entries were configured than the driver can handle. +data: None +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file such that + fewer bindings are configured. +*/ +char fc_mes0433[] = "%sWWNN binding entry: node table full"; +msgLogDef fc_msgBlk0433 = { + FC_LOG_MSG_IN_0433, + fc_mes0433, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0434 +message: DID binding entry : Syntax error code +descript: A syntax error occured while parsing DID binding + configuraion information. +data: None +detail: Binding syntax error codes + 0 FC_SYNTAX_OK + 1 FC_SYNTAX_OK_BUT_NOT_THIS_BRD + 2 FC_SYNTAX_ERR_ASC_CONVERT + 3 FC_SYNTAX_ERR_EXP_COLON + 4 FC_SYNTAX_ERR_EXP_LPFC + 5 FC_SYNTAX_ERR_INV_LPFC_NUM + 6 FC_SYNTAX_ERR_EXP_T + 7 FC_SYNTAX_ERR_INV_TARGET_NUM + 8 FC_SYNTAX_ERR_EXP_D + 9 FC_SYNTAX_ERR_INV_DEVICE_NUM + 10 FC_SYNTAX_ERR_INV_RRATIO_NUM + 11 FC_SYNTAX_ERR_EXP_NULL_TERM +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file. 
+*/ +char fc_mes0434[] = "%sDID binding entry %d: Syntax error code %d"; +msgLogDef fc_msgBlk0434 = { + FC_LOG_MSG_IN_0434, + fc_mes0434, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0435 +message: DID binding entry: node table full +descript: More bindings entries were configured than the driver can handle. +data: None +severity: Error config +log: Always +module: fcLINUXfcp.c +action: Make neccessary changes to lpfc configuration file such that + fewer bindings are configured. +*/ +char fc_mes0435[] = "%sDID binding entry: node table full"; +msgLogDef fc_msgBlk0435 = { + FC_LOG_MSG_IN_0435, + fc_mes0435, + fc_msgPreambleINc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_INIT, + ERRID_LOG_INIT }; +/* +msgName: fc_mes0436 +message: Adapter failed to init, timeout, status reg +descript: The adapter failed during powerup diagnostics after it was reset. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0436[] = "%sAdapter failed to init, timeout, status reg x%x"; +msgLogDef fc_msgBlk0436 = { + FC_LOG_MSG_IN_0436, + fc_mes0436, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0437 +message: Adapter failed to init, chipset, status reg +descript: The adapter failed during powerup diagnostics after it was reset. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. 
+*/ +char fc_mes0437[] = "%sAdapter failed to init, chipset, status reg x%x"; +msgLogDef fc_msgBlk0437 = { + FC_LOG_MSG_IN_0437, + fc_mes0437, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0438 +message: Adapter failed to init, chipset, status reg +descript: The adapter failed during powerup diagnostics after it was reset. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0438[] = "%sAdapter failed to init, chipset, status reg x%x"; +msgLogDef fc_msgBlk0438 = { + FC_LOG_MSG_IN_0438, + fc_mes0438, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0439 +message: Adapter failed to init, mbxCmd READ_REV, mbxStatus +descript: Adapter initialization failed when issuing READ_REV mailbox command. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0439[] = "%sAdapter failed to init, mbxCmd x%x READ_REV, mbxStatus x%x"; +msgLogDef fc_msgBlk0439 = { + FC_LOG_MSG_IN_0439, + fc_mes0439, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0440 +message: Adapter failed to init, mbxCmd READ_REV detected outdated firmware +descript: Outdated firmware was detected during initialization. +data: (1) read_rev_reset +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. Update + firmware. If problems persist report these errors to Technical + Support. 
+*/ +char fc_mes0440[] = "%sAdapter failed to init, mbxCmd x%x READ_REV detected outdated firmware Data: x%x"; +msgLogDef fc_msgBlk0440 = { + FC_LOG_MSG_IN_0440, + fc_mes0440, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0441 +message: Adapter failed to init, mbxCmd DUMP VPD, mbxStatus +descript: Adapter initialization failed when issuing DUMP_VPD mailbox command. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0441[] = "%sAdapter failed to init, mbxCmd x%x DUMP VPD, mbxStatus x%x"; +msgLogDef fc_msgBlk0441 = { + FC_LOG_MSG_IN_0441, + fc_mes0441, + fc_msgPreambleINw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0442 +message: Adapter failed to init, mbxCmd CONFIG_PORT, mbxStatus +descript: Adapter initialization failed when issuing CONFIG_PORT mailbox + command. +data: 0 +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0442[] = "%sAdapter failed to init, mbxCmd x%x CONFIG_PORT, mbxStatus x%x Data: x%x"; +msgLogDef fc_msgBlk0442 = { + FC_LOG_MSG_IN_0442, + fc_mes0442, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0443 +message: SLI1 not supported, mbxCmd , mbxStatus +descript: The driver no longer support SLI-1 mode. +data: 0 +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a driver problem. If problems persist + report these errors to Technical Support. 
+*/ +char fc_mes0443[] = "%sSLI1 not supported, mbxCmd x%x, mbxStatus x%x Data: x%x"; +msgLogDef fc_msgBlk0443 = { + FC_LOG_MSG_IN_0443, + fc_mes0443, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0444 +message: Adapter failed to init, no buffers for RUN_BIU_DIAG +descript: The driver attempted to issue RUN_BIU_DIAG mailbox command to + the HBA but there were no buffers available. +data: None +severity: Error +log: Always +module: lp6000.c +action: This message indicates (1) a possible lack of memory resources. + Try increasing the lpfc 'num_bufs' configuration parameter to + allocate more buffers. (2) A possible driver buffer management + problem. If this problem persists, report these errors to + Technical Support. +*/ +char fc_mes0444[] = "%sAdapter failed to init, no buffers for RUN_BIU_DIAG"; +msgLogDef fc_msgBlk0444 = { + FC_LOG_MSG_IN_0444, + fc_mes0444, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0445 +message: RUN_BIU_DIAG failed +descript: Adapter failed to init properly because a PCI bus DMA + test failed. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error usually indicates a hardware problem with the + adapter. Run diagnostics. +*/ +char fc_mes0445[] = "%sRUN_BIU_DIAG failed"; +msgLogDef fc_msgBlk0445 = { + FC_LOG_MSG_IN_0445, + fc_mes0445, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0446 +message: Adapter failed to init, mbxCmd CFG_RING, mbxStatus , ring +descript: Adapter initialization failed when issuing CFG_RING mailbox command. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. 
+*/ +char fc_mes0446[] = "%sAdapter failed to init, mbxCmd x%x CFG_RING, mbxStatus x%x, ring %d"; +msgLogDef fc_msgBlk0446 = { + FC_LOG_MSG_IN_0446, + fc_mes0446, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0447 +message: Adapter failed init, mbxCmd runBIUdiag mbxStatus +descript: Adapter initialization failed when issuing runBIUdiag mailbox + command. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0447[] = "%sAdapter failed init, mbxCmd x%x CONFIG_LINK mbxStatus x%x"; +msgLogDef fc_msgBlk0447 = { + FC_LOG_MSG_IN_0447, + fc_mes0447, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0448 +message: Adapter failed to init, mbxCmd READ_SPARM, mbxStatus +descript: Adapter initialization failed when issuing READ_SPARM mailbox + command. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0448[] = "%sAdapter failed init, mbxCmd x%x READ_SPARM mbxStatus x%x"; +msgLogDef fc_msgBlk0448 = { + FC_LOG_MSG_IN_0448, + fc_mes0448, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0449 +message: WorldWide PortName Type doesn't conform to IP Profile +descript: In order to run IP, the WorldWide PortName must be of type + IEEE (NAA = 1). This message displays if the adapter WWPN + doesn't conform with the standard. +data: None +severity: Error +log: Always +module: lp6000.c +action: Turn off the network-on configuration parameter or configure + a different WWPN. 
+*/ +char fc_mes0449[] = "%sWorldWide PortName Type x%x doesn't conform to IP Profile"; +msgLogDef fc_msgBlk0449 = { + FC_LOG_MSG_IN_0449, + fc_mes0449, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0450 +message: Adapter failed to init, mbxCmd FARP, mbxStatus +descript: Adapter initialization failed when issuing FARP mailbox command. +data: None +severity: Warning +log: LOG_INIT verbose +module: lp6000.c +action: None required +*/ +char fc_mes0450[] = "%sAdapter failed to init, mbxCmd x%x FARP, mbxStatus x%x"; +msgLogDef fc_msgBlk0450 = { + FC_LOG_MSG_IN_0450, + fc_mes0450, + fc_msgPreambleINw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0451 +message: Enable interrupt handler failed +descript: The driver attempted to register the HBA interrupt service + routine with the host operating system but failed. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or driver problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0451[] = "%sEnable interrupt handler failed"; +msgLogDef fc_msgBlk0451 = { + FC_LOG_MSG_IN_0451, + fc_mes0451, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0452 +message: Bring Adapter offline +descript: The FC driver has received a request to bring the adapter + offline. This may occur when running lputil. 
+data: None +severity: Warning +log: LOG_INIT verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0452[] = "%sBring Adapter offline"; +msgLogDef fc_msgBlk0452 = { + FC_LOG_MSG_IN_0452, + fc_mes0452, + fc_msgPreambleINw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0453 +message: Adapter failed to init, mbxCmd READ_CONFIG, mbxStatus +descript: Adapter initialization failed when issuing READ_CONFIG mailbox + command. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0453[] = "%sAdapter failed to init, mbxCmd x%x READ_CONFIG, mbxStatus x%x"; +msgLogDef fc_msgBlk0453 = { + FC_LOG_MSG_IN_0453, + fc_mes0453, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0454 +message: Adapter failed to init, mbxCmd INIT_LINK, mbxStatus +descript: Adapter initialization failed when issuing INIT_LINK mailbox command. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0454[] = "%sAdapter failed to init, mbxCmd x%x INIT_LINK, mbxStatus x%x"; +msgLogDef fc_msgBlk0454 = { + FC_LOG_MSG_IN_0454, + fc_mes0454, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0455 +message: Vital Product +descript: Vital Product Data (VPD) contained in HBA flash. 
+data: (1) vpd[0] (2) vpd[1] (3) vpd[2] (4) vpd[3] +severity: Information +log: LOG_INIT verbose +module: lp6000.c +action: None required +*/ +char fc_mes0455[] = "%sVital Product Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0455 = { + FC_LOG_MSG_IN_0455, + fc_mes0455, + fc_msgPreambleINi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0457 +message: Adapter Hardware Error +descript: The driver received an interrupt indicating a possible hardware + problem. +data: (1) status (2) status1 (3) status2 +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0457[] = "%sAdapter Hardware Error Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0457 = { + FC_LOG_MSG_IN_0457, + fc_mes0457, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* +msgName: fc_mes0458 +message: Bring Adapter online +descript: The FC driver has received a request to bring the adapter + online. This may occur when running lputil. +data: None +severity: Warning +log: LOG_INIT verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0458[] = "%sBring Adapter online"; +msgLogDef fc_msgBlk0458 = { + FC_LOG_MSG_IN_0458, + fc_mes0458, + fc_msgPreambleINw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0459 +message: Bring Adapter online +descript: The FC driver has received a request to bring the adapter + online. This may occur when running lputil. 
+data: None +severity: Warning +log: LOG_INIT verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0459[] = "%sBring Adapter online"; +msgLogDef fc_msgBlk0459 = { + FC_LOG_MSG_IN_0459, + fc_mes0459, + fc_msgPreambleINw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0460 +message: Bring Adapter offline +descript: The FC driver has received a request to bring the adapter + offline. This may occur when running lputil. +data: None +severity: Warning +log: LOG_INIT verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0460[] = "%sBring Adapter offline"; +msgLogDef fc_msgBlk0460 = { + FC_LOG_MSG_IN_0460, + fc_mes0460, + fc_msgPreambleINw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_INIT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0461 +message: Adapter failed init, mbxCmd CONFIG_LINK mbxStatus +descript: Adapter initialization failed when issuing CONFIG_LINK mailbox + command. +data: None +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a hardware or firmware problem. If + problems persist report these errors to Technical Support. +*/ +char fc_mes0461[] = "%sAdapter failed init, mbxCmd x%x CONFIG_LINK mbxStatus x%x"; +msgLogDef fc_msgBlk0461 = { + FC_LOG_MSG_IN_0461, + fc_mes0461, + fc_msgPreambleINe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_INIT, + ERRID_LOG_INIT }; + +/* + * UNUSED 0500 + */ + +/* + * Begin IP LOG Message Structures + */ + +/* +msgName: fc_mes0600 +message: FARP-RSP received from DID . +descript: A FARP ELS command response was received. 
+data: None +severity: Information +log: LOG_IP verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0600[] = "%sFARP-RSP received from DID x%x"; +msgLogDef fc_msgBlk0600 = { + FC_LOG_MSG_IP_0600, + fc_mes0600, + fc_msgPreambleIPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_IP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0601 +message: FARP-REQ received from DID +descript: A FARP ELS command request was received. +data: None +severity: Information +log: LOG_IP verbose +module: fcelsb.c +action: None required +*/ +char fc_mes0601[] = "%sFARP-REQ received from DID x%x"; +msgLogDef fc_msgBlk0601 = { + FC_LOG_MSG_IP_0601, + fc_mes0601, + fc_msgPreambleIPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_IP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0602 +message: IP Response Ring out of posted buffers +descript: The IP ring returned all posted buffers to the driver + and is waiting for the driver to post new buffers. This + could mean the host system is out of TCP/IP buffers. +data: (1) fc_missbufcnt (2) NoRcvBuf +severity: Warning +log: LOG_IP verbose +module: fcscsib.c +action: Try allocating more IP buffers (STREAMS buffers or mbufs) + of size 4096 and/or increasing the post-ip-buf lpfc + configuration parameter. Reboot the system. +*/ +char fc_mes0602[] = "%sIP Response Ring %d out of posted buffers Data: x%x x%x"; +msgLogDef fc_msgBlk0602 = { + FC_LOG_MSG_IP_0602, + fc_mes0602, + fc_msgPreambleIPw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_IP, + ERRID_LOG_NO_RESOURCE }; + +/* +msgName: fc_mes0603 +message: Rcv Ring out of posted buffers +descript: The ring returned all posted buffers to the driver + and is waiting for the driver to post new buffers. This + could mean the host system is out of ELS or CT buffers. +data: (1) fc_missbufcnt (2) NoRcvBuf +severity: Error +log: Always +module: fcscsib.c +action: Try allocating more buffers by increasing the num-buf lpfc + configuration parameter. 
Reboot the system. +*/ +char fc_mes0603[] = "%sRcv Ring %d out of posted buffers Data: x%x x%x"; +msgLogDef fc_msgBlk0603 = { + FC_LOG_MSG_IP_0603, + fc_mes0603, + fc_msgPreambleIPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_IP, + ERRID_LOG_NO_RESOURCE }; + +/* +msgName: fc_mes0604 +message: Post buffer for IP ring failed +descript: The driver cannot allocate a buffer to post to the IP ring. + This usually means the host system is out of TCP/IP buffers. +data: (1) missbufcnt +severity: Error +log: Always +module: fcscsib.c +action: Try allocating more IP buffers (STREAMS buffers or mbufs) + of size 4096. Reboot the system. +*/ +char fc_mes0604[] = "%sPost buffer for IP ring %d failed Data: x%x"; +msgLogDef fc_msgBlk0604 = { + FC_LOG_MSG_IP_0604, + fc_mes0604, + fc_msgPreambleIPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_IP, + ERRID_LOG_NO_RESOURCE }; + +/* +msgName: fc_mes0605 +message: No room on IP xmit queue +descript: The system is generating IOCB commands to be processed + faster than the adapter can process them. +data: (1) xmitnoroom +severity: Warning +log: LOG_IP verbose +module: fcxmitb.c +action: Check the state of the link. If the link is up and running, + reconfigure the xmit queue size to be larger. Note, a larger + queue size may require more system IP buffers. If the link + is down, check physical connections to Fibre Channel network. +*/ +char fc_mes0605[] = "%sNo room on IP xmit queue Data: x%x"; +msgLogDef fc_msgBlk0605 = { + FC_LOG_MSG_IP_0605, + fc_mes0605, + fc_msgPreambleIPw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_IP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0606 +message: Stray XmitSequence completion +descript: Received an XMIT_SEQUENCE IOCB completion without issuing + a corresponding XMIT_SEQUENCE Command (based on the IOTAG + field in the XMIT_SEQUENCE_CR iocb). 
+data: (1) ulpCommand (2) ulpIoTag +severity: Error +log: Always +module: fcxmitb.c +action: This error could indicate a software driver or firmware + problem. If problems persist report these errors to + Technical Support. +*/ +char fc_mes0606[] = "%sStray XmitSequence completion Data: x%x x%x"; +msgLogDef fc_msgBlk0606 = { + FC_LOG_MSG_IP_0606, + fc_mes0606, + fc_msgPreambleIPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_IP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0607 +message: Xmit Sequence completion error +descript: A XMIT_SEQUENCE command completed with a status error + in the IOCB. +data: (1) ulpStatus (2) ulpIoTag (3) ulpWord[4] (4) did +severity: Warning +log: LOG_IP verbose +module: fcxmitb.c +action: If there are many errors to one device, check physical + connections to Fibre Channel network and the state of + the remote PortID. The driver attempts to recover by + creating a new exchange to the remote device. +*/ +char fc_mes0607[] = "%sXmit Sequence completion error Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0607 = { + FC_LOG_MSG_IP_0607, + fc_mes0607, + fc_msgPreambleIPw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_IP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0608 +message: Stray CreateXRI completion +descript: Received a CREATE_XRI command completion without + issuing a corresponding CREATE_XRI Command (based + on the IOTAG field in the CREATE_XRI_CR iocb). +data: (1) ulpCommand (2) ulpIoTag +severity: Error +log: Always +module: fcxmitb.c +action: This error could indicate a software driver or + firmware problem. If problems persist report these + errors to Technical Support. 
+*/ +char fc_mes0608[] = "%sStray CreateXRI completion Data: x%x x%x"; +msgLogDef fc_msgBlk0608 = { + FC_LOG_MSG_IP_0608, + fc_mes0608, + fc_msgPreambleIPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_IP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* + * Begin FCP LOG Message Structures + */ + +/* +msgName: fc_mes0700 +message: Start nodev timer +descript: A target disappeared from the Fibre Channel network. If the + target does not return within nodev-tmo timeout all I/O to + the target will fail. +data: (1) nlp (2) nlp_flag (3) nlp_state (4) nlp_DID +severity: Information +log: LOG_FCP verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0700[] = "%sSTART nodev timer Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0700 = { + FC_LOG_MSG_FP_0700, + fc_mes0700, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0701 +message: Issue Abort Task Set I/O for LUN +descript: The SCSI layer detected that it needs to abort all I/O + to a specific device. This results in an FCP Task + Management command to abort the I/O in progress. +data: (1) did (2) sid (3) flags +severity: Information +log: LOG_FCP verbose +module: fcstratb.c +action: Check state of device in question. +*/ +char fc_mes0701[] = "%sIssue Abort Task Set I/O for LUN %d Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0701 = { + FC_LOG_MSG_FP_0701, + fc_mes0701, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0702 +message: Issue Target Reset I/O +descript: The SCSI layer detected that it needs to abort all I/O + to a specific target. This results in an FCP Task + Management command to abort the I/O in progress. +data: (1) lun (2) did (3) sid (4) flags +severity: Information +log: LOG_FCP verbose +module: fcstratb.c +action: Check state of target in question. 
+*/ +char fc_mes0702[] = "%sIssue Target Reset I/O Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0702 = { + FC_LOG_MSG_FP_0702, + fc_mes0702, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0703 +message: Issue LUN Reset I/O for LUN +descript: The SCSI layer detected that it needs to abort all I/O + to a specific device. This results in an FCP Task + Management command to abort the I/O in progress. +data: (1) did (2) sid (3) flags +severity: Information +log: LOG_FCP verbose +module: fcstratb.c +action: Check state of device in question. +*/ +char fc_mes0703[] = "%sIssue LUN Reset I/O for LUN %d Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0703 = { + FC_LOG_MSG_FP_0703, + fc_mes0703, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0704 +message: STOP nodev timer +descript: The FCP target was rediscovered and I/O can be resumed. +data: (1) ndlp (2) nlp_flag (3) nlp_state (4) nlp_DID +severity: Information +log: LOG_FCP verbose +module: fcstratb.c +action: None required +*/ +char fc_mes0704[] = "%sSTOP nodev timer Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0704 = { + FC_LOG_MSG_FP_0704, + fc_mes0704, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0705 +message: STOP nodev timer +descript: The FCP target was rediscovered and I/O can be resumed. 
+data: (1) ndlp (2) nlp_flag (3) nlp_state (4) nlp_DID +severity: Information +log: LOG_FCP verbose +module: fcstratb.c +action: None required +*/ +char fc_mes0705[] = "%sSTOP nodev timer Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0705 = { + FC_LOG_MSG_FP_0705, + fc_mes0705, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0706 +message: Cannot issue FCP command +descript: A valid ELS login with the FCP target no longer exists. +data: (1) did (2) sid +severity: Warning +log: LOG_FCP verbose +module: fcstratb.c +action: Check the state of the target in question. +*/ +char fc_mes0706[] = "%sCannot issue FCP command Data: x%x x%x"; +msgLogDef fc_msgBlk0706 = { + FC_LOG_MSG_FP_0706, + fc_mes0706, + fc_msgPreambleFPw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0707 +message: Bad SCSI CDB length for LUN DID +descript: This error indicates a SCSI command sent to the + FC driver from the SCSI layer has an invalid length. +data: (1) cmd_cdblen (2) fcpCdb +severity: Error +log: Always +module: fcstratb.c +action: This error could indicate a host operating system SCSI + layer problem. If problems persist report these errors + to Technical Support. +*/ +char fc_mes0707[] = "%sBad SCSI CDB length for LUN %d DID x%x Data: x%x x%x"; +msgLogDef fc_msgBlk0707 = { + FC_LOG_MSG_FP_0707, + fc_mes0707, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0708 +message: NULL sp in flush_done +descript: This error indicates a potential FC driver problem + related to a FCP command iodone +data: (1) cmnd[0] (2) serial_number (3) retries (4) result +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a driver problem. If problems + persist report these errors to Technical Support. 
+*/ +char fc_mes0708[] = "%sNULL sp in flush_done Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0708 = { + FC_LOG_MSG_FP_0708, + fc_mes0708, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0709 +message: NULL sp in DPC flush_done +descript: This error indicates a potential FC driver problem + related to a FCP command iodone +data: (1) cmnd[0] (2) serial_number (3) retries (4) result +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a driver problem. If problems + persist report these errors to Technical Support. +*/ +char fc_mes0709[] = "%sNULL sp in DPC flush_done Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0709 = { + FC_LOG_MSG_FP_0709, + fc_mes0709, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0710 +message: iodone error return +descript: This error indicates the FC driver is returning SCSI + command to the SCSI layer in error or with sense data. +data: (1) target (2) retries (3) result (4) *iptr +severity: Information +log: LOG_FCP verbose +module: fcLINUXfcp.c +action: None required +*/ +char fc_mes0710[] = "%siodone error return Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0710 = { + FC_LOG_MSG_FP_0710, + fc_mes0710, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0711 +message: iodone error return +descript: This error indicates the FC driver is returning SCSI + command to the SCSI layer in error or with sense data. 
+data: (1) target (2) retries (3) result (4) *iptr +severity: Information +log: LOG_FCP verbose +module: fcLINUXfcp.c +action: None required +*/ +char fc_mes0711[] = "%siodone error return Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0711 = { + FC_LOG_MSG_FP_0711, + fc_mes0711, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0712 +message: SCSI layer issued abort device +descript: The SCSI layer is requesting the driver to abort + I/O to a specific device. +data: (1) target (2) lun (3) cmnd[0] (4) serial_number +severity: Error +log: Always +module: fcLINUXfcp.c +action: Check state of device in question. +*/ +char fc_mes0712[] = "%sSCSI layer issued abort device Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0712 = { + FC_LOG_MSG_FP_0712, + fc_mes0712, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0713 +message: SCSI layer issued target reset +descript: The SCSI layer is requesting the driver to abort + I/O to a specific target. +data: (1) target (2) lun (3) dev_index +severity: Error +log: Always +module: fcLINUXfcp.c +action: Check state of target in question. +*/ +char fc_mes0713[] = "%sSCSI layer issued target reset Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0713 = { + FC_LOG_MSG_FP_0713, + fc_mes0713, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0714 +message: SCSI layer issued Bus Reset +descript: The SCSI layer is requesting the driver to abort + all I/Os to all targets on this HBA. +data: (1) target (2) lun +severity: Error +log: Always +module: fcLINUXfcp.c +action: Check state of targets in question. 
+*/ +char fc_mes0714[] = "%sSCSI layer issued Bus Reset Data: x%x x%x"; +msgLogDef fc_msgBlk0714 = { + FC_LOG_MSG_FP_0714, + fc_mes0714, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0715 +message: SCSI layer issued Host Reset +descript: The SCSI layer is requesting the driver to reset the link + on this HBA. +data: (1) target (2) lun +severity: Error +log: Always +module: fcLINUXfcp.c +action: Check state of HBA link. +*/ +char fc_mes0715[] = "%sSCSI layer issued Host Reset Data: x%x x%x"; +msgLogDef fc_msgBlk0715 = { + FC_LOG_MSG_FP_0715, + fc_mes0715, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0716 +message: FCP residual underrun, expected , residual +descript: FCP device provided less data than was requested. +data: (1) cmnd[0] (2) underflow +severity: Information +log: LOG_FCP verbose +module: fcLINUXfcp.c +action: None required +*/ +char fc_mes0716[] = "%sFCP residual underrun, expected %d, residual %d Data: x%x x%x"; +msgLogDef fc_msgBlk0716 = { + FC_LOG_MSG_FP_0716, + fc_mes0716, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0717 +message: FCP command residual underrun converted to error +descript: The driver converts this underrun condition to an error based + on the underflow field in the SCSI cmnd. 
+data: (1) underflow (2) len (3) resid +severity: Information +log: LOG_FCP verbose +module: fcLINUXfcp.c +action: None required +*/ +char fc_mes0717[] = "%sFCP cmd x%x resid urun convrt'd to err Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0717 = { + FC_LOG_MSG_FP_0717, + fc_mes0717, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0718 +message: LUN address out of range +descript: Invalid LUN number in the SCSI command passed to the driver. +data: (1) target (2) lun +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a host operating system SCSI + layer problem. If problems persist report these errors + to Technical Support. +*/ +char fc_mes0718[] = "%sLUN address out of range Data: x%x x%x"; +msgLogDef fc_msgBlk0718 = { + FC_LOG_MSG_FP_0718, + fc_mes0718, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0719 +message: Waiting for REPORT LUN cmpl before issuing INQUIRY SN +descript: Waiting for REPORT LUN completion before issuing INQUIRY SN +data: (1) scsi_id (2) lun_id (3) flags +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0719[] = "%sWaiting for REPORT LUN cmpl before issuing INQUIRY SN Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0719 = { + FC_LOG_MSG_FP_0719, + fc_mes0719, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0720 +message: Stray FCP completion +descript: Received an FCP command completion without issuing a + corresponding FCP Command (based on the IOTAG field + in the FCP IOCB). +data: (1) ulpCommand (2) ulpIoTag (3) ulpStatus (4) ulpWord[4] +severity: Error +log: Always +module: fcscsib.c +action: This error could indicate a software driver or firmware + problem. 
If problems persist report these errors to + Technical Support. +*/ +char fc_mes0720[] = "%sStray FCP completion Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0720 = { + FC_LOG_MSG_FP_0720, + fc_mes0720, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0721 +message: INQUIRY SN cmpl +descript: An INQUIRY Serial Number (page x83) completed. This information + is saved by the driver. +data: (1) scsi_id (2) lun_id (3) statLocalError (4) cmd + WD7 +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0721[] = "%sINQUIRY SN cmpl Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0721 = { + FC_LOG_MSG_FP_0721, + fc_mes0721, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0722 +message: INQUIRY SN info +descript: This is the serial number of the device that will be saved. +data: (1) *datap (2) *datap + 3 (3) datap + 7 (4) rspResId +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0722[] = "%sINQUIRY SN info Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0722 = { + FC_LOG_MSG_FP_0722, + fc_mes0722, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0723 +message: Issue INQUIRY SN +descript: Issuing an INQUIRY Serial Number (page x83) FCP command. +data: (1) scsi_id (2) lun_id +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0723[] = "%sIssue INQUIRY SN Data: x%x x%x"; +msgLogDef fc_msgBlk0723 = { + FC_LOG_MSG_FP_0723, + fc_mes0723, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0724 +message: Issue INQUIRY Page 0 +descript: Issuing an INQUIRY (page x0) FCP command. 
+data: (1) scsi_id (2) lun_id +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0724[] = "%sIssue INQUIRY Page 0 Data: x%x x%x"; +msgLogDef fc_msgBlk0724 = { + FC_LOG_MSG_FP_0724, + fc_mes0724, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0725 +message: Inquiry Serial Number: invalid length +descript: An INQUIRY SN command completed with an invalid serial number length. +data: (1) sizeSN (2) j (3) scsi_id (4) lun_id +severity: Error +log: Always +module: fcscsib.c +action: Check remote NPORT for potential problem. +*/ +char fc_mes0725[] = "%sINQ Serial Number: invalid length Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0725= { + FC_LOG_MSG_FP_0725, + fc_mes0725, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_HDW_ERR }; + +/* +msgName: fc_mes0726 +message: INQUIRY SN cmd failed +descript: The INQUIRY Serial Number (page x83) failed. +data: (1) ulpStatus (2) fcpi_parm (3) m_target (4) m_lun +severity: Error +log: Always +module: fcscsib.c +action: Check if target device supports this command +*/ +char fc_mes0726[] = "%sINQUIRY SN cmd failed Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0726= { + FC_LOG_MSG_FP_0726, + fc_mes0726, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_HDW_ERR }; + +/* +msgName: fc_mes0727 +message: INQUIRY Page 0 cmpl +descript: An INQUIRY (page 0) completed. This information is saved by + the driver. 
+data: (1) scsi_id (2) lun_id (3) statLocalError (4) cmd + WD7
+severity: Information
+log: LOG_FCP verbose
+module: fcscsib.c
+action: None required
+*/
+char fc_mes0727[] = "%sINQUIRY Page 0 cmpl Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0727 = {
+ FC_LOG_MSG_FP_0727,
+ fc_mes0727,
+ fc_msgPreambleFPi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_FCP,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0728
+message: INQUIRY Page 0 cmd failed
+descript: The INQUIRY (page 0) failed.
+data: (1) ulpStatus (2) fcpi_parm (3) scsi_id (4) lun_id
+severity: Error
+log: Always
+module: fcscsib.c
+action: Check if target device supports this command
+*/
+char fc_mes0728[] = "%sINQUIRY Page 0 cmd failed Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0728= {
+ FC_LOG_MSG_FP_0728,
+ fc_mes0728,
+ fc_msgPreambleFPe,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_ERR,
+ LOG_FCP,
+ ERRID_LOG_HDW_ERR };
+
+/*
+msgName: fc_mes0729
+message: FCP cmd failed on device (, ) DID
+descript: The specified device failed an FCP command.
+data: (1) rspInfo3 (2) statLocalError (3) *cmd + WD6 (4) *cmd + WD7
+severity: Warning
+log: LOG_FCP verbose
+module: fcscsib.c
+action: Check the state of the target in question.
+*/
+char fc_mes0729[] = "%sFCP cmd x%x failed on device (%d, %d), DID x%x Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0729= {
+ FC_LOG_MSG_FP_0729,
+ fc_mes0729,
+ fc_msgPreambleFPw,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_WARN,
+ LOG_FCP,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0730
+message: FCP command failed: RSP
+descript: The FCP command failed with a response error.
+data: (1) lp[2] (2) lp[3] (3) lp[4] (4) lp[5]
+severity: Warning
+log: LOG_FCP verbose
+module: fcscsib.c
+action: Check the state of the target in question.
+*/
+char fc_mes0730[] = "%sFCP command failed: RSP Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0730= {
+ FC_LOG_MSG_FP_0730,
+ fc_mes0730,
+ fc_msgPreambleFPw,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_WARN,
+ LOG_FCP,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0731
+message: FCP command failed: SNS
+descript: The FCP command failed with sense information.
+data: (1) lp[0] (2) lp[1] (3) lp[2] (4) lp[3]
+ (5) lp[4] (6) lp[5] (7) lp[6] (8) lp[7]
+severity: Warning
+log: LOG_FCP verbose
+module: fcscsib.c
+action: Check the state of the target in question.
+*/
+char fc_mes0731[] = "%sFCP command failed: SNS Data: x%x x%x x%x x%x x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0731= {
+ FC_LOG_MSG_FP_0731,
+ fc_mes0731,
+ fc_msgPreambleFPw,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_WARN,
+ LOG_FCP,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0732
+message: Retry FCP command due to 29,00 check condition
+descript: The issued FCP command got a 29,00 check condition and will
+ be retried by the driver.
+data: (1) *lp (2) *lp+1 (3) *lp+2 (4) *lp+3 +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0732[] = "%sRetry FCP command due to 29,00 check condition Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0732 = { + FC_LOG_MSG_FP_0732, + fc_mes0732, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0733 +message: FCP Read Underrun +descript: The issued FCP command returned a Read Underrun +data: (1) *cmd + WD7 (2) ulpContext (3) rspResId (4) fcpi_parm +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0733[] = "%sFCP Read Underrun Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0733 = { + FC_LOG_MSG_FP_0733, + fc_mes0733, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0734 +message: FCP Read Check Error +descript: The issued FCP command returned a Read Check Error +data: (1) *cmd + WD7 (2) ulpContext (3) rspResId (4) fcpi_parm +severity: Error +log: Always +module: fcscsib.c +action: Check the state of the target in question. +*/ +char fc_mes0734[] = "%sFCP Read Check Error Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0734= { + FC_LOG_MSG_FP_0734, + fc_mes0734, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_HDW_ERR }; + +/* +msgName: fc_mes0735 +message: FCP Read Check Error with Check Condition +descript: The issued FCP command returned a Read Check Error and a + Check condition. +data: (1) *cmd + WD7 (2) ulpContext (3) rspResId (4) fcpi_parm +severity: Error +log: Always +module: fcscsib.c +action: Check the state of the target in question. 
+*/ +char fc_mes0735[] = "%sFCP Read Check Error with Check Condition Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0735= { + FC_LOG_MSG_FP_0735, + fc_mes0735, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP | LOG_CHK_COND, + ERRID_LOG_HDW_ERR }; + +/* +msgName: fc_mes0736 +message: FCP QUEUE Full +descript: Received a Queue Full status from the FCP device. +data: (1) fcp_cur_queue_depth (2) active_io_count (3) flags (4) a_current +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0736[] = "%sFCP QUEUE Full Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0736 = { + FC_LOG_MSG_FP_0736, + fc_mes0736, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0737 +message: FCP error: Check condition +descript: The issued FCP command resulted in a Check Condition. +data: (1) *cmd + WD7 (2) ulpIoTag (3) ulpContext (4) statLocalError +severity: Information +log: LOG_FCP | LOG_CHK_COND verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0737[] = "%sFCP error: Check condition Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0737 = { + FC_LOG_MSG_FP_0737, + fc_mes0737, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP | LOG_CHK_COND, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0738 +message: 29,00 Check condition received +descript: The received check condition indicates the device was powered + on or reset. 
+data: (1) lp[0] (2) lp[1] (3) lp[2] (4) lp[3]
+severity: Information
+log: LOG_FCP | LOG_CHK_COND verbose
+module: fcscsib.c
+action: None required
+*/
+char fc_mes0738[] = "%s29,00 Check condition received Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0738 = {
+ FC_LOG_MSG_FP_0738,
+ fc_mes0738,
+ fc_msgPreambleFPi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_FCP | LOG_CHK_COND,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0739
+message: Check condition received ERR1
+descript: The command SCSI3_PERSISTENT_RESERVE_IN resulted in an Invalid
+ Command operation code check condition.
+data: (1) lp[0] (2) lp[1] (3) lp[2] (4) lp[3]
+severity: Information
+log: LOG_FCP | LOG_CHK_COND verbose
+module: fcscsib.c
+action: None required
+*/
+char fc_mes0739[] = "%sCheck condition received ERR1 Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0739 = {
+ FC_LOG_MSG_FP_0739,
+ fc_mes0739,
+ fc_msgPreambleFPi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_FCP | LOG_CHK_COND,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0740
+message: Check condition received ERR2
+descript: The check condition meets the criteria for the configuration
+ parameters lpfc_check_cond_err and lpfc_delay_rsp_err.
+data: (1) lp[0] (2) lp[1] (3) lp[2] (4) lp[3]
+severity: Information
+log: LOG_FCP | LOG_CHK_COND verbose
+module: fcscsib.c
+action: None required
+*/
+char fc_mes0740[] = "%sCheck condition received ERR2 Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0740 = {
+ FC_LOG_MSG_FP_0740,
+ fc_mes0740,
+ fc_msgPreambleFPi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_FCP | LOG_CHK_COND,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0741
+message: Check condition received
+descript: The issued FCP command resulted in a Check Condition.
+data: (1) lp[0] (2) lp[1] (3) lp[2] (4) lp[3] +severity: Information +log: LOG_FCP | LOG_CHK_COND verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0741[] = "%sCheck condition received Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0741 = { + FC_LOG_MSG_FP_0741, + fc_mes0741, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP | LOG_CHK_COND, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0742 +message: FCP completion error +descript: An FCP command completed with a status error in the IOCB. +data: (1) ulpStatus (2) ulpWord[4] (3) did. +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: If there are many errors to one device, check physical + connections to Fibre Channel network and the state of the + remote PortID. +*/ +char fc_mes0742[] = "%sFCP completion error Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0742 = { + FC_LOG_MSG_FP_0742, + fc_mes0742, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0743 +message: FCP completion error +descript: An FCP command completed with a status error in the IOCB. +data: (1) ulpStatus (2) ulpWord[4] (3) did. +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: If there are many errors to one device, check physical + connections to Fibre Channel network and the state of the + remote PortID. +*/ +char fc_mes0743[] = "%sFCP completion error Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0743 = { + FC_LOG_MSG_FP_0743, + fc_mes0743, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_HDW_ERR }; + +/* +msgName: fc_mes0744 +message: FCP completion error +descript: An FCP command completed with a status error in the IOCB. 
+data: (1) did (2) *lp (3) *(lp+2) (4) *(lp+3) +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: If there are many errors to one device, check physical + connections to Fibre Channel network and the state of the + remote PortID. +*/ +char fc_mes0744[] = "%sFCP completion error Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0744 = { + FC_LOG_MSG_FP_0744, + fc_mes0744, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0745 +message: FCP completion error +descript: An FCP command completed with a status error in the IOCB. +data: (1) ulpStatus (2) ulpWord[4] (3) did. +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: If there are many errors to one device, check physical + connections to Fibre Channel network and the state of the + remote PortID. +*/ +char fc_mes0745[] = "%sFCP completion error Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0745 = { + FC_LOG_MSG_FP_0745, + fc_mes0745, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_HDW_ERR }; + +/* +msgName: fc_mes0746 +message: FCP completion error +descript: An FCP command completed with a status error in the IOCB. +data: (1) ulpStatus (2) ulpWord[4] (3) did. +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: If there are many errors to one device, check physical + connections to Fibre Channel network and the state of the + remote PortID. +*/ +char fc_mes0746[] = "%sFCP completion error Data: x%x x%x x%x"; +msgLogDef fc_msgBlk0746 = { + FC_LOG_MSG_FP_0746, + fc_mes0746, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0747 +message: Cmpl Target Reset +descript: A driver initiated Target Reset completed. 
+data: (1) scsi_id (2) lun_id (3) statLocalError (4) *cmd + WD7 +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0747[] = "%sCmpl Target Reset Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0747 = { + FC_LOG_MSG_FP_0747, + fc_mes0747, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0748 +message: Cmpl LUN Reset +descript: A driver initiated LUN Reset completed. +data: (1) scsi_id (2) lun_id (3) statLocalError (4) *cmd + WD7 +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0748[] = "%sCmpl LUN Reset Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0748 = { + FC_LOG_MSG_FP_0748, + fc_mes0748, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0749 +message: Cmpl Abort Task Set +descript: A driver initiated Abort Task Set completed. +data: (1) scsi_id (2) lun_id (3) statLocalError (4) *cmd + WD7 +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: None required +*/ +char fc_mes0749[] = "%sCmpl Abort Task Set Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0749 = { + FC_LOG_MSG_FP_0749, + fc_mes0749, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0750 +message: EXPIRED linkdown timer +descript: The link was down for greater than the configuration parameter + (lpfc_linkdown_tmo) seconds. All I/O associated with the devices + on this link will be failed. +data: (1) fc_ffstate +severity: Information +log: LOG_FCP | LOG_LINK_EVENT verbose +module: fcscsib.c +action: Check HBA cable/connection to Fibre Channel network. 
+*/ +char fc_mes0750[] = "%sEXPIRED linkdown timer Data: x%x"; +msgLogDef fc_msgBlk0750 = { + FC_LOG_MSG_FP_0750, + fc_mes0750, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP | LOG_LINK_EVENT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0751 +message: EXPIRED nodev timer +descript: A device disappeared for greater than the configuration parameter + (lpfc_nodev_tmo) seconds. All I/O associated with this device + will be failed. +data: (1) ndlp (2) nlp_flag (3) nlp_state (4) nlp_DID +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: Check physical connections to Fibre Channel network and the + state of the remote PortID. +*/ +char fc_mes0751[] = "%sEXPIRED nodev timer Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0751 = { + FC_LOG_MSG_FP_0751, + fc_mes0751, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0752 +message: Device disappeared, nodev timeout +descript: A device disappeared for greater than the configuration + parameter (lpfc_nodev_tmo) seconds. All I/O associated with + this device will be failed. +data: (1) did (2) sid (3) pan (4) a_current +severity: Information +log: LOG_FCP verbose +module: fcscsib.c +action: Check physical connections to Fibre Channel network and the + state of the remote PortID. +*/ +char fc_mes0752[] = "%sDevice disappeared, nodev timeout Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0752 = { + FC_LOG_MSG_FP_0752, + fc_mes0752, + fc_msgPreambleFPi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0753 +message: Inquiry Serial Number: invalid length +descript: An INQUIRY SN command completed with an invalid serial number length. +data: (1) sizeSN (2) j (3) scsi_id (4) lun_id +severity: Error +log: Always +module: fcscsib.c +action: Check state of target in question. 
+*/ +char fc_mes0753[] = "%sInquiry Serial Number: invalid length Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0753= { + FC_LOG_MSG_FP_0753, + fc_mes0753, + fc_msgPreambleFPe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_FCP, + ERRID_LOG_HDW_ERR }; + +/* +msgName: fc_mes0754 +message: SCSI timeout +descript: An FCP IOCB command was posted to a ring and did not complete + within ULP timeout seconds. +data: (1) did (2) sid +severity: Warning +log: LOG_FCP verbose +module: fcscsib.c +action: If no I/O is going through the adapter, reboot the system; + otherwise check the state of the target in question. +*/ +char fc_mes0754[] = "%sSCSI timeout Data: x%x x%x"; +msgLogDef fc_msgBlk0754 = { + FC_LOG_MSG_FP_0754, + fc_mes0754, + fc_msgPreambleFPw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_FCP, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes0756 +message: Local_timeout Skipping clock tick +descript: The DPC thread has not been scheduled within several seconds +data: (1) dpc_ha_copy (2) ha_copy (3) dpc_cnt (4) fc_ffstate +severity: Warning +log: LOG_FCP verbose +module: fcLINUXfcp.c +action: Check the state of the target in question. +*/ +char fc_mes0756[] = "%sLocal_timeout Skipping clock tick Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0756= { + FC_LOG_MSG_FP_0756, + fc_mes0756, + fc_msgPreambleFPw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_FCP, + ERRID_LOG_UNEXPECT_EVENT }; + +/* + * UNUSED 0800 + */ + +/* + * Begin NODE LOG Message Structures + */ + +/* +msgName: fc_mes0900 +message: FIND node rpi +descript: The driver is looking up the node table entry for a remote + NPORT based on its RPI. 
+data: (1) ndlp (2) rpi
+severity: Information
+log: LOG_NODE verbose
+module: fcrpib.c
+action: None required
+*/
+char fc_mes0900[] = "%sFIND node rpi Data: x%x x%x";
+msgLogDef fc_msgBlk0900 = {
+ FC_LOG_MSG_ND_0900,
+ fc_mes0900,
+ fc_msgPreambleNDi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_NODE,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0901
+message: Free node tbl
+descript: The driver is freeing a node table entry.
+data: (1) nlp_DID (2) nlp_flag (3) nlp_Rpi (4) data1
+severity: Information
+log: LOG_NODE verbose
+module: fcrpib.c
+action: None required
+*/
+char fc_mes0901[] = "%sFree node tbl Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0901 = {
+ FC_LOG_MSG_ND_0901,
+ fc_mes0901,
+ fc_msgPreambleNDi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_NODE,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0902
+message: Free node IEEE
+descript: The driver is freeing a node table entry.
+data: (1) IEEE[2] (2) IEEE[3] (3) IEEE[4] (4) IEEE[5]
+severity: Information
+log: LOG_NODE verbose
+module: fcrpib.c
+action: None required
+*/
+char fc_mes0902[] = "%sFree node IEEE Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0902 = {
+ FC_LOG_MSG_ND_0902,
+ fc_mes0902,
+ fc_msgPreambleNDi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_NODE,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0903
+message: BIND node tbl
+descript: The driver is putting the node table entry on the binding list.
+data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1
+severity: Information
+log: LOG_NODE verbose
+module: fcrpib.c
+action: None required
+*/
+char fc_mes0903[] = "%sBIND node tbl Data: x%x x%x x%x x%x";
+msgLogDef fc_msgBlk0903= {
+ FC_LOG_MSG_ND_0903,
+ fc_mes0903,
+ fc_msgPreambleNDi,
+ FC_MSG_OPUT_GLOB_CTRL,
+ FC_LOG_MSG_TYPE_INFO,
+ LOG_NODE,
+ ERRID_LOG_UNEXPECT_EVENT };
+
+/*
+msgName: fc_mes0904
+message: UNMAP node tbl
+descript: The driver is putting the node table entry on the unmapped node list.
+data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0904[] = "%sUNMAP node tbl Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0904 = { + FC_LOG_MSG_ND_0904, + fc_mes0904, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0905 +message: MAP node tbl +descript: The driver is putting the node table entry on the mapped node list. +data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0905[] = "%sMAP node tbl Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0905 = { + FC_LOG_MSG_ND_0905, + fc_mes0905, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0906 +message: FIND node DID unmapped +descript: The driver is searching for a node table entry, on the + unmapped node list, based on DID. +data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0906[] = "%sFIND node DID unmapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0906 = { + FC_LOG_MSG_ND_0906, + fc_mes0906, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0907 +message: FIND node DID mapped +descript: The driver is searching for a node table entry, on the + mapped node list, based on DID. 
+data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0907[] = "%sFIND node DID mapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0907 = { + FC_LOG_MSG_ND_0907, + fc_mes0907, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0908 +message: FIND node DID bind +descript: The driver is searching for a node table entry, on the + binding list, based on DID. +data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0908[] = "%sFIND node DID bind Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0908 = { + FC_LOG_MSG_ND_0908, + fc_mes0908, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0909 +message: FIND node did NOT FOUND +descript: The driver was searching for a node table entry based on DID + and the entry was not found. +data: (1) order +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0909[] = "%sFIND node did x%x NOT FOUND Data: x%x"; +msgLogDef fc_msgBlk0909 = { + FC_LOG_MSG_ND_0909, + fc_mes0909, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0910 +message: FIND node scsi_id unmapped +descript: The driver is searching for a node table entry, on the + unmapped node list, based on the SCSI ID. 
+data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0910[] = "%sFIND node scsi_id unmapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0910 = { + FC_LOG_MSG_ND_0910, + fc_mes0910, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0911 +message: FIND node scsi_id mapped +descript: The driver is searching for a node table entry, on the + mapped node list, based on the SCSI ID. +data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0911[] = "%sFIND node scsi_id mapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0911 = { + FC_LOG_MSG_ND_0911, + fc_mes0911, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0912 +message: FIND node scsi_id bind +descript: The driver is searching for a node table entry, on the + binding list, based on the SCSI ID. +data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0912[] = "%sFIND node scsi_id bind Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0912 = { + FC_LOG_MSG_ND_0912, + fc_mes0912, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0913 +message: FIND node scsi_id NOT FOUND +descript: The driver was searching for a node table entry based on SCSI ID + and the entry was not found. 
+data: (1) scsid (2) order +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0913[] = "%sFIND node scsi_id NOT FOUND Data: x%x x%x"; +msgLogDef fc_msgBlk0913 = { + FC_LOG_MSG_ND_0913, + fc_mes0913, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0914 +message: FIND node wwnn unmapped +descript: The driver is searching for a node table entry, on the + unmapped port list, based on the WWNN. +data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0914[] = "%sFIND node wwnn unmapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0914 = { + FC_LOG_MSG_ND_0914, + fc_mes0914, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0915 +message: FIND node wwnn mapped +descript: The driver is searching for a node table entry, on the + mapped port list, based on the WWNN. +data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0915[] = "%sFIND node wwnn mapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0915 = { + FC_LOG_MSG_ND_0915, + fc_mes0915, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0916 +message: FIND node wwnn bind +descript: The driver is searching for a node table entry, on the + binding list, based on the WWNN. 
+data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0916[] = "%sFIND node wwnn bind Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0916 = { + FC_LOG_MSG_ND_0916, + fc_mes0916, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0917 +message: PUT END nodelist +descript: The driver is freeing a node table entry buffer. +data: (1) bp (2) fc_free +severity: Information +log: LOG_NODE verbose +module: fcmemb.c +action: None required +*/ +char fc_mes0917[] = "%sPUT END nodelist Data: x%x x%x"; +msgLogDef fc_msgBlk0917 = { + FC_LOG_MSG_ND_0917, + fc_mes0917, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0918 +message: FIND node wwnn NOT FOUND +descript: The driver was searching for a node table entry based on WWNN + and the entry was not found. +data: (1) order +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0918[] = "%sFIND node wwnn NOT FOUND Data: x%x"; +msgLogDef fc_msgBlk0918 = { + FC_LOG_MSG_ND_0918, + fc_mes0918, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0919 +message: FIND node wwpn unmapped +descript: The driver is searching for a node table entry, on the + unmapped port list, based on the WWPN. 
+data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0919[] = "%sFIND node wwpn unmapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0919 = { + FC_LOG_MSG_ND_0919, + fc_mes0919, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0920 +message: FIND node wwpn mapped +descript: The driver is searching for a node table entry, on the + mapped port list, based on the WWPN. +data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0920[] = "%sFIND node wwpn mapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0920 = { + FC_LOG_MSG_ND_0920, + fc_mes0920, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0921 +message: FIND node wwpn bind +descript: The driver is searching for a node table entry, on the + binding list, based on the WWPN. +data: (1) nlp (2) nlp_DID (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0921[] = "%sFIND node wwpn bind Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0921 = { + FC_LOG_MSG_ND_0921, + fc_mes0921, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0922 +message: FIND node wwpn NOT FOUND +descript: The driver was searching for a node table entry based on WWPN + and the entry was not found. 
+data: (1) order +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0922[] = "%sFIND node wwpn NOT FOUND Data: x%x"; +msgLogDef fc_msgBlk0922 = { + FC_LOG_MSG_ND_0922, + fc_mes0922, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0923 +message: FIND node xri unmapped +descript: The driver is searching for a node table entry, on the + unmapped port list, based on the XRI. +data: (1) nlp (2) nlp_Xri (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0923[] = "%sFIND node xri unmapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0923 = { + FC_LOG_MSG_ND_0923, + fc_mes0923, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0924 +message: FIND node xri mapped +descript: The driver is searching for a node table entry, on the + mapped port list, based on the XRI. +data: (1) nlp (2) nlp_Xri (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0924[] = "%sFIND node xri mapped Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0924 = { + FC_LOG_MSG_ND_0924, + fc_mes0924, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0925 +message: FIND node xri bind +descript: The driver is searching for a node table entry, on the + binding list, based on the XRI. 
+data: (1) nlp (2) nlp_Xri (3) nlp_flag (4) data1 +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0925[] = "%sFIND node xri bind Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk0925 = { + FC_LOG_MSG_ND_0925, + fc_mes0925, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0926 +message: FIND node xri NOT FOUND +descript: The driver was searching for a node table entry based on the + XRI and the entry was not found. +data: (1) xri (2) order +severity: Information +log: LOG_NODE verbose +module: fcrpib.c +action: None required +*/ +char fc_mes0926[] = "%sFIND node xri NOT FOUND Data: x%x x%x"; +msgLogDef fc_msgBlk0926 = { + FC_LOG_MSG_ND_0926, + fc_mes0926, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0927 +message: GET nodelist +descript: The driver is allocating a buffer to hold a node table entry. +data: (1) bp (2) fc_free +severity: Information +log: LOG_NODE verbose +module: fcmemb.c +action: None required +*/ +char fc_mes0927[] = "%sGET nodelist Data: x%x x%x"; +msgLogDef fc_msgBlk0927 = { + FC_LOG_MSG_ND_0927, + fc_mes0927, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes0928 +message: PUT nodelist +descript: The driver is freeing a node table entry buffer. 
+data: (1) bp (2) fc_free +severity: Information +log: LOG_NODE verbose +module: fcmemb.c +action: None required +*/ +char fc_mes0928[] = "%sPUT nodelist Data: x%x x%x"; +msgLogDef fc_msgBlk0928 = { + FC_LOG_MSG_ND_0928, + fc_mes0928, + fc_msgPreambleNDi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_NODE, + ERRID_LOG_UNEXPECT_EVENT }; + + + +/* + * Begin MISC LOG message structures + */ + +/* +msgName: fc_mes1200 +message: Cannot unload driver while lpfcdiag Interface is active Data +descript: An attempt was made to unload the driver while the DFC + interface was active. +data: (1) lpfcdiag_cnt (2) instance +severity: Error +log: Always +module: fcLINUXfcp.c +action: Exit any application that uses the DFC diagnostic interface + before attempting to unload the driver. +*/ +char fc_mes1200[] = "%sCannot unload driver while lpfcdiag Interface is active Data: x%x x%x"; +msgLogDef fc_msgBlk1200 = { + FC_LOG_MSG_MI_1200, + fc_mes1200, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1201 +message: lpfc_kmalloc: Bad p_dev_ctl +descript: The driver manages its own memory for internal usage. This + error indicates a problem occurred in the driver memory + management routines. This error could also indicate the host + system in low on memory resources. +data: (1) size (2) type (3) fc_idx_dmapool +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a driver or host operating system + problem. If problems persist report these errors to Technical + Support. +*/ +char fc_mes1201[] = "%slpfc_kmalloc: Bad p_dev_ctl Data: x%x x%x x%x"; +msgLogDef fc_msgBlk1201 = { + FC_LOG_MSG_MI_1201, + fc_mes1201, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1202 +message: lpfc_kmalloc: Bad size +descript: The driver manages its own memory for internal usage. 
This + error indicates a problem occurred in the driver memory + management routines. This error could also indicate the host + system in low on memory resources. +data: (1) size (2) type (3) fc_idx_dmapool +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a driver or host operating system + problem. If problems persist report these errors to Technical + Support. +*/ +char fc_mes1202[] = "%slpfc_kmalloc: Bad size Data: x%x x%x x%x"; +msgLogDef fc_msgBlk1202 = { + FC_LOG_MSG_MI_1202, + fc_mes1202, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1203 +message: lpfc_kmalloc: Virt addr failed to alloc +descript: The driver manages its own memory for internal usage. This + error indicates a problem occurred in the driver memory + management routines. This error could also indicate the host + system in low on memory resources. +data: (1) size (2) type +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a driver or host operating system + problem. If problems persist report these errors to Technical + Support. +*/ +char fc_mes1203[] = "%slpfc_kmalloc: Virt addr failed to alloc Data: x%x x%x"; +msgLogDef fc_msgBlk1203 = { + FC_LOG_MSG_MI_1203, + fc_mes1203, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1204 +message: lpfc_kmalloc: Bad virtual addr +descript: The driver manages its own memory for internal usage. This + error indicates a problem occurred in the driver memory + management routines. This error could also indicate the host + system in low on memory resources. +data: (1) i (2) size ( 3) type (4) fc_idx_dmapool +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a driver or host operating system + problem. If problems persist report these errors to Technical + Support. 
+*/ +char fc_mes1204[] = "%slpfc_kmalloc: Bad virtual addr Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk1204 = { + FC_LOG_MSG_MI_1204, + fc_mes1204, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1205 +message: lpfc_kmalloc: dmapool FULL +descript: The driver manages its own memory for internal usage. This + error indicates a problem occurred in the driver memory + management routines. This error could also indicate the host + system in low on memory resources. +data: (1) i (2) size (3) type (4) fc_idx_dmapool +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a driver or host operating system + problem. If problems persist report these errors to Technical + Support. +*/ +char fc_mes1205[] = "%slpfc_kmalloc: dmapool FULL Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk1205 = { + FC_LOG_MSG_MI_1205, + fc_mes1205, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1206 +message: lpfc_kfree: Bad p_dev_ctl +descript: The driver manages its own memory for internal usage. This + error indicates a problem occurred in the driver memory + management routines. This error could also indicate the host + system in low on memory resources. +data: (1) size (2) fc_idx_dmapool +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a driver or host operating system + problem. If problems persist report these errors to Technical + Support. +*/ +char fc_mes1206[] = "%slpfc_kfree: Bad p_dev_ctl Data: x%x x%x"; +msgLogDef fc_msgBlk1206 = { + FC_LOG_MSG_MI_1206, + fc_mes1206, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1207 +message: lpfc_kfree: NOT in dmapool +descript: The driver manages its own memory for internal usage. 
This + error indicates a problem occurred in the driver memory + management routines. This error could also indicate the host + system in low on memory resources. +data: (1) virt (2) size (3) fc_idx_dmapool +severity: Error +log: Always +module: fcLINUXfcp.c +action: This error could indicate a driver or host operating system + problem. If problems persist report these errors to Technical + Support. +*/ +char fc_mes1207[] = "%slpfc_kfree: NOT in dmapool Data: x%x x%x x%x"; +msgLogDef fc_msgBlk1207 = { + FC_LOG_MSG_MI_1207, + fc_mes1207, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1208 +descript: The CT response returned more data than the user buffer could hold. +message: C_CT Request error +data: (1) dfc_flag (2) 4096 +severity: Information +log: LOG_MISC verbose +module: dfcdd.c +action: Modify user application issuing CT request to allow for a larger + response buffer. +*/ +char fc_mes1208[] = "%sC_CT Request error Data: x%x x%x"; +msgLogDef fc_msgBlk1208 = { + FC_LOG_MSG_MI_1208, + fc_mes1208, + fc_msgPreambleMIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1209 +message: RNID Request error +descript: RNID sent back a response that was larger than the driver supports. +data: (1) fc_mptr (2) 4096 +severity: Information +log: LOG_MISC verbose +module: dfcdd.c +action: None required +*/ +char fc_mes1209[] = "%sRNID Request error Data: x%x x%x"; +msgLogDef fc_msgBlk1209 = { + FC_LOG_MSG_MI_1209, + fc_mes1209, + fc_msgPreambleMIi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1210 +message: Convert ASC to hex. Input byte cnt < 1 +descript: ASCII string to hex conversion failed. Input byte count < 1. +data: none +severity: Error +log: Always +action: This error could indicate a software driver problem. 
+ If problems persist report these errors to Technical Support. +*/ +char fc_mes1210[] = "%sConvert ASC to hex. Input byte cnt < 1"; +msgLogDef fc_msgBlk1210 = { + FC_LOG_MSG_MI_1210, + fc_mes1210, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1211 +message: Convert ASC to hex. Input byte cnt > max +descript: ASCII string to hex conversion failed. Input byte count > max. +data: none +severity: Error +log: Always +action: This error could indicate a software driver problem. + If problems persist report these errors to Technical Support. +*/ +char fc_mes1211[] = "%sConvert ASC to hex. Input byte cnt > max %d"; +msgLogDef fc_msgBlk1211 = { + FC_LOG_MSG_MI_1211, + fc_mes1211, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1212 +message: Convert ASC to hex. Output buffer too small +descript: ASCII string to hex conversion failed. The output buffer byte + size is less than 1/2 of input byte count. Every 2 input chars + (bytes) require 1 output byte. +data: none +severity: Error +log: Always +action: This error could indicate a software driver problem. + If problems persist report these errors to Technical Support. +*/ +char fc_mes1212[] = "%sConvert ASC to hex. Output buffer too small"; +msgLogDef fc_msgBlk1212 = { + FC_LOG_MSG_MI_1212, + fc_mes1212, + fc_msgPreambleMIe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1213 +message: Convert ASC to hex. Input char seq not ASC hex. +descript: The ASCII hex input string contains a non-ASCII hex character +data: none +severity: Error configuration +log: Always +action: Make necessary changes to lpfc configuration file. +*/ +char fc_mes1213[] = "%sConvert ASC to hex. 
Input char seq not ASC hex."; +msgLogDef fc_msgBlk1213 = { + FC_LOG_MSG_MI_1213, + fc_mes1213, + fc_msgPreambleMIc, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR_CFG, + LOG_MISC, + ERRID_LOG_UNEXPECT_EVENT }; + +/* + * Begin LINK LOG Message Structures + */ + +/* +msgName: fc_mes1300 +message: Re-establishing Link, timer expired +descript: The driver detected a condition where it had to re-initialize + the link. +data: (1) fc_flag (2) fc_ffstate +severity: Error +log: Always +module: fcclockb.c +action: If numerous link events are occurring, check physical + connections to Fibre Channel network. +*/ +char fc_mes1300[] = "%sRe-establishing Link, timer expired Data: x%x x%x"; +msgLogDef fc_msgBlk1300 = { + FC_LOG_MSG_LK_1300, + fc_mes1300, + fc_msgPreambleLKe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_LINK_EVENT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1301 +message: Re-establishing Link +descript: The driver detected a condition where it had to re-initialize + the link. +data: (1) status (2) status1 (3) status2 +severity: Information +log: LOG_LINK_EVENT verbose +module: lp6000.c +action: If numerous link events are occurring, check physical + connections to Fibre Channel network. +*/ +char fc_mes1301[] = "%sRe-establishing Link Data: x%x x%x x%x"; +msgLogDef fc_msgBlk1301 = { + FC_LOG_MSG_LK_1301, + fc_mes1301, + fc_msgPreambleLKi, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_INFO, + LOG_LINK_EVENT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1302 +message: Reset link speed to auto. 1G node detected in loop. +descript: The driver is reinitializing the link speed to auto-detect. + This action results if a 2G HBA is configured for a link + speed of 2G and the HBA detects a node that does NOT + support 2G link speed. All nodes on that loop will come + up with a link speed equal to 1G. +data: none +severity: Warning +log: LOG_LINK_EVENT verbose +module: lp6000.c +action: None required +*/ +char fc_mes1302[] = "%sReset link speed to auto. 
1G node detected in loop."; +msgLogDef fc_msgBlk1302 = { + FC_LOG_MSG_LK_1302, + fc_mes1302, + fc_msgPreambleLKw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_LINK_EVENT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1303 +message: Reset link speed to auto. 1G HBA cfg'd for 2G. +descript: The driver is reinitializing the link speed to auto-detect. + This action results if a 1G HBA is configured for 2G + link speed operation. All nodes on that loop will come + up with a link speed equal to 1G. +data: none +severity: Warning +log: LOG_LINK_EVENT verbose +module: fcfcp.c +action: None required +*/ +char fc_mes1303[] = "%sReset link speed to auto. 1G HBA cfg'd for 2G"; +msgLogDef fc_msgBlk1303 = { + FC_LOG_MSG_LK_1303, + fc_mes1303, + fc_msgPreambleLKw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_LINK_EVENT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1304 +message: Link Up Event received +descript: A link up event was received. It is also possible for + multiple link events to be received together. +data: (1) eventTag (2) fc_eventTag (3) granted_AL_PA (4) alpa_map[0] +detail: If multiple link events received, log (1) current event + number, (2) last event number received, (3) ALPA granted, + (4) number of entries in the loop init LILP ALPA map. + An ALPA map message is also recorded if LINK_EVENT + verbose mode is set. Each ALPA map message contains + 16 ALPAs. +severity: Error +log: Always +module: fcscsib.c +action: If numerous link events are occurring, check physical + connections to Fibre Channel network. +*/ +char fc_mes1304[] = "%sLink Up Event received Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk1304 = { + FC_LOG_MSG_LK_1304, + fc_mes1304, + fc_msgPreambleLKe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_LINK_EVENT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1305 +message: Link Up Event ALPA map +descript: A link up event was received. 
+data: (1) wd1 (2) wd2 (3) wd3 (4) wd4 +severity: Warning +log: LOG_LINK_EVENT verbose +module: fcscsib.c +action: If numerous link events are occurring, check physical + connections to Fibre Channel network. +*/ +char fc_mes1305[] = "%sLink Up Event ALPA map Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk1305 = { + FC_LOG_MSG_LK_1305, + fc_mes1305, + fc_msgPreambleLKw, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_WARN, + LOG_LINK_EVENT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1306 +message: Link Down Event received +descript: A link down event was received. +data: (1) eventTag (2) fc_eventTag (3) granted_AL_PA (4) alpa_map[0] +severity: Error +log: Always +module: fcscsib.c +action: If numerous link events are occurring, check physical + connections to Fibre Channel network. +*/ +char fc_mes1306[] = "%sLink Down Event received Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk1306 = { + FC_LOG_MSG_LK_1306, + fc_mes1306, + fc_msgPreambleLKe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_LINK_EVENT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1307 +message: SCSI Link Reset +descript: The SCSI layer has determined the link needs to be reset. + A LIP is sent to restart loop initialization. +data: None +severity: Error +log: Always +module: fcscsib.c +action: None required +*/ +char fc_mes1307[] = "%sSCSI Link Reset"; +msgLogDef fc_msgBlk1307 = { + FC_LOG_MSG_LK_1307, + fc_mes1307, + fc_msgPreambleLKe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_LINK_EVENT, + ERRID_LOG_UNEXPECT_EVENT }; + +/* + * Begin SLI LOG Message Structures + */ + +/* +msgName: fc_mes1400 +message: Unknown IOCB command +descript: Received an unknown IOCB command completion. +data: (1) ulpCommand (2) ulpStatus (3) ulpIoTag (4) ulpContext) +severity: Error +log: Always +module: lp6000.c +action: This error could indicate a software driver or firmware + problem. If these problems persist, report these errors + to Technical Support. 
+*/ +char fc_mes1400[] = "%sUnknown IOCB command Data: x%x x%x x%x x%x"; +msgLogDef fc_msgBlk1400 = { + FC_LOG_MSG_LK_1400, + fc_mes1400, + fc_msgPreambleSLe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_SLI, + ERRID_LOG_UNEXPECT_EVENT }; + +/* +msgName: fc_mes1401 +message: Command ring timeout +descript: An IOCB command was posted to a ring and did not complete + within a timeout based on RATOV. +data: (1) IOCB command (2) ulpCommand +severity: Error +log: Always +module: fcscsib.c +action: This error could indicate a software driver or firmware + problem. If no I/O is going through the adapter, reboot + the system. If these problems persist, report these errors + to Technical Support. +*/ +char fc_mes1401[] = "%sCommand ring %d timeout Data: x%x"; +msgLogDef fc_msgBlk1401 = { + FC_LOG_MSG_LK_1401, + fc_mes1401, + fc_msgPreambleSLe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_SLI, + ERRID_LOG_TIMEOUT }; + +/* +msgName: fc_mes1402 +message: Command ring timeout +descript: An IOCB command was posted to a ring and did not complete + within a timeout based on RATOV. +data: None +severity: Error +log: Always +module: fcscsib.c +action: This error could indicate a software driver or firmware + problem. If no I/O is going through the adapter, reboot + the system. If these problems persist, report these errors + to Technical Support. 
+*/ +char fc_mes1402[] = "%sCommand ring %d timeout"; +msgLogDef fc_msgBlk1402 = { + FC_LOG_MSG_LK_1402, + fc_mes1402, + fc_msgPreambleSLe, + FC_MSG_OPUT_GLOB_CTRL, + FC_LOG_MSG_TYPE_ERR, + LOG_SLI, + ERRID_LOG_TIMEOUT }; + + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcrpib.c current/drivers/scsi/lpfc/fcrpib.c --- reference/drivers/scsi/lpfc/fcrpib.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcrpib.c 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,2675 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#include "fc_os.h" + +#include "fc_hw.h" +#include "fc.h" + +#include "fcdiag.h" +#include "fcfgparm.h" +#include "fcmsg.h" +#include "fc_crtn.h" +#include "fc_ertn.h" + +extern fc_dd_ctl_t DD_CTL; +extern iCfgParam icfgparam[]; +extern int fc_max_els_sent; + +/* Routine Declaration - Local */ +_local_ int fc_addrauth(fc_dev_ctl_t *p_dev_ctl); +_local_ void fc_clear_fcp_iocbq(fc_dev_ctl_t *p_dev_ctl, NODELIST *nlp); +_local_ void fc_clear_ip_iocbq(fc_dev_ctl_t *p_dev_ctl, NODELIST *nlp); +_local_ int fc_matchdid(FC_BRD_INFO *binfo, NODELIST *nlp, uint32 did); +/* End Routine Declaration - Local */ + +/* + * Array of all 126 valid AL_PA's (excluding FL_PORT AL_PA 0) + */ + +static uchar staticAlpaArray[] = +{ + 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B, 0x1D, + 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, + 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, + 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, + 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x59, 0x5A, 0x5C, + 0x63, 0x65, 0x66, 0x67, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, + 0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, + 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC, + 0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, + 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, + 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, 0xDA, 0xDC, + 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF +}; + +int fc_fdmi_on = 0; + +_local_ int +fc_matchdid( +FC_BRD_INFO *binfo, +NODELIST *ndlp, +uint32 did) +{ + D_ID mydid; + D_ID odid; + D_ID ndid; + int zero_did; + + if (did == Bcast_DID) + return(0); + + zero_did = 0; + if (ndlp->nlp_DID == 0) { + ndlp->nlp_DID = ndlp->nlp_oldDID; + zero_did = 1; + } + + /* First check for Direct match */ + if (ndlp->nlp_DID == did) + return(1); + + /* Next check for area/domain == 
0 match */ + mydid.un.word = binfo->fc_myDID; + if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { + goto out; + } + + ndid.un.word = did; + odid.un.word = ndlp->nlp_DID; + if (ndid.un.b.id == odid.un.b.id) { + if ((mydid.un.b.domain == ndid.un.b.domain) && + (mydid.un.b.area == ndid.un.b.area)) { + ndid.un.word = ndlp->nlp_DID; + odid.un.word = did; + if ((ndid.un.b.domain == 0) && + (ndid.un.b.area == 0)) { + if (ndid.un.b.id) + return(1); + } + goto out; + } + + ndid.un.word = ndlp->nlp_DID; + if ((mydid.un.b.domain == ndid.un.b.domain) && + (mydid.un.b.area == ndid.un.b.area)) { + odid.un.word = ndlp->nlp_DID; + ndid.un.word = did; + if ((ndid.un.b.domain == 0) && + (ndid.un.b.area == 0)) { + if (ndid.un.b.id) + return(1); + } + } + } +out: + if(zero_did) + ndlp->nlp_DID = 0; + return(0); +} /* End fc_matchdid */ + + +/**************************************************/ +/** fc_nlpadjust **/ +/** **/ +/** This routine adjusts the timestamp in the **/ +/** nlplist when the counter wraps **/ +/**************************************************/ +_static_ int +fc_nlpadjust( +FC_BRD_INFO *binfo) +{ + NODELIST * ndlp; + NODELIST * nlphi, *nlpprev; + uint32 rpts; + + nlphi = 0; + nlpprev = 0; + rpts = 0; + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if (ndlp->nlp_time > rpts) { + rpts = ndlp->nlp_time; + nlpprev = nlphi; + nlphi = ndlp; + } + + switch (ndlp->nlp_state) { + case NLP_LIMBO: + case NLP_LOGOUT: + ndlp->nlp_time = 1; + break; + + case NLP_ALLOC: + ndlp->nlp_time = 3; + break; + + default: + ndlp->nlp_time = 2; + break; + } + ndlp = (NODELIST *)ndlp->nlp_listp_next; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } 
+ + if (nlpprev) + nlpprev->nlp_time = 4; + if (nlphi) + nlphi->nlp_time = 5; + binfo->nlptimer = 6; + return(0); +} /* End fc_nlpadjust */ + + +/**************************************************/ +/** fc_findnode_rpi **/ +/** **/ +/** This routine find a node by rpi **/ +/**************************************************/ +_static_ NODELIST * +fc_findnode_rpi( +FC_BRD_INFO *binfo, +uint32 rpi) +{ + NODELIST * ndlp = 0; + + if (rpi < NLP_MAXRPI) + ndlp = binfo->fc_nlplookup[rpi]; + /* FIND node rpi */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0900, /* ptr to msg structure */ + fc_mes0900, /* ptr to msg */ + fc_msgBlk0900.msgPreambleStr, /* begin varargs */ + (ulong)ndlp, + rpi); /* end varargs */ + return(ndlp); +} /* End fc_findnode_rpi */ + + +/**************************************************/ +/** fc_freenode_did **/ +/** **/ +/** This routine will free an NODELIST entry **/ +/** associated with did. **/ +/**************************************************/ +_static_ int +fc_freenode_did( +FC_BRD_INFO *binfo, +uint32 did, +int rm) +{ + NODELIST * ndlp; + + + if(((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, did)) == 0) || + (ndlp->nlp_state == NLP_SEED)) { + /* no match found */ + return(0); + } + + fc_freenode(binfo, ndlp, rm); + if(rm == 0) { + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + return(1); +} /* End fc_freenode_did */ + + +/**************************************************/ +/** fc_free_rpilist **/ +/** **/ +/** This routine will free all NODELIST entries **/ +/** and associated buffers. 
**/ +/**************************************************/ +_static_ int +fc_free_rpilist( +fc_dev_ctl_t *p_dev_ctl, +int keeprpi) +{ + FC_BRD_INFO * binfo; + NODELIST * ndlp; + NODELIST * new_ndlp; + RING * rp; + IOCBQ * xmitiq; + iCfgParam * clp; + struct buf * bp; + T_SCSIBUF * sbp; + dvi_t * dev_ptr; + fc_buf_t * fcptr; + node_t * node_ptr; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* if keeprpi == 0, toss everything on ELS xmitq and xmit pending queue */ + if (keeprpi == 0) { + rp = &binfo->fc_ring[FC_ELS_RING]; + /* get next command from ring xmit queue */ + while ((xmitiq = fc_ringtx_drain(rp)) != 0) { + fc_freebufq(p_dev_ctl, rp, xmitiq); + } + + /* look up xmit next compl */ + while ((xmitiq = fc_ringtxp_get(rp, 0)) != 0) { + fc_freebufq(p_dev_ctl, rp, xmitiq); + } + } + + /* Toss everything on LAN xmitq and xmit pending queue */ + rp = &binfo->fc_ring[FC_IP_RING]; + /* get next command from ring xmit queue */ + while ((xmitiq = fc_ringtx_drain(rp)) != 0) { + fc_freebufq(p_dev_ctl, rp, xmitiq); + } + + /* look up xmit next compl */ + while ((xmitiq = fc_ringtxp_get(rp, 0)) != 0) { + fc_freebufq(p_dev_ctl, rp, xmitiq); + } + + NDDSTAT.ndd_xmitque_cur = 0; + + if(clp[CFG_FCP_ON].a_current) { + int i; + + rp = &binfo->fc_ring[FC_FCP_RING]; + + for(i=0;ifc_table->fcp_array[i]) && + (fcptr = fc_deq_fcbuf_active(rp, (ushort)i))) { + dev_ptr = fcptr->dev_ptr; + + if(dev_ptr->queue_state == ACTIVE_PASSTHRU) { + node_t * map_node_ptr; + struct dev_info * map_dev_ptr; + + map_node_ptr = (node_t *)dev_ptr->pend_head; + map_dev_ptr = (struct dev_info *)dev_ptr->pend_tail; + dev_ptr->pend_head = 0; + dev_ptr->pend_tail = 0; + dev_ptr->queue_state = HALTED; + dev_ptr->active_io_count--; + if(map_dev_ptr) + map_dev_ptr->active_io_count--; + if(map_node_ptr) + map_node_ptr->num_active_io--; + + dev_ptr->ioctl_event = IOSTAT_LOCAL_REJECT; + + dev_ptr->ioctl_errno = IOERR_SEQUENCE_TIMEOUT; + dev_ptr->sense_length = 0; + dev_ptr->clear_count = 0; + 
continue; + } /* end ACTIVE_PASSTHRU management */ + + sbp = fcptr->sc_bufp; + bp = (struct buf *) sbp; + + + /* E_D_TOV timeout */ + bp->b_error = ETIMEDOUT; + + sbp->adap_q_status = SC_DID_NOT_CLEAR_Q; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_CMD_TIMEOUT) + + if (fcptr->fcp_cmd.fcpCntl2) { + /* This is a task management command */ + if (bp->b_flags & B_ERROR) + dev_ptr->ioctl_errno = bp->b_error; + else + dev_ptr->ioctl_errno = 0; + + if (fcptr->fcp_cmd.fcpCntl2 == ABORT_TASK_SET) + dev_ptr->flags &= ~SCSI_ABORT_TSET; + + if (fcptr->fcp_cmd.fcpCntl2 & TARGET_RESET) + dev_ptr->flags &= ~SCSI_TARGET_RESET; + + if (fcptr->fcp_cmd.fcpCntl2 & LUN_RESET) + dev_ptr->flags &= ~SCSI_LUN_RESET; + + if (dev_ptr->ioctl_wakeup == 1) { + dev_ptr->ioctl_wakeup = 0; + + fc_admin_wakeup(p_dev_ctl, dev_ptr, sbp); + } else { + fc_do_iodone(bp); + } + } else { + fc_do_iodone(bp); + } + dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + fc_enq_fcbuf(fcptr); + } + } + + fc_failio(p_dev_ctl); + + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + + /* Make sure pendq is empty */ + i = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + if ((node_ptr = binfo->device_queue_hash[i].node_ptr) != NULL) { + for (dev_ptr = node_ptr->lunlist; dev_ptr != NULL; + dev_ptr = dev_ptr->next) { + while ((sbp = dev_ptr->pend_head) != NULL) { + dev_ptr->pend_count--; + dev_ptr->pend_head = (T_SCSIBUF *) sbp->bufstruct.av_forw; + if (dev_ptr->pend_head == NULL) + dev_ptr->pend_tail = NULL; + else + dev_ptr->pend_head->bufstruct.av_back = NULL; + + sbp->bufstruct.b_flags |= B_ERROR; + sbp->bufstruct.b_error = EIO; + sbp->bufstruct.b_resid = sbp->bufstruct.b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_NO_DEVICE_RESPONSE) + + sbp->bufstruct.av_forw = 0; 
+ fc_do_iodone((struct buf *) sbp); + } + } + } + ndlp = binfo->fc_nlpmap_start; + } + } + + /* Put all Mapped and unmapped nodes on the bind list */ + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + /* Put adapter into an error state and return all outstanding I/Os */ + fc_linkdown(p_dev_ctl); + + return(0); +} /* End fc_free_rpilist */ + + +/* + * Issue an ABORT_XRI_CX iocb command to abort an exchange + */ +_static_ int +fc_rpi_abortxri( +FC_BRD_INFO *binfo, +ushort xri) +{ + IOCB * icmd; + IOCBQ * temp; + RING * rp; + + /* Use IP ring so ABTS comes out after CLEAR_LA */ + rp = &binfo->fc_ring[FC_IP_RING]; + + /* Get an iocb buffer */ + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == 0) { + return(1); + } + fc_bzero((void *)temp, sizeof(IOCBQ)); + icmd = &temp->iocb; + + icmd->un.acxri.abortType = ABORT_TYPE_ABTS; + icmd->ulpContext = xri; + + /* set up an iotag */ + icmd->ulpIoTag = rp->fc_iotag++; + if (rp->fc_iotag == 0) { + rp->fc_iotag = 1; + } + + icmd->ulpLe = 1; + icmd->ulpClass = CLASS3; + icmd->ulpCommand = CMD_ABORT_XRI_CX; + icmd->ulpOwner = OWN_CHIP; + + issue_iocb_cmd(binfo, rp, temp); + + return(0); +} /* End fc_rpi_abortxri */ + + +/**************************************************/ +/** fc_freenode **/ +/** **/ +/** This routine will remove NODELIST entries **/ +/** rm determines state to leave entry at, **/ +/** either NLP_UNUSED or NLP_LOGOUT **/ +/**************************************************/ +_static_ int +fc_freenode( +FC_BRD_INFO *binfo, +NODELIST *nlp, +int rm) +{ + MAILBOXQ * mbox; + node_t * node_ptr; + uint32 data1; + + data1 = ( 
((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + rm ); + + /* Free node tbl */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0901, /* ptr to msg structure */ + fc_mes0901, /* ptr to msg */ + fc_msgBlk0901.msgPreambleStr, /* begin varargs */ + nlp->nlp_DID, + nlp->nlp_flag, + nlp->nlp_Rpi, + data1); /* end varargs */ + /* FREE node IEEE */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0902, /* ptr to msg structure */ + fc_mes0902, /* ptr to msg */ + fc_msgBlk0902.msgPreambleStr, /* begin varargs */ + nlp->nlp_nodename.IEEE[2], + nlp->nlp_nodename.IEEE[3], + nlp->nlp_nodename.IEEE[4], + nlp->nlp_nodename.IEEE[5]); /* end varargs */ + + + if(nlp == &binfo->fc_fcpnodev) + return(1); + + if(nlp->nlp_listp_next) { + if (nlp->nlp_flag & NLP_MAPPED) { + nlp->nlp_time = binfo->nlptimer++; + if (nlp->nlp_time == 0) { + fc_nlpadjust(binfo); + } + nlp->nlp_flag &= ~NLP_MAPPED; + binfo->fc_map_cnt--; + fc_deque(nlp); + } + else if (nlp->nlp_flag & NLP_UNMAPPED) { + nlp->nlp_time = binfo->nlptimer++; + if (nlp->nlp_time == 0) { + fc_nlpadjust(binfo); + } + nlp->nlp_flag &= ~NLP_UNMAPPED; + binfo->fc_unmap_cnt--; + fc_deque(nlp); + } + else if (nlp->nlp_flag & NLP_BIND) { + nlp->nlp_flag &= ~NLP_BIND; + binfo->fc_bind_cnt--; + fc_deque(nlp); + } + } + + /* Unregister login with firmware for this node */ + if (nlp->nlp_Rpi) { + /* Unregister login */ + if ((mbox = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_unreg_login(binfo, nlp->nlp_Rpi, (MAILBOX * )mbox); + if (nlp->nlp_flag & NLP_UNREG_LOGO) { + ((MAILBOX *)mbox)->un.varWords[30] = nlp->nlp_DID; + } + if (issue_mb_cmd(binfo, (MAILBOX * )mbox, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbox); + } + } + binfo->fc_nlplookup[nlp->nlp_Rpi] = 0; + nlp->nlp_Rpi = 0; + } + + if (nlp->nlp_DID) { + RING * rp; + IOCBQ * iocbq; + unsigned long iflag; + fc_dev_ctl_t *p_dev_ctl; + + /* Look through ELS ring and remove any ELS 
cmds in progress */ + p_dev_ctl = (fc_dev_ctl_t *)binfo->fc_p_dev_ctl; + iflag = lpfc_q_disable_lock(p_dev_ctl); + rp = &binfo->fc_ring[FC_ELS_RING]; + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + if (iocbq->iocb.un.elsreq.remoteID == nlp->nlp_DID) { + iocbq->retry = 0xff; /* Mark for abort */ + if((binfo->fc_flag & FC_RSCN_MODE) || + (binfo->fc_ffstate < FC_READY)) { + if((nlp->nlp_state >= NLP_PLOGI) && + (nlp->nlp_state <= NLP_PRLI)) { + nlp->nlp_action &= ~NLP_DO_RSCN; + binfo->fc_nlp_cnt--; + if ((nlp->nlp_type & NLP_IP_NODE) && nlp->nlp_bp) { + m_freem((fcipbuf_t *)nlp->nlp_bp); + nlp->nlp_bp = (uchar * )0; + } + } + } + } + iocbq = (IOCBQ * )iocbq->q; + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + /* In case its on fc_delay_timeout list */ + fc_abort_delay_els_cmd((fc_dev_ctl_t *)binfo->fc_p_dev_ctl, nlp->nlp_DID); + + nlp->nlp_oldDID = nlp->nlp_DID; /* save the old DID */ + } + + if (nlp->nlp_flag & (NLP_REQ_SND | NLP_REQ_SND_ADISC)) { + nlp->nlp_flag &= ~(NLP_REQ_SND | NLP_REQ_SND_ADISC); + /* Goto next entry */ + fc_nextnode((fc_dev_ctl_t * )(binfo->fc_p_dev_ctl), nlp); + } + + + if (nlp->nlp_type & NLP_IP_NODE) { + if (nlp->nlp_bp) { + m_freem((fcipbuf_t * )nlp->nlp_bp); + nlp->nlp_bp = 0; + } + + /* Go remove all entries for this node from the IP IOCBQ */ + fc_clear_ip_iocbq((fc_dev_ctl_t * )(binfo->fc_p_dev_ctl), nlp); + } + + if (nlp->nlp_type & NLP_FCP_TARGET) { + iCfgParam * clp; + + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* Go remove all entries for this RPI from the FCP IOCBQ */ + fc_clear_fcp_iocbq((fc_dev_ctl_t *)binfo->fc_p_dev_ctl, nlp); + + if ((node_ptr = (node_t * )(nlp->nlp_targetp)) != NULL) { + dvi_t * dev_ptr; + + node_ptr->rpi = 0xfffe; + node_ptr->flags &= ~FC_FCP2_RECOVERY; + for (dev_ptr = node_ptr->lunlist; dev_ptr != NULL; + dev_ptr = dev_ptr->next) { + if(binfo->fc_ffstate == FC_READY){ + /* + * Cause the standby queue to drain. 
+ */ + fc_return_standby_queue(dev_ptr, + (uchar)((binfo->fc_flag & FC_BUS_RESET) ? EIO : EFAULT), 0); + + fc_fail_pendq(dev_ptr, + (uchar)((binfo->fc_flag & FC_BUS_RESET) ? EIO : EFAULT), 0); + /* UNREG_LOGIN from freenode should abort txp I/Os */ + fc_fail_cmd(dev_ptr, (char)((binfo->fc_flag & FC_BUS_RESET) ? + EIO : EFAULT), 0); + + /* Call iodone for all the CLEARQ error bufs */ + fc_free_clearq(dev_ptr); + } + dev_ptr->queue_state = HALTED; + } + } + + /* Is this node, automapped or seeded */ + if(nlp->nlp_type & (NLP_AUTOMAP | NLP_SEED_MASK)) { + /* If FCP we need to keep entry around for the wwpn - sid mapping */ + nlp->nlp_type = (NLP_FCP_TARGET | + (nlp->nlp_type & (NLP_AUTOMAP | NLP_SEED_MASK))); + if(nlp->nlp_type & NLP_SEED_DID) { + fc_bzero((void *)&nlp->nlp_portname, sizeof(NAME_TYPE)); + fc_bzero((void *)&nlp->nlp_nodename, sizeof(NAME_TYPE)); + } + else { + nlp->nlp_DID = 0; + } + } + else { + nlp->nlp_flag = 0; + nlp->nlp_action = 0; + nlp->nlp_type = 0; + nlp->nlp_targetp = 0; + nlp->id.nlp_pan = 0; + nlp->id.nlp_sid = 0; + } + + if(node_ptr && (clp[CFG_NODEV_TMO].a_current) && + ((node_ptr->flags & FC_NODEV_TMO) == 0)) { + if(node_ptr->nodev_tmr == 0) { + /* START nodev timer */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0700, /* ptr to msg structure */ + fc_mes0700, /* ptr to msg */ + fc_msgBlk0700.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_flag, + nlp->nlp_state, + nlp->nlp_DID); /* end varargs */ + + if(binfo->fc_ffstate != FC_LINK_DOWN) { + node_ptr->nodev_tmr = + fc_clk_set((fc_dev_ctl_t * )(binfo->fc_p_dev_ctl), + clp[CFG_NODEV_TMO].a_current, fc_nodev_timeout, + (void *)node_ptr, 0); + } + else { + node_ptr->nodev_tmr = + fc_clk_set((fc_dev_ctl_t * )(binfo->fc_p_dev_ctl), + (clp[CFG_NODEV_TMO].a_current + clp[CFG_LINKDOWN_TMO].a_current), + fc_nodev_timeout, (void *)node_ptr, 0); + } + } + } + } + else { + nlp->nlp_targetp = 0; + nlp->id.nlp_pan = 0; + nlp->id.nlp_sid = 0; + nlp->nlp_type = 0; + } + + 
nlp->nlp_Xri = 0; + nlp->nlp_action = 0; + + if (rm) { + fc_bzero((void *)nlp, sizeof(NODELIST)); + fc_mem_put(binfo, MEM_NLP, (uchar *)nlp); + } else { + if(nlp->nlp_flag & NLP_NS_REMOVED) + nlp->nlp_flag = NLP_NS_REMOVED; + else + nlp->nlp_flag = 0; + + /* Keep the entry cached */ + nlp->nlp_state = NLP_LIMBO; + /* Let the caller put it on the appropriate q at the appropriate time */ + } + return(1); +} /* End fc_freenode */ + + +/************************************************************************/ +/* */ +/* NAME: fc_clear_fcp_iocbq */ +/* */ +/* FUNCTION: Fail All FCP Commands in IOCBQ for one RPI */ +/* */ +/* This routine is called to clear out all FCP commands */ +/* in the IOCBQ for a specific SCSI FCP device RPI. */ +/* */ +/* EXECUTION ENVIRONMENT: */ +/* This routine can only be called on priority levels */ +/* equal to that of the interrupt handler. */ +/* */ +/* DATA STRUCTURES: */ +/* sc_buf - input/output request struct used between the adapter */ +/* driver and the calling SCSI device driver */ +/* */ +/* INPUTS: */ +/* NODELIST structure - pointer to device node structure */ +/* */ +/* RETURN VALUE DESCRIPTION: */ +/* none */ +/* */ +/************************************************************************/ +_local_ void +fc_clear_fcp_iocbq( +fc_dev_ctl_t *p_dev_ctl, +NODELIST *ndlp) +{ + FC_BRD_INFO * binfo; + T_SCSIBUF * sbp; + RING * rp; + IOCBQ * iocb_cmd, *next; + IOCB * icmd; + dvi_t * dev_ptr; + fc_buf_t * fcptr; + struct buf * bp; + Q tmpq; + + binfo = &BINFO; + + /* Clear out all fc_buf structures in the iocb queue for this RPI */ + rp = &binfo->fc_ring[FC_FCP_RING]; + tmpq.q_first = NULL; + + /* Get next command from ring xmit queue */ + iocb_cmd = fc_ringtx_get(rp); + + while (iocb_cmd) { + icmd = &iocb_cmd->iocb; + if ((icmd->ulpCommand != CMD_IOCB_CONTINUE_CN) && + (icmd->ulpContext == ndlp->nlp_Rpi)) { + + if ((fcptr = fc_deq_fcbuf_active(rp, icmd->ulpIoTag)) != NULL) { + dev_ptr = fcptr->dev_ptr; + /* Reject this command with 
error */ + if(dev_ptr && (dev_ptr->queue_state == ACTIVE_PASSTHRU)) { + node_t * map_node_ptr; + struct dev_info * map_dev_ptr; + + map_node_ptr = (node_t *)dev_ptr->pend_head; + map_dev_ptr = (struct dev_info *)dev_ptr->pend_tail; + dev_ptr->pend_head = 0; + dev_ptr->pend_tail = 0; + dev_ptr->queue_state = HALTED; + dev_ptr->active_io_count--; + if(map_dev_ptr) + map_dev_ptr->active_io_count--; + if(map_node_ptr) + map_node_ptr->num_active_io--; + + dev_ptr->ioctl_event = IOSTAT_LOCAL_REJECT; + + dev_ptr->ioctl_errno = IOERR_SEQUENCE_TIMEOUT; + dev_ptr->sense_length = 0; + dev_ptr->clear_count = 0; + while ((iocb_cmd = fc_ringtx_get(rp)) != NULL) { + icmd = &iocb_cmd->iocb; + if (icmd->ulpCommand != CMD_IOCB_CONTINUE_CN) + break; + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocb_cmd); + } + continue; + } /* end ACTIVE_PASSTHRU management */ + + if (fcptr->fcp_cmd.fcpCntl2) { + + bp = (struct buf *)fcptr->sc_bufp; + + if(dev_ptr) { + /* This is a task management command */ + dev_ptr->ioctl_errno = ENXIO; + dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + + if (fcptr->fcp_cmd.fcpCntl2 == ABORT_TASK_SET) + dev_ptr->flags &= ~SCSI_ABORT_TSET; + + if (fcptr->fcp_cmd.fcpCntl2 & TARGET_RESET) + dev_ptr->flags &= ~SCSI_TARGET_RESET; + + if (fcptr->fcp_cmd.fcpCntl2 & LUN_RESET) + dev_ptr->flags &= ~SCSI_LUN_RESET; + + if (fcptr->dev_ptr->ioctl_wakeup) { + fcptr->dev_ptr->ioctl_wakeup = 0; + fc_admin_wakeup(((fc_dev_ctl_t *)(binfo->fc_p_dev_ctl)), + fcptr->dev_ptr, fcptr->sc_bufp); + } + } + } else { + /* This is a regular FCP command */ + + bp = (struct buf *)fcptr->sc_bufp; + bp->b_error = ENXIO; + bp->b_resid = bp->b_bcount; + bp->b_flags |= B_ERROR; + + sbp = (T_SCSIBUF *)bp; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_NO_DEVICE_RESPONSE) + + dev_ptr = fcptr->dev_ptr; + if(dev_ptr) { + dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + } + + fc_delay_iodone(p_dev_ctl, sbp); + } + fc_enq_fcbuf(fcptr); + } + + 
fc_mem_put(binfo, MEM_IOCB, (uchar * )iocb_cmd); + + while ((iocb_cmd = fc_ringtx_get(rp)) != NULL) { + icmd = &iocb_cmd->iocb; + if (icmd->ulpCommand != CMD_IOCB_CONTINUE_CN) + break; + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocb_cmd); + } + } else { + /* Queue this iocb to the temporary queue */ + if (tmpq.q_first) { + ((IOCBQ * )tmpq.q_last)->q = (uchar * )iocb_cmd; + tmpq.q_last = (uchar * )iocb_cmd; + } else { + tmpq.q_first = (uchar * )iocb_cmd; + tmpq.q_last = (uchar * )iocb_cmd; + } + iocb_cmd->q = NULL; + + iocb_cmd = fc_ringtx_get(rp); + } + } + + /* Put the temporary queue back in the FCP iocb queue */ + iocb_cmd = (IOCBQ * )tmpq.q_first; + while (iocb_cmd) { + next = (IOCBQ * )iocb_cmd->q; + fc_ringtx_put(rp, iocb_cmd); + iocb_cmd = next; + } + + return; +} /* End fc_clear_fcp_iocbq */ + + +/************************************************************************/ +/* */ +/* NAME: fc_clear_ip_iocbq */ +/* */ +/* FUNCTION: Fail All IP Commands in IOCBQ for one RPI */ +/* */ +/* This routine is called to clear out all IP commands */ +/* in the IOCBQ for a specific IP device RPI. */ +/* */ +/* EXECUTION ENVIRONMENT: */ +/* This routine can only be called on priority levels */ +/* equal to that of the interrupt handler. 
*/ +/* */ +/* INPUTS: */ +/* NODELIST structure - pointer to device node structure */ +/* */ +/* RETURN VALUE DESCRIPTION: */ +/* none */ +/* */ +/************************************************************************/ +_local_ void +fc_clear_ip_iocbq( +fc_dev_ctl_t *p_dev_ctl, +NODELIST *ndlp) +{ + FC_BRD_INFO * binfo; + RING * rp; + IOCBQ * xmitiq, *next; + IOCB * icmd; + Q tmpq; + fcipbuf_t * p_mbuf; + fcipbuf_t * m_net; + int i; + ULP_BDE64 * bpl; + MATCHMAP * bmp; + + binfo = &BINFO; + /* Clear out all commands in the iocb queue for this RPI */ + rp = &binfo->fc_ring[FC_IP_RING]; + tmpq.q_first = NULL; + + /* Get next command from ring xmit queue */ + xmitiq = fc_ringtx_drain(rp); + + while (xmitiq) { + icmd = &xmitiq->iocb; + if (((icmd->ulpCommand == CMD_XMIT_SEQUENCE_CX) || + (icmd->ulpCommand == CMD_XMIT_SEQUENCE64_CX) || + (icmd->ulpCommand == 0)) && + (ndlp == (NODELIST * )xmitiq->info)) { + + /* get mbuf ptr for xmit */ + m_net = (fcipbuf_t * )xmitiq->bp; + if (ndlp->nlp_DID == NameServer_DID) { + if (binfo->fc_flag & FC_SLI2) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + fc_mem_put(binfo, MEM_BUF, (uchar * )m_net); + goto out; + } + + /* Loop thru BDEs and unmap memory pages associated with mbuf */ + if (binfo->fc_flag & FC_SLI2) { + bmp = (MATCHMAP * )xmitiq->bpl; + bpl = (ULP_BDE64 * )bmp->virt; + while (bpl && xmitiq->iocb.un.xseq64.bdl.bdeSize) { + fc_bufunmap(p_dev_ctl, (uchar *)getPaddr(bpl->addrHigh, bpl->addrLow), 0, bpl->tus.f.bdeSize); + bpl++; + xmitiq->iocb.un.xseq64.bdl.bdeSize -= sizeof(ULP_BDE64); + } + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + } else { + for (i = 0; i < (int)xmitiq->iocb.ulpBdeCount; i++) { + fc_bufunmap(p_dev_ctl, (uchar *)((ulong)xmitiq->iocb.un.cont[i].bdeAddress), 0, (uint32)xmitiq->iocb.un.cont[i].bdeSize); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + } + + /* Take care of Continuation 
IOCBs for this mbuf */ + while ((xmitiq = fc_ringtx_drain(rp)) != NULL) { + icmd = &xmitiq->iocb; + if ((icmd->ulpCommand != CMD_IOCB_CONTINUE_CN) && + (icmd->ulpCommand != CMD_IOCB_CONTINUE64_CN)) + break; + /* Loop thru BDEs and unmap memory pages associated with IOCB */ + for (i = 0; i < (int)xmitiq->iocb.ulpBdeCount; i++) { + if (binfo->fc_flag & FC_SLI2) + fc_bufunmap(p_dev_ctl, (uchar *)getPaddr(xmitiq->iocb.un.cont64[i].addrHigh, xmitiq->iocb.un.cont64[i].addrLow), 0, xmitiq->iocb.un.cont64[i].tus.f.bdeSize); + else + fc_bufunmap(p_dev_ctl, (uchar *) ((ulong)xmitiq->iocb.un.cont[i].bdeAddress), 0, (uint32)xmitiq->iocb.un.cont[i].bdeSize); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + } + + p_mbuf = fcnextdata(m_net); + fcnextdata(m_net) = 0; + fcfreehandle(p_dev_ctl, m_net); + m_freem(m_net); + + if (p_mbuf) { + /* free mbuf */ + fcfreehandle(p_dev_ctl, p_mbuf); + m_freem(p_mbuf); + } + + } else { + /* Queue this iocb to the temporary queue */ + if (tmpq.q_first) { + ((IOCBQ * )tmpq.q_last)->q = (uchar * )xmitiq; + tmpq.q_last = (uchar * )xmitiq; + } else { + tmpq.q_first = (uchar * )xmitiq; + tmpq.q_last = (uchar * )xmitiq; + } + xmitiq->q = NULL; + + xmitiq = fc_ringtx_drain(rp); + } + } + +out: + /* Put the temporary queue back in the IP iocb queue */ + xmitiq = (IOCBQ * )tmpq.q_first; + while (xmitiq) { + next = (IOCBQ * )xmitiq->q; + fc_ringtx_put(rp, xmitiq); + xmitiq = next; + } + + return; +} /* End fc_clear_ip_iocbq */ + + +/**************************************************/ +/** fc_fanovery **/ +/** **/ +/** This routine will recover after a FAN rcvd **/ +/**************************************************/ +_static_ int +fc_fanovery( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + NODELIST * ndlp; + NODELIST * firstndlp; + MAILBOXQ * mb; + int j, addrauth; + D_ID did; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + /* Device Discovery Started */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + 
&fc_msgBlk0200, /* ptr to msg structure */ + fc_mes0200, /* ptr to msg */ + fc_msgBlk0200.msgPreambleStr); /* begin & end varargs */ + binfo->fc_ffstate = FC_LOOP_DISC; + binfo->fc_nlp_cnt = 0; + binfo->fc_flag &= ~(FC_NLP_MORE | FC_DELAY_PLOGI); + + firstndlp = 0; + addrauth = 0; + /* Next, lets seed the nodeList with our ALPAmap */ + if (binfo->fc_topology == TOPOLOGY_LOOP) { + if (binfo->alpa_map[0]) { + for (j = 1; j <= binfo->alpa_map[0]; j++) { + if (((binfo->fc_myDID & 0xff) == binfo->alpa_map[j]) || + (binfo->alpa_map[j] == 0)) + continue; + /* Skip if the node is already in the node table */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, binfo->alpa_map[j]))) { + ndlp->nlp_DID = binfo->alpa_map[j]; + /* Mark slot for address authentication */ + ndlp->nlp_action |= NLP_DO_ADDR_AUTH; + addrauth++; + continue; + } + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = binfo->alpa_map[j]; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + else + continue; + + ndlp->nlp_action |= NLP_DO_DISC_START; + if (firstndlp == 0) + firstndlp = ndlp; + } + } else { + /* No alpamap, so try all alpa's */ + for (j = 0; j < FC_MAXLOOP; j++) { + int index; + + if (clp[CFG_SCAN_DOWN].a_current) + index = FC_MAXLOOP - j - 1; + else + index = j; + if ((binfo->fc_myDID & 0xff) == staticAlpaArray[index]) + continue; + /* Skip if the node is already in the node table */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, staticAlpaArray[index]))) { + /* Mark slot for address authentication */ + ndlp->nlp_action |= NLP_DO_ADDR_AUTH; + ndlp->nlp_DID = staticAlpaArray[index]; + addrauth++; + continue; + } + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = staticAlpaArray[index]; + 
ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + else + continue; + ndlp->nlp_action |= NLP_DO_DISC_START; + if (firstndlp == 0) + firstndlp = ndlp; + } + } + + /* Next mark ALL unmarked local nodes for Authentication */ + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + + /* Skip over Fabric nodes, myself, and nodes partially logged in */ + if ((ndlp->nlp_DID == binfo->fc_myDID) || + (ndlp->nlp_type & NLP_FABRIC) || + (ndlp->nlp_action & (NLP_DO_DISC_START | NLP_DO_ADDR_AUTH))) { + ndlp = (NODELIST *)ndlp->nlp_listp_next; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + continue; + } + + /* If it is a local node */ + did.un.word = ndlp->nlp_DID; + did.un.b.domain = 0; + did.un.b.area = 0; + if (fc_matchdid(binfo, ndlp, did.un.word)) { + /* Mark slot for address authentication */ + ndlp->nlp_action |= NLP_DO_ADDR_AUTH; + addrauth++; + } + ndlp = (NODELIST *)ndlp->nlp_listp_next; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + if (addrauth) { + fc_nextauth(p_dev_ctl, fc_max_els_sent); + return(0); + } else { + if (firstndlp) { + /* We can start discovery right now */ + fc_nextdisc(p_dev_ctl, fc_max_els_sent); + return(0); + } + } + } + + /* This should turn off DELAYED ABTS for ELS timeouts */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + fc_set_slim(binfo, (MAILBOX * )mb, 0x052198, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + + /* This is at init, clear la */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + binfo->fc_ffstate = FC_CLEAR_LA; + fc_clear_la(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } else { + binfo->fc_ffstate = 
FC_ERROR; + /* Device Discovery completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0201, /* ptr to msg structure */ + fc_mes0201, /* ptr to msg */ + fc_msgBlk0201.msgPreambleStr); /* begin & end varargs */ + } + + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, FABRICTMO); + FABRICTMO = 0; + } + return(0); +} /* End fc_fanovery */ + + +/**************************************************/ +/** fc_discovery **/ +/** **/ +/** This routine will find devices in network **/ +/**************************************************/ +_static_ int +fc_discovery( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + NODELIST * ndlp; + NODELIST * new_ndlp; + NODELIST * firstndlp; + MAILBOXQ * mb; + int j, addrauth; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + /* Device Discovery Started */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0202, /* ptr to msg structure */ + fc_mes0202, /* ptr to msg */ + fc_msgBlk0202.msgPreambleStr); /* begin & end varargs */ + + binfo->fc_ffstate = FC_LOOP_DISC; + binfo->fc_nlp_cnt = 0; + binfo->fc_flag &= ~(FC_NLP_MORE | FC_DELAY_PLOGI); + + /* If this is not our first time doing discovery and we don't have + * a new myDID, then rediscovery (address authentication) is appropriate. 
+ */ + if ((p_dev_ctl->init_eventTag) && (binfo->fc_prevDID == binfo->fc_myDID)) { + /* do rediscovery on the loop */ + addrauth = fc_addrauth(p_dev_ctl); + } else { + addrauth = 0; + p_dev_ctl->init_eventTag = 1; + /* First make sure all nodes in nlplist are free */ + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + /* Skip over FABRIC nodes and myself */ + if ((ndlp->nlp_DID == binfo->fc_myDID) || + (ndlp->nlp_type & NLP_FABRIC)) { + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + continue; + } + + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + ndlp->nlp_action |= NLP_DO_DISC_START; + + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + } + binfo->fc_flag &= ~FC_SCSI_RLIP; + + /* If FCP is not enabled, go right to CLEAR_LA */ + if(!(clp[CFG_FCP_ON].a_current)) { + if (addrauth) { + /* Start authentication */ + fc_nextauth(p_dev_ctl, fc_max_els_sent); + return(0); + } + /* Nothing to discover, so goto CLEAR_LA */ + goto cla; + } + + firstndlp = 0; + + /* If FC_FABRIC is set, we need to do some NameServer stuff + * to seed the nodeList with additional entries. 
+ */ + if (binfo->fc_flag & FC_FABRIC) { + /* We can LOGIN to the NameServer first */ + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)NameServer_DID, + (uint32)0, (ushort)0, (NODELIST *)0); + if(fc_fdmi_on) { + /* HBA Mgmt */ + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)FDMI_DID, + (uint32)0, (ushort)0, (NODELIST *)0); + } + return(0); + } + + /* No Fabric, just seed the nodeList with our ALPAmap */ + if (binfo->fc_topology == TOPOLOGY_LOOP) { + if (binfo->alpa_map[0]) { + for (j = 1; j <= binfo->alpa_map[0]; j++) { + if (((binfo->fc_myDID & 0xff) == binfo->alpa_map[j]) || + (binfo->alpa_map[j] == 0)) + continue; + /* Skip if the node is already marked address authentication */ + if((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, binfo->alpa_map[j]))) { + if(ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START)) { + ndlp->nlp_DID = binfo->alpa_map[j]; + if (firstndlp == 0) + firstndlp = ndlp; + continue; + } + } + else { + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = binfo->alpa_map[j]; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + else + continue; + } + ndlp->nlp_action |= NLP_DO_DISC_START; + if (firstndlp == 0) + firstndlp = ndlp; + } + } else { + /* No alpamap, so try all alpa's */ + for (j = 0; j < FC_MAXLOOP; j++) { + int index; + + if (clp[CFG_SCAN_DOWN].a_current) + index = FC_MAXLOOP - j - 1; + else + index = j; + if ((binfo->fc_myDID & 0xff) == staticAlpaArray[index]) + continue; + /* Skip if the node is already marked address authentication */ + if((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, staticAlpaArray[index]))) { + if(ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START)) { + ndlp->nlp_DID = staticAlpaArray[index]; + if (firstndlp == 0) + firstndlp = ndlp; + continue; + } + } + else { + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, 
sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = staticAlpaArray[index]; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + else + continue; + } + ndlp->nlp_action |= NLP_DO_DISC_START; + if (firstndlp == 0) + firstndlp = ndlp; + } + } + } + /* Device Discovery continues */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0203, /* ptr to msg structure */ + fc_mes0203, /* ptr to msg */ + fc_msgBlk0203.msgPreambleStr, /* begin varargs */ + (ulong)firstndlp, + binfo->fc_ffstate); /* end varargs */ + if (addrauth) { + /* Start authentication */ + if(fc_nextauth(p_dev_ctl, fc_max_els_sent)) + return(0); + } + + if (firstndlp) { + /* We can start discovery right now */ + fc_nextdisc(p_dev_ctl, fc_max_els_sent); + return(0); + } + else { + /* Make sure no nodes are marked for discovery */ + /* go through all nodes in nlplist */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START)) + ndlp->nlp_action &= ~(NLP_DO_ADDR_AUTH | NLP_DO_DISC_START); + ndlp = (NODELIST *)ndlp->nlp_listp_next; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + } + +cla: + /* This should turn off DELAYED ABTS for ELS timeouts */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + fc_set_slim(binfo, (MAILBOX * )mb, 0x052198, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + + /* This is at init, clear la */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + binfo->fc_ffstate = FC_CLEAR_LA; + fc_clear_la(binfo, (MAILBOX * 
)mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } else { + binfo->fc_ffstate = FC_ERROR; + /* Device Discovery completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0204, /* ptr to msg structure */ + fc_mes0204, /* ptr to msg */ + fc_msgBlk0204.msgPreambleStr); /* begin & end varargs */ + } + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, FABRICTMO); + FABRICTMO = 0; + } + return(0); +} /* End fc_discovery */ + + +/**************************************************/ +/** fc_addrauth **/ +/** **/ +/** This routine will validate NODELIST entries **/ +/**************************************************/ +_local_ int +fc_addrauth( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + NODELIST * ndlp; + NODELIST * new_ndlp; + int cnt; + int cnt1, cnt2; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + binfo->fc_nlp_cnt = 0; + binfo->fc_flag &= ~FC_NLP_MORE; + cnt = 0; + cnt1 = 0; + cnt2 = 0; + + /* go through all nodes in nlplist */ + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + /* Skip over Fabric nodes, myself, and nodes partially logged in */ + if ((ndlp->nlp_DID == binfo->fc_myDID) || + (ndlp->nlp_type & NLP_FABRIC)) { + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + continue; + } + + if ((ndlp->nlp_type & NLP_FCP_TARGET) && + ((!clp[CFG_USE_ADISC].a_current) || (ndlp->nlp_Rpi == 0) || + (binfo->fc_flag & FC_SCSI_RLIP))) { + /* Force regular discovery on this node */ + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + ndlp->nlp_action |= NLP_DO_DISC_START; + cnt1++; + } else { + if ((ndlp->nlp_type & NLP_IP_NODE) && 
(ndlp->nlp_Rpi == 0)) { + /* Force regular discovery on this node */ + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + ndlp->nlp_action |= NLP_DO_DISC_START; + cnt2++; + } else { + /* Mark slot for address authentication */ + ndlp->nlp_action |= NLP_DO_ADDR_AUTH; + cnt++; + } + } + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + /* Device Discovery authentication */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0205, /* ptr to msg structure */ + fc_mes0205, /* ptr to msg */ + fc_msgBlk0205.msgPreambleStr, /* begin varargs */ + cnt, + cnt1, + cnt2); /* end varargs */ + return(cnt); +} /* End fc_addrauth */ + + +/**************************************************/ +/** fc_freebufq **/ +/** **/ +/** This routine will free a iocb cmd block and **/ +/** all associated memory. **/ +/**************************************************/ +_static_ void +fc_freebufq( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *xmitiq) +{ + FC_BRD_INFO * binfo; + fcipbuf_t * p_mbuf; + fcipbuf_t * m_net; + NODELIST * ndlp; + ULP_BDE64 * bpl; + MATCHMAP * bmp; + int i; + + binfo = &BINFO; + switch (xmitiq->iocb.ulpCommand) { + case 0: + case CMD_XMIT_BCAST_CN: /* process xmit completion */ + case CMD_XMIT_BCAST_CX: + case CMD_XMIT_SEQUENCE_CX: + case CMD_XMIT_BCAST64_CN: /* process xmit completion */ + case CMD_XMIT_BCAST64_CX: + case CMD_XMIT_SEQUENCE64_CX: + /* get mbuf ptr for xmit */ + m_net = (fcipbuf_t * )xmitiq->bp; + ndlp = (NODELIST * ) xmitiq->info; + + /* Loop thru BDEs and unmap memory pages associated with mbuf */ + if (binfo->fc_flag & FC_SLI2) { + bmp = (MATCHMAP * )xmitiq->bpl; + if(bmp) { + bpl = (ULP_BDE64 * )bmp->virt; + while (bpl && xmitiq->iocb.un.xseq64.bdl.bdeSize) { + fc_bufunmap(p_dev_ctl, (uchar *)getPaddr(bpl->addrHigh, bpl->addrLow), 0, bpl->tus.f.bdeSize); + bpl++; + xmitiq->iocb.un.xseq64.bdl.bdeSize -= 
sizeof(ULP_BDE64); + } + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + } else { + for (i = 0; i < (int)xmitiq->iocb.ulpBdeCount; i++) { + fc_bufunmap(p_dev_ctl, (uchar *)((ulong)xmitiq->iocb.un.cont[i].bdeAddress), 0, (uint32)xmitiq->iocb.un.cont[i].bdeSize); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + } + + if (ndlp && (ndlp->nlp_DID == NameServer_DID)) { + fc_mem_put(binfo, MEM_BUF, (uchar * )m_net); + } else { + /* free mbuf */ + p_mbuf = fcnextdata(m_net); + fcnextdata(m_net) = 0; + fcfreehandle(p_dev_ctl, m_net); + m_freem(m_net); + if (p_mbuf) { + fcfreehandle(p_dev_ctl, p_mbuf); + m_freem(p_mbuf); + } + } + break; + + case CMD_IOCB_CONTINUE_CN: + case CMD_IOCB_CONTINUE64_CN: + /* This is valid only for the IP ring, not the ELS ring */ + if (rp->fc_ringno == FC_ELS_RING) + break; + /* Loop thru BDEs and unmap memory pages associated with IOCB */ + for (i = 0; i < (int)xmitiq->iocb.ulpBdeCount; i++) { + if (binfo->fc_flag & FC_SLI2) + fc_bufunmap(p_dev_ctl, (uchar *)getPaddr(xmitiq->iocb.un.cont64[i].addrHigh, xmitiq->iocb.un.cont64[i].addrLow), 0, xmitiq->iocb.un.cont64[i].tus.f.bdeSize); + else + fc_bufunmap(p_dev_ctl, (uchar *)((ulong)xmitiq->iocb.un.cont[i].bdeAddress), 0, (uint32)xmitiq->iocb.un.cont[i].bdeSize); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + break; + + case CMD_XMIT_ELS_RSP_CX: + case CMD_XMIT_ELS_RSP64_CX: + if (xmitiq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->bp); + } + if (binfo->fc_flag & FC_SLI2) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + break; + + case CMD_ELS_REQUEST_CR: + case CMD_ELS_REQUEST64_CR: + if (xmitiq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->bp); + } + if (xmitiq->info) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->info); + } + if (binfo->fc_flag & FC_SLI2) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * 
)xmitiq); + break; + + case CMD_QUE_RING_BUF_CN: + case CMD_QUE_RING_BUF64_CN: + /* Loop thru BDEs and return MEM_BUFs associated with IOCB */ + for (i = 0; i < (int)xmitiq->iocb.ulpBdeCount; i++) { + MATCHMAP * matp; + + if (binfo->fc_flag & FC_SLI2) + matp = fc_getvaddr(p_dev_ctl, rp, + (uchar * )getPaddr(xmitiq->iocb.un.cont64[i].addrHigh, xmitiq->iocb.un.cont64[i].addrLow)); + else + matp = fc_getvaddr(p_dev_ctl, rp, + (uchar * )((ulong)xmitiq->iocb.un.cont[i].bdeAddress)); + if (matp) { + if (rp->fc_ringno == FC_ELS_RING) { + fc_mem_put(binfo, MEM_BUF, (uchar * )matp); + } + if (rp->fc_ringno == FC_IP_RING) { + p_mbuf = (fcipbuf_t * )matp; + fcnextdata(p_mbuf) = 0; + fcnextpkt(p_mbuf) = 0; + m_freem(p_mbuf); + } + } + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + break; + + case CMD_CREATE_XRI_CX: + case CMD_CREATE_XRI_CR: + default: + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + break; + } +} /* End fc_freebufq */ + + +/**************************************************/ +/** fc_emac_lookup **/ +/** **/ +/** This routine will find an NODELIST entry **/ +/** that matches the destination MAC address. **/ +/** The XID for that entry is returned and rpi **/ +/** is updated with a ptr to the NODELIST entry. 
**/ +/**************************************************/ +_static_ ushort +fc_emac_lookup( +FC_BRD_INFO *binfo, +uchar *addr, +NODELIST **ndlpp) +{ + int j; + NODELIST * ndlp; + + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + + /* IF portname matches IEEE address, return NODELIST entry */ + if ((ndlp->nlp_portname.IEEE[0] == addr[0])) { + if((ndlp->nlp_state == NLP_SEED) || + ((ndlp->nlp_DID != Bcast_DID) && + ((ndlp->nlp_DID & CT_DID_MASK) == CT_DID_MASK))) { + ndlp = (NODELIST *)ndlp->nlp_listp_next; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + continue; + } + + /* check rest of bytes in address / portname */ + for (j = 1; j < NADDR_LEN; j++) { + if (ndlp->nlp_portname.IEEE[j] != addr[j]) + break; + } + /* do all NADDR_LEN bytes match? 
*/ + if (j == NADDR_LEN) { + if ((ndlp->nlp_portname.nameType == NAME_IEEE) && + (ndlp->nlp_portname.IEEEextMsn == 0) && + (ndlp->nlp_portname.IEEEextLsb == 0)) { + *ndlpp = ndlp; + return(ndlp->nlp_Xri); + } + } + } + ndlp = (NODELIST *)ndlp->nlp_listp_next; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + /* no match so return 0 */ + + *ndlpp = 0; + return(0); +} /* End fc_emac_lookup */ + + + +/* Put nlp on bind list */ +_static_ int +fc_nlp_bind( +FC_BRD_INFO *binfo, +NODELIST *nlp) +{ + NODELIST *end_nlp; + uint32 data1; + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* BIND node tbl */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0903, /* ptr to msg structure */ + fc_mes0903, /* ptr to msg */ + fc_msgBlk0903.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + /* First take it off any list its on */ + if(nlp->nlp_listp_next) { + if (nlp->nlp_flag & NLP_MAPPED) { + nlp->nlp_time = binfo->nlptimer++; + if (nlp->nlp_time == 0) { + fc_nlpadjust(binfo); + } + nlp->nlp_flag &= ~NLP_MAPPED; + binfo->fc_map_cnt--; + fc_deque(nlp); + } + else if (nlp->nlp_flag & NLP_UNMAPPED) { + nlp->nlp_time = binfo->nlptimer++; + if (nlp->nlp_time == 0) { + fc_nlpadjust(binfo); + } + nlp->nlp_flag &= ~NLP_UNMAPPED; + binfo->fc_unmap_cnt--; + fc_deque(nlp); + } + else if (nlp->nlp_flag & NLP_BIND) { + return(0); /* Already on bind list */ + } + } + + /* Put it at the begining of the bind list */ + binfo->fc_bind_cnt++; + + if(binfo->fc_nlpbind_start == (NODELIST *)&binfo->fc_nlpbind_start) { + nlp->nlp_listp_next = &binfo->fc_nlpbind_start; + binfo->fc_nlpbind_end = nlp; + } + else { + end_nlp = binfo->fc_nlpbind_start; + nlp->nlp_listp_next = end_nlp; + end_nlp->nlp_listp_prev = 
nlp; + } + binfo->fc_nlpbind_start = nlp;; + nlp->nlp_listp_prev = &binfo->fc_nlpbind_start; + + nlp->nlp_flag |= NLP_BIND; + + return(0); +} + +/* Put nlp on unmap list */ +_static_ int +fc_nlp_unmap( +FC_BRD_INFO *binfo, +NODELIST *nlp) +{ + NODELIST * end_nlp; + RING * rp; + IOCBQ * iocbq; + uint32 data1; + fc_dev_ctl_t * p_dev_ctl; + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* UNMAP node tbl */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0904, /* ptr to msg structure */ + fc_mes0904, /* ptr to msg */ + fc_msgBlk0904.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + /* First take it off any list its on */ + if(nlp->nlp_listp_next) { + if (nlp->nlp_flag & NLP_MAPPED) { + nlp->nlp_time = binfo->nlptimer++; + if (nlp->nlp_time == 0) { + fc_nlpadjust(binfo); + } + nlp->nlp_flag &= ~NLP_MAPPED; + binfo->fc_map_cnt--; + fc_deque(nlp); + } + else if (nlp->nlp_flag & NLP_BIND) { + nlp->nlp_time = binfo->nlptimer++; + if (nlp->nlp_time == 0) { + fc_nlpadjust(binfo); + } + nlp->nlp_flag &= ~NLP_BIND; + binfo->fc_bind_cnt--; + fc_deque(nlp); + /* If we are going from bind to unmapped, check for duplicate + * WWNN on bind list. 
+ */ + /* Missing SP */ + p_dev_ctl = (fc_dev_ctl_t * )(binfo->fc_p_dev_ctl); + + /* Fabric nodes are always mapped by DID only */ + if((nlp->nlp_DID & Fabric_DID_MASK) == Fabric_DID_MASK) + goto out; + + switch(p_dev_ctl->fcp_mapping) { + + case FCP_SEED_DID: + break; + + case FCP_SEED_WWNN: + if((end_nlp = fc_findnode_wwnn(binfo, NLP_SEARCH_BIND, &nlp->nlp_nodename))) { + /* Found one so remove it */ + unsigned long iflag; + end_nlp->nlp_flag &= ~NLP_BIND; + binfo->fc_bind_cnt--; + fc_deque(end_nlp); + /* Look through ELS ring and remove any ELS cmds in progress for rnlp */ + iflag = lpfc_q_disable_lock(p_dev_ctl); + rp = &binfo->fc_ring[FC_ELS_RING]; + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + if ((iocbq->iocb.un.elsreq.remoteID == end_nlp->nlp_DID) || + ((end_nlp->nlp_DID == 0) && + (iocbq->iocb.un.elsreq.remoteID == end_nlp->nlp_oldDID))) { + iocbq->retry = 0xff; /* Mark for abort */ + if((binfo->fc_flag & FC_RSCN_MODE) || + (binfo->fc_ffstate < FC_READY)) { + if((end_nlp->nlp_state >= NLP_PLOGI) && + (end_nlp->nlp_state <= NLP_PRLI)) { + end_nlp->nlp_action &= ~NLP_DO_RSCN; + binfo->fc_nlp_cnt--; + if ((end_nlp->nlp_type & NLP_IP_NODE) && end_nlp->nlp_bp) { + m_freem((fcipbuf_t *)end_nlp->nlp_bp); + end_nlp->nlp_bp = (uchar * )0; + } + } + } + } + iocbq = (IOCBQ * )iocbq->q; + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + /* In case its on fc_delay_timeout list */ + fc_abort_delay_els_cmd(p_dev_ctl, end_nlp->nlp_DID); + fc_bzero((void *)end_nlp, sizeof(NODELIST)); + fc_mem_put(binfo, MEM_NLP, (uchar *)end_nlp); + } + break; + + case FCP_SEED_WWPN: + if((end_nlp = fc_findnode_wwpn(binfo, NLP_SEARCH_BIND, &nlp->nlp_portname))) { + /* Found one so remove it */ + unsigned long iflag; + end_nlp->nlp_flag &= ~NLP_BIND; + binfo->fc_bind_cnt--; + fc_deque(end_nlp); + /* Look through ELS ring and remove any ELS cmds in progress for rnlp */ + iflag = lpfc_q_disable_lock(p_dev_ctl); + rp = &binfo->fc_ring[FC_ELS_RING]; + iocbq = (IOCBQ * 
)(rp->fc_txp.q_first); + while (iocbq) { + if ((iocbq->iocb.un.elsreq.remoteID == end_nlp->nlp_DID) || + ((end_nlp->nlp_DID == 0) && (iocbq->iocb.un.elsreq.remoteID == end_nlp->nlp_oldDID))) { + iocbq->retry = 0xff; /* Mark for abort */ + if((binfo->fc_flag & FC_RSCN_MODE) || + (binfo->fc_ffstate < FC_READY)) { + if((end_nlp->nlp_state >= NLP_PLOGI) && + (end_nlp->nlp_state <= NLP_PRLI)) { + end_nlp->nlp_action &= ~NLP_DO_RSCN; + binfo->fc_nlp_cnt--; + if ((end_nlp->nlp_type & NLP_IP_NODE) && end_nlp->nlp_bp) { + m_freem((fcipbuf_t *)end_nlp->nlp_bp); + end_nlp->nlp_bp = (uchar * )0; + } + } + } + } + iocbq = (IOCBQ * )iocbq->q; + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + /* In case its on fc_delay_timeout list */ + fc_abort_delay_els_cmd(p_dev_ctl, end_nlp->nlp_DID); + fc_bzero((void *)end_nlp, sizeof(NODELIST)); + fc_mem_put(binfo, MEM_NLP, (uchar *)end_nlp); + } + break; + } /* switch */ + } + else if (nlp->nlp_flag & NLP_UNMAPPED) { + return(0); /* Already on unmap list */ + } + } + +out: + /* Put it on the end of the unmapped list */ + binfo->fc_unmap_cnt++; + end_nlp = binfo->fc_nlpunmap_end; + fc_enque(nlp, end_nlp); + nlp->nlp_flag |= NLP_UNMAPPED; + return(0); +} + +/* Put nlp on map list */ +_static_ int +fc_nlp_map( +FC_BRD_INFO *binfo, +NODELIST *nlp) +{ + NODELIST *end_nlp; + uint32 data1; + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* MAP node tbl */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0905, /* ptr to msg structure */ + fc_mes0905, /* ptr to msg */ + fc_msgBlk0905.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + /* First take it off any list its on */ + if(nlp->nlp_listp_next) { + if (nlp->nlp_flag & NLP_UNMAPPED) { + nlp->nlp_time = binfo->nlptimer++; + if (nlp->nlp_time == 0) { + fc_nlpadjust(binfo); + } + nlp->nlp_flag &= ~NLP_UNMAPPED; + 
binfo->fc_unmap_cnt--; + fc_deque(nlp); + } + else if (nlp->nlp_flag & NLP_BIND) { + nlp->nlp_time = binfo->nlptimer++; + if (nlp->nlp_time == 0) { + fc_nlpadjust(binfo); + } + nlp->nlp_flag &= ~NLP_BIND; + binfo->fc_bind_cnt--; + fc_deque(nlp); + } + else if (nlp->nlp_flag & NLP_MAPPED) { + return(0); /* Already on map list */ + } + } + + /* Put it on the end of the mapped list */ + binfo->fc_map_cnt++; + end_nlp = binfo->fc_nlpmap_end; + fc_enque(nlp, end_nlp); + nlp->nlp_flag |= NLP_MAPPED; + return(0); +} + +/**************************************************/ +/** fc_findnode_odid **/ +/** **/ +/** This routine find a node by did **/ +/**************************************************/ +_static_ NODELIST * +fc_findnode_odid( +FC_BRD_INFO *binfo, +uint32 order, +uint32 did) +{ + NODELIST * nlp; + uint32 data1; + + if(order & NLP_SEARCH_UNMAPPED) { + nlp = binfo->fc_nlpunmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpunmap_start) { + if (fc_matchdid(binfo, nlp, did)) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node DID unmapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0906, /* ptr to msg structure */ + fc_mes0906, /* ptr to msg */ + fc_msgBlk0906.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + if(order & NLP_SEARCH_MAPPED) { + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if (fc_matchdid(binfo, nlp, did)) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node did mapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0907, /* ptr to msg structure */ + fc_mes0907, /* ptr to msg */ + fc_msgBlk0907.msgPreambleStr, /* begin varargs 
*/ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + if(order & NLP_SEARCH_BIND) { + nlp = binfo->fc_nlpbind_start; + while(nlp != (NODELIST *)&binfo->fc_nlpbind_start) { + if (fc_matchdid(binfo, nlp, did)) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node DID bind */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0908, /* ptr to msg structure */ + fc_mes0908, /* ptr to msg */ + fc_msgBlk0908.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + /* FIND node did NOT FOUND */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0909, /* ptr to msg structure */ + fc_mes0909, /* ptr to msg */ + fc_msgBlk0909.msgPreambleStr, /* begin varargs */ + did, + order); /* end varargs */ + /* no match found */ + return((NODELIST * )0); +} /* End fc_findnode_odid */ + + +/**************************************************/ +/** fc_findnode_scsid **/ +/** **/ +/** This routine find a node by scsid **/ +/**************************************************/ +_static_ NODELIST * +fc_findnode_scsid( +FC_BRD_INFO *binfo, +uint32 order, +uint32 scsid) +{ + NODELIST * nlp; + uint32 data1; + + if(order & NLP_SEARCH_UNMAPPED) { + nlp = binfo->fc_nlpunmap_start; + if(nlp == 0) { + return(0); + } + while(nlp != (NODELIST *)&binfo->fc_nlpunmap_start) { + if ((nlp->nlp_type & NLP_FCP_TARGET) && + (INDEX(nlp->id.nlp_pan, nlp->id.nlp_sid) == scsid)) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)scsid & 0xff) ); + /* FIND node scsi_id unmapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0910, /* ptr to msg structure */ + fc_mes0910, /* ptr to msg */ + 
fc_msgBlk0910.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == 0) { + return(0); + } + } + } + if(order & NLP_SEARCH_MAPPED) { + nlp = binfo->fc_nlpmap_start; + if(nlp == 0) { + return(0); + } + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if ((nlp->nlp_type & NLP_FCP_TARGET) && + (INDEX(nlp->id.nlp_pan, nlp->id.nlp_sid) == scsid)) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)scsid & 0xff) ); + /* FIND node scsi_id mapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0911, /* ptr to msg structure */ + fc_mes0911, /* ptr to msg */ + fc_msgBlk0911.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == 0) { + return(0); + } + } + } + if(order & NLP_SEARCH_BIND) { + nlp = binfo->fc_nlpbind_start; + if(nlp == 0) { + return(0); + } + while(nlp != (NODELIST *)&binfo->fc_nlpbind_start) { + if ((nlp->nlp_type & NLP_FCP_TARGET) && + (INDEX(nlp->id.nlp_pan, nlp->id.nlp_sid) == scsid)) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)scsid & 0xff) ); + /* FIND node scsi_id bind */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0912, /* ptr to msg structure */ + fc_mes0912, /* ptr to msg */ + fc_msgBlk0912.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + if(nlp == 0) { + return(0); + } + } + } + /* FIND node scsi_id NOT FOUND */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0913, /* ptr to msg structure */ + fc_mes0913, /* ptr to msg */ + fc_msgBlk0913.msgPreambleStr, /* begin varargs */ + 
scsid, + order); /* end varargs */ + /* no match found */ + return((NODELIST * )0); +} /* End fc_findnode_scsid */ + + +/**************************************************/ +/** fc_findnode_wwnn **/ +/** **/ +/** This routine find a node by WWNN **/ +/**************************************************/ +_static_ NODELIST * +fc_findnode_wwnn( +FC_BRD_INFO *binfo, +uint32 order, +NAME_TYPE * wwnn) +{ + NODELIST * nlp; + uint32 data1; + + if(order & NLP_SEARCH_UNMAPPED) { + nlp = binfo->fc_nlpunmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpunmap_start) { + if(fc_geportname(&nlp->nlp_nodename, wwnn) == 2) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node wwnn unmapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0914, /* ptr to msg structure */ + fc_mes0914, /* ptr to msg */ + fc_msgBlk0914.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + if(order & NLP_SEARCH_MAPPED) { + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if(fc_geportname(&nlp->nlp_nodename, wwnn) == 2) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node wwnn mapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0915, /* ptr to msg structure */ + fc_mes0915, /* ptr to msg */ + fc_msgBlk0915.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + if(order & NLP_SEARCH_BIND) { + nlp = binfo->fc_nlpbind_start; + while(nlp != (NODELIST *)&binfo->fc_nlpbind_start) { + if(fc_geportname(&nlp->nlp_nodename, wwnn) == 2) { + + data1 = ( ((uint32)nlp->nlp_state << 
24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node wwnn bind */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0916, /* ptr to msg structure */ + fc_mes0916, /* ptr to msg */ + fc_msgBlk0916.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + /* FIND node wwnn NOT FOUND */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0918, /* ptr to msg structure */ + fc_mes0918, /* ptr to msg */ + fc_msgBlk0918.msgPreambleStr, /* begin varargs */ + order); /* end varargs */ + /* no match found */ + return((NODELIST * )0); +} /* End fc_findnode_wwnn */ + + + +/**************************************************/ +/** fc_findnode_wwpn **/ +/** **/ +/** This routine find a node by WWPN **/ +/**************************************************/ +_static_ NODELIST * +fc_findnode_wwpn( +FC_BRD_INFO *binfo, +uint32 order, +NAME_TYPE * wwpn) +{ + NODELIST * nlp; + uint32 data1; + + if(order & NLP_SEARCH_UNMAPPED) { + nlp = binfo->fc_nlpunmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpunmap_start) { + if(fc_geportname(&nlp->nlp_portname, wwpn) == 2) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node wwpn unmapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0919, /* ptr to msg structure */ + fc_mes0919, /* ptr to msg */ + fc_msgBlk0919.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + if(order & NLP_SEARCH_MAPPED) { + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if(fc_geportname(&nlp->nlp_portname, wwpn) == 2) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + 
((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node wwpn mapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0920, /* ptr to msg structure */ + fc_mes0920, /* ptr to msg */ + fc_msgBlk0920.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + if(order & NLP_SEARCH_BIND) { + nlp = binfo->fc_nlpbind_start; + while(nlp != (NODELIST *)&binfo->fc_nlpbind_start) { + if(fc_geportname(&nlp->nlp_portname, wwpn) == 2) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node wwpn bind */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0921, /* ptr to msg structure */ + fc_mes0921, /* ptr to msg */ + fc_msgBlk0921.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_DID, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + /* FIND node wwpn NOT FOUND */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0922, /* ptr to msg structure */ + fc_mes0922, /* ptr to msg */ + fc_msgBlk0922.msgPreambleStr, /* begin varargs */ + order); /* end varargs */ + /* no match found */ + return((NODELIST * )0); +} /* End fc_findnode_wwpn */ + + +/**************************************************/ +/** fc_findnode_oxri **/ +/** **/ +/** This routine find a node by OXri **/ +/**************************************************/ +_static_ NODELIST * +fc_findnode_oxri( +FC_BRD_INFO *binfo, +uint32 order, +uint32 xri) +{ + NODELIST * nlp; + uint32 data1; + + if(order & NLP_SEARCH_UNMAPPED) { + nlp = binfo->fc_nlpunmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpunmap_start) { + if (nlp->nlp_Xri == xri) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + 
((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node xri unmapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0923, /* ptr to msg structure */ + fc_mes0923, /* ptr to msg */ + fc_msgBlk0923.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_Xri, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + if(order & NLP_SEARCH_MAPPED) { + nlp = binfo->fc_nlpmap_start; + while(nlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if (nlp->nlp_Xri == xri) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node xri mapped */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0924, /* ptr to msg structure */ + fc_mes0924, /* ptr to msg */ + fc_msgBlk0924.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_Xri, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + if(order & NLP_SEARCH_BIND) { + nlp = binfo->fc_nlpbind_start; + while(nlp != (NODELIST *)&binfo->fc_nlpbind_start) { + if (nlp->nlp_Xri == xri) { + + data1 = ( ((uint32)nlp->nlp_state << 24) | + ((uint32)nlp->nlp_action << 16) | + ((uint32)nlp->nlp_type << 8) | + ((uint32)nlp->nlp_Rpi & 0xff) ); + /* FIND node xri bind */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0925, /* ptr to msg structure */ + fc_mes0925, /* ptr to msg */ + fc_msgBlk0925.msgPreambleStr, /* begin varargs */ + (ulong)nlp, + nlp->nlp_Xri, + nlp->nlp_flag, + data1); /* end varargs */ + return(nlp); + } + nlp = (NODELIST *)nlp->nlp_listp_next; + } + } + /* FIND node xri NOT FOUND */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0926, /* ptr to msg structure */ + fc_mes0926, /* ptr to msg */ + fc_msgBlk0926.msgPreambleStr, /* begin varargs */ + xri, + order); /* end varargs */ + /* no match found */ + return((NODELIST * )0); +} /* End 
fc_findnode_oxri */ + +/* Put nlp in PLOGI state */ +_static_ int +fc_nlp_logi( +FC_BRD_INFO *binfo, +NODELIST *nlp, +NAME_TYPE *wwpnp, +NAME_TYPE *wwnnp) +{ + fc_dev_ctl_t * p_dev_ctl; + NODELIST * rnlp; + + if (nlp->nlp_flag & NLP_UNMAPPED) { + nlp->nlp_flag &= ~NLP_UNMAPPED; + binfo->fc_unmap_cnt--; + fc_deque(nlp); + } + else if (nlp->nlp_flag & NLP_BIND) { + nlp->nlp_flag &= ~NLP_BIND; + binfo->fc_bind_cnt--; + fc_deque(nlp); + } + else if (nlp->nlp_flag & NLP_MAPPED) { + nlp->nlp_flag &= ~NLP_MAPPED; + binfo->fc_map_cnt--; + fc_deque(nlp); + } + + p_dev_ctl = (fc_dev_ctl_t * )(binfo->fc_p_dev_ctl); + + /* Fabric nodes are always mapped by DID only */ + if((nlp->nlp_DID & Fabric_DID_MASK) == Fabric_DID_MASK) + goto out; + + switch(p_dev_ctl->fcp_mapping) { + case FCP_SEED_DID: + fc_bcopy(wwpnp, &nlp->nlp_portname, sizeof(NAME_TYPE)); + fc_bcopy(wwnnp, &nlp->nlp_nodename, sizeof(NAME_TYPE)); + break; + case FCP_SEED_WWNN: + /* Check to see if this WWNN already has a binding setup */ + if(fc_geportname(&nlp->nlp_nodename, wwnnp) != 2) { + if (nlp->nlp_type & NLP_SEED_WWNN) { + /* Get a new entry to save old binding info */ + if((rnlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)rnlp, sizeof(NODELIST)); + rnlp->nlp_state = NLP_LIMBO; + fc_nlp_swapinfo(binfo, nlp, rnlp); + fc_nlp_bind(binfo, rnlp); + } + } + /* Search for existing entry with that binding */ + if((rnlp = fc_findnode_wwnn(binfo, NLP_SEARCH_ALL, wwnnp)) && + (rnlp->nlp_type & NLP_SEED_WWNN)) { + + if (rnlp->nlp_flag & NLP_MAPPED) { + rnlp->nlp_flag &= ~NLP_MAPPED; + binfo->fc_map_cnt--; + fc_deque(rnlp); + } + else if (rnlp->nlp_flag & NLP_UNMAPPED) { + rnlp->nlp_flag &= ~NLP_UNMAPPED; + binfo->fc_unmap_cnt--; + fc_deque(rnlp); + } + else if (rnlp->nlp_flag & NLP_BIND) { + rnlp->nlp_flag &= ~NLP_BIND; + binfo->fc_bind_cnt--; + fc_deque(rnlp); + } + + /* found, so copy binding info into nlp */ + fc_nlp_swapinfo(binfo, rnlp, nlp); + if(rnlp->nlp_action || (rnlp->nlp_flag & 
NLP_REQ_SND)) { + fc_nlp_bind(binfo, rnlp); + } + else { + fc_freenode(binfo, rnlp, 1); + } + } + fc_bcopy(wwpnp, &nlp->nlp_portname, sizeof(NAME_TYPE)); + fc_bcopy(wwnnp, &nlp->nlp_nodename, sizeof(NAME_TYPE)); + } + else { + /* DID and WWNN match existing entry */ + fc_bcopy(wwpnp, &nlp->nlp_portname, sizeof(NAME_TYPE)); + } + break; + case FCP_SEED_WWPN: + /* Check to see if this WWPN already has a binding setup */ + if(fc_geportname(&nlp->nlp_portname, wwpnp) != 2) { + if (nlp->nlp_type & NLP_SEED_WWPN) { + /* Get a new entry to save old binding info */ + if((rnlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)rnlp, sizeof(NODELIST)); + rnlp->nlp_state = NLP_LIMBO; + fc_nlp_swapinfo(binfo, nlp, rnlp); + fc_nlp_bind(binfo, rnlp); + } + } + /* Search for existing entry with that binding */ + if((rnlp = fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, wwpnp)) && + (rnlp->nlp_type & NLP_SEED_WWPN)) { + + if (rnlp->nlp_flag & NLP_MAPPED) { + rnlp->nlp_flag &= ~NLP_MAPPED; + binfo->fc_map_cnt--; + fc_deque(rnlp); + } + else if (rnlp->nlp_flag & NLP_UNMAPPED) { + rnlp->nlp_flag &= ~NLP_UNMAPPED; + binfo->fc_unmap_cnt--; + fc_deque(rnlp); + } + else if (rnlp->nlp_flag & NLP_BIND) { + rnlp->nlp_flag &= ~NLP_BIND; + binfo->fc_bind_cnt--; + fc_deque(rnlp); + } + /* found, so copy binding info into nlp */ + fc_nlp_swapinfo(binfo, rnlp, nlp); + if(rnlp->nlp_action || (rnlp->nlp_flag & NLP_REQ_SND)) { + fc_nlp_bind(binfo, rnlp); + } + else { + fc_freenode(binfo, rnlp, 1); + } + } +out: + fc_bcopy(wwpnp, &nlp->nlp_portname, sizeof(NAME_TYPE)); + fc_bcopy(wwnnp, &nlp->nlp_nodename, sizeof(NAME_TYPE)); + } + else { + /* DID and WWPN match existing entry */ + fc_bcopy(wwnnp, &nlp->nlp_nodename, sizeof(NAME_TYPE)); + } + break; + } + + nlp->nlp_state = NLP_PLOGI; + fc_nlp_bind(binfo, nlp); + return(0); +} + +_static_ int +fc_nlp_swapinfo( +FC_BRD_INFO *binfo, +NODELIST *old_nlp, +NODELIST *new_nlp) +{ + int index; + + fc_bcopy(&old_nlp->nlp_nodename, 
&new_nlp->nlp_nodename, sizeof(NAME_TYPE)); + fc_bcopy(&old_nlp->nlp_portname, &new_nlp->nlp_portname, sizeof(NAME_TYPE)); + new_nlp->nlp_type = old_nlp->nlp_type; + new_nlp->id.nlp_pan = old_nlp->id.nlp_pan; + new_nlp->id.nlp_sid = old_nlp->id.nlp_sid; + new_nlp->nlp_targetp = old_nlp->nlp_targetp; + new_nlp->target_scsi_options = old_nlp->target_scsi_options; + new_nlp->capabilities = old_nlp->capabilities; + new_nlp->sync = old_nlp->sync; + + if((old_nlp->nlp_type & NLP_FCP_TARGET) && old_nlp->nlp_targetp != NULL) { + index = INDEX(new_nlp->id.nlp_pan, new_nlp->id.nlp_sid); + if(binfo->device_queue_hash[index].node_ptr && + binfo->device_queue_hash[index].node_ptr->nlp == old_nlp) { + binfo->device_queue_hash[index].node_ptr->nlp = new_nlp; + new_nlp->nlp_targetp = (uchar *)binfo->device_queue_hash[index].node_ptr; + } + } + + old_nlp->nlp_type = 0; + old_nlp->id.nlp_pan = 0; + old_nlp->id.nlp_sid = 0; + old_nlp->nlp_targetp = 0; + old_nlp->sync = binfo->fc_sync; + old_nlp->capabilities = binfo->fc_capabilities; + fc_bzero(&old_nlp->nlp_nodename, sizeof(NAME_TYPE)); + fc_bzero(&old_nlp->nlp_portname, sizeof(NAME_TYPE)); + return(0); +} + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcscsib.c current/drivers/scsi/lpfc/fcscsib.c --- reference/drivers/scsi/lpfc/fcscsib.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcscsib.c 2004-04-09 11:53:03.000000000 -0700 @@ -0,0 +1,7611 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. 
* + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +#include "fc_os.h" + +#include "fc_hw.h" +#include "fc.h" + +#include "fcdiag.h" +#include "hbaapi.h" +#include "fcfgparm.h" +#include "fcmsg.h" +#include "fc_crtn.h" +#include "fc_ertn.h" /* Environment - external routine definitions */ + +extern clock_t fc_ticks_per_second; +extern fc_dd_ctl_t DD_CTL; +extern iCfgParam icfgparam[]; +extern int lpfc_nethdr; +extern uint32 fcPAGESIZE; +extern uint32 fc_diag_state; +extern int fcinstance[]; + +/* Routine Declaration - Local */ +_local_ void fc_rscndisc_timeout(fc_dev_ctl_t *p_dev_ctl, void *l1, void *l2); +_local_ int fc_ring_txcnt(FC_BRD_INFO *binfo, int flag); +_local_ int fc_ring_txpcnt(FC_BRD_INFO *binfo, int flag); +/* End Routine Declaration - Local */ + +static uchar fcbroadcastaddr[MACADDR_LEN] = +{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; + +_static_ int fc_max_ns_retry = 3; +_static_ int fc_inq_hbeat_tmo = 60; +_static_ int fc_inq_sn_tmo = 30; +_static_ int fc_offline_stop_io = 1; +_static_ int fc_max_els_sent = 32; + +#define INQ_LEN 0x100 + +#define RPTLUN_MIN_LEN 0x1000 +#define WD6 (IOCB_WORD_SZ-2) /* IOCB wd 6 */ +#define WD7 (IOCB_WORD_SZ-1) /* IOCB wd 7 */ +/********************************************/ +/** fc_strncmp **/ +/** **/ +/** Compare string 1 to string 2. 
**/ +/** Compare terminates on count N **/ +/** **/ +/** Return value: **/ +/** Less than 0 = str1 < str2 **/ +/** Zero = str1 egual str2 **/ +/** Greater than 0 = str1 > str2 **/ +/********************************************/ +_forward_ int +fc_strncmp( char *str1, + char *str2, + int cnt) +{ + int c1, c2; + int dif; + + while( cnt--) { + c1 = (int)*str1++ & 0xff; + c2 = (int)*str2++ & 0xff; + if( (c1 | c2) == 0) + return(0); /* strings equal */ + if( (dif = c1 - c2) == 0) + continue; /* chars are equal */ + if( c1 == 0) + return(-1); /* str1 < str2 */ + if( c2 == 0) + return(1); /* str1 > str2 */ + return(dif); + } + return(0); /* strings equal */ +} /* fc_strncmp */ + +/********************************************/ +/* fc_strcpy */ +/* */ +/* Copy str2 to str1. . */ +/* Str2 must be a pointer to a null */ +/* terminated string. It is the caller's */ +/* responsibility to insure that str1 is */ +/* large enough to hold str2. */ +/* */ +/* Return value: */ +/* pointer to str1 */ +/********************************************/ +_static_ char * +fc_strcpy( char *str1, /* dest */ + char *str2) /* src */ +{ + char *temp; + temp = str1; + + while( (*str1++ = *str2++) != '\0') { + continue; + } + return(temp); +} /* fc_strcpy */ + +/************************************************/ +/** fc_setup_ring **/ +/** **/ +/** After ring has been configured, this **/ +/** routine is called to initialize the ring **/ +/************************************************/ +_static_ void +fc_setup_ring( +fc_dev_ctl_t *p_dev_ctl, /* point to dev_ctl area */ +int ring) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + RING * rp; + + binfo = &BINFO; + rp = &binfo->fc_ring[ring]; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* set up the watchdog timer control structure section */ + if (!rp->fc_wdt_inited) { + if (ring == FC_FCP_RING) { + if(clp[CFG_LINKDOWN_TMO].a_current) { + rp->fc_ringtmo = clp[CFG_LINKDOWN_TMO].a_current; + } + else { + rp->fc_ringtmo = 
clp[CFG_LINKDOWN_TMO].a_default; + } + } else { + rp->fc_ringtmo = RING_TMO_DFT; + } + RINGTMO = 0; + rp->fc_wdt_inited = 1; + } +} /* End fc_setup_ring */ + +/************************************************************************/ +/* */ +/* NAME: fc_closewait */ +/* */ +/* FUNCTION: Adapter Driver Wait for Close Routine */ +/* This routine waits for the adapter to finish all requests */ +/* */ +/* EXECUTION ENVIRONMENT: */ +/* This routine can be called by a process. */ +/* */ +/* INPUTS: */ +/* fc_dev_ctl_t - adapter unique data structure (one per adapter) */ +/* */ +/* RETURN VALUE DESCRIPTION: none */ +/* */ +/* EXTERNAL PROCEDURES CALLED: */ +/* disable_lock lock_enable */ +/* */ +/************************************************************************/ +_static_ void +fc_closewait( +fc_dev_ctl_t *p_dev_ctl) /* point to dev_ctl area */ +{ + FC_BRD_INFO * binfo; + int ipri; + struct buf *bp, *nextbp; + + binfo = &BINFO; + + ipri = disable_lock(FC_LVL, &CMD_LOCK); + + /* wait for all operations to complete */ + while ((fc_ring_txcnt(binfo, FC_FCP_RING) + || fc_ring_txpcnt(binfo, FC_FCP_RING) + || binfo->fc_mbox.q_cnt)) { + unlock_enable(ipri, &CMD_LOCK); + DELAYMSctx(1000); /* delay 1 second */ + ipri = disable_lock(FC_LVL, &CMD_LOCK); + } + + /* Clear out timeout queue */ + for (bp = p_dev_ctl->timeout_head; bp != NULL; ) { + nextbp = bp->av_forw; + bp->b_error = ETIMEDOUT; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + FCSTATCTR.fcpScsiTmo++; + fc_do_iodone(bp); + bp = nextbp; + } + p_dev_ctl->timeout_head = NULL; + p_dev_ctl->timeout_count = 0; + + /* update the device state */ + binfo->fc_open_count &= ~FC_FCP_OPEN; + if (binfo->fc_open_count == 0) + p_dev_ctl->device_state = CLOSED; + + unlock_enable(ipri, &CMD_LOCK); + return; + +} /* End fc_closewait */ + + +/* + * This routine copies data from src + * then potentially swaps the destination to big endian. + * Assumes cnt is a multiple of sizeof(uint32). 
+ */ +_static_ void +fc_pcimem_bcopy( +uint32 *src, +uint32 *dest, +uint32 cnt) +{ + uint32 ldata; + int i; + + for (i = 0; i < (int)cnt; i += sizeof(uint32)) { + ldata = *src++; + ldata = PCIMEM_LONG(ldata); + *dest++ = ldata; + } +} /* End fc_pcimem_bcopy */ + + +#define SCSI3_PERSISTENT_RESERVE_IN 0x5e + +/******************************************************/ +/** handle_fcp_event **/ +/** **/ +/** Description: Process an FCP Rsp Ring completion **/ +/** **/ +/******************************************************/ +_static_ void +handle_fcp_event( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *temp) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + IOCB * cmd; + fc_buf_t * fcptr; + struct buf * bp; + T_SCSIBUF * sbp; + FCP_RSP * fcpRsp; + uint32 * lp, i, qfull; + dvi_t * dev_ptr, * next_dev_ptr; + NODELIST * ndlp; + + /* called from host_interrupt() to process R2ATT */ + binfo = &BINFO; + cmd = &temp->iocb; + qfull = 0; + ndlp = 0; + + /* look up FCP complete by IoTag */ + if ((fcptr = fc_deq_fcbuf_active(rp, cmd->ulpIoTag)) == NULL) { + /* ERROR: completion with missing FCP command */ + if (!((cmd->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((cmd->un.grsp.perr.statLocalError == IOERR_INVALID_RPI) || + (cmd->un.grsp.perr.statLocalError == IOERR_ABORT_IN_PROGRESS) || + (cmd->un.grsp.perr.statLocalError == IOERR_SEQUENCE_TIMEOUT) || + (cmd->un.grsp.perr.statLocalError == IOERR_ABORT_REQUESTED)))) { + /* Stray FCP completion */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0720, /* ptr to msg structure */ + fc_mes0720, /* ptr to msg */ + fc_msgBlk0720.msgPreambleStr, /* begin varargs */ + cmd->ulpCommand, + cmd->ulpIoTag, + cmd->ulpStatus, + cmd->un.ulpWord[4]); /* end varargs */ + } + + FCSTATCTR.fcpStrayCmpl++; + return; + } + FCSTATCTR.fcpCmpl++; + + dev_ptr = fcptr->dev_ptr; + dev_ptr->stop_send_io = 0; + + + if(dev_ptr->queue_state == ACTIVE_PASSTHRU) { + node_t * map_node_ptr; + struct dev_info * map_dev_ptr; + + map_node_ptr = (node_t 
*)dev_ptr->pend_head; + map_dev_ptr = (struct dev_info *)dev_ptr->pend_tail; + dev_ptr->pend_head = 0; + dev_ptr->pend_tail = 0; + dev_ptr->queue_state = HALTED; + dev_ptr->active_io_count--; + if(map_dev_ptr) + map_dev_ptr->active_io_count--; + if(map_node_ptr) + map_node_ptr->num_active_io--; + + dev_ptr->ioctl_event = cmd->ulpStatus; + dev_ptr->ioctl_errno = (uint32)cmd->un.grsp.perr.statLocalError; + fcpRsp = &fcptr->fcp_rsp; + dev_ptr->sense_length = SWAP_DATA(fcpRsp->rspSnsLen); + if(cmd->ulpCommand == CMD_FCP_IWRITE64_CX) { + if (cmd->ulpStatus == IOSTAT_SUCCESS) { + dev_ptr->clear_count = 1; + } + else { + dev_ptr->clear_count = 0; + } + } + else { + dev_ptr->clear_count = cmd->un.fcpi.fcpi_parm; + } + return; + } + + /* + * Is it a SCSI Report Lun command ? + */ + if ((fcptr->fcp_cmd.fcpCdb[0] == FCP_SCSI_REPORT_LUNS) && + (fcptr->flags & FCBUF_INTERNAL)) { + uchar *datap; + MBUF_INFO *mbufp; + node_t *nodep; + uint32 rptLunLen; + uint32 *datap32; + uint32 max, lun; + + mbufp = (MBUF_INFO *)fcptr->sc_bufp; + fcptr->sc_bufp = 0; + mbufp->size = 4096; + nodep = dev_ptr->nodep; + if(nodep == 0) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )mbufp); + dev_ptr->active_io_count--; + fc_enq_fcbuf(fcptr); + return; + } + if ((cmd->ulpStatus == IOSTAT_SUCCESS) || + ((cmd->ulpStatus == IOSTAT_FCP_RSP_ERROR) && + (fcptr->fcp_rsp.rspStatus2 & RESID_UNDER) && + (fcptr->fcp_rsp.rspStatus3 == SCSI_STAT_GOOD))) { + + datap = (uchar *)mbufp->virt; + /* + * if Lun0 uses VSA, we assume others use too. 
+ */ + if ((datap[8] & 0xc0) == 0x40) { + nodep->addr_mode = VOLUME_SET_ADDRESSING; + } + /* + * Skip retry + */ + datap32 = (uint32 *)mbufp->virt; + rptLunLen = SWAP_DATA(*datap32); + /* search for the max lun */ + max = 0; + for(i=0; i < rptLunLen; i+=8) { + datap32 += 2; + lun = (((* datap32) >> FC_LUN_SHIFT) & 0xff); + if(lun > max) + max = lun; + } + if(i) { + nodep->max_lun = max + 1; + } + + if(nodep->virtRptLunData == 0) { + if(rptLunLen > 8) { /* more than 1 lun */ + nodep->virtRptLunData = mbufp->virt; + nodep->physRptLunData = mbufp->phys; + } else { + fc_free(p_dev_ctl, mbufp); + } + } + } else { + datap = 0; + fc_free(p_dev_ctl, mbufp); + nodep->virtRptLunData = 0; + nodep->physRptLunData = 0; + } + + fc_mem_put(binfo, MEM_IOCB, (uchar * )mbufp); + + dev_ptr->active_io_count--; + nodep->num_active_io--; + fc_enq_fcbuf(fcptr); + + if ((datap == 0) && (!(nodep->flags & RETRY_RPTLUN))) { + nodep->flags |= RETRY_RPTLUN; + /* Wait a little bit for ABTSs to settle down */ + fc_clk_set(p_dev_ctl, 1, issue_report_lun, (void *)dev_ptr, 0); + } + else { + nodep->flags &= ~RETRY_RPTLUN; + nodep->rptlunstate = REPORT_LUN_COMPLETE; + } + return; + } + + + sbp = fcptr->sc_bufp; + bp = (struct buf *) sbp; + + + if (cmd->ulpStatus) { + fcpRsp = &fcptr->fcp_rsp; + { + uint32 did; + uint32 pan; + uint32 sid; + + if ((dev_ptr->nodep) && (dev_ptr->nodep->nlp)) { + ndlp = dev_ptr->nodep->nlp; + did = ndlp->nlp_DID; + pan = ndlp->id.nlp_pan; + sid = ndlp->id.nlp_sid; + if(ndlp->nlp_action & NLP_DO_RSCN) + qfull = 1; + } else { + did = 0; + pan = 0; + sid = 0; + } + /* FCP cmd failed on device (, ) DID */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0729, /* ptr to msg structure */ + fc_mes0729, /* ptr to msg */ + fc_msgBlk0729.msgPreambleStr, /* begin varargs */ + fcptr->fcp_cmd.fcpCdb[0], + FC_SCSID(pan, sid), + (uint32)(dev_ptr->lun_id), + did, + (uint32)fcpRsp->rspInfo3, + (uint32)cmd->un.grsp.perr.statLocalError, + *((uint32 *)(((uint32 *)cmd) + WD6)), /* 
IOCB wd 6 */ + *((uint32 *)(((uint32 *)cmd) + WD7))); /* IOCB wd 7, end varargs */ + + } + lp = (uint32 * )fcpRsp; + i = 0; + /* FCP command failed: RSP */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0730, /* ptr to msg structure */ + fc_mes0730, /* ptr to msg */ + fc_msgBlk0730.msgPreambleStr, /* begin varargs */ + lp[2], + lp[3], + lp[4], + lp[5]); /* end varargs */ + if (fcpRsp->rspStatus2 & RSP_LEN_VALID) { + i = SWAP_DATA(fcpRsp->rspRspLen); + } + if (fcpRsp->rspStatus2 & SNS_LEN_VALID) { + lp = (uint32 * )(((uchar * ) & fcpRsp->rspInfo0) + i); + /* FCP command failed: SNS */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0731, /* ptr to msg structure */ + fc_mes0731, /* ptr to msg */ + fc_msgBlk0731.msgPreambleStr, /* begin varargs */ + lp[0], + lp[1], + lp[2], + lp[3], + lp[4], + lp[5], + lp[6], + lp[7]); /* end varargs */ + if (i > sizeof(FCP_RSP)) { + cmd->ulpStatus = IOSTAT_REMOTE_STOP; + goto handle_iocb; + } + + if(binfo->fc_process_LA == 0) + goto skip4; /* link down processing */ + if (dev_ptr->first_check & FIRST_CHECK_COND) { + clp = DD_CTL.p_config[binfo->fc_brd_no]; + dev_ptr->first_check &= ~FIRST_CHECK_COND; + if((clp[CFG_FIRST_CHECK].a_current) && + (SWAP_DATA((lp[3]) & SWAP_DATA(0xFF000000)) == 0x29000000)) { + FCSTATCTR.fcpFirstCheck++; + + lp = (uint32 *)&cmd->un.ulpWord[4]; + /* Retry FCP command due to 29,00 check condition */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0732, /* ptr to msg structure */ + fc_mes0732, /* ptr to msg */ + fc_msgBlk0732.msgPreambleStr, /* begin varargs */ + *lp, + *(lp+1), + *(lp+2), + *(lp+3)); /* end varargs */ + fc_fcp_bufunmap(p_dev_ctl, sbp); + + + /* + * Queue this command to the head of the device's + * pending queue for processing by fc_issue_cmd. + */ + if (dev_ptr->pend_head == NULL) { /* Is queue empty? 
*/ + dev_ptr->pend_head = sbp; + dev_ptr->pend_tail = sbp; + bp->av_forw = NULL; + fc_enq_wait(dev_ptr); + } else { /* Queue not empty */ + bp->av_forw = (struct buf *) dev_ptr->pend_head; + dev_ptr->pend_head = sbp; + } + dev_ptr->pend_count++; + dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + fc_enq_fcbuf(fcptr); + return; + } + } + + fc_bcopy(((uchar * ) & fcpRsp->rspInfo0) + i, dev_ptr->sense, + MAX_FCP_SNS); + dev_ptr->sense_valid = 1; + dev_ptr->sense_length = SWAP_DATA(fcpRsp->rspSnsLen); + + } else { + if ((cmd->ulpStatus == IOSTAT_FCP_RSP_ERROR) && + ((((uchar)fcpRsp->rspStatus3) & SCSI_STATUS_MASK) == SCSI_STAT_QUE_FULL) && + (dev_ptr->qfull_retries > 0) && + (sbp->qfull_retry_count < dev_ptr->qfull_retries)) { + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if (clp[CFG_DQFULL_THROTTLE].a_current) { + if (dev_ptr->fcp_cur_queue_depth > FC_MIN_QFULL) { + if(dev_ptr->active_io_count > FC_MIN_QFULL) + dev_ptr->fcp_cur_queue_depth = dev_ptr->active_io_count - 1; + else + dev_ptr->fcp_cur_queue_depth = FC_MIN_QFULL; + } + } + + fc_qfull_retry((void *)fcptr); + + sbp->qfull_retry_count++; + + dev_ptr->qfullcnt++; + dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + fc_enq_fcbuf(fcptr); + return; + } + } + } else { + fcpRsp = &fcptr->fcp_rsp; + } + +handle_iocb: + + switch (cmd->ulpStatus) { + case IOSTAT_SUCCESS: + FCSTATCTR.fcpGood++; + break; + + case IOSTAT_FCP_RSP_ERROR: + /* ERROR: all is not well with the FCP Response */ + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + + bp->b_resid = 0; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + FCSTATCTR.fcpRspErr++; + + if(binfo->fc_process_LA == 0) + goto skip4; /* link down processing */ + + + if ((fcpRsp->rspStatus2 & RESID_UNDER) || + (fcpRsp->rspStatus2 & RESID_OVER)) { + if (fcpRsp->rspStatus2 & RESID_UNDER) { + /* This is not an error! 
*/ + bp->b_resid = SWAP_DATA(fcpRsp->rspResId); + + bp->b_error = 0; + bp->b_flags &= ~B_ERROR; + /* FCP Read Underrun */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0733, /* ptr to msg structure */ + fc_mes0733, /* ptr to msg */ + fc_msgBlk0733.msgPreambleStr, /* begin varargs */ + *((uint32 *)(((uint32 *)cmd) + WD7)), /* IOCB wd 7 */ + (uint32)cmd->ulpContext, + SWAP_DATA(fcpRsp->rspResId), + cmd->un.fcpi.fcpi_parm); /* end varargs */ + } + /* Overrun already has error set */ + } + else { + if ((bp->b_flags & B_READ) && cmd->un.fcpi.fcpi_parm) { + /* This is ALWAYS a readcheck error!! */ + + /* Give Check Condition priority over Read Check */ + if (fcpRsp->rspStatus3 != SCSI_STAT_CHECK_COND) { + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + /* FCP Read Check Error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0734, /* ptr to msg structure */ + fc_mes0734, /* ptr to msg */ + fc_msgBlk0734.msgPreambleStr, /* begin varargs */ + *((uint32 *)(((uint32 *)cmd) + WD7)), /* IOCB wd 7 */ + (uint32)cmd->ulpContext, + SWAP_DATA(fcpRsp->rspResId), + cmd->un.fcpi.fcpi_parm); /* end varargs */ + } + else { + /* FCP Read Check Error with Check Condition */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0735, /* ptr to msg structure */ + fc_mes0735, /* ptr to msg */ + fc_msgBlk0735.msgPreambleStr, /* begin varargs */ + *((uint32 *)(((uint32 *)cmd) + WD7)), /* IOCB wd 7 */ + (uint32)cmd->ulpContext, + SWAP_DATA(fcpRsp->rspResId), + cmd->un.fcpi.fcpi_parm); /* end varargs */ + } + } + } + + /* For QUE_FULL we will delay the iodone */ + if((((uchar) fcpRsp->rspStatus3) & SCSI_STATUS_MASK) == SCSI_STAT_QUE_FULL) { + dev_ptr->qfullcnt++; + if (clp[CFG_DQFULL_THROTTLE].a_current) { + if (dev_ptr->fcp_cur_queue_depth > FC_MIN_QFULL) { + if(dev_ptr->active_io_count > 1) + dev_ptr->fcp_cur_queue_depth = dev_ptr->active_io_count - 1; + else + dev_ptr->fcp_cur_queue_depth = 1; + } + if (dev_ptr->active_io_count > 
FC_MIN_QFULL) { + /* + * Try out + * stop_send_io will be decreament by 1 in fc_q_depth_up(); + */ + dev_ptr->stop_send_io = clp[CFG_NO_DEVICE_DELAY].a_current; + } + } + /* FCP QUEUE Full */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0736, /* ptr to msg structure */ + fc_mes0736, /* ptr to msg */ + fc_msgBlk0736.msgPreambleStr, /* begin varargs */ + dev_ptr->fcp_cur_queue_depth, + dev_ptr->active_io_count, + dev_ptr->flags, + clp[CFG_DQFULL_THROTTLE].a_current); /* end varargs */ + qfull = 1; + /* Set any necessary flags for buf error */ + bp->b_error = EBUSY; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + } + lpfc_handle_fcp_error(pkt, fcptr, cmd); + + if ((fcpRsp->rspStatus2 & RSP_LEN_VALID) && + (fcpRsp->rspInfo3 != RSP_NO_FAILURE)) { + + /* Error detected in the FCP layer */ + sbp->status_validity = SC_ADAPTER_ERROR; + + if(clp[CFG_DELAY_RSP_ERR].a_current) + qfull = clp[CFG_DELAY_RSP_ERR].a_current; + + switch (fcpRsp->rspInfo3) { + case RSP_TM_NOT_SUPPORTED: + SET_ADAPTER_STATUS(sbp, SC_NO_DEVICE_RESPONSE) + break; + + case RSP_DATA_BURST_ERR: + case RSP_CMD_FIELD_ERR: + case RSP_RO_MISMATCH_ERR: + if (fcpRsp->rspStatus3 != SCSI_STAT_GOOD) { + sbp->status_validity = SC_SCSI_ERROR; + sbp->scsi_status = fcpRsp->rspStatus3; + if ((fcpRsp->rspStatus3 == SC_CHECK_CONDITION) || + (fcpRsp->rspStatus3 == SC_COMMAND_TERMINATED)) { + sbp->adap_q_status = SC_DID_NOT_CLEAR_Q; + } + + + break; + } + + case RSP_TM_NOT_COMPLETED: + default: + SET_ADAPTER_STATUS(sbp, SC_ADAPTER_HDW_FAILURE) + break; + } + } else if (fcpRsp->rspStatus3 != SCSI_STAT_GOOD) { + /* SCSI layer detected error */ + if (fcpRsp->rspStatus3 == SCSI_STAT_CHECK_COND) { + uint32 cc; + /* FCP error: Check condition */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0737, /* ptr to msg structure */ + fc_mes0737, /* ptr to msg */ + fc_msgBlk0737.msgPreambleStr, /* begin varargs */ + *((uint32 *)(((uint32 *)cmd) + WD7)), /* IOCB wd 7 */ + (uint32)cmd->ulpIoTag, + 
(uint32)cmd->ulpContext, + (uint32)cmd->un.grsp.perr.statLocalError); /* end varargs */ + i = SWAP_DATA(fcpRsp->rspRspLen); + lp = (uint32 * )(((uchar * ) & fcpRsp->rspInfo0) + i); + + cc = (SWAP_DATA((lp[3]) & SWAP_DATA(0xFF000000))); + switch(cc) { + case 0x29000000: /* Power on / reset */ + i = 0; + /* 29,00 Check condition received */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0738, /* ptr to msg structure */ + fc_mes0738, /* ptr to msg */ + fc_msgBlk0738.msgPreambleStr, /* begin varargs */ + lp[0], + lp[1], + lp[2], + lp[3]); /* end varargs */ + break; + case 0x0: /* No additional sense info */ + if((lp[3]) & SWAP_DATA(0x00FF0000)) /* if ASCQ != 0 */ + goto default_chk; + case 0x44000000: /* Internal Target failure */ + case 0x25000000: /* Login Unit Not Support */ + case 0x20000000: /* Invalid Command operation code */ + if ((cc == 0x20000000) && (fcptr->fcp_cmd.fcpCdb[0] == SCSI3_PERSISTENT_RESERVE_IN)) { + /* Check condition received ERR1 */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0739, /* ptr to msg structure */ + fc_mes0739, /* ptr to msg */ + fc_msgBlk0739.msgPreambleStr, /* begin varargs */ + lp[0], + lp[1], + lp[2], + lp[3]); /* end varargs */ + goto out; + } + if(clp[CFG_CHK_COND_ERR].a_current) { + /* We want to return check cond on TUR cmd */ + if (fcptr->fcp_cmd.fcpCdb[0] == FCP_SCSI_TEST_UNIT_READY) + goto default_chk; + fc_bzero((void * )dev_ptr->sense, MAX_FCP_SNS); + dev_ptr->sense_valid = 0; + dev_ptr->sense_length = 0; + fcpRsp->rspStatus3 = SC_COMMAND_TERMINATED; + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_SCSI_BUS_RESET) + i = 0; + if(clp[CFG_DELAY_RSP_ERR].a_current) + qfull = clp[CFG_DELAY_RSP_ERR].a_current; + /* Check condition received ERR2 */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0740, /* ptr to msg structure */ + fc_mes0740, /* ptr to msg */ + fc_msgBlk0740.msgPreambleStr, /* begin 
varargs */ + lp[0], + lp[1], + lp[2], + lp[3]); /* end varargs */ + goto out; + } + default: + if(clp[CFG_DELAY_RSP_ERR].a_current) + qfull = clp[CFG_DELAY_RSP_ERR].a_current; +default_chk: + i = 0; + /* Check condition received */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0741, /* ptr to msg structure */ + fc_mes0741, /* ptr to msg */ + fc_msgBlk0741.msgPreambleStr, /* begin varargs */ + lp[0], + lp[1], + lp[2], + lp[3]); /* end varargs */ + break; + } + } + else { + if(clp[CFG_DELAY_RSP_ERR].a_current) + qfull = clp[CFG_DELAY_RSP_ERR].a_current; + } + + sbp->status_validity = SC_SCSI_ERROR; + sbp->scsi_status = fcpRsp->rspStatus3; + if ((fcpRsp->rspStatus3 == SC_CHECK_CONDITION) || + (fcpRsp->rspStatus3 == SC_COMMAND_TERMINATED)) { + sbp->adap_q_status = SC_DID_NOT_CLEAR_Q; + + } + } + break; + + case IOSTAT_REMOTE_STOP: + /* ERROR: ABTS/ABTX by remote N_PORT */ + FCSTATCTR.fcpRemoteStop++; + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_SCSI_ERROR; + sbp->scsi_status = SC_COMMAND_TERMINATED; + sbp->adap_q_status = SC_DID_NOT_CLEAR_Q; + + break; + + case IOSTAT_LOCAL_REJECT: + FCSTATCTR.fcpLocalErr++; + switch (cmd->un.grsp.perr.statLocalError) { + case IOERR_SEQUENCE_TIMEOUT: + FCSTATCTR.fcpLocalTmo++; + /* E_D_TOV timeout */ + bp->b_error = ETIMEDOUT; + sbp->adap_q_status = SC_DID_NOT_CLEAR_Q; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_CMD_TIMEOUT) + break; + + case IOERR_NO_RESOURCES: + FCSTATCTR.fcpLocalNores++; + fc_qfull_retry((void *)fcptr); + dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + fc_enq_fcbuf(fcptr); + return; + case IOERR_BUFFER_SHORTAGE: + FCSTATCTR.fcpLocalBufShort++; + /* The adapter is too busy to deal with this command */ + bp->b_error = EBUSY; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = 0; + break; + + case IOERR_MISSING_CONTINUE: 
+ case IOERR_ILLEGAL_COMMAND: + case IOERR_ILLEGAL_FIELD: + case IOERR_BAD_CONTINUE: + case IOERR_TOO_MANY_BUFFERS: + case IOERR_EXTRA_DATA: + case IOERR_ILLEGAL_LENGTH: + case IOERR_UNSUPPORTED_FEATURE: + /* Let's call these driver software errors */ + qfull = 1; + FCSTATCTR.fcpLocalSfw++; + bp->b_error = EINVAL; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = 0; + + { + uint32 did; + + did = 0; + if ((dev_ptr->nodep) && (dev_ptr->nodep->nlp)) + did = dev_ptr->nodep->nlp->nlp_DID; + /* FCP completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0742, /* ptr to msg structure */ + fc_mes0742, /* ptr to msg */ + fc_msgBlk0742.msgPreambleStr, /* begin varargs */ + cmd->ulpStatus, + cmd->un.ulpWord[4], + did ); /* end varargs */ + } + break; + + case IOERR_TX_DMA_FAILED: + FCSTATCTR.fcpLocalTxDMA++; + goto skip2; + case IOERR_RX_DMA_FAILED: + FCSTATCTR.fcpLocalRxDMA++; + goto skip2; + case IOERR_INTERNAL_ERROR: + FCSTATCTR.fcpLocalinternal++; + goto skip2; + case IOERR_CORRUPTED_DATA: + case IOERR_CORRUPTED_RPI: + FCSTATCTR.fcpLocalCorrupt++; +skip2: + /* Let's call these adapter hardware errors */ + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_ADAPTER_HDW_FAILURE) + + { + uint32 did; + + did = 0; + if ((dev_ptr->nodep) && (dev_ptr->nodep->nlp)) + did = dev_ptr->nodep->nlp->nlp_DID; + /* FCP completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0743, /* ptr to msg structure */ + fc_mes0743, /* ptr to msg */ + fc_msgBlk0743.msgPreambleStr, /* begin varargs */ + cmd->ulpStatus, + cmd->un.ulpWord[4], + did ); /* end varargs */ + } + + break; + + case IOERR_ILLEGAL_FRAME: + FCSTATCTR.fcpLocalIllFrm++; + goto skip3; + case IOERR_DUP_FRAME: + FCSTATCTR.fcpLocalDupFrm++; + goto skip3; + case IOERR_LINK_CONTROL_FRAME: + FCSTATCTR.fcpLocalLnkCtlFrm++; +skip3: + qfull = 1; + /* Let's call these device 
hardware errors */ + bp->b_error = EINVAL; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = 0; + + { + uint32 did; + + did = 0; + if ((dev_ptr->nodep) && (dev_ptr->nodep->nlp)) + did = dev_ptr->nodep->nlp->nlp_DID; + + lp = (uint32 *)&cmd->un.ulpWord[4]; + /* FCP completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0744, /* ptr to msg structure */ + fc_mes0744, /* ptr to msg */ + fc_msgBlk0744.msgPreambleStr, /* begin varargs */ + did, + *lp, + *(lp+2), + *(lp+3) ); /* end varargs */ + } + + break; + + case IOERR_LOOP_OPEN_FAILURE: + FCSTATCTR.fcpLocalLoopOpen++; + /* The device disappeared from the loop! */ + qfull = 1; + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_NO_DEVICE_RESPONSE) + if(dev_ptr->nodep && (dev_ptr->nodep->flags & FC_NODEV_TMO)) { + break; + } + if(binfo->fc_ffstate != FC_READY) { + break; + } + /* Will HALT, CLEARQ, and kick off discovery, below */ + /* Try to relogin, and if unsuccessful reject future cmds */ + if((ndlp == 0) && dev_ptr->nodep) + ndlp = fc_findnode_rpi(binfo, (uint32)dev_ptr->nodep->rpi); + + if ((ndlp) && !(ndlp->nlp_flag & (NLP_NODEV_TMO | NLP_REQ_SND))) { + ndlp->nlp_flag &= ~NLP_RM_ENTRY; + /* We are in FC_READY state */ + if (!(ndlp->nlp_action & NLP_DO_RSCN)) { + binfo->fc_flag |= FC_RSCN_MODE; + ndlp->nlp_action |= NLP_DO_RSCN; + fc_nextrscn(p_dev_ctl, 1); + } + } + break; + + case IOERR_INVALID_RPI: + FCSTATCTR.fcpLocalInvalRpi++; + goto skip4; + case IOERR_LINK_DOWN: + FCSTATCTR.fcpLocalLinkDown++; +skip4: + /* Retry these failures */ + qfull=1; + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_SCSI_BUS_RESET) + break; + + case IOERR_OUT_OF_ORDER_DATA: + case IOERR_OUT_OF_ORDER_ACK: + FCSTATCTR.fcpLocalOOO++; + /* Retry these failures */ + bp->b_error = ENXIO; + 
bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = 0; + break; + + case IOERR_ABORT_IN_PROGRESS: + FCSTATCTR.fcpLocalAbtInp++; + goto skip5; + case IOERR_ABORT_REQUESTED: + FCSTATCTR.fcpLocalAbtReq++; +skip5: + /* Abort requested by us */ + if (fcptr->flags & FCBUF_ABTS) { + /* ABTS sent because of operation timeout */ + bp->b_error = ETIMEDOUT; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_CMD_TIMEOUT) + } else { + bp->b_error = ENXIO; + sbp->status_validity = 0; + } + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + break; + + case IOERR_SUCCESS: + case IOERR_NO_XRI: + case IOERR_XCHG_DROPPED: + case IOERR_RCV_BUFFER_WAITING: + case IOERR_RECEIVE_BUFFER_TIMEOUT: + case IOERR_RING_RESET: + case IOERR_BAD_HOST_ADDRESS: + case IOERR_RCV_HDRBUF_WAITING: + case IOERR_MISSING_HDR_BUFFER: + case IOERR_MSEQ_CHAIN_CORRUPTED: + case IOERR_ABORTMULT_REQUESTED: + default: + FCSTATCTR.fcpLocal++; + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_NO_DEVICE_RESPONSE) + + { + uint32 did; + + did = 0; + if ((dev_ptr->nodep) && (dev_ptr->nodep->nlp)) + did = dev_ptr->nodep->nlp->nlp_DID; + /* FCP completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0745, /* ptr to msg structure */ + fc_mes0745, /* ptr to msg */ + fc_msgBlk0745.msgPreambleStr, /* begin varargs */ + cmd->ulpStatus, + cmd->un.ulpWord[4], + did ); /* end varargs */ + } + break; + } + break; + + case IOSTAT_NPORT_RJT: + case IOSTAT_FABRIC_RJT: + FCSTATCTR.fcpPortRjt++; + /* The fabric or port rejected this command */ + if (cmd->un.grsp.perr.statAction == RJT_RETRYABLE) { + bp->b_error = ENXIO; + sbp->status_validity = SC_SCSI_ERROR; + sbp->scsi_status = SC_BUSY_STATUS; + } else { + bp->b_error = EIO; + sbp->status_validity = 0; + } + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + break; + + case IOSTAT_NPORT_BSY: + case 
IOSTAT_FABRIC_BSY: + FCSTATCTR.fcpPortBusy++; + /* The fabric or port is too busy to deal with this command */ + bp->b_error = ENXIO; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_SCSI_ERROR; + sbp->scsi_status = SC_BUSY_STATUS; + break; + + case IOSTAT_INTERMED_RSP: + case IOSTAT_LS_RJT: + case IOSTAT_BA_RJT: + default: + FCSTATCTR.fcpError++; + /* ERROR: None of these errors should occur! */ + bp->b_error = EIO; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_NO_DEVICE_RESPONSE) + + { + uint32 did; + + did = 0; + if ((dev_ptr->nodep) && (dev_ptr->nodep->nlp)) + did = dev_ptr->nodep->nlp->nlp_DID; + /* FCP completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0746, /* ptr to msg structure */ + fc_mes0746, /* ptr to msg */ + fc_msgBlk0746.msgPreambleStr, /* begin varargs */ + cmd->ulpStatus, + cmd->un.ulpWord[4], + did ); /* end varargs */ + } + break; + } +out: + + if (fcptr->fcp_cmd.fcpCntl2) + { + /* This is a task management command */ + if (bp->b_flags & B_ERROR) + dev_ptr->ioctl_errno = bp->b_error; + else + dev_ptr->ioctl_errno = 0; + + + + if (fcptr->fcp_cmd.fcpCntl2 & TARGET_RESET) { + /* Cmpl Target Reset */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0747, /* ptr to msg structure */ + fc_mes0747, /* ptr to msg */ + fc_msgBlk0747.msgPreambleStr, /* begin varargs */ + (uint32)dev_ptr->nodep->scsi_id, + (uint32)dev_ptr->lun_id, + (uint32)cmd->un.grsp.perr.statLocalError, + *((uint32 *)(((uint32 *)cmd) + WD7))); /* end varargs */ + clp = DD_CTL.p_config[binfo->fc_brd_no]; + dev_ptr->flags &= ~SCSI_TARGET_RESET; + for (next_dev_ptr = dev_ptr->nodep->lunlist; next_dev_ptr != NULL; + next_dev_ptr = next_dev_ptr->next) { + + next_dev_ptr->flags &= ~SCSI_TARGET_RESET; + /* First send ABTS on any outstanding I/O in txp queue */ + fc_abort_fcp_txpq(binfo, next_dev_ptr); + fc_fail_cmd(next_dev_ptr, ENXIO, 
STAT_DEV_RESET); + next_dev_ptr->fcp_cur_queue_depth = + (ushort)clp[CFG_DFT_LUN_Q_DEPTH].a_current; + if (next_dev_ptr->ioctl_wakeup == 0) + fc_restart_device(next_dev_ptr); + } + } + + if (fcptr->fcp_cmd.fcpCntl2 & LUN_RESET) { + /* Cmpl LUN Reset */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0748, /* ptr to msg structure */ + fc_mes0748, /* ptr to msg */ + fc_msgBlk0748.msgPreambleStr, /* begin varargs */ + (uint32)dev_ptr->nodep->scsi_id, + (uint32)dev_ptr->lun_id, + (uint32)cmd->un.grsp.perr.statLocalError, + *((uint32 *)(((uint32 *)cmd) + WD7))); /* end varargs */ + dev_ptr->flags &= ~SCSI_LUN_RESET; + /* First send ABTS on any outstanding I/O in txp queue */ + fc_abort_fcp_txpq(binfo, dev_ptr); + fc_fail_cmd(dev_ptr, ENXIO, STAT_DEV_RESET); + if (dev_ptr->ioctl_wakeup == 0) + fc_restart_device(dev_ptr); + } + + if (fcptr->fcp_cmd.fcpCntl2 & ABORT_TASK_SET) { + /* Cmpl Abort Task Set */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0749, /* ptr to msg structure */ + fc_mes0749, /* ptr to msg */ + fc_msgBlk0749.msgPreambleStr, /* begin varargs */ + (uint32)dev_ptr->nodep->scsi_id, + (uint32)dev_ptr->lun_id, + (uint32)cmd->un.grsp.perr.statLocalError, + *((uint32 *)(((uint32 *)cmd) + WD7))); /* end varargs */ + dev_ptr->flags &= ~SCSI_ABORT_TSET; + /* First send ABTS on any outstanding I/O in txp queue */ + fc_abort_fcp_txpq(binfo, dev_ptr); + fc_fail_cmd(dev_ptr, ENXIO, STAT_DEV_RESET); + if (dev_ptr->ioctl_wakeup == 0) + fc_restart_device(dev_ptr); + } + + if (dev_ptr->ioctl_wakeup == 1) { + dev_ptr->ioctl_wakeup = 0; + fc_admin_wakeup(p_dev_ctl, dev_ptr, sbp); + } + } else { + if ((bp->b_flags & B_ERROR) && + (dev_ptr->queue_state != STOPPING)) { + /* An error has occurred, so halt the queues */ + sbp->adap_q_status = SC_DID_NOT_CLEAR_Q; + if(qfull) + fc_delay_iodone(p_dev_ctl, sbp); + else + fc_do_iodone(bp); + } else { + if(qfull) + fc_delay_iodone(p_dev_ctl, sbp); + else + fc_do_iodone(bp); + } + } + + + 
dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + fc_enq_fcbuf(fcptr); + + + if ((dev_ptr->nodep->tgt_queue_depth) && + (dev_ptr->nodep->tgt_queue_depth == dev_ptr->nodep->num_active_io)) { + re_issue_fcp_cmd(dev_ptr->nodep->last_dev); + } + return; +} /* End handle_fcp_event */ + + +int +fc_delay_iodone( +fc_dev_ctl_t *p_dev_ctl, +T_SCSIBUF * sbp) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + uint32 tmout; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + if(clp[CFG_NO_DEVICE_DELAY].a_current) { + /* Need to set a timer so iodone can be called + * for buffer upon expiration. + */ + tmout = clp[CFG_NO_DEVICE_DELAY].a_current; + + if(fc_clk_set(p_dev_ctl, tmout, + lpfc_scsi_selto_timeout, (void *)sbp, 0) != 0) + return(1); + } + fc_do_iodone((struct buf *)sbp); + return(0); +} /* End fc_delay_iodone */ + + +/**********************************************/ +/** handle_iprcv_seq **/ +/** **/ +/**********************************************/ +_static_ int +handle_iprcv_seq( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *temp) +{ + MAILBOXQ * mb; + FC_BRD_INFO * binfo; + IOCB * cmd = 0; + IOCB * savecmd; + IOCBQ * savetemp; + NETHDR * nh; + fcipbuf_t * p_mbuf, *mp, *last_mp; + ndd_t * p_ndd; + NODELIST * ndlp; + MATCHMAP * matp; + uchar * daddr; + uchar * saddr; + int count, i, la; + + binfo = &BINFO; + p_ndd = (ndd_t * ) & (NDD); + + p_mbuf = 0; + matp = (MATCHMAP *)0; /* prevent compiler warning */ + + if (++NDDSTAT.ndd_recvintr_lsw == 0) { + NDDSTAT.ndd_recvintr_msw++; + } + + mp = 0; + last_mp = 0; + count = 0; + la = 0; + + savetemp = temp; + if (binfo->fc_ffstate < FC_READY) { + if (binfo->fc_ffstate < rp->fc_xmitstate) { + goto dropout; + } + la = 1; + } + + savecmd = &temp->iocb; + while (temp) { + cmd = &temp->iocb; + if (cmd->ulpStatus) { + if ((cmd->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((cmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { + FCSTATCTR.NoRcvBuf++; + if(!(binfo->fc_flag & FC_NO_RCV_BUF)) { + /* IP Response 
Ring out of posted buffers */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0602, /* ptr to msg structure */ + fc_mes0602, /* ptr to msg */ + fc_msgBlk0602.msgPreambleStr, /* begin varargs */ + rp->fc_ringno, + rp->fc_missbufcnt, + FCSTATCTR.NoRcvBuf); /* end varargs */ + } + binfo->fc_flag |= FC_NO_RCV_BUF; + + fc_post_mbuf(p_dev_ctl, rp, 0); + } + else + NDDSTAT.ndd_ierrors++; +dropout: + NDDSTAT.ndd_ipackets_drop++; + fc_free_iocb_buf(p_dev_ctl, rp, savetemp); + if (p_mbuf) { + m_freem(p_mbuf); + } + return(0); + } + + if (cmd->ulpBdeCount == 0) { + temp = (IOCBQ * )temp->q; + continue; + } + for (i = 0; i < (int)cmd->ulpBdeCount; i++) { + matp = fc_getvaddr(p_dev_ctl, rp, (uchar *) + getPaddr(cmd->un.cont64[i].addrHigh, cmd->un.cont64[i].addrLow)); + if (matp == 0) { + uchar *bdeAddr; + + bdeAddr = (uchar *)getPaddr(cmd->un.cont64[0].addrHigh, + cmd->un.cont64[0].addrLow); + goto dropout; + } + + mp = (fcipbuf_t * )matp; + if (last_mp) { + fcnextdata(last_mp) = mp; + } else { + p_mbuf = mp; + } + last_mp = mp; + fcnextdata(mp) = 0; + fcsetdatalen(mp, cmd->un.cont64[i].tus.f.bdeSize); + count += cmd->un.cont64[i].tus.f.bdeSize; + } + + fc_post_mbuf(p_dev_ctl, rp, i); + cmd->ulpBdeCount = 0; + temp = (IOCBQ * )temp->q; + } + + if (p_mbuf == 0) { + goto dropout; + } + + binfo->fc_flag &= ~FC_NO_RCV_BUF; + + /* Set any IP buffer flags to indicate a recieve buffer, if needed */ + + if (++NDDSTAT.ndd_ipackets_lsw == 0) + NDDSTAT.ndd_ipackets_msw++; + + NDDSTAT.ndd_ibytes_lsw += count; + if ((int)NDDSTAT.ndd_ibytes_lsw < count) + NDDSTAT.ndd_ibytes_msw++; + nh = (NETHDR * )fcdata(p_mbuf); + + if(lpfc_nethdr == 0) { + emac_t * ep; + + /* Adjust mbuf count now */ + count -= 2; + + fcpktlen(p_mbuf) = count; /* total data in mbuf */ + fcincdatalen(p_mbuf, -2); + + fcdata(p_mbuf) += 2; + ep = (emac_t * )(fcdata(p_mbuf)); + daddr = (uchar *)ep->dest_addr; + saddr = (uchar *)ep->src_addr; + ep->llc_len = (count - sizeof(emac_t)); + } + else { + NETHDR * np; + + np 
= (NETHDR * )(fcdata(p_mbuf)); + daddr = np->fc_destname.IEEE; + saddr = np->fc_srcname.IEEE; + fcpktlen(p_mbuf) = count; /* total data in mbuf */ + } + + if (count < (HDR_LEN + sizeof(snaphdr_t))) + goto dropout; + + /* If this is first broadcast received from that address */ + if (savecmd->un.xrseq.w5.hcsw.Fctl & BC) { +bcst: + FCSTATCTR.frameRcvBcast++; + if (++NDDSTAT.ndd_ifInBcastPkts_lsw == 0) + NDDSTAT.ndd_ifInBcastPkts_msw++; + + fc_bcopy((char *)fcbroadcastaddr, (char *)daddr, MACADDR_LEN); + + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + (uint32)savecmd->un.xrseq.xrsqRo)) == 0) { + + /* Need to cache the did / portname */ + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = savecmd->un.xrseq.xrsqRo; + fc_bcopy(&nh->fc_srcname, &ndlp->nlp_portname, sizeof(NAME_TYPE)); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + else { + goto dropout; + } + } + } else { + if ((ndlp = binfo->fc_nlplookup[savecmd->ulpIoTag]) == 0) { + if(nh->fc_destname.IEEE[0] == 0xff) { + if((nh->fc_destname.IEEE[1] == 0xff) && + (nh->fc_destname.IEEE[2] == 0xff) && + (nh->fc_destname.IEEE[3] == 0xff) && + (nh->fc_destname.IEEE[4] == 0xff) && + (nh->fc_destname.IEEE[5] == 0xff)) { + goto bcst; + } + } + /* Need to send LOGOUT for this RPI */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + fc_read_rpi(binfo, (uint32)savecmd->ulpIoTag, + (MAILBOX * )mb, (uint32)ELS_CMD_LOGO); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + goto dropout; + } + } + + + if(lpfc_nethdr == 0) { + fc_bcopy(ndlp->nlp_portname.IEEE, (char *)saddr, MACADDR_LEN); + } + if ((p_dev_ctl->device_state != OPENED) || + (p_ndd->nd_receive == 0)) { + goto dropout; + } + ndlp->nlp_type |= NLP_IP_NODE; + + unlock_enable(FC_LVL, &CMD_LOCK); + (*(p_ndd->nd_receive))(p_ndd, 
p_mbuf, p_dev_ctl); + i = disable_lock(FC_LVL, &CMD_LOCK); + + return(1); +} /* End handle_iprcv_seq */ + +/**********************************************/ +/** handle_elsrcv_seq **/ +/** **/ +/**********************************************/ +_static_ int +handle_elsrcv_seq( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *temp) +{ + FC_BRD_INFO * binfo; + IOCB * cmd = 0; + IOCB * savecmd; + IOCBQ * savetemp; + MATCHMAP * p_mbuf, *last_mp; + ndd_t * p_ndd; + MATCHMAP * matp; + uint32 ctx; + int count, i, la; + + binfo = &BINFO; + p_ndd = (ndd_t * ) & (NDD); + + p_mbuf = 0; + matp = (MATCHMAP *)0; /* prevent compiler warning */ + + last_mp = 0; + count = 0; + la = 0; + + savetemp = temp; + if (binfo->fc_ffstate < FC_READY) { + goto dropout; + } + + ctx = 0; + savecmd = &temp->iocb; + while (temp) { + cmd = &temp->iocb; + if(ctx == 0) + ctx = (uint32)(cmd->ulpContext); + if (cmd->ulpStatus) { + if ((cmd->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((cmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { + FCSTATCTR.NoRcvBuf++; + if(!(binfo->fc_flag & FC_NO_RCV_BUF)) { + /* Rcv Ring out of posted buffers */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0603, /* ptr to msg structure */ + fc_mes0603, /* ptr to msg */ + fc_msgBlk0603.msgPreambleStr, /* begin varargs */ + rp->fc_ringno, + rp->fc_missbufcnt, + FCSTATCTR.NoRcvBuf); /* end varargs */ + } + binfo->fc_flag |= FC_NO_RCV_BUF; + + fc_post_buffer(p_dev_ctl, rp, 0); + } + goto dropout; + } + + if (cmd->ulpBdeCount == 0) { + temp = (IOCBQ * )temp->q; + continue; + } + for (i = 0; i < (int)cmd->ulpBdeCount; i++) { + matp = fc_getvaddr(p_dev_ctl, rp, (uchar *) + getPaddr(cmd->un.cont64[i].addrHigh, cmd->un.cont64[i].addrLow)); + if (matp == 0) { + uchar *bdeAddr; + + bdeAddr = (uchar *)getPaddr(cmd->un.cont64[0].addrHigh, + cmd->un.cont64[0].addrLow); + + goto dropout; + } + + /* Typically for Unsolicited CT requests */ + + if (last_mp) { + last_mp->fc_mptr = (void *)matp; + } else { + p_mbuf = matp; + } + 
last_mp = matp; + matp->fc_mptr = 0; + count += cmd->un.cont64[i].tus.f.bdeSize; + } + + fc_post_buffer(p_dev_ctl, rp, i); + cmd->ulpBdeCount = 0; + temp = (IOCBQ * )temp->q; + } + + if (p_mbuf == 0) { + goto dropout; + } + binfo->fc_flag &= ~FC_NO_RCV_BUF; + if(dfc_put_event(p_dev_ctl, FC_REG_CT_EVENT, ctx, (void *)p_mbuf, (void *)((ulong)count))) { + fc_free_iocb_buf(p_dev_ctl, rp, savetemp); + return(0); + } + +dropout: + fc_free_iocb_buf(p_dev_ctl, rp, savetemp); + while (p_mbuf) { + matp = p_mbuf; + p_mbuf = (MATCHMAP *)matp->fc_mptr; + fc_mem_put(binfo, MEM_BUF, (uchar * )matp); + } + return(0); +} /* End handle_elsrcv_seq */ + + +/**************************************************/ +/** fc_post_buffer **/ +/** **/ +/** This routine will post count buffers to the **/ +/** ring with the QUE_RING_BUF_CN command. This **/ +/** allows 3 buffers / command to be posted. **/ +/** Returns the number of buffers NOT posted. **/ +/**************************************************/ +_static_ int +fc_post_buffer( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +int cnt) +{ + IOCB * icmd; + IOCBQ * temp; + int i, j; + ushort tag; + ushort maxqbuf; + MATCHMAP * mp; + FC_BRD_INFO * binfo; + + binfo = &BINFO; + mp = 0; + if (binfo->fc_flag & FC_SLI2) + maxqbuf = 2; + else + maxqbuf = 3; + + tag = (ushort)cnt; + cnt += rp->fc_missbufcnt; + /* While there are buffers to post */ + while (cnt) { + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == 0) { + rp->fc_missbufcnt = cnt; + return(cnt); + } + fc_bzero((void *)temp, sizeof(IOCBQ)); + icmd = &temp->iocb; + + /* Max buffers can be posted per command */ + for (i = 0; i < maxqbuf; i++) { + if (cnt <= 0) + break; + + /* fill in BDEs for command */ + if ((mp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF)) == 0) { + icmd->ulpBdeCount = i; + for (j = 0; j < i; j++) { + if (binfo->fc_flag & FC_SLI2) { + mp = fc_getvaddr(p_dev_ctl, rp, + (uchar * )getPaddr(icmd->un.cont64[j].addrHigh, + icmd->un.cont64[j].addrLow)); + } + else { + mp = 
fc_getvaddr(p_dev_ctl, rp, + (uchar * )((ulong)icmd->un.cont[j].bdeAddress)); + } + if (mp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + } + } + + rp->fc_missbufcnt = cnt + i; + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + return(cnt + i); + } + + /* map that page and save the address pair for lookup later */ + if (binfo->fc_flag & FC_SLI2) { + fc_mapvaddr(binfo, rp, mp, + (uint32 *) & icmd->un.cont64[i].addrHigh, + (uint32 *) & icmd->un.cont64[i].addrLow); + icmd->un.cont64[i].tus.f.bdeSize = FCELSSIZE; + icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; + } else { + fc_mapvaddr(binfo, rp, mp, + 0, (uint32 *) & icmd->un.cont[i].bdeAddress); + icmd->un.cont[i].bdeSize = FCELSSIZE; + icmd->ulpCommand = CMD_QUE_RING_BUF_CN; + } + cnt--; + } + + icmd->ulpIoTag = tag; + icmd->ulpBdeCount = i; + icmd->ulpLe = 1; + + icmd->ulpOwner = OWN_CHIP; + + temp->bp = (uchar * )mp; /* used for delimiter between commands */ + + + FCSTATCTR.cmdQbuf++; + issue_iocb_cmd(binfo, rp, temp); + } + + rp->fc_missbufcnt = 0; + return(0); +} /* End fc_post_buffer */ + + +/**************************************************/ +/** fc_post_mbuf **/ +/** **/ +/** This routine will post count buffers to the **/ +/** ring with the QUE_RING_BUF_CN command. This **/ +/** allows 3 buffers / command to be posted. **/ +/** Returns the number of buffers NOT posted. 
**/ +/**************************************************/ +_static_ int +fc_post_mbuf( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +int cnt) +{ + FC_BRD_INFO * binfo; + IOCB * icmd; + IOCBQ * temp; + int i, j; + ushort tag; + ushort maxqbuf; + fcipbuf_t * mp; + + binfo = &BINFO; + mp = 0; + if (binfo->fc_flag & FC_SLI2) + maxqbuf = 2; + else + maxqbuf = 3; + + tag = (ushort)cnt; + cnt += rp->fc_missbufcnt; + /* While there are buffers to post */ + while (cnt) { + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == 0) { + rp->fc_missbufcnt = cnt; + return(cnt); + } + fc_bzero((void *)temp, sizeof(IOCBQ)); + icmd = &temp->iocb; + + /* Max buffers can be posted per command */ + for (i = 0; i < maxqbuf; i++) { + if (cnt <= 0) + break; + + /* fill in BDEs for command */ + if ((mp = (fcipbuf_t * )m_getclust(M_DONTWAIT, MT_DATA)) == 0) { + +out: + /* Post buffer for IP ring failed */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0604, /* ptr to msg structure */ + fc_mes0604, /* ptr to msg */ + fc_msgBlk0604.msgPreambleStr, /* begin varargs */ + rp->fc_ringno, + rp->fc_missbufcnt); /* end varargs */ + icmd->ulpBdeCount = i; + for (j = 0; j < i; j++) { + if (binfo->fc_flag & FC_SLI2) { + mp = (fcipbuf_t * )fc_getvaddr(p_dev_ctl, rp, + (uchar * )getPaddr(icmd->un.cont64[j].addrHigh, + icmd->un.cont64[j].addrLow)); + } + else { + mp = (fcipbuf_t * )fc_getvaddr(p_dev_ctl, rp, + (uchar * )((ulong)icmd->un.cont[j].bdeAddress)); + } + if (mp) { + fcnextdata(mp) = 0; + fcnextpkt(mp) = 0; + m_freem(mp); + } + } + + rp->fc_missbufcnt = cnt + i; + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + return(cnt + i); + } + { + MBUF_INFO * buf_info; + MBUF_INFO bufinfo; + + buf_info = &bufinfo; + buf_info->virt = (uint32 * )fcdata(mp); + buf_info->size = fcPAGESIZE; + buf_info->flags = (FC_MBUF_PHYSONLY | FC_MBUF_DMA); + buf_info->align = 0; + buf_info->dma_handle = 0; + + /* Map page of memory associated with m_data for read/write */ + fc_malloc(p_dev_ctl, buf_info); + if 
(buf_info->phys == NULL) { + /* mapping that page failed */ + goto out; + } + fcnextpkt(mp) = (fcipbuf_t * )buf_info->phys; + fcsethandle(mp, buf_info->dma_handle); + } + /* map that page and save the address pair for lookup later */ + if (binfo->fc_flag & FC_SLI2) { + fc_mapvaddr(binfo, rp, (MATCHMAP * )mp, + (uint32 *) & icmd->un.cont64[i].addrHigh, + (uint32 *) & icmd->un.cont64[i].addrLow); + icmd->un.cont64[i].tus.f.bdeSize = FC_RCV_BUF_SIZE; + icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; + } else { + fc_mapvaddr(binfo, rp, (MATCHMAP * )mp, + 0, (uint32 *) & icmd->un.cont[i].bdeAddress); + icmd->un.cont[i].bdeSize = FC_RCV_BUF_SIZE; + icmd->ulpCommand = CMD_QUE_RING_BUF_CN; + } + cnt--; + } + + icmd->ulpIoTag = tag; + icmd->ulpBdeCount = i; + icmd->ulpLe = 1; + + icmd->ulpOwner = OWN_CHIP; + + temp->bp = (uchar * )mp; /* used for delimiter between commands */ + + FCSTATCTR.cmdQbuf++; + issue_iocb_cmd(binfo, rp, temp); + } + + rp->fc_missbufcnt = 0; + return(0); +} /* End fc_post_mbuf */ + + +_static_ int +fc_free_iocb_buf( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *temp) +{ + FC_BRD_INFO * binfo; + IOCB * cmd; + int i; + MATCHMAP * mp; + + binfo = &BINFO; + while (temp) { + cmd = &temp->iocb; + for (i = 0; i < (int)cmd->ulpBdeCount; i++) { + if (binfo->fc_flag & FC_SLI2) { + mp = fc_getvaddr(p_dev_ctl, rp, (uchar * ) + getPaddr(cmd->un.cont64[i].addrHigh, cmd->un.cont64[i].addrLow)); + } + else { + mp = fc_getvaddr(p_dev_ctl, rp, (uchar * )((ulong)cmd->un.cont[i].bdeAddress)); + } + + if (mp) { + if (rp->fc_ringno == FC_ELS_RING) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + } + else if (rp->fc_ringno == FC_IP_RING) { + fcipbuf_t * mbuf; + + mbuf = (fcipbuf_t * )mp; + fcnextdata(mbuf) = 0; + fcnextpkt(mbuf) = 0; + m_freem(mbuf); + } + } + } + switch (rp->fc_ringno) { + case FC_ELS_RING: + fc_post_buffer(p_dev_ctl, rp, i); + break; + case FC_IP_RING: + fc_post_mbuf(p_dev_ctl, rp, i); + break; + } + temp = (IOCBQ * )temp->q; + } + return(0); +} /* End 
fc_free_iocb_buf */ + + +/* + * Returns 0 if pn1 < pn2 + * Returns 1 if pn1 > pn2 + * Returns 2 if pn1 = pn2 + */ +_static_ int +fc_geportname( +NAME_TYPE *pn1, +NAME_TYPE *pn2) +{ + int i; + uchar * cp1, *cp2; + + i = sizeof(NAME_TYPE); + cp1 = (uchar * )pn1; + cp2 = (uchar * )pn2; + while (i--) { + if (*cp1 < *cp2) { + return(0); + } + if (*cp1 > *cp2) { + return(1); + } + cp1++; + cp2++; + } + + return(2); /* equal */ +} /* End fc_geportname */ + + +_local_ int +fc_ring_txcnt( +FC_BRD_INFO *binfo, +int flag) +{ + int sum = 0; + + if ((binfo->fc_flag & FC_SLI2) && (FCSTATCTR.linkEvent == 0)) + return(0); + + switch (flag) { + case FC_IP_RING: + sum += binfo->fc_ring[FC_IP_RING].fc_tx.q_cnt; + sum += binfo->fc_ring[FC_ELS_RING].fc_tx.q_cnt; + break; + case FC_FCP_RING: + sum += binfo->fc_ring[FC_FCP_RING].fc_tx.q_cnt; + sum += binfo->fc_ring[FC_ELS_RING].fc_tx.q_cnt; + break; + default: + sum = 1; + break; + } + return(sum); +} /* End fc_ring_txcnt */ + + +_local_ int +fc_ring_txpcnt( +FC_BRD_INFO *binfo, +int flag) +{ + int sum = 0; + + switch (flag) { + case FC_IP_RING: + sum += binfo->fc_ring[FC_IP_RING].fc_txp.q_cnt; + sum += binfo->fc_ring[FC_ELS_RING].fc_txp.q_cnt; + break; + case FC_FCP_RING: + sum += binfo->fc_ring[FC_FCP_RING].fc_txp.q_cnt; + sum += binfo->fc_ring[FC_ELS_RING].fc_txp.q_cnt; + break; + default: + sum = 1; + break; + } + return(sum); +} /* End fc_ring_txpcnt */ + + +/*****************************************************************************/ +/* + * NAME: fc_cmdring_timeout + * + * FUNCTION: Fibre Channel driver cmd ring watchdog timer timeout routine. 
+ * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * Timer function + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_cmdring_timeout( +fc_dev_ctl_t * p_dev_ctl, +void *l1, +void *l2) +{ + FC_BRD_INFO * binfo; + RING * rp; + int i; + uint32 command; + uint32 * lp0; + IOCBQ * xmitiq; + IOCBQ * save; + IOCB * icmd; + MAILBOXQ * mb; + MATCHMAP * mp; + NODELIST * ndlp; + ELS_PKT * ep; + fcipbuf_t * p_mbuf; + fcipbuf_t * m_net; + + if (!p_dev_ctl) { + return; + } + rp = (RING *)l1; + RINGTMO = 0; + + binfo = &BINFO; + if ((xmitiq = fc_ringtxp_get(rp, 0)) != NULL) { + icmd = &xmitiq->iocb; + switch (icmd->ulpCommand) { + case CMD_ELS_REQUEST_CR: + case CMD_ELS_REQUEST64_CR: + mp = (MATCHMAP * )xmitiq->bp; + lp0 = (uint32 * )mp->virt; + command = *lp0; + switch (command) { + case ELS_CMD_FLOGI: /* Fabric login */ + fc_freenode_did(binfo, Fabric_DID, 1); + if (binfo->fc_ffstate == FC_FLOGI) { + binfo->fc_flag &= ~FC_FABRIC; + if (binfo->fc_topology == TOPOLOGY_LOOP) { + binfo->fc_edtov = FF_DEF_EDTOV; + binfo->fc_ratov = FF_DEF_RATOV; + if ((mb=(MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + binfo->fc_flag |= FC_DELAY_DISC; + } else { + /* Device Discovery completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0220, /* ptr to msg structure */ + fc_mes0220, /* ptr to msg */ + fc_msgBlk0220.msgPreambleStr); /* begin & end varargs */ + binfo->fc_ffstate = FC_ERROR; + if ((mb=(MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_clear_la(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) + != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + } + break; + + case ELS_CMD_PLOGI: /* NPort login */ + if ((ndlp = fc_findnode_odid(binfo, 
NLP_SEARCH_ALL, + icmd->un.elsreq.remoteID)) == 0) + break; + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & NLP_DO_DISC_START) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + ndlp->nlp_flag &= ~NLP_REQ_SND; + + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + break; + + case ELS_CMD_PRLI: /* Process Log In */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + icmd->un.elsreq.remoteID)) == 0) + break; + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & NLP_DO_DISC_START) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + ndlp->nlp_flag &= ~NLP_REQ_SND; + ndlp->nlp_state = NLP_LOGIN; + fc_nlp_unmap(binfo, ndlp); + break; + + case ELS_CMD_PDISC: /* Pdisc */ + case ELS_CMD_ADISC: /* Adisc */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + icmd->un.elsreq.remoteID)) == 0) + break; + + /* If we are in the middle of Address Authentication */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH)) { + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + + ndlp->nlp_action |= NLP_DO_DISC_START; + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } else { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + break; + + case ELS_CMD_LOGO: /* Logout */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, + icmd->un.elsreq.remoteID)) == 0) + break; + + /* If we are in the middle of Discovery */ + if (ndlp->nlp_action & NLP_DO_DISC_START) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + break; + + case ELS_CMD_FARP: /* Farp-req */ + case ELS_CMD_FARPR: /* Farp-res */ + ep = (ELS_PKT * )lp0; + if((ndlp = fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, + &ep->un.farp.RportName)) == 0) + break; + ndlp->nlp_flag &= ~NLP_FARP_SND; + 
ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + + /* Check for a FARP generated nlplist entry */ + if (ndlp->nlp_DID == Bcast_DID) { + fc_freenode(binfo, ndlp, 1); + } + break; + + case ELS_CMD_SCR: /* State Change Registration */ + break; + + default: + FCSTATCTR.elsCmdPktInval++; + break; + } + if (xmitiq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->bp); + } + if (xmitiq->info) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->info); + } + if ((binfo->fc_flag & FC_SLI2) && (xmitiq->bpl)) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + break; + + case CMD_XMIT_ELS_RSP_CX: + case CMD_XMIT_ELS_RSP64_CX: + ndlp = (NODELIST * )xmitiq->ndlp; + /* No retries */ + if ((ndlp) && (ndlp->nlp_flag & NLP_RM_ENTRY) && + !(ndlp->nlp_flag & NLP_REQ_SND)) { + if (ndlp->nlp_type & NLP_FCP_TARGET) { + ndlp->nlp_flag &= ~NLP_RM_ENTRY; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + if(binfo->fc_ffstate == FC_READY) { + if ((ndlp->nlp_flag & NLP_NODEV_TMO) && + (ndlp->nlp_DID != (uint32)0)) { + ndlp->nlp_flag |= NLP_NODEV_TMO; + if(!(ndlp->nlp_flag & NLP_NS_REMOVED)) { + fc_els_cmd(binfo, ELS_CMD_PLOGI, + (void *)((ulong)ndlp->nlp_DID), (uint32)0, (ushort)0, ndlp); + } + } + if(!(binfo->fc_flag & FC_RSCN_MODE)) { + binfo->fc_flag |= FC_RSCN_MODE; + ndlp->nlp_action |= NLP_DO_RSCN; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + fc_nextrscn(p_dev_ctl, 1); + } + } + else { + ndlp->nlp_action |= NLP_DO_DISC_START; + fc_nextdisc(p_dev_ctl, 1); + } + } else + fc_freenode_did(binfo, ndlp->nlp_DID, 0); + } + if (xmitiq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->bp); + } + if ((binfo->fc_flag & FC_SLI2) && (xmitiq->bpl)) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + break; + + case CMD_ABORT_XRI_CN: + break; + + case CMD_CREATE_XRI_CR: + break; + + case CMD_XMIT_SEQUENCE_CX: + case CMD_XMIT_BCAST_CN: + case CMD_XMIT_SEQUENCE64_CX: + case CMD_XMIT_BCAST64_CN: + if (rp->fc_ringno != FC_IP_RING) { + break; + } + NDDSTAT.ndd_xmitque_cur--; + /* get mbuf ptr for completed 
xmit */ + m_net = (fcipbuf_t * )xmitiq->bp; + /* Loop through iocb chain unmap memory pages associated with mbuf */ + if (binfo->fc_flag & FC_SLI2) { + ULP_BDE64 * bpl; + MATCHMAP * bmp; + + bmp = (MATCHMAP * )xmitiq->bpl; + bpl = (ULP_BDE64 * )bmp->virt; + while (bpl && xmitiq->iocb.un.xseq64.bdl.bdeSize) { + fc_bufunmap(p_dev_ctl, (uchar *)getPaddr(bpl->addrHigh, bpl->addrLow), 0, bpl->tus.f.bdeSize); + bpl++; + xmitiq->iocb.un.xseq64.bdl.bdeSize -= sizeof(ULP_BDE64); + } + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + xmitiq = 0; + } else { + while (xmitiq) { + for (i = 0; i < (int)xmitiq->iocb.ulpBdeCount; i++) { + fc_bufunmap(p_dev_ctl, (uchar *)((ulong)xmitiq->iocb.un.cont[i].bdeAddress), 0, (uint32)xmitiq->iocb.un.cont[i].bdeSize); + } + save = (IOCBQ * )xmitiq->q; + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + xmitiq = save; + } + } + + /* free mbuf */ + if (m_net) { + p_mbuf = fcnextdata(m_net); + fcnextdata(m_net) = 0; + fcfreehandle(p_dev_ctl, m_net); + m_freem(m_net); + if (p_mbuf) { + fcfreehandle(p_dev_ctl, p_mbuf); + m_freem(p_mbuf); + } + } + break; + + default: + break; + } + if (xmitiq) + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + /* Command ring timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1401, /* ptr to msg structure */ + fc_mes1401, /* ptr to msg */ + fc_msgBlk1401.msgPreambleStr, /* begin varargs */ + rp->fc_ringno, + icmd->ulpCommand ); /* end varargs */ + } else { + /* Command ring timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1402, /* ptr to msg structure */ + fc_mes1402, /* ptr to msg */ + fc_msgBlk1402.msgPreambleStr, /* begin varargs */ + rp->fc_ringno ); /* end varargs */ + } + + if ((rp->fc_ringno == FC_IP_RING) && + (binfo->fc_flag & FC_LNK_DOWN)) { + IOCBQ * xmitiq; + + /* If linkdown, flush out tx and txp queues */ + /* get next command from ring xmit queue */ + while ((xmitiq = fc_ringtx_drain(rp)) != 0) { + 
fc_freebufq(p_dev_ctl, rp, xmitiq); + } + + /* look up xmit next compl */ + while ((xmitiq = fc_ringtxp_get(rp, 0)) != 0) { + fc_freebufq(p_dev_ctl, rp, xmitiq); + } + NDDSTAT.ndd_xmitque_cur = 0; + } + + return; +} /* End fc_cmdring_timeout */ + + +/*****************************************************************************/ +/* + * NAME: fc_linkdown_timeout + * + * FUNCTION: Fibre Channel driver link down watchdog timer timeout routine. + * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * Timer function + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_linkdown_timeout( +fc_dev_ctl_t * p_dev_ctl, +void *l1, +void *l2) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + RING * rp; + NODELIST * ndlp; + NODELIST * new_ndlp; + + if (!p_dev_ctl) { + return; + } + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + rp = &binfo->fc_ring[FC_FCP_RING]; + RINGTMO = 0; + + /* EXPIRED linkdown timer */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0750, /* ptr to msg structure */ + fc_mes0750, /* ptr to msg */ + fc_msgBlk0750.msgPreambleStr, /* begin varargs */ + (ulong)binfo->fc_ffstate ); /* end varargs */ + if (binfo->fc_ffstate == FC_READY) { + return; + } + + if ((binfo->fc_ffstate > FC_LINK_DOWN) && + (binfo->fc_ffstate < FC_READY)) { + + /* Set the link down watchdog timer expired flag */ + binfo->fc_flag |= FC_LD_TIMEOUT; + goto out; + } + + /* Since the link has been down for so long, call fc_freenode for all + * SCSI device and clear out all SCSI queues + */ + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + ndlp = new_ndlp; + } + + /* Set the link down watchdog timer expired flag */ + binfo->fc_flag |= FC_LD_TIMEOUT; + + if((clp[CFG_LINKDOWN_TMO].a_current) && 
(clp[CFG_HOLDIO].a_current == 0)) { + fc_failio(p_dev_ctl); + } + +out: + + return; +} /* End fc_linkdown_timeout */ + + +/*****************************************************************************/ +/* + * NAME: fc_mbox_timeout + * + * FUNCTION: Fibre Channel driver mailbox watchdog timer timeout routine. + * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * Timer function + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_mbox_timeout( +fc_dev_ctl_t * p_dev_ctl, +void *l1, +void *l2) +{ + FC_BRD_INFO * binfo; + MAILBOXQ * mbox; + MAILBOX * swpmb, *mb; + void *ioa; + volatile uint32 word0; + + if (!p_dev_ctl) { + return; + } + + binfo = &BINFO; + MBOXTMO = 0; + + binfo->fc_mbox_active = 0; + + if (binfo->fc_flag & FC_SLI2) { + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, + DDI_DMA_SYNC_FORKERNEL); + /* First copy command data */ + mb = FC_SLI2_MAILBOX(binfo); + word0 = *((volatile uint32 * )mb); + word0 = PCIMEM_LONG(word0); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + word0 = READ_SLIM_ADDR(binfo, ((volatile uint32 * )mb)); + FC_UNMAP_MEMIO(ioa); + } + swpmb = (MAILBOX * ) & word0; + + /* Mailbox command timeout, status fc_brd_no, + &fc_msgBlk0310, /* ptr to msg structure */ + fc_mes0310, /* ptr to msg */ + fc_msgBlk0310.msgPreambleStr, /* begin varargs */ + swpmb->mbxCommand, + swpmb->mbxStatus); /* end varargs */ + if ((mbox = fc_mbox_get(binfo))) { + if (issue_mb_cmd(binfo, (MAILBOX * )mbox, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbox); + } + } + + return; +} /* End fc_mbox_timeout */ + + + +/*****************************************************************************/ +/* + * NAME: fc_fabric_timeout + * + * FUNCTION: Fibre Channel driver fabric watchdog timer timeout routine. 
+ * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * Timer function + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_fabric_timeout( +fc_dev_ctl_t * p_dev_ctl, +void *l1, +void *l2) +{ + FC_BRD_INFO * binfo; + NODELIST * ndlp; + NODELIST * new_ndlp; + MAILBOXQ * mb; + iCfgParam * clp; + + if (!p_dev_ctl) { + return; + } + + binfo = &BINFO; + FABRICTMO = 0; + + /* Check for wait for FAN timeout */ + if (binfo->fc_ffstate == FC_FLOGI) { + if((binfo->fc_topology == TOPOLOGY_LOOP) && + (binfo->fc_flag & FC_PUBLIC_LOOP)) { + /* FAN timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0221, /* ptr to msg structure */ + fc_mes0221, /* ptr to msg */ + fc_msgBlk0221.msgPreambleStr); /* begin & end varargs */ + } + else { + /* Initial FLOGI timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0222, /* ptr to msg structure */ + fc_mes0222, /* ptr to msg */ + fc_msgBlk0222.msgPreambleStr); /* begin & end varargs */ + } + + fc_freenode_did(binfo, Fabric_DID, 1); + /* FAN timeout, so just do FLOGI instead */ + /* Now build FLOGI payload and issue ELS command */ + fc_els_cmd(binfo, ELS_CMD_FLOGI, (void *)Fabric_DID, + (uint32)0, (ushort)0, (NODELIST *)0); + goto out; + } + + /* Check for wait for NameServer Rsp timeout */ + if (binfo->fc_ffstate == FC_NS_REG) { + /* NameServer Registration timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0223, /* ptr to msg structure */ + fc_mes0223, /* ptr to msg */ + fc_msgBlk0223.msgPreambleStr, /* begin varargs */ + binfo->fc_ns_retry, + fc_max_ns_retry); /* end varargs */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, NameServer_DID))) { + if(binfo->fc_ns_retry) { + if(binfo->fc_ns_retry < fc_max_ns_retry) { + /* Try it one more time */ + if (fc_ns_cmd(p_dev_ctl, ndlp, SLI_CTNS_RFT_ID) == 0) { + goto out; + } + } + binfo->fc_ns_retry = 0; + } + /* Complete discovery, then issue an 
INIT_LINK */ + goto ns_tmout; + } + goto out; + } + + /* Check for wait for NameServer Rsp timeout */ + if (binfo->fc_ffstate == FC_NS_QRY) { + /* NameServer Query timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0224, /* ptr to msg structure */ + fc_mes0224, /* ptr to msg */ + fc_msgBlk0224.msgPreambleStr, /* begin varargs */ + binfo->fc_ns_retry, + fc_max_ns_retry); /* end varargs */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, NameServer_DID))) { + if(binfo->fc_ns_retry) { + if(binfo->fc_ns_retry < fc_max_ns_retry) { + /* Try it one more time */ + if (fc_ns_cmd(p_dev_ctl, ndlp, SLI_CTNS_GID_FT) == 0) { + goto out; + } + } + binfo->fc_ns_retry = 0; + } + +ns_tmout: + /* Complete discovery, then issue an INIT_LINK */ + /* This should turn off DELAYED ABTS for ELS timeouts */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + fc_set_slim(binfo, (MAILBOX * )mb, 0x052198, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + + /* Nothing to authenticate, so CLEAR_LA right now */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + binfo->fc_ffstate = FC_CLEAR_LA; + fc_clear_la(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + /* Device Discovery completes */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0225, /* ptr to msg structure */ + fc_mes0225, /* ptr to msg */ + fc_msgBlk0225.msgPreambleStr); /* begin & end varargs */ + } else { + /* Device Discovery completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0226, /* ptr to msg structure */ + fc_mes0226, /* ptr to msg */ + fc_msgBlk0226.msgPreambleStr); /* begin & end varargs */ + binfo->fc_ffstate = FC_ERROR; + } + + binfo->fc_firstopen++; + if(binfo->fc_firstopen >= fc_max_ns_retry) { + goto out; + } + + /* Get a buffer to use for the mailbox command */ + if ((mb = 
(MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* Setup and issue mailbox INITIALIZE LINK command */ + fc_linkdown(p_dev_ctl); + fc_init_link(binfo, (MAILBOX * )mb, clp[CFG_TOPOLOGY].a_current, + clp[CFG_LINK_SPEED].a_current); + ((MAILBOX *)mb)->un.varInitLnk.lipsr_AL_PA = 0; + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + goto out; + } + + /* Check for Node Authentication timeout */ + if (binfo->fc_ffstate == FC_LOOP_DISC) { + int disc; + + disc = 0; + /* Node Authentication timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0227, /* ptr to msg structure */ + fc_mes0227, /* ptr to msg */ + fc_msgBlk0227.msgPreambleStr); /* begin & end varargs */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + /* Clean up all nodes marked for authentication */ + if (ndlp->nlp_action & NLP_DO_ADDR_AUTH) { + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + if (ndlp->nlp_DID != NameServer_DID) { + ndlp->nlp_action |= NLP_DO_DISC_START; + disc++; + } + } + else if (ndlp->nlp_action & NLP_DO_DISC_START) { + if (ndlp->nlp_DID != NameServer_DID) { + disc++; + } + } + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + if(disc) { + fc_nextdisc(p_dev_ctl, fc_max_els_sent); + } + else { + goto ns_tmout; + } + goto out; + } + + /* Check for Node Discovery timeout */ + if (binfo->fc_ffstate == FC_NODE_DISC) { + /* Node Discovery timeout */ + fc_log_printf_msg_vargs( 
binfo->fc_brd_no, + &fc_msgBlk0228, /* ptr to msg structure */ + fc_mes0228, /* ptr to msg */ + fc_msgBlk0228.msgPreambleStr); /* begin & end varargs */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + /* Clean up all nodes marked for discovery/authentication */ + if (ndlp->nlp_action & (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START)) { + /* Node Discovery timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0229, /* ptr to msg structure */ + fc_mes0229, /* ptr to msg */ + fc_msgBlk0229.msgPreambleStr, /* begin varargs */ + ndlp->nlp_DID, + ndlp->nlp_flag, + ndlp->nlp_state, + ndlp->nlp_type); /* end varargs */ + ndlp->nlp_flag &= ~(NLP_REQ_SND | NLP_REG_INP | NLP_REQ_SND_ADISC); + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + /* Nothing to discover, so CLEAR_LA right now */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + binfo->fc_ffstate = FC_CLEAR_LA; + fc_clear_la(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb,MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } else { + /* Device Discovery completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0230, /* ptr to msg structure */ + fc_mes0230, /* ptr to msg */ + fc_msgBlk0230.msgPreambleStr); /* begin & end varargs */ + binfo->fc_ffstate = FC_ERROR; + } + goto out; + } + + /* Check for RSCN timeout */ + if ((binfo->fc_flag & FC_RSCN_MODE) && (binfo->fc_ffstate == FC_READY)) { + + if(binfo->fc_ns_retry) { + if ((ndlp = 
fc_findnode_odid(binfo, NLP_SEARCH_ALL, NameServer_DID))) { + if(binfo->fc_ns_retry < fc_max_ns_retry) { + /* Try it one more time */ + if (fc_ns_cmd(p_dev_ctl, ndlp, SLI_CTNS_GID_FT) == 0) { + goto out; + } + } + } + } + /* RSCN timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0231, /* ptr to msg structure */ + fc_mes0231, /* ptr to msg */ + fc_msgBlk0231.msgPreambleStr, /* begin varargs */ + binfo->fc_ns_retry, + fc_max_ns_retry ); /* end varargs */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + /* Clean up all nodes marked for rscn */ + if (ndlp->nlp_action & NLP_DO_RSCN) { + /* Node RSCN timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0232, /* ptr to msg structure */ + fc_mes0232, /* ptr to msg */ + fc_msgBlk0232.msgPreambleStr, /* begin varargs */ + ndlp->nlp_DID, + ndlp->nlp_flag, + ndlp->nlp_state, + ndlp->nlp_type); /* end varargs */ + ndlp->nlp_flag &= ~(NLP_REQ_SND | NLP_REG_INP | NLP_REQ_SND_ADISC); + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + binfo->fc_flag &= ~FC_NLP_MORE; + binfo->fc_nlp_cnt = 0; + binfo->fc_ns_retry = 0; + /* fc_nextrscn(p_dev_ctl, fc_max_els_sent); */ + fc_rlip(p_dev_ctl); + goto out; + } + + /* Check for pt2pt link up timeout */ + if ((binfo->fc_flag & FC_PT2PT) && (binfo->fc_ffstate != FC_READY)) { + /* PT2PT link up timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0233, /* ptr to msg structure */ + fc_mes0233, /* ptr to msg */ + fc_msgBlk0233.msgPreambleStr); /* begin & 
end varargs */ + binfo->fc_ffstate = FC_LINK_UP; + binfo->fc_flag &= ~(FC_LNK_DOWN | FC_PT2PT | FC_PT2PT_PLOGI | + FC_LBIT | FC_RSCN_MODE | FC_NLP_MORE | + FC_RSCN_DISC_TMR | FC_RSCN_DISCOVERY); + + binfo->fc_myDID = 0; + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + binfo->fc_ffstate = FC_CFG_LINK; + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + goto out; + } + +out: + return; +} /* End fc_fabric_timeout */ + + +/*****************************************************************************/ +/* + * NAME: fc_delay_timeout + * + * FUNCTION: Fibre Channel driver delay watchdog timer timeout routine. + * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * Timer function + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_delay_timeout( +fc_dev_ctl_t * p_dev_ctl, +void *l1, +void *l2) +{ + FC_BRD_INFO * binfo; + IOCBQ * iocbq; + RING * rp; + MATCHMAP * rmp; + NODELIST * ndlp; + + binfo = &BINFO; + rp = &binfo->fc_ring[FC_ELS_RING]; + iocbq = (IOCBQ *)l1; + + if(((rmp = (MATCHMAP *)iocbq->info) != 0) && + ((ndlp = (NODELIST *)rmp->fc_mptr) != 0)) { + /* Don't send PLOGI if we are already logged in! 
*/ + if(ndlp->nlp_state >= NLP_LOGIN) { + if(iocbq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->bp); + } + if (iocbq->info) { + fc_mem_put(binfo, MEM_BUF, (uchar * )iocbq->info); + } + if (iocbq->bpl) { + fc_mem_put(binfo, MEM_BPL, (uchar * )iocbq->bpl); + } + + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocbq); + return; + } + } + /* Delayxmit ELS command timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0136, /* ptr to msg structure */ + fc_mes0136, /* ptr to msg */ + fc_msgBlk0136.msgPreambleStr, /* begin varargs */ + iocbq->iocb.ulpCommand, + iocbq->iocb.ulpIoTag, + iocbq->retry, + iocbq->iocb.un.elsreq.remoteID); /* end varargs */ + issue_iocb_cmd(binfo, rp, iocbq); + + if (((binfo->fc_flag & FC_RSCN_MODE) && (binfo->fc_ffstate == FC_READY)) || + (binfo->fc_ffstate == FC_LOOP_DISC) || + (binfo->fc_ffstate == FC_NODE_DISC)) { + binfo->fc_fabrictmo = (2 * binfo->fc_ratov) + + ((4 * binfo->fc_edtov) / 1000) + 1; + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + else { + FABRICTMO = fc_clk_set(p_dev_ctl, binfo->fc_fabrictmo, + fc_fabric_timeout, 0, 0); + } + } + + return; +} + +/*****************************************************************************/ +/* + * NAME: fc_nodev_timeout + * + * FUNCTION: Fibre Channel driver FCP device disappearing timeout routine. 
+ * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * Timer interrupt + * + * INPUT: + * tp - pointer to the timer structure + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_nodev_timeout( +fc_dev_ctl_t * p_dev_ctl, +void * np, +void *l2) +{ + node_t * nodep; + dvi_t * dev_ptr; + FC_BRD_INFO * binfo; + iCfgParam * clp; + NODELIST * ndlp; + RING * rp; + IOCBQ * temp; + IOCBQ * nexttemp, *prevtemp; + IOCB * cmd; + unsigned long iflag; + + nodep = (node_t *)np; + binfo = &BINFO; + rp = &binfo->fc_ring[FC_FCP_RING]; + if(nodep) { + uint32 did; + uint32 pan; + uint32 sid; + uint32 rpi; + + clp = DD_CTL.p_config[p_dev_ctl->info.fc_brd_no]; + if(nodep->rpi != 0xfffe) + rpi = nodep->rpi; + else + rpi = 0; + + if((ndlp = nodep->nlp) == 0) { + /* + * Find the target from the nlplist based on SCSI ID + */ + ndlp = fc_findnode_scsid(binfo, NLP_SEARCH_MAPPED, nodep->scsi_id); + } + + if (ndlp) { + RING * rp; + IOCBQ * iocbq; + + /* EXPIRED nodev timer */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0751, /* ptr to msg structure */ + fc_mes0751, /* ptr to msg */ + fc_msgBlk0751.msgPreambleStr, /* begin varargs */ + (ulong)ndlp, + ndlp->nlp_flag, + ndlp->nlp_state, + ndlp->nlp_DID); /* end varargs */ + pan = ndlp->id.nlp_pan; + sid = ndlp->id.nlp_sid; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + did = ndlp->nlp_DID; + if(ndlp->nlp_Rpi) + rpi = ndlp->nlp_Rpi; + if(did == 0) { + if((ndlp->nlp_type & (NLP_AUTOMAP | NLP_SEED_MASK)) && + (ndlp->nlp_state == NLP_LIMBO) && ndlp->nlp_oldDID) + did = ndlp->nlp_oldDID; + + if (ndlp->nlp_flag & NLP_REQ_SND) { + /* Look through ELS ring and abort ELS cmd */ + rp = &binfo->fc_ring[FC_ELS_RING]; + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + if(iocbq->iocb.un.elsreq.remoteID == did) { + iocbq->retry = 0xff; + if((binfo->fc_flag & FC_RSCN_MODE) || + (binfo->fc_ffstate < FC_READY)) { + if((ndlp->nlp_state >= NLP_PLOGI) && + 
(ndlp->nlp_state <= NLP_PRLI)) { + ndlp->nlp_action &= ~NLP_DO_RSCN; + binfo->fc_nlp_cnt--; + if ((ndlp->nlp_type & NLP_IP_NODE) && ndlp->nlp_bp) { + m_freem((fcipbuf_t *)ndlp->nlp_bp); + ndlp->nlp_bp = (uchar * )0; + } + } + } + } + iocbq = (IOCBQ * )iocbq->q; + } + /* In case its on fc_delay_timeout list */ + fc_abort_delay_els_cmd(p_dev_ctl, ndlp->nlp_DID); + } + } + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } else { + did = 0; + pan = 0; + sid = 0; + } + /* Device disappeared, nodev timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0752, /* ptr to msg structure */ + fc_mes0752, /* ptr to msg */ + fc_msgBlk0752.msgPreambleStr, /* begin varargs */ + did, + sid, + pan, + clp[CFG_NODEV_TMO].a_current); /* end varargs */ + nodep->flags |= FC_NODEV_TMO; + nodep->flags &= ~FC_FCP2_RECOVERY; + nodep->nodev_tmr = 0; + for (dev_ptr = nodep->lunlist; dev_ptr != NULL; + dev_ptr = dev_ptr->next) { + + /* UNREG_LOGIN from freenode should abort txp I/Os */ + if(ndlp == 0) { + /* First send ABTS on outstanding I/Os in txp queue */ + fc_abort_fcp_txpq(binfo, dev_ptr); + } + + fc_fail_pendq(dev_ptr, ENXIO, STAT_ABORTED); + fc_fail_cmd(dev_ptr, ENXIO, STAT_ABORTED); + fc_return_standby_queue(dev_ptr, ENXIO, STAT_ABORTED); + + /* Call iodone for all the CLEARQ error bufs */ + fc_free_clearq(dev_ptr); + } + /* fail everything on txq that matches rpi */ + iflag = lpfc_q_disable_lock(p_dev_ctl); + prevtemp = 0; + temp = (IOCBQ *)rp->fc_tx.q_first; + while (temp != NULL) { + nexttemp = (IOCBQ *)temp->q; + cmd = &temp->iocb; + if(cmd->ulpContext == rpi) { + if(prevtemp) + prevtemp->q = (uchar *)nexttemp; + else + rp->fc_tx.q_first = (uchar *)nexttemp; + if(rp->fc_tx.q_last == (uchar * )temp) { + rp->fc_tx.q_last =0; + break; + } + cmd->ulpStatus = IOSTAT_LOCAL_REJECT; + cmd->un.grsp.perr.statLocalError = IOERR_INVALID_RPI; + + lpfc_q_unlock_enable(p_dev_ctl, iflag); + handle_fcp_event(p_dev_ctl, rp, temp); + iflag = 
lpfc_q_disable_lock(p_dev_ctl); + + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + } + else + prevtemp = temp; + + if(rp->fc_tx.q_last == (uchar * )temp) + break; + temp = nexttemp; + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + } +} + +/*****************************************************************************/ +/* + * NAME: fc_rscndisc_timeout + * + * FUNCTION: Fibre Channel driver RSCN timer timeout routine. + * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * Timer function + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_local_ void +fc_rscndisc_timeout( +fc_dev_ctl_t * p_dev_ctl, +void *l1, +void *l2) +{ + FC_BRD_INFO * binfo; + + binfo = &BINFO; + binfo->fc_flag |= (FC_RSCN_DISC_TMR | FC_RSCN_DISCOVERY); + /* EXPIRED RSCN disc timer */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0252, /* ptr to msg structure */ + fc_mes0252, /* ptr to msg */ + fc_msgBlk0252.msgPreambleStr, /* begin varargs */ + (ulong)binfo->fc_flag ); /* end varargs */ + fc_nextrscn(p_dev_ctl, fc_max_els_sent); +} + +_static_ int +fc_free_fcp_txq( +fc_dev_ctl_t * p_dev_ctl, +uint32 iotag) +{ + FC_BRD_INFO * binfo; + RING * rp; + IOCBQ * temp; + IOCBQ * prev_temp; + IOCB * cmd; + unsigned long iflag; + + iflag = lpfc_q_disable_lock(p_dev_ctl); + binfo = &BINFO; + rp = &binfo->fc_ring[FC_FCP_RING]; + + /* Check to see if iotag is still queued on txq */ + prev_temp = 0; + temp = (IOCBQ *)(rp->fc_tx.q_first); + while(temp) { + cmd = &temp->iocb; + if(iotag == cmd->ulpIoTag) { + /* A match so dequeue it */ + if(prev_temp) { + prev_temp->q = temp->q; + } + else { + rp->fc_tx.q_first = (uchar *)(temp->q); + } + if(rp->fc_tx.q_last == (uchar * )temp) + rp->fc_tx.q_last = (uchar * )prev_temp; + rp->fc_tx.q_cnt--; + prev_temp = temp; + temp = (IOCBQ *)(temp->q); + fc_mem_put(binfo, MEM_IOCB, (uchar * )prev_temp); + lpfc_q_unlock_enable(p_dev_ctl, iflag); + return(1); + } + prev_temp = temp; + temp = 
(IOCBQ *)(temp->q); + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + return(0); +} /* End fc_free_fcp_txq */ + +/*****************************************************************************/ +/* + * NAME: fc_scsi_timeout + * + * FUNCTION: Fibre Channel driver SCSI FCP timeout routine. + * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * Timer interrupt + * + * INPUT: + * tp - pointer to the timer structure + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_scsi_timeout( +fc_dev_ctl_t * p, +void *l1, +void *l2) +{ + fc_dev_ctl_t * p_dev_ctl; + FC_BRD_INFO * binfo; + iCfgParam * clp; + RING * rp; + fc_buf_t * fcptr, *next_fcptr; + T_SCSIBUF * sbp; + struct buf * clrptr; + dvi_t * dev_ptr; + int j, ipri, do_rlip; + uint32 now; + + curtime(&now); + + /* + * Search through all outstanding SCSI commands for any that timed out + */ + for (j = 0; j < MAX_FC_BRDS; j++) { + p_dev_ctl = DD_CTL.p_dev[j]; + if (p_dev_ctl) { + do_rlip = 0; + binfo = &BINFO; + if(binfo->fc_flag & FC_ESTABLISH_LINK) { + continue; + } + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if (clp[CFG_FCP_ON].a_current) { + rp = &binfo->fc_ring[FC_FCP_RING]; + + if(((clp[CFG_LINKDOWN_TMO].a_current == 0) || + clp[CFG_HOLDIO].a_current) && (binfo->fc_ffstate != FC_READY)) { + continue; + } + + ipri = disable_lock(FC_LVL, &CMD_LOCK); + fcptr = (fc_buf_t * ) rp->fc_txp.q_first; + while (fcptr != NULL) { + next_fcptr = fcptr->fc_fwd; + + if(fcptr->dev_ptr->queue_state == ACTIVE_PASSTHRU) { + /* Don't manage PASSTRU CMD HERE */ + fc_free_fcp_txq(p_dev_ctl, fcptr->iotag); + fcptr = next_fcptr; + continue; + } /* end ACTIVE_PASSTHRU management */ + + if(ntimercmp(fcptr->timeout, now, < ) && + ntimerisset(&fcptr->timeout)) { + + { + uint32 did; + uint32 pan; + uint32 sid; + + dev_ptr = fcptr->dev_ptr; + if ((dev_ptr->nodep) && (dev_ptr->nodep->nlp)) { + did = dev_ptr->nodep->nlp->nlp_DID; + pan = 
dev_ptr->nodep->nlp->id.nlp_pan; + sid = dev_ptr->nodep->nlp->id.nlp_sid; + } else { + did = 0; + pan = 0; + sid = 0; + } + /* SCSI timeout */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0754, /* ptr to msg structure */ + fc_mes0754, /* ptr to msg */ + fc_msgBlk0754.msgPreambleStr, /* begin varargs */ + did, + FC_SCSID(pan, sid) ); /* end varargs */ + } + if (!(fcptr->flags & FCBUF_ABTS2)) { + /* Operation timeout, send ABTS for this exchange */ + if (fc_abort_xri(binfo, fcptr->dev_ptr, + fcptr->iotag, ABORT_TYPE_ABTS)) { + /* ABTS not sent this time, out of IOCBs */ + goto skip_rlip; + } else { + if (fcptr->flags & FCBUF_ABTS) { + /* Second ABTS sent for this command */ + fcptr->flags |= FCBUF_ABTS2; + } else { + /* First ABTS sent for this command */ + fcptr->flags |= FCBUF_ABTS; + } + } + fcptr = next_fcptr; + continue; + } + + /* Operation timeout, start loop initialization (LIP) */ + if (dev_ptr->queue_state != STOPPING) { + dev_ptr->queue_state = HALTED; + } + + do_rlip = 1; + +skip_rlip: + sbp = fcptr->sc_bufp; + fc_deq_fcbuf_active(rp, fcptr->iotag); + + sbp->bufstruct.b_error = ETIMEDOUT; + sbp->bufstruct.b_flags |= B_ERROR; + sbp->bufstruct.b_resid = sbp->bufstruct.b_bcount; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp, SC_CMD_TIMEOUT) + + if (fcptr->fcp_cmd.fcpCntl2) { + /* This is a task management command */ + dev_ptr->ioctl_errno = ETIMEDOUT; + + if (dev_ptr->ioctl_wakeup == 1) { + dev_ptr->ioctl_wakeup = 0; + + fc_admin_wakeup(p_dev_ctl, dev_ptr, sbp); + } + } else { + /* Don't iodone this buf until adapter cleared out */ + if(fcptr->flags & FCBUF_INTERNAL) { + if(fcptr->fcp_cmd.fcpCdb[0] != FCP_SCSI_REPORT_LUNS) { + fc_free(p_dev_ctl, (MBUF_INFO *)sbp); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )sbp); + + if((fcptr->fcp_cmd.fcpCdb[0] == FCP_SCSI_REPORT_LUNS) && + (dev_ptr->nodep) && + (dev_ptr->nodep->rptlunstate == REPORT_LUN_ONGOING)) { + dev_ptr->nodep->flags &= ~RETRY_RPTLUN; + dev_ptr->nodep->rptlunstate = 
REPORT_LUN_REQUIRED; + } + } + else { + if (p_dev_ctl->timeout_head == NULL) + p_dev_ctl->timeout_head = (struct buf *)sbp; + else { + clrptr = p_dev_ctl->timeout_head; + while (clrptr->av_forw) + clrptr = clrptr->av_forw; + clrptr->av_forw = (struct buf *)sbp; + } + p_dev_ctl->timeout_count++; + } + dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + sbp->bufstruct.av_forw = NULL; + } + + fc_free_fcp_txq(p_dev_ctl, fcptr->iotag); + fc_enq_fcbuf(fcptr); + } + fcptr = next_fcptr; + } + unlock_enable(ipri, &CMD_LOCK); + } + + /* fix multiple init_link problem */ + if(do_rlip) { + ipri = disable_lock(FC_LVL, &CMD_LOCK); + fc_rlip(p_dev_ctl); + unlock_enable(ipri, &CMD_LOCK); + } + continue; + } + } + + SCSI_TMO = fc_clk_set(0, 5, fc_scsi_timeout, 0, 0); + return; +} /* End fc_scsi_timeout */ + +_static_ int +fc_abort_fcp_txpq( +FC_BRD_INFO *binfo, +dvi_t *dev_ptr) +{ + fc_buf_t * fcptr1, * next_fcptr; + RING * rp; + int cnt; + fc_dev_ctl_t * p_dev_ctl; + unsigned long iflag; + + p_dev_ctl = (fc_dev_ctl_t *)binfo->fc_p_dev_ctl; + iflag = lpfc_q_disable_lock(p_dev_ctl); + rp = &binfo->fc_ring[FC_FCP_RING]; + cnt = 0; + + /* send ABTS on any outstanding I/O in txp queue */ + fcptr1 = (fc_buf_t *)rp->fc_txp.q_first; + while (fcptr1 != NULL) { + next_fcptr = fcptr1->fc_fwd; + if (fcptr1->dev_ptr == dev_ptr) { + lpfc_q_unlock_enable(p_dev_ctl, iflag); + fc_abort_xri(binfo, fcptr1->dev_ptr, fcptr1->iotag, ABORT_TYPE_ABTS); + iflag = lpfc_q_disable_lock(p_dev_ctl); + cnt++; + } + fcptr1 = next_fcptr; + } + lpfc_q_unlock_enable(p_dev_ctl, iflag); + return(cnt); +} + +/* + * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued. 
+ */ +_static_ int +fc_abort_xri( +FC_BRD_INFO *binfo, +dvi_t *dev_ptr, +ushort iotag, +int flag) +{ + IOCB * icmd; + IOCBQ * temp; + RING * rp; + + rp = &binfo->fc_ring[FC_FCP_RING]; + + if ((binfo->fc_ffstate != FC_READY) || + (dev_ptr->nodep->rpi == 0xfffe)) { + return(1); + } + + /* Get an iocb buffer */ + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == 0) { + return(1); + } + fc_bzero((void *)temp, sizeof(IOCBQ)); + icmd = &temp->iocb; + + icmd->un.acxri.abortType = flag; + icmd->un.acxri.abortContextTag = dev_ptr->nodep->rpi; + icmd->un.acxri.abortIoTag = iotag; + + /* set up an iotag using special ABTS iotags */ + icmd->ulpIoTag = (unsigned)rp->fc_bufcnt++; + if (rp->fc_bufcnt == 0) { + rp->fc_bufcnt = MAX_FCP_CMDS; + } + + icmd->ulpLe = 1; + icmd->ulpClass = (dev_ptr->nodep->nlp->id.nlp_fcp_info & 0x0f); + icmd->ulpCommand = CMD_ABORT_XRI_CN; + icmd->ulpOwner = OWN_CHIP; + + issue_iocb_cmd(binfo, rp, temp); + + return(0); +} /* End fc_abort_xri */ + + +/* + * Issue an ABORT_XRI_CX iocb command to abort an IXri. 
+ */ +_static_ int +fc_abort_ixri_cx( +FC_BRD_INFO *binfo, +ushort xri, +uint32 cmd, +RING *rp) +{ + IOCB * icmd; + IOCBQ * temp; + NODELIST * ndlp; + + /* Get an iocb buffer */ + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == 0) { + return(1); + } + + if( (ndlp = fc_findnode_oxri(binfo, NLP_SEARCH_MAPPED | NLP_SEARCH_UNMAPPED, xri)) == 0 ) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + return (1); + } + + fc_bzero((void *)temp, sizeof(IOCBQ)); + icmd = &temp->iocb; + + icmd->un.acxri.abortType = ABORT_TYPE_ABTS; + icmd->ulpContext = xri; + + /* set up an iotag */ + icmd->ulpIoTag0 = (unsigned)rp->fc_iotag++; + if ((rp->fc_iotag & 0x3fff) == 0) { + rp->fc_iotag = 1; + } + + icmd->ulpLe = 1; + icmd->ulpClass = ndlp->id.nlp_ip_info; + icmd->ulpCommand = cmd; + icmd->ulpOwner = OWN_CHIP; + + issue_iocb_cmd(binfo, rp, temp); + + return(0); +} /* End fc_abort_ixri_cx */ + + +/**************************************************/ +/** handle_mb_cmd **/ +/** **/ +/** Description: Process a Mailbox Command. 
**/ +/** Called from host_interrupt to process MBATT **/ +/** **/ +/** Returns: **/ +/** **/ +/**************************************************/ +_static_ int +handle_mb_cmd( +fc_dev_ctl_t *p_dev_ctl, +MAILBOX *mb, +uint32 cmd) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + MAILBOXQ * mbox; + NODELIST * ndlp; + NODELIST * new_ndlp; + struct buf *bp, *nextbp; + RING * rp; + int i; + void *ioa; + uint32 control, ldid, lrpi, ldata; + node_t * node_ptr; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* Mailbox command completed successfully, process completion */ + switch (cmd) { + case MBX_LOAD_SM: + case MBX_READ_NV: /* a READ NVPARAMS command completed */ + case MBX_WRITE_NV: /* a WRITE NVPARAMS command completed */ + case MBX_RUN_BIU_DIAG: + case MBX_INIT_LINK: /* a LINK INIT command completed */ + case MBX_SET_SLIM: + case MBX_SET_DEBUG: + case MBX_PART_SLIM: /* a PARTITION SLIM command completed */ + case MBX_CONFIG_RING: /* a CONFIGURE RING command completed */ + case MBX_RESET_RING: + case MBX_READ_CONFIG: + case MBX_READ_RCONFIG: + case MBX_READ_STATUS: + case MBX_READ_XRI: + case MBX_READ_REV: + case MBX_UNREG_D_ID: + case MBX_READ_LNK_STAT: + case MBX_DUMP_MEMORY: + case MBX_LOAD_AREA: + break; + + case MBX_CONFIG_LINK: /* a CONFIGURE LINK command completed */ + /* Change the cmdring_timeout value for IP and ELS commands */ + rp = &binfo->fc_ring[FC_ELS_RING]; + rp->fc_ringtmo = (2 * binfo->fc_ratov) + ((4 * binfo->fc_edtov) / 1000) + 1; + rp = &binfo->fc_ring[FC_IP_RING]; + rp->fc_ringtmo = (2 * binfo->fc_ratov) + ((4 * binfo->fc_edtov) / 1000) + 1; + binfo->fc_fabrictmo = (2 * binfo->fc_ratov) + ((4 * binfo->fc_edtov) / 1000) + 1; + + if (binfo->fc_ffstate == FC_CFG_LINK) { + binfo->fc_ffstate = FC_FLOGI; + if (binfo->fc_topology == TOPOLOGY_LOOP) { + /* If we are public loop and L bit was set */ + if ((binfo->fc_flag & FC_PUBLIC_LOOP) && + !(binfo->fc_flag & FC_LBIT)) { + /* Need to wait for FAN - use fabric timer for timeout. 
+ */ + binfo->fc_fabrictmo = ((binfo->fc_edtov) / 1000) + 1; + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + else { + FABRICTMO = fc_clk_set(p_dev_ctl, binfo->fc_fabrictmo, + fc_fabric_timeout, 0, 0); + } + break; + } + + binfo->fc_fabrictmo = (2*(binfo->fc_edtov) / 1000) + 1; + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + else { + FABRICTMO = fc_clk_set(p_dev_ctl, binfo->fc_fabrictmo, + fc_fabric_timeout, 0, 0); + } + /* For power_up == 0 see fc_ffinit */ + if(p_dev_ctl->power_up) + fc_initial_flogi(p_dev_ctl); + } + else { /* pt2pt */ + /* For power_up == 0 see fc_ffinit */ + if(p_dev_ctl->power_up) + fc_initial_flogi(p_dev_ctl); + } + } else { + if (binfo->fc_flag & FC_DELAY_DISC) { + /* Config_link is done, so start discovery */ + binfo->fc_flag &= ~FC_DELAY_DISC; + fc_discovery(p_dev_ctl); + if (binfo->fc_flag & FC_FABRIC) { + /* Register with Fabric for receiving RSCNs */ + fc_els_cmd(binfo, ELS_CMD_SCR, (void *)SCR_DID, + (uint32)0, (ushort)0, (NODELIST *)0); + } + } + } + break; + + case MBX_READ_SPARM: /* a READ SPARAM command completed */ + case MBX_READ_SPARM64: /* a READ SPARAM command completed */ + { + MATCHMAP * mp; + + mp = (MATCHMAP * )binfo->fc_mbbp; + + if(mp) { + fc_mpdata_sync(mp->dma_handle, 0, sizeof(SERV_PARM), + DDI_DMA_SYNC_FORKERNEL); + fc_mpdata_outcopy(p_dev_ctl, mp, (uchar * ) & binfo->fc_sparam, + sizeof(SERV_PARM)); + + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + + binfo->fc_mbbp = 0; + + + fc_bcopy((uchar * ) & binfo->fc_sparam.nodeName, (uchar * ) & binfo->fc_nodename, + sizeof(NAME_TYPE)); + fc_bcopy((uchar * ) & binfo->fc_sparam.portName, (uchar * ) & binfo->fc_portname, + sizeof(NAME_TYPE)); + fc_bcopy(binfo->fc_portname.IEEE, p_dev_ctl->phys_addr, 6); + } + break; + } + + case MBX_READ_RPI: + case MBX_READ_RPI64: + if (binfo->fc_flag & FC_SLI2) { + /* First copy command data */ + mb = FC_SLI2_MAILBOX(binfo); + ldata = mb->un.varWords[0]; /* get rpi */ + ldata = 
PCIMEM_LONG(ldata); + lrpi = ldata & 0xffff; + ldata = mb->un.varWords[1]; /* get did */ + ldata = PCIMEM_LONG(ldata); + ldid = ldata & Mask_DID; + ldata = mb->un.varWords[30]; + ldata = PCIMEM_LONG(ldata); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + ldata = READ_SLIM_ADDR(binfo, (uint32 * ) & mb->un.varWords[0]); + lrpi = ldata & 0xffff; + ldata = READ_SLIM_ADDR(binfo, (uint32 * ) & mb->un.varWords[1]); + ldid = ldata & Mask_DID; + ldata = READ_SLIM_ADDR(binfo, (uint32 * ) & mb->un.varWords[30]); + FC_UNMAP_MEMIO(ioa); + } + + if (ldata == ELS_CMD_LOGO) { + if (((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, ldid)) == 0) || + (!(ndlp->nlp_action & NLP_DO_ADDR_AUTH) && + !(ndlp->nlp_flag & (NLP_FARP_SND | NLP_REQ_SND)))) { + + if (ndlp) { + if (ndlp->nlp_Rpi) + break; /* Now we have an rpi so don't logout */ + } + fc_els_cmd(binfo, ELS_CMD_LOGO, (void *)((ulong)ldid), + (uint32)0, (ushort)0, ndlp); + } + } + break; + + case MBX_REG_LOGIN: + case MBX_REG_LOGIN64: + if (binfo->fc_mbbp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )binfo->fc_mbbp); + binfo->fc_mbbp = 0; + } + + if (binfo->fc_flag & FC_SLI2) { + /* First copy command data */ + mb = FC_SLI2_MAILBOX(binfo); + ldata = mb->un.varWords[0]; /* get rpi */ + ldata = PCIMEM_LONG(ldata); + lrpi = ldata & 0xffff; + ldata = mb->un.varWords[1]; /* get did */ + ldata = PCIMEM_LONG(ldata); + ldid = ldata & Mask_DID; + ldata = mb->un.varWords[30]; + ldata = PCIMEM_LONG(ldata); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + ldata = READ_SLIM_ADDR(binfo, (uint32 * ) & mb->un.varWords[0]); + lrpi = ldata & 0xffff; + ldata = READ_SLIM_ADDR(binfo, (uint32 * ) & mb->un.varWords[1]); + ldid = ldata & Mask_DID; + ldata = READ_SLIM_ADDR(binfo, (uint32 * ) & mb->un.varWords[30]); + FC_UNMAP_MEMIO(ioa); + } + + /* Register RPI, will fill 
in XRI later */ + if ((ndlp=fc_findnode_odid(binfo, NLP_SEARCH_ALL, ldid))) { + ndlp->nlp_Rpi = (short)lrpi; + binfo->fc_nlplookup[lrpi] = ndlp; + ndlp->nlp_state = NLP_LOGIN; + /* REG_LOGIN cmpl */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0311, /* ptr to msg structure */ + fc_mes0311, /* ptr to msg */ + fc_msgBlk0311.msgPreambleStr, /* begin varargs */ + ndlp->nlp_DID, + ndlp->nlp_state, + ndlp->nlp_flag, + ndlp->nlp_Rpi ); /* end varargs */ + fc_nlp_unmap(binfo, ndlp); + + /* If word30 is set, send back ACC */ + if (ldata) { + REG_WD30 wd30; + + wd30.word = ldata; + + /* Wait for ACC to complete before issuing PRLI */ + fc_els_rsp(binfo, ELS_CMD_ACC, (uint32)wd30.f.xri, + (uint32)wd30.f.class, (void *)0, (uint32)sizeof(SERV_PARM), ndlp); + ndlp->nlp_flag |= NLP_REG_INP; + break; + } + + if (ndlp->nlp_DID == binfo->fc_myDID) { + ndlp->nlp_state = NLP_LOGIN; + } else { + fc_process_reglogin(p_dev_ctl, ndlp); + } + } else { + if (ldata) { + /* Dropping ELS rsp */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0103, /* ptr to msg structure */ + fc_mes0103, /* ptr to msg */ + fc_msgBlk0103.msgPreambleStr, /* begin varargs */ + ldata, + ldid ); /* end varargs */ + } + + /* Can't find NODELIST entry for this login, so unregister it */ + if ((mbox = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + fc_unreg_login(binfo, lrpi, (MAILBOX * )mbox); + if (issue_mb_cmd(binfo, (MAILBOX * )mbox, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbox); + } + } + } + + break; + + case MBX_UNREG_LOGIN: + if (binfo->fc_flag & FC_SLI2) { + /* First copy command data */ + mb = FC_SLI2_MAILBOX(binfo); + ldata = mb->un.varWords[0]; /* get rpi */ + ldata = PCIMEM_LONG(ldata); + lrpi = ldata & 0xffff; + ldata = mb->un.varWords[30]; + ldata = PCIMEM_LONG(ldata); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + ldata = READ_SLIM_ADDR(binfo, (uint32 * ) & 
mb->un.varWords[0]); + lrpi = ldata & 0xffff; + ldata = READ_SLIM_ADDR(binfo, (uint32 * ) & mb->un.varWords[30]); + FC_UNMAP_MEMIO(ioa); + } + + /* If word30 is set, send back LOGO */ + if (ldata) { + fc_els_cmd(binfo, ELS_CMD_LOGO, (void *)((ulong)ldata), (uint32)0, (ushort)1, (NODELIST *)0); + } + break; + + case MBX_READ_LA: + case MBX_READ_LA64: + { + READ_LA_VAR la; + MATCHMAP * mp; + + if (binfo->fc_flag & FC_SLI2) { + /* First copy command data */ + mb = FC_SLI2_MAILBOX(binfo); + fc_pcimem_bcopy((uint32 * )((char *)mb + sizeof(uint32)), (uint32 * ) & la, + sizeof(READ_LA_VAR)); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + READ_SLIM_COPY(binfo, (uint32 * ) & la, + (uint32 * )((char *)mb + sizeof(uint32)), + (sizeof(READ_LA_VAR) / sizeof(uint32))); + FC_UNMAP_MEMIO(ioa); + } + + mp = (MATCHMAP * )binfo->fc_mbbp; + if(mp) { + fc_mpdata_sync(mp->dma_handle, 0, 128, DDI_DMA_SYNC_FORKERNEL); + fc_mpdata_outcopy(p_dev_ctl, mp, (uchar * )binfo->alpa_map, 128); + + binfo->fc_mbbp = 0; + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + } + + if (la.pb) + binfo->fc_flag |= FC_BYPASSED_MODE; + else + binfo->fc_flag &= ~FC_BYPASSED_MODE; + + if (((binfo->fc_eventTag + 1) < la.eventTag) || + (binfo->fc_eventTag == la.eventTag)) { + FCSTATCTR.LinkMultiEvent++; + if (la.attType == AT_LINK_UP) { + if (binfo->fc_eventTag != 0) { /* Pegasus */ + fc_linkdown(p_dev_ctl); + if (!(binfo->fc_flag & FC_LD_TIMER)) { + /* Start the link down watchdog timer until CLA done */ + rp = &binfo->fc_ring[FC_FCP_RING]; + RINGTMO = fc_clk_set(p_dev_ctl, rp->fc_ringtmo, + fc_linkdown_timeout, 0, 0); + if((clp[CFG_LINKDOWN_TMO].a_current == 0) || + clp[CFG_HOLDIO].a_current) { + binfo->fc_flag |= FC_LD_TIMEOUT; + } + binfo->fc_flag |= FC_LD_TIMER; + } + } + } + } + + binfo->fc_eventTag = la.eventTag; + + if (la.attType == AT_LINK_UP) { + FCSTATCTR.LinkUp++; + /* Link Up Event received */ + 
fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1304, /* ptr to msg structure */ + fc_mes1304, /* ptr to msg */ + fc_msgBlk1304.msgPreambleStr, /* begin varargs */ + la.eventTag, + binfo->fc_eventTag, + la.granted_AL_PA, + binfo->alpa_map[0] ); /* end varargs */ + if(clp[CFG_NETWORK_ON].a_current) { + /* Stop the link down watchdog timer */ + rp = &binfo->fc_ring[FC_IP_RING]; + if(RINGTMO) { + fc_clk_can(p_dev_ctl, RINGTMO); + RINGTMO = 0; + } + } + binfo->fc_ffstate = FC_LINK_UP; + binfo->fc_flag &= ~(FC_LNK_DOWN | FC_PT2PT | FC_PT2PT_PLOGI | + FC_LBIT | FC_RSCN_MODE | FC_NLP_MORE | FC_DELAY_DISC | + FC_RSCN_DISC_TMR | FC_RSCN_DISCOVERY); + binfo->fc_ns_retry = 0; + + if( la.UlnkSpeed == LA_2GHZ_LINK) + binfo->fc_linkspeed = LA_2GHZ_LINK; + else + binfo->fc_linkspeed = 0; + + if ((binfo->fc_topology = la.topology) == TOPOLOGY_LOOP) { + + if (la.il) { + binfo->fc_flag |= FC_LBIT; + fc_freenode_did(binfo, Fabric_DID, 1); + } + + binfo->fc_myDID = la.granted_AL_PA; + + dfc_hba_put_event(p_dev_ctl, HBA_EVENT_LINK_UP, binfo->fc_myDID, + la.topology, la.lipType, la.UlnkSpeed); + dfc_put_event(p_dev_ctl, FC_REG_LINK_EVENT, 0, 0, 0); + + if (binfo->fc_flag & FC_SLI2) { + i = la.un.lilpBde64.tus.f.bdeSize; + } else { + i = la.un.lilpBde.bdeSize; + } + if (i == 0) { + binfo->alpa_map[0] = 0; + } else { + if(clp[CFG_LOG_VERBOSE].a_current & DBG_LINK_EVENT) { + int numalpa, j, k; + union { + uchar pamap[16]; + struct { + uint32 wd1; + uint32 wd2; + uint32 wd3; + uint32 wd4; + } pa; + } un; + + numalpa = binfo->alpa_map[0]; + j = 0; + while (j < numalpa) { + fc_bzero(un.pamap, 16); + for (k = 1; j < numalpa; k++) { + un.pamap[k-1] = binfo->alpa_map[j+1]; + j++; + if (k == 16) + break; + } + /* Link Up Event ALPA map */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1305, /* ptr to msg structure */ + fc_mes1305, /* ptr to msg */ + fc_msgBlk1305.msgPreambleStr, /* begin varargs */ + un.pa.wd1, + un.pa.wd2, + un.pa.wd3, + un.pa.wd4 ); /* end varargs */ + } + } + 
} + } else { + fc_freenode_did(binfo, Fabric_DID, 1); + + binfo->fc_myDID = binfo->fc_pref_DID; + + dfc_hba_put_event(p_dev_ctl, HBA_EVENT_LINK_UP, binfo->fc_myDID, + la.topology, la.lipType, la.UlnkSpeed); + dfc_put_event(p_dev_ctl, FC_REG_LINK_EVENT, 0, 0, 0); + } + + if ((mbox = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + /* This should turn on DELAYED ABTS for ELS timeouts */ + fc_set_slim(binfo, (MAILBOX * )mbox, 0x052198, 0x1); + /* unreg_login mailbox command could be executing, + * queue this command to be processed later. + */ + fc_mbox_put(binfo, mbox); + } + + if ((mbox = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + if(fc_read_sparam(p_dev_ctl, (MAILBOX * )mbox) == 0) { + /* set_slim mailbox command needs to execute first, + * queue this command to be processed later. + */ + fc_mbox_put(binfo, mbox); + } else { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbox); + } + } + if ((mbox = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + binfo->fc_ffstate = FC_CFG_LINK; + fc_config_link(p_dev_ctl, (MAILBOX * )mbox); + /* read_sparam mailbox command needs to execute first, + * queue this command to be processed later. 
+ */ + fc_mbox_put(binfo, mbox); + } + + + } /* end if link up */ + else { + FCSTATCTR.LinkDown++; + dfc_hba_put_event(p_dev_ctl, HBA_EVENT_LINK_DOWN, binfo->fc_myDID, + la.topology, la.lipType, la.UlnkSpeed); + dfc_put_event(p_dev_ctl, FC_REG_LINK_EVENT, 0, 0, 0); + /* Link Down Event received */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1306, /* ptr to msg structure */ + fc_mes1306, /* ptr to msg */ + fc_msgBlk1306.msgPreambleStr, /* begin varargs */ + la.eventTag, + binfo->fc_eventTag, + la.granted_AL_PA, + binfo->alpa_map[0] ); /* end varargs */ + fc_linkdown(p_dev_ctl); + + if (!(binfo->fc_flag & FC_LD_TIMER)) { + /* Start the link down watchdog timer until CLA done */ + rp = &binfo->fc_ring[FC_FCP_RING]; + RINGTMO = fc_clk_set(p_dev_ctl, rp->fc_ringtmo, + fc_linkdown_timeout, 0, 0); + if((clp[CFG_LINKDOWN_TMO].a_current == 0) || + clp[CFG_HOLDIO].a_current) { + binfo->fc_flag |= FC_LD_TIMEOUT; + } + binfo->fc_flag |= FC_LD_TIMER; + } + + /* turn on Link Attention interrupts - no CLEAR_LA needed */ + binfo->fc_process_LA = 1; + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + control = READ_CSR_REG(binfo, FC_HC_REG(binfo, ioa)); + control |= HC_LAINT_ENA; + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), control); + FC_UNMAP_MEMIO(ioa); + } + break; + } + + case MBX_CLEAR_LA: + /* Turn on Link Attention interrupts */ + binfo->fc_process_LA = 1; + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + control = READ_CSR_REG(binfo, FC_HC_REG(binfo, ioa)); + control |= HC_LAINT_ENA; + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), control); + FC_UNMAP_MEMIO(ioa); + + if ((!(binfo->fc_flag & FC_LNK_DOWN)) && + (binfo->fc_ffstate != FC_ERROR) && + (mb->mbxStatus != 0x1601)) { /* Link is Up */ + + if (!(binfo->fc_flag & FC_PT2PT)) { + /* Device Discovery completes */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0234, /* ptr to msg structure */ + fc_mes0234, /* ptr to msg */ + 
fc_msgBlk0234.msgPreambleStr); /* begin & end varargs */ + binfo->fc_nlp_cnt = 0; /* In case we need to do RSCNs */ + binfo->fc_firstopen = 0; + + /* Fix up any changed RPIs in FCP IOCBs queued up a txq */ + fc_fcp_fix_txq(p_dev_ctl); + + binfo->fc_ffstate = FC_READY; + + /* Check to see if we need to process an RSCN */ + if(binfo->fc_flag & FC_RSCN_MODE) { + fc_nextrscn(p_dev_ctl, fc_max_els_sent); + } + } + + /* Do FDMI to Register HBA and port */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, FDMI_DID))) { + if (fc_fdmi_cmd(p_dev_ctl, ndlp, SLI_MGMT_DPRT)) { + /* Issue FDMI request failed */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0219, /* ptr to msg structure */ + fc_mes0219, /* ptr to msg */ + fc_msgBlk0219.msgPreambleStr, /* begin varargs */ + SLI_MGMT_DPRT ); /* end varargs */ + } + } + + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + /* skip myself, fabric nodes and partially logged in nodes */ + if ((ndlp->nlp_DID == binfo->fc_myDID) || + (ndlp->nlp_type & NLP_FABRIC) || + (ndlp->nlp_state != NLP_ALLOC)) + goto loop1; + + /* Allocate exchanges for all IP (non-FCP) nodes */ + if ((ndlp->nlp_Rpi) && + (ndlp->nlp_Xri == 0) && + ((ndlp->nlp_DID & CT_DID_MASK) != CT_DID_MASK) && + !(ndlp->nlp_flag & NLP_RPI_XRI) && + !(ndlp->nlp_type & NLP_FCP_TARGET)) { + ndlp->nlp_flag |= NLP_RPI_XRI; + fc_create_xri(binfo, &binfo->fc_ring[FC_ELS_RING], ndlp); + } + + if (ndlp->nlp_type & NLP_FCP_TARGET) { + int dev_index; + + dev_index = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + node_ptr = binfo->device_queue_hash[dev_index].node_ptr; + if(node_ptr) { + /* This is a new device that entered the loop */ + node_ptr->nlp = ndlp; + node_ptr->rpi = ndlp->nlp_Rpi; + node_ptr->last_good_rpi = ndlp->nlp_Rpi; + node_ptr->scsi_id = dev_index; + ndlp->nlp_targetp = (uchar *)node_ptr; + 
node_ptr->flags &= ~FC_NODEV_TMO; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + } + } + + if (ndlp->nlp_type & NLP_IP_NODE) { + fc_restartio(p_dev_ctl, ndlp); + } +loop1: + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + /* If we are not point to point, reglogin to ourself */ + if (!(binfo->fc_flag & FC_PT2PT)) { + /* Build nlplist entry and Register login to ourself */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, binfo->fc_myDID))) { + ndlp->nlp_DID = binfo->fc_myDID; + fc_nlp_logi(binfo, ndlp, &(binfo->fc_portname), &(binfo->fc_nodename)); + } + else { + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = binfo->fc_myDID; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + fc_nlp_logi(binfo, ndlp, &(binfo->fc_portname), &(binfo->fc_nodename)); + } + } + if(ndlp) { + if ((mbox = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))){ + fc_reg_login(binfo, binfo->fc_myDID, + (uchar * ) & binfo->fc_sparam, (MAILBOX * )mbox, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mbox, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbox); + } + } + } + } else { + /* We are pt2pt no fabric */ + if (binfo->fc_flag & FC_PT2PT_PLOGI) { + /* Build nlplist entry and Register login to ourself */ + if ((ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, binfo->fc_myDID))) { + ndlp->nlp_DID = binfo->fc_myDID; + fc_nlp_logi(binfo, ndlp, &(binfo->fc_portname), &(binfo->fc_nodename)); + } + else { + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = binfo->fc_myDID; + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + fc_nlp_logi(binfo, ndlp, &(binfo->fc_portname), &(binfo->fc_nodename)); + } + } + if(ndlp) { + if ((mbox 
= (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))){ + fc_reg_login(binfo, binfo->fc_myDID, + (uchar * ) & binfo->fc_sparam, (MAILBOX * )mbox, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mbox, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbox); + } + } + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)PT2PT_RemoteID, + (uint32)0, (ushort)0, (NODELIST *)0); + } + } + } + + if(binfo->fc_flag & FC_ESTABLISH_LINK) { + binfo->fc_flag &= ~FC_ESTABLISH_LINK; + } + + if(p_dev_ctl->fc_estabtmo) { + fc_clk_can(p_dev_ctl, p_dev_ctl->fc_estabtmo); + p_dev_ctl->fc_estabtmo = 0; + } + + if(((clp[CFG_LINKDOWN_TMO].a_current == 0) || + clp[CFG_HOLDIO].a_current) && + (binfo->fc_flag & FC_LD_TIMEOUT)) { + fc_failio(p_dev_ctl); + } + + /* Stop the link down watchdog timer */ + rp = &binfo->fc_ring[FC_FCP_RING]; + if(RINGTMO) { + fc_clk_can(p_dev_ctl, RINGTMO); + RINGTMO = 0; + } + binfo->fc_flag &= ~(FC_LD_TIMEOUT | FC_LD_TIMER); + + if(clp[CFG_FCP_ON].a_current) { + fc_restart_all_devices(p_dev_ctl); + + /* Call iodone for any commands that timed out previously */ + for (bp = p_dev_ctl->timeout_head; bp != NULL; ) { + nextbp = bp->av_forw; + bp->b_error = ETIMEDOUT; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + FCSTATCTR.fcpScsiTmo++; + fc_do_iodone(bp); + bp = nextbp; + } + p_dev_ctl->timeout_count = 0; + p_dev_ctl->timeout_head = NULL; + + /* Send down any saved FCP commands */ + fc_issue_cmd(p_dev_ctl); + } + + if (binfo->fc_deferip) { + handle_ring_event(p_dev_ctl, FC_IP_RING, + (uint32)binfo->fc_deferip); + binfo->fc_deferip = 0; + } + } + break; + + default: + /* Unknown Mailbox command completion */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0312, /* ptr to msg structure */ + fc_mes0312, /* ptr to msg */ + fc_msgBlk0312.msgPreambleStr, /* begin varargs */ + cmd ); /* end varargs */ + FCSTATCTR.mboxCmdInval++; + break; + } + + binfo->fc_mbbp = 0; + return(0); +} /* End handle_mb_cmd */ + + 
+/**************************************************/ +/** fc_linkdown **/ +/** **/ +/** Description: Process a Link Down event. **/ +/** Called from host_intupt to process LinkDown **/ +/** **/ +/** Returns: **/ +/** **/ +/**************************************************/ +_static_ int +fc_linkdown( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + RING * rp; + NODELIST * ndlp; + NODELIST * new_ndlp; + MAILBOXQ * mb; + IOCBQ * xmitiq; + IOCBQ * iocbq; + MATCHMAP * mp; + ULP_BDE64 * addr; + + binfo = &BINFO; + binfo->fc_prevDID = binfo->fc_myDID; + binfo->fc_ffstate = FC_LINK_DOWN; + binfo->fc_flag |= FC_LNK_DOWN; + binfo->fc_flag &= ~FC_DELAY_PLOGI; + + + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_unreg_did(binfo, 0xffffffff, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + + /* Free all nodes in nlplist */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + /* Any RSCNs in progress don't matter at this point */ + ndlp->nlp_action &= ~NLP_DO_RSCN; + + if ((ndlp->nlp_type & NLP_IP_NODE) && ndlp->nlp_bp) { + m_freem((fcipbuf_t *)ndlp->nlp_bp); + ndlp->nlp_bp = (uchar * )0; + } + + /* Need to abort all exchanges, used only on IP */ + if (ndlp->nlp_Xri) { + fc_rpi_abortxri(binfo, ndlp->nlp_Xri); + ndlp->nlp_Xri = 0; + } + + /* Need to free all nodes in the process of login / registration + * as well as all Fabric nodes and myself. 
+ */ + if ((ndlp->nlp_DID == binfo->fc_myDID) || + (!(ndlp->nlp_type & NLP_FABRIC) && ((ndlp->nlp_DID & CT_DID_MASK) == CT_DID_MASK)) || + (binfo->fc_flag & FC_PT2PT) || + (ndlp->nlp_state < NLP_ALLOC)) { + NAME_TYPE zero_pn; + + fc_bzero((void *)&zero_pn, sizeof(NAME_TYPE)); + if ((fc_geportname(&ndlp->nlp_portname, &zero_pn) == 2) && + (ndlp->nlp_state < NLP_LOGIN) && + ((ndlp->nlp_type & (NLP_AUTOMAP | NLP_SEED_MASK)) == 0)) { + fc_freenode(binfo, ndlp, 1); + } + else { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + } + + /* If we are not using ADISC, free fcp nodes here to avoid excessive + * actitivity when during PLOGIs when link comes back up. + */ + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if((ndlp->nlp_state == NLP_ALLOC) && + (ndlp->nlp_type & NLP_FCP_TARGET) && + ((!clp[CFG_USE_ADISC].a_current))) { + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + } + + /* Any Discovery in progress doesn't matter at this point */ + ndlp->nlp_action &= ~(NLP_DO_ADDR_AUTH | NLP_DO_DISC_START); + + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + if (binfo->fc_flag & FC_PT2PT) { + binfo->fc_myDID = 0; + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + fc_config_link(p_dev_ctl, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + binfo->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); + } + + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if(clp[CFG_NETWORK_ON].a_current) { + rp = &binfo->fc_ring[FC_IP_RING]; + /* flush all xmit compls */ + while ((xmitiq = fc_ringtxp_get(rp, 0)) != 0) { + fc_freebufq(p_dev_ctl, rp, xmitiq); + } + NDDSTAT.ndd_xmitque_cur = 0; + } + + + fc_flush_clk_set(p_dev_ctl, fc_delay_timeout); + + if(FABRICTMO) { + 
fc_clk_can(p_dev_ctl, FABRICTMO); + FABRICTMO = 0; + } + + if(binfo->fc_rscn_disc_wdt) { + fc_clk_can(p_dev_ctl, binfo->fc_rscn_disc_wdt); + binfo->fc_rscn_disc_wdt = 0; + } + binfo->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISC_TMR | FC_RSCN_DISCOVERY); + binfo->fc_rscn_id_cnt = 0; + + /* Free any deferred RSCNs */ + fc_flush_rscn_defer(p_dev_ctl); + + /* Free any delayed ELS xmits */ + fc_abort_delay_els_cmd(p_dev_ctl, 0xffffffff); + + /* Look through ELS ring and remove any ELS cmds in progress */ + rp = &binfo->fc_ring[FC_ELS_RING]; + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + iocbq->retry = 0xff; /* Mark for abort */ + iocbq = (IOCBQ * )iocbq->q; + } + + if (rp->fc_tx.q_cnt) { + IOCB * icmd; + /* get next command from ring xmit queue */ + xmitiq = fc_ringtx_get(rp); + + while (xmitiq) { + icmd = &xmitiq->iocb; + if (icmd->ulpCommand == CMD_IOCB_CONTINUE_CN) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + xmitiq = fc_ringtx_get(rp); + continue; + } + + if(xmitiq->bp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->bp); + } + + if (binfo->fc_flag & FC_SLI2) { + + mp = (MATCHMAP *)xmitiq->bpl; + if(mp) { + addr = (ULP_BDE64 * )mp->virt; + addr++; /* goto the next one */ + + switch (icmd->ulpCommand) { + case CMD_ELS_REQUEST_CR: + case CMD_ELS_REQUEST64_CR: + case CMD_ELS_REQUEST_CX: + case CMD_ELS_REQUEST64_CX: + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->info); + break; + default: + if(xmitiq->info) + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->info); + } + fc_mem_put(binfo, MEM_BPL, (uchar * )mp); + } + } + else { + if (icmd->un.cont[1].bdeAddress) { + fc_mem_put(binfo, MEM_BUF, (uchar * )xmitiq->info); + } + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + xmitiq = fc_ringtx_get(rp); + } + } + + return(0); +} /* End fc_linkdown */ + +/**************************************************/ +/** fc_rlip **/ +/** **/ +/** Description: **/ +/** Called to reset the link with an init_link **/ +/** **/ +/** Returns: **/ +/** **/ 
+/**************************************************/ +_static_ int +fc_rlip( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + MAILBOX * mb; + + binfo = &BINFO; + + /* Start the Fibre Channel reset LIP process */ + if (binfo->fc_ffstate == FC_READY) { + /* Get a buffer to use for the mailbox command */ + if ((mb = (MAILBOX * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI)) == NULL) { + /* Device Discovery completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0235, /* ptr to msg structure */ + fc_mes0235, /* ptr to msg */ + fc_msgBlk0235.msgPreambleStr); /* begin & end varargs */ + binfo->fc_ffstate = FC_ERROR; + return(1); + } + + binfo->fc_flag |= FC_SCSI_RLIP; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* Setup and issue mailbox INITIALIZE LINK command */ + fc_linkdown(p_dev_ctl); + fc_init_link(binfo, (MAILBOX * )mb, clp[CFG_TOPOLOGY].a_current, clp[CFG_LINK_SPEED].a_current); + mb->un.varInitLnk.lipsr_AL_PA = 0; + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + /* SCSI Link Reset */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1307, /* ptr to msg structure */ + fc_mes1307, /* ptr to msg */ + fc_msgBlk1307.msgPreambleStr); /* begin & end varargs */ + } + return(0); +} /* End fc_rlip */ + +/**************************************************/ +/** fc_ns_cmd **/ +/** **/ +/** Description: **/ +/** Issue Cmd to NameServer **/ +/** SLI_CTNS_GID_FT **/ +/** SLI_CTNS_RFT_ID **/ +/** **/ +/** Returns: **/ +/** **/ +/**************************************************/ +_static_ int +fc_ns_cmd( +fc_dev_ctl_t *p_dev_ctl, +NODELIST *ndlp, +int cmdcode) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + MATCHMAP * mp, *bmp; + SLI_CT_REQUEST * CtReq; + ULP_BDE64 * bpl; + + binfo = &BINFO; + + /* fill in BDEs for command */ + /* Allocate buffer for command payload */ + if ((mp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF)) == 0) { + return(1); + } + + bmp = 0; + + /* 
Allocate buffer for Buffer ptr list */ + if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL)) == 0) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + return(1); + } + bpl = (ULP_BDE64 * )bmp->virt; + bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(mp->phys)); + bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(mp->phys)); + bpl->tus.f.bdeFlags = 0; + if (cmdcode == SLI_CTNS_GID_FT) + bpl->tus.f.bdeSize = GID_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_RFT_ID) + bpl->tus.f.bdeSize = RFT_REQUEST_SZ; + else + bpl->tus.f.bdeSize = 0; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + + CtReq = (SLI_CT_REQUEST * )mp->virt; + /* NameServer Req */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0236, /* ptr to msg structure */ + fc_mes0236, /* ptr to msg */ + fc_msgBlk0236.msgPreambleStr, /* begin varargs */ + cmdcode, + binfo->fc_flag, + binfo->fc_rscn_id_cnt); /* end varargs */ + fc_bzero((void *)CtReq, sizeof(SLI_CT_REQUEST)); + + CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; + CtReq->RevisionId.bits.InId = 0; + + CtReq->FsType = SLI_CT_DIRECTORY_SERVICE; + CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER; + + CtReq->CommandResponse.bits.Size = 0; + switch (cmdcode) { + case SLI_CTNS_GID_FT: + CtReq->CommandResponse.bits.CmdRsp = SWAP_DATA16(SLI_CTNS_GID_FT); + CtReq->un.gid.Fc4Type = SLI_CTPT_FCP; + if(binfo->fc_ffstate != FC_READY) + binfo->fc_ffstate = FC_NS_QRY; + break; + case SLI_CTNS_RFT_ID: + clp = DD_CTL.p_config[binfo->fc_brd_no]; + CtReq->CommandResponse.bits.CmdRsp = SWAP_DATA16(SLI_CTNS_RFT_ID); + CtReq->un.rft.PortId = SWAP_DATA(binfo->fc_myDID); + if(clp[CFG_FCP_ON].a_current) { + CtReq->un.rft.fcpReg = 1; + } + if(clp[CFG_NETWORK_ON].a_current) { + CtReq->un.rft.ipReg = 1; + } + if(binfo->fc_ffstate != FC_READY) + binfo->fc_ffstate = FC_NS_REG; + break; + } + + binfo->fc_ns_retry++; + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, FABRICTMO); + } + FABRICTMO = fc_clk_set(p_dev_ctl, binfo->fc_ratov, + fc_fabric_timeout, 0, 0); + + if(fc_ct_cmd(p_dev_ctl, mp, bmp, 
ndlp)) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + } + return(0); +} /* End fc_ns_cmd */ + +_static_ int +fc_free_ct_rsp( +fc_dev_ctl_t * p_dev_ctl, +MATCHMAP * mlist) +{ + FC_BRD_INFO * binfo; + MATCHMAP * mlast; + + binfo = &BINFO; + while(mlist) { + mlast = mlist; + mlist = (MATCHMAP *)mlist->fc_mptr; + + fc_mem_put(binfo, MEM_BUF, (uchar * )mlast); + } + return(0); +} + +_local_ MATCHMAP * +fc_alloc_ct_rsp( +fc_dev_ctl_t * p_dev_ctl, +ULP_BDE64 * bpl, +uint32 size, +int * entries) +{ + FC_BRD_INFO * binfo; + MATCHMAP * mlist; + MATCHMAP * mlast; + MATCHMAP * mp; + int cnt, i; + + binfo = &BINFO; + mlist = 0; + mlast = 0; + i = 0; + + while(size) { + + /* We get chucks of FCELSSIZE */ + if(size > FCELSSIZE) + cnt = FCELSSIZE; + else + cnt = size; + + /* Allocate buffer for rsp payload */ + if ((mp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF)) == 0) { + fc_free_ct_rsp(p_dev_ctl, mlist); + return(0); + } + + /* Queue it to a linked list */ + if(mlast == 0) { + mlist = mp; + mlast = mp; + } + else { + mlast->fc_mptr = (uchar *)mp; + mlast = mp; + } + mp->fc_mptr = 0; + + bpl->tus.f.bdeFlags = BUFF_USE_RCV; + + /* build buffer ptr list for IOCB */ + bpl->addrLow = PCIMEM_LONG(putPaddrLow((ulong)mp->phys)); + bpl->addrHigh = PCIMEM_LONG(putPaddrHigh((ulong)mp->phys)); + bpl->tus.f.bdeSize = (ushort)cnt; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + bpl++; + + i++; + size -= cnt; + } + + *entries = i; + return(mlist); +} + +_static_ int +fc_ct_cmd( +fc_dev_ctl_t *p_dev_ctl, +MATCHMAP *inmp, +MATCHMAP *bmp, +NODELIST *ndlp) +{ + FC_BRD_INFO * binfo; + ULP_BDE64 * bpl; + MATCHMAP * outmp; + int cnt; + + binfo = &BINFO; + bpl = (ULP_BDE64 * )bmp->virt; + bpl++; /* Skip past ct request */ + + cnt = 0; + /* Put buffer(s) for ct rsp in bpl */ + if((outmp = fc_alloc_ct_rsp(p_dev_ctl, bpl, FC_MAX_NS_RSP, &cnt)) == 0) { + return(ENOMEM); + } + + /* save ndlp for cmpl */ + inmp->fc_mptr = (uchar *)ndlp; + + if((fc_gen_req(binfo, bmp, 
inmp, outmp, ndlp->nlp_Rpi, 0, (cnt+1), 0))) { + fc_free_ct_rsp(p_dev_ctl, outmp); + return(ENOMEM); + } + return(0); +} /* End fc_ct_cmd */ + + +/**************************************************/ +/** fc_ns_rsp **/ +/** **/ +/** Description: **/ +/** Process NameServer response **/ +/** **/ +/** Returns: **/ +/** **/ +/**************************************************/ +_static_ int +fc_ns_rsp( +fc_dev_ctl_t *p_dev_ctl, +NODELIST *nslp, +MATCHMAP *mp, +uint32 Size) +{ + FC_BRD_INFO * binfo; + SLI_CT_REQUEST * Response; + NODELIST * ndlp; + NODELIST * new_ndlp; + MATCHMAP * mlast; + D_ID rscn_did; + D_ID ns_did; + uint32 * tptr; + uint32 Did; + uint32 Temp; + int j, Cnt, match, new_node; + + binfo = &BINFO; + ndlp = 0; + binfo->fc_ns_retry = 0; + + binfo->fc_fabrictmo = (2 * binfo->fc_ratov) + + ((4 * binfo->fc_edtov) / 1000) + 1; + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, FABRICTMO); + FABRICTMO = 0; + } + + Response = (SLI_CT_REQUEST * )mp->virt; + + if ((Response->CommandResponse.bits.CmdRsp == SWAP_DATA16(SLI_CT_RESPONSE_FS_ACC)) && + ((binfo->fc_ffstate == FC_NS_QRY) || + ((binfo->fc_ffstate == FC_READY) && (binfo->fc_flag & FC_RSCN_MODE)))) { + + tptr = (uint32 * ) & Response->un.gid.PortType; + while(mp) { + mlast = mp; + mp = (MATCHMAP *)mp->fc_mptr; + fc_mpdata_sync(mlast->dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + + if(Size > FCELSSIZE) + Cnt = FCELSSIZE; + else + Cnt = Size; + Size -= Cnt; + + if(tptr == 0) + tptr = (uint32 * )mlast->virt; + else + Cnt -= 16; /* subtract length of CT header */ + + while(Cnt) { + /* Loop through entire NameServer list of DIDs */ + + /* Get next DID from NameServer List */ + Temp = *tptr++; + Did = (SWAP_DATA(Temp) & Mask_DID); + + ndlp = 0; + if ((Did) && (Did != binfo->fc_myDID)) { + new_node = 0; + ndlp = fc_findnode_odid(binfo, NLP_SEARCH_ALL, Did); + if(ndlp) { + ndlp->nlp_DID = Did; + /* Skip nodes already marked for discovery / rscn */ + if(ndlp->nlp_action & + (NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | 
NLP_DO_RSCN)) + goto nsout; + } + else { + new_node = 1; + if((ndlp = (NODELIST *)fc_mem_get(binfo, MEM_NLP))) { + fc_bzero((void *)ndlp, sizeof(NODELIST)); + ndlp->sync = binfo->fc_sync; + ndlp->capabilities = binfo->fc_capabilities; + ndlp->nlp_DID = Did; + fc_nlp_bind(binfo, ndlp); + } + else + goto nsout; + } + + if ((new_node) || + (!(ndlp->nlp_flag & NLP_REQ_SND) && + (ndlp->nlp_state < NLP_ALLOC)) ) { + + if ((binfo->fc_ffstate == FC_READY) && + (binfo->fc_flag & FC_RSCN_MODE)) { + /* we are in RSCN node, so match Did from NameServer with + * with list recieved from previous RSCN commands. + * Do NOT add it to our RSCN discovery list unless we have + * a match. + */ + match = 0; + for(j=0;jfc_rscn_id_cnt;j++) { + + rscn_did.un.word = binfo->fc_rscn_id_list[j]; + ns_did.un.word = Did; + + switch (rscn_did.un.b.resv) { + case 0: /* Single N_Port ID effected */ + if (ns_did.un.word == rscn_did.un.word) { + match = 1; + } + break; + + case 1: /* Whole N_Port Area effected */ + if ((ns_did.un.b.domain == rscn_did.un.b.domain) && + (ns_did.un.b.area == rscn_did.un.b.area)) { + match = 1; + } + break; + + case 2: /* Whole N_Port Domain effected */ + if (ns_did.un.b.domain == rscn_did.un.b.domain) { + match = 1; + } + break; + + case 3: /* Whole Fabric effected */ + match = 1; + break; + + default: + /* Unknown Identifier in RSCN list */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0237, /* ptr to msg structure */ + fc_mes0237, /* ptr to msg */ + fc_msgBlk0237.msgPreambleStr, /* begin varargs */ + rscn_did.un.word); /* end varargs */ + break; + + } + if(match) + break; + } + if(match == 0) /* Skip it */ + goto nsout; + } + + /* Add it to our discovery list */ + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + if ((binfo->fc_ffstate == FC_READY) && + (binfo->fc_flag & FC_RSCN_MODE)) { + ndlp->nlp_action |= NLP_DO_RSCN; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + } + else { + ndlp->nlp_action |= NLP_DO_DISC_START; + } + } + else { + if 
(binfo->fc_ffstate < FC_READY) { + /* Add it to our discovery list */ + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + ndlp->nlp_action |= NLP_DO_DISC_START; + } + } + } +nsout: + + /* Mark all node table entries that are in the Nameserver */ + if(ndlp) { + ndlp->nlp_flag |= NLP_NS_NODE; + ndlp->nlp_flag &= ~NLP_NS_REMOVED; + /* NameServer Rsp */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0238, /* ptr to msg structure */ + fc_mes0238, /* ptr to msg */ + fc_msgBlk0238.msgPreambleStr, /* begin varargs */ + Did, + ndlp->nlp_flag, + binfo->fc_flag, + binfo->fc_rscn_id_cnt); /* end varargs */ + } + else { + /* NameServer Rsp */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0239, /* ptr to msg structure */ + fc_mes0239, /* ptr to msg */ + fc_msgBlk0239.msgPreambleStr, /* begin varargs */ + Did, + (ulong)ndlp, + binfo->fc_flag, + binfo->fc_rscn_id_cnt); /* end varargs */ + } + + if (Temp & SWAP_DATA(SLI_CT_LAST_ENTRY)) + goto nsout1; + Cnt -= sizeof(uint32); + } + tptr = 0; + } + +nsout1: + /* Take out all node table entries that are not in the NameServer */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + if ( (ndlp->nlp_state == NLP_LIMBO) || + (ndlp->nlp_state == NLP_SEED) || + (ndlp->nlp_DID == binfo->fc_myDID) || + (ndlp->nlp_DID == NameServer_DID) || + (ndlp->nlp_DID == FDMI_DID) || + (ndlp->nlp_type & NLP_FABRIC) || + (ndlp->nlp_flag & NLP_NS_NODE)) { + if(ndlp->nlp_flag & NLP_NS_NODE) { + ndlp->nlp_flag &= ~NLP_NS_NODE; + } else { + if(ndlp->nlp_DID != NameServer_DID) + ndlp->nlp_action &= ~(NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN); + } + goto loop1; + } + if ((binfo->fc_ffstate == FC_READY) && + (binfo->fc_flag & FC_RSCN_MODE) && + !(ndlp->nlp_action & 
NLP_DO_RSCN)) + goto loop1; + + if ((ndlp->nlp_DID != 0) && !(ndlp->nlp_flag & NLP_NODEV_TMO)) { + RING * rp; + IOCBQ * iocbq; + /* Look through ELS ring and remove any ELS cmds in progress */ + rp = &binfo->fc_ring[FC_ELS_RING]; + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + if(iocbq->iocb.un.elsreq.remoteID == ndlp->nlp_DID) { + iocbq->retry = 0xff; /* Mark for abort */ + } + iocbq = (IOCBQ * )iocbq->q; + } + /* In case its on fc_delay_timeout list */ + fc_abort_delay_els_cmd(p_dev_ctl, ndlp->nlp_DID); + + ndlp->nlp_flag &= ~(NLP_REQ_SND | NLP_REQ_SND_ADISC); + } + + ndlp->nlp_action &= ~(NLP_DO_ADDR_AUTH | NLP_DO_DISC_START | NLP_DO_RSCN); + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + ndlp->nlp_flag |= NLP_NS_REMOVED; + ndlp->nlp_type &= ~(NLP_FABRIC | NLP_IP_NODE); +loop1: + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + } else if ((Response->CommandResponse.bits.CmdRsp == SWAP_DATA16(SLI_CT_RESPONSE_FS_RJT)) && + ((binfo->fc_ffstate == FC_NS_QRY) || + ((binfo->fc_ffstate == FC_READY) && (binfo->fc_flag & FC_RSCN_MODE)))) { + /* NameServer Rsp Error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0240, /* ptr to msg structure */ + fc_mes0240, /* ptr to msg */ + fc_msgBlk0240.msgPreambleStr, /* begin varargs */ + Response->CommandResponse.bits.CmdRsp, + (uint32)Response->ReasonCode, + (uint32)Response->Explanation, + binfo->fc_flag); /* end varargs */ + goto nsout1; + + } else { + /* NameServer Rsp Error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0241, /* ptr to msg structure */ + fc_mes0241, /* ptr to msg */ + fc_msgBlk0241.msgPreambleStr, /* begin varargs */ + Response->CommandResponse.bits.CmdRsp, + (uint32)Response->ReasonCode, + (uint32)Response->Explanation, + binfo->fc_flag); /* end varargs */ + } + + if 
(binfo->fc_ffstate == FC_NS_REG) { + /* Issue GID_FT to Nameserver */ + if (fc_ns_cmd(p_dev_ctl, nslp, SLI_CTNS_GID_FT)) + goto out; + } else { +out: + /* Done with NameServer for now, but leave logged in */ + + /* We can start discovery right now */ + /* Fire out PLOGIs on all nodes marked for discovery */ + binfo->fc_rscn_id_cnt = 0; + if ((binfo->fc_nlp_cnt <= 0) && !(binfo->fc_flag & FC_NLP_MORE)) { + binfo->fc_nlp_cnt = 0; + if ((binfo->fc_ffstate == FC_READY) && + (binfo->fc_flag & FC_RSCN_MODE)) { + nslp->nlp_action &= ~(NLP_DO_ADDR_AUTH | NLP_DO_RSCN); + fc_nextrscn(p_dev_ctl, fc_max_els_sent); + } + else { + nslp->nlp_action |= NLP_DO_ADDR_AUTH; + fc_nextnode(p_dev_ctl, nslp); + } + } + else { + nslp->nlp_action |= NLP_DO_ADDR_AUTH; + fc_nextnode(p_dev_ctl, nslp); + } + } + return(0); +} /* End fc_ns_rsp */ + +/**************************************************/ +/** fc_free_clearq **/ +/** **/ +/** Description: **/ +/** Called to free all clearq bufs for a device **/ +/** **/ +/** Returns: **/ +/** **/ +/**************************************************/ +_static_ void +fc_free_clearq( +dvi_t *dev_ptr) +{ + struct buf *bp, *nextbp; + FC_BRD_INFO * binfo; + + binfo = &dev_ptr->nodep->ap->info; + + /* Call iodone for all the CLEARQ error bufs */ + for (bp = dev_ptr->clear_head; bp != NULL; ) { + dev_ptr->clear_count--; + nextbp = bp->av_forw; + FCSTATCTR.fcpScsiTmo++; + fc_do_iodone(bp); + bp = nextbp; + } + dev_ptr->clear_head = NULL; + dev_ptr->flags &= ~SCSI_TQ_HALTED & ~SCSI_TQ_CLEARING; + + fc_restart_device(dev_ptr); + return; +} /* End fc_free_clearq */ + + +/****************************************************/ +/** fc_nextnode **/ +/** **/ +/** Description: **/ +/** Called during discovery or rediscovery **/ +/** **/ +/** Returns: **/ +/** **/ +/****************************************************/ +_static_ int +fc_nextnode( +fc_dev_ctl_t *p_dev_ctl, +NODELIST *ndlp) +{ + FC_BRD_INFO * binfo; + node_t * node_ptr; + dvi_t * dev_ptr; + iCfgParam * 
clp; + + binfo = &BINFO; + binfo->fc_fabrictmo = (2 * binfo->fc_ratov) + + ((4 * binfo->fc_edtov) / 1000) + 1; + + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* Device Discovery nextnode */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0242, /* ptr to msg structure */ + fc_mes0242, /* ptr to msg */ + fc_msgBlk0242.msgPreambleStr, /* begin varargs */ + (uint32)ndlp->nlp_state, + ndlp->nlp_DID, + (uint32)ndlp->nlp_flag, + binfo->fc_ffstate); /* end varargs */ + if (binfo->fc_flag & FC_FABRIC) { + if (binfo->fc_ffstate < FC_NS_QRY) { + return(0); + } + if ((binfo->fc_ffstate < FC_NODE_DISC) && binfo->fc_ns_retry) { + return(0); + } + } + + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + else { + FABRICTMO = fc_clk_set(p_dev_ctl, binfo->fc_fabrictmo, + fc_fabric_timeout, 0, 0); + } + + if ((ndlp->nlp_type & NLP_FCP_TARGET) && (ndlp->nlp_state == NLP_ALLOC)) { + if(clp[CFG_FIRST_CHECK].a_current) { + /* If we are an FCP node, update first_check flag for all LUNs */ + if ((node_ptr = (node_t * )ndlp->nlp_targetp) != NULL) { + for (dev_ptr = node_ptr->lunlist; dev_ptr != NULL; + dev_ptr = dev_ptr->next) { + dev_ptr->first_check = FIRST_CHECK_COND; + fc_device_changed(p_dev_ctl, dev_ptr); + } + } + } + } + + /* Check for ADISC Address Authentication */ + if (ndlp->nlp_action & NLP_DO_ADDR_AUTH) { + ndlp->nlp_flag &= ~(NLP_REQ_SND | NLP_REQ_SND_ADISC); + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + + if(ndlp->nlp_DID != NameServer_DID) + binfo->fc_nlp_cnt--; + + if (binfo->fc_nlp_cnt <= 0) { + /* If no nodes left to authenticate, redo discovery on any + * new nodes. 
+ */ + if (fc_nextauth(p_dev_ctl, fc_max_els_sent) == 0) { + binfo->fc_nlp_cnt = 0; + fc_nextdisc(p_dev_ctl, fc_max_els_sent); + } + } else { + fc_nextauth(p_dev_ctl, 1); + } + + return(0); + } + + /* Check for RSCN Discovery */ + if (ndlp->nlp_action & NLP_DO_RSCN) { + ndlp->nlp_flag &= ~(NLP_REQ_SND | NLP_REQ_SND_ADISC); + ndlp->nlp_action &= ~NLP_DO_RSCN; + binfo->fc_nlp_cnt--; + if ((ndlp->nlp_type & NLP_IP_NODE) && ndlp->nlp_bp) { + m_freem((fcipbuf_t *)ndlp->nlp_bp); + ndlp->nlp_bp = (uchar * )0; + } + + if (ndlp->nlp_type & NLP_FCP_TARGET) { + node_t * node_ptr; + dvi_t * dev_ptr; + + if ((node_ptr = (node_t * )ndlp->nlp_targetp) != NULL) { + /* restart any I/Os on this node */ + for (dev_ptr = node_ptr->lunlist; + dev_ptr != NULL; dev_ptr = dev_ptr->next) { + dev_ptr->queue_state = HALTED; + } + } + } + + if (binfo->fc_nlp_cnt <= 0) { + binfo->fc_nlp_cnt = 0; + fc_nextrscn(p_dev_ctl, fc_max_els_sent); + } else { + fc_nextrscn(p_dev_ctl, 1); + } + } + + /* Check for Address Discovery */ + if ((ndlp->nlp_action & NLP_DO_DISC_START) || + (ndlp->nlp_flag & NLP_REQ_SND)) { + ndlp->nlp_flag &= ~NLP_REQ_SND; + ndlp->nlp_action &= ~NLP_DO_DISC_START; + binfo->fc_nlp_cnt--; + + if (binfo->fc_nlp_cnt <= 0) { + binfo->fc_nlp_cnt = 0; + fc_nextdisc(p_dev_ctl, fc_max_els_sent); + } else { + fc_nextdisc(p_dev_ctl, 1); + } + } + + return(0); +} /* End fc_nextnode */ + + +/****************************************************/ +/** fc_nextdisc **/ +/** **/ +/** Description: **/ +/** Called during discovery or rediscovery **/ +/** **/ +/** Returns: **/ +/** **/ +/****************************************************/ +_static_ int +fc_nextdisc( +fc_dev_ctl_t *p_dev_ctl, +int sndcnt) +{ + FC_BRD_INFO * binfo; + MAILBOXQ * mb; + NODELIST * ndlp; + NODELIST * new_ndlp; + int cnt, skip; + uint32 did; + + binfo = &BINFO; + /* Device Discovery nextdisc */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0243, /* ptr to msg structure */ + fc_mes0243, /* ptr to msg */ + 
fc_msgBlk0243.msgPreambleStr, /* begin varargs */ + binfo->fc_nlp_cnt, + sndcnt, + binfo->fc_mbox_active); /* end varargs */ + binfo->fc_ffstate = FC_NODE_DISC; + binfo->fc_fabrictmo = (2 * binfo->fc_ratov) + + ((4 * binfo->fc_edtov) / 1000) + 1; + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + else { + FABRICTMO = fc_clk_set(p_dev_ctl, binfo->fc_fabrictmo, + fc_fabric_timeout, 0, 0); + } + + /* For MAXREQ requests, we must make sure all outstanding Mailbox + * commands have been processed. This is to ensure UNREG_LOGINs + * complete before we try to relogin. + */ + if (sndcnt == fc_max_els_sent) { + if (binfo->fc_mbox_active) { + binfo->fc_flag |= FC_DELAY_PLOGI; + return(fc_max_els_sent); + } + } + + cnt = 0; + skip = 0; + binfo->fc_flag &= ~FC_NLP_MORE; + + /* We can start discovery right now */ + /* Fire out PLOGIs on all nodes marked for discovery */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + if ((ndlp->nlp_action & NLP_DO_DISC_START) && + (ndlp->nlp_DID != NameServer_DID)) { + if(!(ndlp->nlp_flag & (NLP_REQ_SND | NLP_REG_INP | NLP_RM_ENTRY))) { + binfo->fc_nlp_cnt++; + did = ndlp->nlp_DID; + if(did == 0) + did = ndlp->nlp_oldDID; + /* Start discovery for this node */ + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)((ulong)did), + (uint32)0, (ushort)0, ndlp); + cnt++; + + if ((binfo->fc_nlp_cnt >= fc_max_els_sent) || + (cnt == sndcnt)) { + binfo->fc_flag |= FC_NLP_MORE; + return(cnt); + } + } + else + skip++; + } + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + if ((binfo->fc_nlp_cnt) || skip) + return(binfo->fc_nlp_cnt); + 
+ /* This should turn off DELAYED ABTS for ELS timeouts */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + fc_set_slim(binfo, (MAILBOX * )mb, 0x052198, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + + /* Nothing to authenticate, so CLEAR_LA right now */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + binfo->fc_ffstate = FC_CLEAR_LA; + fc_clear_la(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } else { + /* Device Discovery completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0244, /* ptr to msg structure */ + fc_mes0244, /* ptr to msg */ + fc_msgBlk0244.msgPreambleStr); /* begin & end varargs */ + binfo->fc_ffstate = FC_ERROR; + } + + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, FABRICTMO); + FABRICTMO = 0; + } + return(0); +} /* End fc_nextdisc */ + + +/****************************************************/ +/** fc_nextauth **/ +/** **/ +/** Description: **/ +/** Called during rediscovery **/ +/** **/ +/** Returns: **/ +/** **/ +/****************************************************/ +_static_ int +fc_nextauth( +fc_dev_ctl_t *p_dev_ctl, +int sndcnt) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + NODELIST * ndlp; + NODELIST * new_ndlp; + int cnt; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + /* Device Discovery next authentication */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0245, /* ptr to msg structure */ + fc_mes0245, /* ptr to msg */ + fc_msgBlk0245.msgPreambleStr, /* begin varargs */ + binfo->fc_nlp_cnt, + sndcnt, + binfo->fc_mbox_active); /* end varargs */ + cnt = 0; + binfo->fc_flag &= ~FC_NLP_MORE; + binfo->fc_fabrictmo = (2 * binfo->fc_ratov) + + ((4 * binfo->fc_edtov) / 1000) + 1; + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + else { + FABRICTMO = 
fc_clk_set(p_dev_ctl, binfo->fc_fabrictmo, + fc_fabric_timeout, 0, 0); + } + + /* We can start rediscovery right now */ + /* Fire out ADISC on all nodes marked for addr_auth */ + + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + if (ndlp->nlp_action & NLP_DO_ADDR_AUTH) { + if (ndlp->nlp_flag & (NLP_RM_ENTRY | NLP_REQ_SND_ADISC | + NLP_REQ_SND | NLP_REG_INP)) + goto loop1; + + if ((ndlp->nlp_type & NLP_FCP_TARGET) && + ((!clp[CFG_USE_ADISC].a_current) || (ndlp->nlp_Rpi == 0))) { + /* Force regular discovery on this node */ + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + ndlp->nlp_action |= NLP_DO_DISC_START; + goto loop1; + } else { + if ((ndlp->nlp_type & NLP_IP_NODE) && (ndlp->nlp_Rpi == 0)) { + /* Force regular discovery on this node */ + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + goto loop1; + } + } + + if((ndlp->nlp_type & (NLP_FCP_TARGET | NLP_IP_NODE)) == 0) { + /* Force regular discovery on this node */ + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + ndlp->nlp_action |= NLP_DO_DISC_START; + goto loop1; + } + + binfo->fc_nlp_cnt++; + /* Start authentication */ + fc_els_cmd(binfo, ELS_CMD_ADISC, (void *)((ulong)ndlp->nlp_DID), + (uint32)0, (ushort)0, ndlp); + cnt++; + if ((binfo->fc_nlp_cnt >= fc_max_els_sent) || + (cnt == sndcnt)) { + binfo->fc_flag |= FC_NLP_MORE; + return(cnt); + } + } +loop1: + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) 
+ ndlp = binfo->fc_nlpmap_start; + } + + return(binfo->fc_nlp_cnt); +} /* End fc_nextauth */ + +/****************************************************/ +/** fc_nextrscn **/ +/** **/ +/** Description: **/ +/** Called during RSCN processing **/ +/** **/ +/** Returns: **/ +/** **/ +/****************************************************/ +_static_ int +fc_nextrscn( +fc_dev_ctl_t *p_dev_ctl, +int sndcnt) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + NODELIST * ndlp; + NODELIST * new_ndlp; + MAILBOXQ * mb; + struct buf * bp, * nextbp; + RING * rp; + IOCBQ * xmitiq; + IOCB * iocb; + MATCHMAP * mp; + int i, j, cnt; + uint32 did; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + /* Device Discovery next RSCN */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0246, /* ptr to msg structure */ + fc_mes0246, /* ptr to msg */ + fc_msgBlk0246.msgPreambleStr, /* begin varargs */ + binfo->fc_nlp_cnt, + sndcnt, + binfo->fc_mbox_active, + binfo->fc_flag); /* end varargs */ + cnt = 0; + if (binfo->fc_flag & FC_RSCN_DISC_TMR) + goto out; + + /* Are we waiting for a NameServer Query to complete */ + if (binfo->fc_flag & FC_NSLOGI_TMR) + return(1); + + if (binfo->fc_mbox_active) { + binfo->fc_flag |= FC_DELAY_PLOGI; + return(1); + } + + binfo->fc_flag &= ~FC_NLP_MORE; + + if(FABRICTMO) { + fc_clk_res(p_dev_ctl, binfo->fc_fabrictmo, FABRICTMO); + } + else { + FABRICTMO = fc_clk_set(p_dev_ctl, binfo->fc_fabrictmo, + fc_fabric_timeout, 0, 0); + } + + /* We can start rediscovery right now */ + /* Fire out PLOGI on all nodes marked for rscn */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + if (ndlp->nlp_action & NLP_DO_RSCN) { + if (ndlp->nlp_flag & (NLP_RM_ENTRY | NLP_REQ_SND_ADISC | + NLP_REQ_SND | 
NLP_REG_INP)) + goto loop2; + + did = ndlp->nlp_DID; + if(did == 0) { + did = ndlp->nlp_oldDID; + if(did == 0) + goto loop2; + } + + binfo->fc_nlp_cnt++; + + /* We are always using ADISC for RSCN validation */ + if ((ndlp->nlp_type & NLP_FCP_TARGET) && (ndlp->nlp_Rpi == 0)) { + /* Force regular discovery on this node */ + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)((ulong)did), + (uint32)0, (ushort)0, ndlp); + } else { + if (((ndlp->nlp_type & NLP_IP_NODE) && (ndlp->nlp_Rpi == 0)) || + ((ndlp->nlp_type & (NLP_FCP_TARGET | NLP_IP_NODE)) == 0)) { + /* Force regular discovery on this node */ + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)((ulong)did), + (uint32)0, (ushort)0, ndlp); + } + else { + fc_els_cmd(binfo, ELS_CMD_ADISC,(void *)((ulong)did), + (uint32)0, (ushort)0, ndlp); + } + } + cnt++; + if ((binfo->fc_nlp_cnt >= fc_max_els_sent) || + (cnt == sndcnt)) { + binfo->fc_flag |= FC_NLP_MORE; + return(cnt); + } + } +loop2: + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + if (binfo->fc_nlp_cnt) + return(binfo->fc_nlp_cnt); + +out: + if (binfo->fc_flag & FC_RSCN_DISCOVERY) { + /* Discovery RSCN */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0247, /* ptr to msg structure */ + fc_mes0247, /* ptr to msg */ + fc_msgBlk0247.msgPreambleStr, /* begin varargs */ + binfo->fc_defer_rscn.q_cnt, + binfo->fc_flag, + (ulong)(binfo->fc_rscn_disc_wdt)); /* end varargs */ + if(binfo->fc_rscn_disc_wdt == 0) { + binfo->fc_rscn_disc_wdt = fc_clk_set(p_dev_ctl, + ((binfo->fc_edtov / 1000) + 1), fc_rscndisc_timeout, 0, 0); + /* Free any deferred RSCNs */ + fc_flush_rscn_defer(p_dev_ctl); + return(0); + } + + if(!(binfo->fc_flag & FC_RSCN_DISC_TMR)) + return(0); + + binfo->fc_flag &= ~(FC_RSCN_DISC_TMR | FC_RSCN_DISCOVERY); + binfo->fc_rscn_disc_wdt = 0; + + /* RSCN match on all DIDs in NameServer */ + binfo->fc_rscn_id_list[0] = 
0x03000000; + binfo->fc_rscn_id_cnt = 1; + + /* Free any deferred RSCNs */ + fc_flush_rscn_defer(p_dev_ctl); + + /* Authenticate all nodes in nlplist */ + ndlp = binfo->fc_nlpbind_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + while(ndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)ndlp->nlp_listp_next; + + /* Skip over FABRIC nodes and myself */ + if ((ndlp->nlp_DID == binfo->fc_myDID) || + (ndlp->nlp_type & NLP_FABRIC) || + ((ndlp->nlp_DID & CT_DID_MASK) == CT_DID_MASK)) + goto loop3; + + if (ndlp->nlp_state == NLP_ALLOC) { + /* Mark node for authentication */ + ndlp->nlp_action |= NLP_DO_RSCN; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + + /* We are always using ADISC for RSCN validation */ + } +loop3: + ndlp = new_ndlp; + if(ndlp == (NODELIST *)&binfo->fc_nlpbind_start) + ndlp = binfo->fc_nlpunmap_start; + if(ndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + ndlp = binfo->fc_nlpmap_start; + } + + fc_issue_ns_query(p_dev_ctl, (void *)0, (void *)0); + return(0); + } + + if (binfo->fc_defer_rscn.q_first) { + uint32 * lp; + D_ID rdid; + uint32 cmd; + + /* process any deferred RSCNs */ + /* Deferred RSCN */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0248, /* ptr to msg structure */ + fc_mes0248, /* ptr to msg */ + fc_msgBlk0248.msgPreambleStr, /* begin varargs */ + binfo->fc_defer_rscn.q_cnt, + binfo->fc_flag); /* end varargs */ + binfo->fc_rscn_id_cnt = 0; + rp = &binfo->fc_ring[FC_ELS_RING]; + while (binfo->fc_defer_rscn.q_first) { + xmitiq = (IOCBQ * )binfo->fc_defer_rscn.q_first; + if ((binfo->fc_defer_rscn.q_first = xmitiq->q) == 0) { + binfo->fc_defer_rscn.q_last = 0; + } + binfo->fc_defer_rscn.q_cnt--; + iocb = &xmitiq->iocb; + mp = *((MATCHMAP **)iocb); + *((MATCHMAP **)iocb) = 0; + xmitiq->q = NULL; + + lp = (uint32 * )mp->virt; + cmd = *lp++; + i = SWAP_DATA(cmd) & 0xffff; /* payload length */ + i 
-= sizeof(uint32); /* take off word 0 */ + while (i) { + rdid.un.word = *lp++; + rdid.un.word = SWAP_DATA(rdid.un.word); + if(binfo->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) { + for(j=0;j<binfo->fc_rscn_id_cnt;j++) { + if(binfo->fc_rscn_id_list[j] == rdid.un.word) { + goto skip_id; + } + } + binfo->fc_rscn_id_list[binfo->fc_rscn_id_cnt++] = rdid.un.word; + } + else { + binfo->fc_flag |= FC_RSCN_DISCOVERY; + goto out1; + } +skip_id: + cnt += (fc_handle_rscn(p_dev_ctl, &rdid)); + i -= sizeof(uint32); + } + +out1: + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + + i = 1; + /* free resources associated with this iocb and repost the ring buffers */ + if (!(binfo->fc_flag & FC_SLI2)) { + for (i = 1; i < (int)iocb->ulpBdeCount; i++) { + mp = fc_getvaddr(p_dev_ctl, rp, (uchar * )((ulong)iocb->un.cont[i].bdeAddress)); + if (mp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + } + } + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + if (binfo->fc_flag & FC_RSCN_DISCOVERY) + goto out; + } + if (cnt == 0) { + /* no need for nameserver login */ + fc_nextrscn(p_dev_ctl, fc_max_els_sent); + } + else + fc_issue_ns_query(p_dev_ctl, (void *)0, (void *)0); + + return(0); + } + + binfo->fc_flag &= ~FC_RSCN_MODE; + binfo->fc_rscn_id_cnt = 0; + + /* This should turn off DELAYED ABTS for ELS timeouts */ + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + fc_set_slim(binfo, (MAILBOX * )mb, 0x052198, 0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + /* Device Discovery completes */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0249, /* ptr to msg structure */ + fc_mes0249, /* ptr to msg */ + fc_msgBlk0249.msgPreambleStr, /* begin varargs */ + binfo->fc_flag); /* end varargs */ + /* Fix up any changed RPIs in FCP IOCBs queued up a txq */ + fc_fcp_fix_txq(p_dev_ctl); + + + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, FABRICTMO); + FABRICTMO = 0; + } + + if(clp[CFG_FCP_ON].a_current) { + + 
fc_restart_all_devices(p_dev_ctl); + + /* Call iodone for any commands that timed out previously */ + for (bp = p_dev_ctl->timeout_head; bp != NULL; ) { + nextbp = bp->av_forw; + bp->b_error = ETIMEDOUT; + bp->b_flags |= B_ERROR; + bp->b_resid = bp->b_bcount; + FCSTATCTR.fcpScsiTmo++; + fc_do_iodone(bp); + bp = nextbp; + } + p_dev_ctl->timeout_count = 0; + p_dev_ctl->timeout_head = NULL; + /* Send down any saved FCP commands */ + fc_issue_cmd(p_dev_ctl); + } + return(0); +} /* End fc_nextrscn */ + + +_static_ int +fc_online( +fc_dev_ctl_t * p_dev_ctl) +{ + FC_BRD_INFO * binfo; + int ipri; + int j; + + if(p_dev_ctl) { + ipri = disable_lock(FC_LVL, &CMD_LOCK); + binfo = &BINFO; + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + unlock_enable(ipri, &CMD_LOCK); + return(0); + } + /* Bring Adapter online */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0458, /* ptr to msg structure */ + fc_mes0458, /* ptr to msg */ + fc_msgBlk0458.msgPreambleStr); /* begin & end varargs */ + binfo->fc_flag &= ~FC_OFFLINE_MODE; + + fc_brdreset(p_dev_ctl); + unlock_enable(ipri, &CMD_LOCK); + fc_cfg_init(p_dev_ctl); + return(0); + } + + fc_diag_state = DDI_ONDI; + + /* + * find the device in the dev_array if it is there + */ + for (j = 0; j < MAX_FC_BRDS; j++) { + p_dev_ctl = DD_CTL.p_dev[j]; + if (p_dev_ctl) { + ipri = disable_lock(FC_LVL, &CMD_LOCK); + binfo = &BINFO; + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) + continue; + /* Bring Adapter online */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0459, /* ptr to msg structure */ + fc_mes0459, /* ptr to msg */ + fc_msgBlk0459.msgPreambleStr); /* begin & end varargs */ + binfo->fc_flag &= ~FC_OFFLINE_MODE; + + fc_brdreset(p_dev_ctl); + unlock_enable(ipri, &CMD_LOCK); + fc_cfg_init(p_dev_ctl); + continue; + } + } + return(0); +} /* End fc_online */ + + +_static_ int +fc_offline( +fc_dev_ctl_t * p_dev_ctl) +{ + FC_BRD_INFO * binfo; + int ipri; + int j; + + if(p_dev_ctl) { + ipri = disable_lock(FC_LVL, &CMD_LOCK); + binfo = 
&BINFO; + if (binfo->fc_flag & FC_OFFLINE_MODE) { + unlock_enable(ipri, &CMD_LOCK); + return(0); + } + /* Bring Adapter offline */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0460, /* ptr to msg structure */ + fc_mes0460, /* ptr to msg */ + fc_msgBlk0460.msgPreambleStr); /* begin & end varargs */ + + fc_cfg_remove(p_dev_ctl); + binfo->fc_flag |= FC_OFFLINE_MODE; + + unlock_enable(ipri, &CMD_LOCK); + return(0); + } + fc_diag_state = DDI_OFFDI; + + /* + * find the device in the dev_array if it is there + */ + for (j = 0; j < MAX_FC_BRDS; j++) { + p_dev_ctl = DD_CTL.p_dev[j]; + if (p_dev_ctl) { + ipri = disable_lock(FC_LVL, &CMD_LOCK); + binfo = &BINFO; + if (binfo->fc_flag & FC_OFFLINE_MODE) { + unlock_enable(ipri, &CMD_LOCK); + continue; + } + /* Bring Adapter offline */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0452, /* ptr to msg structure */ + fc_mes0452, /* ptr to msg */ + fc_msgBlk0452.msgPreambleStr); /* begin & end varargs */ + + fc_cfg_remove(p_dev_ctl); + binfo->fc_flag |= FC_OFFLINE_MODE; + unlock_enable(ipri, &CMD_LOCK); + continue; + } + } + return(0); +} /* End fc_offline */ + + +_static_ int +fc_attach( +int index, +uint32 *p_uio) /* pointer to driver specific structure */ +{ + fc_dev_ctl_t * p_dev_ctl; + FC_BRD_INFO * binfo; + iCfgParam * clp; + int rc, i; + + if ((p_dev_ctl = DD_CTL.p_dev[index]) == NULL) { + rc = ENOMEM; + return(rc); + } + + binfo = &BINFO; + fc_diag_state = DDI_ONDI; + + binfo->fc_brd_no = index; /* FC board number */ + binfo->fc_p_dev_ctl = (uchar * )p_dev_ctl; + binfo->nlptimer = 1; + binfo->fc_fcpnodev.nlp_Rpi = 0xfffe; + binfo->fc_nlpbind_start = (NODELIST *)&binfo->fc_nlpbind_start; + binfo->fc_nlpbind_end = (NODELIST *)&binfo->fc_nlpbind_start; + binfo->fc_nlpmap_start = (NODELIST *)&binfo->fc_nlpmap_start; + binfo->fc_nlpmap_end = (NODELIST *)&binfo->fc_nlpmap_start; + binfo->fc_nlpunmap_start = (NODELIST *)&binfo->fc_nlpunmap_start; + binfo->fc_nlpunmap_end = (NODELIST 
*)&binfo->fc_nlpunmap_start; + + /* Initialize current value of config parameters from default */ + clp = DD_CTL.p_config[binfo->fc_brd_no]; + for(i=0;i<NUM_CFG_PARAM;i++) + clp[i].a_current = clp[i].a_default; + + binfo->fc_sli = (uchar)2; + clp[CFG_ZONE_RSCN].a_current = 1; /* ALWAYS force NS login on RSCN */ + + /* config the device */ + if ((rc = fc_cfg_init(p_dev_ctl))) { + return(rc); + } + return(0); +} + + +_static_ int +fc_detach( +int index) /* device unit number */ +{ + fc_dev_ctl_t * p_dev_ctl; + FC_BRD_INFO * binfo; + + p_dev_ctl = DD_CTL.p_dev[index]; + binfo = &BINFO; + if (p_dev_ctl == 0) + return(0); + + + if (!(binfo->fc_flag & FC_OFFLINE_MODE)) { + /* Free the device resources */ + fc_cfg_remove(p_dev_ctl); + } + + /* De-register the interrupt handler */ + if (p_dev_ctl->intr_inited) { + i_clear(&IHS); + p_dev_ctl->intr_inited = 0; + } + + fc_unmemmap(p_dev_ctl); + return(0); +} + + +/*****************************************************************************/ +/* + * NAME: fc_cfg_init + * + * FUNCTION: perform CFG_INIT function. Initialize the device control + * structure and get the adapter VPD data. 
+ * + * EXECUTION ENVIRONMENT: process only + * + * CALLED FROM: + * fc_config + * + * INPUT: + * p_dev_ctl - pointer to the dev_ctl area + * + * RETURNS: + * 0 - OK + * EEXIST - device name in use (from ns_attach) + * EINVAL - invalid parameter was passed + * EIO - permanent I/O error + */ +/*****************************************************************************/ +_static_ int +fc_cfg_init( +fc_dev_ctl_t *p_dev_ctl) /* pointer to the device control area */ +{ + int rc; /* return code */ + int i; + FC_BRD_INFO * binfo; + iCfgParam * clp; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + p_dev_ctl->ctl_correlator = (void * ) & DD_CTL; + + for (i = 0; i < NLP_MAXPAN; i++) { + p_dev_ctl->adapter_state[i] = CLOSED; + } + + if ((rc = fc_pcimap(p_dev_ctl))) { + return(rc); + } + + if ((rc = fc_memmap(p_dev_ctl))) { + return(rc); + } + + /* offset from beginning of SLIM */ + BINFO.fc_mboxaddr = 0; + + BINFO.fc_mbox_active = 0; + BINFO.fc_ns_retry = 0; + BINFO.fc_process_LA = 0; + BINFO.fc_edtov = FF_DEF_EDTOV; + BINFO.fc_ratov = FF_DEF_RATOV; + BINFO.fc_altov = FF_DEF_ALTOV; + BINFO.fc_arbtov = FF_DEF_ARBTOV; + + /* offset from beginning of register space */ + BINFO.fc_HAregaddr = (sizeof(uint32) * HA_REG_OFFSET); + BINFO.fc_FFregaddr = (sizeof(uint32) * CA_REG_OFFSET); + BINFO.fc_STATregaddr = (sizeof(uint32) * HS_REG_OFFSET); + BINFO.fc_HCregaddr = (sizeof(uint32) * HC_REG_OFFSET); + BINFO.fc_BCregaddr = (sizeof(uint32) * BC_REG_OFFSET); + + + /* save the dev_ctl address in the NDD correlator field */ + NDD.ndd_name = DDS.logical_name;/* point to the name contained in the dds */ + NDD.ndd_alias = DDS.dev_alias; /* point to the name contained in the dds */ + + + + binfo->fc_ring[FC_IP_RING].fc_tx.q_max = + (ushort)clp[CFG_XMT_Q_SIZE].a_current; + + p_dev_ctl->iostrat_event = EVENT_NULL; + p_dev_ctl->iostrat_head = NULL; + p_dev_ctl->iostrat_tail = NULL; + + /* + * Perform any device-specific initialization necessary at the + * CFG_INIT time. 
If there is any error during the device initialization, + * the CFG_INIT will fail. Also get VPD data. + */ + if ((rc = fc_ffinit(p_dev_ctl))) { + return(rc); + } + + /* Now setup physical address */ + fc_bcopy(binfo->fc_portname.IEEE, p_dev_ctl->phys_addr, 6); + + return(0); +} /* End fc_cfg_init */ + + +/*****************************************************************************/ +/* + * NAME: fc_cfg_remove + * + * FUNCTION: Remove the device resources that have been allocated during + * CFG_INIT configuration time. + * + * EXECUTION ENVIRONMENT: process only + * + * NOTES: + * + * CALLED FROM: + * fc_config + * + * INPUT: + * p_dev_ctl - address of a pointer to the dev control structure + * + * RETURNS: + * none. + */ +/*****************************************************************************/ +_static_ void +fc_cfg_remove( +fc_dev_ctl_t *p_dev_ctl) /* point to the dev_ctl area */ +{ + fc_free_rpilist(p_dev_ctl, 0); + + /* Release the watchdog timers and disable board interrupts */ + fc_ffcleanup(p_dev_ctl); + + fc_free_buffer(p_dev_ctl); /* free device buffers */ + + fc_brdreset(p_dev_ctl); + +} /* End fc_cfg_remove */ + + +/*****************************************************************************/ +/* + * NAME: fc_ffcleanup + * + * EXECUTION ENVIRONMENT: process only + * + * CALLED FROM: + * CFG_TERM + * + * INPUT: + * p_dev_ctl - pointer to the dev_ctl area. 
+ * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_ffcleanup( +fc_dev_ctl_t *p_dev_ctl) /* pointer to the dev_ctl area */ +{ + int i; + RING * rp; + FC_BRD_INFO * binfo; + void *ioa; + MAILBOX * mb; + + binfo = &BINFO; + binfo->fc_process_LA = 0; + + /* Disable all but the mailbox interrupt */ + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), HC_MBINT_ENA); + FC_UNMAP_MEMIO(ioa); + + /* Issue unreg_login command to logout all nodes */ + if (p_dev_ctl->init_eventTag) { + /* Get a buffer for mailbox command */ + if ((mb = (MAILBOX * )fc_mem_get(binfo, MEM_MBOX)) == NULL) { + } else { + fc_unreg_login(binfo, 0xffff, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + /* Clear all interrupt enable conditions */ + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), 0); + FC_UNMAP_MEMIO(ioa); + + for (i = 0; i < binfo->fc_ffnumrings; i++) { + rp = &binfo->fc_ring[i]; + /* Clear the transmit watchdog timer */ + if (rp->fc_wdt_inited) { + if(RINGTMO) { + fc_clk_can(p_dev_ctl, RINGTMO); + RINGTMO = 0; + } + rp->fc_wdt_inited = 0; + } + } + + if(MBOXTMO) { + fc_clk_can(p_dev_ctl, MBOXTMO); + MBOXTMO = 0; + } + if(FABRICTMO) { + fc_clk_can(p_dev_ctl, FABRICTMO); + FABRICTMO = 0; + } + + fc_flush_rscn_defer(p_dev_ctl); + + fc_flush_clk_set(p_dev_ctl, fc_delay_timeout); + + fc_flush_clk_set(p_dev_ctl, lpfc_scsi_selto_timeout); + +} /* End fc_ffcleanup */ + + +/*****************************************************************************/ +/* + * NAME: fc_start + * + * FUNCTION: Initialize and activate the adapter. + * + * EXECUTION ENVIRONMENT: process or interrupt + * + * CALLED FROM: + * fc_config + * + * INPUT: + * p_dev_ctl - pointer to the dev_ctl area. 
+ * + * RETURNS: + * NONE + */ +/*****************************************************************************/ + +_static_ void +fc_start( +fc_dev_ctl_t *p_dev_ctl) /* pointer to the dev_ctl area */ +{ + uint32 i, j; + FC_BRD_INFO * binfo; + iCfgParam * clp; + void * ioa; + RING * rp; + + /* Activate the adapter and allocate all the resources needed */ + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* Enable appropriate host interrupts */ + i = (uint32) (HC_MBINT_ENA | HC_ERINT_ENA); + if (binfo->fc_ffnumrings > 0) + i |= HC_R0INT_ENA; + if (binfo->fc_ffnumrings > 1) + i |= HC_R1INT_ENA; + if (binfo->fc_ffnumrings > 2) + i |= HC_R2INT_ENA; + if (binfo->fc_ffnumrings > 3) + i |= HC_R3INT_ENA; + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), i); + FC_UNMAP_MEMIO(ioa); + + for (i = 0; i < (uint32)binfo->fc_ffnumrings; i++) { + /* Initialize / post buffers to ring */ + fc_setup_ring(p_dev_ctl, i); + + if (i == FC_ELS_RING) { + /* Now post receive buffers to the ring */ + rp = &binfo->fc_ring[i]; + for (j = 0; j < 64; j++) + fc_post_buffer(p_dev_ctl, rp, 2); + } + } + + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if(clp[CFG_NETWORK_ON].a_current) { + rp = &binfo->fc_ring[FC_IP_RING]; + i = clp[CFG_POST_IP_BUF].a_current; + while(i) { + fc_post_mbuf(p_dev_ctl, rp, 2); + i -= 2; + } + } + + /* set up the watchdog timer control structure section */ + binfo->fc_fabrictmo = FF_DEF_RATOV + 1; + +} /* End fc_start */ + + +_static_ void +fc_process_reglogin( +fc_dev_ctl_t *p_dev_ctl, /* pointer to the dev_ctl area */ +NODELIST *ndlp) +{ + node_t * node_ptr; + RING * rp; + FC_BRD_INFO * binfo; + iCfgParam * clp; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + ndlp->nlp_flag &= ~NLP_REG_INP; + if (ndlp->nlp_DID == Fabric_DID) { + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + } else { + /* If we are an FCP node, update the rpi */ + if 
(ndlp->nlp_type & NLP_FCP_TARGET) { + if ((node_ptr = (node_t * )ndlp->nlp_targetp) != NULL) { + node_ptr->rpi = (ushort)ndlp->nlp_Rpi; + node_ptr->last_good_rpi = (ushort)ndlp->nlp_Rpi; + node_ptr->nlp = ndlp; + node_ptr->flags &= ~FC_NODEV_TMO; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + } + else { + int dev_index; + + dev_index = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + node_ptr = binfo->device_queue_hash[dev_index].node_ptr; + if(node_ptr) { + /* This is a new device that entered the loop */ + node_ptr->nlp = ndlp; + node_ptr->rpi = (ushort)ndlp->nlp_Rpi; + node_ptr->last_good_rpi = (ushort)ndlp->nlp_Rpi; + node_ptr->scsi_id = dev_index; + ndlp->nlp_targetp = (uchar *)node_ptr; + node_ptr->flags &= ~FC_NODEV_TMO; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + } + } + } + + if((ndlp->nlp_DID & CT_DID_MASK) == CT_DID_MASK) + ndlp->nlp_state = NLP_LOGIN; + + /* HBA Mgmt */ + if(ndlp->nlp_DID == FDMI_DID) { + ndlp->nlp_state = NLP_LOGIN; + return; + } + + /* If we are a NameServer, go to next phase */ + if (ndlp->nlp_DID == NameServer_DID) { + int fabcmd; + + ndlp->nlp_state = NLP_LOGIN; + + if(binfo->fc_ffstate == FC_READY) { + fabcmd = SLI_CTNS_GID_FT; + } + else { + fabcmd = SLI_CTNS_RFT_ID; + } + + /* Issue RFT_ID / GID_FT to Nameserver */ + if (fc_ns_cmd(p_dev_ctl, ndlp, fabcmd)) { + /* error so start discovery */ + /* Done with NameServer for now, but keep logged in */ + ndlp->nlp_action &= ~NLP_DO_RSCN; + + /* Fire out PLOGIs on nodes marked for discovery */ + if ((binfo->fc_nlp_cnt <= 0) && + !(binfo->fc_flag & FC_NLP_MORE)) { + binfo->fc_nlp_cnt = 0; + if ((binfo->fc_ffstate == FC_READY) && + (binfo->fc_flag & FC_RSCN_MODE)) { + fc_nextrscn(p_dev_ctl, fc_max_els_sent); + } + else { + fc_nextnode(p_dev_ctl, ndlp); + } + } + else { + fc_nextnode(p_dev_ctl, ndlp); + } + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + } + return; + } + + /* If we are in the middle of Discovery */ + if ((ndlp->nlp_type & NLP_FCP_TARGET) || + (ndlp->nlp_action & NLP_DO_DISC_START) || + 
(ndlp->nlp_action & NLP_DO_ADDR_AUTH) || + (ndlp->nlp_action & NLP_DO_RSCN) || + (ndlp->nlp_action & NLP_DO_SCSICMD) || + (binfo->fc_flag & FC_PT2PT) || + (ndlp->nlp_portname.nameType != NAME_IEEE)) { + + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + if((!(binfo->fc_flag & FC_PT2PT)) && (ndlp->nlp_action == 0)) { + if(binfo->fc_ffstate == FC_READY) { + ndlp->nlp_action |= NLP_DO_RSCN; + } + else { + ndlp->nlp_action |= NLP_DO_DISC_START; + } + } + if(clp[CFG_FCP_ON].a_current) { + ndlp->nlp_state = NLP_PRLI; + if((ndlp->nlp_flag & NLP_RCV_PLOGI) && + (!(ndlp->nlp_action) || (ndlp->nlp_flag & NLP_REQ_SND)) && + !(binfo->fc_flag & FC_PT2PT)) { + ndlp->nlp_state = NLP_LOGIN; + } + else { + if((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) { + fc_els_cmd(binfo, ELS_CMD_PRLI, + (void *)((ulong)ndlp->nlp_DID), (uint32)0, (ushort)0, ndlp); + } + else + fc_nextnode(p_dev_ctl, ndlp); + } + } else { + /* establish a new exchange for login registration */ + if ((ndlp->nlp_Xri == 0) && + (ndlp->nlp_type & NLP_IP_NODE) && + ((ndlp->nlp_DID & CT_DID_MASK) != CT_DID_MASK) && + !(ndlp->nlp_flag & NLP_RPI_XRI)) { + ndlp->nlp_flag |= NLP_RPI_XRI; + rp = &binfo->fc_ring[FC_ELS_RING]; + fc_create_xri(binfo, rp, ndlp); + } + if(!(ndlp->nlp_flag & NLP_RCV_PLOGI)) + fc_nextnode(p_dev_ctl, ndlp); + } + } else { + ndlp->nlp_flag &= ~NLP_FARP_SND; + ndlp->nlp_action &= ~NLP_DO_ADDR_AUTH; + /* establish a new exchange for Nport login registration */ + if ((ndlp->nlp_Xri == 0) && + ((ndlp->nlp_DID & CT_DID_MASK) != CT_DID_MASK) && + !(ndlp->nlp_flag & NLP_RPI_XRI)) { + ndlp->nlp_flag |= NLP_RPI_XRI; + rp = &binfo->fc_ring[FC_ELS_RING]; + fc_create_xri(binfo, rp, ndlp); /* IP ONLY */ + } + } + ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + } + return; +} + +_static_ int +fc_snd_scsi_req( +fc_dev_ctl_t *p_dev_ctl, +NAME_TYPE *wwn, +MATCHMAP *bmp, +DMATCHMAP *fcpmp, +DMATCHMAP *omatp, +uint32 count, +struct dev_info *dev_ptr) +{ + FC_BRD_INFO *binfo; + NODELIST * ndlp; 
+ RING * rp; + IOCBQ * temp; + IOCB * cmd; + ULP_BDE64 * bpl; + FCP_CMND * inqcmnd; + fc_buf_t * fcptr; + node_t * map_node_ptr; + struct dev_info * map_dev_ptr; + uint32 did; + fc_lun_t lun; + int i; + + binfo = &BINFO; + if(((ndlp = fc_findnode_wwpn(binfo, NLP_SEARCH_ALL, wwn)) == 0) || + (!(binfo->fc_flag & FC_SLI2))) { /* MUST be SLI2 */ + return(EACCES); + } + + if(ndlp->nlp_flag & NLP_REQ_SND) { + return(ENODEV); + } + + if(ndlp->nlp_state <= NLP_LOGIN) { + if ((ndlp->nlp_DID == binfo->fc_myDID) || + (ndlp->nlp_DID & Fabric_DID_MASK)) { + return(ENODEV); + } + ndlp->nlp_action |= NLP_DO_SCSICMD; + if((ndlp->nlp_state == NLP_LOGIN) && ndlp->nlp_Rpi) { + /* Need to send PRLI */ + fc_els_cmd(binfo, ELS_CMD_PRLI, + (void *)((ulong)ndlp->nlp_DID), (uint32)0, (ushort)0, ndlp); + } + else { + /* Need to send PLOGI */ + did = ndlp->nlp_DID; + if(did == 0) { + did = ndlp->nlp_oldDID; + } + if(!(ndlp->nlp_flag & NLP_NS_REMOVED)) { + fc_els_cmd(binfo, ELS_CMD_PLOGI, + (void *)((ulong)did), (uint32)0, (ushort)0, ndlp); + } + } + return(ENODEV); + } + + inqcmnd = (FCP_CMND *)fcpmp->dfc.virt; + lun = ((inqcmnd->fcpLunMsl >> FC_LUN_SHIFT) & 0xff); + + map_node_ptr = 0; + map_dev_ptr = 0; + + if (ndlp->nlp_type & NLP_SEED_MASK) { + /* If this is a mapped target, check qdepth limits */ + i = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + if ((map_node_ptr = binfo->device_queue_hash[i].node_ptr) != NULL) { + + if (map_node_ptr->tgt_queue_depth && + (map_node_ptr->tgt_queue_depth == map_node_ptr->num_active_io)) + return(ENODEV); + + if ((map_dev_ptr = fc_find_lun(binfo, i, lun))) { + if ((map_dev_ptr->active_io_count >= map_dev_ptr->fcp_cur_queue_depth) || + (map_dev_ptr->stop_send_io)) + return(ENODEV); + } + } + } + + rp = &binfo->fc_ring[FC_FCP_RING]; + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == NULL) { + return(EACCES); + } + + fc_bzero((void *)dev_ptr, sizeof(dev_ptr)); + dev_ptr->lun_id = lun; + dev_ptr->opened = TRUE; + dev_ptr->fcp_lun_queue_depth = 1; + 
dev_ptr->fcp_cur_queue_depth = 1; + dev_ptr->queue_state = ACTIVE_PASSTHRU; + dev_ptr->pend_head = (T_SCSIBUF *)map_node_ptr; + dev_ptr->pend_tail = (T_SCSIBUF *)map_dev_ptr; + + fcptr = (fc_buf_t *)fcpmp->dfc.virt; + fcptr->dev_ptr = dev_ptr; + fcptr->phys_adr = (char *)fcpmp->dfc.phys; + fcptr->sc_bufp = (T_SCSIBUF *)omatp; + fcptr->flags = 0; + /* set up an iotag so we can match the completion iocb */ + for (i = 0; i < MAX_FCP_CMDS; i++) { + fcptr->iotag = rp->fc_iotag++; + if (rp->fc_iotag >= MAX_FCP_CMDS) + rp->fc_iotag = 1; + if (binfo->fc_table->fcp_array[fcptr->iotag] == 0) + break; + } + if (i >= MAX_FCP_CMDS) { + /* No more command slots available, retry later */ + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + return(EACCES); + } + + fc_bzero((void *)temp, sizeof(IOCBQ)); + cmd = &temp->iocb; + + bpl = (ULP_BDE64 * )bmp->virt; + + cmd->un.fcpi64.bdl.ulpIoTag32 = (uint32)0; + cmd->un.fcpi64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys); + cmd->un.fcpi64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys); + cmd->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL; + cmd->ulpBdeCount = 1; + fcptr->bmp = bmp; + temp->bpl = (uchar *)0; + + cmd->ulpContext = ndlp->nlp_Rpi; + cmd->ulpIoTag = fcptr->iotag; + /* + * if device is FCP-2 device, set the following bit that says + * to run the FC-TAPE protocol. 
+ */ + if (ndlp->id.nlp_fcp_info & NLP_FCP_2_DEVICE) { + cmd->ulpFCP2Rcvy = 1; + } + cmd->ulpClass = (ndlp->id.nlp_fcp_info & 0x0f); + cmd->ulpOwner = OWN_CHIP; + + /* Hardcode 30 second timeout for I/O to complete */ + curtime(&fcptr->timeout); + cmd->ulpRsvdByte = fc_inq_sn_tmo; + fcptr->timeout = ((ulong)fcptr->timeout + (31 * fc_ticks_per_second)); + + switch(fcptr->fcp_cmd.fcpCntl3) { + case READ_DATA: + /* Set up for SCSI read */ + cmd->ulpCommand = CMD_FCP_IREAD64_CR; + cmd->ulpPU = PARM_READ_CHECK; + cmd->un.fcpi.fcpi_parm = count; + cmd->un.fcpi64.bdl.bdeSize = ((omatp->dfc_flag+2) * sizeof(ULP_BDE64)); + cmd->ulpBdeCount = 1; + break; + + case WRITE_DATA: + /* Set up for SCSI write */ + cmd->ulpCommand = CMD_FCP_IWRITE64_CR; + cmd->un.fcpi64.bdl.bdeSize = ((omatp->dfc_flag+2) * sizeof(ULP_BDE64)); + cmd->ulpBdeCount = 1; + break; + default: + /* Set up for SCSI command */ + cmd->ulpCommand = CMD_FCP_ICMND64_CR; + cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(ULP_BDE64)); + cmd->ulpBdeCount = 1; + break; + } + + cmd->ulpLe = 1; + /* Queue cmd chain to last iocb entry in xmit queue */ + if (rp->fc_tx.q_first == NULL) { + rp->fc_tx.q_first = (uchar * )temp; + } else { + ((IOCBQ * )(rp->fc_tx.q_last))->q = (uchar * )temp; + } + rp->fc_tx.q_last = (uchar * )temp; + rp->fc_tx.q_cnt++; + + fc_enq_fcbuf_active(rp, fcptr); + + if(map_dev_ptr) + map_dev_ptr->active_io_count++; + if(map_node_ptr) + map_node_ptr->num_active_io++; + dev_ptr->active_io_count++; + FCSTATCTR.fcpCmd++; + + issue_iocb_cmd(binfo, rp, 0); + return(0); +} + + +/****************************************************************************** +* Function name : fc_parse_binding_entry +* +* Description : Parse binding entry for WWNN & WWPN +* +* ASCII Input string example: 2000123456789abc:lpfc1t0 +* +* Return : 0 = Success +* Greater than 0 = Binding entry syntax error. SEE defs +* FC_SYNTAX_ERR_XXXXXX. 
+******************************************************************************/ +_static_ int +fc_parse_binding_entry( fc_dev_ctl_t *p_dev_ctl, + uchar *inbuf, uchar *outbuf, + int in_size, int out_size, + int bind_type, + unsigned int *sum, int entry, int *lpfc_num) +{ + int brd; + int c1, cvert_cnt, sumtmp; + + FC_BRD_INFO * binfo = &BINFO; + + char ds_lpfc[] = "lpfc"; + + *lpfc_num = -1; + + /* Parse 16 digit ASC hex address */ + if( bind_type == FC_BIND_DID) outbuf++; + cvert_cnt = fc_asc_seq_to_hex( p_dev_ctl, in_size, out_size, (char *)inbuf, (char *)outbuf); + if(cvert_cnt < 0) + return(FC_SYNTAX_ERR_ASC_CONVERT); + inbuf += (ulong)cvert_cnt; + + /* Parse colon */ + if(*inbuf++ != ':') + return(FC_SYNTAX_ERR_EXP_COLON); + + /* Parse lpfc */ + if(fc_strncmp( (char *)inbuf, ds_lpfc, (sizeof(ds_lpfc)-1))) + return(FC_SYNTAX_ERR_EXP_LPFC); + inbuf += sizeof(ds_lpfc)-1; + + /* Parse lpfc number */ + /* Get 1st lpfc digit */ + c1 = *inbuf++; + if(fc_is_digit(c1) == 0) + goto err_lpfc_num; + sumtmp = c1 - 0x30; + + /* Get 2nd lpfc digit */ + c1 = *inbuf; + if(fc_is_digit(c1) == 0) + goto convert_instance; + inbuf++; + sumtmp = (sumtmp * 10) + c1 - 0x30; + if((sumtmp < 0) || (sumtmp > 15)) + goto err_lpfc_num; + goto convert_instance; + +err_lpfc_num: + + return(FC_SYNTAX_ERR_INV_LPFC_NUM); + + /* Convert from ddi instance number to adapter number */ +convert_instance: + + for(brd = 0; brd < MAX_FC_BRDS; brd++) { + if(fcinstance[brd] == sumtmp) + break; + } + if(binfo->fc_brd_no != brd) { + /* Skip this entry */ + return(FC_SYNTAX_OK_BUT_NOT_THIS_BRD); + } + + + /* Parse 't' */ + if(*inbuf++ != 't') + return(FC_SYNTAX_ERR_EXP_T); + + /* Parse target number */ + /* Get 1st target digit */ + c1 = *inbuf++; + if(fc_is_digit(c1) == 0) + goto err_target_num; + sumtmp = c1 - 0x30; + + /* Get 2nd target digit */ + c1 = *inbuf; + if(fc_is_digit(c1) == 0) + goto check_for_term; + inbuf++; + sumtmp = (sumtmp * 10) + c1 - 0x30; + + /* Get 3nd target digit */ + c1 = *inbuf; + 
if(fc_is_digit(c1) == 0) + goto check_for_term; + inbuf++; + sumtmp = (sumtmp * 10) + c1 - 0x30; + if((sumtmp < 0) || (sumtmp > 999)) + goto err_target_num; + goto check_for_term; + +err_target_num: + return(FC_SYNTAX_ERR_INV_TARGET_NUM); + + /* Test that input string in NULL terminated - End of input */ +check_for_term: + + if(*inbuf != 0) + return(FC_SYNTAX_ERR_EXP_NULL_TERM); + + + *sum = sumtmp; + return(FC_SYNTAX_OK); /* Success */ +} /* fc_parse_binding_entry */ + +void +issue_report_lun( +fc_dev_ctl_t *pd, +void *l1, +void *l2) +{ + FC_BRD_INFO * binfo = &pd->info; + dvi_t * di = (dvi_t *)l1; + RING * rp; + fc_buf_t * fcptr; + IOCBQ * temp; + IOCB * cmd; + ULP_BDE64 * bpl; + MATCHMAP * bmp; + MBUF_INFO * mbufp; + node_t * nodep; + int i, tmo; + + rp = &binfo->fc_ring[FC_FCP_RING]; + nodep = di->nodep; + + mbufp = (MBUF_INFO * )fc_mem_get(binfo, MEM_IOCB); + if (mbufp == NULL) { + nodep->rptlunstate = REPORT_LUN_COMPLETE; + return; + } + mbufp->virt = 0; + mbufp->phys = 0; + mbufp->flags = FC_MBUF_DMA; + mbufp->align = (int)4096; + mbufp->size = 4096; + + if (nodep->virtRptLunData == 0) { + fc_malloc(pd, mbufp); + if (mbufp->phys == NULL) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )mbufp); + nodep->rptlunstate = REPORT_LUN_COMPLETE; + return; + } + } else { + mbufp->phys = nodep->physRptLunData; + mbufp->virt = nodep->virtRptLunData; + } + + if ((fcptr = fc_deq_fcbuf(di)) == NULL) { + if (nodep->virtRptLunData == 0) + fc_free(pd, mbufp); + fc_mem_put(binfo, MEM_IOCB, (uchar * )mbufp); + nodep->rptlunstate = REPORT_LUN_COMPLETE; + return; + } + + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == NULL) { + if (nodep->virtRptLunData == 0) + fc_free(pd, mbufp); + fc_mem_put(binfo, MEM_IOCB, (uchar * )mbufp); + fc_enq_fcbuf(fcptr); + nodep->rptlunstate = REPORT_LUN_COMPLETE; + return; + } + + fc_bzero((void *)fcptr, sizeof(FCP_CMND) + sizeof(FCP_RSP)); + + /* + * Save the MBUF pointer. + * Buffer will be freed by handle_fcp_event(). 
+ */ + fcptr->sc_bufp = (void *)mbufp; + + /* + * Setup SCSI command block in FCP payload + */ + fcptr->fcp_cmd.fcpCdb[0]= 0xA0; /* SCSI Report Lun Command */ + + fcptr->fcp_cmd.fcpCdb[8]= 0x10; + fcptr->fcp_cmd.fcpCntl3 = READ_DATA; + fcptr->fcp_cmd.fcpDl = SWAP_DATA(RPTLUN_MIN_LEN); + + /* + * set up an iotag so we can match the completion iocb + */ + for (i = 0; i < MAX_FCP_CMDS; i++) { + fcptr->iotag = rp->fc_iotag++; + if (rp->fc_iotag >= MAX_FCP_CMDS) + rp->fc_iotag = 1; + if (binfo->fc_table->fcp_array[fcptr->iotag] == 0) + break; + } + if (i >= MAX_FCP_CMDS) { + /* + * No more command slots available + */ + if (nodep->virtRptLunData == 0) + fc_free(pd, mbufp); + fc_mem_put(binfo, MEM_IOCB, (uchar * )mbufp); + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_enq_fcbuf(fcptr); + nodep->rptlunstate = REPORT_LUN_COMPLETE; + return; + } + + fc_bzero((void *)temp, sizeof(IOCBQ)); + cmd = &temp->iocb; + temp->q = NULL; + + /* + * Allocate buffer for Buffer ptr list + */ + if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL)) == 0) { + if (nodep->virtRptLunData == 0) + fc_free(pd, mbufp); + fc_mem_put(binfo, MEM_IOCB, (uchar * )mbufp); + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_enq_fcbuf(fcptr); + nodep->rptlunstate = REPORT_LUN_COMPLETE; + return; + } + + bpl = (ULP_BDE64 * )bmp->virt; + bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(GET_PAYLOAD_PHYS_ADDR(fcptr))); + bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr))); + bpl->tus.f.bdeSize = sizeof(FCP_CMND); + bpl->tus.f.bdeFlags = BUFF_USE_CMND; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + bpl++; + bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(GET_PAYLOAD_PHYS_ADDR(fcptr)+sizeof(FCP_CMND))); + bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr)+sizeof(FCP_CMND))); + bpl->tus.f.bdeSize = sizeof(FCP_RSP); + bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV); + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + bpl++; + + cmd->un.fcpi64.bdl.ulpIoTag32 = (uint32)0; 
+ cmd->un.fcpi64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys); + cmd->un.fcpi64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys); + cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(ULP_BDE64)); + cmd->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL; + cmd->ulpBdeCount = 1; + fcptr->bmp = bmp; + temp->bpl = (uchar *)0; + + cmd->ulpContext = nodep->rpi; + cmd->ulpIoTag = fcptr->iotag; + + /* + * if device is FCP-2 device, set the following bit that says + * to run the FC-TAPE protocol. + */ + if (nodep->nlp->id.nlp_fcp_info & NLP_FCP_2_DEVICE) { + cmd->ulpFCP2Rcvy = 1; + } + cmd->ulpClass = (nodep->nlp->id.nlp_fcp_info & 0x0f); + cmd->ulpOwner = OWN_CHIP; + + /* + * Hardcode 2*RATOV second timeout for I/O to complete + */ + tmo = (2 * binfo->fc_ratov); + curtime(&fcptr->timeout); + cmd->ulpRsvdByte = tmo; + tmo++; /* Make scsi timeout longer then cmd tmo */ + fcptr->timeout = ((ulong)fcptr->timeout + (tmo * fc_ticks_per_second)); + + /* + * Read Data + */ + cmd->ulpCommand = CMD_FCP_IREAD64_CR; + cmd->ulpPU = PARM_READ_CHECK; + cmd->un.fcpi.fcpi_parm = RPTLUN_MIN_LEN; + + bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(mbufp->phys)); + bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(mbufp->phys)); + bpl->tus.f.bdeSize = RPTLUN_MIN_LEN; + bpl->tus.f.bdeFlags = BUFF_USE_RCV; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + bpl++; + + cmd->un.fcpi64.bdl.bdeSize += sizeof(ULP_BDE64); + cmd->ulpBdeCount = 1; + + cmd->ulpLe = 1; + + /* + * Queue cmd chain to last iocb entry in xmit queue + */ + if (rp->fc_tx.q_first == NULL) { + rp->fc_tx.q_first = (uchar * )temp; + } else { + ((IOCBQ * )(rp->fc_tx.q_last))->q = (uchar * )temp; + } + rp->fc_tx.q_last = (uchar * )temp; + rp->fc_tx.q_cnt++; + + fc_enq_fcbuf_active(rp, fcptr); + fcptr->flags |= FCBUF_INTERNAL; + + di->active_io_count++; + nodep->num_active_io++; + FCSTATCTR.fcpCmd++; + + issue_iocb_cmd(binfo, rp, 0); + return; +} +/****************************************/ +/* Print Format Declarations Start Here */ 
/****************************************/

/* Forward declaration: format into a caller-supplied buffer using a
 * pre-captured (fixed) argument pointer; see fc_sprintf_fargs below.
 */
_local_ int fc_sprintf_fargs( uchar *string, void *control, char *fixArgs);

#define LENGTH_LINE 71
#define MAX_IO_SIZE 32 * 2      /* iobuf cache size */
#define MAX_TBUFF 18 * 2        /* temp buffer size */

typedef union {                 /* Pointer to table of arguments. */
    ulong *ip;
    ulong *lip;
    ulong *uip;
    ulong *luip;
    ulong **luipp;
    uchar *cp;
    uchar **csp;
} ARGLIST;

/* Formatter state: output goes either to the caller's buffer
 * (string != NULL) or is accumulated in buf[] and flushed via
 * fc_print() when index reaches MAX_IO_SIZE.
 */
typedef struct {
    uchar *string;              /* caller's output buffer, or NULL for temp buf */
    long index;                 /* next free slot in buf[] */
    int count;                  /* current output column (used by %t tabbing) */
    uchar buf[MAX_IO_SIZE + MAX_TBUFF]; /* extra room to convert numbers */
} PRINTBLK;

/*
 * ASCII string declarations
 */
static char dig[] = {"0123456789ABCDEF"};
static char ds_disabled[] = "disabled";
static char ds_enabled[] = "enabled";
static char ds_none[] = "none";
static char ds_null_string[] = "";
static char ds_unknown[] = "unknown";

/*
 * Function Declarations
 */
_local_ int add_char( PRINTBLK * io, uchar ch);
_local_ int add_string( PRINTBLK * io, uchar * string);
_local_ int fmtout( uchar *ostr, uchar *control, va_list inarg);
_local_ void print_string( PRINTBLK * io);
_local_ int long_itos( long val, uchar * cp, long base);


/**********************************************************/
/** expanded_len                                          */
/** determine the length of the string after expanding    */
/**********************************************************/
/* Control characters other than CR/LF render as two characters
 * ('^' plus letter, see add_char), so they count double here.
 * The scan stops at the first CR or LF.
 */
_local_
int expanded_len(
uchar *sp)
{
    register int i;
    uchar c;

    i = 0;
    while ((c = *sp++) != 0) {
        if (c < 0x1b) {
            if ((c == '\r') || (c == '\n'))
                break;          /* stop at cr or lf */
            i++;                /* double it */
        }
        i++;
    }
    return (i);
} /* expanded_len */

/*************************************************/
/** long_itos                                   **/
/** Convert long integer to decimal string.     **/
/** Returns the string length.                  **/
/*************************************************/
/* A negative base requests a signed conversion in base (-base);
 * a positive base treats the value as unsigned.  The result is
 * built right-to-left in tempc[] and copied (with its NUL) to cp.
 */
_local_
int long_itos(
long val,       /* Number to convert. */
uchar * cp,     /* Store the string here. */
long base)      /* Conversion base. */
{
    uchar tempc[16];
    uchar *tcp;
    int n=0;        /* number of characters in result */
    ulong uval;     /* unsigned value */

    *(tcp=(tempc+15))=0;
    if (base<0) {
        /* needs signed conversion */
        base= -base;
        if (val<0) {
            n=1;                /* remember to prepend the '-' sign */
            val = -val;
        }
        do {
            *(--tcp)=dig[ (int)(val%base)];
            val /= base;
        } while (val);
    }
    else {
        uval=val;
        do {
            *(--tcp)=dig[ (int)(uval%base)];
            uval/=base;
        } while(uval);
    }
    if (n)
        *(--tcp)='-';
    n=(int)((long)&tempc[15] - (long)tcp);
    fc_bcopy( tcp, cp, n+1);    /* from, to, cnt */
    return(n);
} /* long_itos */

/****************************************/
/** add_char                           **/
/****************************************/
/* Append one character to the print block.  Control characters
 * expand to '^X' form (via the recursive call) and adjust the
 * column counter io->count.  Returns 0 when writing to a caller
 * buffer, else the new buf[] index; callers flush the temp buffer
 * when the return value reaches MAX_IO_SIZE.
 */
_local_
int add_char(
PRINTBLK * io,
uchar ch)
{
    int index;

    if (ch < 0x1b) {
        switch (ch) {
        case 0xd:               /* carriage return */
            io->count = -1;     /* will be incremented to 0, below */
            break;
        case 0x8:               /* back space */
            io->count -= 2;     /* will be incremented to 1 less, below */
            break;
        case 0xa:               /* line feed */
        case 0x7:               /* bell */
        case 0x9:               /* horizontal tab */
        case 0xe:               /* shift out */
        case 0xf:               /* shift in */
            io->count--;        /* will be incremented to same, below */
            break;
        default:
            /* render other control chars as '^' + printable letter */
            add_char(io, '^');
            ch |= 0x40;
            break;
        }
    }
    io->count++;
    if (io->string != NULL) {
        /* caller-supplied buffer: append and keep it NUL-terminated */
        *io->string = ch;
        *++io->string = '\0';
        return (0);
    }

    index = io->index;
    if( index < (MAX_IO_SIZE + MAX_TBUFF -2)) {
        io->buf[index] = ch;
        io->buf[++index] = '\0';
    }
    return (++io->index);
} /* add_char */

/****************************************/
/** add_string                         **/
/****************************************/
/* Append a NUL-terminated string to the print block.  Same
 * destination/return conventions as add_char, but characters are
 * copied verbatim (no control-character expansion) and io->count
 * is not updated.
 */
_local_
int add_string(
PRINTBLK * io,
uchar * string)
{
    if (io->string != NULL) {
        io->string =
            (uchar *)fc_strcpy( (char *)io->string, (char *)string); /* dst, src */
        return (0);
    }
    return (io->index = ((long)(fc_strcpy( (char *)&io->buf[io->index],
        (char *)string))) - ((long)((char *)io->buf))); /* dst, src */
} /* add_string */

/*****************************************************/
/** print_string                                    **/
/** takes print defn, prints data, zeroes index     **/
/*****************************************************/
/* Flush the accumulated temp buffer to the console/log via
 * fc_print() and reset the buffer index for further output.
 */
_local_
void print_string(
PRINTBLK * io)
{
    io->index = 0;
    fc_print( (char *)&io->buf[0],0,0);
} /* print_string */

/*VARARGS*/
/*****************************************/
/** fmtout                              **/
/** Low-level string print routines.    **/
/*****************************************/
/* Hand-rolled printf engine used by the driver's logging path.
 * Supported specifiers include the usual %d/%u/%x/%o/%b/%c/%s plus
 * driver-specific ones: %a (8-digit hex), %e/%E (6/8-byte WWN-style
 * "xx-xx-..." strings, or "unknown" when all-zero), %f (enabled/
 * disabled flag), %y (yes/no flag), %i (dotted-quad IP), %t (tab to
 * column), and "% " (width taken from the argument list).
 *
 * Returns the number of output columns produced (io.count).
 *
 * NOTE(review): chr is folded to lower case (chr |= 0x20) for every
 * specifier except 'E', so the comparisons against 'C' and 'F' below
 * can never be true — %C and %F appear to behave exactly like %c and
 * %f; confirm against callers before relying on the upper-case forms.
 * NOTE(review): 'box' is initialized FALSE and never set, and
 * 'altctl' is always NULL, so the embedded-newline flush and the
 * mainloop restart paths look dead.
 * NOTE(review): 'arg' is seeded by casting a va_list to (ulong *) —
 * nonportable; actual argument fetching goes through va_arg(inarg).
 */
_local_
int fmtout (
uchar *ostr,    /* Output buffer, or NULL if temp */
uchar *control, /* Control string */
va_list inarg)  /* Argument list */
{
    short temp;     /* Output channel number if string NULL. */
    int leftadj;    /* Negative parameter width specified. */
    int longflag;   /* Integer is long. */
    int box = FALSE; /* not from body */
    int chr;        /* control string character */
    uchar padchar;  /* Pad character, typically space. */
    int width;      /* Width of subfield. */
    int length;     /* Length of subfield. */
    uchar *altctl;  /* temp control string */
    ARGLIST altarg;
    ARGLIST arg;
    PRINTBLK io;

    union {         /* Accumulate parameter value here. */
        uint16 tlong;
        uint16 tulong;
        long ltlong;
        ulong ltulong;
        uchar str[4];
        uint16 twds[2];
    } lw;

    union {         /* Used by case %c */
        int intchar;
        uchar chr[2];
    } ichar;

    arg.uip = (ulong *)inarg;
    io.index = 0;
    io.count = 0;

    if( (io.string = ostr) != (uchar *)NULL)
        *ostr = 0;  /* initialize output string to null */
    control--;

mainloop:
    altctl = NULL;

    while ((length = *++control) != 0)
    {   /* while more in control string */
        if (length !='%') { /* format control */
            if ((length == '\n') && box) {
                fc_print( (char *)&io.buf[0],0,0);
                continue;
            }
            if (add_char( &io, (uchar) length) >= MAX_IO_SIZE)
                print_string(&io);  /* write it */
            continue;
        }
        /* parse optional '-' (left adjust), width, and 'l' modifier */
        leftadj = (*++control == '-');
        if (leftadj)
            ++control;
        padchar = ' ';
        width = 0;
        if ((uint16)(length = (*control - '0')) <= 9) {
            if (length == 0)
                padchar = '0';  /* leading zero means zero-pad */
            width = length;
            while ((uint16)(length = (*++control - '0')) <= 9 )
                width = width*10+length;
        }
        longflag = ( *control == 'l');
        if ( longflag)
            ++control;

        chr = (int)(*control);
        if( chr != 'E') {
            chr |= 0x20;    /* fold to lower case; 'E' stays distinct */
        }

        switch (chr) {
        case 'a':           /* 8-digit zero-padded hex */
            longflag = 1;
            temp=16;
            padchar = '0';
            length = width = 8;
            goto nosign;
        case 'b':           /* binary */
            temp=2;
            goto nosign;
        case 'o':           /* octal */
            temp=8;
            goto nosign;
        case 'u':           /* unsigned decimal */
            temp=10;
            goto nosign;
        case 'x':           /* hex */
            temp=16;
            goto nosign;

        case 'e':           /* 6-byte id as "xx-xx-xx-xx-xx-xx" */
            ostr = (uchar *)va_arg(inarg, char *);
            if ((chr == 'e') &&
                ((*(long *)ostr) == (long)NULL) &&
                ((*(uint16 *)&ostr[4]) == (uint16)0)) {
                ostr = (uchar *)ds_unknown;
                length = 7;
                break;
            }
            temp = -1;
            length = MAX_IO_SIZE -1;
            fc_strcpy((char *)&io.buf[MAX_IO_SIZE],
                "00-00-00-00-00-00");   /* dst, src */
            do {
                /* +256 guarantees two hex digits; keep only those two */
                long_itos((long)( ostr[++temp] + 256), lw.str, 16);
                io.buf[++length] = lw.str[1];
                io.buf[++length] = lw.str[2];
            } while (++length < MAX_IO_SIZE+17);
            ostr = &io.buf[MAX_IO_SIZE];
            length = 17;
            break;

        case 'E':           /* 8-byte id as "xx-xx-xx-xx-xx-xx-xx-xx" */
            ostr = (uchar *)va_arg(inarg, char *);
            if ((chr == 'E')
                &&
                ((*(long *)ostr) == (long)NULL) &&
                ((*(long *)&ostr[4]) == (long)NULL)) {
                ostr = (uchar *)ds_unknown;
                length = 7;
                break;
            }
            temp = -1;
            length = MAX_IO_SIZE -1;
            fc_strcpy( (char *)&io.buf[MAX_IO_SIZE],
                "00-00-00-00-00-00-00-00"); /* dst, src */
            do {
                long_itos((long)( ostr[++temp] + 256), lw.str, 16);
                io.buf[++length] = lw.str[1];
                io.buf[++length] = lw.str[2];
            } while (++length < MAX_IO_SIZE+23);
            ostr = &io.buf[MAX_IO_SIZE];
            length = 23;
            break;

        case 'f': /* flags */
            ostr = (uchar *)ds_disabled;
            length = 8;
            if (va_arg(inarg, char *) != 0) { /* test value */
                ostr = (uchar *)ds_enabled;
                length = 7;
            }
            if (chr == 'F') {   /* NOTE(review): unreachable, see header */
                length -= 7;
                ostr = (uchar *)"-";
            }
            break;

        case 'i':           /* dotted-quad IP address */
            ostr = (uchar *)va_arg(inarg, char *);
            if ((chr == 'i') && *(long *)ostr == (long)NULL)
                goto putnone;
            temp = 0;
            length = MAX_IO_SIZE;
            do {
                length += long_itos((long) ostr[temp], &io.buf[length], 10);
                if ( ++temp >= 4)
                    break;
                io.buf[length] = '.';
                length++;
            } while (TRUE);
            ostr = &io.buf[MAX_IO_SIZE];
            length -= MAX_IO_SIZE;
            break;

        case 'y': /* flags */
            if ( va_arg(inarg, char *) != 0) { /* test value */
                ostr = (uchar*)"yes";
                length = 3;
            }
            else {
                ostr = (uchar*)"no";
                length = 2;
            }
            break;

        case 'c':
            if (chr == 'C') { /* normal, control, or none */
                /* NOTE(review): unreachable — chr was lower-cased above */
                if ((length = va_arg(inarg, int)) < ' ') {
                    if (length == 0) {
                        ostr = (uchar *)ds_none;
                        length = 4;
                    }
                    else {
                        io.buf[MAX_IO_SIZE] = '^';
                        io.buf[MAX_IO_SIZE+1] = ((uchar)length) + '@';
                        io.buf[MAX_IO_SIZE+2] = 0;
                        ostr = &io.buf[MAX_IO_SIZE];
                        length = 2;
                    }
                    arg.ip++;
                    break;
                }
            } /* normal, control, or none */

            ichar.intchar = va_arg(inarg, int);
            ostr = &ichar.chr[0];
            length=1;
            break;

        case 'd':
            temp = -10; /* negative base => signed conversion */
nosign:
            if (longflag)
                lw.ltulong = va_arg(inarg, ulong);
            else if (temp < 0)
                lw.ltlong = va_arg(inarg, long);
            else
                lw.ltulong = va_arg(inarg, ulong);
/*
nosign2:
*/
            length =
                long_itos( lw.ltlong, ostr = &io.buf[MAX_IO_SIZE], temp);
            break;

        case 's':
            ostr = (uchar *)va_arg(inarg, char *); /* string */
            if ((chr == 's') || (*ostr != '\0')) {
                length = expanded_len(ostr);
                break;
            }
putnone:
            ostr = (uchar *)ds_none;
            length = 4;
            break;

        case 't': /* tabbing */
            if ((width -= io.count) < 0) /* Spaces required to get to column. */
                width = 0;
            length = 0; /* nothing other than width padding. */
            ostr = (uchar *)ds_null_string;
            break;
        case ' ':   /* width supplied as an argument */
            width = va_arg(inarg, int);
            length = 0; /* nothing other than width padding. */
            ostr = (uchar *)ds_null_string;
            break;

        default:    /* unknown specifier: echo the character itself */
            ostr=control;
            length=1;
            break;
        } /* switch on control */

        if (length < 0) { /* non printing */
            if (add_string( &io, ostr) >= MAX_IO_SIZE)
                print_string(&io); /* no more room, dump current buffer */
            continue;
        } /* non printing */


        /* right-adjusted: emit pad characters before the value */
        if (!leftadj && width > length) {
            while (--width >= length) {
                if (add_char( &io, padchar) >= MAX_IO_SIZE)
                    print_string(&io); /* write it */
            }
        }

        if (width>length)
            width -= length;
        else
            width = 0;

        if (length <= 1) {
            if (length == 1) {
                if (add_char( &io, *ostr) >= MAX_IO_SIZE)
                    print_string(&io); /* write it */
            }
        }
        else {
            while ((temp = *ostr++) != 0) {
                if (add_char( &io, (uchar) temp) >= MAX_IO_SIZE)
                    print_string(&io); /* write it */
            }
        }

        /* left-adjusted: emit any remaining pad after the value */
        while (--width >= 0) {
            if (add_char( &io, padchar) >= MAX_IO_SIZE)
                print_string(&io); /* write it */
        }

    } /* while more in control string */

    if (altctl != NULL) {
        control = altctl;
        arg.ip = altarg.ip;
        goto mainloop;
    }

    if (io.index) /* anything left? */
        print_string(&io); /* write it */

    return(io.count);
} /* fmtout */
/*FIXARGS*/
/* Wrapper used when the caller has already captured the varargs
 * pointer; forwards it to fmtout unchanged.
 */
_local_ int
fc_sprintf_fargs(
uchar *string,  /* output buffer */
void *control,  /* format string */
char *fixArgs)  /* control arguments */
{
    return( fmtout((uchar *)string, (uchar *)control, fixArgs));
} /* fc_sprintf_fargs */
/*VARARGS*/
/* sprintf-style entry point: format into 'string' and return the
 * number of output columns produced.
 */
int fc_sprintf_vargs(
    void *string,   /* output buffer */
    void *control,  /* format string */
    ...)            /* control arguments */
{
    int iocnt;
    va_list args;
    va_start(args, control);

    iocnt = fmtout((uchar *)string, (uchar *)control, args);
    va_end( args);
    return( iocnt);
} /* fc_sprintf_vargs */
/****************************************/
/** fc_log_printf_msg_vargs            **/
/****************************************/
/*
All log messages come through this routine.
All log messages are unique.
All log messages are defined by a msgLogDef messages structure.
*/
/* Formats the message into a stack buffer and hands it to
 * log_printf_msgblk().  Returns 0 when the message is disabled,
 * otherwise log_printf_msgblk()'s return value.
 *
 * NOTE(review): the early "disabled" return skips va_end(args);
 * harmless on most ABIs but technically required by C — confirm
 * and fix in a code change.
 */
/*VARARGS*/
_static_ int
fc_log_printf_msg_vargs(
    int brdno,
    msgLogDef * msg,    /* Pointer to LOG msg structure */
    void * control,
    ...)
{
    uchar str2[MAX_IO_SIZE + MAX_TBUFF]; /* extra room to convert numbers */
    int iocnt;
    int log_only;
    va_list args;
    va_start(args, control);

    log_only = 0;
    if( fc_check_this_log_msg_disabled( brdno, msg, &log_only))
        return(0); /* This LOG message disabled */

    /*
    If LOG message is disabled via any SW method, we SHOULD NOT get this far!
    We should have taken the above return.
    */

    str2[0] = '\0';
    iocnt = fc_sprintf_fargs(str2, control, args);
    va_end( args);

    return( log_printf_msgblk( brdno, msg, (char *)str2, log_only));
} /* fc_log_printf_msg_vargs */

/*****************************************************/
/** Function name : fc_check_this_log_msg_disabled  **/
/**                                                 **/
/** Description : Decide whether a log message is   **/
/**   suppressed by its output mode, the board's    **/
/**   log-only config, or the verbose mask.         **/
/**   Also reports the board's log-only setting     **/
/**   through *log_only.                            **/
/**                                                 **/
/** Return : 0 LOG message enabled                  **/
/**        : 1 LOG message disabled                 **/
/*****************************************************/
int fc_check_this_log_msg_disabled( int brdno,
    msgLogDef *msg,
    int *log_only)
{
    fc_dev_ctl_t * p_dev_ctl;
    iCfgParam * clp;
    int verbose;

    verbose = 0;
    if( msg->msgOutput == FC_MSG_OPUT_DISA)
        return(1); /* This LOG message disabled */

    if ((p_dev_ctl = DD_CTL.p_dev[brdno])) {
        clp = DD_CTL.p_config[brdno];
        if((*log_only = clp[CFG_LOG_ONLY].a_current) > 1)
            return(1); /* This LOG message disabled */
        verbose = clp[CFG_LOG_VERBOSE].a_current;
    }

    if( msg->msgOutput == FC_MSG_OPUT_FORCE)
        return(0); /* This LOG message enabled */
    /*
     * If this is a verbose message (INFO or WARN) and we are not in
     * verbose mode, return 1. If it is a verbose message and the verbose
     * error doesn't match our verbose mask, return 1.
     */
    if( (msg->msgType == FC_LOG_MSG_TYPE_INFO) ||
        (msg->msgType == FC_LOG_MSG_TYPE_WARN)) {
        /* LOG msg is INFO or WARN */
        if ((msg->msgMask & verbose) == 0)
            return(1); /* This LOG message disabled */
    }
    return(0); /* This LOG message enabled */
} /* fc_check_this_log_msg_disabled */

/*************************************************/
/** fc_asc_to_hex                               **/
/** Convert an ASCII hex character to hex.
**/ +/** Return Hex value if success **/ +/** -1 if character not ASCII hex **/ +/*************************************************/ + +_forward_ int +fc_asc_to_hex( + uchar c) /* Character to convert */ +{ +if (c >= '0' && c <= '9') + return(c - '0'); +else if (c >= 'A' && c <= 'F') + return(c - 'A'+ 10); +else if (c >= 'a' && c <= 'f') + return(c - 'a'+ 10); +else + return(-1); +} /* fc_asc_to_hex */ + +/***************************************************/ +/** fc_asc_seq_to_hex **/ +/** **/ +/** Convert an ASCII character sequence to a **/ +/** hex number sequence **/ +/** **/ +/** return >0 Success. Return number of ASCII **/ +/** hex characters converted. **/ +/** -1 Input byte count < 1 **/ +/** -2 Input byte count > max **/ +/** -3 Output buffer to small **/ +/** -4 Input character sequence not **/ +/** ASCII hex. **/ +/***************************************************/ + +/* +This routine converts an ASCII char stream of byte into +a stream of hex bytes. The byte order of the input and +output stream are identical. The caller must deal with +SWAPPING bytes if required. + +The maximum number of ASCII hex characters that can be +convert to hex is hard coded by the LOCAL define +MAX_ASC_HEX_CHARS_INPUT. + +Two ASCII hex input characters require 1 byte of output +buffer. + +A NULL terminator at the end of an ASCII hex input string +is not required nor is it counted in the strings byte size. + +To determine the byte size of the output buffer: +(1) Add 1 to input buffer byte size if size is odd. +(2) Output buffer size = input buffer size / 2. + +Therefore an input buffer containing 10 ASC hex chars +requires an output buffer size of 5 bytes. + +An input buffer containing 11 ASC hex chars requires an +output buffer size of 6 bytes. 
+*/ + +_forward_ int +fc_asc_seq_to_hex( fc_dev_ctl_t *p_dev_ctl, + int input_bc, /* Number of bytes (ASC hex chars) to be converted */ + int output_bc, /* Number of bytes in hex output buffer (modulo INT) */ + char *inp, /* Pointer to ASC hex input character sequence */ + char *outp) /* Pointer to hex output buffer */ +{ +#define HEX_DIGITS_PER_BYTE 2 +#define MAX_ASC_HEX_CHARS_INPUT 32 /* Limit damage if over-write */ +#define MAX_BUF_SIZE_HEX_OUTPUT (MAX_ASC_HEX_CHARS_INPUT / HEX_DIGITS_PER_BYTE) + + FC_BRD_INFO *binfo; + int lowNib, hiNib; + int inputCharsConverted; + uchar twoHexDig; + + binfo = &BINFO; + inputCharsConverted = 0; + lowNib = -1; + hiNib = -1; + + if(input_bc < 1) { + /* Convert ASC to hex. Input byte cnt < 1. */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1210, /* ptr to msg structure */ + fc_mes1210, /* ptr to msg */ + fc_msgBlk1210.msgPreambleStr); /* begin & end varargs */ + return(-1); + } + if(input_bc > MAX_ASC_HEX_CHARS_INPUT) { + /* Convert ASC to hex. Input byte cnt > max */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1211, /* ptr to msg structure */ + fc_mes1211, /* ptr to msg */ + fc_msgBlk1211.msgPreambleStr, /* begin varargs */ + MAX_ASC_HEX_CHARS_INPUT); /* end varargs */ + return(-2); + } + if((output_bc * 2) < input_bc) { + /* Convert ASC to hex. Output buffer to small. */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1212, /* ptr to msg structure */ + fc_mes1212, /* ptr to msg */ + fc_msgBlk1212.msgPreambleStr); /* begin & end varargs */ + return(-4); + } + + while( input_bc) { + twoHexDig = 0; + lowNib = -1; + hiNib = fc_asc_to_hex( *inp++); + if( --input_bc > 0) { + lowNib = fc_asc_to_hex( *inp++); + input_bc--; + } + if ((lowNib < 0) || (hiNib < 0)) { + /* Convert ASC to hex. Input char seq not ASC hex. 
*/ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1213, /* ptr to msg structure */ + fc_mes1213, /* ptr to msg */ + fc_msgBlk1213.msgPreambleStr); /* begin & end varargs */ + return( -4); + } + if( lowNib >= 0) { + /* There were 2 digits */ + hiNib <<= 4; + twoHexDig = (hiNib | lowNib); + inputCharsConverted += 2; + } + else { + /* There was a single digit */ + twoHexDig = lowNib; + inputCharsConverted++; + } + *outp++ = twoHexDig; + } /* while */ + return(inputCharsConverted); /* ASC to hex conversion complete. Return # of chars converted */ +} /* fc_asc_seq_to_hex */ + +/********************************************/ +/** fc_is_digit **/ +/** **/ +/** Check if ASCII input value is numeric. **/ +/** **/ +/** Return 0 = input NOT numeric **/ +/** 1 = input IS numeric **/ +/********************************************/ +_forward_ int +fc_is_digit(int chr) +{ + if( (chr >= '0') && (chr <= '9')) + return(1); + return(0); +} /* fc_is_digit */ + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcstratb.c current/drivers/scsi/lpfc/fcstratb.c --- reference/drivers/scsi/lpfc/fcstratb.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcstratb.c 2004-04-09 11:53:04.000000000 -0700 @@ -0,0 +1,2080 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. 
 *
 * This program is distributed in the hope that it will be useful,  *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of   *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the    *
 * GNU General Public License for more details, a copy of which     *
 * can be found in the file COPYING included with this package.     *
 *******************************************************************/

#include "fc_os.h"

#include "fc_hw.h"
#include "fc.h"

#include "fcdiag.h"
#include "fcfgparm.h"
#include "fcmsg.h"
#include "fc_crtn.h"
#include "fc_ertn.h"

extern fc_dd_ctl_t DD_CTL;
extern iCfgParam icfgparam[];




/* Some timers in data structures are stored in seconds, some environments
 * timeout functions work in ticks, thus some conversion is required.
 * Other externs, as needed for environemt are defined here.
 */
extern uint32 fc_scsi_abort_timeout_ticks;
extern uint32 fc_ticks_per_second;




/* Routine Declaration - Local */
_local_ void fc_deq_abort_bdr(dvi_t *dev_ptr);
_local_ void fc_deq_wait(dvi_t *dev_ptr);
/* End Routine Declaration - Local */

/* AlpaArray for assignment of scsid for scan-down == 2 */
_static_ uchar AlpaArray[] =
    {
    0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
    0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
    0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
    0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
    0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
    0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
    0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
    0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
    0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
    0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
    0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
    0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
    0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
    };


/* Queue SCSI task-management actions (Target Reset, LUN Reset, or
 * Abort Task Set, selected by 'flag') against one or more devices:
 * target == -1 means every target (bus reset); lun == -1 means every
 * LUN on the target.  When the link is up (rpi valid, FC_READY) the
 * task-management IOCB is queued via fc_enq_abort_bdr/fc_issue_cmd;
 * otherwise outstanding I/O is aborted and failed locally with EIO.
 * Returns the last device a task-management command was queued for,
 * or 0 when nothing was queued.
 * NOTE(review): returns 0 (not NULL) from a pointer-valued function;
 * same bit pattern, style-only.
 */
_static_ dvi_t *
fc_fcp_abort(
    fc_dev_ctl_t * p_dev_ctl,
    int flag,
    int target,
    int lun)
{
    FC_BRD_INFO * binfo;
    node_t * node_ptr;
    dvi_t * dev_ptr;
    dvi_t * xmt_devp, * devp;
    RING * rp;
    int i;

    binfo = &BINFO;
    if(binfo->fc_flag & FC_ESTABLISH_LINK)
        return(0);

    rp = &binfo->fc_ring[FC_FCP_RING];
    xmt_devp = 0;
    /* Clear the queues for one or more SCSI devices
     * flag will indicate perform a Target Reset, Lun Reset, or Abort Task Set
     * if target = -1, all targets (bus reset).
     * if lun = -1 all luns on the target.
     */
    for (i = 0; i < MAX_FC_TARGETS; i++) {
        if ((node_ptr = binfo->device_queue_hash[i].node_ptr) != NULL) {
            if ((target != -1) && (node_ptr->scsi_id != target))
                continue;
            dev_ptr = node_ptr->lunlist;
            if((flag == TARGET_RESET) &&
               (dev_ptr != NULL)) {
                if((node_ptr->rpi != 0xFFFE) && (binfo->fc_ffstate == FC_READY)) {
                    if(dev_ptr->flags & (SCSI_LUN_RESET | SCSI_ABORT_TSET)) {
                        /* check if we sent abort task set or reset lun */
                        for (devp = p_dev_ctl->ABORT_BDR_head; (devp != NULL);
                            devp = devp->ABORT_BDR_fwd) {
                            if(devp == dev_ptr)
                                break;
                        }
                        if(devp) {
                            /* we found devp, its not sent yet,
                             * so change it to target reset.
                             */
                            dev_ptr->flags &= ~CHK_SCSI_ABDR;
                            dev_ptr->flags |= SCSI_TARGET_RESET;
                        }
                        else {
                            /* just Q another task mgmt cmd, target reset */
                            dev_ptr->flags |= SCSI_TARGET_RESET;
                            fc_enq_abort_bdr(dev_ptr);
                        }
                        xmt_devp = dev_ptr;
                    }
                    else if(!(dev_ptr->flags & SCSI_TARGET_RESET)) {
                        dev_ptr->flags |= SCSI_TARGET_RESET;
                        fc_enq_abort_bdr(dev_ptr);
                        fc_issue_cmd(p_dev_ctl);
                        xmt_devp = dev_ptr;
                    }
                }
            }
            for (dev_ptr = node_ptr->lunlist; dev_ptr != NULL;
                dev_ptr = dev_ptr->next) {
                if ((lun != -1) && (dev_ptr->lun_id != lun))
                    continue;

                if(flag == TARGET_RESET) {
                    if((node_ptr->rpi != 0xFFFE) && (binfo->fc_ffstate == FC_READY)) {
                        dev_ptr->flags |= SCSI_TARGET_RESET;
                        dev_ptr->queue_state = HALTED;
                        fc_fail_pendq(dev_ptr, (char) EIO, 0);
                    }
                    else {
                        /* First send ABTS on outstanding I/Os in txp queue */
                        fc_abort_fcp_txpq(binfo, dev_ptr);

                        fc_fail_pendq(dev_ptr, (char) EIO, 0);
                        fc_fail_cmd(dev_ptr, (char) EIO, 0);
                    }
                }

                if((flag == LUN_RESET) &&
                   !(dev_ptr->flags & CHK_SCSI_ABDR)) {

                    if((node_ptr->rpi != 0xFFFE) && (binfo->fc_ffstate == FC_READY)) {
                        dev_ptr->flags |= SCSI_LUN_RESET;
                        fc_enq_abort_bdr(dev_ptr);
                        fc_issue_cmd(p_dev_ctl);
                        xmt_devp = dev_ptr;
                        dev_ptr->queue_state = HALTED;
                        fc_fail_pendq(dev_ptr, (char) EIO, 0);
                    }
                    else {
                        /* First send ABTS on outstanding I/Os in txp queue */
                        fc_abort_fcp_txpq(binfo, dev_ptr);

                        fc_fail_pendq(dev_ptr, (char) EIO, 0);
                        fc_fail_cmd(dev_ptr, (char) EIO, 0);
                    }
                }

                if((flag == ABORT_TASK_SET) &&
                   !(dev_ptr->flags & CHK_SCSI_ABDR)) {

                    if((node_ptr->rpi != 0xFFFE) && (binfo->fc_ffstate == FC_READY)) {
                        dev_ptr->flags |= SCSI_ABORT_TSET;
                        fc_enq_abort_bdr(dev_ptr);
                        fc_issue_cmd(p_dev_ctl);
                        xmt_devp = dev_ptr;
                        dev_ptr->queue_state = HALTED;
                        fc_fail_pendq(dev_ptr, (char) EIO, 0);
                    }
                    else {
                        /* First send ABTS on outstanding I/Os in txp queue */
                        fc_abort_fcp_txpq(binfo, dev_ptr);

                        fc_fail_pendq(dev_ptr, (char) EIO, 0);
                        fc_fail_cmd(dev_ptr, (char) EIO, 0);
                    }
                }
            }
        }
    }
    return(xmt_devp);
}

/* Build and queue a single FCP task-management IOCB (Abort Task Set /
 * Target Reset / LUN Reset per dev_ptr->flags) for 'lun' on the FCP
 * ring.  Allocates an fcbuf, IOCB, and (SLI2) a BDE list; assigns an
 * iotag; queues the IOCB on the ring's xmit queue and the fcbuf on the
 * active list.  Returns 0 on success, EIO when any resource (fcbuf,
 * IOCB, BPL, iotag slot) is unavailable — callers retry later.
 * NOTE(review): local 'lp' is assigned near the end but never used.
 */
_static_ int
issue_abdr(
    fc_dev_ctl_t * ap,
    dvi_t * dev_ptr,
    RING * rp,
    fc_lun_t lun)
{
    FC_BRD_INFO * binfo;
    iCfgParam * clp;
    fc_buf_t * fcptr;
    T_SCSIBUF * sbp;
    IOCB * cmd;
    IOCBQ * temp;
    uint32 * lp;
    MATCHMAP * bmp;
    ULP_BDE64 * bpl;
    int i;

    binfo = &ap->info;
    if ((fcptr = fc_deq_fcbuf(dev_ptr)) == NULL) {
        return(EIO);
    }

    if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == NULL) {
        fc_enq_fcbuf(fcptr);
        return(EIO);
    }

    {
        uint32 did;
        uint32 pan;
        uint32 sid;

        if ((dev_ptr->nodep) && (dev_ptr->nodep->nlp)) {
            did = dev_ptr->nodep->nlp->nlp_DID;
            pan = dev_ptr->nodep->nlp->id.nlp_pan;
            sid = dev_ptr->nodep->nlp->id.nlp_sid;
        } else {
            did = 0;
            pan = 0;
            sid = 0;
        }

        if (dev_ptr->flags & SCSI_ABORT_TSET) {
            /* Issue Abort Task Set I/O for LUN */
            fc_log_printf_msg_vargs( binfo->fc_brd_no,
                &fc_msgBlk0701,             /* ptr to msg structure */
                fc_mes0701,                 /* ptr to msg */
                fc_msgBlk0701.msgPreambleStr, /* begin varargs */
                (uint32)lun,
                did,
                FC_SCSID(pan, sid),
                dev_ptr->flags);            /* end varargs */
        } else if (dev_ptr->flags & SCSI_TARGET_RESET) {
            /* Issue Target Reset I/O */
            fc_log_printf_msg_vargs( binfo->fc_brd_no,
                &fc_msgBlk0702,             /* ptr to msg structure */
                fc_mes0702,                 /* ptr to msg */
                fc_msgBlk0702.msgPreambleStr, /* begin varargs */
                (uint32)lun,
                did,
                FC_SCSID(pan, sid),
                dev_ptr->flags);            /* end varargs */
        } else if (dev_ptr->flags & SCSI_LUN_RESET) {
            /* Issue LUN Reset I/O for LUN */
            fc_log_printf_msg_vargs( binfo->fc_brd_no,
                &fc_msgBlk0703,             /* ptr to msg structure */
                fc_mes0703,                 /* ptr to msg */
                fc_msgBlk0703.msgPreambleStr, /* begin varargs */
                (uint32)lun,
                did,
                FC_SCSID(pan, sid),
                dev_ptr->flags);            /* end varargs */
        }
    }

    sbp = &dev_ptr->scbuf;
    clp = DD_CTL.p_config[binfo->fc_brd_no];

    fc_bzero((void *)fcptr, sizeof(FCP_CMND) + sizeof(FCP_RSP));

    /* shift lun id into the right payload byte */
    fcptr->fcp_cmd.fcpLunMsl = lun << FC_LUN_SHIFT;
    fcptr->fcp_cmd.fcpLunLsl = 0;
    if (dev_ptr->nodep->addr_mode == VOLUME_SET_ADDRESSING) {
        fcptr->fcp_cmd.fcpLunMsl |= SWAP_DATA(0x40000000);
    }

    fcptr->sc_bufp = sbp;
    fcptr->flags = 0;
    sbp->bufstruct.b_flags = 0;
    sbp->bufstruct.b_error = 0;

    if (dev_ptr->flags & SCSI_ABORT_TSET) {
        /* Issue an Abort Task Set task management command */
        fcptr->fcp_cmd.fcpCntl2 = ABORT_TASK_SET;
    } else if (dev_ptr->flags & SCSI_TARGET_RESET) {
        /* Issue a Target Reset task management command */
        fcptr->fcp_cmd.fcpCntl2 = TARGET_RESET;
    } else if (dev_ptr->flags & SCSI_LUN_RESET) {
        /* Issue a Lun Reset task management command */
        fcptr->fcp_cmd.fcpCntl2 = LUN_RESET;
    }

    /* set up an iotag so we can match the completion iocb */
    for (i = 0; i < MAX_FCP_CMDS; i++) {
        fcptr->iotag = rp->fc_iotag++;
        if (rp->fc_iotag >= MAX_FCP_CMDS)
            rp->fc_iotag = 1;
        if (binfo->fc_table->fcp_array[fcptr->iotag] == 0)
            break;
    }

    if (i >= MAX_FCP_CMDS) {
        /* No more command slots available, retry later */
        fc_mem_put(binfo, MEM_IOCB, (uchar * )temp);
        fc_enq_fcbuf(fcptr);
        return(EIO);
    }

    fc_bzero((void *)temp, sizeof(IOCBQ)); /* zero the iocb entry */
    cmd = &temp->iocb;

    if (binfo->fc_flag & FC_SLI2) {
        /* Allocate buffer for Buffer ptr list */
        if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL)) == 0) {
            fc_mem_put(binfo, MEM_IOCB, (uchar * )temp);
            fc_enq_fcbuf(fcptr);
            return(EIO);
        }
        /* First BDE: the FCP_CMND payload; second BDE: the FCP_RSP */
        bpl = (ULP_BDE64 * )bmp->virt;
        bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(GET_PAYLOAD_PHYS_ADDR(fcptr)));
        bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr)));
        bpl->tus.f.bdeSize = sizeof(FCP_CMND);
        bpl->tus.f.bdeFlags = BUFF_USE_CMND;
        bpl->tus.w = PCIMEM_LONG(bpl->tus.w);

        bpl++;
        bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(GET_PAYLOAD_PHYS_ADDR(fcptr)+sizeof(FCP_CMND)));
        bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr)+sizeof(FCP_CMND)));
        bpl->tus.f.bdeSize = sizeof(FCP_RSP);
        bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
        bpl->tus.w = PCIMEM_LONG(bpl->tus.w);

        bpl++;
        cmd->un.fcpi64.bdl.ulpIoTag32 = (uint32)0;
        cmd->un.fcpi64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys);
        cmd->un.fcpi64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys);
        cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(ULP_BDE64));
        cmd->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;

        cmd->ulpCommand = CMD_FCP_ICMND64_CR;
        cmd->ulpBdeCount = 1;
        fcptr->bmp = bmp;
        temp->bpl = (uchar *)0;
    } else {
        /* SLI1: command and response buffers are described directly */
        cmd->un.fcpi.fcpi_cmnd.bdeAddress = (uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr));
        cmd->un.fcpi.fcpi_cmnd.bdeSize = sizeof(FCP_CMND);
        cmd->un.fcpi.fcpi_rsp.bdeAddress = (uint32)(putPaddrLow((GET_PAYLOAD_PHYS_ADDR(fcptr) + sizeof(FCP_CMND))));
        cmd->un.fcpi.fcpi_rsp.bdeSize = sizeof(FCP_RSP);
        cmd->ulpCommand = CMD_FCP_ICMND_CR;
        cmd->ulpBdeCount = 2;
        temp->bpl = (uchar *)0;
    }
    cmd->ulpContext = dev_ptr->nodep->rpi;
    cmd->ulpIoTag = fcptr->iotag;
    cmd->ulpClass = (dev_ptr->nodep->nlp->id.nlp_fcp_info & 0x0f);
    cmd->ulpOwner = OWN_CHIP;
    cmd->ulpLe = 1;

    /* Timeout for this command is 30 seconds */
    curtime(&fcptr->timeout);


    /* Need to set the FCP timeout in the fcptr structure and the IOCB
     * for this I/O to get the adapter to run a timer.
     */
    {
        uint32 time_out;

        time_out = fc_scsi_abort_timeout_ticks;
        if (binfo->fc_flag & FC_FABRIC) {
            /* fabric topology: allow for fabric round-trip delays */
            time_out += (fc_ticks_per_second *
                (clp[CFG_FCPFABRIC_TMO].a_current + (2 * binfo->fc_ratov)));
        }

        fcptr->timeout = ((ulong)fcptr->timeout + time_out + fc_scsi_abort_timeout_ticks);

        /* Set the FCP timeout in the IOCB to get the adapter to run a timer */
        if ((time_out / fc_ticks_per_second) < 256)
            cmd->ulpTimeout = time_out / fc_ticks_per_second;

    }

    lp = (uint32 * ) & fcptr->fcp_cmd;
    dev_ptr->active_io_count++;
    dev_ptr->nodep->num_active_io++;

    /* Queue command to last iocb entry in xmit queue */
    if (rp->fc_tx.q_first == NULL) {
        rp->fc_tx.q_first = (uchar * )temp;
    } else {
        ((IOCBQ * )(rp->fc_tx.q_last))->q = (uchar * )temp;
    }
    rp->fc_tx.q_last = (uchar * )temp;
    rp->fc_tx.q_cnt++;

    fc_enq_fcbuf_active(rp, fcptr);
    return (0);
}


/************************************************************************/
/*                                                                      */
/* NAME: fc_issue_cmd                                                   */
/*                                                                      */
/* FUNCTION: issues a waiting FCP command, or ABORT/BDR command         */
/*                                                                      */
/* EXECUTION ENVIRONMENT:                                               */
/* Called by a process or the interrupt handler                         */
/*                                                                      */
/* INPUTS:                                                              */
/* ap pointer to the adapter structure                                  */
/*                                                                      */
/* RETURN VALUE DESCRIPTION: none                                       */
/*                                                                      */
/* ERROR DESCRIPTION: none                                              */
/*                                                                      */
/* EXTERNAL PROCEDURES CALLED:                                          */
/* iodone                                                               */
/************************************************************************/
/* Drains the abort/BDR queue first, then walks the device waiting
 * queue issuing pending SCSI commands, honoring per-device and
 * per-target queue depths; devices that cannot make progress are
 * requeued (requeue_ptr marks the first requeued device so the walk
 * terminates).  Finally kicks the ring and, in polling mode, polls
 * for completion.
 */
_static_ void
fc_issue_cmd(
    fc_dev_ctl_t * ap)
{
    dvi_t * dev_ptr, * requeue_ptr;
    T_SCSIBUF * sbp;
    int rc, requeue, exit;
    FC_BRD_INFO * binfo;
    RING * rp;
    node_t * nodep;

    binfo = &ap->info;
    if(binfo->fc_flag & FC_ESTABLISH_LINK)
        return;

    rp = &binfo->fc_ring[FC_FCP_RING];

    /* ALUN */
    /* If the abort/bdr queue is not empty we deal with it first */
    for (dev_ptr = ap->ABORT_BDR_head; (dev_ptr != NULL);
        dev_ptr = ap->ABORT_BDR_head) {

        if(dev_ptr->flags & CHK_SCSI_ABDR) {
            rc = issue_abdr(ap, dev_ptr, rp, dev_ptr->lun_id);
            if (rc != 0) {
                /* out of resources: leave it queued and retry later */
                break;
            }
        }

        fc_deq_abort_bdr(dev_ptr);
    }

    requeue_ptr = NULL;
    exit = 0;

    /* See if there is something on the waiting queue */
    while (((dev_ptr = ap->DEVICE_WAITING_head) != NULL)
        && (binfo->fc_ffstate == FC_READY)
        && (dev_ptr != requeue_ptr)) {

        nodep = dev_ptr->nodep;
        /* Check if a target queue depth is set */
        if (nodep->rptlunstate == REPORT_LUN_ONGOING) {
            requeue = 1;
        } else if (nodep->tgt_queue_depth &&
            (nodep->tgt_queue_depth == nodep->num_active_io)) {
            if (dev_ptr->nodep->last_dev == NULL)
                dev_ptr->nodep->last_dev = dev_ptr;
            requeue = 1;
        } else if (dev_ptr->flags & (CHK_SCSI_ABDR | SCSI_TQ_HALTED)) {
            requeue = 1;
        } else {
            requeue = 0;

            while ((sbp = dev_ptr->pend_head) != NULL)
            {
                if ((dev_ptr->active_io_count >= dev_ptr->fcp_cur_queue_depth) ||
                    (dev_ptr->stop_send_io)) {
                    requeue = 1;
                    break;
                }
                if ((rc = issue_fcp_cmd(ap, dev_ptr, sbp, 1))) {
                    if (rc & FCP_REQUEUE) {
                        requeue = 1;
                        break;
                    } else if (rc & FCP_EXIT) {
                        exit = 1;
                        break;
                    }
                    continue;
                }
                /* command issued: unlink it from the pending list */
                dev_ptr->pend_count--;
                dev_ptr->pend_head = (T_SCSIBUF *) sbp->bufstruct.av_forw;
                if (dev_ptr->pend_head == NULL)
                    dev_ptr->pend_tail = NULL;
                else
                    dev_ptr->pend_head->bufstruct.av_back = NULL;

                /* Check if a target queue depth is set */
                if (nodep->tgt_queue_depth &&
                    (nodep->tgt_queue_depth == nodep->num_active_io)) {
                    /* requeue I/O if max cmds to tgt are outstanding */
                    if (dev_ptr->nodep->last_dev == NULL)
                        dev_ptr->nodep->last_dev = dev_ptr;
                    requeue = 1;
                    break;
                }
            } /* while pend_head */
        }
        if (exit)
            break;

        fc_deq_wait(dev_ptr);

        if (requeue) {
            if (requeue_ptr == NULL)
                requeue_ptr = dev_ptr;
            fc_enq_wait(dev_ptr);
        }

    } /* while wait queue */

    if (rp->fc_tx.q_cnt) {
        issue_iocb_cmd(binfo, rp, 0);
        /* [SYNC] */
        if (binfo->fc_flag & FC_POLL_MODE) {
            fc_polling(binfo, HA_R2ATT);
        }
    }

    return;

} /* End fc_issue_cmd */


+/**************************************************************************/ +/* */ +/* NAME: fc_enq_fcbuf_active, fc_enq_wait, fc_enq_fcbuf, fc_enq_abort_bdr */ +/* */ +/* FUNCTION: */ +/* Utility routines to handle queuing of device structures to each */ +/* of the queues in use. */ +/* */ +/* EXECUTION ENVIRONMENT: */ +/* */ +/* RETURN VALUE DESCRIPTION: none */ +/* */ +/* ERROR DESCRIPTION: The following errno values may be returned: */ +/* none */ +/* */ +/**************************************************************************/ +_static_ void +fc_enq_fcbuf_active( +RING *rp, /* Pointer to ring for fcbufs */ +fc_buf_t *fcptr) /* Pointer to fcbuf to enqueue */ +{ + FC_BRD_INFO * binfo; + fc_dev_ctl_t *p_dev_ctl; + + binfo = (FC_BRD_INFO * )(rp->fc_binfo); + p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl); + /* Sync the FCP_CMND payload data */ + /* Use correct offset and size for syncing */ + fc_mpdata_sync(fcptr->fc_cmd_dma_handle, (off_t)fcptr->offset, + sizeof(FCP_CMND), DDI_DMA_SYNC_FORDEV); + + /* Enqueue the fcbuf on the FCP ring active queue */ + if (rp->fc_txp.q_first) { + fcptr->fc_bkwd = (fc_buf_t * )rp->fc_txp.q_last; + ((fc_buf_t * )rp->fc_txp.q_last)->fc_fwd = fcptr; + rp->fc_txp.q_last = (uchar * )fcptr; + } else { + rp->fc_txp.q_first = (uchar * )fcptr; + rp->fc_txp.q_last = (uchar * )fcptr; + fcptr->fc_bkwd = NULL; + } + + fcptr->fc_fwd = NULL; + rp->fc_txp.q_cnt++; + if(rp->fc_txp.q_cnt > rp->fc_txp.q_max) { + rp->fc_txp.q_max = rp->fc_txp.q_cnt; + } + binfo->fc_table->fcp_array[fcptr->iotag] = fcptr; +} /* End fc_enq_fcbuf_active */ + + +/* + * Name: fc_enq_wait + * Function: Place dev_ptr on the adapter's wait queue. + * Input: dvi_t *dev_ptr dev_ptr to enqueue. + * Returns: nothing. 
+ */ +_static_ void +fc_enq_wait( +dvi_t *dev_ptr) +{ + fc_dev_ctl_t * ap; + + ap = dev_ptr->nodep->ap; + + /* Queue the dev_ptr if it is not already on the queue */ + if ((dev_ptr->DEVICE_WAITING_fwd == NULL) + && (ap->DEVICE_WAITING_tail != dev_ptr)) { + + if (ap->DEVICE_WAITING_head == NULL) { + ap->DEVICE_WAITING_head = dev_ptr; + } else { + ap->DEVICE_WAITING_tail->DEVICE_WAITING_fwd = dev_ptr; + } + ap->DEVICE_WAITING_tail = dev_ptr; + } +} /* End fc_enq_wait */ + +/* ALUN */ +/* + * Name: fc_enq_fcbuf + * Function: Place fc_buf on the device's free queue. + * Input: fc_buf_t *fcptr fc_buf to enqueue + * Returns: nothing. + */ +_static_ void +fc_enq_fcbuf( +fc_buf_t *fcptr) +{ + dvi_t * dev_ptr; + + dev_ptr = fcptr->dev_ptr; + + if (dev_ptr->fcbuf_head == NULL) { + dev_ptr->fcbuf_head = fcptr; + } else { + dev_ptr->fcbuf_tail->fc_fwd = fcptr; + } + fcptr->fc_fwd = NULL; + dev_ptr->fcbuf_tail = fcptr; + dev_ptr->numfcbufs++; + + if (dev_ptr->numfcbufs == dev_ptr->fcp_lun_queue_depth) { + if (dev_ptr->flags & SCSI_TQ_CLEARING) { + /* Call iodone for all the CLEARQ error bufs */ + fc_free_clearq(dev_ptr); + } + if (dev_ptr->queue_state == STOPPING) { + /* If we are trying to close, check to see if all done */ + } + } + + return; +} /* End fc_enq_fcbuf */ + + +/* + * Name: fc_enq_abort_bdr + * Function: Place dev_ptr on the adapter's Bus Device Reset queue. + * Input: dvi_t *dev_ptr dev_ptr to enqueue. + * Returns: nothing. 
+ */ +_static_ void +fc_enq_abort_bdr( + dvi_t * dev_ptr) +{ + fc_dev_ctl_t * ap; + + ap = dev_ptr->nodep->ap; + + if (ap->ABORT_BDR_head == NULL) { + dev_ptr->ABORT_BDR_fwd = NULL; + dev_ptr->ABORT_BDR_bkwd = NULL; + ap->ABORT_BDR_head = dev_ptr; + ap->ABORT_BDR_tail = dev_ptr; + } else { + dev_ptr->ABORT_BDR_bkwd = ap->ABORT_BDR_tail; + dev_ptr->ABORT_BDR_fwd = NULL; + ap->ABORT_BDR_tail->ABORT_BDR_fwd = dev_ptr; + ap->ABORT_BDR_tail = dev_ptr; + } +} /* End fc_enq_abort_bdr */ + + +/**************************************************************************/ +/* */ +/* NAME: fc_deq_fcbuf_active, fc_deq_wait, fc_deq_fcbuf, fc_deq_abort_bdr */ +/* */ +/* FUNCTION: */ +/* Utility routines to handle dequeueing device structures from */ +/* each of the queues in use. */ +/* */ +/* EXECUTION ENVIRONMENT: */ +/* */ +/* ERROR DESCRIPTION: The following errno values may be returned: */ +/* none */ +/* */ +/**************************************************************************/ +_static_ fc_buf_t * +fc_deq_fcbuf_active( + RING * rp, + ushort iotag) /* tag to match I/O */ +{ + FC_BRD_INFO * binfo; + fc_dev_ctl_t * p_dev_ctl; + fc_buf_t * fcptr = NULL; + + binfo = (FC_BRD_INFO * )(rp->fc_binfo); + p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl); + /* Remove an fcbuf from the FCP ring active queue based on iotag */ + + if ((iotag < MAX_FCP_CMDS) && + (fcptr = binfo->fc_table->fcp_array[iotag])) { + + /* Remove fcbuf from list, adjust first, last and cnt */ + if (fcptr->fc_bkwd) { + fcptr->fc_bkwd->fc_fwd = fcptr->fc_fwd; + } else { + rp->fc_txp.q_first = (uchar * )fcptr->fc_fwd; + } + + if (fcptr->fc_fwd) { + fcptr->fc_fwd->fc_bkwd = fcptr->fc_bkwd; + } else { + rp->fc_txp.q_last = (uchar * )fcptr->fc_bkwd; + } + + rp->fc_txp.q_cnt--; + binfo->fc_table->fcp_array[iotag] = NULL; + } + + if (fcptr) { + if (binfo->fc_flag & FC_SLI2) { + MATCHMAP * next_bmp; + + while(fcptr->bmp) { + next_bmp = (MATCHMAP *)fcptr->bmp->fc_mptr; + fc_mem_put(binfo, MEM_BPL, (uchar 
*)fcptr->bmp); + fcptr->bmp = next_bmp; + } + } + fcptr->bmp = 0; + + /* Use correct offset and size for syncing */ + fc_mpdata_sync(fcptr->fc_cmd_dma_handle, + (off_t)(fcptr->offset + sizeof(FCP_CMND)), + (u_int) sizeof(FCP_RSP), DDI_DMA_SYNC_FORCPU); + + } + return(fcptr); +} /* End fc_deq_fcbuf_active */ + + +/* + * Name: fc_deq_wait + * Function: Remove a dev_ptr from the adapter's wait queue. + * Input: dvi_t *dev_ptr dev_ptr to be dequeued. + * Returns: nothing. + */ +_local_ void +fc_deq_wait( + dvi_t * dev_ptr) +{ + fc_dev_ctl_t * ap; + dvi_t *prev_ptr; + + if(dev_ptr == NULL) { + return; + } + ap = dev_ptr->nodep->ap; + if(ap->DEVICE_WAITING_head == NULL) { + return; + } + + if(dev_ptr != ap->DEVICE_WAITING_head) { + prev_ptr = ap->DEVICE_WAITING_head; + while(prev_ptr->DEVICE_WAITING_fwd != dev_ptr && + prev_ptr != ap->DEVICE_WAITING_tail) + { + prev_ptr=prev_ptr->DEVICE_WAITING_fwd; + } + if(prev_ptr->DEVICE_WAITING_fwd == dev_ptr) { + prev_ptr->DEVICE_WAITING_fwd = dev_ptr->DEVICE_WAITING_fwd; + if(ap->DEVICE_WAITING_tail == dev_ptr) { + ap->DEVICE_WAITING_tail = prev_ptr; + } + dev_ptr->DEVICE_WAITING_fwd = NULL; + } + return; + } + if (ap->DEVICE_WAITING_head == ap->DEVICE_WAITING_tail) { + ap->DEVICE_WAITING_head = NULL; + ap->DEVICE_WAITING_tail = NULL; + } else { + ap->DEVICE_WAITING_head = dev_ptr->DEVICE_WAITING_fwd; + } + dev_ptr->DEVICE_WAITING_fwd = NULL; + +} /* End fc_deq_wait */ + + +/* + * Name: fc_deq_fcbuf + * Function: Remove an fc_buf from the device's free queue. + * Input: dvi_t *dev_ptr dev_ptr with the free list. + * Returns: pointer to the fc_buf, or NULL if none exist. 
+ */ +_static_ fc_buf_t * +fc_deq_fcbuf( + dvi_t * dev_ptr) +{ + fc_buf_t * fcptr; + + if (dev_ptr->fcbuf_head == NULL) + return(NULL); + + fcptr = dev_ptr->fcbuf_head; + if (dev_ptr->fcbuf_head == dev_ptr->fcbuf_tail) { + dev_ptr->fcbuf_head = NULL; + dev_ptr->fcbuf_tail = NULL; + } else { + dev_ptr->fcbuf_head = fcptr->fc_fwd; + } + dev_ptr->numfcbufs--; + + return(fcptr); + +} /* End fc_deq_fcbuf */ + + +/* + * Name: fc_deq_abort_bdr + * Function: Removes a dev_ptr from the adapter's abort Bus Device Reset + * queue. + * Input: dvi_t *dev_ptr dev_ptr to be removed. + * Returns: nothing. + */ +_local_ void +fc_deq_abort_bdr( +dvi_t *dev_ptr) +{ + fc_dev_ctl_t * ap; + + ap = dev_ptr->nodep->ap; + + if (ap->ABORT_BDR_head == ap->ABORT_BDR_tail) { + ap->ABORT_BDR_head = NULL; + ap->ABORT_BDR_tail = NULL; + } else if (ap->ABORT_BDR_head == dev_ptr) { + /* first one */ + ap->ABORT_BDR_head = dev_ptr->ABORT_BDR_fwd; + dev_ptr->ABORT_BDR_fwd->ABORT_BDR_bkwd = dev_ptr->ABORT_BDR_bkwd; + } else if (ap->ABORT_BDR_tail == dev_ptr) { + /* last one */ + ap->ABORT_BDR_tail = dev_ptr->ABORT_BDR_bkwd; + dev_ptr->ABORT_BDR_bkwd->ABORT_BDR_fwd = dev_ptr->ABORT_BDR_fwd; + } else { + /* in the middle */ + dev_ptr->ABORT_BDR_bkwd->ABORT_BDR_fwd = dev_ptr->ABORT_BDR_fwd; + dev_ptr->ABORT_BDR_fwd->ABORT_BDR_bkwd = dev_ptr->ABORT_BDR_bkwd; + } + dev_ptr->ABORT_BDR_fwd = NULL; + dev_ptr->ABORT_BDR_bkwd = NULL; + +} /* End fc_deq_abort_bdr */ + + +/* Assign a SCSI ID to a nodelist table entry */ +_static_ int +fc_assign_scsid( +fc_dev_ctl_t *p_dev_ctl, +NODELIST *ndlp) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + node_t * node_ptr; + nodeh_t * hp; + NODELIST * seedndlp; + NODELIST * new_ndlp; + int dev_index, i; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + /* Next check to see if our binding already has a SCSI ID */ + for (dev_index = 0; dev_index < MAX_FC_TARGETS; dev_index++) { + hp = &binfo->device_queue_hash[dev_index]; + i = (hp->node_flag & FCP_SEED_MASK); + if 
((i & FCP_SEED_DID) && (ndlp->nlp_DID == hp->un.dev_did)) + break; /* a match */ + else if ((i & FCP_SEED_WWPN) && + (fc_geportname(&ndlp->nlp_portname, &hp->un.dev_portname) == 2)) + break; /* a match */ + else if ((i & FCP_SEED_WWNN) && + (fc_geportname(&ndlp->nlp_nodename, &hp->un.dev_nodename) == 2)) + break; /* a match */ + } + + /* If not, assign a new SCSI ID / pan number */ + if (dev_index == MAX_FC_TARGETS) { + seedndlp = binfo->fc_nlpbind_start; + if(seedndlp == (NODELIST *)&binfo->fc_nlpbind_start) + seedndlp = binfo->fc_nlpunmap_start; + if(seedndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + seedndlp = binfo->fc_nlpmap_start; + while(seedndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + new_ndlp = (NODELIST *)seedndlp->nlp_listp_next; + + if (seedndlp->nlp_type & NLP_SEED_MASK) { + if (seedndlp->nlp_type & NLP_SEED_WWNN) { + if (fc_geportname(&ndlp->nlp_nodename, + &seedndlp->nlp_nodename) == 2) { + ndlp->id.nlp_pan = seedndlp->id.nlp_pan; + ndlp->id.nlp_sid = seedndlp->id.nlp_sid; + ndlp->nlp_type |= NLP_SEED_WWNN; + if(seedndlp != ndlp) { + seedndlp->nlp_type &= ~NLP_FCP_TARGET; + fc_freenode(binfo, seedndlp, 0); + seedndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, seedndlp); + } + dev_index = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + hp = &binfo->device_queue_hash[dev_index]; + + /* Claim SCSI ID by copying bind parameter to + * proper index in device_queue_hash. 
+ */ + if(hp->node_ptr) + ndlp->nlp_targetp = (uchar *)hp->node_ptr; + fc_bcopy(&ndlp->nlp_nodename, &hp->un.dev_nodename, + sizeof(NAME_TYPE)); + hp->node_flag &= ~FCP_SEED_MASK; + hp->node_flag |= FCP_SEED_WWNN; + goto out1; + } + } + if (seedndlp->nlp_type & NLP_SEED_WWPN) { + if (fc_geportname(&ndlp->nlp_portname, + &seedndlp->nlp_portname) == 2) { + ndlp->id.nlp_pan = seedndlp->id.nlp_pan; + ndlp->id.nlp_sid = seedndlp->id.nlp_sid; + ndlp->nlp_type |= NLP_SEED_WWPN; + if(seedndlp != ndlp) { + seedndlp->nlp_type &= ~NLP_FCP_TARGET; + fc_freenode(binfo, seedndlp, 0); + seedndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, seedndlp); + } + dev_index = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + hp = &binfo->device_queue_hash[dev_index]; + + /* Claim SCSI ID by copying bind parameter to + * proper index in device_queue_hash. + */ + if(hp->node_ptr) + ndlp->nlp_targetp = (uchar *)hp->node_ptr; + fc_bcopy(&ndlp->nlp_portname, &hp->un.dev_portname, + sizeof(NAME_TYPE)); + hp->node_flag &= ~FCP_SEED_MASK; + hp->node_flag |= FCP_SEED_WWPN; + goto out1; + } + } + if (seedndlp->nlp_type & NLP_SEED_DID) { + if (ndlp->nlp_DID == seedndlp->nlp_DID) { + ndlp->id.nlp_pan = seedndlp->id.nlp_pan; + ndlp->id.nlp_sid = seedndlp->id.nlp_sid; + ndlp->nlp_type |= NLP_SEED_DID; + if(seedndlp != ndlp) { + seedndlp->nlp_type &= ~NLP_FCP_TARGET; + fc_freenode(binfo, seedndlp, 0); + seedndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, seedndlp); + } + dev_index = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + hp = &binfo->device_queue_hash[dev_index]; + + /* Claim SCSI ID by copying bind parameter to + * proper index in device_queue_hash. 
+ */ + if(hp->node_ptr) + ndlp->nlp_targetp = (uchar *)hp->node_ptr; + hp->un.dev_did = ndlp->nlp_DID; + hp->node_flag &= ~FCP_SEED_MASK; + hp->node_flag |= FCP_SEED_DID; + goto out1; + } + } + } + seedndlp = new_ndlp; + if(seedndlp == (NODELIST *)&binfo->fc_nlpbind_start) + seedndlp = binfo->fc_nlpunmap_start; + if(seedndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + seedndlp = binfo->fc_nlpmap_start; + } + + if(clp[CFG_AUTOMAP].a_current) { + /* Fill in nodelist entry */ + if (DEV_PAN(p_dev_ctl->sid_cnt) == NLP_MAXPAN) { + return(0); /* No more available SCSI IDs */ + } + + /* If scan-down == 2 and we are private loop, automap + * method is based on ALPA. + */ + if((clp[CFG_SCAN_DOWN].a_current == 2) && + !(binfo->fc_flag & (FC_PUBLIC_LOOP | FC_FABRIC)) && + (binfo->fc_topology == TOPOLOGY_LOOP)) { + for (i = 0; i < FC_MAXLOOP; i++) { + if(ndlp->nlp_DID == (uint32)AlpaArray[i]) + break; + } + if(i == FC_MAXLOOP) { + goto jmp_auto; + } + ndlp->id.nlp_pan = DEV_PAN(i); + ndlp->id.nlp_sid = DEV_SID(i); + } + else { + /* Check to make sure assigned scsi id does not overlap + * with a seeded value. 
+ */ +jmp_auto: + seedndlp = binfo->fc_nlpbind_start; + if(seedndlp == (NODELIST *)&binfo->fc_nlpbind_start) + seedndlp = binfo->fc_nlpunmap_start; + if(seedndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + seedndlp = binfo->fc_nlpmap_start; + while(seedndlp != (NODELIST *)&binfo->fc_nlpmap_start) { + if ((seedndlp->nlp_state == NLP_SEED) || + (seedndlp->nlp_type & NLP_SEED_MASK)) { + if ((seedndlp->id.nlp_pan == DEV_PAN(p_dev_ctl->sid_cnt)) && + (seedndlp->id.nlp_sid == DEV_SID(p_dev_ctl->sid_cnt))) { + /* We overlap, so pick a new id and start again */ + p_dev_ctl->sid_cnt++; + goto jmp_auto; + } + } + seedndlp = (NODELIST *)seedndlp->nlp_listp_next; + if(seedndlp == (NODELIST *)&binfo->fc_nlpbind_start) + seedndlp = binfo->fc_nlpunmap_start; + if(seedndlp == (NODELIST *)&binfo->fc_nlpunmap_start) + seedndlp = binfo->fc_nlpmap_start; + } + + ndlp->id.nlp_pan = DEV_PAN(p_dev_ctl->sid_cnt); + ndlp->id.nlp_sid = DEV_SID(p_dev_ctl->sid_cnt); + p_dev_ctl->sid_cnt++; + } + ndlp->nlp_type |= NLP_AUTOMAP; + + dev_index = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + hp = &binfo->device_queue_hash[dev_index]; + + /* Claim SCSI ID by copying bind parameter to + * proper index in device_queue_hash. + */ + if(hp->node_ptr) + ndlp->nlp_targetp = (uchar *)hp->node_ptr; + switch(p_dev_ctl->fcp_mapping) { + case FCP_SEED_DID: + hp->un.dev_did = ndlp->nlp_DID; + ndlp->nlp_type |= NLP_SEED_DID; + break; + case FCP_SEED_WWPN: + fc_bcopy(&ndlp->nlp_portname, &hp->un.dev_portname, sizeof(NAME_TYPE)); + ndlp->nlp_type |= NLP_SEED_WWPN; + break; + case FCP_SEED_WWNN: + default: + fc_bcopy(&ndlp->nlp_nodename, &hp->un.dev_nodename, sizeof(NAME_TYPE)); + ndlp->nlp_type |= NLP_SEED_WWNN; + break; + } + hp->node_flag &= ~FCP_SEED_MASK; + hp->node_flag |= p_dev_ctl->fcp_mapping; + goto out1; + } + return(0); /* Cannot assign a scsi id */ + } + + /* If scan-down == 2 and we are private loop, automap + * method is based on ALPA. 
+ */ + if((clp[CFG_SCAN_DOWN].a_current == 2) && + !(binfo->fc_flag & (FC_PUBLIC_LOOP | FC_FABRIC)) && + (binfo->fc_topology == TOPOLOGY_LOOP)) { + for (i = 0; i < FC_MAXLOOP; i++) { + if(ndlp->nlp_DID == (uint32)AlpaArray[i]) + break; + } + if(i == FC_MAXLOOP) { + goto jmp_auto; + } + ndlp->id.nlp_pan = DEV_PAN(i); + ndlp->id.nlp_sid = DEV_SID(i); + goto out1; + } + /* Copy SCSI ID for the WWN into nodelist */ + ndlp->id.nlp_pan = DEV_PAN(dev_index); + ndlp->id.nlp_sid = DEV_SID(dev_index); + + /* Update rpi for that SCSI ID's device node info */ + if ((node_ptr = (node_t * )ndlp->nlp_targetp) != NULL) { + node_ptr->rpi = ndlp->nlp_Rpi; + node_ptr->last_good_rpi = ndlp->nlp_Rpi; + node_ptr->nlp = ndlp; + node_ptr->flags &= ~FC_NODEV_TMO; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + if(node_ptr->nodev_tmr) { + /* STOP nodev timer */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0704, /* ptr to msg structure */ + fc_mes0704, /* ptr to msg */ + fc_msgBlk0704.msgPreambleStr, /* begin varargs */ + (ulong)ndlp, + ndlp->nlp_flag, + ndlp->nlp_state, + ndlp->nlp_DID); /* end varargs */ + fc_clk_can(p_dev_ctl, node_ptr->nodev_tmr); + node_ptr->nodev_tmr = 0; + } + } + else { + int dev_index; +out1: + dev_index = INDEX(ndlp->id.nlp_pan, ndlp->id.nlp_sid); + node_ptr = binfo->device_queue_hash[dev_index].node_ptr; + if(node_ptr) { + /* This is a new device that entered the loop */ + node_ptr->nlp = ndlp; + node_ptr->rpi = ndlp->nlp_Rpi; + node_ptr->last_good_rpi = ndlp->nlp_Rpi; + node_ptr->scsi_id = dev_index; + ndlp->nlp_targetp = (uchar *)node_ptr; + node_ptr->flags &= ~FC_NODEV_TMO; + ndlp->nlp_flag &= ~NLP_NODEV_TMO; + if(node_ptr->nodev_tmr) { + /* STOP nodev timer */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0705, /* ptr to msg structure */ + fc_mes0705, /* ptr to msg */ + fc_msgBlk0705.msgPreambleStr, /* begin varargs */ + (ulong)ndlp, + ndlp->nlp_flag, + ndlp->nlp_state, + ndlp->nlp_DID); /* end varargs */ + fc_clk_can(p_dev_ctl, 
node_ptr->nodev_tmr); + node_ptr->nodev_tmr = 0; + } + } + } + return(1); +} /* End fc_assign_scsid */ + + +/************************************************************************/ +/* */ +/* NAME: fc_fail_cmd */ +/* */ +/* FUNCTION: Fail All Pending Commands Routine */ +/* */ +/* This routine is called to clear out all pending commands */ +/* for a SCSI FCP device. */ +/* */ +/* EXECUTION ENVIRONMENT: */ +/* This routine can only be called on priority levels */ +/* equal to that of the interrupt handler. */ +/* */ +/* DATA STRUCTURES: */ +/* sc_buf - input/output request struct used between the adapter */ +/* driver and the calling SCSI device driver */ +/* */ +/* INPUTS: */ +/* dev_info structure - pointer to device information structure */ +/* */ +/* RETURN VALUE DESCRIPTION: The following are the return values: */ +/* none */ +/* */ +/************************************************************************/ +_static_ void +fc_fail_cmd( + dvi_t * dev_ptr, + char error, + uint32 statistic) +{ + T_SCSIBUF * sbp; + RING * rp; + IOCBQ * iocb_cmd, *next; + IOCB * icmd; + Q tmpq; + fc_buf_t * fcptr; + struct buf * bp; + dvi_t * next_dev_ptr; + fc_dev_ctl_t * p_dev_ctl; + FC_BRD_INFO * binfo; + iCfgParam * clp; + + p_dev_ctl = dev_ptr->nodep->ap; + binfo = &BINFO; + rp = &binfo->fc_ring[FC_FCP_RING]; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + /* First clear out all sc_buf structures in the pending queue */ + if(! 
clp[CFG_HOLDIO].a_current) {
+ if((dev_ptr->nodep) &&
+ (dev_ptr->nodep->rptlunstate == REPORT_LUN_ONGOING))
+ goto out;
+ sbp = dev_ptr->pend_head;
+ dev_ptr->pend_head = NULL; /* reset head pointer */
+ dev_ptr->pend_tail = NULL; /* reset tail pointer */
+ dev_ptr->pend_count = 0;
+
+ while (sbp != NULL) {
+ T_SCSIBUF *nextsbp;
+
+ sbp->bufstruct.b_flags |= B_ERROR; /* set b_flags B_ERROR flag */
+ sbp->bufstruct.b_error = error;
+ sbp->bufstruct.b_resid = sbp->bufstruct.b_bcount;
+ if (error) {
+ sbp->status_validity = SC_ADAPTER_ERROR;
+ SET_ADAPTER_STATUS(sbp,SC_NO_DEVICE_RESPONSE)
+ } else {
+ sbp->status_validity = 0;
+ }
+
+ /* Point to next sc_buf in pending chain, if any */
+ nextsbp = (T_SCSIBUF *) sbp->bufstruct.av_forw;
+ sbp->bufstruct.av_forw = 0;
+ fc_do_iodone((struct buf *) sbp); /* This could requeue to pend_head */
+ sbp = nextsbp;
+ }
+ }
+
+out:
+ /* Next clear out all fc_buf structures in the iocb queue for this device */
+ tmpq.q_first = NULL;
+
+ /* Get next command from ring xmit queue */
+ iocb_cmd = fc_ringtx_get(rp);
+
+ while (iocb_cmd) {
+ icmd = &iocb_cmd->iocb;
+ if ((icmd->ulpCommand != CMD_IOCB_CONTINUE_CN) &&
+ (icmd->ulpContext == dev_ptr->nodep->last_good_rpi) &&
+ (icmd->ulpIoTag < MAX_FCP_CMDS) &&
+ (fcptr = binfo->fc_table->fcp_array[icmd->ulpIoTag]) &&
+ (fcptr->dev_ptr == dev_ptr)) {
+
+ if ((fcptr = fc_deq_fcbuf_active(rp, icmd->ulpIoTag)) != NULL) {
+ bp = (struct buf *)fcptr->sc_bufp;
+
+ /* Reject this command with error */
+ if (fcptr->fcp_cmd.fcpCntl2) {
+
+ /* This is a task management command */
+ dev_ptr->ioctl_errno = error;
+ if (fcptr->fcp_cmd.fcpCntl2 == ABORT_TASK_SET)
+ dev_ptr->flags &= ~SCSI_ABORT_TSET;
+
+ if (fcptr->fcp_cmd.fcpCntl2 & TARGET_RESET) {
+ dev_ptr->flags &= ~SCSI_TARGET_RESET;
+ for (next_dev_ptr = dev_ptr->nodep->lunlist;
+ next_dev_ptr != NULL; next_dev_ptr = next_dev_ptr->next) {
+ next_dev_ptr->flags &= ~SCSI_TARGET_RESET;
+ }
+ }
+
+ if (fcptr->fcp_cmd.fcpCntl2 & LUN_RESET)
+ 
dev_ptr->flags &= ~SCSI_LUN_RESET; + + if (dev_ptr->ioctl_wakeup == 1) { + dev_ptr->ioctl_wakeup = 0; + + fc_admin_wakeup(p_dev_ctl, dev_ptr, fcptr->sc_bufp); + } + else { + fc_do_iodone(bp); + } + + dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + + } else { + /* This is a regular FCP command */ + bp->b_error = error; + bp->b_resid = bp->b_bcount; + bp->b_flags |= B_ERROR; + if (error) { + sbp = fcptr->sc_bufp; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp,SC_NO_DEVICE_RESPONSE) + } + + sbp = fcptr->sc_bufp; + + dev_ptr->active_io_count--; + dev_ptr->nodep->num_active_io--; + fc_do_iodone(bp); + } + fc_enq_fcbuf(fcptr); + } + + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocb_cmd); + + while ((iocb_cmd = fc_ringtx_get(rp)) != NULL) { + icmd = &iocb_cmd->iocb; + if (icmd->ulpCommand != CMD_IOCB_CONTINUE_CN) + break; + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocb_cmd); + } + } else { + /* Queue this iocb to the temporary queue */ + if (tmpq.q_first) { + ((IOCBQ * )tmpq.q_last)->q = (uchar * )iocb_cmd; + tmpq.q_last = (uchar * )iocb_cmd; + } else { + tmpq.q_first = (uchar * )iocb_cmd; + tmpq.q_last = (uchar * )iocb_cmd; + } + iocb_cmd->q = NULL; + + iocb_cmd = fc_ringtx_get(rp); + } + } + + /* Put the temporary queue back in the FCP iocb queue */ + iocb_cmd = (IOCBQ * )tmpq.q_first; + while (iocb_cmd) { + next = (IOCBQ * )iocb_cmd->q; + fc_ringtx_put(rp, iocb_cmd); + iocb_cmd = next; + } + + return; +} /* End fc_fail_cmd */ + +/* Fix up any changed RPIs in FCP IOCBs queued up a txq + * Called from CLEAR_LA after a link up. 
+ */
+_static_ void
+fc_fcp_fix_txq(
+ fc_dev_ctl_t * p_dev_ctl)
+{
+ RING * rp;
+ FC_BRD_INFO * binfo;
+ fc_buf_t * fcptr;
+ IOCBQ * temp;
+ IOCB * cmd;
+ dvi_t * dev_ptr;
+ unsigned long iflag;
+
+ iflag = lpfc_q_disable_lock(p_dev_ctl);
+ binfo = &BINFO;
+ rp = &binfo->fc_ring[FC_FCP_RING];
+
+ /* Make sure all RPIs on txq are still ok */
+ temp = (IOCBQ *)rp->fc_tx.q_first;
+ while (temp != NULL) {
+ cmd = &temp->iocb;
+ if ((fcptr = binfo->fc_table->fcp_array[cmd->ulpIoTag]) != NULL) {
+ dev_ptr = fcptr->dev_ptr;
+ if((dev_ptr) && (dev_ptr->nodep) &&
+ (cmd->ulpContext != dev_ptr->nodep->rpi)) {
+ cmd->ulpContext = dev_ptr->nodep->rpi;
+ }
+ }
+ if(rp->fc_tx.q_last == (uchar * )temp)
+ break;
+ temp = (IOCBQ *)temp->q;
+ }
+ lpfc_q_unlock_enable(p_dev_ctl, iflag);
+ return;
+} /* End fc_fcp_fix_txq */
+
+_static_ void
+fc_fail_pendq(
+ dvi_t * dev_ptr,
+ char error,
+ uint32 statistic)
+{
+ T_SCSIBUF * sbp;
+ RING * rp;
+ fc_dev_ctl_t * p_dev_ctl;
+ FC_BRD_INFO * binfo;
+ iCfgParam * clp;
+
+ p_dev_ctl = dev_ptr->nodep->ap;
+ binfo = &BINFO;
+ clp = DD_CTL.p_config[binfo->fc_brd_no];
+ rp = &binfo->fc_ring[FC_FCP_RING];
+
+ if((dev_ptr->nodep) &&
+ (dev_ptr->nodep->rptlunstate == REPORT_LUN_ONGOING))
+ goto out;
+
+ if(clp[CFG_HOLDIO].a_current)
+ goto out;
+
+ sbp = dev_ptr->pend_head;
+ dev_ptr->pend_head = NULL; /* reset head pointer */
+ dev_ptr->pend_tail = NULL; /* reset tail pointer */
+ dev_ptr->pend_count = 0;
+
+ while (sbp != NULL) {
+ T_SCSIBUF *nextsbp;
+
+ sbp->bufstruct.b_flags |= B_ERROR; /* set b_flags B_ERROR flag */
+ sbp->bufstruct.b_error = error;
+ sbp->bufstruct.b_resid = sbp->bufstruct.b_bcount;
+ if (error) {
+ sbp->status_validity = SC_ADAPTER_ERROR;
+ SET_ADAPTER_STATUS(sbp,SC_NO_DEVICE_RESPONSE)
+ } else {
+ sbp->status_validity = 0;
+ }
+
+ /* Point to next sc_buf in pending chain, if any */
+ nextsbp = (T_SCSIBUF *) sbp->bufstruct.av_forw;
+ sbp->bufstruct.av_forw = 0;
+ fc_do_iodone((struct buf *) sbp); /* This could requeue to 
pend_head */ + sbp = nextsbp; + } + +out: + return; +} /* End fc_fail_pendq */ + +/************************************************************************/ +/* */ +/* NAME:issue_fcp_cmd */ +/* */ +/* FUNCTION:Issue an FCP command to the adapter iocb queue */ +/* */ +/* EXECUTION ENVIRONMENT: */ +/* This routine always runs at interrupt level */ +/* */ +/* DATA STRUCTURES: */ +/* sc_buf- input/output request struct used between the adapter */ +/* driver and the calling SCSI device driver */ +/* */ +/* RETURN VALUE DESCRIPTION: 0 = success */ +/* 1 = continue */ +/* 2 = requeue */ +/* 4 = exit */ +/* */ +/************************************************************************/ +_static_ int +issue_fcp_cmd( + fc_dev_ctl_t * p_dev_ctl, + dvi_t * dev_ptr, + T_SCSIBUF * sbp, + int pend) +{ + FC_BRD_INFO * binfo = &BINFO; + iCfgParam * clp; + struct buf * bp; + fc_buf_t * fcptr; + int i, rc; + RING * rp; + IOCBQ * temp; + IOCB * cmd; + uint32 count, * lp; + fc_lun_t lun; + ULP_BDE64 * bpl; + MATCHMAP * bmp; + NODELIST * ndlp; + + rp = &binfo->fc_ring[FC_FCP_RING]; + bp = (struct buf *) sbp; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + if((dev_ptr->nodep == 0) || + ((ndlp = dev_ptr->nodep->nlp) == 0)) + return(FCP_REQUEUE); + + if ( !(ndlp->capabilities & FC_CAP_AUTOSENSE) ) { + if (dev_ptr->sense_valid && + (sbp->scsi_command.scsi_cmd.scsi_op_code == SCSI_REQUEST_SENSE)) { + + /* Request sense command - use saved sense data */ + if (bp->b_bcount > (int)dev_ptr->sense_length) { + bp->b_resid = bp->b_bcount - (int)dev_ptr->sense_length; + count = dev_ptr->sense_length; + } else { + count = bp->b_bcount; + } + lp = (uint32 * )dev_ptr->sense; + lpfc_copy_sense(dev_ptr, bp); + bp->b_error = 0; + bp->b_flags &= ~B_ERROR; + + if (pend) { + dev_ptr->pend_head = (T_SCSIBUF *) bp->av_forw; + if (dev_ptr->pend_head == NULL) + dev_ptr->pend_tail = NULL; + else + dev_ptr->pend_head->bufstruct.av_back = NULL; + dev_ptr->pend_count--; + } + dev_ptr->sense_valid = 0; + + 
FCSTATCTR.fcpSense++; + fc_do_iodone(bp); + return(FCP_CONTINUE); + } + } + + + if(dev_ptr->queue_state != ACTIVE) { + return(FCP_REQUEUE); + } + + if(binfo->fc_process_LA == 0) { + return(FCP_REQUEUE); + } + + /* Check if device is in process of resetting */ + if (dev_ptr->flags & SCSI_DEV_RESET) { + return(FCP_REQUEUE); + } + + if (dev_ptr->nodep->rpi == 0xFFFE) { + + if(clp[CFG_HOLDIO].a_current) { + return(FCP_REQUEUE); + } + + if((clp[CFG_NODEV_TMO].a_current) && + ((dev_ptr->nodep->flags & FC_NODEV_TMO) == 0)) { + + /* Kick off first PLOGI to device */ + if (!(ndlp->nlp_flag & NLP_REQ_SND)) { + uint32 did; + + did = ndlp->nlp_DID; + if(did == (uint32)0) { + if((ndlp->nlp_type & (NLP_AUTOMAP | NLP_SEED_MASK)) && + (ndlp->nlp_state == NLP_LIMBO) && ndlp->nlp_oldDID) + did = ndlp->nlp_oldDID; + } + ndlp->nlp_flag &= ~NLP_RM_ENTRY; + if ((!(ndlp->nlp_flag & NLP_NODEV_TMO)) && + (did != (uint32)0)) { + if(!(ndlp->nlp_flag & NLP_NS_REMOVED)) { + ndlp->nlp_flag |= NLP_NODEV_TMO; + fc_els_cmd(binfo, ELS_CMD_PLOGI, (void *)((ulong)did), + (uint32)0, (ushort)0, ndlp); + } + } + } + else { + ndlp->nlp_flag |= NLP_NODEV_TMO; + } + return(FCP_REQUEUE); + } + + /* The device is not active at this time */ + bp->b_error = EIO; + bp->b_resid = bp->b_bcount; + bp->b_flags |= B_ERROR; + sbp->status_validity = SC_ADAPTER_ERROR; + SET_ADAPTER_STATUS(sbp,SC_NO_DEVICE_RESPONSE) + if (pend) { + dev_ptr->pend_head = (T_SCSIBUF *) bp->av_forw; + if (dev_ptr->pend_head == NULL) + dev_ptr->pend_tail = NULL; + else + dev_ptr->pend_head->bufstruct.av_back = NULL; + dev_ptr->pend_count--; + } + + FCSTATCTR.fcpNoDevice++; + fc_delay_iodone(p_dev_ctl, sbp); + + { + uint32 did; + uint32 pan; + uint32 sid; + + did = ndlp->nlp_DID; + pan = ndlp->id.nlp_pan; + sid = ndlp->id.nlp_sid; + + if (!(dev_ptr->flags & DONT_LOG_INVALID_RPI)) { + /* Cannot issue FCP command */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0706, /* ptr to msg structure */ + fc_mes0706, /* ptr to msg */ + 
fc_msgBlk0706.msgPreambleStr, /* begin varargs */ + did, + FC_SCSID(pan, sid)); /* end varargs */ + dev_ptr->flags |= DONT_LOG_INVALID_RPI; + } + } + return(FCP_CONTINUE); + } + + if (ndlp->nlp_action & NLP_DO_RSCN) { + return(FCP_REQUEUE); + } + + if ((fcptr = fc_deq_fcbuf(dev_ptr)) == NULL) { + return(FCP_REQUEUE); + } + + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == NULL) { + fc_enq_fcbuf(fcptr); + return(FCP_EXIT); + } + + fc_bzero((void *)fcptr, sizeof(FCP_CMND) + sizeof(FCP_RSP)); + + /* Copy SCSI cmd into FCP payload for xmit.*/ + lun = (uint32) sbp->scsi_command.scsi_lun; + { + int i; + fcptr->fcp_cmd.fcpCdb[0]= sbp->scsi_command.scsi_cmd.scsi_op_code; + fcptr->fcp_cmd.fcpCdb[1]= sbp->scsi_command.scsi_cmd.lun; + for(i=0; i< (sizeof(struct sc_cmd)-2); i++) + fcptr->fcp_cmd.fcpCdb[i+2]= sbp->scsi_command.scsi_cmd.scsi_bytes[i]; + fcptr->fcp_cmd.fcpCntl1 = sbp->scsi_command.flags; + } + + /* Put LUN in the FCP command using the Peripheral Addressing Method */ + fcptr->fcp_cmd.fcpLunMsl = lun << FC_LUN_SHIFT; + fcptr->fcp_cmd.fcpLunLsl = 0; + + /* + * The Logical Unit Addressing method is not supported at + * this current release. 
+ */ + if (dev_ptr->nodep->addr_mode == VOLUME_SET_ADDRESSING) { + fcptr->fcp_cmd.fcpLunMsl |= SWAP_DATA(0x40000000); + } + + fcptr->fcp_cmd.fcpDl = SWAP_DATA(bp->b_bcount); + + fcptr->sc_bufp = sbp; + fcptr->flags = 0; + + /* set up an iotag so we can match the completion iocb */ + for (i = 0; i < MAX_FCP_CMDS; i++) { + fcptr->iotag = rp->fc_iotag++; + if (rp->fc_iotag >= MAX_FCP_CMDS) + rp->fc_iotag = 1; + if (binfo->fc_table->fcp_array[fcptr->iotag] == 0) + break; + } + if (i >= MAX_FCP_CMDS) { + /* No more command slots available, retry later */ + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_enq_fcbuf(fcptr); + return(FCP_EXIT); + } + + fc_bzero((void *)temp, sizeof(IOCBQ)); /* zero the iocb entry */ + cmd = &temp->iocb; + + if (binfo->fc_flag & FC_SLI2) { + /* Allocate buffer for Buffer ptr list */ + if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL)) == 0) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + fc_enq_fcbuf(fcptr); + return(FCP_EXIT); + } + bpl = (ULP_BDE64 * )bmp->virt; + bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(GET_PAYLOAD_PHYS_ADDR(fcptr))); + bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr))); + bpl->tus.f.bdeSize = sizeof(FCP_CMND); + bpl->tus.f.bdeFlags = BUFF_USE_CMND; + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + bpl++; + bpl->addrHigh = PCIMEM_LONG((uint32)putPaddrHigh(GET_PAYLOAD_PHYS_ADDR(fcptr)+sizeof(FCP_CMND))); + bpl->addrLow = PCIMEM_LONG((uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr)+sizeof(FCP_CMND))); + bpl->tus.f.bdeSize = sizeof(FCP_RSP); + bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV); + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + bpl++; + + cmd->un.fcpi64.bdl.ulpIoTag32 = (uint32)0; + cmd->un.fcpi64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys); + cmd->un.fcpi64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys); + cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(ULP_BDE64)); + cmd->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL; + cmd->ulpBdeCount = 1; + fcptr->bmp = bmp; + temp->bpl = (uchar *)0; + } 
else { + bpl = 0; + cmd->un.fcpi.fcpi_cmnd.bdeAddress = (uint32)putPaddrLow(GET_PAYLOAD_PHYS_ADDR(fcptr)); + cmd->un.fcpi.fcpi_cmnd.bdeSize = sizeof(FCP_CMND); + cmd->un.fcpi.fcpi_rsp.bdeAddress = (uint32)(putPaddrLow((GET_PAYLOAD_PHYS_ADDR(fcptr) + sizeof(FCP_CMND)))); + cmd->un.fcpi.fcpi_rsp.bdeSize = sizeof(FCP_RSP); + cmd->ulpBdeCount = 2; + fcptr->bmp = 0; + temp->bpl = (uchar *)0; + } + + cmd->ulpContext = dev_ptr->nodep->rpi; + cmd->ulpIoTag = fcptr->iotag; + /* + * if device is FCP-2 device, set the following bit that says + * to run the FC-TAPE protocol. + */ + if (ndlp->id.nlp_fcp_info & NLP_FCP_2_DEVICE) { + cmd->ulpFCP2Rcvy = 1; + } + cmd->ulpClass = (ndlp->id.nlp_fcp_info & 0x0f); + cmd->ulpOwner = OWN_CHIP; + + if (sbp->timeout_value == 0) + sbp->timeout_value = 3600; /* One hour in seconds */ + + curtime(&fcptr->timeout); + + /* Need to set the FCP timeout in the fcptr structure and the IOCB + * for this I/O to get the adapter to run a timer. + */ + { + uint32 time_out; + + if(sbp->timeout_value) + time_out = sbp->timeout_value * fc_ticks_per_second; + else + time_out = 30 * fc_ticks_per_second; + + if (binfo->fc_flag & FC_FABRIC) { + time_out += (fc_ticks_per_second * + (clp[CFG_FCPFABRIC_TMO].a_current + (2 * binfo->fc_ratov))); + } + + fcptr->timeout = ((ulong)fcptr->timeout + time_out + (300 * fc_ticks_per_second)); + + /* Set the FCP timeout in the IOCB to get the adapter to run a timer */ + if ((time_out / fc_ticks_per_second) < 256) + cmd->ulpTimeout = time_out / fc_ticks_per_second; + } + + if (bp->b_bcount == 0) { + /* Set up for SCSI command */ + if (binfo->fc_flag & FC_SLI2) + cmd->ulpCommand = CMD_FCP_ICMND64_CR; + else + cmd->ulpCommand = CMD_FCP_ICMND_CR; + + if (((fcptr->fcp_cmd.fcpCdb[0] & 0xBF) == SCSI_RESERVE_UNIT) || + ((fcptr->fcp_cmd.fcpCdb[0] & 0xBF) == SCSI_RELEASE_UNIT)) { + /* Mask off the lun field for reserve/release commands */ + fcptr->fcp_cmd.fcpCdb[1] &= 0x1f; + } + if(bpl) { + bpl->addrHigh = 0; + bpl->addrLow = 0; + 
bpl->tus.w = 0; + } + cmd->un.fcpi.fcpi_parm = 0; + fcptr->fcp_cmd.fcpCntl3 = 0; + + cmd->ulpLe = 1; + /* Queue cmd chain to last iocb entry in xmit queue */ + if (rp->fc_tx.q_first == NULL) { + rp->fc_tx.q_first = (uchar * )temp; + } else { + ((IOCBQ * )(rp->fc_tx.q_last))->q = (uchar * )temp; + } + rp->fc_tx.q_last = (uchar * )temp; + rp->fc_tx.q_cnt++; + + } else if (bp->b_flags & B_READ) { + /* Set up for SCSI read */ + if (binfo->fc_flag & FC_SLI2) + cmd->ulpCommand = CMD_FCP_IREAD64_CR; + else + cmd->ulpCommand = CMD_FCP_IREAD_CR; + cmd->ulpPU = PARM_READ_CHECK; + cmd->un.fcpi.fcpi_parm = bp->b_bcount; + fcptr->fcp_cmd.fcpCntl3 = READ_DATA; + if((rc = fc_fcp_bufmap(p_dev_ctl, sbp, fcptr, temp, bpl, dev_ptr, pend)) != 0) + return(rc); + } else { + /* Set up for SCSI write */ + if (binfo->fc_flag & FC_SLI2) + cmd->ulpCommand = CMD_FCP_IWRITE64_CR; + else + cmd->ulpCommand = CMD_FCP_IWRITE_CR; + fcptr->fcp_cmd.fcpCntl3 = WRITE_DATA; + if((rc = fc_fcp_bufmap(p_dev_ctl, sbp, fcptr, temp, bpl, dev_ptr, pend)) != 0) + return(rc); + } + + if(dev_ptr->nodep->flags & FC_FCP2_RECOVERY) + cmd->ulpFCP2Rcvy = 1; + + lp = (uint32 * ) & fcptr->fcp_cmd; + fc_enq_fcbuf_active(rp, fcptr); + + dev_ptr->active_io_count++; + dev_ptr->nodep->num_active_io++; + FCSTATCTR.fcpCmd++; + + return(0); +} /* End issue_fcp_cmd */ + + +_static_ int +fc_failio( + fc_dev_ctl_t * p_dev_ctl) +{ + FC_BRD_INFO * binfo; + node_t * node_ptr; + dvi_t * dev_ptr; + struct buf *bp, *nextbp; + int i; + + binfo = &BINFO; + + /* Clear the queues for one or more SCSI devices */ + for (i = 0; i < MAX_FC_TARGETS; i++) { + if ((node_ptr = binfo->device_queue_hash[i].node_ptr) != NULL) { + for (dev_ptr = node_ptr->lunlist; dev_ptr != NULL; + dev_ptr = dev_ptr->next) { + + dev_ptr->queue_state = HALTED; + fc_return_standby_queue(dev_ptr, + (uchar)((binfo->fc_flag & FC_BUS_RESET) ? 
EIO : EFAULT), 0); + + /* First send ABTS on outstanding I/Os in txp queue */ + fc_abort_fcp_txpq(binfo, dev_ptr); + + fc_fail_pendq(dev_ptr, (char)((binfo->fc_flag & FC_BUS_RESET) ? + EIO : EFAULT), 0); + + fc_fail_cmd(dev_ptr, (char)((binfo->fc_flag & FC_BUS_RESET) ? + EIO : EFAULT), 0); + + /* Call iodone for all the CLEARQ error bufs */ + fc_free_clearq(dev_ptr); + } + } + } + /* Call iodone for any commands that timed out previously */ + for (bp = p_dev_ctl->timeout_head; bp != NULL; ) { + nextbp = bp->av_forw; + bp->b_error = ETIMEDOUT; + bp->b_flags |= B_ERROR; + fc_do_iodone(bp); + bp = nextbp; + } + p_dev_ctl->timeout_head = NULL; + p_dev_ctl->timeout_count = 0; + return(0); +} + + +_static_ void +fc_return_standby_queue( + dvi_t * dev_ptr, + uchar status, + uint32 statistic) +{ + T_SCSIBUF * sp; + + /* It is possible to have IOs on the pending queue because + of the way the scheduler works. */ + + while ((sp = dev_ptr->standby_queue_head) != NULL) { + dev_ptr->standby_count--; + dev_ptr->standby_queue_head = (T_SCSIBUF *)sp->bufstruct.av_forw; + fc_do_iodone((struct buf *) sp); + } + dev_ptr->standby_queue_head = NULL; + dev_ptr->standby_queue_tail = NULL; +} + +/* + * Restart all devices for a given adapter. Should only be + * invoked at the conclusion of loop {re}discovery. 
+ */ +_static_ int +fc_restart_all_devices( + fc_dev_ctl_t * p_dev_ctl) +{ + dvi_t * dev_ptr; + FC_BRD_INFO * binfo; + int i; + node_t * node_ptr; + + binfo = &BINFO; + + for (i = 0; i < MAX_FC_TARGETS; ++i) { + if ((node_ptr = binfo->device_queue_hash[i].node_ptr) != NULL) { + dev_ptr = node_ptr->lunlist; + while (dev_ptr) { + if ((dev_ptr->queue_state == RESTART_WHEN_READY) || + (dev_ptr->queue_state == HALTED)) { + fc_restart_device(dev_ptr); + } + + if (dev_ptr->nodep->rpi != 0xfffe) + dev_ptr->flags &= ~(NORPI_RESET_DONE | DONT_LOG_INVALID_RPI); + else + dev_ptr->flags &= ~DONT_LOG_INVALID_RPI; + fc_enq_wait(dev_ptr); + dev_ptr = dev_ptr->next; + } + } + } + return(0); +} /* End fc_restart_all_devices */ + +/* + * Restart a device by draining its standby queue. + */ +_static_ int +fc_restart_device( + dvi_t * dev_ptr) +{ + FC_BRD_INFO * binfo; + + binfo = &dev_ptr->nodep->ap->info; + if (binfo->fc_ffstate != FC_READY || + (dev_ptr->flags & (SCSI_TQ_CLEARING | CHK_SCSI_ABDR))) { + + dev_ptr->queue_state = RESTART_WHEN_READY; + return(0); + } + + dev_ptr->queue_state = ACTIVE; + dev_ptr->flags &= ~(SCSI_TQ_HALTED | CHK_SCSI_ABDR); + fc_return_standby_queue(dev_ptr, + (uchar)((binfo->fc_flag & FC_BUS_RESET) ? EIO : EFAULT), 0); + + return(1); +} /* End fc_restart_device */ + +/* Called to reissue fcp command if tgt throttle was reached */ +_static_ void +re_issue_fcp_cmd( + dvi_t * dev_ptr) +{ + fc_dev_ctl_t * ap; + dvi_t * next_ptr; + dvi_t * start_ptr; + T_SCSIBUF * sbp = NULL; + int rc; + FC_BRD_INFO * binfo; + RING * rp; + + if (dev_ptr == NULL) + return; + + ap = dev_ptr->nodep->ap; + binfo = &ap->info; + + rp = &binfo->fc_ring[FC_FCP_RING]; + + next_ptr = dev_ptr; + start_ptr = next_ptr->nodep->lunlist; + + if (start_ptr == NULL) + return; + + do { + + if ((sbp = next_ptr->pend_head) != NULL) + break; + + next_ptr = next_ptr->next; + if (!next_ptr) + next_ptr = start_ptr; + } while ( next_ptr != dev_ptr ); + + if (! 
sbp) { + next_ptr->nodep->last_dev = NULL; + return; + } + + if ((rc = issue_fcp_cmd(ap, next_ptr, sbp, 1))) { + if ((rc & FCP_REQUEUE) || (rc & FCP_EXIT)) + return; + } + next_ptr->pend_count--; + next_ptr->pend_head = (T_SCSIBUF *) sbp->bufstruct.av_forw; + if (next_ptr->pend_head == NULL) + next_ptr->pend_tail = NULL; + else + next_ptr->pend_head->bufstruct.av_back = NULL; + + if (next_ptr->pend_count == 0) + fc_deq_wait(next_ptr); + + next_ptr->nodep->last_dev = next_ptr->next; + if (next_ptr->nodep->last_dev == NULL) + next_ptr->nodep->last_dev = next_ptr->nodep->lunlist; + + if (rp->fc_tx.q_cnt) + issue_iocb_cmd(binfo, rp, 0); + + return; +} /* End re_issue_fcp_cmd */ + +/* Find a SCSI device structure for a given LUN */ +_static_ dvi_t * +fc_find_lun( +FC_BRD_INFO *binfo, +int hash_index, +fc_lun_t lun) +{ + node_t * node_ptr; + dvi_t * dev_ptr; + + if ((hash_index < 0) || (hash_index > MAX_FC_TARGETS)) + return(NULL); + + node_ptr = binfo->device_queue_hash[hash_index].node_ptr; + + if (node_ptr == NULL) { + dev_ptr = NULL; + } else { + for (dev_ptr = node_ptr->lunlist; dev_ptr != NULL; + dev_ptr = dev_ptr->next) { + + if (dev_ptr->lun_id == lun) { + /* We found the correct entry */ + break; + } + } + } + return(dev_ptr); +} /* End fc_find_lun */ + +_static_ int +fc_reset_dev_q_depth( + fc_dev_ctl_t * p_dev_ctl) +{ + dvi_t * dev_ptr; + FC_BRD_INFO * binfo; + int i; + iCfgParam * clp; + node_t * node_ptr; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + for (i = 0; i < MAX_FC_TARGETS; ++i) { + if ((node_ptr = binfo->device_queue_hash[i].node_ptr) != NULL) { + dev_ptr = node_ptr->lunlist; + while (dev_ptr) { + dev_ptr->fcp_cur_queue_depth = (ushort)clp[CFG_DFT_LUN_Q_DEPTH].a_current; + dev_ptr = dev_ptr->next; + } + } + } + return(0); +} /* End fc_reset_dev_q_depth */ + + +/* [SYNC] */ +_static_ void +fc_polling( + FC_BRD_INFO *binfo, + uint32 att_bit) +{ + volatile uint32 ha_copy; + void *ioa; + fc_dev_ctl_t * p_dev_ctl; + + p_dev_ctl = 
(fc_dev_ctl_t *)binfo->fc_p_dev_ctl; + do { + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + ha_copy = READ_CSR_REG(binfo, FC_HA_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + if (ha_copy & att_bit) + break; + } while (1); + fc_intr((struct intr *)p_dev_ctl); +} diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/fcxmitb.c current/drivers/scsi/lpfc/fcxmitb.c --- reference/drivers/scsi/lpfc/fcxmitb.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/fcxmitb.c 2004-04-09 11:53:04.000000000 -0700 @@ -0,0 +1,1442 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#include "fc_os.h" + +#include "fc_hw.h" +#include "fc.h" + +#include "fcdiag.h" +#include "fcfgparm.h" +#include "fcmsg.h" +#include "fc_crtn.h" /* Core - external routine definitions */ +#include "fc_ertn.h" /* Environment - external routine definitions */ + +extern fc_dd_ctl_t DD_CTL; +extern iCfgParam icfgparam[]; +extern int lpfc_nethdr; + +/* Routine Declaration - Local */ +_local_ int fc_mbuf_to_iocb(fc_dev_ctl_t *p_dev_ctl, fcipbuf_t *p_mbuf); +_local_ fcipbuf_t *fc_txq_put(fc_dev_ctl_t *p_dev_ctl, RING *rp, + fcipbuf_t *p_mbuf); +/* End Routine Declaration - Local */ + +/*****************************************************************************/ +/* + * NAME: fc_ringtx_put + * + * FUNCTION: put xmit iocb onto the ring transmit queue. + * + * EXECUTION ENVIRONMENT: process and interrupt level. + * + * CALLED FROM: + * fc_els_cmd + * + * INPUT: + * binfo - pointer to the device info area + * iocbq - pointer to iocbq entry of xmit iocb + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_ringtx_put( +RING *rp, +IOCBQ *iocbq) /* pointer to iocbq entry */ +{ + FC_BRD_INFO * binfo; + + binfo = (FC_BRD_INFO * )rp->fc_binfo; + if (rp->fc_tx.q_first) { + ((IOCBQ * )rp->fc_tx.q_last)->q = (uchar * )iocbq; + rp->fc_tx.q_last = (uchar * )iocbq; + } else { + rp->fc_tx.q_first = (uchar * )iocbq; + rp->fc_tx.q_last = (uchar * )iocbq; + } + + iocbq->q = NULL; + rp->fc_tx.q_cnt++; + + return; + +} /* End fc_ringtx_put */ + + +/*****************************************************************************/ +/* + * NAME: fc_ringtx_get + * + * FUNCTION: get a packet off the ring transmit queue. + * + * EXECUTION ENVIRONMENT: interrupt level. 
+ * + * CALLED FROM: + * fc_els_cmd + * + * INPUT: + * rp - pointer to the ring to get an iocb from + * + * RETURNS: + * NULL - no iocbs found + * iocb pointer - pointer to an iocb to transmit + */ +/*****************************************************************************/ +_static_ IOCBQ * +fc_ringtx_get( +RING *rp) +{ + FC_BRD_INFO * binfo; + NODELIST * nlp; + IOCBQ * p_first = NULL; + IOCBQ * prev = NULL; + uchar * daddr; + ushort xri; + + binfo = (FC_BRD_INFO * )rp->fc_binfo; + if (rp->fc_tx.q_first) { + p_first = (IOCBQ * )rp->fc_tx.q_first; + + /* Make sure we already have a login and exchange to the remote node */ + while (p_first->iocb.ulpCommand == 0) { + if (rp->fc_ringno == FC_IP_RING) { + NETHDR * np; + + /* check to see if nlplist entry exists yet */ + np = (NETHDR * )(fcdata(((fcipbuf_t * )(p_first->bp)))); + daddr = np->fc_destname.IEEE; + if ((xri = fc_emac_lookup(binfo, daddr, &nlp))) { + /* exchange to destination already exists */ + if (binfo->fc_flag & FC_SLI2) + p_first->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CX; + else + p_first->iocb.ulpCommand = CMD_XMIT_SEQUENCE_CX; + p_first->iocb.ulpContext = xri; + p_first->info = (uchar * )nlp; + break; + } + } + + /* loop past continuation iocbs */ + while (p_first->iocb.ulpLe == 0) { + prev = p_first; + if ((p_first = (IOCBQ * )p_first->q) == 0) { + return(0); + } + } + prev = p_first; + if ((p_first = (IOCBQ * )p_first->q) == 0) { + return(0); + } + } + + /* adjust last if necessary */ + if (p_first->q == 0) { + rp->fc_tx.q_last = (uchar * )prev; + } + + /* remove iocb chain to process */ + if (prev == 0) { + rp->fc_tx.q_first = p_first->q; + } else { + prev->q = (uchar * )p_first->q; + } + + p_first->q = NULL; + rp->fc_tx.q_cnt--; + } + return(p_first); + +} /* End fc_ringtx_get */ + + +/*****************************************************************************/ +/* + * NAME: fc_ringtx_drain + * + * FUNCTION: get all packets off the ring transmit queue. 
+ * + * EXECUTION ENVIRONMENT: interrupt level. + * + * NOTES: + * + * CALLED FROM: + * fc_els_cmd + * + * INPUT: + * binfo - pointer to the device info area + * + * RETURNS: + * NULL - no match found + * mbuf pointer - pointer to a mbuf chain which contains a packet. + */ +/*****************************************************************************/ +_static_ IOCBQ * +fc_ringtx_drain( +RING *rp) +{ + FC_BRD_INFO * binfo; + IOCBQ * p_first; + IOCBQ * prev; + + binfo = (FC_BRD_INFO * )rp->fc_binfo; + p_first = (IOCBQ * )rp->fc_tx.q_first; + if (p_first) { + prev = (IOCBQ * )p_first->q; + + /* remove iocb chain to process */ + if (prev == 0) { + rp->fc_tx.q_first = 0; + rp->fc_tx.q_last = 0; + } else { + rp->fc_tx.q_first = (uchar * )prev; + } + + p_first->q = NULL; + rp->fc_tx.q_cnt--; + } + + return(p_first); + +} /* End fc_ringtx_drain */ + + + + +/*****************************************************************************/ +/* + * NAME: fc_ringtxp_put + * + * FUNCTION: put xmit iocb onto the ring pending queue. + * + * EXECUTION ENVIRONMENT: process and interrupt level. 
+ * + * CALLED FROM: + * fc_elsp_cmd + * + * INPUT: + * rp - pointer to the ring + * iocbq - pointer to iocbq entry of xmit iocb + * + * RETURNS: + * none + */ +/*****************************************************************************/ +_static_ void +fc_ringtxp_put( +RING *rp, +IOCBQ *iocbq) /* pointer to iocbq entry */ +{ + fc_dev_ctl_t * p_dev_ctl; + FC_BRD_INFO * binfo; + unsigned long iflag; + + binfo = (FC_BRD_INFO * )rp->fc_binfo; + p_dev_ctl = (fc_dev_ctl_t *)binfo->fc_p_dev_ctl; + + iflag = lpfc_q_disable_lock(p_dev_ctl); + if (rp->fc_txp.q_first) { + ((IOCBQ * )rp->fc_txp.q_last)->q = (uchar * )iocbq; + rp->fc_txp.q_last = (uchar * )iocbq; + } else { + rp->fc_txp.q_first = (uchar * )iocbq; + rp->fc_txp.q_last = (uchar * )iocbq; + + /* start watchdog timer on first xmit only */ + if (rp->fc_ringno != FC_FCP_RING) { + lpfc_q_unlock_enable(p_dev_ctl, iflag); + RINGTMO = fc_clk_set((fc_dev_ctl_t *)(binfo->fc_p_dev_ctl), + rp->fc_ringtmo, fc_cmdring_timeout, (void *)rp, 0); + iflag = lpfc_q_disable_lock(p_dev_ctl); + } + } + + iocbq->q = NULL; + rp->fc_txp.q_cnt++; + + lpfc_q_unlock_enable(p_dev_ctl, iflag); + return; + +} /* End fc_ringtxp_put */ + + +/*****************************************************************************/ +/* + * NAME: fc_ringtxp_get + * + * FUNCTION: get a packet off the ring pending queue. + * + * EXECUTION ENVIRONMENT: interrupt level. 
+ * + * CALLED FROM: + * fc_els_cmd + * + * INPUT: + * rp - pointer to the ring + * + * RETURNS: + * NULL - no match found + * iocbq pointer - pointer to iocbq which matches the iotag + */ +/*****************************************************************************/ +_static_ IOCBQ * +fc_ringtxp_get( +RING *rp, +ushort iotag) /* tag to match i/o */ +{ + fc_dev_ctl_t * p_dev_ctl; + FC_BRD_INFO * binfo; + IOCBQ * iocbq; /* pointer to iocbq entry */ + IOCBQ * pq; /* pointer to previous iocbq entry */ + IOCBQ * save; /* pointer to iocb entry of chain */ + unsigned long iflag; + + binfo = (FC_BRD_INFO * )rp->fc_binfo; + p_dev_ctl = (fc_dev_ctl_t *)binfo->fc_p_dev_ctl; + pq = 0; + save = 0; + + /* Right now this just loops through the linked list looking + * for a match on iotag. This can get optimized in the future + * to have iotag just index into an array. + */ + iflag = lpfc_q_disable_lock(p_dev_ctl); + iocbq = (IOCBQ * )(rp->fc_txp.q_first); + while (iocbq) { + /* do we match on iotag */ + if ((iocbq->iocb.ulpIoTag == iotag) || (iotag == 0)) { + /* loop past continuation iocbs */ + while (iocbq->iocb.ulpLe == 0) { + rp->fc_txp.q_cnt--; + save = iocbq; + if ((iocbq = (IOCBQ * )iocbq->q) == 0) { + iocbq = save; + break; + } + } + save = iocbq; + iocbq = (IOCBQ * )iocbq->q; + + save->q = 0; /* NULL terminate iocb chain */ + + /* Remove iocbq chain from list, adjust first, last and cnt */ + if (iocbq == 0) + rp->fc_txp.q_last = (uchar * )pq; + + if (pq) { + save = (IOCBQ * )pq->q; + pq->q = (uchar * )iocbq; + } else { + save = (IOCBQ * )rp->fc_txp.q_first; + rp->fc_txp.q_first = (uchar * )iocbq; + } + rp->fc_txp.q_cnt--; + + /* stop watchdog timer */ + if(RINGTMO) { + lpfc_q_unlock_enable(p_dev_ctl, iflag); + fc_clk_can((fc_dev_ctl_t *)(binfo->fc_p_dev_ctl), RINGTMO); + iflag = lpfc_q_disable_lock(p_dev_ctl); + RINGTMO = 0; + } + + /* if xmits are still pending, restart the watchdog timer */ + if (rp->fc_txp.q_cnt > 0) { + /* start watchdog timer */ + if 
(rp->fc_ringno != FC_FCP_RING) { + lpfc_q_unlock_enable(p_dev_ctl, iflag); + RINGTMO = fc_clk_set((fc_dev_ctl_t *)(binfo->fc_p_dev_ctl), + rp->fc_ringtmo, fc_cmdring_timeout, (void *)rp, 0); + iflag = lpfc_q_disable_lock(p_dev_ctl); + } + } + break; + } + + pq = iocbq; + iocbq = (IOCBQ * )iocbq->q; + } + + lpfc_q_unlock_enable(p_dev_ctl, iflag); + return(save); +} /* End fc_ringtxp_get */ + + +/*****************************************************************************/ +/* + * NAME: fc_xmit + * + * FUNCTION: Fibre Channel driver output routine. + * + * EXECUTION ENVIRONMENT: process only + * + * NOTES: + * + * CALLED FROM: + * fc_output fc_intr + * + * INPUT: + * p_dev_ctl - pointer to device information. + * p_mbuf - pointer to a mbuf (chain) for outgoing packets + * + * RETURNS: + * 0 - successful + * EAGAIN - transmit queue is full + */ +/*****************************************************************************/ +int +fc_xmit( +fc_dev_ctl_t *p_dev_ctl, +fcipbuf_t *p_mbuf) +{ + fcipbuf_t * p_cur_mbuf; + fcipbuf_t * buf_tofree; + RING * rp; + FC_BRD_INFO * binfo; + iCfgParam * clp; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if(clp[CFG_NETWORK_ON].a_current == 0) + return(EIO); + + rp = &binfo->fc_ring[FC_IP_RING]; + buf_tofree = fc_txq_put(p_dev_ctl, rp, p_mbuf); + if (NDDSTAT.ndd_xmitque_max < rp->fc_tx.q_cnt) { + NDDSTAT.ndd_xmitque_max = rp->fc_tx.q_cnt; + } + + /* xmit queue was totally full */ + if (buf_tofree == p_mbuf) { + while (p_mbuf) { + NDDSTAT.ndd_xmitque_ovf++; + NDDSTAT.ndd_opackets_drop++; + p_mbuf = fcnextpkt(p_mbuf); + } + + /* send the packet(s) on the xmit queue */ + issue_iocb_cmd(binfo, rp, 0); + + return(EAGAIN); + } + + /* xmit queue could not fit entire chain */ + while ((p_cur_mbuf = buf_tofree) != NULL) { + NDDSTAT.ndd_xmitque_ovf++; + NDDSTAT.ndd_opackets_drop++; + buf_tofree = fcnextpkt(buf_tofree); + fcnextpkt(p_cur_mbuf) = NULL; + m_freem(p_cur_mbuf); + } + + /* send the packet(s) on the xmit queue */ + 
issue_iocb_cmd(binfo, rp, 0); + + return(0); +} /* End fc_xmit */ + + +/*****************************************************************************/ +/* + * NAME: fc_txq_put + * + * FUNCTION: put packets onto the transmit queue. + * + * EXECUTION ENVIRONMENT: process and interrupt level. + * + * NOTES: + * + * CALLED FROM: + * fc_xmit + * + * INPUT: + * p_dev_ctl - pointer to the device information area + * rp - pointer to the device information area + * p_mbuf - pointer to a mbuf chain + * + * RETURNS: + * NULL - all mbufs are queued. + * mbuf pointer - point to a mbuf chain which contains packets + * that overflows the transmit queue. + */ +/*****************************************************************************/ +_local_ fcipbuf_t * +fc_txq_put( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +fcipbuf_t *p_mbuf) /* pointer to a mbuf chain */ +{ + FC_BRD_INFO * binfo; + fcipbuf_t * p_last, *p_over, *p_next; + int room; + + room = rp->fc_tx.q_max - NDDSTAT.ndd_xmitque_cur; + binfo = &BINFO; + if (room > 0) { + p_over = 0; + p_next = p_mbuf; + while (p_next) { + p_last = fcnextpkt(p_next); + fcnextpkt(p_next) = NULL; + if (fc_mbuf_to_iocb(p_dev_ctl, p_next)) { + fcnextpkt(p_next) = p_last; + p_over = p_next; + break; + } + p_next = p_last; + if ( --room <= 0) { + p_over = p_next; + break; + } + } + binfo->fc_flag &= ~FC_NO_ROOM_IP; + } else { + FCSTATCTR.xmitnoroom++; + p_over = p_mbuf; + + if(!(binfo->fc_flag & FC_NO_ROOM_IP)) { + /* No room on IP xmit queue */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0605, /* ptr to msg structure */ + fc_mes0605, /* ptr to msg */ + fc_msgBlk0605.msgPreambleStr, /* begin varargs */ + FCSTATCTR.xmitnoroom); /* end varargs */ + } + binfo->fc_flag |= FC_NO_ROOM_IP; + } + + return(p_over); + +} /* End fc_txq_put */ + + + +/*****************************************************************************/ +/* + * NAME: fc_mbuf_to_iocb + * + * FUNCTION: converts and mbuf into an iocb cmd chain and put on transmit q + * + * 
EXECUTION ENVIRONMENT: process and interrupt + * + * NOTES: + * + * CALLED FROM: + * + * INPUT: + * p_dev_ctl - pointer to the device information area + * p_mbuf - pointer to a packet in mbuf + * + * RETURNS: + * 0 - OK + * -1 - error occurred during transmit + */ +/*****************************************************************************/ +_local_ int +fc_mbuf_to_iocb( +fc_dev_ctl_t *p_dev_ctl, +fcipbuf_t *p_mbuf) /* pointer to the packet in mbuf */ +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + uchar * daddr; + RING * rp; + IOCBQ * temp; + IOCBQ * qhead, * qtail; + IOCB * cmd; + NODELIST * nlp; + fcipbuf_t * p_cur_mbuf; /* pointer to current packet in mbuf */ + fcipbuf_t * m_net; + ushort * sp1, * sp2; + ULP_BDE64 * bpl, * topbpl; + MATCHMAP * bmp; + MATCHMAP * bmphead, *bmptail; + MATCHMAP * savebmp; + void * handle; + emac_t * ep; + NETHDR * np; + int i, j, mapcnt; + int count, firstbuflen; + int num_iocbs, num_bdes, numble; + ushort leftover, xri; + uchar isbcast, ismcast; + + binfo = &BINFO; + rp = &binfo->fc_ring[FC_IP_RING]; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* First get a temporary iocb buffers. temp will be + * used for the first iocb entry XMIT_SEQUENCE, and will + * be used for each successive IOCB_CONTINUE entry. 
+ * qhead will be saved for the return + */ + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == 0) { + m_freem(p_mbuf); + return(0); + } + + fc_bzero((void *)temp, sizeof(IOCBQ)); /* initially zero the iocb entry */ + cmd = &temp->iocb; + mapcnt = 0; + numble = 0; + qhead = 0; + qtail = 0; + leftover = 0; + bmp = 0; + topbpl = 0; + if (binfo->fc_flag & FC_SLI2) { + bmphead = 0; + bmptail = 0; + /* Allocate buffer for Buffer ptr list */ + if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL)) == 0) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + m_freem(p_mbuf); + return(0); + } + bpl = (ULP_BDE64 * )bmp->virt; + cmd->un.xseq64.bdl.ulpIoTag32 = (uint32)0; + cmd->un.xseq64.bdl.addrHigh = (uint32)putPaddrHigh(bmp->phys); + cmd->un.xseq64.bdl.addrLow = (uint32)putPaddrLow(bmp->phys); + cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDL; + temp->bpl = (uchar *)bmp; + } + else { + bpl = 0; + bmphead = 0; + bmptail = 0; + } + + if(lpfc_nethdr == 0) { + ep = (emac_t * )(fcdata(p_mbuf)); + daddr = ep->dest_addr; + + /* We need to convert 802.3 header (14 bytes) into + * fc network header (16 bytes). Since the header is at + * the begining of the buffer, we need to allocate extra space. + */ + + count = fcpktlen(p_mbuf) + 2; /* total data in mbuf after copy */ + firstbuflen = fcdatalen(p_mbuf); + /* Assume first data buffer holds emac and LLC/SNAP at a minimun */ + if (firstbuflen < sizeof(struct fc_hdr )) { + FCSTATCTR.mbufcopy++; + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + if (bmp) { + fc_mem_put(binfo, MEM_BPL, (uchar * ) bmp); + } + m_freem(p_mbuf); + return(0); + } + + + /* Allocate a buffer big enough to hold the Fibre Channel header + * and the LLC/SNAP header. 
+ */ + if ((m_net = (fcipbuf_t * )m_getclustm(M_DONTWAIT, MT_DATA, + (sizeof(NETHDR) + sizeof(snaphdr_t)))) == 0) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + if (bmp) { + fc_mem_put(binfo, MEM_BPL, (uchar * ) bmp); + } + return(EIO); + } + fcsethandle(m_net, 0); + + np = (NETHDR * )fcdata(m_net); + + /* Copy data from emac_t header to network header */ + sp1 = (ushort * ) & np->fc_destname; + *sp1++ = 0; + np->fc_destname.nameType = NAME_IEEE; /* IEEE name */ + sp2 = (ushort * )ep->dest_addr; + + if (*sp2 & SWAP_DATA16(0x8000)) /* Check for multicast */ { + ismcast = 1; + if (*sp2 != 0xffff) /* Check for broadcast */ + isbcast = 0; + else + isbcast = 1; + } else { + ismcast = 0; + isbcast = 0; + } + + /* First copy over the dest IEEE address */ + *sp1++ = *sp2++; + if (isbcast && (*sp2 != 0xffff)) + isbcast = 0; + *sp1++ = *sp2++; + if (isbcast && (*sp2 != 0xffff)) + isbcast = 0; + *sp1++ = *sp2++; + + /* Next copy over the src IEEE address */ + sp1 = (ushort * ) & np->fc_srcname; + *sp1++ = 0; + np->fc_srcname.nameType = NAME_IEEE; /* IEEE name */ + sp2 = (ushort * )binfo->fc_portname.IEEE; + *sp1++ = *sp2++; + *sp1++ = *sp2++; + *sp1++ = *sp2++; + + sp2 = (ushort * )((uchar *)ep + sizeof(emac_t)); + + /* Now Copy LLC/SNAP */ + *sp1++ = *sp2++; + *sp1++ = *sp2++; + *sp1++ = *sp2++; + *sp1++ = *sp2++; + + p_cur_mbuf = m_net; + fcsetdatalen(m_net, (sizeof(NETHDR) + sizeof(snaphdr_t))); + + fcincdatalen(p_mbuf, (-(sizeof(struct fc_hdr )))); + + fcdata(p_mbuf) += sizeof(struct fc_hdr ); + + /* Fixup mbuf chain so data is in line */ + fcnextdata(m_net) = p_mbuf; + } + else { + np = (NETHDR * )(fcdata(((fcipbuf_t * )(p_mbuf)))); + daddr = np->fc_destname.IEEE; + count = fcpktlen(p_mbuf); + p_cur_mbuf = p_mbuf; + m_net = p_mbuf; + + sp2 = (ushort * )daddr; + if (*sp2 & SWAP_DATA16(0x8000)) /* Check for multicast */ { + ismcast = 1; + if (*sp2 != 0xffff) /* Check for broadcast */ + isbcast = 0; + else + isbcast = 1; + } else { + ismcast = 0; + isbcast = 0; + } + } 
+ + num_iocbs = 0; /* count number of iocbs needed to xmit p_mbuf */ + num_bdes = 2; /* Will change to 3 for IOCB_CONTINUE */ + nlp = 0; + + /* + * While there's data left to send and we are not at the end of + * the mbuf chain, put the data from each mbuf in the chain into + * a seperate iocb entry. + */ + while (count && p_cur_mbuf) { + if (binfo->fc_flag & FC_SLI2) { + qhead = temp; + qtail = temp; + /* Set to max number of ULP_BDE64's that fit into a bpl */ + /* Save the last BDE for a continuation ptr, if needed */ + num_bdes = ((FCELSSIZE / sizeof(ULP_BDE64)) - 1); + numble = 0; + if (bmphead == 0) { + bmphead = bmp; + bmptail = bmp; + } else { + bmptail->fc_mptr = (uchar * )bmp; + bmptail = bmp; + } + bmp->fc_mptr = 0; + } else { + if (qhead == 0) { + qhead = temp; + qtail = temp; + } else { + qtail->q = (uchar * )temp; + qtail = temp; + } + } + temp->q = 0; + /* + * copy data pointers into iocb entry + */ + for (i = 0; i < num_bdes; i++) { + /* Skip mblk's with 0 data length */ + while (p_cur_mbuf && (fcdatalen(p_cur_mbuf) == 0)) + p_cur_mbuf = fcnextdata(p_cur_mbuf); /* goto next mbuf in chain */ + + if ((count <= 0) || (p_cur_mbuf == 0)) + break; + + if (leftover == 0) { + mapcnt = fc_bufmap(p_dev_ctl, (uchar * )(fcdata(p_cur_mbuf)), + (uint32)fcdatalen(p_cur_mbuf), binfo->physaddr, binfo->cntaddr, &handle); + + /* fill in BDEs for command */ + if (mapcnt <= 0) { + cmd->ulpBdeCount = i; + goto out; + } + + /* Save dmahandle if one was returned */ + fcsethandle(p_cur_mbuf, handle); + } + + for (j = leftover; j < mapcnt; j++) { + if ((i + j - leftover) >= num_bdes) { + i = num_bdes; + leftover = j; + goto lim; + } + if (binfo->fc_flag & FC_SLI2) { + bpl->addrHigh = (uint32)putPaddrHigh(binfo->physaddr[j]); + bpl->addrHigh = PCIMEM_LONG(bpl->addrHigh); + bpl->addrLow = (uint32)putPaddrLow(binfo->physaddr[j]); + bpl->addrLow = PCIMEM_LONG(bpl->addrLow); + bpl->tus.f.bdeSize = binfo->cntaddr[j]; + bpl->tus.f.bdeFlags = BDE64_SIZE_WORD; + bpl->tus.w = 
PCIMEM_LONG(bpl->tus.w); + bpl++; + numble++; + } else { + cmd->un.cont[i+j-leftover].bdeAddress = (uint32)putPaddrLow(binfo->physaddr[j]); + cmd->un.cont[i+j-leftover].bdeSize = binfo->cntaddr[j]; + cmd->un.cont[i+j-leftover].bdeAddrHigh = 0; + cmd->un.cont[i+j-leftover].bdeReserved = 0; + } + } + + i = i + j - leftover - 1; + count -= fcdatalen(p_cur_mbuf); /* adjust count of data left */ + leftover = 0; + p_cur_mbuf = fcnextdata(p_cur_mbuf); /* goto next mbuf in chain */ + } + +lim: + /* Fill in rest of iocb entry, all non-zero fields */ + + cmd->ulpBdeCount = i; + + /* Setup command to use accordingly */ + if (++num_iocbs > 1) { + if (!(binfo->fc_flag & FC_SLI2)) { + cmd->ulpCommand = CMD_IOCB_CONTINUE_CN; + temp->bp = 0; + temp->info = 0; + } + } else { + /* set up an iotag so we can match the completion to an iocb/mbuf */ + cmd->ulpIoTag = rp->fc_iotag++; + if (rp->fc_iotag == 0) { + rp->fc_iotag = 1; + } + + /* Setup fibre channel header information */ + cmd->un.xrseq.w5.hcsw.Fctl = 0; + cmd->un.xrseq.w5.hcsw.Dfctl = FC_NET_HDR; /* network headers */ + cmd->un.xrseq.w5.hcsw.Rctl = FC_UNSOL_DATA; + cmd->un.xrseq.w5.hcsw.Type = FC_LLC_SNAP; + + if (isbcast) { + if (++NDDSTAT.ndd_ifOutBcastPkts_lsw == 0) + NDDSTAT.ndd_ifOutBcastPkts_msw++; + if (binfo->fc_flag & FC_SLI2) + cmd->ulpCommand = CMD_XMIT_BCAST64_CN; + else + cmd->ulpCommand = CMD_XMIT_BCAST_CN; + cmd->ulpContext = 0; + nlp = 0; + } else if (ismcast) { + if (++NDDSTAT.ndd_ifOutMcastPkts_lsw == 0) + NDDSTAT.ndd_ifOutMcastPkts_msw++; + if (binfo->fc_flag & FC_SLI2) + cmd->ulpCommand = CMD_XMIT_BCAST64_CN; + else + cmd->ulpCommand = CMD_XMIT_BCAST_CN; + cmd->ulpContext = 0; + nlp = 0; + } else { + if (++NDDSTAT.ndd_ifOutUcastPkts_lsw == 0) + NDDSTAT.ndd_ifOutUcastPkts_msw++; + + /* data from upper layer has a full MAC header on it. We + * need to match the destination address with the portname + * field in our nlp table to determine if we already have an + * exchange opened to this destination. 
+ */ + if (((xri = fc_emac_lookup(binfo, daddr, &nlp)) != 0) && + !(nlp->nlp_action & NLP_DO_RSCN) && + (nlp->nlp_bp == 0)) { + /* exchange to destination already exists */ + if (binfo->fc_flag & FC_SLI2) + cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; + else + cmd->ulpCommand = CMD_XMIT_SEQUENCE_CX; + cmd->ulpContext = xri; + nlp->nlp_type |= NLP_IP_NODE; + } else { /* need to wait for exchange to destination */ + FCSTATCTR.frameXmitDelay++; + cmd->ulpCommand = 0; + cmd->ulpContext = 0; + + if ((binfo->fc_flag & FC_LNK_DOWN) || + (binfo->fc_ffstate < rp->fc_xmitstate)) + goto out; + + if (nlp == 0) { + /* A partial entry doesn't even exist, so initiate + * ELS login by sending a FARP + */ + /* Add FARP code here */ + fc_els_cmd(binfo, ELS_CMD_FARP, (void *)daddr, + (uint32)0, (ushort)0, (NODELIST *)0); + } else { + if ((nlp->nlp_DID != Bcast_DID) && + !(nlp->nlp_action & NLP_DO_ADDR_AUTH) && + !(nlp->nlp_action & NLP_DO_RSCN) && + !(nlp->nlp_flag & (NLP_FARP_SND | NLP_REQ_SND | NLP_RPI_XRI))) { + /* If a cached entry exists, PLOGI first */ + if ((nlp->nlp_state == NLP_LIMBO) || + (nlp->nlp_state == NLP_LOGOUT)) { + fc_els_cmd(binfo, ELS_CMD_PLOGI, + (void *)((ulong)nlp->nlp_DID), (uint32)0, (ushort)0, nlp); + } + /* establish a new exchange */ + if ((nlp->nlp_Rpi) && (nlp->nlp_Xri == 0)) { + nlp->nlp_flag |= NLP_RPI_XRI; + fc_create_xri(binfo, &binfo->fc_ring[FC_ELS_RING], nlp); + } + } + } + + cmd = &temp->iocb; + if (binfo->fc_flag & FC_SLI2) { + while (bpl && (bpl != (ULP_BDE64 * )bmp->virt)) { + bpl--; + fc_bufunmap(p_dev_ctl, + (uchar *)getPaddr(bpl->addrHigh, bpl->addrLow), 0, bpl->tus.f.bdeSize); + } + if (bmp) { + fc_mem_put(binfo, MEM_BPL, (uchar * ) bmp); + } + } else { + for (i = 0; i < (int)cmd->ulpBdeCount; i++) { + fc_bufunmap(p_dev_ctl, (uchar *)((ulong)cmd->un.cont[i].bdeAddress), 0, (uint32)cmd->un.cont[i].bdeSize); + } + } + if(lpfc_nethdr == 0) { + /* Free Resources */ + fcnextdata(m_net) = 0; + fcfreehandle(p_dev_ctl, m_net); + m_freem(m_net); + + 
/* Put p_mbuf back the way it was, without NETHDR */ + fcincdatalen(p_mbuf, sizeof(struct fc_hdr )); + fcdata(p_mbuf) -= sizeof(struct fc_hdr ); + } + + fcfreehandle(p_dev_ctl, p_mbuf); + + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + + /* save buffer till ELS login completes */ + + if (nlp == 0) { + m_freem(p_mbuf); + return(0); + } + + if (nlp->nlp_bp == 0) { + nlp->nlp_bp = (uchar * )p_mbuf; + } else { + /* Only keep one mbuf chain per node "on deck" */ + p_cur_mbuf = (fcipbuf_t * )nlp->nlp_bp; + nlp->nlp_bp = (uchar * )p_mbuf; + m_freem(p_cur_mbuf); + } + return(0); + } + cmd->ulpClass = nlp->id.nlp_ip_info; + } + + num_bdes = 3; /* in case IOCB_CONTINUEs are needed */ + temp->bp = (uchar * )m_net; + temp->info = (uchar * )nlp; + } + + cmd->ulpOwner = OWN_CHIP; + + /* is this the last iocb entry we will need */ + if ((count == 0) || (p_cur_mbuf == 0)) { + temp = 0; + cmd->ulpLe = 1; + /* if so queue cmd chain to last iocb entry in xmit queue */ + if (rp->fc_tx.q_first == 0) { + rp->fc_tx.q_first = (uchar * )qhead; + rp->fc_tx.q_last = (uchar * )qtail; + } else { + ((IOCBQ * )(rp->fc_tx.q_last))->q = (uchar * )qhead; + rp->fc_tx.q_last = (uchar * )qtail; + } + rp->fc_tx.q_cnt += num_iocbs; + NDDSTAT.ndd_xmitque_cur++; + break; + } else { + cmd->ulpLe = 0; + } + + /* get another iocb entry buffer */ + if (binfo->fc_flag & FC_SLI2) { + /* Allocate buffer for Buffer ptr list */ + if ((bmp = (MATCHMAP * )fc_mem_get(binfo, MEM_BPL)) == 0) { + goto out; + } + /* Fill in continuation entry to next bpl */ + bpl->addrHigh = (uint32)putPaddrHigh(bmp->phys); + bpl->addrHigh = PCIMEM_LONG(bpl->addrHigh); + bpl->addrLow = (uint32)putPaddrLow(bmp->phys); + bpl->addrLow = PCIMEM_LONG(bpl->addrLow); + bpl->tus.f.bdeFlags = BPL64_SIZE_WORD; + numble++; + if (num_iocbs == 1) { + cmd->un.xseq64.bdl.bdeSize = (numble * sizeof(ULP_BDE64)); + } else { + topbpl->tus.f.bdeSize = (numble * sizeof(ULP_BDE64)); + topbpl->tus.w = PCIMEM_LONG(topbpl->tus.w); + } + topbpl = bpl; + bpl = 
(ULP_BDE64 * )bmp->virt; + leftover = 0; + } else { + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == 0) { +out: + /* no more available, so toss mbuf by freeing + * resources associated with qhead + */ + if (binfo->fc_flag & FC_SLI2) { + num_bdes = ((FCELSSIZE / sizeof(ULP_BDE64)) - 1); + bmp = bmphead; + while (bmp) { + i = 0; + bpl = (ULP_BDE64 * )bmp->virt; + while (bpl && (i < num_bdes)) { + bpl++; + i++; + fc_bufunmap(p_dev_ctl, + (uchar *)getPaddr(bpl->addrHigh, bpl->addrLow), 0, bpl->tus.f.bdeSize); + } + savebmp = (MATCHMAP * )bmp->fc_mptr; + if (bmp) { + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + } + bmp = savebmp; + } + + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + } else { + while (qhead) { + temp = qhead; + cmd = &temp->iocb; + for (i = 0; i < (int)cmd->ulpBdeCount; i++) { + fc_bufunmap(p_dev_ctl, (uchar *)((ulong)cmd->un.cont[i].bdeAddress), 0, (uint32)cmd->un.cont[i].bdeSize); + } + qhead = (IOCBQ * )temp->q; + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + } + } + + if(lpfc_nethdr == 0) { + fcnextdata(m_net) = 0; + fcfreehandle(p_dev_ctl, m_net); + m_freem(m_net); + + /* Put p_mbuf back the way it was, without NETHDR */ + fcincdatalen(p_mbuf, sizeof(struct fc_hdr )); + fcdata(p_mbuf) -= sizeof(struct fc_hdr ); + } + + fcfreehandle(p_dev_ctl, p_mbuf); + + if (binfo->fc_flag & FC_SLI2) { + m_freem(p_mbuf); + return(0); + } + return(EIO); + } + fc_bzero((void *)temp, sizeof(IOCBQ)); + cmd = &temp->iocb; + } + } + + if (binfo->fc_flag & FC_SLI2) { + bpl->addrHigh = 0; + bpl->addrLow = 0; + bpl->tus.w = 0; + cmd->ulpBdeCount = 1; + if (num_iocbs == 1) { + cmd->un.xseq64.bdl.bdeSize = (numble * sizeof(ULP_BDE64)); + } else { + topbpl->tus.f.bdeSize = (numble * sizeof(ULP_BDE64)); + topbpl->tus.w = PCIMEM_LONG(topbpl->tus.w); + } + } + + if (temp) + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + + return(0); +} /* End fc_mbuf_to_iocb */ + + + +/**********************************************/ +/** handle_xmit_cmpl **/ +/** **/ +/** Process all 
transmit completions **/ +/** **/ +/**********************************************/ +_static_ int +handle_xmit_cmpl( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *temp) +{ + FC_BRD_INFO * binfo; + IOCB * cmd; + IOCBQ * xmitiq; + IOCBQ * save; + NODELIST * nlp; + fcipbuf_t * p_mbuf; + fcipbuf_t * m_net; + int i, cnt; + ULP_BDE64 * bpl; + MATCHMAP * bmp; + DMATCHMAP * indmp; + + cmd = &temp->iocb; + binfo = &BINFO; + if (++NDDSTAT.ndd_xmitintr_lsw == 0) { + NDDSTAT.ndd_xmitintr_msw++; + } + + /* look up xmit compl by IoTag */ + if ((xmitiq = fc_ringtxp_get(rp, cmd->ulpIoTag)) == 0) { + FCSTATCTR.strayXmitCmpl++; + /* Stray XmitSequence completion */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0606, /* ptr to msg structure */ + fc_mes0606, /* ptr to msg */ + fc_msgBlk0606.msgPreambleStr, /* begin varargs */ + cmd->ulpCommand, + cmd->ulpIoTag); /* end varargs */ + /* completion with missing xmit command */ + return(EIO); + } + + if (rp->fc_ringno == FC_ELS_RING) { + indmp = (DMATCHMAP * )xmitiq->bp; + if (cmd->ulpStatus) { + indmp->dfc_flag = -1; + } + else { + indmp->dfc_flag = xmitiq->iocb.un.xseq64.bdl.bdeSize; + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + return(0); + } + + + NDDSTAT.ndd_xmitque_cur--; + + /* get mbuf ptr for completed xmit */ + m_net = (fcipbuf_t * )xmitiq->bp; + + /* check for first xmit completion in sequence */ + nlp = (NODELIST * ) xmitiq->info; + + if (cmd->ulpStatus) { + uint32 did = 0; + + NDDSTAT.ndd_oerrors++; + + if (nlp) + did = nlp->nlp_DID; + /* Xmit Sequence completion error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0607, /* ptr to msg structure */ + fc_mes0607, /* ptr to msg */ + fc_msgBlk0607.msgPreambleStr, /* begin varargs */ + cmd->ulpStatus, + cmd->ulpIoTag, + cmd->un.ulpWord[4], + did); /* end varargs */ + if (nlp && (nlp->nlp_state >= NLP_LOGIN)) { + /* If XRI in xmit sequence with status error matches XRI + * in nlplist entry, we need to create a new one. 
+ */ + if ((nlp->nlp_Xri == cmd->ulpContext) && + !(nlp->nlp_flag & NLP_RPI_XRI)) { + /* on xmit error, exchange is aborted */ + nlp->nlp_Xri = 0; /* xri */ + /* establish a new exchange */ + if ((nlp->nlp_Rpi) && + (binfo->fc_ffstate == FC_READY)) { + nlp->nlp_flag |= NLP_RPI_XRI; + fc_create_xri(binfo, &binfo->fc_ring[FC_ELS_RING], nlp); + } + } + } + } else { + if (++NDDSTAT.ndd_opackets_lsw == 0) + NDDSTAT.ndd_opackets_msw++; + + if (m_net && + ((nlp && ((nlp->nlp_DID & CT_DID_MASK) != CT_DID_MASK)) || + (xmitiq->iocb.ulpCommand == CMD_XMIT_BCAST_CX))) { + + if(lpfc_nethdr == 0) { + p_mbuf = fcnextdata(m_net); + cnt = fcpktlen(p_mbuf); + } + else { + p_mbuf = m_net; + cnt = fcpktlen(p_mbuf) - sizeof(NETHDR); /* total data in mbuf */ + } + + NDDSTAT.ndd_obytes_lsw += cnt; + if ((int)NDDSTAT.ndd_obytes_lsw < cnt) + NDDSTAT.ndd_obytes_msw++; + } + } + + if (nlp && (nlp->nlp_DID == NameServer_DID)) { + MATCHMAP * mp; + + mp = (MATCHMAP * )m_net; + if (binfo->fc_flag & FC_SLI2) { + fc_mem_put(binfo, MEM_BPL, (uchar * )xmitiq->bpl); + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + return(0); + } + + /* Loop through iocb chain and unmap memory pages associated with mbuf */ + if (binfo->fc_flag & FC_SLI2) { + MATCHMAP * savebmp; + int cnt; + + bmp = (MATCHMAP * )xmitiq->bpl; + cnt = xmitiq->iocb.un.xseq64.bdl.bdeSize; + while (bmp) { + bpl = (ULP_BDE64 * )bmp->virt; + while (bpl && cnt) { + bpl->addrHigh = PCIMEM_LONG(bpl->addrHigh); + bpl->addrLow = PCIMEM_LONG(bpl->addrLow); + bpl->tus.w = PCIMEM_LONG(bpl->tus.w); + switch (bpl->tus.f.bdeFlags) { + case BPL64_SIZE_WORD: + cnt = bpl->tus.f.bdeSize; + bpl = 0; + break; + case BDE64_SIZE_WORD: + fc_bufunmap(p_dev_ctl, (uchar *)getPaddr(bpl->addrHigh, bpl->addrLow), 0, bpl->tus.f.bdeSize); + bpl++; + cnt -= sizeof(ULP_BDE64); + break; + default: + bpl = 0; + cnt = 0; + break; + } + } + savebmp = (MATCHMAP * )bmp->fc_mptr; + fc_mem_put(binfo, MEM_BPL, (uchar * )bmp); + bmp 
= savebmp; + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + } else { + while (xmitiq) { + for (i = 0; i < (int)xmitiq->iocb.ulpBdeCount; i++) { + fc_bufunmap(p_dev_ctl, (uchar *)((ulong)xmitiq->iocb.un.cont[i].bdeAddress), 0, (uint32)xmitiq->iocb.un.cont[i].bdeSize); + } + save = (IOCBQ * )xmitiq->q; + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + xmitiq = save; + } + } + + /* free mbuf */ + if (m_net) { + if(lpfc_nethdr == 0) { + p_mbuf = fcnextdata(m_net); + fcnextdata(m_net) = 0; + fcfreehandle(p_dev_ctl, m_net); + m_freem(m_net); + + /* Put p_mbuf back the way it was, without NETHDR */ + fcincdatalen(p_mbuf, sizeof(struct fc_hdr )); + + fcdata(p_mbuf) -= sizeof(struct fc_hdr ); + } + else { + p_mbuf = m_net; + } + + fcfreehandle(p_dev_ctl, p_mbuf); + m_freem(p_mbuf); + } + + fc_restartio(p_dev_ctl, nlp); + + return(0); +} /* End handle_xmit_cmpl */ + + +/* + * Issue an iocb command to create an exchange with the remote + * specified by the NODELIST entry. + */ +_static_ int +fc_create_xri( +FC_BRD_INFO *binfo, +RING *rp, +NODELIST *nlp) +{ + IOCB * icmd; + IOCBQ * temp; + + /* While there are buffers to post */ + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB)) == 0) { + return(1); + } + fc_bzero((void *)temp, sizeof(IOCBQ)); + icmd = &temp->iocb; + + /* set up an iotag so we can match the completion to an iocb/mbuf */ + icmd->ulpIoTag = rp->fc_iotag++; + if (rp->fc_iotag == 0) { + rp->fc_iotag = 1; + } + icmd->ulpContext = nlp->nlp_Rpi; + icmd->ulpLe = 1; + + icmd->ulpCommand = CMD_CREATE_XRI_CR; + icmd->ulpOwner = OWN_CHIP; + + temp->bp = (uchar * )nlp; /* used for delimiter between commands */ + + FCSTATCTR.cmdCreateXri++; + + issue_iocb_cmd(binfo, rp, temp); + return(0); +} /* End fc_create_xri */ + + +/* + * Process a create_xri command completion. 
+ */ +_static_ int +handle_create_xri( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *temp) +{ + FC_BRD_INFO * binfo; + IOCB * cmd; + NODELIST * nlp; + IOCBQ * xmitiq; + + cmd = &temp->iocb; + binfo = &BINFO; + /* look up xmit compl by IoTag */ + if ((xmitiq = fc_ringtxp_get(rp, cmd->ulpIoTag)) == 0) { + FCSTATCTR.strayXmitCmpl++; + /* Stray CreateXRI completion */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0608, /* ptr to msg structure */ + fc_mes0608, /* ptr to msg */ + fc_msgBlk0608.msgPreambleStr, /* begin varargs */ + cmd->ulpCommand, + cmd->ulpIoTag); /* end varargs */ + /* completion with missing xmit command */ + return(EIO); + } + + /* check for first xmit completion in sequence */ + nlp = (NODELIST * ) xmitiq->bp; + + if (cmd->ulpStatus) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + + nlp->nlp_flag &= ~NLP_RPI_XRI; + + fc_freenode_did(binfo, nlp->nlp_DID, 0); + + FCSTATCTR.xriStatErr++; + return(EIO); + } + + FCSTATCTR.xriCmdCmpl++; + + nlp->nlp_Xri = cmd->ulpContext; + nlp->nlp_flag &= ~NLP_RPI_XRI; + + fc_mem_put(binfo, MEM_IOCB, (uchar * )xmitiq); + + fc_restartio(p_dev_ctl, nlp); + return(0); +} /* End handle_create_xri */ + + +_static_ void +fc_restartio( +fc_dev_ctl_t *p_dev_ctl, +NODELIST *nlp) +{ + FC_BRD_INFO * binfo; + RING * rp; + fcipbuf_t * p_cur_mbuf; + fcipbuf_t * buf_tofree; + + binfo = &BINFO; + rp = &binfo->fc_ring[FC_IP_RING]; + + if (nlp) { + if ((nlp->nlp_bp) && (nlp->nlp_Xri)) { + p_cur_mbuf = (fcipbuf_t * )nlp->nlp_bp; + nlp->nlp_bp = 0; + buf_tofree = fc_txq_put(p_dev_ctl, rp, p_cur_mbuf); + while ((p_cur_mbuf = buf_tofree) != 0) { + NDDSTAT.ndd_opackets_drop++; + buf_tofree = fcnextpkt(buf_tofree); + fcnextpkt(p_cur_mbuf) = NULL; + m_freem(p_cur_mbuf); + } + } + } + + /* Is there a xmit waiting to be started */ + if (rp->fc_tx.q_first) { + /* If so, start it */ + issue_iocb_cmd(binfo, rp, 0); + } + + /* If needed */ +} /* End fc_restartio */ + + diff -purN -X /home/mbligh/.diff.exclude 
reference/drivers/scsi/lpfc/hbaapi.h current/drivers/scsi/lpfc/hbaapi.h --- reference/drivers/scsi/lpfc/hbaapi.h 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/hbaapi.h 2004-04-09 11:53:04.000000000 -0700 @@ -0,0 +1,311 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +#ifndef HBA_API_H +#define HBA_API_H + +/* Library version string */ +#define HBA_LIBVERSION 1 + +/* DLL imports for WIN32 operation */ +#define HBA_API + +/* OS specific definitions */ + + +typedef unsigned char HBA_UINT8; /* Unsigned 8 bits */ +typedef char HBA_INT8; /* Signed 8 bits */ +typedef unsigned short HBA_UINT16; /* Unsigned 16 bits */ +typedef short HBA_INT16; /* Signed 16 bits */ +typedef unsigned int HBA_UINT32; /* Unsigned 32 bits */ +typedef int HBA_INT32; /* Signed 32 bits */ +typedef void* HBA_PVOID; /* Pointer to void */ +typedef HBA_UINT32 HBA_VOID32; /* Opaque 32 bits */ +typedef long long HBA_INT64; +typedef long long HBA_UINT64; + + + +/* 4.2.1 Handle to Device */ +typedef HBA_UINT32 HBA_HANDLE; + +/* 4.2.2 Status Return Values */ +typedef HBA_UINT32 HBA_STATUS; + +#define HBA_STATUS_OK 0 +#define HBA_STATUS_ERROR 1 /* Error */ +#define HBA_STATUS_ERROR_NOT_SUPPORTED 2 /* Function not supported.*/ +#define HBA_STATUS_ERROR_INVALID_HANDLE 3 /* invalid handle */ +#define HBA_STATUS_ERROR_ARG 4 /* Bad argument */ +#define HBA_STATUS_ERROR_ILLEGAL_WWN 5 /* WWN not recognized */ +#define HBA_STATUS_ERROR_ILLEGAL_INDEX 6 /* Index not recognized */ +#define HBA_STATUS_ERROR_MORE_DATA 7 /* Larger buffer required */ +#define HBA_STATUS_ERROR_STALE_DATA 8 /* need a refresh */ + + + +/* 4.2.3 Port Operational Modes Values */ + +typedef HBA_UINT32 HBA_PORTTYPE; + +#define HBA_PORTTYPE_UNKNOWN 1 /* Unknown */ +#define HBA_PORTTYPE_OTHER 2 /* Other */ +#define HBA_PORTTYPE_NOTPRESENT 3 /* Not present */ +#define HBA_PORTTYPE_NPORT 5 /* Fabric */ +#define HBA_PORTTYPE_NLPORT 6 /* Public Loop */ +#define HBA_PORTTYPE_FLPORT 7 /* Fabric Loop Port */ +#define HBA_PORTTYPE_FPORT 8 /* Fabric Port */ +#define HBA_PORTTYPE_EPORT 9 /* Fabric expansion port */ +#define HBA_PORTTYPE_GPORT 10 /* Generic Fabric Port */ +#define HBA_PORTTYPE_LPORT 20 /* Private Loop */ +#define HBA_PORTTYPE_PTP 
21 /* Point to Point */ + + +typedef HBA_UINT32 HBA_PORTSTATE; +#define HBA_PORTSTATE_UNKNOWN 1 /* Unknown */ +#define HBA_PORTSTATE_ONLINE 2 /* Operational */ +#define HBA_PORTSTATE_OFFLINE 3 /* User Offline */ +#define HBA_PORTSTATE_BYPASSED 4 /* Bypassed */ +#define HBA_PORTSTATE_DIAGNOSTICS 5 /* In diagnostics mode */ +#define HBA_PORTSTATE_LINKDOWN 6 /* Link Down */ +#define HBA_PORTSTATE_ERROR 7 /* Port Error */ +#define HBA_PORTSTATE_LOOPBACK 8 /* Loopback */ + + +typedef HBA_UINT32 HBA_PORTSPEED; +#define HBA_PORTSPEED_1GBIT 1 /* 1 GBit/sec */ +#define HBA_PORTSPEED_2GBIT 2 /* 2 GBit/sec */ +#define HBA_PORTSPEED_10GBIT 4 /* 10 GBit/sec */ + + + +/* 4.2.4 Class of Service Values - See GS-2 Spec.*/ + +typedef HBA_UINT32 HBA_COS; + + +/* 4.2.5 Fc4Types Values */ + +typedef struct HBA_fc4types { + HBA_UINT8 bits[32]; /* 32 bytes of FC-4 per GS-2 */ +} HBA_FC4TYPES, *PHBA_FC4TYPES; + +/* 4.2.6 Basic Types */ + +typedef struct HBA_wwn { + HBA_UINT8 wwn[8]; +} HBA_WWN, *PHBA_WWN; + +typedef struct HBA_ipaddress { + int ipversion; /* see enumerations in RNID */ + union + { + unsigned char ipv4address[4]; + unsigned char ipv6address[16]; + } ipaddress; +} HBA_IPADDRESS, *PHBA_IPADDRESS; + +/* 4.2.7 Adapter Attributes */ +typedef struct hba_AdapterAttributes { + char Manufacturer[64]; /*Emulex */ + char SerialNumber[64]; /* A12345 */ + char Model[256]; /* QLA2200 */ + char ModelDescription[256]; /* Agilent TachLite */ + HBA_WWN NodeWWN; + char NodeSymbolicName[256]; /* From GS-3 */ + char HardwareVersion[256]; /* Vendor use */ + char DriverVersion[256]; /* Vendor use */ + char OptionROMVersion[256]; /* Vendor use - i.e. hardware boot ROM*/ + char FirmwareVersion[256]; /* Vendor use */ + HBA_UINT32 VendorSpecificID; /* Vendor specific */ + HBA_UINT32 NumberOfPorts; + char DriverName[256]; /* Binary path and/or name of driver file. 
*/ +} HBA_ADAPTERATTRIBUTES, *PHBA_ADAPTERATTRIBUTES; + +/* 4.2.8 Port Attributes */ +typedef struct HBA_PortAttributes { + HBA_WWN NodeWWN; + HBA_WWN PortWWN; + HBA_UINT32 PortFcId; + HBA_PORTTYPE PortType; /*PTP, Fabric, etc. */ + HBA_PORTSTATE PortState; + HBA_COS PortSupportedClassofService; + HBA_FC4TYPES PortSupportedFc4Types; + HBA_FC4TYPES PortActiveFc4Types; + char PortSymbolicName[256]; + char OSDeviceName[256]; /* \device\ScsiPort3 */ + HBA_PORTSPEED PortSupportedSpeed; + HBA_PORTSPEED PortSpeed; + HBA_UINT32 PortMaxFrameSize; + HBA_WWN FabricName; + HBA_UINT32 NumberofDiscoveredPorts; +} HBA_PORTATTRIBUTES, *PHBA_PORTATTRIBUTES; + + + +/* 4.2.9 Port Statistics */ + +typedef struct HBA_PortStatistics { + HBA_INT64 SecondsSinceLastReset; + HBA_INT64 TxFrames; + HBA_INT64 TxWords; + HBA_INT64 RxFrames; + HBA_INT64 RxWords; + HBA_INT64 LIPCount; + HBA_INT64 NOSCount; + HBA_INT64 ErrorFrames; + HBA_INT64 DumpedFrames; + HBA_INT64 LinkFailureCount; + HBA_INT64 LossOfSyncCount; + HBA_INT64 LossOfSignalCount; + HBA_INT64 PrimitiveSeqProtocolErrCount; + HBA_INT64 InvalidTxWordCount; + HBA_INT64 InvalidCRCCount; +} HBA_PORTSTATISTICS, *PHBA_PORTSTATISTICS; + + + +/* 4.2.10 FCP Attributes */ + +typedef enum HBA_fcpbindingtype { TO_D_ID, TO_WWN } HBA_FCPBINDINGTYPE; + +typedef struct HBA_ScsiId { + char OSDeviceName[256]; /* \device\ScsiPort3 */ + HBA_UINT32 ScsiBusNumber; /* Bus on the HBA */ + HBA_UINT32 ScsiTargetNumber; /* SCSI Target ID to OS */ + HBA_UINT32 ScsiOSLun; +} HBA_SCSIID, *PHBA_SCSIID; + +typedef struct HBA_FcpId { + HBA_UINT32 FcId; + HBA_WWN NodeWWN; + HBA_WWN PortWWN; + HBA_UINT64 FcpLun; +} HBA_FCPID, *PHBA_FCPID; + +typedef struct HBA_FcpScsiEntry { + HBA_SCSIID ScsiId; + HBA_FCPID FcpId; +} HBA_FCPSCSIENTRY, *PHBA_FCPSCSIENTRY; + +typedef struct HBA_FCPTargetMapping { + HBA_UINT32 NumberOfEntries; + HBA_FCPSCSIENTRY entry[1]; /* Variable length array containing mappings*/ +} HBA_FCPTARGETMAPPING, *PHBA_FCPTARGETMAPPING; + +typedef struct 
HBA_FCPBindingEntry { + HBA_FCPBINDINGTYPE type; + HBA_SCSIID ScsiId; + HBA_FCPID FcpId; /* WWN valid only if type is to WWN, FcpLun always valid */ + HBA_UINT32 FcId; /* valid only if type is to DID */ +} HBA_FCPBINDINGENTRY, *PHBA_FCPBINDINGENTRY; + +typedef struct HBA_FCPBinding { + HBA_UINT32 NumberOfEntries; + HBA_FCPBINDINGENTRY entry[1]; /* Variable length array */ +} HBA_FCPBINDING, *PHBA_FCPBINDING; + +/* 4.2.11 FC-3 Management Attributes */ + +typedef enum HBA_wwntype { NODE_WWN, PORT_WWN } HBA_WWNTYPE; + +typedef struct HBA_MgmtInfo { + HBA_WWN wwn; + HBA_UINT32 unittype; + HBA_UINT32 PortId; + HBA_UINT32 NumberOfAttachedNodes; + HBA_UINT16 IPVersion; + HBA_UINT16 UDPPort; + HBA_UINT8 IPAddress[16]; + HBA_UINT16 reserved; + HBA_UINT16 TopologyDiscoveryFlags; +} HBA_MGMTINFO, *PHBA_MGMTINFO; + +#define HBA_EVENT_LIP_OCCURRED 1 +#define HBA_EVENT_LINK_UP 2 +#define HBA_EVENT_LINK_DOWN 3 +#define HBA_EVENT_LIP_RESET_OCCURRED 4 +#define HBA_EVENT_RSCN 5 +#define HBA_EVENT_PROPRIETARY 0xFFFF + +typedef struct HBA_Link_EventInfo { + HBA_UINT32 PortFcId; /* Port which this event occurred */ + HBA_UINT32 Reserved[3]; +} HBA_LINK_EVENTINFO, *PHBA_LINK_EVENTINFO; + +typedef struct HBA_RSCN_EventInfo { + HBA_UINT32 PortFcId; /* Port which this event occurred */ + HBA_UINT32 NPortPage; /* Reference FC-FS for RSCN ELS "Affected N-Port Pages"*/ + HBA_UINT32 Reserved[2]; +} HBA_RSCN_EVENTINFO, *PHBA_RSCN_EVENTINFO; + +typedef struct HBA_Pty_EventInfo { + HBA_UINT32 PtyData[4]; /* Proprietary data */ +} HBA_PTY_EVENTINFO, *PHBA_PTY_EVENTINFO; + +typedef struct HBA_EventInfo { + HBA_UINT32 EventCode; + union { + HBA_LINK_EVENTINFO Link_EventInfo; + HBA_RSCN_EVENTINFO RSCN_EventInfo; + HBA_PTY_EVENTINFO Pty_EventInfo; + } Event; +} HBA_EVENTINFO, *PHBA_EVENTINFO; + +/* Used for OSDeviceName */ +typedef struct HBA_osdn { + char drvname[32]; + HBA_UINT32 instance; + HBA_UINT32 target; + HBA_UINT32 lun; + HBA_UINT32 bus; + char flags; + char sizeSN; + char InquirySN[32]; +}
HBA_OSDN; + +/* Function Prototypes */ +#if (!defined(_KERNEL) && !defined(__KERNEL__)) +uint32 GetAdapterAttributes(uint32, HBA_ADAPTERATTRIBUTES *); +uint32 GetAdapterPortAttributes(uint32, HBA_UINT32, HBA_PORTATTRIBUTES *); +uint32 GetPortStatistics(uint32, HBA_UINT32, HBA_PORTSTATISTICS *); +uint32 GetDiscoveredPortAttributes(uint32, HBA_UINT32, HBA_UINT32, HBA_PORTATTRIBUTES *); +uint32 GetPortAttributesByWWN(uint32, HBA_WWN *, HBA_PORTATTRIBUTES *); +uint32 GetPortAttributesByIndex(uint32, HBA_UINT32, HBA_UINT32, HBA_PORTATTRIBUTES *); +uint32 GetEventBuffer(uint32, PHBA_EVENTINFO, HBA_UINT32 *); +uint32 SetRNIDMgmtInfo(uint32, HBA_MGMTINFO *); +uint32 GetRNIDMgmtInfo(uint32, HBA_MGMTINFO *); +uint32 SendRNID(uint32, HBA_WWN *, HBA_WWNTYPE, void *, HBA_UINT32 *); +void ResetStatistics(uint32, HBA_UINT32); +uint32 RefreshInformation(uint32); +uint32 GetFcpTargetMapping(uint32, PHBA_FCPTARGETMAPPING); +uint32 GetFcpPersistentBinding(uint32, PHBA_FCPBINDING); +uint32 SendCTPassThru(uint32, void *, HBA_UINT32, void *, HBA_UINT32 *); +uint32 SendReportLUNs(uint32, HBA_WWN *, void *, HBA_UINT32 *, void *, + HBA_UINT32 *); +uint32 SendReadCapacity(uint32, HBA_WWN *, HBA_UINT64, void *, HBA_UINT32 *, + void *, HBA_UINT32 *); +uint32 SendScsiInquiry(uint32, HBA_WWN *, HBA_UINT64, HBA_UINT8, HBA_UINT32, + void *, HBA_UINT32 *, void *, HBA_UINT32 *); +#endif + + +#endif + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/lp6000.c current/drivers/scsi/lpfc/lp6000.c --- reference/drivers/scsi/lpfc/lp6000.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/lp6000.c 2004-04-09 11:53:04.000000000 -0700 @@ -0,0 +1,2696 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. 
* + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +/* Routine Declaration - Local */ +_local_ int fc_binfo_init(fc_dev_ctl_t *p_dev_ctl); +_local_ int fc_parse_vpd(fc_dev_ctl_t *p_dev_ctl, uchar *vpd); +_local_ int fc_proc_ring_event( fc_dev_ctl_t *p_dev_ctl, RING *rp, + IOCBQ *saveq); +/* End Routine Declaration - Local */ +extern uint32 fcPAGESIZE; +extern uint32 fc_diag_state; +extern int fcinstance[]; + +int fc_check_for_vpd = 1; +int fc_reset_on_attach = 0; + +extern int fc_max_els_sent; + +#define FC_MAX_VPD_SIZE 0x100 +static uint32 fc_vpd_data[FC_MAX_VPD_SIZE]; + +static uint32 fc_run_biu_test[256] = { + /* Walking ones */ + 0x80000000, 0x40000000, 0x20000000, 0x10000000, + 0x08000000, 0x04000000, 0x02000000, 0x01000000, + 0x00800000, 0x00400000, 0x00200000, 0x00100000, + 0x00080000, 0x00040000, 0x00020000, 0x00010000, + 0x00008000, 0x00004000, 0x00002000, 0x00001000, + 0x00000800, 0x00000400, 0x00000200, 0x00000100, + 0x00000080, 0x00000040, 0x00000020, 0x00000010, + 0x00000008, 0x00000004, 0x00000002, 0x00000001, + + /* Walking zeros */ + 0x7fffffff, 0xbfffffff, 0xdfffffff, 0xefffffff, + 0xf7ffffff, 0xfbffffff, 0xfdffffff, 0xfeffffff, + 0xff7fffff, 0xffbfffff, 0xffdfffff, 0xffefffff, + 0xfff7ffff, 0xfffbffff, 0xfffdffff, 0xfffeffff, + 0xffff7fff, 0xffffbfff, 
0xffffdfff, 0xffffefff, + 0xfffff7ff, 0xfffffbff, 0xfffffdff, 0xfffffeff, + 0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, + 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe, + + /* all zeros */ + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + + /* all ones */ + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + + /* all 5's */ + 0x55555555, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x55555555, 0x55555555, 0x55555555, + + /* all a's */ + 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, + 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, + 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, + 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, + 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, + 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, + 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, + 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, + + /* all 5a's */ + 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, + 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, + 0x5a5a5a5a, 0x5a5a5a5a, 
0x5a5a5a5a, 0x5a5a5a5a, + 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, + 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, + 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, + 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, + 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a, + + /* all a5's */ + 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, + 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, + 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, + 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, + 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, + 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, + 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, + 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5, 0xa5a5a5a5 +}; + +extern _static_ void fc_read_nv(FC_BRD_INFO *binfo, MAILBOX *mb); +#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) +#define BYTESWAP(x) ((x<<24) | (x >> 24) | (0xFF00 & (x >> 8)) | (0xFF0000 & (x << 8))); + +/************************************************************************/ +/* */ +/* fc_swap_bcopy */ +/* */ +/************************************************************************/ +_static_ void +fc_swap_bcopy( +uint32 *src, +uint32 *dest, +uint32 cnt) +{ + uint32 ldata; + int i; + + for (i = 0; i < (int)cnt; i += sizeof(uint32)) { + ldata = *src++; + ldata = cpu_to_be32(ldata); + *dest++ = ldata; + } +} /* End fc_swap_bcopy */ + +/************************************************************************/ +/* */ +/* fc_init_hba */ +/* */ +/************************************************************************/ +uint32 +fc_init_hba( +fc_dev_ctl_t * p_dev_ctl, +MAILBOX * mb, +uint32 * pwwnn) +{ + FC_BRD_INFO * binfo; + uint32 * pText; + char licensed[56] = "key unlock for use with gnu public licensed code only\0"; + pText = (uint32 *) licensed; + fc_swap_bcopy(pText, pText, 56); + binfo = &BINFO; + /* Setup and issue mailbox READ NVPARAMS command */ + binfo->fc_ffstate = FC_INIT_NVPARAMS; + fc_read_nv(binfo, mb); + memset((void*) mb->un.varRDnvp.rsvd3, 0, 
sizeof(mb->un.varRDnvp.rsvd3)); + memcpy((void*) mb->un.varRDnvp.rsvd3, licensed, sizeof(licensed)); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + /* Adapter initialization error, mbxCmd READ_NVPARM, mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0303, /* ptr to msg structure */ + fc_mes0303, /* ptr to msg */ + fc_msgBlk0303.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus); /* end varargs */ + return(0); + } + fc_bcopy ((uchar*)mb->un.varRDnvp.nodename, (uchar*) pwwnn, sizeof(mb->un.varRDnvp.nodename)); + return(1); +} + +/************************************************************************/ +/* */ +/* sha_initialize */ +/* */ +/************************************************************************/ +void +sha_initialize( +uint32 *HashResultPointer) +{ + HashResultPointer[0] = 0x67452301; + HashResultPointer[1] = 0xEFCDAB89; + HashResultPointer[2] = 0x98BADCFE; + HashResultPointer[3] = 0x10325476; + HashResultPointer[4] = 0xC3D2E1F0; +} +/************************************************************************/ +/* */ +/* sha_iterate */ +/* */ +/************************************************************************/ +void +sha_iterate( +uint32 *HashResultPointer, +uint32 *HashWorkingPointer) +{ + int t; + uint32 TEMP; + uint32 A, B, C, D, E; + t = 16; + do + { + HashWorkingPointer[t] = S(1,HashWorkingPointer[t-3]^HashWorkingPointer[t-8]^HashWorkingPointer[t-14]^HashWorkingPointer[t-16]); + } while (++t <= 79); + t = 0; + A = HashResultPointer[0]; + B = HashResultPointer[1]; + C = HashResultPointer[2]; + D = HashResultPointer[3]; + E = HashResultPointer[4]; + + do + { + if (t < 20) + { + TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; + } else if (t < 40) { + TEMP = (B ^ C ^ D) + 0x6ED9EBA1; + } else if (t < 60) { + TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; + } else { + TEMP = (B ^ C ^ D) + 0xCA62C1D6; + } + TEMP += S(5,A) + E + HashWorkingPointer[t]; + E = D; + D = C; + C = S(30,B); + B = A; + 
A = TEMP; + } while (++t <= 79); + + HashResultPointer[0] += A; + HashResultPointer[1] += B; + HashResultPointer[2] += C; + HashResultPointer[3] += D; + HashResultPointer[4] += E; + +} +/************************************************************************/ +/* */ +/* Challenge_XOR_KEY */ +/* */ +/************************************************************************/ +void +Challenge_XOR_KEY +(uint32 *RandomChallenge, + uint32 *HashWorking) +{ + *HashWorking = (*RandomChallenge ^ *HashWorking); +} + +/************************************************************************/ +/* */ +/* fc_SHA1 */ +/* */ +/************************************************************************/ +void +fc_SHA1( +uint32 * pwwnn, +uint32 * phbainitEx, +uint32 * RandomData) +{ + int t; + uint32 HashWorking[80]; + + fc_bzero(HashWorking, sizeof(HashWorking)); + HashWorking[0] = HashWorking[78] = *pwwnn++; + HashWorking[1] = HashWorking[79] = *pwwnn; + for (t = 0; t < 7 ; t++) { + Challenge_XOR_KEY(RandomData+t,HashWorking+t); + } + sha_initialize(phbainitEx); + sha_iterate(phbainitEx, HashWorking); +} + +/************************************************************************/ +/* */ +/* fc_ffinit */ +/* */ +/************************************************************************/ +_static_ int +fc_ffinit( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + fc_vpd_t * vp; + uint32 status, i, j; + uint32 read_rev_reset, hbainit = 0; + uint32 RandomData[7]; + uint32 hbainitEx[5]; + uint32 wwnn[2]; + struct pci_dev *pdev; + int ipri, flogi_sent; + MBUF_INFO bufinfo; + MBUF_INFO * buf_info; + void * ioa; + RING * rp; + MAILBOX * mb; + MATCHMAP * mp, *mp1, *mp2; + uchar * inptr, *outptr; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + vp = &VPD; + mb = 0; + + pdev = p_dev_ctl->pcidev ; + /* Set board state to initialization started */ + binfo->fc_ffstate = FC_INIT_START; + read_rev_reset = 0; + + if(fc_reset_on_attach) { + binfo->fc_ffstate = 0; + 
fc_brdreset(p_dev_ctl); + binfo->fc_ffstate = FC_INIT_START; + DELAYMS(2500); + } + +top: + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + +#if LITTLE_ENDIAN_HOST + /* For Little Endian, BIU_BSE is not supported */ +#else +#ifdef BIU_BSE + status = READ_CSR_REG(binfo, FC_BC_REG(binfo, ioa)); + WRITE_CSR_REG(binfo, FC_BC_REG(binfo, ioa), (BC_BSE_SWAP | status)); + i = READ_CSR_REG(binfo, FC_BC_REG(binfo, ioa)); +#endif +#endif + + status = READ_CSR_REG(binfo, FC_STAT_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + + i = 0; + + /* Check status register to see what current state is */ + while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { + + /* Check every 100ms for 5 retries, then every 500ms for 5, then + * every 2.5 sec for 5, then reset board and every 2.5 sec for 4. + */ + if (i++ >= 20) { + /* Adapter failed to init, timeout, status reg */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0436, /* ptr to msg structure */ + fc_mes0436, /* ptr to msg */ + fc_msgBlk0436.msgPreambleStr, /* begin varargs */ + status); /* end varargs */ + binfo->fc_ffstate = FC_ERROR; + return(EIO); + } + + /* Check to see if any errors occurred during init */ + if (status & HS_FFERM) { + /* ERROR: During chipset initialization */ + /* Adapter failed to init, chipset, status reg */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0437, /* ptr to msg structure */ + fc_mes0437, /* ptr to msg */ + fc_msgBlk0437.msgPreambleStr, /* begin varargs */ + status); /* end varargs */ + binfo->fc_ffstate = FC_ERROR; + return(EIO); + } + + if (i <= 5) { + DELAYMS(100); + } + else if (i <= 10) { + DELAYMS(500); + } + else { + DELAYMS(2500); + } + + if (i == 15) { + /* Reset board and try one more time */ + binfo->fc_ffstate = 0; + fc_brdreset(p_dev_ctl); + binfo->fc_ffstate = FC_INIT_START; + } + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + status = READ_CSR_REG(binfo, FC_STAT_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); 
+ } + + /* Check to see if any errors occurred during init */ + if (status & HS_FFERM) { + /* ERROR: During chipset initialization */ + /* Adapter failed to init, chipset, status reg */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0438, /* ptr to msg structure */ + fc_mes0438, /* ptr to msg */ + fc_msgBlk0438.msgPreambleStr, /* begin varargs */ + status); /* end varargs */ + binfo->fc_ffstate = FC_ERROR; + return(EIO); + } + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + + /* Clear all interrupt enable conditions */ + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), 0); + + /* setup host attn register */ + WRITE_CSR_REG(binfo, FC_HA_REG(binfo, ioa), 0xffffffff); + + FC_UNMAP_MEMIO(ioa); + + if(read_rev_reset) + goto do_read_rev; + + fc_binfo_init(p_dev_ctl); + + /* Allocate some memory for buffers */ + if (fc_malloc_buffer(p_dev_ctl) == 0) { + binfo->fc_ffstate = FC_ERROR; + return(ENOMEM); + } + + fc_get_dds_bind(p_dev_ctl); + + /* Get a buffer which will be used repeatedly for mailbox commands */ + if ((mb = (MAILBOX * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI)) == 0) { + binfo->fc_ffstate = FC_ERROR; + fc_free_buffer(p_dev_ctl); + return(ENOMEM); + } + +do_read_rev: + if((pdev->device == PCI_DEVICE_ID_TFLY)|| + (pdev->device == PCI_DEVICE_ID_PFLY)) + hbainit = fc_init_hba(p_dev_ctl, mb, wwnn); + /* Setup and issue mailbox READ REV command */ + binfo->fc_ffstate = FC_INIT_REV; + fc_read_rev(binfo, mb); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + /* Adapter failed to init, mbxCmd READ_REV, mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0439, /* ptr to msg structure */ + fc_mes0439, /* ptr to msg */ + fc_msgBlk0439.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus); /* end varargs */ + + /* If read_rev fails, give it one more chance */ + if(read_rev_reset == 0) { + binfo->fc_ffstate = 0; + fc_brdreset(p_dev_ctl); + binfo->fc_ffstate = FC_INIT_START; + + DELAYMS(2500); + 
DELAYMS(2500); + + read_rev_reset = 1; + goto top; + } + binfo->fc_ffstate = FC_ERROR; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + + if(mb->un.varRdRev.rr == 0) { + + if(read_rev_reset == 0) { + binfo->fc_ffstate = 0; + fc_brdreset(p_dev_ctl); + binfo->fc_ffstate = FC_INIT_START; + + DELAYMS(2500); + DELAYMS(2500); + + read_rev_reset = 1; + goto top; + } + + vp->rev.rBit = 0; + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0440, /* ptr to msg structure */ + fc_mes0440, /* ptr to msg */ + fc_msgBlk0440.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + read_rev_reset); /* end varargs */ + } + else { + if(mb->un.varRdRev.un.b.ProgType != 2) { + if(read_rev_reset == 0) { + binfo->fc_ffstate = 0; + fc_brdreset(p_dev_ctl); + binfo->fc_ffstate = FC_INIT_START; + + DELAYMS(2500); + DELAYMS(2500); + + read_rev_reset = 1; + goto top; + } + } + vp->rev.rBit = 1; + vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; + fc_bcopy((uchar *)mb->un.varRdRev.sli1FwName, (uchar *)vp->rev.sli1FwName, 16); + vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; + fc_bcopy((uchar *)mb->un.varRdRev.sli2FwName, (uchar *)vp->rev.sli2FwName, 16); + } + + /* Save information as VPD data */ + vp->rev.biuRev = mb->un.varRdRev.biuRev; + vp->rev.smRev = mb->un.varRdRev.smRev; + vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; + vp->rev.endecRev = mb->un.varRdRev.endecRev; + vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; + vp->rev.fcphLow = mb->un.varRdRev.fcphLow; + vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; + vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; + vp->rev.postKernRev = mb->un.varRdRev.postKernRev; + vp->rev.opFwRev = mb->un.varRdRev.opFwRev; + if((pdev->device == PCI_DEVICE_ID_TFLY)|| + (pdev->device == PCI_DEVICE_ID_PFLY)) + fc_bcopy((uchar *)&mb->un.varWords[24], (uchar *)RandomData, sizeof(RandomData)); + + dfc_fmw_rev(p_dev_ctl); /* Save firmware rev for HBAAPI */ + + if(fc_check_for_vpd) { + /* Get adapter VPD 
information */ + fc_dump_mem(binfo, mb); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + /* + * Let it go through even if failed. + */ + /* Adapter failed to init, mbxCmd DUMP VPD, mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0441, /* ptr to msg structure */ + fc_mes0441, /* ptr to msg */ + fc_msgBlk0441.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus); /* end varargs */ + + /* If dump_mem times out, give it one more chance */ + if((read_rev_reset == 0) && (mb->mbxStatus == 0)) { + binfo->fc_ffstate = 0; + fc_brdreset(p_dev_ctl); + binfo->fc_ffstate = FC_INIT_START; + + DELAYMS(2500); + DELAYMS(2500); + + read_rev_reset = 1; + goto top; + } + } + else { + if((mb->un.varDmp.ra == 1) && + (mb->un.varDmp.word_cnt <= FC_MAX_VPD_SIZE)) { + uint32 *lp1, *lp2; + + lp1 = (uint32 * )&mb->un.varDmp.resp_offset; + lp2 = (uint32 * )&fc_vpd_data[0]; + for(i=0;iun.varDmp.word_cnt;i++) { + status = *lp1++; + *lp2++ = SWAP_LONG(status); + } + fc_parse_vpd(p_dev_ctl, (uchar *)&fc_vpd_data[0]); + } + } + } + + /* Setup and issue mailbox CONFIG_PORT or PARTITION_SLIM command */ + binfo->fc_ffstate = FC_INIT_PARTSLIM; + if (binfo->fc_sli == 2) { + if((pdev->device == PCI_DEVICE_ID_TFLY)|| + (pdev->device == PCI_DEVICE_ID_PFLY)){ + fc_SHA1(wwnn, hbainitEx, RandomData); + fc_config_port(binfo, mb, (uint32 *) hbainitEx); + } + else + fc_config_port(binfo, mb, &hbainit); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + /* Adapter failed to init, mbxCmd CONFIG_PORT, mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0442, /* ptr to msg structure */ + fc_mes0442, /* ptr to msg */ + fc_msgBlk0442.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus, + 0); /* end varargs */ + + /* If config_port fails, give it one more chance */ + if(read_rev_reset == 0) { + binfo->fc_ffstate = 0; + fc_brdreset(p_dev_ctl); + binfo->fc_ffstate = FC_INIT_START; + + DELAYMS(2500); + DELAYMS(2500); + + 
read_rev_reset = 1; + goto top; + } + + binfo->fc_flag &= ~FC_SLI2; + binfo->fc_mboxaddr = 0; + if (binfo->fc_slim2.virt) { + buf_info = &bufinfo; + if (binfo->fc_slim2.phys) { + buf_info->phys = (void * )binfo->fc_slim2.phys; + buf_info->data_handle = binfo->fc_slim2.data_handle; + buf_info->dma_handle = binfo->fc_slim2.dma_handle; + buf_info->flags = FC_MBUF_DMA; + } else { + buf_info->phys = 0; + buf_info->data_handle = 0; + buf_info->dma_handle = 0; + buf_info->flags = 0; + } + buf_info->size = fcPAGESIZE; + buf_info->virt = (void * )binfo->fc_slim2.virt; + fc_free(p_dev_ctl, buf_info); + binfo->fc_slim2.virt = 0; + binfo->fc_slim2.phys = 0; + binfo->fc_slim2.dma_handle = 0; + binfo->fc_slim2.data_handle = 0; + } + binfo->fc_ffstate = FC_ERROR; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + } else { + /* SLI1 not supported, mbxCmd , mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0443, /* ptr to msg structure */ + fc_mes0443, /* ptr to msg */ + fc_msgBlk0443.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus, + 0); /* end varargs */ + binfo->fc_ffstate = FC_ERROR; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + + /* Initialize cmd/rsp ring pointers */ + for (i = 0; i < (uint32)binfo->fc_ffnumrings; i++) { + rp = &binfo->fc_ring[i]; + + rp->fc_ringno = (uchar)i; + rp->fc_xmitstate = FC_LINK_UP; + if ((i == FC_IP_RING) || (i == FC_FCP_RING)) + rp->fc_xmitstate = FC_READY; + rp->fc_binfo = (uchar * )binfo; + rp->fc_iocbhd = 0; + rp->fc_iocbtl = 0; + rp->fc_cmdidx = 0; + rp->fc_rspidx = 0; + rp->fc_iotag = 1; /* used to identify each I/O */ + if (i == FC_FCP_RING) + rp->fc_bufcnt = MAX_FCP_CMDS; /* Used for ABTS iotag */ + + /* offsets are from the beginning of SLIM */ + if (!(binfo->fc_flag & FC_SLI2)) { + /* offsets are from the beginning of SLIM */ + rp->fc_cmdringaddr = (void *)((ulong)(mb->un.varSlim.ringdef[i].offCiocb)); + 
rp->fc_rspringaddr = (void *)((ulong)(mb->un.varSlim.ringdef[i].offRiocb)); + + } + } + + mp1 = 0; + mp2 = 0; + /* Setup and issue mailbox RUN BIU DIAG command */ + /* setup test buffers */ + if (((mp = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF | MEM_PRI)) == 0) || + ((mp1 = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF | MEM_PRI)) == 0) || + ((mp2 = (MATCHMAP * )fc_mem_get(binfo, MEM_BUF | MEM_PRI)) == 0)) { + /* Adapter failed to init, no buffers for RUN_BIU_DIAG */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0444, /* ptr to msg structure */ + fc_mes0444, /* ptr to msg */ + fc_msgBlk0444.msgPreambleStr); /* begin & end varargs */ + if (mp) + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + if (mp1) + fc_mem_put(binfo, MEM_BUF, (uchar * )mp1); + binfo->fc_ffstate = FC_ERROR; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(ENOMEM); + } + + fc_mpdata_incopy(p_dev_ctl, mp, (uchar * ) & fc_run_biu_test[0], FCELSSIZE); + fc_mpdata_sync(mp->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV); + inptr = mp->virt; + /* Issue mailbox command */ + fc_runBIUdiag(binfo, mb, mp->phys, mp1->phys); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + FC_UNMAP_MEMIO(ioa); + /* Adapter failed init, mailbox cmd runBIUdiag mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0447, /* ptr to msg structure */ + fc_mes0447, /* ptr to msg */ + fc_msgBlk0447.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus); /* end varargs */ + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp1); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp2); + binfo->fc_ffstate = FC_ERROR; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + + fc_mpdata_sync(mp1->dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + fc_mpdata_outcopy(p_dev_ctl, mp1, (uchar * )mp2->virt, FCELSSIZE); + outptr = (uchar * )mp2->virt; + + 
for (i = 0; i < FCELSSIZE; i++) { + if (*outptr++ != *inptr++) { + outptr--; + inptr--; + /* RUN_BIU_DIAG failed */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0445, /* ptr to msg structure */ + fc_mes0445, /* ptr to msg */ + fc_msgBlk0445.msgPreambleStr); /* begin & end varargs */ + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp1); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp2); + binfo->fc_ffstate = FC_ERROR; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + } + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp1); + fc_mem_put(binfo, MEM_BUF, (uchar * )mp2); + + /* Setup and issue mailbox CONFIGURE RING command */ + for (i = 0; i < (uint32)binfo->fc_ffnumrings; i++) { + binfo->fc_ffstate = FC_INIT_CFGRING; + fc_config_ring(binfo, i, 0, mb); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + /* Adapter failed to init, mbxCmd CFG_RING, mbxStatus , ring */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0446, /* ptr to msg structure */ + fc_mes0446, /* ptr to msg */ + fc_msgBlk0446.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus, + i); /* ring num - end varargs */ + binfo->fc_ffstate = FC_ERROR; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + } + + /* Setup link timers */ + fc_config_link(p_dev_ctl, mb); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + /* Adapter failed to init, mbxCmd CONFIG_LINK mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0447, /* ptr to msg structure */ + fc_mes0447, /* ptr to msg */ + fc_msgBlk0447.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus); /* end varargs */ + binfo->fc_ffstate = FC_ERROR; + fc_ffcleanup(p_dev_ctl); + i_clear(&IHS); + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + + /* We need to get login parameters for NID 
*/ + fc_read_sparam(p_dev_ctl, mb); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + /* Adapter failed to init, mbxCmd READ_SPARM mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0448, /* ptr to msg structure */ + fc_mes0448, /* ptr to msg */ + fc_msgBlk0448.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus); /* end varargs */ + binfo->fc_ffstate = FC_ERROR; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + + mp = (MATCHMAP * )binfo->fc_mbbp; + fc_mpdata_sync(mp->dma_handle, 0, sizeof(SERV_PARM), + DDI_DMA_SYNC_FORKERNEL); + fc_mpdata_outcopy(p_dev_ctl, mp, (uchar * ) & binfo->fc_sparam, + sizeof(SERV_PARM)); + + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + binfo->fc_mbbp = 0; + + fc_bcopy((uchar * )&binfo->fc_sparam.nodeName, (uchar * )&binfo->fc_nodename, + sizeof(NAME_TYPE)); + fc_bcopy((uchar * )&binfo->fc_sparam.portName, (uchar * )&binfo->fc_portname, + sizeof(NAME_TYPE)); + fc_bcopy(binfo->fc_portname.IEEE, p_dev_ctl->phys_addr, 6); + + /* If no serial number in VPD data, use low 6 bytes of WWNN */ + if(binfo->fc_SerialNumber[0] == 0) { + outptr = (uchar *) &binfo->fc_nodename.IEEE[0]; + for(i=0;i<12;i++) { + status = *outptr++; + j = ((status & 0xf0) >> 4); + if(j <= 9) + binfo->fc_SerialNumber[i] = (char)((uchar)0x30 + (uchar)j); + else + binfo->fc_SerialNumber[i] = (char)((uchar)0x61 + (uchar)(j-10)); + i++; + j = (status & 0xf); + if(j <= 9) + binfo->fc_SerialNumber[i] = (char)((uchar)0x30 + (uchar)j); + else + binfo->fc_SerialNumber[i] = (char)((uchar)0x61 + (uchar)(j-10)); + } + } + + if(clp[CFG_NETWORK_ON].a_current) { + if ((binfo->fc_sparam.portName.nameType != NAME_IEEE) || + (binfo->fc_sparam.portName.IEEEextMsn != 0) || + (binfo->fc_sparam.portName.IEEEextLsb != 0)) { + clp[CFG_NETWORK_ON].a_current = 0; + /* WorldWide PortName Type doesn't conform to IP Profile */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0449, /* ptr to msg structure 
*/ + fc_mes0449, /* ptr to msg */ + fc_msgBlk0449.msgPreambleStr, /* begin varargs */ + binfo->fc_sparam.portName.nameType); /* end varargs */ + } + + fc_config_farp(binfo, mb); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + /* + * Let it go through even if failed. + */ + /* Adapter failed to init, mbxCmd FARP, mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0450, /* ptr to msg structure */ + fc_mes0450, /* ptr to msg */ + fc_msgBlk0450.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus); /* end varargs */ + } + } + + if (p_dev_ctl->intr_inited != 1) { + /* Add our interrupt routine to kernel's interrupt chain & enable it */ + + + IHS.handler = fc_intr; + + if ((i_init((struct intr *) & IHS)) == INTR_FAIL) { + /* Enable interrupt handler failed */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0451, /* ptr to msg structure */ + fc_mes0451, /* ptr to msg */ + fc_msgBlk0451.msgPreambleStr); /* begin & end varargs */ + binfo->fc_ffstate = FC_ERROR; + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + p_dev_ctl->intr_inited = 1; + } + + fc_disable_tc(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + binfo->fc_ffstate = FC_ERROR; + fc_ffcleanup(p_dev_ctl); + i_clear(&IHS); + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + + fc_read_config(binfo, (MAILBOX * )mb); + if (issue_mb_cmd(binfo, mb, MBX_POLL) != MBX_SUCCESS) { + /* Adapter failed to init, mbxCmd READ_CONFIG, mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0453, /* ptr to msg structure */ + fc_mes0453, /* ptr to msg */ + fc_msgBlk0453.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus); /* end varargs */ + binfo->fc_ffstate = FC_ERROR; + fc_ffcleanup(p_dev_ctl); + i_clear(&IHS); + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + + if 
(mb->un.varRdConfig.lmt & LMT_2125_10bit) + /* HBA is 2G capable */ + binfo->fc_flag |= FC_2G_CAPABLE; + + binfo->fc_ffstate = FC_LINK_DOWN; + binfo->fc_flag |= FC_LNK_DOWN; + + /* Activate the adapter and allocate all the resources needed for ELS */ + fc_start(p_dev_ctl); + + /* Setup and issue mailbox INITIALIZE LINK command */ + fc_init_link(binfo, mb, clp[CFG_TOPOLOGY].a_current, + clp[CFG_LINK_SPEED].a_current); + if (issue_mb_cmd(binfo, mb, MBX_NOWAIT) != MBX_SUCCESS) { + /* Adapter failed to init, mbxCmd INIT_LINK, mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0454, /* ptr to msg structure */ + fc_mes0454, /* ptr to msg */ + fc_msgBlk0454.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + mb->mbxStatus); /* end varargs */ + binfo->fc_ffstate = FC_ERROR; + fc_ffcleanup(p_dev_ctl); + i_clear(&IHS); + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + fc_free_buffer(p_dev_ctl); + return(EIO); + } + + + /* Enable link attention interrupt */ + ipri = disable_lock(FC_LVL, &CMD_LOCK); + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + status = READ_CSR_REG(binfo, FC_HC_REG(binfo, ioa)); + status = status | HC_LAINT_ENA; + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), status); + FC_UNMAP_MEMIO(ioa); + binfo->fc_process_LA = 1; + p_dev_ctl->fc_waitflogi = (FCCLOCK *)1; + unlock_enable(ipri, &CMD_LOCK); + + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + + binfo->fc_prevDID = Mask_DID; + /* If we are point to point, don't wait for link up */ + if ((clp[CFG_TOPOLOGY].a_current == FLAGS_TOPOLOGY_MODE_PT_PT) && + (clp[CFG_FCP_ON].a_current == 0)) { + goto out; + } + + flogi_sent = 0; + i = 0; + while (binfo->fc_ffstate != FC_READY) { + /* Check every second for 20 retries. 
*/ + if ((i++ > 20) || + ((i >= 10) && (binfo->fc_ffstate <= FC_LINK_DOWN))) { + /* The link is down, so set linkdown timeout */ + rp = &binfo->fc_ring[FC_FCP_RING]; + RINGTMO = fc_clk_set(p_dev_ctl, rp->fc_ringtmo, fc_linkdown_timeout, 0, 0); + break; + } + ipri = disable_lock(FC_LVL, &CMD_LOCK); + if((i > 1) && (binfo->fc_ffstate == FC_FLOGI) && + (flogi_sent == 0) && (p_dev_ctl->power_up == 0)) { + if(p_dev_ctl->fc_waitflogi) { + if (p_dev_ctl->fc_waitflogi != (FCCLOCK *)1) + fc_clk_can(p_dev_ctl, p_dev_ctl->fc_waitflogi); + p_dev_ctl->fc_waitflogi = 0; + } + fc_snd_flogi(p_dev_ctl, 0, 0); + flogi_sent = 1; + rp = &binfo->fc_ring[FC_ELS_RING]; + if(RINGTMO) + fc_clk_res(p_dev_ctl, 20, RINGTMO); + } + unlock_enable(ipri, &CMD_LOCK); + + DELAYMS(1000); + } + +out: + ipri = disable_lock(FC_LVL, &CMD_LOCK); + if((binfo->fc_ffstate == FC_FLOGI) && (p_dev_ctl->power_up == 0)) { + fc_snd_flogi(p_dev_ctl, 0, 0); + } + p_dev_ctl->power_up = 1; + unlock_enable(ipri, &CMD_LOCK); + + return(0); +} /* End fc_ffinit */ + +/************************************************************************/ +/* */ +/* fc_binfo_init */ +/* This routine will initialize the binfo structure */ +/* */ +/************************************************************************/ +_local_ int +fc_binfo_init( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + iCfgParam * clp; + int idx; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + /* Initialize configuration parameters */ + if(binfo->fc_flag & FC_ESTABLISH_LINK) + binfo->fc_flag = FC_ESTABLISH_LINK; + else + binfo->fc_flag = 0; /* don't change nvram or tov */ + + binfo->fc_ffnumrings = MAX_CONFIGURED_RINGS - 1; /* number of rings */ + + /* Ring 0 - ELS */ + binfo->fc_nummask[0] = 4; + + binfo->fc_rval[0] = FC_ELS_REQ; /* ELS request */ + binfo->fc_tval[0] = FC_ELS_DATA; /* ELS */ + binfo->fc_rval[1] = FC_ELS_RSP; /* ELS response */ + binfo->fc_tval[1] = FC_ELS_DATA; /* ELS */ + binfo->fc_rval[2] = FC_UNSOL_CTL; /* 
NameServer Inquiries */ + binfo->fc_tval[2] = FC_COMMON_TRANSPORT_ULP; /* NameServer */ + binfo->fc_rval[3] = FC_SOL_CTL; /* NameServer response */ + binfo->fc_tval[3] = FC_COMMON_TRANSPORT_ULP; /* NameServer */ + if (binfo->fc_sli == 2) { + binfo->fc_ring[0].fc_numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; + binfo->fc_ring[0].fc_numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; + } else { + binfo->fc_ring[0].fc_numCiocb = IOCB_CMD_R0_ENTRIES; + binfo->fc_ring[0].fc_numRiocb = IOCB_RSP_R0_ENTRIES; + } + + /* Ring 1 - IP */ + if(clp[CFG_NETWORK_ON].a_current) { + binfo->fc_nummask[1] = 1; + idx = 5; + } else { + binfo->fc_nummask[1] = 0; + idx = 4; + } + binfo->fc_rval[4] = FC_UNSOL_DATA; /* Unsolicited Data */ + binfo->fc_tval[4] = FC_LLC_SNAP; /* LLC/SNAP */ + if (binfo->fc_sli == 2) { + binfo->fc_ring[1].fc_numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; + binfo->fc_ring[1].fc_numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; + if(clp[CFG_NETWORK_ON].a_current == 0) { + binfo->fc_ring[1].fc_numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; + binfo->fc_ring[1].fc_numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; + } + } else { + binfo->fc_ring[1].fc_numCiocb = IOCB_CMD_R1_ENTRIES; + binfo->fc_ring[1].fc_numRiocb = IOCB_RSP_R1_ENTRIES; + } + + /* Ring 2 - FCP */ + binfo->fc_nummask[2] = 0; + if (binfo->fc_sli == 2) { + binfo->fc_ring[2].fc_numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; + binfo->fc_ring[2].fc_numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; + if(clp[CFG_NETWORK_ON].a_current == 0) { + binfo->fc_ring[2].fc_numCiocb += SLI2_IOCB_CMD_R2XTRA_ENTRIES; + binfo->fc_ring[2].fc_numRiocb += SLI2_IOCB_RSP_R2XTRA_ENTRIES; + } + } else { + binfo->fc_ring[2].fc_numCiocb = IOCB_CMD_R2_ENTRIES; + binfo->fc_ring[2].fc_numRiocb = IOCB_RSP_R2_ENTRIES; + } + + + binfo->ipVersion = RNID_IPV4; + return(0); +} /* End fc_binfo_init */ + +/************************************************************************/ +/* */ +/* fc_parse_vpd */ +/* This routine will parse the VPD data */ +/* */ 
+/************************************************************************/ +_local_ int +fc_parse_vpd( +fc_dev_ctl_t *p_dev_ctl, +uchar *vpd) +{ + FC_BRD_INFO * binfo; + int finished = 0; + int index = 0; + uchar lenlo, lenhi; + unsigned char *Length; + int i, j; + + binfo = &BINFO; + /* Vital Product */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0455, /* ptr to msg structure */ + fc_mes0455, /* ptr to msg */ + fc_msgBlk0455.msgPreambleStr, /* begin varargs */ + (uint32)vpd[0], + (uint32)vpd[1], + (uint32)vpd[2], + (uint32)vpd[3]); /* end varargs */ + do { + switch (vpd[index]) { + case 0x82: + index += 1; + lenlo = vpd[index]; + index += 1; + lenhi = vpd[index]; + index += 1; + i = ((((unsigned short)lenhi) << 8) + lenlo); + index += i; + break; + case 0x90: + index += 1; + lenlo = vpd[index]; + index += 1; + lenhi = vpd[index]; + index += 1; + i = ((((unsigned short)lenhi) << 8) + lenlo); + do { + /* Look for Serial Number */ + if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { + index += 2; + Length = &vpd[index]; + index += 1; + i = *Length; + j = 0; + while(i--) { + binfo->fc_SerialNumber[j++] = vpd[index++]; + if(j == 31) + break; + } + binfo->fc_SerialNumber[j] = 0; + return(1); + } + else { + index += 2; + Length = &vpd[index]; + index += 1; + j = (int)(*Length); + index += j; + i -= (3 + j); + } + } while (i > 0); + finished = 0; + break; + case 0x78: + finished = 1; + break; + default: + return(0); + } + } while (!finished); + return(1); +} + +_static_ char fwrevision[32]; + +/* TAPE */ +_static_ char * +decode_firmware_rev( +FC_BRD_INFO *binfo, +fc_vpd_t *vp) +{ + uint32 b1, b2, b3, b4, ldata; + char c; + uint32 i, rev; + uint32 *ptr, str[4]; + + if ( vp->rev.rBit ) { + if (binfo->fc_sli == 2) + rev = vp->rev.sli2FwRev; + else + rev = vp->rev.sli1FwRev; + + b1 = (rev & 0x0000f000) >> 12; + b2 = (rev & 0x00000f00) >> 8; + b3 = (rev & 0x000000c0) >> 6; + b4 = (rev & 0x00000030) >> 4; + + switch (b4) { + case 0: + c = 'N'; + break; + case 1: 
+ c = 'A'; + break; + case 2: + c = 'B'; + break; + case 3: + default: + c = 0; + break; + } + b4 = (rev & 0x0000000f); + + if (binfo->fc_sli == 2) { + for (i=0; i<16; i++) { + if (vp->rev.sli2FwName[i] == 0x20) { + vp->rev.sli2FwName[i] = 0; + } + } + ptr = (uint32 *)vp->rev.sli2FwName; + } else { + for (i=0; i<16; i++) { + if (vp->rev.sli1FwName[i] == 0x20) { + vp->rev.sli1FwName[i] = 0; + } + } + ptr = (uint32 *)vp->rev.sli1FwName; + } + for (i=0; i<3; i++) { + ldata = *ptr++; + ldata = SWAP_DATA(ldata); + str[i] = ldata; + } + + fwrevision[0] = (char)((int)'0' + b1); + fwrevision[1] = '.'; + fwrevision[2] = (char)((int)'0' + b2); + fwrevision[3] = (char)((int)'0' + b3); + if(c) { + fwrevision[4] = c; + fwrevision[5] = (char)((int)'0' + b4); + fwrevision[6] = 0; + } + else { + fwrevision[4] = 0; + } + } else { + rev = vp->rev.smFwRev; + + b1 = (rev & 0xff000000) >> 24; + b2 = (rev & 0x00f00000) >> 20; + b3 = (rev & 0x000f0000) >> 16; + c = (char)((rev & 0x0000ff00) >> 8); + b4 = (rev & 0x000000ff); + + fwrevision[0] = (char)((int)'0' + b1); + fwrevision[1] = '.'; + fwrevision[2] = (char)((int)'0' + b2); + fwrevision[3] = (char)((int)'0' + b3); + fwrevision[4] = c; + fwrevision[5] = (char)((int)'0' + b4); + fwrevision[6] = 0; + } + return(fwrevision); +} /* End decode_firmware_rev */ + + +/*****************************************************************************/ +/* + * NAME: fc_intr + * + * FUNCTION: Fibre Channel driver interrupt routine. + * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * The FLIH + * + * INPUT: + * p_ihs - point to the interrupt structure. 
+ * + * RETURNS: + * INTR_SUCC - our interrupt + * INTR_FAIL - not our interrupt + */ +/*****************************************************************************/ +_static_ int +fc_intr( +struct intr *p_ihs) /* This also points to device control area */ +{ + fc_dev_ctl_t * p_dev_ctl = (fc_dev_ctl_t * )p_ihs; + volatile uint32 ha_copy; + FC_BRD_INFO * binfo; + iCfgParam * clp; + fcipbuf_t * mbp; + MAILBOXQ * mb; + IOCBQ * delayiocb; + IOCBQ * temp; + IOCBQ * processiocb; + IOCBQ * endiocb; + void * ioa; + int ipri, rc; + + binfo = &BINFO; + + ipri = disable_lock(FC_LVL, &CMD_LOCK); + binfo->fc_flag |= FC_INTR_THREAD; + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + + /* Read host attention register to determine interrupt source */ + ha_copy = READ_CSR_REG(binfo, FC_HA_REG(binfo, ioa)); + + /* Clear Attention Sources, except ERROR (to preserve status) & LATT */ + WRITE_CSR_REG(binfo, FC_HA_REG(binfo, ioa), + (ha_copy & ~HA_ERATT & ~HA_LATT)); + + FC_UNMAP_MEMIO(ioa); + + + if (ha_copy) { + rc = INTR_SUCC; + binfo->fc_flag |= FC_INTR_WORK; + } else { + clp = DD_CTL.p_config[binfo->fc_brd_no]; + if (clp[CFG_INTR_ACK].a_current && (binfo->fc_flag&FC_INTR_WORK)) { + rc = INTR_SUCC; /* Just claim the first non-working interrupt */ + binfo->fc_flag &= ~FC_INTR_WORK; + } else { + if (clp[CFG_INTR_ACK].a_current == 2) + rc = INTR_SUCC; /* Always claim the interrupt */ + else + rc = INTR_FAIL; + } + } + + if (binfo->fc_flag & FC_OFFLINE_MODE) { + binfo->fc_flag &= ~FC_INTR_THREAD; + unlock_enable(ipri, &CMD_LOCK); + return(INTR_FAIL); + } + + processiocb = 0; + if(binfo->fc_delayxmit) { + delayiocb = binfo->fc_delayxmit; + binfo->fc_delayxmit = 0; + endiocb = 0; + while(delayiocb) { + temp = delayiocb; + delayiocb = (IOCBQ *)temp->q; + temp->rsvd2--; + /* If retry == 0, process IOCB */ + if(temp->rsvd2 == 0) { + if(processiocb == 0) { + processiocb = temp; + } + else { + endiocb->q = (uchar *)temp; + } + endiocb = temp; + temp->q = 0; + } + 
else { + /* Make delayxmit point to first non-zero retry */ + if(binfo->fc_delayxmit == 0) + binfo->fc_delayxmit = temp; + } + } + if(processiocb) { + /* Handle any delayed IOCBs */ + endiocb = processiocb; + while(endiocb) { + temp = endiocb; + endiocb = (IOCBQ *)temp->q; + temp->q = 0; + issue_iocb_cmd(binfo, &binfo->fc_ring[FC_ELS_RING], temp); + } + } + } + + + if (ha_copy & HA_ERATT) { /* Link / board error */ + unlock_enable(ipri, &CMD_LOCK); + handle_ff_error(p_dev_ctl); + return (rc); + } else { + if (ha_copy & HA_MBATT) { /* Mailbox interrupt */ + handle_mb_event(p_dev_ctl); + if(binfo->fc_flag & FC_PENDING_RING0) { + binfo->fc_flag &= ~FC_PENDING_RING0; + ha_copy |= HA_R0ATT; /* event on ring 0 */ + } + } + + if (ha_copy & HA_LATT) { /* Link Attention interrupt */ + if (binfo->fc_process_LA) { + handle_link_event(p_dev_ctl); + } + } + + if (ha_copy & HA_R0ATT) { /* event on ring 0 */ + if(binfo->fc_mbox_active == 0) + handle_ring_event(p_dev_ctl, 0, (ha_copy & 0x0000000F)); + else + binfo->fc_flag |= FC_PENDING_RING0; + } + + if (ha_copy & HA_R1ATT) { /* event on ring 1 */ + /* This ring handles IP. Defer processing anything on this ring + * till all FCP ELS traffic settles down. 
+ */ + if (binfo->fc_ffstate <= FC_NODE_DISC) + binfo->fc_deferip |= (uchar)((ha_copy >> 4) & 0x0000000F); + else + handle_ring_event(p_dev_ctl, 1, ((ha_copy >> 4) & 0x0000000F)); + } + + if (ha_copy & HA_R2ATT) { /* event on ring 2 */ + handle_ring_event(p_dev_ctl, 2, ((ha_copy >> 8) & 0x0000000F)); + } + + if (ha_copy & HA_R3ATT) { /* event on ring 3 */ + handle_ring_event(p_dev_ctl, 3, ((ha_copy >> 12) & 0x0000000F)); + } + } + + if((processiocb == 0) && (binfo->fc_delayxmit) && + (binfo->fc_mbox_active == 0)) { + if ((mb = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + fc_read_rpi(binfo, (uint32)1, (MAILBOX * )mb, (uint32)0); + if (issue_mb_cmd(binfo, (MAILBOX * )mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } + } + + binfo->fc_flag &= ~FC_INTR_THREAD; + + while (p_dev_ctl->mbufl_head != 0) { + binfo->fc_flag |= FC_INTR_WORK; + mbp = (fcipbuf_t * )p_dev_ctl->mbufl_head; + p_dev_ctl->mbufl_head = (uchar * )fcnextpkt(mbp); + fcnextpkt(mbp) = 0; + fc_xmit(p_dev_ctl, mbp); + } + p_dev_ctl->mbufl_tail = 0; + + + unlock_enable(ipri, &CMD_LOCK); + return(rc); +} /* End fc_intr */ + + + +/**************************************************/ +/** handle_ff_error **/ +/** **/ +/** Runs at Interrupt level **/ +/** **/ +/**************************************************/ +_static_ void +handle_ff_error( +fc_dev_ctl_t *p_dev_ctl) +{ + volatile uint32 status, status1, status2; + void *ioa; + FC_BRD_INFO * binfo; + iCfgParam * clp; + int ipri; + + ipri = disable_lock(FC_LVL, &CMD_LOCK); + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + status = p_dev_ctl->dpc_hstatus; + p_dev_ctl->dpc_hstatus = 0; + + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + status1 = READ_SLIM_ADDR(binfo, ((volatile uchar * )ioa + 0xa8)); + status2 = READ_SLIM_ADDR(binfo, ((volatile uchar * )ioa + 0xac)); + FC_UNMAP_MEMIO(ioa); + + + if (status & HS_FFER6) { + + + /* Re-establishing Link */ + fc_log_printf_msg_vargs( 
binfo->fc_brd_no, + &fc_msgBlk1301, /* ptr to msg structure */ + fc_mes1301, /* ptr to msg */ + fc_msgBlk1301.msgPreambleStr, /* begin varargs */ + status, + status1, + status2); /* end varargs */ + binfo->fc_flag |= FC_ESTABLISH_LINK; + fc_cfg_remove(p_dev_ctl); + + binfo->fc_flag |= FC_OFFLINE_MODE; + + lpfc_cfg_init(p_dev_ctl); + + unlock_enable(ipri, &CMD_LOCK); + } else { + /* Adapter Hardware Error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0457, /* ptr to msg structure */ + fc_mes0457, /* ptr to msg */ + fc_msgBlk0457.msgPreambleStr, /* begin varargs */ + status, + status1, + status2); /* end varargs */ + if (status & HS_FFER8) { /* Chipset error 8 */ + } else if (status & HS_FFER7) { /* Chipset error 7 */ + } else if (status & HS_FFER5) { /* Chipset error 5 */ + } else if (status & HS_FFER4) { /* Chipset error 4 */ + } else if (status & HS_FFER3) { /* Chipset error 3 */ + } else if (status & HS_FFER2) { /* Chipset error 2 */ + } else if (status & HS_FFER1) { /* Chipset error 1 */ + } + + fc_free_rpilist(p_dev_ctl, 0); + + p_dev_ctl->device_state = DEAD; + binfo->fc_ffstate = FC_ERROR; + unlock_enable(ipri, &CMD_LOCK); + } + +} /* End handle_ff_error */ + + +/**************************************************/ +/** handle_link_event **/ +/** **/ +/** Description: Process a Link Attention. 
**/ +/** **/ +/**************************************************/ +_static_ void +handle_link_event( +fc_dev_ctl_t *p_dev_ctl) +{ + /* called from host_interrupt, to process LATT */ + MAILBOX * mb; + FC_BRD_INFO * binfo; + void *ioa; + volatile uint32 control; + + binfo = &BINFO; + FCSTATCTR.linkEvent++; + + /* Get a buffer which will be used for mailbox commands */ + if ((mb = (MAILBOX * )fc_mem_get(binfo, MEM_MBOX | MEM_PRI))) { + if (fc_read_la(p_dev_ctl, mb) == 0) { + if (issue_mb_cmd(binfo, mb, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + /* Turn off Link Attention interrupts until CLEAR_LA done */ + binfo->fc_process_LA = 0; + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + control = READ_CSR_REG(binfo, FC_HC_REG(binfo, ioa)); + control &= ~HC_LAINT_ENA; + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), control); + /* Clear Link Attention in HA REG */ + WRITE_CSR_REG(binfo, FC_HA_REG(binfo, ioa), + (volatile uint32)(HA_LATT)); + FC_UNMAP_MEMIO(ioa); + } + else { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mb); + } + } +} /* End handle_link_event */ + + +/**************************************************/ +/** handle_ring_event **/ +/** **/ +/** Description: Process a Ring Attention. **/ +/** **/ +/**************************************************/ +_static_ void +handle_ring_event( +fc_dev_ctl_t *p_dev_ctl, +int ring_no, +uint32 reg_mask) +{ + FC_BRD_INFO * binfo; + RING * rp; + IOCB * entry; + IOCBQ * saveq; + IOCBQ * temp; + void * ioa; + int fcpfound = 0; + uint32 * xx; + uint32 portGet; + volatile uint32 chipatt; + uint32 portRspPut; + + binfo = &BINFO; + /* called from host_interrupt() to process RxATT */ + + rp = &binfo->fc_ring[ring_no]; + temp = NULL; + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + + /* Gather iocb entries off response ring. + * Ensure entry is owned by the host. 
+ */ + entry = (IOCB * )IOCB_ENTRY(rp->fc_rspringaddr, rp->fc_rspidx); + portRspPut = PCIMEM_LONG(((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx.us.s2.port[ring_no].rspPutInx); + if (portRspPut >= rp->fc_numRiocb) { + return; + } + + while (rp->fc_rspidx != portRspPut) { + if((ring_no == 0) && (binfo->fc_mbox_active)) { + binfo->fc_flag |= FC_PENDING_RING0; + break; + } + /* get an iocb buffer to copy entry into */ + if ((temp = (IOCBQ * )fc_mem_get(binfo, MEM_IOCB | MEM_PRI)) == NULL) { + break; + } + + + fc_pcimem_bcopy((uint32 * )entry, (uint32 * ) & temp->iocb, sizeof(IOCB)); + temp->q = NULL; + + /* bump iocb available response index */ + if (++rp->fc_rspidx >= rp->fc_numRiocb) { + rp->fc_rspidx = 0; + } + + /* SLIM POINTER */ + if (binfo->fc_busflag & FC_HOSTPTR) { + ((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx.us.s2.host[ring_no].rspGetInx = + PCIMEM_LONG(rp->fc_rspidx); + } else { + void * ioa2; + + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + ioa2 = (void *)((char *)ioa + ((SLIMOFF+(ring_no*2)+1)*4)); + WRITE_SLIM_ADDR(binfo, (volatile uint32 *)ioa2, rp->fc_rspidx); + FC_UNMAP_MEMIO(ioa); + } + + /* chain all iocb entries until LE is set */ + if (rp->fc_iocbhd == NULL) { + rp->fc_iocbhd = temp; + rp->fc_iocbtl = temp; + } else { + rp->fc_iocbtl->q = (uchar * )temp; + rp->fc_iocbtl = temp; + } + + /* when LE is set, entire Command has been received */ + if (temp->iocb.ulpLe) { + saveq = rp->fc_iocbhd; + + rp->fc_iocbhd = NULL; + rp->fc_iocbtl = NULL; + + /* get a ptr to first iocb entry in chain and process it */ + xx = (uint32 * ) & saveq->iocb; + fcpfound = fc_proc_ring_event(p_dev_ctl, rp, saveq); + + /* Free up iocb buffer chain for command just processed */ + while (saveq) { + temp = saveq; + saveq = (IOCBQ * )temp->q; + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + } + + } /* Entire Command has been received */ + + entry = (IOCB * )IOCB_ENTRY(rp->fc_rspringaddr, rp->fc_rspidx); + + } /* While(entry->ulpOwner == 0) */ + + if ((temp != 
NULL) && (reg_mask & HA_R0RE_REQ)) { + /* At least one response entry has been freed */ + FCSTATCTR.chipRingFree++; + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + /* SET R0RE_RSP in Chip Att register */ + chipatt = ((CA_R0ATT | CA_R0RE_RSP) << (ring_no * 4)); + WRITE_CSR_REG(binfo, FC_FF_REG(binfo, ioa), chipatt); + FC_UNMAP_MEMIO(ioa); + } + + if (reg_mask != 0xffffffff) { + if (fcpfound) { + fc_issue_cmd(p_dev_ctl); + } else if (reg_mask & HA_R0CE_RSP) { + FCSTATCTR.hostRingFree++; + /* Cmd ring is available, queue any available cmds */ + portGet = issue_iocb_cmd(binfo, rp, 0); + if(portGet != PCIMEM_LONG(((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx.us.s2.port[rp->fc_ringno].cmdGetInx)) { + issue_iocb_cmd(binfo, rp, 0); + } + } + FCSTATCTR.ringEvent++; + } + + return; +} /* End handle_ring_event */ + +_static_ int +fc_proc_ring_event( +fc_dev_ctl_t *p_dev_ctl, +RING *rp, +IOCBQ *saveq) +{ + FC_BRD_INFO * binfo; + NODELIST * ndlp; + IOCB * cmd; + int rc; + + binfo = &BINFO; + cmd = &saveq->iocb; + rc = 0; + FCSTATCTR.iocbRsp++; + + switch (cmd->ulpCommand) { + case CMD_FCP_ICMND_CR: + case CMD_FCP_ICMND_CX: + case CMD_FCP_IREAD_CR: + case CMD_FCP_IREAD_CX: + case CMD_FCP_IWRITE_CR: + case CMD_FCP_IWRITE_CX: + case CMD_FCP_ICMND64_CR: + case CMD_FCP_ICMND64_CX: + case CMD_FCP_IREAD64_CR: + case CMD_FCP_IREAD64_CX: + case CMD_FCP_IWRITE64_CR: + case CMD_FCP_IWRITE64_CX: + handle_fcp_event(p_dev_ctl, rp, saveq); + rc = 1; + break; + + case CMD_RCV_SEQUENCE_CX: /* received incoming frame */ + case CMD_RCV_SEQUENCE64_CX: /* received incoming frame */ + switch(rp->fc_ringno) { + case FC_ELS_RING: + handle_elsrcv_seq(p_dev_ctl, rp, saveq); + break; + case FC_IP_RING: + handle_iprcv_seq(p_dev_ctl, rp, saveq); + break; + } + break; + + case CMD_XMIT_BCAST_CN: /* process xmit completion */ + case CMD_XMIT_BCAST_CX: + case CMD_XMIT_SEQUENCE_CX: + case CMD_XMIT_SEQUENCE_CR: + case CMD_XMIT_BCAST64_CN: /* process xmit completion */ + case 
CMD_XMIT_BCAST64_CX: + case CMD_XMIT_SEQUENCE64_CX: + case CMD_XMIT_SEQUENCE64_CR: + handle_xmit_cmpl(p_dev_ctl, rp, saveq); + break; + + case CMD_RCV_ELS_REQ_CX: /* received an els frame */ + case CMD_RCV_ELS_REQ64_CX: /* received an els frame */ + handle_rcv_els_req(p_dev_ctl, rp, saveq); + break; + + case CMD_CREATE_XRI_CR: + case CMD_CREATE_XRI_CX: + handle_create_xri(p_dev_ctl, rp, saveq); + break; + + case CMD_ELS_REQUEST_CR: /* xmit els frame completion */ + case CMD_ELS_REQUEST_CX: + case CMD_XMIT_ELS_RSP_CX: + case CMD_ELS_REQUEST64_CR: + case CMD_ELS_REQUEST64_CX: + case CMD_XMIT_ELS_RSP64_CX: + case CMD_GEN_REQUEST64_CR: + case CMD_GEN_REQUEST64_CX: + handle_els_event(p_dev_ctl, rp, saveq); + break; + + case CMD_ABORT_XRI_CN: /* Abort fcp command */ + break; + + case CMD_ABORT_XRI_CX: /* Abort command */ + break; + + case CMD_XRI_ABORTED_CX: /* Handle ABORT condition */ + /* + * If we find an NODELIST entry that matches the aborted + * XRI, clear out the Xri field. + */ + if (((ndlp = fc_findnode_oxri(binfo, NLP_SEARCH_UNMAPPED | NLP_SEARCH_MAPPED, + cmd->ulpContext)) != NULL) && !(ndlp->nlp_flag & NLP_RPI_XRI)) { + ndlp->nlp_Xri = 0; /* xri */ + /* establish a new exchange */ + if ((ndlp->nlp_Rpi) && + ((ndlp->nlp_DID & CT_DID_MASK) != CT_DID_MASK) && + (binfo->fc_ffstate == FC_READY)) { + ndlp->nlp_flag |= NLP_RPI_XRI; + fc_create_xri(binfo, &binfo->fc_ring[FC_ELS_RING], ndlp); + } + } + break; + + case CMD_ADAPTER_MSG: + if ((binfo->fc_msgidx + MAX_MSG_DATA) <= FC_MAX_ADPTMSG) { + fc_bcopy((uchar * )cmd, &binfo->fc_adaptermsg[binfo->fc_msgidx], + MAX_MSG_DATA); + binfo->fc_msgidx += MAX_MSG_DATA; + con_print("lpfc%d: %s", binfo->fc_brd_no, binfo->fc_adaptermsg); + fc_bzero((void *)binfo->fc_adaptermsg, FC_MAX_ADPTMSG); + binfo->fc_msgidx = 0; + } else { + con_print("lpfc%d: %s\n", binfo->fc_brd_no, binfo->fc_adaptermsg); + fc_bzero(binfo->fc_adaptermsg, FC_MAX_ADPTMSG); + binfo->fc_msgidx = 0; + } + break; + + + default: + /* Unknown IOCB command */ + 
fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1400, /* ptr to msg structure */ + fc_mes1400, /* ptr to msg */ + fc_msgBlk1400.msgPreambleStr, /* begin varargs */ + cmd->ulpCommand, + cmd->ulpStatus, + cmd->ulpIoTag, + cmd->ulpContext); /* end varargs */ + break; + } /* switch(cmd->ulpCommand) */ + + return(rc); +} /* End fc_proc_ring_event */ + + +/**************************************************/ +/** handle_mb_event **/ +/** **/ +/** Description: Process a Mailbox Attention. **/ +/** Called from host_interrupt to process MBATT **/ +/** **/ +/** Returns: **/ +/** **/ +/**************************************************/ +_static_ int +handle_mb_event( +fc_dev_ctl_t *p_dev_ctl) +{ + FC_BRD_INFO * binfo; + MAILBOX * mb; + MAILBOX * swpmb; + MAILBOXQ * mbox; + IOCBQ * iocbq; + NODELIST * ndlp; + void *ioa; + uint32 control; + volatile uint32 word0; + volatile uint32 ldata; + volatile uint32 ldid; + volatile uint32 lrpi; + iCfgParam * clp; + + binfo = &BINFO; + clp = DD_CTL.p_config[binfo->fc_brd_no]; + + if (binfo->fc_flag & FC_SLI2) { + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + /* First copy command data */ + mb = FC_SLI2_MAILBOX(binfo); + word0 = *((volatile uint32 * )mb); + word0 = PCIMEM_LONG(word0); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + word0 = READ_SLIM_ADDR(binfo, ((volatile uint32 * )mb)); + FC_UNMAP_MEMIO(ioa); + } + + swpmb = (MAILBOX * ) & word0; + + FCSTATCTR.mboxEvent++; + + /* Sanity check to ensure the host owns the mailbox */ + if (swpmb->mbxOwner != OWN_HOST) { + int i; + + for(i=0; i<10240;i++) { + if (binfo->fc_flag & FC_SLI2) { + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + /* First copy command data */ + mb = FC_SLI2_MAILBOX(binfo); + word0 = *((volatile uint32 * )mb); + word0 = PCIMEM_LONG(word0); + } else { + /* First copy command data */ + ioa = (void 
*)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + word0 = READ_SLIM_ADDR(binfo, ((volatile uint32 * )mb)); + FC_UNMAP_MEMIO(ioa); + } + + swpmb = (MAILBOX * ) & word0; + if (swpmb->mbxOwner == OWN_HOST) + goto out; + } + /* Stray Mailbox Interrupt, mbxCommand mbxStatus */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0304, /* ptr to msg structure */ + fc_mes0304, /* ptr to msg */ + fc_msgBlk0304.msgPreambleStr, /* begin varargs */ + swpmb->mbxCommand, + swpmb->mbxStatus); /* end varargs */ + return(1); + } + +out: + + /* stop watchdog timer */ + if(MBOXTMO) { + fc_clk_can(p_dev_ctl, MBOXTMO); + MBOXTMO = 0; + } + + if (swpmb->mbxStatus) { + if (swpmb->mbxStatus == MBXERR_NO_RESOURCES) { + FCSTATCTR.mboxStatErr++; + /* Mbox cmd cmpl error - RETRYing */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0305, /* ptr to msg structure */ + fc_mes0305, /* ptr to msg */ + fc_msgBlk0305.msgPreambleStr, /* begin varargs */ + swpmb->mbxCommand, + word0, + binfo->fc_ffstate, + binfo->fc_flag); /* end varargs */ + if ((mbox = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + if (binfo->fc_flag & FC_SLI2) { + /* First copy mbox command data */ + mb = FC_SLI2_MAILBOX(binfo); + fc_pcimem_bcopy((uint32 * )mb, (uint32 * )mbox, + (sizeof(uint32) * (MAILBOX_CMD_WSIZE))); + } else { + /* First copy mbox command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + READ_SLIM_COPY(binfo, (uint32 *)mbox, (uint32 *)mb, + MAILBOX_CMD_WSIZE); + FC_UNMAP_MEMIO(ioa); + } + switch(((MAILBOX *)mbox)->mbxCommand) { + case MBX_READ_SPARM: + control = ((MAILBOX *)mbox)->un.varRdSparm.un.sp.bdeSize; + if(control == 0) { + fc_read_sparam(p_dev_ctl, (MAILBOX *)mbox); + } + case MBX_READ_SPARM64: + control = ((MAILBOX *)mbox)->un.varRdSparm.un.sp64.tus.f.bdeSize; + if(control == 0) { + fc_read_sparam(p_dev_ctl, (MAILBOX *)mbox); + } + case MBX_REG_LOGIN: + control = ((MAILBOX 
*)mbox)->un.varRegLogin.un.sp.bdeSize; + if(control == 0) { + goto mbout; + } + case MBX_REG_LOGIN64: + control = ((MAILBOX *)mbox)->un.varRegLogin.un.sp64.tus.f.bdeSize; + if(control == 0) { + goto mbout; + } + case MBX_READ_LA: + control = ((MAILBOX *)mbox)->un.varReadLA.un.lilpBde.bdeSize; + if(control == 0) { + fc_read_la(p_dev_ctl, (MAILBOX *)mbox); + } + case MBX_READ_LA64: + control = ((MAILBOX *)mbox)->un.varReadLA.un.lilpBde64.tus.f.bdeSize; + if(control == 0) { + fc_read_la(p_dev_ctl, (MAILBOX *)mbox); + } + } + ((MAILBOX *)mbox)->mbxOwner = OWN_HOST; + ((MAILBOX *)mbox)->mbxStatus = 0; + mbox->bp = (uchar * )binfo->fc_mbbp; + binfo->fc_mbox_active = 0; + if (issue_mb_cmd(binfo, (MAILBOX * )mbox, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbox); + } + return(0); + } + } + if (!((swpmb->mbxCommand == MBX_CLEAR_LA) && + (swpmb->mbxStatus == 0x1601))) { + /* Mbox cmd cmpl error */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0306, /* ptr to msg structure */ + fc_mes0306, /* ptr to msg */ + fc_msgBlk0306.msgPreambleStr, /* begin varargs */ + swpmb->mbxCommand, + word0, + binfo->fc_ffstate, + binfo->fc_flag); /* end varargs */ + FCSTATCTR.mboxStatErr++; + switch (swpmb->mbxCommand) { + case MBX_REG_LOGIN: + case MBX_REG_LOGIN64: + if (binfo->fc_flag & FC_SLI2) { + /* First copy command data */ + mb = FC_SLI2_MAILBOX(binfo); + ldata = mb->un.varWords[1]; /* get did */ + ldata = PCIMEM_LONG(ldata); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + ldata = READ_SLIM_ADDR(binfo, &mb->un.varWords[1]); + FC_UNMAP_MEMIO(ioa); + } + + ldid = ldata & Mask_DID; + if ((ndlp=fc_findnode_odid(binfo,(NLP_SEARCH_MAPPED | NLP_SEARCH_UNMAPPED), ldid))) { + if (ndlp->nlp_action & NLP_DO_DISC_START) { + /* Goto next entry */ + fc_nextnode(p_dev_ctl, ndlp); + } + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + 
} + break; + + case MBX_UNREG_LOGIN: + if (binfo->fc_flag & FC_SLI2) { + /* First copy command data */ + mb = FC_SLI2_MAILBOX(binfo); + ldata = mb->un.varWords[0]; /* get rpi */ + ldata = PCIMEM_LONG(ldata); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mb = FC_MAILBOX(binfo, ioa); + ldata = READ_SLIM_ADDR(binfo, &mb->un.varWords[0]); + FC_UNMAP_MEMIO(ioa); + } + + lrpi = ldata & 0xffff; + + if ((ndlp = fc_findnode_rpi(binfo, lrpi)) == 0) + break; + binfo->fc_nlplookup[ndlp->nlp_Rpi] = 0; + ndlp->nlp_Rpi = 0; + fc_freenode(binfo, ndlp, 0); + ndlp->nlp_state = NLP_LIMBO; + fc_nlp_bind(binfo, ndlp); + break; + + case MBX_READ_LA: + case MBX_READ_LA64: + case MBX_CLEAR_LA: + /* Turn on Link Attention interrupts */ + binfo->fc_process_LA = 1; + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + control = READ_CSR_REG(binfo, FC_HC_REG(binfo, ioa)); + control |= HC_LAINT_ENA; + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), control); + FC_UNMAP_MEMIO(ioa); + break; + + case MBX_INIT_LINK: + if (binfo->fc_flag & FC_SLI2) { + if ((clp[CFG_LINK_SPEED].a_current > 0) && + ((swpmb->mbxStatus == 0x0011) || (swpmb->mbxStatus == 0x0500))) { + /* Reset link speed to auto. 1G node detected in loop. 
*/ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk1302, /* ptr to msg structure */ + fc_mes1302, /* ptr to msg */ + fc_msgBlk1302.msgPreambleStr); /* begin & end varargs */ + clp[CFG_LINK_SPEED].a_current = LINK_SPEED_AUTO; + if ((mbox = (MAILBOXQ * )fc_mem_get(binfo, MEM_MBOX))) { + /* First copy mbox command data */ + mb = FC_SLI2_MAILBOX(binfo); + fc_pcimem_bcopy((uint32 * )mb, (uint32 * )mbox, + (sizeof(uint32) * (MAILBOX_CMD_WSIZE))); + ((MAILBOX *)mbox)->un.varInitLnk.link_flags &= ~FLAGS_LINK_SPEED; + ((MAILBOX *)mbox)->un.varInitLnk.link_speed = 0; /* LINK_SPEED_AUTO */ + ((MAILBOX *)mbox)->mbxOwner = OWN_HOST; + ((MAILBOX *)mbox)->mbxStatus = 0; + mbox->bp = (uchar * )binfo->fc_mbbp; + binfo->fc_mbox_active = 0; + if (issue_mb_cmd(binfo, (MAILBOX * )mbox, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbox); + } + return(0); + } + } + } + break; + } + if (binfo->fc_mbbp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )binfo->fc_mbbp); + binfo->fc_mbbp = 0; + } + goto mbout; + } + } + + /* Mbox cmd cmpl */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0307, /* ptr to msg structure */ + fc_mes0307, /* ptr to msg */ + fc_msgBlk0307.msgPreambleStr, /* begin varargs */ + swpmb->mbxCommand, + word0, + binfo->fc_ffstate, + binfo->fc_flag); /* end varargs */ + + if(binfo->fc_mbox_active == 2) { + MAILBOX *mbslim; + + /* command was issued by dfc layer, so save mbox cmpl */ + if ((binfo->fc_flag & FC_SLI2) && (!(binfo->fc_flag & FC_OFFLINE_MODE))) { + /* First copy command data */ + mbslim = FC_SLI2_MAILBOX(binfo); + /* copy results back to user */ + fc_pcimem_bcopy((uint32 * )mbslim, (uint32 * )&p_dev_ctl->dfcmb, + (sizeof(uint32) * MAILBOX_CMD_WSIZE)); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mbslim = FC_MAILBOX(binfo, ioa); + /* copy results back to user */ + READ_SLIM_COPY(binfo, (uint32 * )&p_dev_ctl->dfcmb, (uint32 * )mbslim, + MAILBOX_CMD_WSIZE); + 
FC_UNMAP_MEMIO(ioa); + } + } + else { + handle_mb_cmd(p_dev_ctl, swpmb, (uint32)swpmb->mbxCommand); + } + + +mbout: + /* Process next mailbox command if there is one */ + binfo->fc_mbox_active = 0; + if ((mbox = fc_mbox_get(binfo))) { + if (issue_mb_cmd(binfo, (MAILBOX * )mbox, MBX_NOWAIT) != MBX_BUSY) { + fc_mem_put(binfo, MEM_MBOX, (uchar * )mbox); + } + } else { + if (binfo->fc_flag & FC_DELAY_PLOGI) { + binfo->fc_flag &= ~FC_DELAY_PLOGI; + if((binfo->fc_flag & FC_RSCN_MODE) && (binfo->fc_ffstate == FC_READY)) + fc_nextrscn(p_dev_ctl, fc_max_els_sent); + else + fc_nextdisc(p_dev_ctl, fc_max_els_sent); + } + if (binfo->fc_flag & FC_DELAY_NSLOGI) { + if ((iocbq = fc_plogi_get(binfo))) { + fc_els_cmd(binfo, ELS_CMD_PLOGI, + (void *)((ulong)iocbq->iocb.un.elsreq.remoteID), + (uint32)0, (ushort)0, (NODELIST *)0); + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocbq); + } + else { + binfo->fc_flag &= ~FC_DELAY_NSLOGI; + } + } + if (binfo->fc_flag & FC_DELAY_RSCN) { + IOCBQ *temp; + IOCB *iocb; + MATCHMAP *mp; + RING *rp; + int i; + + rp = &binfo->fc_ring[FC_ELS_RING]; + binfo->fc_flag &= ~FC_DELAY_RSCN; + while (binfo->fc_rscn.q_first) { + temp = (IOCBQ * )binfo->fc_rscn.q_first; + if ((binfo->fc_rscn.q_first = temp->q) == 0) { + binfo->fc_rscn.q_last = 0; + } + binfo->fc_rscn.q_cnt--; + iocb = &temp->iocb; + mp = *((MATCHMAP **)iocb); + *((MATCHMAP **)iocb) = 0; + temp->q = NULL; + fc_process_rscn(p_dev_ctl, temp, mp); + + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + + i = 1; + /* free resources associated with this iocb and repost the ring buffers */ + if (!(binfo->fc_flag & FC_SLI2)) { + for (i = 1; i < (int)iocb->ulpBdeCount; i++) { + mp = fc_getvaddr(p_dev_ctl, rp, (uchar * )((ulong)iocb->un.cont[i].bdeAddress)); + if (mp) { + fc_mem_put(binfo, MEM_BUF, (uchar * )mp); + } + } + } + fc_mem_put(binfo, MEM_IOCB, (uchar * )temp); + } + } + } + return(0); +} /* End handle_mb_event */ + + +/**********************************************************/ +/** issue_mb_cmd Issue a 
mailbox command. **/ +/** If the mailbox is currently busy, **/ +/** queue command to mbox queue. **/ +/**********************************************************/ +_static_ int +issue_mb_cmd( +FC_BRD_INFO *binfo, +MAILBOX *mb, +int flag) +{ + MAILBOX * mbox; + MAILBOXQ * mbq; + int i; + void *ioa; + uint32 status, evtctr; + uint32 ha_copy; + fc_dev_ctl_t *p_dev_ctl; + volatile uint32 word0, ldata; + + mbq = (MAILBOXQ * )mb; + status = MBX_SUCCESS; + + if (binfo->fc_mbox_active) { + /* Another mailbox command is still being processed, queue this + * command to be processed later. + */ + fc_mbox_put(binfo, mbq); + + /* Mbox cmd issue - BUSY */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0308, /* ptr to msg structure */ + fc_mes0308, /* ptr to msg */ + fc_msgBlk0308.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + binfo->fc_ffstate, + binfo->fc_flag, + flag); /* end varargs */ + FCSTATCTR.mboxCmdBusy++; + + return(MBX_BUSY); + } + + binfo->fc_mbox_active = 1; + p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl); + + /* Mailbox cmd issue */ + fc_log_printf_msg_vargs( binfo->fc_brd_no, + &fc_msgBlk0309, /* ptr to msg structure */ + fc_mes0309, /* ptr to msg */ + fc_msgBlk0309.msgPreambleStr, /* begin varargs */ + mb->mbxCommand, + binfo->fc_ffstate, + binfo->fc_flag, + flag); /* end varargs */ + /* If we are not polling, turn on watchdog timer */ + if (flag != MBX_POLL) { + MBOXTMO = fc_clk_set(p_dev_ctl, MBOX_TMO_DFT, fc_mbox_timeout, 0, 0); + } + + FCSTATCTR.issueMboxCmd++; + evtctr = FCSTATCTR.mboxEvent; + + /* if there is one, save buffer to release in completion */ + if (mbq->bp) { + binfo->fc_mbbp = mbq->bp; + mbq->bp = 0; + } + + /* next set own bit for the adapter and copy over command word */ + mb->mbxOwner = OWN_CHIP; + + if (binfo->fc_flag & FC_SLI2) { + /* First copy command data */ + mbox = FC_SLI2_MAILBOX(binfo); + fc_pcimem_bcopy((uint32 * )mb, (uint32 * )mbox, + (sizeof(uint32) * (MAILBOX_CMD_WSIZE))); + 
fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV); + } else { + if (mb->mbxCommand == MBX_CONFIG_PORT) { + /* copy command data into host mbox for cmpl */ + fc_pcimem_bcopy((uint32 * )mb, + (uint32 * ) & ((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx, + (sizeof(uint32) * (MAILBOX_CMD_WSIZE))); + } + + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + + mbox = FC_MAILBOX(binfo, ioa); + WRITE_SLIM_COPY(binfo, &mb->un.varWords, &mbox->un.varWords, + (MAILBOX_CMD_WSIZE - 1)); + + + /* copy over last word, with mbxOwner set */ + ldata = *((volatile uint32 * )mb); + + WRITE_SLIM_ADDR(binfo, ((volatile uint32 * )mbox), ldata); + FC_UNMAP_MEMIO(ioa); + + if (mb->mbxCommand == MBX_CONFIG_PORT) { + /* switch over to host mailbox */ + binfo->fc_mboxaddr = (uint32 *)&((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx; + binfo->fc_flag |= FC_SLI2; + } + } + + + /* interrupt board to doit right away */ + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, FC_FF_REG(binfo, ioa), CA_MBATT); + FC_UNMAP_MEMIO(ioa); + + switch (flag) { + case MBX_SLEEP: + case MBX_NOWAIT: + break; + + case MBX_POLL: + i = 0; + if (binfo->fc_flag & FC_SLI2) { + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, + DDI_DMA_SYNC_FORKERNEL); + + /* First copy command data */ + mbox = FC_SLI2_MAILBOX(binfo); + word0 = *((volatile uint32 * )mbox); + word0 = PCIMEM_LONG(word0); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mbox = FC_MAILBOX(binfo, ioa); + word0 = READ_SLIM_ADDR(binfo, ((volatile uint32 * )mbox)); + FC_UNMAP_MEMIO(ioa); + } + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + ha_copy = READ_CSR_REG(binfo, FC_HA_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + + /* Wait for command to complete */ + while (((word0 & OWN_CHIP) == OWN_CHIP) || !(ha_copy & HA_MBATT)) { + if (i++ >= 100) { + binfo->fc_mbox_active = 0; 
+ return(MBX_NOT_FINISHED); + } + + /* Check if we took a mbox interrupt while we were polling */ + if(((word0 & OWN_CHIP) != OWN_CHIP) && (evtctr != FCSTATCTR.mboxEvent)) + break; + + DELAYMS(i); + + if (binfo->fc_flag & FC_SLI2) { + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, + DDI_DMA_SYNC_FORKERNEL); + + /* First copy command data */ + mbox = FC_SLI2_MAILBOX(binfo); + word0 = *((volatile uint32 * )mbox); + word0 = PCIMEM_LONG(word0); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mbox = FC_MAILBOX(binfo, ioa); + word0 = READ_SLIM_ADDR(binfo, ((volatile uint32 * )mbox)); + FC_UNMAP_MEMIO(ioa); + } + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + ha_copy = READ_CSR_REG(binfo, FC_HA_REG(binfo, ioa)); + FC_UNMAP_MEMIO(ioa); + } + + if (binfo->fc_flag & FC_SLI2) { + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, + DDI_DMA_SYNC_FORKERNEL); + + /* First copy command data */ + mbox = FC_SLI2_MAILBOX(binfo); + /* copy results back to user */ + fc_pcimem_bcopy((uint32 * )mbox, (uint32 * )mb, + (sizeof(uint32) * MAILBOX_CMD_WSIZE)); + } else { + /* First copy command data */ + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + mbox = FC_MAILBOX(binfo, ioa); + /* copy results back to user */ + READ_SLIM_COPY(binfo, (uint32 * )mb, (uint32 * )mbox, MAILBOX_CMD_WSIZE); + FC_UNMAP_MEMIO(ioa); + } + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + WRITE_CSR_REG(binfo, FC_HA_REG(binfo, ioa), HA_MBATT); + FC_UNMAP_MEMIO(ioa); + + binfo->fc_mbox_active = 0; + status = mb->mbxStatus; + break; + } + return(status); +} /* End issue_mb_cmd */ + + +/* + * This routine will issue as many iocb commands from the + * ring's xmit queue to the adapter as it can. + * If iocb_cmd is specified it will be queued to the xmit queue. 
+ */ +_static_ uint32 +issue_iocb_cmd( +FC_BRD_INFO *binfo, +RING *rp, +IOCBQ *iocb_cmd) +{ + IOCB * iocb; + IOCB * icmd; + void * ioa; + uint32 status; + uint32 * xx; + int onetime; + uint32 portCmdGet, rc; + fc_dev_ctl_t *p_dev_ctl; + + rc = PCIMEM_LONG(((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx.us.s2.port[rp->fc_ringno].cmdGetInx); + onetime = 0; + if ((binfo->fc_flag & FC_LNK_DOWN) || + (binfo->fc_ffstate < rp->fc_xmitstate)) { + if (iocb_cmd) { + icmd = &iocb_cmd->iocb; + if ((icmd->ulpCommand != CMD_QUE_RING_BUF_CN) && + (icmd->ulpCommand != CMD_QUE_RING_BUF64_CN) && + (icmd->ulpCommand != CMD_CREATE_XRI_CR)) { + fc_ringtx_put(rp, iocb_cmd); + + FCSTATCTR.NoIssueIocb++; + /* If link is down, just return */ + return(rc); + } + onetime = 1; + } else { + /* If link is down, just return */ + return(rc); + } + } else { + if (iocb_cmd) { + /* Queue command to ring xmit queue */ + fc_ringtx_put(rp, iocb_cmd); + } + if((binfo->fc_process_LA == 0) && + (rp->fc_ringno == FC_FCP_RING)) { + return(rc); + } + } + + p_dev_ctl = (fc_dev_ctl_t *)(binfo->fc_p_dev_ctl); + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL); + + /* onetime should only be set for QUE_RING_BUF or CREATE_XRI + * iocbs sent with link down. 
+ */ + + /* get the next available command iocb */ + iocb = (IOCB * )IOCB_ENTRY(rp->fc_cmdringaddr, rp->fc_cmdidx); + + portCmdGet = rc; + + if (portCmdGet >= rp->fc_numCiocb) { + if (iocb_cmd) { + /* Queue command to ring xmit queue */ + fc_ringtx_put(rp, iocb_cmd); + } + return(rc); + } + + /* bump iocb available command index */ + if (++rp->fc_cmdidx >= rp->fc_numCiocb) { + rp->fc_cmdidx = 0; + } + + /* While IOCB entries are available */ + while (rp->fc_cmdidx != portCmdGet) { + /* get next command from ring xmit queue */ + if ((onetime == 0) && ((iocb_cmd = fc_ringtx_get(rp)) == NULL)) { +out: + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, + DDI_DMA_SYNC_FORKERNEL); + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, + DDI_DMA_SYNC_FORDEV); + + /* SLIM POINTER */ + if (binfo->fc_busflag & FC_HOSTPTR) { + rp->fc_cmdidx = + (uchar)PCIMEM_LONG(((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx.us.s2.host[rp->fc_ringno].cmdPutInx); + } else { + void *ioa2; + + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + ioa2 = (void *)((char *)ioa +((SLIMOFF+(rp->fc_ringno*2))*4)); + rp->fc_cmdidx = (uchar)READ_SLIM_ADDR(binfo, (volatile uint32 *)ioa2); + FC_UNMAP_MEMIO(ioa); + } + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + status = ((CA_R0ATT) << (rp->fc_ringno * 4)); + WRITE_CSR_REG(binfo, FC_FF_REG(binfo, ioa), (volatile uint32)status); + FC_UNMAP_MEMIO(ioa); + return(rc); + } + icmd = &iocb_cmd->iocb; + + xx = (uint32 * ) icmd; + /* issue iocb command to adapter */ + fc_pcimem_bcopy((uint32 * )icmd, (uint32 * )iocb, sizeof(IOCB)); + FCSTATCTR.IssueIocb++; + + if ((icmd->ulpCommand == CMD_QUE_RING_BUF_CN) || + (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN) || + (rp->fc_ringno == FC_FCP_RING) || + (icmd->ulpCommand == CMD_ABORT_XRI_CX) || + (icmd->ulpCommand == CMD_ABORT_XRI_CN)) { + fc_mem_put(binfo, MEM_IOCB, (uchar * )iocb_cmd); + } else { + fc_ringtxp_put(rp, iocb_cmd); + } + + /* SLIM POINTER */ + if (binfo->fc_busflag & 
FC_HOSTPTR) { + ((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx.us.s2.host[rp->fc_ringno].cmdPutInx = PCIMEM_LONG(rp->fc_cmdidx); + } else { + void *ioa2; + + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + ioa2 = (void *)((char *)ioa +((SLIMOFF+(rp->fc_ringno*2))*4)); + WRITE_SLIM_ADDR(binfo, (volatile uint32 *)ioa2, rp->fc_cmdidx); + FC_UNMAP_MEMIO(ioa); + } + + if (onetime) { + goto out; + } + + /* get the next available command iocb */ + iocb = (IOCB * )IOCB_ENTRY(rp->fc_cmdringaddr, rp->fc_cmdidx); + + /* bump iocb available command index */ + if (++rp->fc_cmdidx >= rp->fc_numCiocb) { + rp->fc_cmdidx = 0; + } + } + + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, + DDI_DMA_SYNC_FORKERNEL); + fc_mpdata_sync(binfo->fc_slim2.dma_handle, 0, 0, + DDI_DMA_SYNC_FORDEV); + + /* SLIM POINTER */ + if (binfo->fc_busflag & FC_HOSTPTR) { + rp->fc_cmdidx = + (uchar)PCIMEM_LONG(((SLI2_SLIM * )binfo->fc_slim2.virt)->mbx.us.s2.host[rp->fc_ringno].cmdPutInx); + } else { + void *ioa2; + + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + ioa2 = (void *)((char *)ioa +((SLIMOFF+(rp->fc_ringno*2))*4)); + rp->fc_cmdidx = (uchar)READ_SLIM_ADDR(binfo, (volatile uint32 *)ioa2); + FC_UNMAP_MEMIO(ioa); + } + + + /* If we get here, iocb list is full */ + /* + * Set ring 'x' to SET R0CE_REQ in Chip Att register. + * Chip will tell us when an entry is freed. 
+ */ + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + status = ((CA_R0ATT | CA_R0CE_REQ) << (rp->fc_ringno * 4)); + WRITE_CSR_REG(binfo, FC_FF_REG(binfo, ioa), (volatile uint32)status); + FC_UNMAP_MEMIO(ioa); + + FCSTATCTR.iocbRingBusy++; + + if (onetime) { + /* Queue command to ring xmit queue */ + fc_ringtx_put(rp, iocb_cmd); + } + return(rc); +} /* End issue_iocb_cmd */ + + + + +/*****************************************************************************/ +/* + * NAME: fc_brdreset + * + * FUNCTION: hardware reset of adapter is performed + * + * EXECUTION ENVIRONMENT: process only + * + * NOTES: + * + * CALLED FROM: + * fc_cfg_init + * + * INPUT: + * p_dev_ctl - point to the dev_ctl area + * + */ +/*****************************************************************************/ +_static_ void +fc_brdreset ( +fc_dev_ctl_t *p_dev_ctl) /* point to the dev_ctl area */ +{ + uint32 word0; + ushort cfg_value, skip_post; + void *ioa; + FC_BRD_INFO * binfo; + MAILBOX * swpmb; + MAILBOX * mb; + + binfo = &BINFO; + ioa = (void *)FC_MAP_MEM(&binfo->fc_iomap_mem); /* map in SLIM */ + + + /* use REAL SLIM !!! 
*/ + binfo->fc_mboxaddr = 0; + binfo->fc_flag &= ~FC_SLI2; + + /* Reset the board - First put restart command in mailbox */ + mb = FC_MAILBOX(binfo, ioa); + word0 = 0; + swpmb = (MAILBOX * ) & word0; + swpmb->mbxCommand = MBX_RESTART; + swpmb->mbxHc = 1; + WRITE_SLIM_ADDR(binfo, ((volatile uint32 * )mb), word0); + /* Only skip post after fc_ffinit is completed */ + if (binfo->fc_ffstate) { + skip_post = 1; + WRITE_SLIM_ADDR(binfo, (((volatile uint32 * )mb) + 1), 1); /* Skip post */ + } + else { + skip_post = 0; + } + FC_UNMAP_MEMIO(ioa); + + /* Turn off SERR, PERR in PCI cmd register */ + binfo->fc_ffstate = FC_INIT_START; + + cfg_value = fc_rdpci_cmd(p_dev_ctl); + fc_wrpci_cmd(p_dev_ctl, (ushort)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL))); + + ioa = (void *)FC_MAP_IO(&binfo->fc_iomap_io); /* map in io registers */ + + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), (volatile uint32)HC_INITFF); + DELAYMS(1); + + WRITE_CSR_REG(binfo, FC_HC_REG(binfo, ioa), (volatile uint32)0); + + FC_UNMAP_MEMIO(ioa); + + /* Restore PCI cmd register */ + fc_wrpci_cmd(p_dev_ctl, cfg_value); + + if(skip_post) { + DELAYMS(100); + } + else { + DELAYMS(2000); + } + + binfo->fc_ffstate = FC_INIT_START; + binfo->fc_eventTag = 0; + binfo->fc_myDID = 0; + binfo->fc_prevDID = 0; + p_dev_ctl->power_up = 0; + return; +} diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/lpfc.conf.c current/drivers/scsi/lpfc/lpfc.conf.c --- reference/drivers/scsi/lpfc/lpfc.conf.c 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/lpfc.conf.c 2004-04-09 11:53:04.000000000 -0700 @@ -0,0 +1,336 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. 
* + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. * + *******************************************************************/ + +#include +#include "fc_os.h" +#include "fc_hw.h" +#include "fc.h" +#include "fcmsg.h" + +/* +# Verbosity: only turn this flag on if you are willing to risk being +# deluged with LOTS of information. +# You can set a bit mask to record specific types of verbose messages: +# +# LOG_ELS 0x1 ELS events +# LOG_DISCOVERY 0x2 Link discovery events +# LOG_MBOX 0x4 Mailbox events +# LOG_INIT 0x8 Initialization events +# LOG_LINK_EVENT 0x10 Link events +# LOG_IP 0x20 IP traffic history +# LOG_FCP 0x40 FCP traffic history +# LOG_NODE 0x80 Node table events +# LOG_MISC 0x400 Miscellaneous events +# LOG_SLI 0x800 SLI events +# LOG_CHK_COND 0x1000 FCP Check condition flag +# LOG_ALL_MSG 0x1fff LOG all messages +*/ +int lpfc_log_verbose =0; + +/* +# Setting log-only to 0 causes log messages to be printed on the +# console and to be logged to syslog (which may send them to the +# console again if it's configured to do so). +# Setting log-only to 1 causes log messages to go to syslog only. +*/ +int lpfc_log_only =0; + +/* +# lun-queue-depth: the default value lpfc will use to limit +# the number of outstanding commands per FCP LUN. This value is +# global, affecting each LUN recognized by the driver. 
+*/ +int lpfc_lun_queue_depth =30; + +/* +# lpfc_lun_skip : Is a LINUX OS parameter to support LUN skipping / no LUN +# If this is set to 1, lpfc will fake out the LINUX scsi layer to allow +# it to detect all LUNs if there are LUN holes on a device. +*/ +int lpfc_lun_skip=0; + +/* +# tgt-queue-depth: the default value lpfc will use to limit +# the number of outstanding commands per FCP target. This value is +# global, affecting each target recognized by the driver. +*/ +int lpfc_tgt_queue_depth =0; + +/* +# no-device-delay [0 or 1 to 30] - determines the length of +# the interval between deciding to fail back an I/O because there is no way +# to communicate with its particular device (e.g., due to device failure) and +# the actual fail back. A value of zero implies no delay whatsoever. +# Cautions: (1) This value is in seconds. +# (2) Setting a long delay value may permit I/O to build up, +# each with a pending timeout, which could result in the exhaustion of +# critical LINUX kernel resources. +# +# Note that this value can have an impact on the speed with which a +# system can shut down with I/Os pending and with the HBA not able to +# communicate with the loop or fabric, e.g., with a cable pulled. +*/ +int lpfc_no_device_delay =1; + +/* +# +++ Variables relating to IP networking support. +++ +*/ + +/* +# network-on: true (1) if networking is enabled, false (0) if not +*/ +int lpfc_network_on = 0; + +/* +# xmt-que-size: size of the transmit queue for mbufs (128 - 10240) +*/ +int lpfc_xmt_que_size = 256; + +/* +# +++ Variables common to both SCSI (FCP) and IP networking support. +++ +*/ + +/* +# Some disk devices have a "select ID" or "select Target" capability. +# From a protocol standpoint "select ID" usually means select the +# Fibre channel "ALPA". In the FC-AL Profile there is an "informative +# annex" which contains a table that maps a "select ID" (a number +# between 0 and 7F) to an ALPA. 
By default, for compatibility with +# older drivers, the lpfc driver scans its ALPA map from low ALPA +# to high ALPA. +# +# Turning on the scan-down variable (on = 1, off = 0) will +# cause the lpfc driver to use an inverted ALPA map, effectively +# scanning ALPAs from high to low as specified in the FC-AL annex. +# A value of 2, will also cause target assignment in a private loop +# environment to be based on the ALPA. Persistent bindings should NOT be +# used if scan-down is 2. +# +# (Note: This "select ID" functionality is a LOOP ONLY characteristic +# and will not work across a fabric.) +*/ +int lpfc_scandown =2; + +/* +# Determine how long the driver will wait to begin linkdown processing +# when a cable has been pulled or the link has otherwise become +# inaccessible, 1 - 255 secs. Linkdown processing includes failing back +# cmds to the target driver that have been waiting around for the link +# to come back up. There's a tradeoff here: small values of the timer +# cause the link to appear to "bounce", while large values of the +# timer can delay failover in a fault tolerant environment. Units are in +# seconds. A value of 0 means never failback cmds until the link comes up. +*/ +int lpfc_linkdown_tmo =30; + +/* +# If set, nodev-holdio will hold all I/O errors on devices that disappear +# until they come back. Default is 0, return errors with no-device-delay +*/ +int lpfc_nodev_holdio =0; + +/* +# If set, nodev-tmo will hold all I/O errors on devices that disappear +# until the timer expires. Default is 0, return errors with no-device-delay. 
+*/ +int lpfc_nodev_tmo =30; + +/* +# Use no-device-delay to delay FCP RSP errors and certain check conditions +*/ +int lpfc_delay_rsp_err =1; + +/* +# Treat certain check conditions as a FCP error +*/ +int lpfc_check_cond_err =1; + +/* +# num-iocbs: number of iocb buffers to allocate (128 to 10240) +*/ +int lpfc_num_iocbs = 2048; + +/* +# num-bufs: number of ELS buffers to allocate (64 to 4096) +# ELS buffers are needed to support Fibre channel Extended Link Services. +# Also used for SLI-2 FCP buffers, one per FCP command, and Mailbox commands. +*/ +int lpfc_num_bufs = 4096; + +/* +# topology: link topology for init link +# 0x0 = attempt loop mode then point-to-point +# 0x02 = attempt point-to-point mode only +# 0x04 = attempt loop mode only +# 0x06 = attempt point-to-point mode then loop +# Set point-to-point mode if you want to run as an N_Port. +# Set loop mode if you want to run as an NL_Port. +*/ +int lpfc_topology = 0; + +/* +# link-speed: link speed selection for initializing the Fibre Channel connection. +# 0 = auto select (default) +# 1 = 1 Gigabaud +# 2 = 2 Gigabaud +*/ +int lpfc_link_speed = 0; + +/* +# ip-class: FC class (2 or 3) to use for the IP protocol. +*/ +int lpfc_ip_class = 3; + +/* +# fcp-class: FC class (2 or 3) to use for the FCP protocol. +*/ +int lpfc_fcp_class = 3; + +/* +# Use ADISC for FCP rediscovery instead of PLOGI +*/ +int lpfc_use_adisc =0; + +/* +# Extra FCP timeout for fabrics +*/ +int lpfc_fcpfabric_tmo =0; + +/* +# Number of 4k STREAMS buffers to post to IP ring +*/ +int lpfc_post_ip_buf =128; + +/* +# Use dqfull-throttle-up-time to specify when to increment the current Q depth. +# This variable is in seconds. +*/ +int lpfc_dqfull_throttle_up_time =1; + +/* +# Increment the current Q depth by dqfull-throttle-up-inc +*/ +int lpfc_dqfull_throttle_up_inc =1; + +/* +# Use ACK0, instead of ACK1 for class 2 acknowledgement +*/ +int lpfc_ack0support =0; + +/* +# Vendor specific flag for vendor specific actions. 
+*/ +int lpfc_vendor =0x1; + +/* +# Linux does not scan past lun 0 if it's missing. Emulex driver can +# work around this limitation if this feature is on (1). +*/ +int lpfc_lun0_missing =0; + +/* +# When a disk disappears, fiber cable being disconnected for example, +# this option will report it missing BUT removable. This allows +# Linux to re-discover the disk later on without scan, when the cable +# is re-connected in our example. +# Warning: when this option is set, statuses and timeout values on +# disk missing reported to the kernel may have an effect +# on other software packages like failover, multipath, etc... +*/ +int lpfc_use_removable =1; + +/* +# Specifies the maximum number of luns per target. A value of 20 means +# luns from 0 to 19 are valid. +# Default of 0 means to use driver's maximum of 256. + */ +int lpfc_max_lun =0; + +/* +# When the scsi layer passes the driver a command, the SCSI command structure +# has a field in it, sc_data_direction, to indicate if the SCSI command is a +# read or a write SCSI operation. Under some instances, this field is invalid +# and the SCSI opcode can be used to determine the type of SCSI operation. If you +# wish to use the SCSI opcode, set this to 0. +# Note: For LINUX kernel revisions <= 2.4.4, this is ignored and the SCSI +# opcode is always used. +*/ +int lpfc_use_data_direction =1; + +/* +# Setup FCP persistent bindings, +# fcp-bind-WWPN binds a specific WorldWide PortName to a target id, +# fcp-bind-WWNN binds a specific WorldWide NodeName to a target id, +# fcp-bind-DID binds a specific DID to a target id. +# Only one binding method can be used. "lpfc_automap" needs to +# be changed to 0 and scan-down should NOT be set to 2 +# when one of these binding methods is used. +# WWNN, WWPN and DID are hexadecimal values. +# WWNN must be 16 digit BCD with leading 0s. +# WWPN must be 16 digit BCD with leading 0s. +# DID must be 6 digit BCD with leading 0s. 
+# The SCSI ID to bind to consists of two parts, the lpfc interface +# to bind to, and the target number for that interface. +# Thus lpfc0t2 specifies target 2 on interface lpfc0. +# +# Here are some examples: +# WWNN SCSI ID +# char *lpfc_fcp_bind_WWNN[]={"22000020370b8275:lpfc0t1", +# "22000020370b8998:lpfc0t2"}; +# +# WWPN SCSI ID +# char *lpfc_fcp_bind_WWPN[]={"22000020370b8275:lpfc0t1", +# "22000020370b8998:lpfc0t2"}; +# +# DID SCSI ID +# char *lpfc_fcp_bind_DID[]={"0000dc:lpfc0t1", +# "0000e0:lpfc0t2"}; +# +*/ +int lpfc_bind_entries =0; +char *lpfc_fcp_bind_WWNN[MAX_FC_BINDINGS]; +char *lpfc_fcp_bind_WWPN[MAX_FC_BINDINGS]; +char *lpfc_fcp_bind_DID[MAX_FC_BINDINGS]; + +/* +# If automap is set, SCSI IDs for all FCP nodes without +# persistent bindings will be automatically generated. +# If new FCP devices are added to the network when the system is down, +# there is no guarantee that these SCSI IDs will remain the same +# when the system is booted again. +# If one of the above fcp binding methods is specified, then automap +# devices will use the same mapping method to preserve +# SCSI IDs between link down and link up. +# If no bindings are specified above, a value of 1 will force WWNN +# binding, 2 for WWPN binding, and 3 for DID binding. +# If automap is 0, only devices with persistent bindings will be +# recognized by the system. 
+*/ +int lpfc_automap =2; + +/* +# Default values for I/O coalescing +# cr_delay ms or cr_count outstanding commands +*/ +int lpfc_cr_delay =0; +int lpfc_cr_count =0; + + + diff -purN -X /home/mbligh/.diff.exclude reference/drivers/scsi/lpfc/lpfc.conf.defs current/drivers/scsi/lpfc/lpfc.conf.defs --- reference/drivers/scsi/lpfc/lpfc.conf.defs 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/lpfc.conf.defs 2004-04-09 11:53:04.000000000 -0700 @@ -0,0 +1,3380 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Enterprise Fibre Channel Host Bus Adapters. * + * Refer to the README file included with this package for * + * driver version and adapter support. * + * Copyright (C) 2003 Emulex Corporation. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of the GNU General Public License * + * as published by the Free Software Foundation; either version 2 * + * of the License, or (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details, a copy of which * + * can be found in the file COPYING included with this package. 
* + *******************************************************************/ + +/* This file is to support different configurations for each HBA */ +/* HBA 0 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc0_log_verbose, "i"); +MODULE_PARM(lpfc0_log_only, "i"); +MODULE_PARM(lpfc0_lun_queue_depth, "i"); +MODULE_PARM(lpfc0_tgt_queue_depth, "i"); +MODULE_PARM(lpfc0_no_device_delay, "i"); +MODULE_PARM(lpfc0_network_on, "i"); +MODULE_PARM(lpfc0_xmt_que_size, "i"); +MODULE_PARM(lpfc0_scandown, "i"); +MODULE_PARM(lpfc0_linkdown_tmo, "i"); +MODULE_PARM(lpfc0_nodev_tmo, "i"); +MODULE_PARM(lpfc0_delay_rsp_err, "i"); +MODULE_PARM(lpfc0_nodev_holdio, "i"); +MODULE_PARM(lpfc0_check_cond_err, "i"); +MODULE_PARM(lpfc0_num_iocbs, "i"); +MODULE_PARM(lpfc0_num_bufs, "i"); +MODULE_PARM(lpfc0_topology, "i"); +MODULE_PARM(lpfc0_link_speed, "i"); +MODULE_PARM(lpfc0_ip_class, "i"); +MODULE_PARM(lpfc0_fcp_class, "i"); +MODULE_PARM(lpfc0_use_adisc, "i"); +MODULE_PARM(lpfc0_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc0_post_ip_buf, "i"); +MODULE_PARM(lpfc0_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc0_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc0_ack0support, "i"); +MODULE_PARM(lpfc0_automap, "i"); +MODULE_PARM(lpfc0_cr_delay, "i"); +MODULE_PARM(lpfc0_cr_count, "i"); +#endif + +static int lpfc0_log_verbose = -1; +static int lpfc0_log_only = -1; +static int lpfc0_lun_queue_depth = -1; +static int lpfc0_tgt_queue_depth = -1; +static int lpfc0_no_device_delay = -1; +static int lpfc0_network_on = -1; +static int lpfc0_xmt_que_size = -1; +static int lpfc0_scandown = -1; +static int lpfc0_linkdown_tmo = -1; +static int lpfc0_nodev_tmo = -1; +static int lpfc0_delay_rsp_err = -1; +static int lpfc0_nodev_holdio = -1; +static int lpfc0_check_cond_err = -1; +static int lpfc0_num_iocbs = -1; +static int lpfc0_num_bufs = -1; +static int lpfc0_topology = -1; +static int lpfc0_link_speed = -1; +static int lpfc0_ip_class = -1; +static int lpfc0_fcp_class = -1; +static int lpfc0_use_adisc = -1; +static int 
lpfc0_fcpfabric_tmo = -1; +static int lpfc0_post_ip_buf = -1; +static int lpfc0_dqfull_throttle_up_time = -1; +static int lpfc0_dqfull_throttle_up_inc = -1; +static int lpfc0_ack0support = -1; +static int lpfc0_automap = -1; +static int lpfc0_cr_delay = -1; +static int lpfc0_cr_count = -1; + +/* HBA 1 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc1_log_verbose, "i"); +MODULE_PARM(lpfc1_log_only, "i"); +MODULE_PARM(lpfc1_lun_queue_depth, "i"); +MODULE_PARM(lpfc1_tgt_queue_depth, "i"); +MODULE_PARM(lpfc1_no_device_delay, "i"); +MODULE_PARM(lpfc1_network_on, "i"); +MODULE_PARM(lpfc1_xmt_que_size, "i"); +MODULE_PARM(lpfc1_scandown, "i"); +MODULE_PARM(lpfc1_linkdown_tmo, "i"); +MODULE_PARM(lpfc1_nodev_tmo, "i"); +MODULE_PARM(lpfc1_delay_rsp_err, "i"); +MODULE_PARM(lpfc1_nodev_holdio, "i"); +MODULE_PARM(lpfc1_check_cond_err, "i"); +MODULE_PARM(lpfc1_num_iocbs, "i"); +MODULE_PARM(lpfc1_num_bufs, "i"); +MODULE_PARM(lpfc1_topology, "i"); +MODULE_PARM(lpfc1_link_speed, "i"); +MODULE_PARM(lpfc1_ip_class, "i"); +MODULE_PARM(lpfc1_fcp_class, "i"); +MODULE_PARM(lpfc1_use_adisc, "i"); +MODULE_PARM(lpfc1_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc1_post_ip_buf, "i"); +MODULE_PARM(lpfc1_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc1_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc1_ack0support, "i"); +MODULE_PARM(lpfc1_automap, "i"); +MODULE_PARM(lpfc1_cr_delay, "i"); +MODULE_PARM(lpfc1_cr_count, "i"); +#endif + +static int lpfc1_log_verbose = -1; +static int lpfc1_log_only = -1; +static int lpfc1_lun_queue_depth = -1; +static int lpfc1_tgt_queue_depth = -1; +static int lpfc1_no_device_delay = -1; +static int lpfc1_network_on = -1; +static int lpfc1_xmt_que_size = -1; +static int lpfc1_scandown = -1; +static int lpfc1_linkdown_tmo = -1; +static int lpfc1_nodev_tmo = -1; +static int lpfc1_delay_rsp_err = -1; +static int lpfc1_nodev_holdio = -1; +static int lpfc1_check_cond_err = -1; +static int lpfc1_num_iocbs = -1; +static int lpfc1_num_bufs = -1; +static int lpfc1_topology = -1; +static 
int lpfc1_link_speed = -1; +static int lpfc1_ip_class = -1; +static int lpfc1_fcp_class = -1; +static int lpfc1_use_adisc = -1; +static int lpfc1_fcpfabric_tmo = -1; +static int lpfc1_post_ip_buf = -1; +static int lpfc1_dqfull_throttle_up_time = -1; +static int lpfc1_dqfull_throttle_up_inc = -1; +static int lpfc1_ack0support = -1; +static int lpfc1_automap = -1; +static int lpfc1_cr_delay = -1; +static int lpfc1_cr_count = -1; + +/* HBA 2 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc2_log_verbose, "i"); +MODULE_PARM(lpfc2_log_only, "i"); +MODULE_PARM(lpfc2_lun_queue_depth, "i"); +MODULE_PARM(lpfc2_tgt_queue_depth, "i"); +MODULE_PARM(lpfc2_no_device_delay, "i"); +MODULE_PARM(lpfc2_network_on, "i"); +MODULE_PARM(lpfc2_xmt_que_size, "i"); +MODULE_PARM(lpfc2_scandown, "i"); +MODULE_PARM(lpfc2_linkdown_tmo, "i"); +MODULE_PARM(lpfc2_nodev_tmo, "i"); +MODULE_PARM(lpfc2_delay_rsp_err, "i"); +MODULE_PARM(lpfc2_nodev_holdio, "i"); +MODULE_PARM(lpfc2_check_cond_err, "i"); +MODULE_PARM(lpfc2_num_iocbs, "i"); +MODULE_PARM(lpfc2_num_bufs, "i"); +MODULE_PARM(lpfc2_topology, "i"); +MODULE_PARM(lpfc2_link_speed, "i"); +MODULE_PARM(lpfc2_ip_class, "i"); +MODULE_PARM(lpfc2_fcp_class, "i"); +MODULE_PARM(lpfc2_use_adisc, "i"); +MODULE_PARM(lpfc2_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc2_post_ip_buf, "i"); +MODULE_PARM(lpfc2_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc2_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc2_ack0support, "i"); +MODULE_PARM(lpfc2_automap, "i"); +MODULE_PARM(lpfc2_cr_delay, "i"); +MODULE_PARM(lpfc2_cr_count, "i"); +#endif + +static int lpfc2_log_verbose = -1; +static int lpfc2_log_only = -1; +static int lpfc2_lun_queue_depth = -1; +static int lpfc2_tgt_queue_depth = -1; +static int lpfc2_no_device_delay = -1; +static int lpfc2_network_on = -1; +static int lpfc2_xmt_que_size = -1; +static int lpfc2_scandown = -1; +static int lpfc2_linkdown_tmo = -1; +static int lpfc2_nodev_tmo = -1; +static int lpfc2_delay_rsp_err = -1; +static int lpfc2_nodev_holdio = -1; +static 
int lpfc2_check_cond_err = -1; +static int lpfc2_num_iocbs = -1; +static int lpfc2_num_bufs = -1; +static int lpfc2_topology = -1; +static int lpfc2_link_speed = -1; +static int lpfc2_ip_class = -1; +static int lpfc2_fcp_class = -1; +static int lpfc2_use_adisc = -1; +static int lpfc2_fcpfabric_tmo = -1; +static int lpfc2_post_ip_buf = -1; +static int lpfc2_dqfull_throttle_up_time = -1; +static int lpfc2_dqfull_throttle_up_inc = -1; +static int lpfc2_ack0support = -1; +static int lpfc2_automap = -1; +static int lpfc2_cr_delay = -1; +static int lpfc2_cr_count = -1; + +/* HBA 3 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc3_log_verbose, "i"); +MODULE_PARM(lpfc3_log_only, "i"); +MODULE_PARM(lpfc3_lun_queue_depth, "i"); +MODULE_PARM(lpfc3_tgt_queue_depth, "i"); +MODULE_PARM(lpfc3_no_device_delay, "i"); +MODULE_PARM(lpfc3_network_on, "i"); +MODULE_PARM(lpfc3_xmt_que_size, "i"); +MODULE_PARM(lpfc3_scandown, "i"); +MODULE_PARM(lpfc3_linkdown_tmo, "i"); +MODULE_PARM(lpfc3_nodev_tmo, "i"); +MODULE_PARM(lpfc3_delay_rsp_err, "i"); +MODULE_PARM(lpfc3_nodev_holdio, "i"); +MODULE_PARM(lpfc3_check_cond_err, "i"); +MODULE_PARM(lpfc3_num_iocbs, "i"); +MODULE_PARM(lpfc3_num_bufs, "i"); +MODULE_PARM(lpfc3_topology, "i"); +MODULE_PARM(lpfc3_link_speed, "i"); +MODULE_PARM(lpfc3_ip_class, "i"); +MODULE_PARM(lpfc3_fcp_class, "i"); +MODULE_PARM(lpfc3_use_adisc, "i"); +MODULE_PARM(lpfc3_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc3_post_ip_buf, "i"); +MODULE_PARM(lpfc3_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc3_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc3_ack0support, "i"); +MODULE_PARM(lpfc3_automap, "i"); +MODULE_PARM(lpfc3_cr_delay, "i"); +MODULE_PARM(lpfc3_cr_count, "i"); +#endif + +static int lpfc3_log_verbose = -1; +static int lpfc3_log_only = -1; +static int lpfc3_lun_queue_depth = -1; +static int lpfc3_tgt_queue_depth = -1; +static int lpfc3_no_device_delay = -1; +static int lpfc3_network_on = -1; +static int lpfc3_xmt_que_size = -1; +static int lpfc3_scandown = -1; +static int 
lpfc3_linkdown_tmo = -1; +static int lpfc3_nodev_tmo = -1; +static int lpfc3_delay_rsp_err = -1; +static int lpfc3_nodev_holdio = -1; +static int lpfc3_check_cond_err = -1; +static int lpfc3_num_iocbs = -1; +static int lpfc3_num_bufs = -1; +static int lpfc3_topology = -1; +static int lpfc3_link_speed = -1; +static int lpfc3_ip_class = -1; +static int lpfc3_fcp_class = -1; +static int lpfc3_use_adisc = -1; +static int lpfc3_fcpfabric_tmo = -1; +static int lpfc3_post_ip_buf = -1; +static int lpfc3_dqfull_throttle_up_time = -1; +static int lpfc3_dqfull_throttle_up_inc = -1; +static int lpfc3_ack0support = -1; +static int lpfc3_automap = -1; +static int lpfc3_cr_delay = -1; +static int lpfc3_cr_count = -1; + +/* HBA 4 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc4_log_verbose, "i"); +MODULE_PARM(lpfc4_log_only, "i"); +MODULE_PARM(lpfc4_lun_queue_depth, "i"); +MODULE_PARM(lpfc4_tgt_queue_depth, "i"); +MODULE_PARM(lpfc4_no_device_delay, "i"); +MODULE_PARM(lpfc4_network_on, "i"); +MODULE_PARM(lpfc4_xmt_que_size, "i"); +MODULE_PARM(lpfc4_scandown, "i"); +MODULE_PARM(lpfc4_linkdown_tmo, "i"); +MODULE_PARM(lpfc4_nodev_tmo, "i"); +MODULE_PARM(lpfc4_delay_rsp_err, "i"); +MODULE_PARM(lpfc4_nodev_holdio, "i"); +MODULE_PARM(lpfc4_check_cond_err, "i"); +MODULE_PARM(lpfc4_num_iocbs, "i"); +MODULE_PARM(lpfc4_num_bufs, "i"); +MODULE_PARM(lpfc4_topology, "i"); +MODULE_PARM(lpfc4_link_speed, "i"); +MODULE_PARM(lpfc4_ip_class, "i"); +MODULE_PARM(lpfc4_fcp_class, "i"); +MODULE_PARM(lpfc4_use_adisc, "i"); +MODULE_PARM(lpfc4_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc4_post_ip_buf, "i"); +MODULE_PARM(lpfc4_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc4_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc4_ack0support, "i"); +MODULE_PARM(lpfc4_automap, "i"); +MODULE_PARM(lpfc4_cr_delay, "i"); +MODULE_PARM(lpfc4_cr_count, "i"); +#endif + +static int lpfc4_log_verbose = -1; +static int lpfc4_log_only = -1; +static int lpfc4_lun_queue_depth = -1; +static int lpfc4_tgt_queue_depth = -1; +static int 
lpfc4_no_device_delay = -1; +static int lpfc4_network_on = -1; +static int lpfc4_xmt_que_size = -1; +static int lpfc4_scandown = -1; +static int lpfc4_linkdown_tmo = -1; +static int lpfc4_nodev_tmo = -1; +static int lpfc4_delay_rsp_err = -1; +static int lpfc4_nodev_holdio = -1; +static int lpfc4_check_cond_err = -1; +static int lpfc4_num_iocbs = -1; +static int lpfc4_num_bufs = -1; +static int lpfc4_topology = -1; +static int lpfc4_link_speed = -1; +static int lpfc4_ip_class = -1; +static int lpfc4_fcp_class = -1; +static int lpfc4_use_adisc = -1; +static int lpfc4_fcpfabric_tmo = -1; +static int lpfc4_post_ip_buf = -1; +static int lpfc4_dqfull_throttle_up_time = -1; +static int lpfc4_dqfull_throttle_up_inc = -1; +static int lpfc4_ack0support = -1; +static int lpfc4_automap = -1; +static int lpfc4_cr_delay = -1; +static int lpfc4_cr_count = -1; + +/* HBA 5 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc5_log_verbose, "i"); +MODULE_PARM(lpfc5_log_only, "i"); +MODULE_PARM(lpfc5_lun_queue_depth, "i"); +MODULE_PARM(lpfc5_tgt_queue_depth, "i"); +MODULE_PARM(lpfc5_no_device_delay, "i"); +MODULE_PARM(lpfc5_network_on, "i"); +MODULE_PARM(lpfc5_xmt_que_size, "i"); +MODULE_PARM(lpfc5_scandown, "i"); +MODULE_PARM(lpfc5_linkdown_tmo, "i"); +MODULE_PARM(lpfc5_nodev_tmo, "i"); +MODULE_PARM(lpfc5_delay_rsp_err, "i"); +MODULE_PARM(lpfc5_nodev_holdio, "i"); +MODULE_PARM(lpfc5_check_cond_err, "i"); +MODULE_PARM(lpfc5_num_iocbs, "i"); +MODULE_PARM(lpfc5_num_bufs, "i"); +MODULE_PARM(lpfc5_topology, "i"); +MODULE_PARM(lpfc5_link_speed, "i"); +MODULE_PARM(lpfc5_ip_class, "i"); +MODULE_PARM(lpfc5_fcp_class, "i"); +MODULE_PARM(lpfc5_use_adisc, "i"); +MODULE_PARM(lpfc5_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc5_post_ip_buf, "i"); +MODULE_PARM(lpfc5_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc5_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc5_ack0support, "i"); +MODULE_PARM(lpfc5_automap, "i"); +MODULE_PARM(lpfc5_cr_delay, "i"); +MODULE_PARM(lpfc5_cr_count, "i"); +#endif + +static int 
lpfc5_log_verbose = -1; +static int lpfc5_log_only = -1; +static int lpfc5_lun_queue_depth = -1; +static int lpfc5_tgt_queue_depth = -1; +static int lpfc5_no_device_delay = -1; +static int lpfc5_network_on = -1; +static int lpfc5_xmt_que_size = -1; +static int lpfc5_scandown = -1; +static int lpfc5_linkdown_tmo = -1; +static int lpfc5_nodev_tmo = -1; +static int lpfc5_delay_rsp_err = -1; +static int lpfc5_nodev_holdio = -1; +static int lpfc5_check_cond_err = -1; +static int lpfc5_num_iocbs = -1; +static int lpfc5_num_bufs = -1; +static int lpfc5_topology = -1; +static int lpfc5_link_speed = -1; +static int lpfc5_ip_class = -1; +static int lpfc5_fcp_class = -1; +static int lpfc5_use_adisc = -1; +static int lpfc5_fcpfabric_tmo = -1; +static int lpfc5_post_ip_buf = -1; +static int lpfc5_dqfull_throttle_up_time = -1; +static int lpfc5_dqfull_throttle_up_inc = -1; +static int lpfc5_ack0support = -1; +static int lpfc5_automap = -1; +static int lpfc5_cr_delay = -1; +static int lpfc5_cr_count = -1; + +/* HBA 6 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc6_log_verbose, "i"); +MODULE_PARM(lpfc6_log_only, "i"); +MODULE_PARM(lpfc6_lun_queue_depth, "i"); +MODULE_PARM(lpfc6_tgt_queue_depth, "i"); +MODULE_PARM(lpfc6_no_device_delay, "i"); +MODULE_PARM(lpfc6_network_on, "i"); +MODULE_PARM(lpfc6_xmt_que_size, "i"); +MODULE_PARM(lpfc6_scandown, "i"); +MODULE_PARM(lpfc6_linkdown_tmo, "i"); +MODULE_PARM(lpfc6_nodev_tmo, "i"); +MODULE_PARM(lpfc6_delay_rsp_err, "i"); +MODULE_PARM(lpfc6_nodev_holdio, "i"); +MODULE_PARM(lpfc6_check_cond_err, "i"); +MODULE_PARM(lpfc6_num_iocbs, "i"); +MODULE_PARM(lpfc6_num_bufs, "i"); +MODULE_PARM(lpfc6_topology, "i"); +MODULE_PARM(lpfc6_link_speed, "i"); +MODULE_PARM(lpfc6_ip_class, "i"); +MODULE_PARM(lpfc6_fcp_class, "i"); +MODULE_PARM(lpfc6_use_adisc, "i"); +MODULE_PARM(lpfc6_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc6_post_ip_buf, "i"); +MODULE_PARM(lpfc6_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc6_dqfull_throttle_up_inc, "i"); 
+MODULE_PARM(lpfc6_ack0support, "i"); +MODULE_PARM(lpfc6_automap, "i"); +MODULE_PARM(lpfc6_cr_delay, "i"); +MODULE_PARM(lpfc6_cr_count, "i"); +#endif + +static int lpfc6_log_verbose = -1; +static int lpfc6_log_only = -1; +static int lpfc6_lun_queue_depth = -1; +static int lpfc6_tgt_queue_depth = -1; +static int lpfc6_no_device_delay = -1; +static int lpfc6_network_on = -1; +static int lpfc6_xmt_que_size = -1; +static int lpfc6_scandown = -1; +static int lpfc6_linkdown_tmo = -1; +static int lpfc6_nodev_tmo = -1; +static int lpfc6_delay_rsp_err = -1; +static int lpfc6_nodev_holdio = -1; +static int lpfc6_check_cond_err = -1; +static int lpfc6_num_iocbs = -1; +static int lpfc6_num_bufs = -1; +static int lpfc6_topology = -1; +static int lpfc6_link_speed = -1; +static int lpfc6_ip_class = -1; +static int lpfc6_fcp_class = -1; +static int lpfc6_use_adisc = -1; +static int lpfc6_fcpfabric_tmo = -1; +static int lpfc6_post_ip_buf = -1; +static int lpfc6_dqfull_throttle_up_time = -1; +static int lpfc6_dqfull_throttle_up_inc = -1; +static int lpfc6_ack0support = -1; +static int lpfc6_automap = -1; +static int lpfc6_cr_delay = -1; +static int lpfc6_cr_count = -1; + +/* HBA 7 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc7_log_verbose, "i"); +MODULE_PARM(lpfc7_log_only, "i"); +MODULE_PARM(lpfc7_lun_queue_depth, "i"); +MODULE_PARM(lpfc7_tgt_queue_depth, "i"); +MODULE_PARM(lpfc7_no_device_delay, "i"); +MODULE_PARM(lpfc7_network_on, "i"); +MODULE_PARM(lpfc7_xmt_que_size, "i"); +MODULE_PARM(lpfc7_scandown, "i"); +MODULE_PARM(lpfc7_linkdown_tmo, "i"); +MODULE_PARM(lpfc7_nodev_tmo, "i"); +MODULE_PARM(lpfc7_delay_rsp_err, "i"); +MODULE_PARM(lpfc7_nodev_holdio, "i"); +MODULE_PARM(lpfc7_check_cond_err, "i"); +MODULE_PARM(lpfc7_num_iocbs, "i"); +MODULE_PARM(lpfc7_num_bufs, "i"); +MODULE_PARM(lpfc7_topology, "i"); +MODULE_PARM(lpfc7_link_speed, "i"); +MODULE_PARM(lpfc7_ip_class, "i"); +MODULE_PARM(lpfc7_fcp_class, "i"); +MODULE_PARM(lpfc7_use_adisc, "i"); +MODULE_PARM(lpfc7_fcpfabric_tmo, "i"); 
+MODULE_PARM(lpfc7_post_ip_buf, "i"); +MODULE_PARM(lpfc7_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc7_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc7_ack0support, "i"); +MODULE_PARM(lpfc7_automap, "i"); +MODULE_PARM(lpfc7_cr_delay, "i"); +MODULE_PARM(lpfc7_cr_count, "i"); +#endif + +static int lpfc7_log_verbose = -1; +static int lpfc7_log_only = -1; +static int lpfc7_lun_queue_depth = -1; +static int lpfc7_tgt_queue_depth = -1; +static int lpfc7_no_device_delay = -1; +static int lpfc7_network_on = -1; +static int lpfc7_xmt_que_size = -1; +static int lpfc7_scandown = -1; +static int lpfc7_linkdown_tmo = -1; +static int lpfc7_nodev_tmo = -1; +static int lpfc7_delay_rsp_err = -1; +static int lpfc7_nodev_holdio = -1; +static int lpfc7_check_cond_err = -1; +static int lpfc7_num_iocbs = -1; +static int lpfc7_num_bufs = -1; +static int lpfc7_topology = -1; +static int lpfc7_link_speed = -1; +static int lpfc7_ip_class = -1; +static int lpfc7_fcp_class = -1; +static int lpfc7_use_adisc = -1; +static int lpfc7_fcpfabric_tmo = -1; +static int lpfc7_post_ip_buf = -1; +static int lpfc7_dqfull_throttle_up_time = -1; +static int lpfc7_dqfull_throttle_up_inc = -1; +static int lpfc7_ack0support = -1; +static int lpfc7_automap = -1; +static int lpfc7_cr_delay = -1; +static int lpfc7_cr_count = -1; + +/* HBA 8 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc8_log_verbose, "i"); +MODULE_PARM(lpfc8_log_only, "i"); +MODULE_PARM(lpfc8_lun_queue_depth, "i"); +MODULE_PARM(lpfc8_tgt_queue_depth, "i"); +MODULE_PARM(lpfc8_no_device_delay, "i"); +MODULE_PARM(lpfc8_network_on, "i"); +MODULE_PARM(lpfc8_xmt_que_size, "i"); +MODULE_PARM(lpfc8_scandown, "i"); +MODULE_PARM(lpfc8_linkdown_tmo, "i"); +MODULE_PARM(lpfc8_nodev_tmo, "i"); +MODULE_PARM(lpfc8_delay_rsp_err, "i"); +MODULE_PARM(lpfc8_nodev_holdio, "i"); +MODULE_PARM(lpfc8_check_cond_err, "i"); +MODULE_PARM(lpfc8_num_iocbs, "i"); +MODULE_PARM(lpfc8_num_bufs, "i"); +MODULE_PARM(lpfc8_topology, "i"); +MODULE_PARM(lpfc8_link_speed, "i"); 
+MODULE_PARM(lpfc8_ip_class, "i"); +MODULE_PARM(lpfc8_fcp_class, "i"); +MODULE_PARM(lpfc8_use_adisc, "i"); +MODULE_PARM(lpfc8_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc8_post_ip_buf, "i"); +MODULE_PARM(lpfc8_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc8_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc8_ack0support, "i"); +MODULE_PARM(lpfc8_automap, "i"); +MODULE_PARM(lpfc8_cr_delay, "i"); +MODULE_PARM(lpfc8_cr_count, "i"); +#endif + +static int lpfc8_log_verbose = -1; +static int lpfc8_log_only = -1; +static int lpfc8_lun_queue_depth = -1; +static int lpfc8_tgt_queue_depth = -1; +static int lpfc8_no_device_delay = -1; +static int lpfc8_network_on = -1; +static int lpfc8_xmt_que_size = -1; +static int lpfc8_scandown = -1; +static int lpfc8_linkdown_tmo = -1; +static int lpfc8_nodev_tmo = -1; +static int lpfc8_delay_rsp_err = -1; +static int lpfc8_nodev_holdio = -1; +static int lpfc8_check_cond_err = -1; +static int lpfc8_num_iocbs = -1; +static int lpfc8_num_bufs = -1; +static int lpfc8_topology = -1; +static int lpfc8_link_speed = -1; +static int lpfc8_ip_class = -1; +static int lpfc8_fcp_class = -1; +static int lpfc8_use_adisc = -1; +static int lpfc8_fcpfabric_tmo = -1; +static int lpfc8_post_ip_buf = -1; +static int lpfc8_dqfull_throttle_up_time = -1; +static int lpfc8_dqfull_throttle_up_inc = -1; +static int lpfc8_ack0support = -1; +static int lpfc8_automap = -1; +static int lpfc8_cr_delay = -1; +static int lpfc8_cr_count = -1; + +/* HBA 9 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc9_log_verbose, "i"); +MODULE_PARM(lpfc9_log_only, "i"); +MODULE_PARM(lpfc9_lun_queue_depth, "i"); +MODULE_PARM(lpfc9_tgt_queue_depth, "i"); +MODULE_PARM(lpfc9_no_device_delay, "i"); +MODULE_PARM(lpfc9_network_on, "i"); +MODULE_PARM(lpfc9_xmt_que_size, "i"); +MODULE_PARM(lpfc9_scandown, "i"); +MODULE_PARM(lpfc9_linkdown_tmo, "i"); +MODULE_PARM(lpfc9_nodev_tmo, "i"); +MODULE_PARM(lpfc9_delay_rsp_err, "i"); +MODULE_PARM(lpfc9_nodev_holdio, "i"); +MODULE_PARM(lpfc9_check_cond_err, "i"); 
+MODULE_PARM(lpfc9_num_iocbs, "i"); +MODULE_PARM(lpfc9_num_bufs, "i"); +MODULE_PARM(lpfc9_topology, "i"); +MODULE_PARM(lpfc9_link_speed, "i"); +MODULE_PARM(lpfc9_ip_class, "i"); +MODULE_PARM(lpfc9_fcp_class, "i"); +MODULE_PARM(lpfc9_use_adisc, "i"); +MODULE_PARM(lpfc9_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc9_post_ip_buf, "i"); +MODULE_PARM(lpfc9_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc9_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc9_ack0support, "i"); +MODULE_PARM(lpfc9_automap, "i"); +MODULE_PARM(lpfc9_cr_delay, "i"); +MODULE_PARM(lpfc9_cr_count, "i"); +#endif + +static int lpfc9_log_verbose = -1; +static int lpfc9_log_only = -1; +static int lpfc9_lun_queue_depth = -1; +static int lpfc9_tgt_queue_depth = -1; +static int lpfc9_no_device_delay = -1; +static int lpfc9_network_on = -1; +static int lpfc9_xmt_que_size = -1; +static int lpfc9_scandown = -1; +static int lpfc9_linkdown_tmo = -1; +static int lpfc9_nodev_tmo = -1; +static int lpfc9_delay_rsp_err = -1; +static int lpfc9_nodev_holdio = -1; +static int lpfc9_check_cond_err = -1; +static int lpfc9_num_iocbs = -1; +static int lpfc9_num_bufs = -1; +static int lpfc9_topology = -1; +static int lpfc9_link_speed = -1; +static int lpfc9_ip_class = -1; +static int lpfc9_fcp_class = -1; +static int lpfc9_use_adisc = -1; +static int lpfc9_fcpfabric_tmo = -1; +static int lpfc9_post_ip_buf = -1; +static int lpfc9_dqfull_throttle_up_time = -1; +static int lpfc9_dqfull_throttle_up_inc = -1; +static int lpfc9_ack0support = -1; +static int lpfc9_automap = -1; +static int lpfc9_cr_delay = -1; +static int lpfc9_cr_count = -1; + +/* HBA 10 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc10_log_verbose, "i"); +MODULE_PARM(lpfc10_log_only, "i"); +MODULE_PARM(lpfc10_lun_queue_depth, "i"); +MODULE_PARM(lpfc10_tgt_queue_depth, "i"); +MODULE_PARM(lpfc10_no_device_delay, "i"); +MODULE_PARM(lpfc10_network_on, "i"); +MODULE_PARM(lpfc10_xmt_que_size, "i"); +MODULE_PARM(lpfc10_scandown, "i"); +MODULE_PARM(lpfc10_linkdown_tmo, "i"); 
+MODULE_PARM(lpfc10_nodev_tmo, "i"); +MODULE_PARM(lpfc10_delay_rsp_err, "i"); +MODULE_PARM(lpfc10_nodev_holdio, "i"); +MODULE_PARM(lpfc10_check_cond_err, "i"); +MODULE_PARM(lpfc10_num_iocbs, "i"); +MODULE_PARM(lpfc10_num_bufs, "i"); +MODULE_PARM(lpfc10_topology, "i"); +MODULE_PARM(lpfc10_link_speed, "i"); +MODULE_PARM(lpfc10_ip_class, "i"); +MODULE_PARM(lpfc10_fcp_class, "i"); +MODULE_PARM(lpfc10_use_adisc, "i"); +MODULE_PARM(lpfc10_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc10_post_ip_buf, "i"); +MODULE_PARM(lpfc10_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc10_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc10_ack0support, "i"); +MODULE_PARM(lpfc10_automap, "i"); +MODULE_PARM(lpfc10_cr_delay, "i"); +MODULE_PARM(lpfc10_cr_count, "i"); +#endif + +static int lpfc10_log_verbose = -1; +static int lpfc10_log_only = -1; +static int lpfc10_lun_queue_depth = -1; +static int lpfc10_tgt_queue_depth = -1; +static int lpfc10_no_device_delay = -1; +static int lpfc10_network_on = -1; +static int lpfc10_xmt_que_size = -1; +static int lpfc10_scandown = -1; +static int lpfc10_linkdown_tmo = -1; +static int lpfc10_nodev_tmo = -1; +static int lpfc10_delay_rsp_err = -1; +static int lpfc10_nodev_holdio = -1; +static int lpfc10_check_cond_err = -1; +static int lpfc10_num_iocbs = -1; +static int lpfc10_num_bufs = -1; +static int lpfc10_topology = -1; +static int lpfc10_link_speed = -1; +static int lpfc10_ip_class = -1; +static int lpfc10_fcp_class = -1; +static int lpfc10_use_adisc = -1; +static int lpfc10_fcpfabric_tmo = -1; +static int lpfc10_post_ip_buf = -1; +static int lpfc10_dqfull_throttle_up_time = -1; +static int lpfc10_dqfull_throttle_up_inc = -1; +static int lpfc10_ack0support = -1; +static int lpfc10_automap = -1; +static int lpfc10_cr_delay = -1; +static int lpfc10_cr_count = -1; + +/* HBA 11 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc11_log_verbose, "i"); +MODULE_PARM(lpfc11_log_only, "i"); +MODULE_PARM(lpfc11_lun_queue_depth, "i"); +MODULE_PARM(lpfc11_tgt_queue_depth, "i"); 
+MODULE_PARM(lpfc11_no_device_delay, "i"); +MODULE_PARM(lpfc11_network_on, "i"); +MODULE_PARM(lpfc11_xmt_que_size, "i"); +MODULE_PARM(lpfc11_scandown, "i"); +MODULE_PARM(lpfc11_linkdown_tmo, "i"); +MODULE_PARM(lpfc11_nodev_tmo, "i"); +MODULE_PARM(lpfc11_delay_rsp_err, "i"); +MODULE_PARM(lpfc11_nodev_holdio, "i"); +MODULE_PARM(lpfc11_check_cond_err, "i"); +MODULE_PARM(lpfc11_num_iocbs, "i"); +MODULE_PARM(lpfc11_num_bufs, "i"); +MODULE_PARM(lpfc11_topology, "i"); +MODULE_PARM(lpfc11_link_speed, "i"); +MODULE_PARM(lpfc11_ip_class, "i"); +MODULE_PARM(lpfc11_fcp_class, "i"); +MODULE_PARM(lpfc11_use_adisc, "i"); +MODULE_PARM(lpfc11_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc11_post_ip_buf, "i"); +MODULE_PARM(lpfc11_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc11_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc11_ack0support, "i"); +MODULE_PARM(lpfc11_automap, "i"); +MODULE_PARM(lpfc11_cr_delay, "i"); +MODULE_PARM(lpfc11_cr_count, "i"); +#endif + +static int lpfc11_log_verbose = -1; +static int lpfc11_log_only = -1; +static int lpfc11_lun_queue_depth = -1; +static int lpfc11_tgt_queue_depth = -1; +static int lpfc11_no_device_delay = -1; +static int lpfc11_network_on = -1; +static int lpfc11_xmt_que_size = -1; +static int lpfc11_scandown = -1; +static int lpfc11_linkdown_tmo = -1; +static int lpfc11_nodev_tmo = -1; +static int lpfc11_delay_rsp_err = -1; +static int lpfc11_nodev_holdio = -1; +static int lpfc11_check_cond_err = -1; +static int lpfc11_num_iocbs = -1; +static int lpfc11_num_bufs = -1; +static int lpfc11_topology = -1; +static int lpfc11_link_speed = -1; +static int lpfc11_ip_class = -1; +static int lpfc11_fcp_class = -1; +static int lpfc11_use_adisc = -1; +static int lpfc11_fcpfabric_tmo = -1; +static int lpfc11_post_ip_buf = -1; +static int lpfc11_dqfull_throttle_up_time = -1; +static int lpfc11_dqfull_throttle_up_inc = -1; +static int lpfc11_ack0support = -1; +static int lpfc11_automap = -1; +static int lpfc11_cr_delay = -1; +static int lpfc11_cr_count = -1; + 
+/* HBA 12 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc12_log_verbose, "i"); +MODULE_PARM(lpfc12_log_only, "i"); +MODULE_PARM(lpfc12_lun_queue_depth, "i"); +MODULE_PARM(lpfc12_tgt_queue_depth, "i"); +MODULE_PARM(lpfc12_no_device_delay, "i"); +MODULE_PARM(lpfc12_network_on, "i"); +MODULE_PARM(lpfc12_xmt_que_size, "i"); +MODULE_PARM(lpfc12_scandown, "i"); +MODULE_PARM(lpfc12_linkdown_tmo, "i"); +MODULE_PARM(lpfc12_nodev_tmo, "i"); +MODULE_PARM(lpfc12_delay_rsp_err, "i"); +MODULE_PARM(lpfc12_nodev_holdio, "i"); +MODULE_PARM(lpfc12_check_cond_err, "i"); +MODULE_PARM(lpfc12_num_iocbs, "i"); +MODULE_PARM(lpfc12_num_bufs, "i"); +MODULE_PARM(lpfc12_topology, "i"); +MODULE_PARM(lpfc12_link_speed, "i"); +MODULE_PARM(lpfc12_ip_class, "i"); +MODULE_PARM(lpfc12_fcp_class, "i"); +MODULE_PARM(lpfc12_use_adisc, "i"); +MODULE_PARM(lpfc12_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc12_post_ip_buf, "i"); +MODULE_PARM(lpfc12_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc12_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc12_ack0support, "i"); +MODULE_PARM(lpfc12_automap, "i"); +MODULE_PARM(lpfc12_cr_delay, "i"); +MODULE_PARM(lpfc12_cr_count, "i"); +#endif + +static int lpfc12_log_verbose = -1; +static int lpfc12_log_only = -1; +static int lpfc12_lun_queue_depth = -1; +static int lpfc12_tgt_queue_depth = -1; +static int lpfc12_no_device_delay = -1; +static int lpfc12_network_on = -1; +static int lpfc12_xmt_que_size = -1; +static int lpfc12_scandown = -1; +static int lpfc12_linkdown_tmo = -1; +static int lpfc12_nodev_tmo = -1; +static int lpfc12_delay_rsp_err = -1; +static int lpfc12_nodev_holdio = -1; +static int lpfc12_check_cond_err = -1; +static int lpfc12_num_iocbs = -1; +static int lpfc12_num_bufs = -1; +static int lpfc12_topology = -1; +static int lpfc12_link_speed = -1; +static int lpfc12_ip_class = -1; +static int lpfc12_fcp_class = -1; +static int lpfc12_use_adisc = -1; +static int lpfc12_fcpfabric_tmo = -1; +static int lpfc12_post_ip_buf = -1; +static int lpfc12_dqfull_throttle_up_time 
= -1; +static int lpfc12_dqfull_throttle_up_inc = -1; +static int lpfc12_ack0support = -1; +static int lpfc12_automap = -1; +static int lpfc12_cr_delay = -1; +static int lpfc12_cr_count = -1; + +/* HBA 13 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc13_log_verbose, "i"); +MODULE_PARM(lpfc13_log_only, "i"); +MODULE_PARM(lpfc13_lun_queue_depth, "i"); +MODULE_PARM(lpfc13_tgt_queue_depth, "i"); +MODULE_PARM(lpfc13_no_device_delay, "i"); +MODULE_PARM(lpfc13_network_on, "i"); +MODULE_PARM(lpfc13_xmt_que_size, "i"); +MODULE_PARM(lpfc13_scandown, "i"); +MODULE_PARM(lpfc13_linkdown_tmo, "i"); +MODULE_PARM(lpfc13_nodev_tmo, "i"); +MODULE_PARM(lpfc13_delay_rsp_err, "i"); +MODULE_PARM(lpfc13_nodev_holdio, "i"); +MODULE_PARM(lpfc13_check_cond_err, "i"); +MODULE_PARM(lpfc13_num_iocbs, "i"); +MODULE_PARM(lpfc13_num_bufs, "i"); +MODULE_PARM(lpfc13_topology, "i"); +MODULE_PARM(lpfc13_link_speed, "i"); +MODULE_PARM(lpfc13_ip_class, "i"); +MODULE_PARM(lpfc13_fcp_class, "i"); +MODULE_PARM(lpfc13_use_adisc, "i"); +MODULE_PARM(lpfc13_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc13_post_ip_buf, "i"); +MODULE_PARM(lpfc13_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc13_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc13_ack0support, "i"); +MODULE_PARM(lpfc13_automap, "i"); +MODULE_PARM(lpfc13_cr_delay, "i"); +MODULE_PARM(lpfc13_cr_count, "i"); +#endif + +static int lpfc13_log_verbose = -1; +static int lpfc13_log_only = -1; +static int lpfc13_lun_queue_depth = -1; +static int lpfc13_tgt_queue_depth = -1; +static int lpfc13_no_device_delay = -1; +static int lpfc13_network_on = -1; +static int lpfc13_xmt_que_size = -1; +static int lpfc13_scandown = -1; +static int lpfc13_linkdown_tmo = -1; +static int lpfc13_nodev_tmo = -1; +static int lpfc13_delay_rsp_err = -1; +static int lpfc13_nodev_holdio = -1; +static int lpfc13_check_cond_err = -1; +static int lpfc13_num_iocbs = -1; +static int lpfc13_num_bufs = -1; +static int lpfc13_topology = -1; +static int lpfc13_link_speed = -1; +static int lpfc13_ip_class 
= -1; +static int lpfc13_fcp_class = -1; +static int lpfc13_use_adisc = -1; +static int lpfc13_fcpfabric_tmo = -1; +static int lpfc13_post_ip_buf = -1; +static int lpfc13_dqfull_throttle_up_time = -1; +static int lpfc13_dqfull_throttle_up_inc = -1; +static int lpfc13_ack0support = -1; +static int lpfc13_automap = -1; +static int lpfc13_cr_delay = -1; +static int lpfc13_cr_count = -1; + +/* HBA 14 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc14_log_verbose, "i"); +MODULE_PARM(lpfc14_log_only, "i"); +MODULE_PARM(lpfc14_lun_queue_depth, "i"); +MODULE_PARM(lpfc14_tgt_queue_depth, "i"); +MODULE_PARM(lpfc14_no_device_delay, "i"); +MODULE_PARM(lpfc14_network_on, "i"); +MODULE_PARM(lpfc14_xmt_que_size, "i"); +MODULE_PARM(lpfc14_scandown, "i"); +MODULE_PARM(lpfc14_linkdown_tmo, "i"); +MODULE_PARM(lpfc14_nodev_tmo, "i"); +MODULE_PARM(lpfc14_delay_rsp_err, "i"); +MODULE_PARM(lpfc14_nodev_holdio, "i"); +MODULE_PARM(lpfc14_check_cond_err, "i"); +MODULE_PARM(lpfc14_num_iocbs, "i"); +MODULE_PARM(lpfc14_num_bufs, "i"); +MODULE_PARM(lpfc14_topology, "i"); +MODULE_PARM(lpfc14_link_speed, "i"); +MODULE_PARM(lpfc14_ip_class, "i"); +MODULE_PARM(lpfc14_fcp_class, "i"); +MODULE_PARM(lpfc14_use_adisc, "i"); +MODULE_PARM(lpfc14_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc14_post_ip_buf, "i"); +MODULE_PARM(lpfc14_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc14_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc14_ack0support, "i"); +MODULE_PARM(lpfc14_automap, "i"); +MODULE_PARM(lpfc14_cr_delay, "i"); +MODULE_PARM(lpfc14_cr_count, "i"); +#endif + +static int lpfc14_log_verbose = -1; +static int lpfc14_log_only = -1; +static int lpfc14_lun_queue_depth = -1; +static int lpfc14_tgt_queue_depth = -1; +static int lpfc14_no_device_delay = -1; +static int lpfc14_network_on = -1; +static int lpfc14_xmt_que_size = -1; +static int lpfc14_scandown = -1; +static int lpfc14_linkdown_tmo = -1; +static int lpfc14_nodev_tmo = -1; +static int lpfc14_delay_rsp_err = -1; +static int lpfc14_nodev_holdio = -1; +static int 
lpfc14_check_cond_err = -1; +static int lpfc14_num_iocbs = -1; +static int lpfc14_num_bufs = -1; +static int lpfc14_topology = -1; +static int lpfc14_link_speed = -1; +static int lpfc14_ip_class = -1; +static int lpfc14_fcp_class = -1; +static int lpfc14_use_adisc = -1; +static int lpfc14_fcpfabric_tmo = -1; +static int lpfc14_post_ip_buf = -1; +static int lpfc14_dqfull_throttle_up_time = -1; +static int lpfc14_dqfull_throttle_up_inc = -1; +static int lpfc14_ack0support = -1; +static int lpfc14_automap = -1; +static int lpfc14_cr_delay = -1; +static int lpfc14_cr_count = -1; + +/* HBA 15 */ +#ifdef MODULE_PARM +MODULE_PARM(lpfc15_log_verbose, "i"); +MODULE_PARM(lpfc15_log_only, "i"); +MODULE_PARM(lpfc15_lun_queue_depth, "i"); +MODULE_PARM(lpfc15_tgt_queue_depth, "i"); +MODULE_PARM(lpfc15_no_device_delay, "i"); +MODULE_PARM(lpfc15_network_on, "i"); +MODULE_PARM(lpfc15_xmt_que_size, "i"); +MODULE_PARM(lpfc15_scandown, "i"); +MODULE_PARM(lpfc15_linkdown_tmo, "i"); +MODULE_PARM(lpfc15_nodev_tmo, "i"); +MODULE_PARM(lpfc15_delay_rsp_err, "i"); +MODULE_PARM(lpfc15_nodev_holdio, "i"); +MODULE_PARM(lpfc15_check_cond_err, "i"); +MODULE_PARM(lpfc15_num_iocbs, "i"); +MODULE_PARM(lpfc15_num_bufs, "i"); +MODULE_PARM(lpfc15_topology, "i"); +MODULE_PARM(lpfc15_link_speed, "i"); +MODULE_PARM(lpfc15_ip_class, "i"); +MODULE_PARM(lpfc15_fcp_class, "i"); +MODULE_PARM(lpfc15_use_adisc, "i"); +MODULE_PARM(lpfc15_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc15_post_ip_buf, "i"); +MODULE_PARM(lpfc15_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc15_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc15_ack0support, "i"); +MODULE_PARM(lpfc15_automap, "i"); +MODULE_PARM(lpfc15_cr_delay, "i"); +MODULE_PARM(lpfc15_cr_count, "i"); +#endif + +static int lpfc15_log_verbose = -1; +static int lpfc15_log_only = -1; +static int lpfc15_lun_queue_depth = -1; +static int lpfc15_tgt_queue_depth = -1; +static int lpfc15_no_device_delay = -1; +static int lpfc15_network_on = -1; +static int lpfc15_xmt_que_size = -1; 
+static int lpfc15_scandown = -1; +static int lpfc15_linkdown_tmo = -1; +static int lpfc15_nodev_tmo = -1; +static int lpfc15_delay_rsp_err = -1; +static int lpfc15_nodev_holdio = -1; +static int lpfc15_check_cond_err = -1; +static int lpfc15_num_iocbs = -1; +static int lpfc15_num_bufs = -1; +static int lpfc15_topology = -1; +static int lpfc15_link_speed = -1; +static int lpfc15_ip_class = -1; +static int lpfc15_fcp_class = -1; +static int lpfc15_use_adisc = -1; +static int lpfc15_fcpfabric_tmo = -1; +static int lpfc15_post_ip_buf = -1; +static int lpfc15_dqfull_throttle_up_time = -1; +static int lpfc15_dqfull_throttle_up_inc = -1; +static int lpfc15_ack0support = -1; +static int lpfc15_automap = -1; +static int lpfc15_cr_delay = -1; +static int lpfc15_cr_count = -1; + +#ifdef MODULE_PARM +MODULE_PARM(lpfc_log_verbose, "i"); +MODULE_PARM(lpfc_log_only, "i"); +MODULE_PARM(lpfc_lun_queue_depth, "i"); +MODULE_PARM(lpfc_tgt_queue_depth, "i"); +MODULE_PARM(lpfc_no_device_delay, "i"); +MODULE_PARM(lpfc_network_on, "i"); +MODULE_PARM(lpfc_xmt_que_size, "i"); +MODULE_PARM(lpfc_scandown, "i"); +MODULE_PARM(lpfc_linkdown_tmo, "i"); +MODULE_PARM(lpfc_nodev_tmo, "i"); +MODULE_PARM(lpfc_delay_rsp_err, "i"); +MODULE_PARM(lpfc_nodev_holdio, "i"); +MODULE_PARM(lpfc_check_cond_err, "i"); +MODULE_PARM(lpfc_num_iocbs, "i"); +MODULE_PARM(lpfc_num_bufs, "i"); +MODULE_PARM(lpfc_topology, "i"); +MODULE_PARM(lpfc_link_speed, "i"); +MODULE_PARM(lpfc_ip_class, "i"); +MODULE_PARM(lpfc_fcp_class, "i"); +MODULE_PARM(lpfc_use_adisc, "i"); +MODULE_PARM(lpfc_fcpfabric_tmo, "i"); +MODULE_PARM(lpfc_post_ip_buf, "i"); +MODULE_PARM(lpfc_dqfull_throttle_up_time, "i"); +MODULE_PARM(lpfc_dqfull_throttle_up_inc, "i"); +MODULE_PARM(lpfc_ack0support, "i"); +MODULE_PARM(lpfc_automap, "i"); +MODULE_PARM(lpfc_cr_delay, "i"); +MODULE_PARM(lpfc_cr_count, "i"); +#endif + +extern int lpfc_log_verbose; +extern int lpfc_log_only; +extern int lpfc_lun_queue_depth; +extern int lpfc_tgt_queue_depth; +extern int 
lpfc_no_device_delay; +extern int lpfc_network_on; +extern int lpfc_xmt_que_size; +extern int lpfc_scandown; +extern int lpfc_linkdown_tmo; +extern int lpfc_nodev_tmo; +extern int lpfc_delay_rsp_err; +extern int lpfc_nodev_holdio; +extern int lpfc_check_cond_err; +extern int lpfc_num_iocbs; +extern int lpfc_num_bufs; +extern int lpfc_topology; +extern int lpfc_link_speed; +extern int lpfc_ip_class; +extern int lpfc_fcp_class; +extern int lpfc_use_adisc; +extern int lpfc_fcpfabric_tmo; +extern int lpfc_post_ip_buf; +extern int lpfc_dqfull_throttle_up_time; +extern int lpfc_dqfull_throttle_up_inc; +extern int lpfc_ack0support; +extern int lpfc_automap; +extern int lpfc_cr_delay; +extern int lpfc_cr_count; + + +void * +fc_get_cfg_param( +int brd, +int param) +{ + void *value; + + value = (void *)((ulong)(-1)); + switch(brd) { + case 0: /* HBA 0 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc0_log_verbose != -1) + value = (void *)((ulong)lpfc0_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc0_log_only != -1) + value = (void *)((ulong)lpfc0_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc0_num_iocbs != -1) + value = (void *)((ulong)lpfc0_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc0_num_bufs != -1) + value = (void *)((ulong)lpfc0_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc0_automap != -1) + value = (void *)((ulong)lpfc0_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc0_cr_delay != -1) + value = (void *)((ulong)lpfc0_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc0_cr_count != -1) + value = (void *)((ulong)lpfc0_cr_count); + break; + case 
CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc0_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc0_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc0_lun_queue_depth != -1) + value = (void *)((ulong)lpfc0_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc0_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc0_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc0_fcp_class != -1) + value = (void *)((ulong)lpfc0_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc0_use_adisc != -1) + value = (void *)((ulong)lpfc0_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc0_no_device_delay != -1) + value = (void *)((ulong)lpfc0_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc0_network_on != -1) + value = (void *)((ulong)lpfc0_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc0_post_ip_buf != -1) + value = (void *)((ulong)lpfc0_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc0_xmt_que_size != -1) + value = (void *)((ulong)lpfc0_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc0_ip_class != -1) + value = (void *)((ulong)lpfc0_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc0_ack0support != -1) + value = (void *)((ulong)lpfc0_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + 
if(lpfc0_topology != -1) + value = (void *)((ulong)lpfc0_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc0_scandown != -1) + value = (void *)((ulong)lpfc0_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc0_linkdown_tmo != -1) + value = (void *)((ulong)lpfc0_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc0_nodev_holdio != -1) + value = (void *)((ulong)lpfc0_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc0_delay_rsp_err != -1) + value = (void *)((ulong)lpfc0_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc0_check_cond_err != -1) + value = (void *)((ulong)lpfc0_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc0_nodev_tmo != -1) + value = (void *)((ulong)lpfc0_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc0_link_speed != -1) + value = (void *)((ulong)lpfc0_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc0_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc0_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc0_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc0_dqfull_throttle_up_inc); + break; + default: + break; + } + break; + case 1: /* HBA 1 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc1_log_verbose != -1) + value = (void *)((ulong)lpfc1_log_verbose); + break; + case 
CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc1_log_only != -1) + value = (void *)((ulong)lpfc1_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc1_num_iocbs != -1) + value = (void *)((ulong)lpfc1_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc1_num_bufs != -1) + value = (void *)((ulong)lpfc1_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc1_automap != -1) + value = (void *)((ulong)lpfc1_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc1_cr_delay != -1) + value = (void *)((ulong)lpfc1_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc1_cr_count != -1) + value = (void *)((ulong)lpfc1_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc1_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc1_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc1_lun_queue_depth != -1) + value = (void *)((ulong)lpfc1_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc1_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc1_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc1_fcp_class != -1) + value = (void *)((ulong)lpfc1_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc1_use_adisc != -1) + value = (void *)((ulong)lpfc1_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc1_no_device_delay != -1) + value = (void 
*)((ulong)lpfc1_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc1_network_on != -1) + value = (void *)((ulong)lpfc1_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc1_post_ip_buf != -1) + value = (void *)((ulong)lpfc1_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc1_xmt_que_size != -1) + value = (void *)((ulong)lpfc1_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc1_ip_class != -1) + value = (void *)((ulong)lpfc1_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc1_ack0support != -1) + value = (void *)((ulong)lpfc1_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc1_topology != -1) + value = (void *)((ulong)lpfc1_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc1_scandown != -1) + value = (void *)((ulong)lpfc1_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc1_linkdown_tmo != -1) + value = (void *)((ulong)lpfc1_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc1_nodev_holdio != -1) + value = (void *)((ulong)lpfc1_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc1_delay_rsp_err != -1) + value = (void *)((ulong)lpfc1_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc1_check_cond_err != -1) + value = (void *)((ulong)lpfc1_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + 
if(lpfc1_nodev_tmo != -1) + value = (void *)((ulong)lpfc1_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc1_link_speed != -1) + value = (void *)((ulong)lpfc1_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc1_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc1_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc1_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc1_dqfull_throttle_up_inc); + break; + } + break; + case 2: /* HBA 2 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc2_log_verbose != -1) + value = (void *)((ulong)lpfc2_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc2_log_only != -1) + value = (void *)((ulong)lpfc2_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc2_num_iocbs != -1) + value = (void *)((ulong)lpfc2_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc2_num_bufs != -1) + value = (void *)((ulong)lpfc2_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc2_automap != -1) + value = (void *)((ulong)lpfc2_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc2_cr_delay != -1) + value = (void *)((ulong)lpfc2_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc2_cr_count != -1) + value = (void *)((ulong)lpfc2_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc2_tgt_queue_depth 
!= -1) + value = (void *)((ulong)lpfc2_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc2_lun_queue_depth != -1) + value = (void *)((ulong)lpfc2_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc2_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc2_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc2_fcp_class != -1) + value = (void *)((ulong)lpfc2_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc2_use_adisc != -1) + value = (void *)((ulong)lpfc2_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc2_no_device_delay != -1) + value = (void *)((ulong)lpfc2_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc2_network_on != -1) + value = (void *)((ulong)lpfc2_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc2_post_ip_buf != -1) + value = (void *)((ulong)lpfc2_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc2_xmt_que_size != -1) + value = (void *)((ulong)lpfc2_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc2_ip_class != -1) + value = (void *)((ulong)lpfc2_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc2_ack0support != -1) + value = (void *)((ulong)lpfc2_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc2_topology != -1) + value = (void *)((ulong)lpfc2_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void 
*)((ulong)lpfc_scandown); + if(lpfc2_scandown != -1) + value = (void *)((ulong)lpfc2_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc2_linkdown_tmo != -1) + value = (void *)((ulong)lpfc2_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc2_nodev_holdio != -1) + value = (void *)((ulong)lpfc2_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc2_delay_rsp_err != -1) + value = (void *)((ulong)lpfc2_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc2_check_cond_err != -1) + value = (void *)((ulong)lpfc2_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc2_nodev_tmo != -1) + value = (void *)((ulong)lpfc2_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc2_link_speed != -1) + value = (void *)((ulong)lpfc2_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc2_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc2_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc2_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc2_dqfull_throttle_up_inc); + break; + } + break; + case 3: /* HBA 3 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc3_log_verbose != -1) + value = (void *)((ulong)lpfc3_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc3_log_only != -1) + value = (void *)((ulong)lpfc3_log_only); + break; + case 
CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc3_num_iocbs != -1) + value = (void *)((ulong)lpfc3_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc3_num_bufs != -1) + value = (void *)((ulong)lpfc3_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc3_automap != -1) + value = (void *)((ulong)lpfc3_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc3_cr_delay != -1) + value = (void *)((ulong)lpfc3_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc3_cr_count != -1) + value = (void *)((ulong)lpfc3_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc3_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc3_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc3_lun_queue_depth != -1) + value = (void *)((ulong)lpfc3_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc3_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc3_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc3_fcp_class != -1) + value = (void *)((ulong)lpfc3_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc3_use_adisc != -1) + value = (void *)((ulong)lpfc3_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc3_no_device_delay != -1) + value = (void *)((ulong)lpfc3_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc3_network_on != -1) + value = (void 
*)((ulong)lpfc3_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc3_post_ip_buf != -1) + value = (void *)((ulong)lpfc3_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc3_xmt_que_size != -1) + value = (void *)((ulong)lpfc3_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc3_ip_class != -1) + value = (void *)((ulong)lpfc3_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc3_ack0support != -1) + value = (void *)((ulong)lpfc3_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc3_topology != -1) + value = (void *)((ulong)lpfc3_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc3_scandown != -1) + value = (void *)((ulong)lpfc3_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc3_linkdown_tmo != -1) + value = (void *)((ulong)lpfc3_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc3_nodev_holdio != -1) + value = (void *)((ulong)lpfc3_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc3_delay_rsp_err != -1) + value = (void *)((ulong)lpfc3_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc3_check_cond_err != -1) + value = (void *)((ulong)lpfc3_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc3_nodev_tmo != -1) + value = (void *)((ulong)lpfc3_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc3_link_speed 
!= -1) + value = (void *)((ulong)lpfc3_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc3_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc3_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc3_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc3_dqfull_throttle_up_inc); + break; + } + break; + case 4: /* HBA 4 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc4_log_verbose != -1) + value = (void *)((ulong)lpfc4_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc4_log_only != -1) + value = (void *)((ulong)lpfc4_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc4_num_iocbs != -1) + value = (void *)((ulong)lpfc4_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc4_num_bufs != -1) + value = (void *)((ulong)lpfc4_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc4_automap != -1) + value = (void *)((ulong)lpfc4_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc4_cr_delay != -1) + value = (void *)((ulong)lpfc4_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc4_cr_count != -1) + value = (void *)((ulong)lpfc4_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc4_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc4_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + 
if(lpfc4_lun_queue_depth != -1) + value = (void *)((ulong)lpfc4_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc4_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc4_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc4_fcp_class != -1) + value = (void *)((ulong)lpfc4_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc4_use_adisc != -1) + value = (void *)((ulong)lpfc4_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc4_no_device_delay != -1) + value = (void *)((ulong)lpfc4_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc4_network_on != -1) + value = (void *)((ulong)lpfc4_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc4_post_ip_buf != -1) + value = (void *)((ulong)lpfc4_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc4_xmt_que_size != -1) + value = (void *)((ulong)lpfc4_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc4_ip_class != -1) + value = (void *)((ulong)lpfc4_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc4_ack0support != -1) + value = (void *)((ulong)lpfc4_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc4_topology != -1) + value = (void *)((ulong)lpfc4_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc4_scandown != -1) + value = (void *)((ulong)lpfc4_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void 
*)((ulong)lpfc_linkdown_tmo); + if(lpfc4_linkdown_tmo != -1) + value = (void *)((ulong)lpfc4_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc4_nodev_holdio != -1) + value = (void *)((ulong)lpfc4_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc4_delay_rsp_err != -1) + value = (void *)((ulong)lpfc4_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc4_check_cond_err != -1) + value = (void *)((ulong)lpfc4_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc4_nodev_tmo != -1) + value = (void *)((ulong)lpfc4_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc4_link_speed != -1) + value = (void *)((ulong)lpfc4_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc4_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc4_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc4_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc4_dqfull_throttle_up_inc); + break; + } + break; + case 5: /* HBA 5 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc5_log_verbose != -1) + value = (void *)((ulong)lpfc5_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc5_log_only != -1) + value = (void *)((ulong)lpfc5_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc5_num_iocbs != -1) + value = (void *)((ulong)lpfc5_num_iocbs); + break; + case 
CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc5_num_bufs != -1) + value = (void *)((ulong)lpfc5_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc5_automap != -1) + value = (void *)((ulong)lpfc5_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc5_cr_delay != -1) + value = (void *)((ulong)lpfc5_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc5_cr_count != -1) + value = (void *)((ulong)lpfc5_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc5_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc5_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc5_lun_queue_depth != -1) + value = (void *)((ulong)lpfc5_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc5_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc5_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc5_fcp_class != -1) + value = (void *)((ulong)lpfc5_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc5_use_adisc != -1) + value = (void *)((ulong)lpfc5_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc5_no_device_delay != -1) + value = (void *)((ulong)lpfc5_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc5_network_on != -1) + value = (void *)((ulong)lpfc5_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc5_post_ip_buf != -1) + value = (void 
*)((ulong)lpfc5_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc5_xmt_que_size != -1) + value = (void *)((ulong)lpfc5_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc5_ip_class != -1) + value = (void *)((ulong)lpfc5_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc5_ack0support != -1) + value = (void *)((ulong)lpfc5_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc5_topology != -1) + value = (void *)((ulong)lpfc5_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc5_scandown != -1) + value = (void *)((ulong)lpfc5_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc5_linkdown_tmo != -1) + value = (void *)((ulong)lpfc5_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc5_nodev_holdio != -1) + value = (void *)((ulong)lpfc5_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc5_delay_rsp_err != -1) + value = (void *)((ulong)lpfc5_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc5_check_cond_err != -1) + value = (void *)((ulong)lpfc5_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc5_nodev_tmo != -1) + value = (void *)((ulong)lpfc5_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc5_link_speed != -1) + value = (void *)((ulong)lpfc5_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void 
*)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc5_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc5_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc5_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc5_dqfull_throttle_up_inc); + break; + } + break; + case 6: /* HBA 6 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc6_log_verbose != -1) + value = (void *)((ulong)lpfc6_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc6_log_only != -1) + value = (void *)((ulong)lpfc6_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc6_num_iocbs != -1) + value = (void *)((ulong)lpfc6_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc6_num_bufs != -1) + value = (void *)((ulong)lpfc6_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc6_automap != -1) + value = (void *)((ulong)lpfc6_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc6_cr_delay != -1) + value = (void *)((ulong)lpfc6_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc6_cr_count != -1) + value = (void *)((ulong)lpfc6_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc6_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc6_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc6_lun_queue_depth != -1) + value = (void *)((ulong)lpfc6_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void 
*)((ulong)lpfc_fcpfabric_tmo); + if(lpfc6_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc6_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc6_fcp_class != -1) + value = (void *)((ulong)lpfc6_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc6_use_adisc != -1) + value = (void *)((ulong)lpfc6_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc6_no_device_delay != -1) + value = (void *)((ulong)lpfc6_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc6_network_on != -1) + value = (void *)((ulong)lpfc6_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc6_post_ip_buf != -1) + value = (void *)((ulong)lpfc6_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc6_xmt_que_size != -1) + value = (void *)((ulong)lpfc6_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc6_ip_class != -1) + value = (void *)((ulong)lpfc6_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc6_ack0support != -1) + value = (void *)((ulong)lpfc6_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc6_topology != -1) + value = (void *)((ulong)lpfc6_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc6_scandown != -1) + value = (void *)((ulong)lpfc6_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc6_linkdown_tmo != -1) + value = (void *)((ulong)lpfc6_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + 
value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc6_nodev_holdio != -1) + value = (void *)((ulong)lpfc6_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc6_delay_rsp_err != -1) + value = (void *)((ulong)lpfc6_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc6_check_cond_err != -1) + value = (void *)((ulong)lpfc6_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc6_nodev_tmo != -1) + value = (void *)((ulong)lpfc6_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc6_link_speed != -1) + value = (void *)((ulong)lpfc6_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc6_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc6_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc6_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc6_dqfull_throttle_up_inc); + break; + } + break; + case 7: /* HBA 7 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc7_log_verbose != -1) + value = (void *)((ulong)lpfc7_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc7_log_only != -1) + value = (void *)((ulong)lpfc7_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc7_num_iocbs != -1) + value = (void *)((ulong)lpfc7_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc7_num_bufs != -1) + value = (void *)((ulong)lpfc7_num_bufs); + break; + case 
CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc7_automap != -1) + value = (void *)((ulong)lpfc7_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc7_cr_delay != -1) + value = (void *)((ulong)lpfc7_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc7_cr_count != -1) + value = (void *)((ulong)lpfc7_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc7_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc7_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc7_lun_queue_depth != -1) + value = (void *)((ulong)lpfc7_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc7_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc7_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc7_fcp_class != -1) + value = (void *)((ulong)lpfc7_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc7_use_adisc != -1) + value = (void *)((ulong)lpfc7_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc7_no_device_delay != -1) + value = (void *)((ulong)lpfc7_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc7_network_on != -1) + value = (void *)((ulong)lpfc7_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc7_post_ip_buf != -1) + value = (void *)((ulong)lpfc7_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc7_xmt_que_size != -1) 
+ value = (void *)((ulong)lpfc7_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc7_ip_class != -1) + value = (void *)((ulong)lpfc7_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc7_ack0support != -1) + value = (void *)((ulong)lpfc7_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc7_topology != -1) + value = (void *)((ulong)lpfc7_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc7_scandown != -1) + value = (void *)((ulong)lpfc7_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc7_linkdown_tmo != -1) + value = (void *)((ulong)lpfc7_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc7_nodev_holdio != -1) + value = (void *)((ulong)lpfc7_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc7_delay_rsp_err != -1) + value = (void *)((ulong)lpfc7_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc7_check_cond_err != -1) + value = (void *)((ulong)lpfc7_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc7_nodev_tmo != -1) + value = (void *)((ulong)lpfc7_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc7_link_speed != -1) + value = (void *)((ulong)lpfc7_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc7_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc7_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: 
/* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc7_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc7_dqfull_throttle_up_inc); + break; + } + break; + case 8: /* HBA 8 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc8_log_verbose != -1) + value = (void *)((ulong)lpfc8_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc8_log_only != -1) + value = (void *)((ulong)lpfc8_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc8_num_iocbs != -1) + value = (void *)((ulong)lpfc8_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc8_num_bufs != -1) + value = (void *)((ulong)lpfc8_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc8_automap != -1) + value = (void *)((ulong)lpfc8_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc8_cr_delay != -1) + value = (void *)((ulong)lpfc8_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc8_cr_count != -1) + value = (void *)((ulong)lpfc8_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc8_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc8_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc8_lun_queue_depth != -1) + value = (void *)((ulong)lpfc8_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc8_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc8_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void 
*)((ulong)lpfc_fcp_class); + if(lpfc8_fcp_class != -1) + value = (void *)((ulong)lpfc8_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc8_use_adisc != -1) + value = (void *)((ulong)lpfc8_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc8_no_device_delay != -1) + value = (void *)((ulong)lpfc8_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc8_network_on != -1) + value = (void *)((ulong)lpfc8_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc8_post_ip_buf != -1) + value = (void *)((ulong)lpfc8_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc8_xmt_que_size != -1) + value = (void *)((ulong)lpfc8_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc8_ip_class != -1) + value = (void *)((ulong)lpfc8_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc8_ack0support != -1) + value = (void *)((ulong)lpfc8_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc8_topology != -1) + value = (void *)((ulong)lpfc8_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc8_scandown != -1) + value = (void *)((ulong)lpfc8_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc8_linkdown_tmo != -1) + value = (void *)((ulong)lpfc8_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc8_nodev_holdio != -1) + value = (void *)((ulong)lpfc8_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err 
*/ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc8_delay_rsp_err != -1) + value = (void *)((ulong)lpfc8_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc8_check_cond_err != -1) + value = (void *)((ulong)lpfc8_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc8_nodev_tmo != -1) + value = (void *)((ulong)lpfc8_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc8_link_speed != -1) + value = (void *)((ulong)lpfc8_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc8_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc8_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc8_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc8_dqfull_throttle_up_inc); + break; + } + break; + case 9: /* HBA 9 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc9_log_verbose != -1) + value = (void *)((ulong)lpfc9_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc9_log_only != -1) + value = (void *)((ulong)lpfc9_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc9_num_iocbs != -1) + value = (void *)((ulong)lpfc9_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc9_num_bufs != -1) + value = (void *)((ulong)lpfc9_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc9_automap != -1) + value = (void *)((ulong)lpfc9_automap); + break; + case CFG_CR_DELAY: /* cr_delay 
*/ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc9_cr_delay != -1) + value = (void *)((ulong)lpfc9_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc9_cr_count != -1) + value = (void *)((ulong)lpfc9_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc9_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc9_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc9_lun_queue_depth != -1) + value = (void *)((ulong)lpfc9_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc9_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc9_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc9_fcp_class != -1) + value = (void *)((ulong)lpfc9_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc9_use_adisc != -1) + value = (void *)((ulong)lpfc9_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc9_no_device_delay != -1) + value = (void *)((ulong)lpfc9_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc9_network_on != -1) + value = (void *)((ulong)lpfc9_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc9_post_ip_buf != -1) + value = (void *)((ulong)lpfc9_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc9_xmt_que_size != -1) + value = (void *)((ulong)lpfc9_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc9_ip_class != -1) + value = (void 
*)((ulong)lpfc9_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc9_ack0support != -1) + value = (void *)((ulong)lpfc9_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc9_topology != -1) + value = (void *)((ulong)lpfc9_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc9_scandown != -1) + value = (void *)((ulong)lpfc9_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc9_linkdown_tmo != -1) + value = (void *)((ulong)lpfc9_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc9_nodev_holdio != -1) + value = (void *)((ulong)lpfc9_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc9_delay_rsp_err != -1) + value = (void *)((ulong)lpfc9_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc9_check_cond_err != -1) + value = (void *)((ulong)lpfc9_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc9_nodev_tmo != -1) + value = (void *)((ulong)lpfc9_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc9_link_speed != -1) + value = (void *)((ulong)lpfc9_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc9_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc9_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc9_dqfull_throttle_up_inc != -1) + value = (void 
*)((ulong)lpfc9_dqfull_throttle_up_inc); + break; + } + break; + case 10: /* HBA 10 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc10_log_verbose != -1) + value = (void *)((ulong)lpfc10_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc10_log_only != -1) + value = (void *)((ulong)lpfc10_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc10_num_iocbs != -1) + value = (void *)((ulong)lpfc10_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc10_num_bufs != -1) + value = (void *)((ulong)lpfc10_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc10_automap != -1) + value = (void *)((ulong)lpfc10_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc10_cr_delay != -1) + value = (void *)((ulong)lpfc10_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc10_cr_count != -1) + value = (void *)((ulong)lpfc10_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc10_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc10_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc10_lun_queue_depth != -1) + value = (void *)((ulong)lpfc10_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc10_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc10_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc10_fcp_class != -1) + value = (void *)((ulong)lpfc10_fcp_class); + break; + case CFG_USE_ADISC: /* 
use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc10_use_adisc != -1) + value = (void *)((ulong)lpfc10_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc10_no_device_delay != -1) + value = (void *)((ulong)lpfc10_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc10_network_on != -1) + value = (void *)((ulong)lpfc10_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc10_post_ip_buf != -1) + value = (void *)((ulong)lpfc10_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc10_xmt_que_size != -1) + value = (void *)((ulong)lpfc10_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc10_ip_class != -1) + value = (void *)((ulong)lpfc10_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc10_ack0support != -1) + value = (void *)((ulong)lpfc10_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc10_topology != -1) + value = (void *)((ulong)lpfc10_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc10_scandown != -1) + value = (void *)((ulong)lpfc10_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc10_linkdown_tmo != -1) + value = (void *)((ulong)lpfc10_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc10_nodev_holdio != -1) + value = (void *)((ulong)lpfc10_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc10_delay_rsp_err != -1) + value = (void 
*)((ulong)lpfc10_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc10_check_cond_err != -1) + value = (void *)((ulong)lpfc10_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc10_nodev_tmo != -1) + value = (void *)((ulong)lpfc10_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc10_link_speed != -1) + value = (void *)((ulong)lpfc10_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc10_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc10_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc10_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc10_dqfull_throttle_up_inc); + break; + } + break; + case 11: /* HBA 11 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc11_log_verbose != -1) + value = (void *)((ulong)lpfc11_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc11_log_only != -1) + value = (void *)((ulong)lpfc11_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc11_num_iocbs != -1) + value = (void *)((ulong)lpfc11_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc11_num_bufs != -1) + value = (void *)((ulong)lpfc11_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc11_automap != -1) + value = (void *)((ulong)lpfc11_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc11_cr_delay != -1) + 
value = (void *)((ulong)lpfc11_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc11_cr_count != -1) + value = (void *)((ulong)lpfc11_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc11_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc11_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc11_lun_queue_depth != -1) + value = (void *)((ulong)lpfc11_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc11_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc11_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc11_fcp_class != -1) + value = (void *)((ulong)lpfc11_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc11_use_adisc != -1) + value = (void *)((ulong)lpfc11_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc11_no_device_delay != -1) + value = (void *)((ulong)lpfc11_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc11_network_on != -1) + value = (void *)((ulong)lpfc11_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc11_post_ip_buf != -1) + value = (void *)((ulong)lpfc11_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc11_xmt_que_size != -1) + value = (void *)((ulong)lpfc11_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc11_ip_class != -1) + value = (void *)((ulong)lpfc11_ip_class); + break; + case CFG_ACK0: 
/* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc11_ack0support != -1) + value = (void *)((ulong)lpfc11_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc11_topology != -1) + value = (void *)((ulong)lpfc11_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc11_scandown != -1) + value = (void *)((ulong)lpfc11_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc11_linkdown_tmo != -1) + value = (void *)((ulong)lpfc11_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc11_nodev_holdio != -1) + value = (void *)((ulong)lpfc11_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc11_delay_rsp_err != -1) + value = (void *)((ulong)lpfc11_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc11_check_cond_err != -1) + value = (void *)((ulong)lpfc11_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc11_nodev_tmo != -1) + value = (void *)((ulong)lpfc11_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc11_link_speed != -1) + value = (void *)((ulong)lpfc11_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc11_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc11_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc11_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc11_dqfull_throttle_up_inc); + break; + } + break; + 
case 12: /* HBA 12 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc12_log_verbose != -1) + value = (void *)((ulong)lpfc12_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc12_log_only != -1) + value = (void *)((ulong)lpfc12_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc12_num_iocbs != -1) + value = (void *)((ulong)lpfc12_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc12_num_bufs != -1) + value = (void *)((ulong)lpfc12_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc12_automap != -1) + value = (void *)((ulong)lpfc12_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc12_cr_delay != -1) + value = (void *)((ulong)lpfc12_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc12_cr_count != -1) + value = (void *)((ulong)lpfc12_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc12_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc12_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc12_lun_queue_depth != -1) + value = (void *)((ulong)lpfc12_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc12_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc12_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc12_fcp_class != -1) + value = (void *)((ulong)lpfc12_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + 
if(lpfc12_use_adisc != -1) + value = (void *)((ulong)lpfc12_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc12_no_device_delay != -1) + value = (void *)((ulong)lpfc12_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc12_network_on != -1) + value = (void *)((ulong)lpfc12_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc12_post_ip_buf != -1) + value = (void *)((ulong)lpfc12_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc12_xmt_que_size != -1) + value = (void *)((ulong)lpfc12_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc12_ip_class != -1) + value = (void *)((ulong)lpfc12_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc12_ack0support != -1) + value = (void *)((ulong)lpfc12_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc12_topology != -1) + value = (void *)((ulong)lpfc12_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc12_scandown != -1) + value = (void *)((ulong)lpfc12_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc12_linkdown_tmo != -1) + value = (void *)((ulong)lpfc12_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc12_nodev_holdio != -1) + value = (void *)((ulong)lpfc12_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc12_delay_rsp_err != -1) + value = (void *)((ulong)lpfc12_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* 
check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc12_check_cond_err != -1) + value = (void *)((ulong)lpfc12_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc12_nodev_tmo != -1) + value = (void *)((ulong)lpfc12_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc12_link_speed != -1) + value = (void *)((ulong)lpfc12_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc12_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc12_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc12_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc12_dqfull_throttle_up_inc); + break; + } + break; + case 13: /* HBA 13 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc13_log_verbose != -1) + value = (void *)((ulong)lpfc13_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc13_log_only != -1) + value = (void *)((ulong)lpfc13_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc13_num_iocbs != -1) + value = (void *)((ulong)lpfc13_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc13_num_bufs != -1) + value = (void *)((ulong)lpfc13_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc13_automap != -1) + value = (void *)((ulong)lpfc13_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc13_cr_delay != -1) + value = (void *)((ulong)lpfc13_cr_delay); + break; + case CFG_CR_COUNT: 
/* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc13_cr_count != -1) + value = (void *)((ulong)lpfc13_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc13_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc13_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc13_lun_queue_depth != -1) + value = (void *)((ulong)lpfc13_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc13_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc13_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc13_fcp_class != -1) + value = (void *)((ulong)lpfc13_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc13_use_adisc != -1) + value = (void *)((ulong)lpfc13_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc13_no_device_delay != -1) + value = (void *)((ulong)lpfc13_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc13_network_on != -1) + value = (void *)((ulong)lpfc13_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc13_post_ip_buf != -1) + value = (void *)((ulong)lpfc13_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc13_xmt_que_size != -1) + value = (void *)((ulong)lpfc13_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc13_ip_class != -1) + value = (void *)((ulong)lpfc13_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + 
if(lpfc13_ack0support != -1) + value = (void *)((ulong)lpfc13_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc13_topology != -1) + value = (void *)((ulong)lpfc13_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc13_scandown != -1) + value = (void *)((ulong)lpfc13_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc13_linkdown_tmo != -1) + value = (void *)((ulong)lpfc13_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc13_nodev_holdio != -1) + value = (void *)((ulong)lpfc13_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc13_delay_rsp_err != -1) + value = (void *)((ulong)lpfc13_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc13_check_cond_err != -1) + value = (void *)((ulong)lpfc13_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc13_nodev_tmo != -1) + value = (void *)((ulong)lpfc13_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc13_link_speed != -1) + value = (void *)((ulong)lpfc13_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc13_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc13_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc13_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc13_dqfull_throttle_up_inc); + break; + } + break; + case 14: /* HBA 14 */ + switch(param) { + case 
CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc14_log_verbose != -1) + value = (void *)((ulong)lpfc14_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc14_log_only != -1) + value = (void *)((ulong)lpfc14_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc14_num_iocbs != -1) + value = (void *)((ulong)lpfc14_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc14_num_bufs != -1) + value = (void *)((ulong)lpfc14_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc14_automap != -1) + value = (void *)((ulong)lpfc14_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc14_cr_delay != -1) + value = (void *)((ulong)lpfc14_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void *)((ulong)lpfc_cr_count); + if(lpfc14_cr_count != -1) + value = (void *)((ulong)lpfc14_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc14_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc14_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc14_lun_queue_depth != -1) + value = (void *)((ulong)lpfc14_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc14_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc14_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc14_fcp_class != -1) + value = (void *)((ulong)lpfc14_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc14_use_adisc != -1) + value = (void 
*)((ulong)lpfc14_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc14_no_device_delay != -1) + value = (void *)((ulong)lpfc14_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc14_network_on != -1) + value = (void *)((ulong)lpfc14_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc14_post_ip_buf != -1) + value = (void *)((ulong)lpfc14_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc14_xmt_que_size != -1) + value = (void *)((ulong)lpfc14_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc14_ip_class != -1) + value = (void *)((ulong)lpfc14_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc14_ack0support != -1) + value = (void *)((ulong)lpfc14_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc14_topology != -1) + value = (void *)((ulong)lpfc14_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc14_scandown != -1) + value = (void *)((ulong)lpfc14_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc14_linkdown_tmo != -1) + value = (void *)((ulong)lpfc14_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc14_nodev_holdio != -1) + value = (void *)((ulong)lpfc14_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc14_delay_rsp_err != -1) + value = (void *)((ulong)lpfc14_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void 
*)((ulong)lpfc_check_cond_err); + if(lpfc14_check_cond_err != -1) + value = (void *)((ulong)lpfc14_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc14_nodev_tmo != -1) + value = (void *)((ulong)lpfc14_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc14_link_speed != -1) + value = (void *)((ulong)lpfc14_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc14_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc14_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc14_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc14_dqfull_throttle_up_inc); + break; + } + break; + case 15: /* HBA 15 */ + switch(param) { + case CFG_LOG_VERBOSE: /* log-verbose */ + value = (void *)((ulong)lpfc_log_verbose); + if(lpfc15_log_verbose != -1) + value = (void *)((ulong)lpfc15_log_verbose); + break; + case CFG_LOG_ONLY: /* log-only */ + value = (void *)((ulong)lpfc_log_only); + if(lpfc15_log_only != -1) + value = (void *)((ulong)lpfc15_log_only); + break; + case CFG_NUM_IOCBS: /* num-iocbs */ + value = (void *)((ulong)lpfc_num_iocbs); + if(lpfc15_num_iocbs != -1) + value = (void *)((ulong)lpfc15_num_iocbs); + break; + case CFG_NUM_BUFS: /* num-bufs */ + value = (void *)((ulong)lpfc_num_bufs); + if(lpfc15_num_bufs != -1) + value = (void *)((ulong)lpfc15_num_bufs); + break; + case CFG_AUTOMAP: /* automap */ + value = (void *)((ulong)lpfc_automap); + if(lpfc15_automap != -1) + value = (void *)((ulong)lpfc15_automap); + break; + case CFG_CR_DELAY: /* cr_delay */ + value = (void *)((ulong)lpfc_cr_delay); + if(lpfc15_cr_delay != -1) + value = (void *)((ulong)lpfc15_cr_delay); + break; + case CFG_CR_COUNT: /* cr_count */ + value = (void 
*)((ulong)lpfc_cr_count); + if(lpfc15_cr_count != -1) + value = (void *)((ulong)lpfc15_cr_count); + break; + case CFG_DFT_TGT_Q_DEPTH: /* tgt_queue_depth */ + value = (void *)((ulong)lpfc_tgt_queue_depth); + if(lpfc15_tgt_queue_depth != -1) + value = (void *)((ulong)lpfc15_tgt_queue_depth); + break; + case CFG_DFT_LUN_Q_DEPTH: /* lun_queue_depth */ + value = (void *)((ulong)lpfc_lun_queue_depth); + if(lpfc15_lun_queue_depth != -1) + value = (void *)((ulong)lpfc15_lun_queue_depth); + break; + case CFG_FCPFABRIC_TMO: /* fcpfabric-tmo */ + value = (void *)((ulong)lpfc_fcpfabric_tmo); + if(lpfc15_fcpfabric_tmo != -1) + value = (void *)((ulong)lpfc15_fcpfabric_tmo); + break; + case CFG_FCP_CLASS: /* fcp-class */ + value = (void *)((ulong)lpfc_fcp_class); + if(lpfc15_fcp_class != -1) + value = (void *)((ulong)lpfc15_fcp_class); + break; + case CFG_USE_ADISC: /* use-adisc */ + value = (void *)((ulong)lpfc_use_adisc); + if(lpfc15_use_adisc != -1) + value = (void *)((ulong)lpfc15_use_adisc); + break; + case CFG_NO_DEVICE_DELAY: /* no-device-delay */ + value = (void *)((ulong)lpfc_no_device_delay); + if(lpfc15_no_device_delay != -1) + value = (void *)((ulong)lpfc15_no_device_delay); + break; + case CFG_NETWORK_ON: /* network-on */ + value = (void *)((ulong)lpfc_network_on); + if(lpfc15_network_on != -1) + value = (void *)((ulong)lpfc15_network_on); + break; + case CFG_POST_IP_BUF: /* post-ip-buf */ + value = (void *)((ulong)lpfc_post_ip_buf); + if(lpfc15_post_ip_buf != -1) + value = (void *)((ulong)lpfc15_post_ip_buf); + break; + case CFG_XMT_Q_SIZE: /* xmt-que-size */ + value = (void *)((ulong)lpfc_xmt_que_size); + if(lpfc15_xmt_que_size != -1) + value = (void *)((ulong)lpfc15_xmt_que_size); + break; + case CFG_IP_CLASS: /* ip-class */ + value = (void *)((ulong)lpfc_ip_class); + if(lpfc15_ip_class != -1) + value = (void *)((ulong)lpfc15_ip_class); + break; + case CFG_ACK0: /* ack0 */ + value = (void *)((ulong)lpfc_ack0support); + if(lpfc15_ack0support != -1) + value = (void 
*)((ulong)lpfc15_ack0support); + break; + case CFG_TOPOLOGY: /* topology */ + value = (void *)((ulong)lpfc_topology); + if(lpfc15_topology != -1) + value = (void *)((ulong)lpfc15_topology); + break; + case CFG_SCAN_DOWN: /* scan-down */ + value = (void *)((ulong)lpfc_scandown); + if(lpfc15_scandown != -1) + value = (void *)((ulong)lpfc15_scandown); + break; + case CFG_LINKDOWN_TMO: /* linkdown-tmo */ + value = (void *)((ulong)lpfc_linkdown_tmo); + if(lpfc15_linkdown_tmo != -1) + value = (void *)((ulong)lpfc15_linkdown_tmo); + break; + case CFG_HOLDIO: /* nodev-holdio */ + value = (void *)((ulong)lpfc_nodev_holdio); + if(lpfc15_nodev_holdio != -1) + value = (void *)((ulong)lpfc15_nodev_holdio); + break; + case CFG_DELAY_RSP_ERR: /* delay-rsp-err */ + value = (void *)((ulong)lpfc_delay_rsp_err); + if(lpfc15_delay_rsp_err != -1) + value = (void *)((ulong)lpfc15_delay_rsp_err); + break; + case CFG_CHK_COND_ERR: /* check-cond-err */ + value = (void *)((ulong)lpfc_check_cond_err); + if(lpfc15_check_cond_err != -1) + value = (void *)((ulong)lpfc15_check_cond_err); + break; + case CFG_NODEV_TMO: /* nodev-tmo */ + value = (void *)((ulong)lpfc_nodev_tmo); + if(lpfc15_nodev_tmo != -1) + value = (void *)((ulong)lpfc15_nodev_tmo); + break; + case CFG_LINK_SPEED: /* link-speed */ + value = (void *)((ulong)lpfc_link_speed); + if(lpfc15_link_speed != -1) + value = (void *)((ulong)lpfc15_link_speed); + break; + case CFG_DQFULL_THROTTLE_UP_TIME: /* dqfull-throttle-up-time */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_time); + if(lpfc15_dqfull_throttle_up_time != -1) + value = (void *)((ulong)lpfc15_dqfull_throttle_up_time); + break; + case CFG_DQFULL_THROTTLE_UP_INC: /* dqfull-throttle-up-inc */ + value = (void *)((ulong)lpfc_dqfull_throttle_up_inc); + if(lpfc15_dqfull_throttle_up_inc != -1) + value = (void *)((ulong)lpfc15_dqfull_throttle_up_inc); + break; + } + break; + default: + break; + } + return(value); +} + + diff -purN -X /home/mbligh/.diff.exclude 
reference/drivers/scsi/lpfc/lpfc.spec current/drivers/scsi/lpfc/lpfc.spec --- reference/drivers/scsi/lpfc/lpfc.spec 1969-12-31 16:00:00.000000000 -0800 +++ current/drivers/scsi/lpfc/lpfc.spec 2004-04-09 11:53:04.000000000 -0700 @@ -0,0 +1,127 @@ +#******************************************************************* +# * This file is part of the Emulex Linux Device Driver for * +# * Enterprise Fibre Channel Host Bus Adapters. * +# * Refer to the README file included with this package for * +# * driver version and adapter support. * +# * Copyright (C) 2003 Emulex Corporation. * +# * www.emulex.com * +# * * +# * This program is free software; you can redistribute it and/or * +# * modify it under the terms of the GNU General Public License * +# * as published by the Free Software Foundation; either version 2 * +# * of the License, or (at your option) any later version. * +# * * +# * This program is distributed in the hope that it will be useful, * +# * but WITHOUT ANY WARRANTY; without even the implied warranty of * +# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * +# * GNU General Public License for more details, a copy of which * +# * can be found in the file COPYING included with this package. * +# ******************************************************************* +%define rel 1 +%define ver LPFC_DRIVER_VERSION +%define tarName lpfcdriver.tar + +Summary: Emulex Driver Kit for Linux +Name: lpfcdriver +Version: %ver +Release: %rel +Copyright: Emulex Corp. +Group: System Environment/Kernel +Source: %tarName +URL: http://www.emulex.com +Vendor: Emulex Corp. +Packager: Regis.Goupil +BuildRoot: /tmp/lpfc-%{ver} +Requires: kernel >= 2.2.0 +Requires: rpm >= 3.0.4 + +%description +Emulex Open Source Fibre Channel driver for Linux version %ver + +%prep + +%build +rm -rf %{buildroot} +mkdir -p %{buildroot}/lpfc-%{ver} +cd lpfc-%{ver} +find . 
-print | cpio -pdumv %{buildroot}/ + +%install +cd %{buildroot}/lpfc-%{ver} +tar -xf /usr/src/redhat/SOURCES/lpfc-%{ver}/lpfcdriver.tar +if [ -e /usr/src/linux-2.4/drivers/scsi ]; then dirval="linux-2.4"; \ +else if [ -e /usr/src/linux/drivers/scsi ]; then dirval="linux"; \ +else echo "Cannot find the kernel sources directory"; exit 1; fi fi +if [ -e /usr/src/$dirval/drivers/scsi/lpfc/ ]; then \ +rm -fr /usr/src/$dirval/drivers/scsi/lpfc; +fi +mkdir -p /usr/src/$dirval/drivers/scsi/lpfc; +cp * /usr/src/$dirval/drivers/scsi/lpfc; + +# post install (on client machine) section +%post +if [ -e /usr/src/linux-2.4/drivers/scsi ]; then dirval="linux-2.4"; \ +else if [ -e /usr/src/linux/drivers/scsi ]; then dirval="linux"; \ +else echo "Cannot find the kernel sources directory"; exit 1; fi fi +rm -fr lpfc-%{ver}/lpfc; +rm -fr /usr/src/$dirval/drivers/scsi/lpfc; +mkdir /usr/src/$dirval/drivers/scsi/lpfc; +cp lpfc-%{ver}/* /usr/src/$dirval/drivers/scsi/lpfc; + +echo " " +echo "The Emulex Open Source Driver has been installed on your system." +echo "Source files have been installed under a temporary directory /lpfc-%{ver}" +echo "and under /usr/src/$dirval/drivers/scsi/lpfc." +echo "Please refer to the Emulex documentation for the next step." +echo " " + +%preun +if [ -e /usr/src/linux-2.4/drivers/scsi/lpfc ]; then dirval="linux-2.4"; \ +else if [ -e /usr/src/linux/drivers/scsi/lpfc ]; then dirval="linux"; fi fi +echo " " +echo "The lpfcdriver rpm is about to be removed from your system." +echo "All the source files under /usr/src/$dirval/drivers/scsi/lpfc" +echo "and /lpfc-%{ver} will be removed." +echo " " + +%postun +if [ -e /usr/src/linux-2.4/drivers/scsi/lpfc ]; then dirval="linux-2.4"; \ +else if [ -e /usr/src/linux/drivers/scsi/lpfc ]; then dirval="linux"; fi fi +rm -fr /usr/src/$dirval/drivers/scsi/lpfc; +echo " " +echo "The Emulex Open Source Driver has been removed from your system." +echo "Remove the appropriate entry in modules.conf if this file was modified." 
+echo " " +rm -fr lpfc-%{ver}; + +%files +/lpfc-%{ver}/COPYING +/lpfc-%{ver}/dfcdd.c +/lpfc-%{ver}/dfc.h +/lpfc-%{ver}/fcclockb.c +/lpfc-%{ver}/fc_crtn.h +/lpfc-%{ver}/fcdds.h +/lpfc-%{ver}/fcdiag.h +/lpfc-%{ver}/fcelsb.c +/lpfc-%{ver}/fc_ertn.h +/lpfc-%{ver}/fcfgparm.h +/lpfc-%{ver}/fc.h +/lpfc-%{ver}/fc_hw.h +/lpfc-%{ver}/fcLINUXfcp.c +/lpfc-%{ver}/fcLINUXlan.c +/lpfc-%{ver}/fcmboxb.c +/lpfc-%{ver}/fcmemb.c +/lpfc-%{ver}/fcmsgcom.c +/lpfc-%{ver}/fcmsg.h +/lpfc-%{ver}/fc_os.h +/lpfc-%{ver}/fcrpib.c +/lpfc-%{ver}/fcscsib.c +/lpfc-%{ver}/fcstratb.c +/lpfc-%{ver}/fcxmitb.c +/lpfc-%{ver}/hbaapi.h +/lpfc-%{ver}/lp6000.c +/lpfc-%{ver}/lpfc.conf.c +/lpfc-%{ver}/lpfc.conf.defs +/lpfc-%{ver}/Makefile +/lpfc-%{ver}/README +/lpfc-%{ver}/lpfc.spec diff -purN -X /home/mbligh/.diff.exclude reference/drivers/serial/8250.c current/drivers/serial/8250.c --- reference/drivers/serial/8250.c 2004-04-07 14:54:23.000000000 -0700 +++ current/drivers/serial/8250.c 2004-04-08 15:10:20.000000000 -0700 @@ -834,7 +834,7 @@ receive_chars(struct uart_8250_port *up, if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) { tty->flip.work.func((void *)tty); if (tty->flip.count >= TTY_FLIPBUF_SIZE) - return; // if TTY_DONT_FLIP is set + return; /* if TTY_DONT_FLIP is set */ } ch = serial_inp(up, UART_RX); *tty->flip.char_buf_ptr = ch; @@ -1195,12 +1195,21 @@ static void serial8250_break_ctl(struct spin_unlock_irqrestore(&up->port.lock, flags); } +#ifdef CONFIG_KGDB +static int kgdb_irq = -1; +#endif + static int serial8250_startup(struct uart_port *port) { struct uart_8250_port *up = (struct uart_8250_port *)port; unsigned long flags; int retval; +#ifdef CONFIG_KGDB + if (up->port.irq == kgdb_irq) + return -EBUSY; +#endif + up->capabilities = uart_config[up->port.type].flags; if (up->port.type == PORT_16C950) { @@ -1866,6 +1875,10 @@ static void __init serial8250_register_p for (i = 0; i < UART_NR; i++) { struct uart_8250_port *up = &serial8250_ports[i]; +#ifdef CONFIG_KGDB + if (up->port.irq == 
kgdb_irq) + up->port.kgdb = 1; +#endif up->port.line = i; up->port.ops = &serial8250_pops; init_timer(&up->timer); @@ -2145,6 +2158,31 @@ void serial8250_resume_port(int line) uart_resume_port(&serial8250_reg, &serial8250_ports[line].port); } +#ifdef CONFIG_KGDB +/* + * Find all the ports using the given irq and shut them down. + * Result should be that the irq will be released. + */ +void shutdown_for_kgdb(struct async_struct * info) +{ + int irq = info->state->irq; + struct uart_8250_port *up; + int ttyS; + + kgdb_irq = irq; /* save for later init */ + for (ttyS = 0; ttyS < UART_NR; ttyS++){ + up = &serial8250_ports[ttyS]; + if (up->port.irq == irq && (irq_lists + irq)->head) { +#ifdef CONFIG_DEBUG_SPINLOCK /* ugly business... */ + if(up->port.lock.magic != SPINLOCK_MAGIC) + spin_lock_init(&up->port.lock); +#endif + serial8250_shutdown(&up->port); + } + } +} +#endif /* CONFIG_KGDB */ + static int __init serial8250_init(void) { int ret, i; diff -purN -X /home/mbligh/.diff.exclude reference/drivers/serial/serial_core.c current/drivers/serial/serial_core.c --- reference/drivers/serial/serial_core.c 2004-03-11 14:34:56.000000000 -0800 +++ current/drivers/serial/serial_core.c 2004-04-08 15:10:20.000000000 -0700 @@ -1985,6 +1985,11 @@ uart_configure_port(struct uart_driver * { unsigned int flags; +#ifdef CONFIG_KGDB + if (port->kgdb) + return; +#endif + /* * If there isn't a port here, don't do anything further. */ diff -purN -X /home/mbligh/.diff.exclude reference/fs/aio.c current/fs/aio.c --- reference/fs/aio.c 2004-04-07 14:54:28.000000000 -0700 +++ current/fs/aio.c 2004-04-09 13:26:41.000000000 -0700 @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -38,6 +39,9 @@ #define dprintk(x...) 
do { ; } while (0) #endif +long aio_run = 0; /* for testing only */ +long aio_wakeups = 0; /* for testing only */ + /*------ sysctl variables----*/ atomic_t aio_nr = ATOMIC_INIT(0); /* current system wide number of aio requests */ unsigned aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ @@ -203,6 +207,7 @@ static struct kioctx *ioctx_alloc(unsign { struct mm_struct *mm; struct kioctx *ctx; + int ret = 0; /* Prevent overflows */ if ((nr_events > (0x10000000U / sizeof(struct io_event))) || @@ -232,7 +237,8 @@ static struct kioctx *ioctx_alloc(unsign INIT_LIST_HEAD(&ctx->run_list); INIT_WORK(&ctx->wq, aio_kick_handler, ctx); - if (aio_setup_ring(ctx) < 0) + ret = aio_setup_ring(ctx); + if (unlikely(ret < 0)) goto out_freectx; /* limit the number of system wide aios */ @@ -259,7 +265,7 @@ out_cleanup: out_freectx: mmdrop(mm); kmem_cache_free(kioctx_cachep, ctx); - ctx = ERR_PTR(-ENOMEM); + ctx = ERR_PTR(ret); dprintk("aio: error allocating ioctx %p\n", ctx); return ctx; @@ -281,6 +287,7 @@ static void aio_cancel_all(struct kioctx struct kiocb *iocb = list_kiocb(pos); list_del_init(&iocb->ki_list); cancel = iocb->ki_cancel; + kiocbSetCancelled(iocb); if (cancel) { iocb->ki_users++; spin_unlock_irq(&ctx->ctx_lock); @@ -341,6 +348,11 @@ void fastcall exit_aio(struct mm_struct aio_cancel_all(ctx); wait_for_all_aios(ctx); + /* + * this is an overkill, but ensures we don't leave + * the ctx on the aio_wq + */ + flush_workqueue(aio_wq); if (1 != atomic_read(&ctx->users)) printk(KERN_DEBUG @@ -363,6 +375,7 @@ void fastcall __put_ioctx(struct kioctx if (unlikely(ctx->reqs_active)) BUG(); + flush_workqueue(aio_wq); aio_free_ring(ctx); mmdrop(ctx->mm); ctx->mm = NULL; @@ -400,6 +413,7 @@ static struct kiocb fastcall *__aio_get_ req->ki_cancel = NULL; req->ki_retry = NULL; req->ki_user_obj = NULL; + INIT_LIST_HEAD(&req->ki_run_list); /* Check if the completion queue has enough free space to * accept an event from this io. 
@@ -541,65 +555,323 @@ struct kioctx *lookup_ioctx(unsigned lon return ioctx; } -static void use_mm(struct mm_struct *mm) +/* + * use_mm + * Makes the calling kernel thread take on the specified + * mm context. + * Called by the retry thread execute retries within the + * iocb issuer's mm context, so that copy_from/to_user + * operations work seamlessly for aio. + * (Note: this routine is intended to be called only + * from a kernel thread context) + */ +void use_mm(struct mm_struct *mm) { - struct mm_struct *active_mm = current->active_mm; + struct mm_struct *active_mm; + struct task_struct *tsk = current; + + task_lock(tsk); + active_mm = tsk->active_mm; atomic_inc(&mm->mm_count); - current->mm = mm; - if (mm != active_mm) { - current->active_mm = mm; - activate_mm(active_mm, mm); - } + tsk->mm = mm; + tsk->active_mm = mm; + activate_mm(active_mm, mm); + task_unlock(tsk); + mmdrop(active_mm); } -static void unuse_mm(struct mm_struct *mm) +/* + * unuse_mm + * Reverses the effect of use_mm, i.e. releases the + * specified mm context which was earlier taken on + * by the calling kernel thread + * (Note: this routine is intended to be called only + * from a kernel thread context) + * + * Comments: Called with ctx->ctx_lock held. This nests + * task_lock instead ctx_lock. + */ +void unuse_mm(struct mm_struct *mm) { - current->mm = NULL; + struct task_struct *tsk = current; + + task_lock(tsk); + tsk->mm = NULL; /* active_mm is still 'mm' */ - enter_lazy_tlb(mm, current); + enter_lazy_tlb(mm, tsk); + task_unlock(tsk); } -/* Run on kevent's context. FIXME: needs to be per-cpu and warn if an - * operation blocks. +/* + * Queue up a kiocb to be retried. Assumes that the kiocb + * has already been marked as kicked, and places it on + * the retry run list for the corresponding ioctx, if it + * isn't already queued. Returns 1 if it actually queued + * the kiocb (to tell the caller to activate the work + * queue to process it), or 0, if it found that it was + * already queued. 
+ * + * Should be called with the spin lock iocb->ki_ctx->ctx_lock + * held */ -static void aio_kick_handler(void *data) +static inline int __queue_kicked_iocb(struct kiocb *iocb) { - struct kioctx *ctx = data; + struct kioctx *ctx = iocb->ki_ctx; - use_mm(ctx->mm); + if (list_empty(&iocb->ki_run_list)) { + list_add_tail(&iocb->ki_run_list, + &ctx->run_list); + iocb->ki_queued++; + return 1; + } + return 0; +} - spin_lock_irq(&ctx->ctx_lock); - while (!list_empty(&ctx->run_list)) { - struct kiocb *iocb; - long ret; +/* aio_run_iocb + * This is the core aio execution routine. It is + * invoked both for initial i/o submission and + * subsequent retries via the aio_kick_handler. + * Expects to be invoked with iocb->ki_ctx->lock + * already held. The lock is released and reaquired + * as needed during processing. + * + * Calls the iocb retry method (already setup for the + * iocb on initial submission) for operation specific + * handling, but takes care of most of common retry + * execution details for a given iocb. The retry method + * needs to be non-blocking as far as possible, to avoid + * holding up other iocbs waiting to be serviced by the + * retry kernel thread. + * + * The trickier parts in this code have to do with + * ensuring that only one retry instance is in progress + * for a given iocb at any time. Providing that guarantee + * simplifies the coding of individual aio operations as + * it avoids various potential races. + */ +static ssize_t aio_run_iocb(struct kiocb *iocb) +{ + struct kioctx *ctx = iocb->ki_ctx; + ssize_t (*retry)(struct kiocb *); + ssize_t ret; - iocb = list_entry(ctx->run_list.next, struct kiocb, - ki_run_list); - list_del(&iocb->ki_run_list); - iocb->ki_users ++; - spin_unlock_irq(&ctx->ctx_lock); + if (iocb->ki_retried++ > 1024*1024) { + printk("Maximal retry count. 
Bytes done %Zd\n", + iocb->ki_nbytes - iocb->ki_left); + return -EAGAIN; + } + + if (!(iocb->ki_retried & 0xff)) { + pr_debug("%ld retry: %d of %d (kick %ld, Q %ld run %ld, wake %ld)\n", + iocb->ki_retried, + iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes, + iocb->ki_kicked, iocb->ki_queued, aio_run, aio_wakeups); + } + + if (!(retry = iocb->ki_retry)) { + printk("aio_run_iocb: iocb->ki_retry = NULL\n"); + return 0; + } + + /* + * We don't want the next retry iteration for this + * operation to start until this one has returned and + * updated the iocb state. However, wait_queue functions + * can trigger a kick_iocb from interrupt context in the + * meantime, indicating that data is available for the next + * iteration. We want to remember that and enable the + * next retry iteration _after_ we are through with + * this one. + * + * So, in order to be able to register a "kick", but + * prevent it from being queued now, we clear the kick + * flag, but make the kick code *think* that the iocb is + * still on the run list until we are actually done. + * When we are done with this iteration, we check if + * the iocb was kicked in the meantime and if so, queue + * it up afresh. + */ + + kiocbClearKicked(iocb); - kiocbClearKicked(iocb); - ret = iocb->ki_retry(iocb); + /* + * This is so that aio_complete knows it doesn't need to + * pull the iocb off the run list (We can't just call + * INIT_LIST_HEAD because we don't want a kick_iocb to + * queue this on the run list yet) + */ + iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL; + spin_unlock_irq(&ctx->ctx_lock); + + /* Quit retrying if the i/o has been cancelled */ + if (kiocbIsCancelled(iocb)) { + ret = -EINTR; + aio_complete(iocb, ret, 0); + /* must not access the iocb after this */ + goto out; + } + + /* + * Now we are all set to call the retry method in async + * context. 
By setting this thread's io_wait context + * to point to the wait queue entry inside the currently + * running iocb for the duration of the retry, we ensure + * that async notification wakeups are queued by the + * operation instead of blocking waits, and when notified, + * cause the iocb to be kicked for continuation (through + * the aio_wake_function callback). + */ + BUG_ON(current->io_wait != NULL); + current->io_wait = &iocb->ki_wait; + ret = retry(iocb); + current->io_wait = NULL; + + if (-EIOCBRETRY != ret) { if (-EIOCBQUEUED != ret) { + BUG_ON(!list_empty(&iocb->ki_wait.task_list)); aio_complete(iocb, ret, 0); - iocb = NULL; + /* must not access the iocb after this */ } + } else { + /* + * Issue an additional retry to avoid waiting forever if + * no waits were queued (e.g. in case of a short read). + */ + if (list_empty(&iocb->ki_wait.task_list)) + kiocbSetKicked(iocb); + } +out: + spin_lock_irq(&ctx->ctx_lock); - spin_lock_irq(&ctx->ctx_lock); - if (NULL != iocb) - __aio_put_req(ctx, iocb); + if (-EIOCBRETRY == ret) { + /* + * OK, now that we are done with this iteration + * and know that there is more left to go, + * this is where we let go so that a subsequent + * "kick" can start the next iteration + */ + + /* will make __queue_kicked_iocb succeed from here on */ + INIT_LIST_HEAD(&iocb->ki_run_list); + /* we must queue the next iteration ourselves, if it + * has already been kicked */ + if (kiocbIsKicked(iocb)) { + __queue_kicked_iocb(iocb); + } } + return ret; +} + +/* + * __aio_run_iocbs: + * Process all pending retries queued on the ioctx + * run list. + * Assumes it is operating within the aio issuer's mm + * context. 
Expects to be called with ctx->ctx_lock held + */ +static int __aio_run_iocbs(struct kioctx *ctx) +{ + struct kiocb *iocb; + int count = 0; + LIST_HEAD(run_list); + + list_splice_init(&ctx->run_list, &run_list); + while (!list_empty(&run_list)) { + iocb = list_entry(run_list.next, struct kiocb, + ki_run_list); + list_del(&iocb->ki_run_list); + /* + * Hold an extra reference while retrying i/o. + */ + iocb->ki_users++; /* grab extra reference */ + aio_run_iocb(iocb); + if (__aio_put_req(ctx, iocb)) /* drop extra ref */ + put_ioctx(ctx); + count++; + } + aio_run++; + if (!list_empty(&ctx->run_list)) + return 1; + return 0; +} + +/* + * aio_run_iocbs: + * Process all pending retries queued on the ioctx + * run list. + * Assumes it is operating within the aio issuer's mm + * context. + */ +static inline void aio_run_iocbs(struct kioctx *ctx) +{ + int requeue; + + spin_lock_irq(&ctx->ctx_lock); + requeue = __aio_run_iocbs(ctx); spin_unlock_irq(&ctx->ctx_lock); + if (requeue) + queue_work(aio_wq, &ctx->wq); +} +/* + * aio_kick_handler: + * Work queue handler triggered to process pending + * retries on an ioctx. Takes on the aio issuer's + * mm context before running the iocbs, so that + * copy_xxx_user operates on the issuer's address + * space. + * Run on aiod's context. 
+ */ +static void aio_kick_handler(void *data) +{ + struct kioctx *ctx = data; + mm_segment_t oldfs = get_fs(); + int requeue; + + set_fs(USER_DS); + use_mm(ctx->mm); + spin_lock_irq(&ctx->ctx_lock); + requeue = __aio_run_iocbs(ctx); unuse_mm(ctx->mm); + spin_unlock_irq(&ctx->ctx_lock); + set_fs(oldfs); + if (requeue) + queue_work(aio_wq, &ctx->wq); } -void fastcall kick_iocb(struct kiocb *iocb) + +/* + * Called by kick_iocb to queue the kiocb for retry + * and if required activate the aio work queue to process + * it + */ +void queue_kicked_iocb(struct kiocb *iocb) { struct kioctx *ctx = iocb->ki_ctx; + unsigned long flags; + int run = 0; + + WARN_ON((!list_empty(&iocb->ki_wait.task_list))); + spin_lock_irqsave(&ctx->ctx_lock, flags); + run = __queue_kicked_iocb(iocb); + spin_unlock_irqrestore(&ctx->ctx_lock, flags); + if (run) { + queue_delayed_work(aio_wq, &ctx->wq, HZ/10); + aio_wakeups++; + } +} + +/* + * kick_iocb: + * Called typically from a wait queue callback context + * (aio_wake_function) to trigger a retry of the iocb. + * The retry is usually executed by aio workqueue + * threads (See aio_kick_handler). + */ +void fastcall kick_iocb(struct kiocb *iocb) +{ /* sync iocbs are easy: they can only ever be executing from a * single context. 
*/ if (is_sync_kiocb(iocb)) { @@ -608,12 +880,10 @@ void fastcall kick_iocb(struct kiocb *io return; } + iocb->ki_kicked++; + /* If its already kicked we shouldn't queue it again */ if (!kiocbTryKick(iocb)) { - unsigned long flags; - spin_lock_irqsave(&ctx->ctx_lock, flags); - list_add_tail(&iocb->ki_run_list, &ctx->run_list); - spin_unlock_irqrestore(&ctx->ctx_lock, flags); - schedule_work(&ctx->wq); + queue_kicked_iocb(iocb); } } @@ -666,6 +936,16 @@ int fastcall aio_complete(struct kiocb * */ spin_lock_irqsave(&ctx->ctx_lock, flags); + if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list)) + list_del_init(&iocb->ki_run_list); + + /* + * cancelled requests don't get events, userland was given one + * when the event got cancelled. + */ + if (kiocbIsCancelled(iocb)) + goto put_rq; + ring = kmap_atomic(info->ring_pages[0], KM_IRQ1); tail = info->tail; @@ -694,6 +974,11 @@ int fastcall aio_complete(struct kiocb * pr_debug("added to ring %p at [%lu]\n", iocb, tail); + pr_debug("%ld retries: %d of %d (kicked %ld, Q %ld run %ld wake %ld)\n", + iocb->ki_retried, + iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes, + iocb->ki_kicked, iocb->ki_queued, aio_run, aio_wakeups); +put_rq: /* everything turned out well, dispose of the aiocb. */ ret = __aio_put_req(ctx, iocb); @@ -808,13 +1093,15 @@ static int read_events(struct kioctx *ct int i = 0; struct io_event ent; struct timeout to; + int event_loop = 0; /* testing only */ + int retry = 0; /* needed to zero any padding within an entry (there shouldn't be * any, but C is fun! 
*/ memset(&ent, 0, sizeof(ent)); +retry: ret = 0; - while (likely(i < nr)) { ret = aio_read_evt(ctx, &ent); if (unlikely(ret <= 0)) @@ -843,6 +1130,13 @@ static int read_events(struct kioctx *ct /* End fast path */ + /* racey check, but it gets redone */ + if (!retry && unlikely(!list_empty(&ctx->run_list))) { + retry = 1; + aio_run_iocbs(ctx); + goto retry; + } + init_timeout(&to); if (timeout) { struct timespec ts; @@ -857,7 +1151,6 @@ static int read_events(struct kioctx *ct add_wait_queue_exclusive(&ctx->wait, &wait); do { set_task_state(tsk, TASK_INTERRUPTIBLE); - ret = aio_read_evt(ctx, &ent); if (ret) break; @@ -867,6 +1160,7 @@ static int read_events(struct kioctx *ct if (to.timed_out) /* Only check after read evt */ break; schedule(); + event_loop++; if (signal_pending(tsk)) { ret = -EINTR; break; @@ -894,6 +1188,9 @@ static int read_events(struct kioctx *ct if (timeout) clear_timeout(&to); out: + pr_debug("event loop executed %d times\n", event_loop); + pr_debug("aio_run %ld\n", aio_run); + pr_debug("aio_wakeups %ld\n", aio_wakeups); return i ? i : ret; } @@ -985,13 +1282,216 @@ asmlinkage long sys_io_destroy(aio_conte return -EINVAL; } +/* + * Retry method for aio_read (also used for first time submit) + * Responsible for updating iocb state as retries progress + */ +static ssize_t aio_pread(struct kiocb *iocb) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + ssize_t ret = 0; + + ret = file->f_op->aio_read(iocb, iocb->ki_buf, + iocb->ki_left, iocb->ki_pos); + + /* + * Can't just depend on iocb->ki_left to determine + * whether we are done. This may have been a short read. + */ + if (ret > 0) { + iocb->ki_buf += ret; + iocb->ki_left -= ret; + /* + * For pipes and sockets we return once we have + * some data; for regular files we retry till we + * complete the entire read or find that we can't + * read any more data (e.g short reads). 
+ */ + if (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)) + ret = -EIOCBRETRY; + } + + /* This means we must have transferred all that we could */ + /* No need to retry anymore */ + if ((ret == 0) || (iocb->ki_left == 0)) + ret = iocb->ki_nbytes - iocb->ki_left; + + return ret; +} + +/* + * Retry method for aio_write (also used for first time submit) + * Responsible for updating iocb state as retries progress + */ +static ssize_t aio_pwrite(struct kiocb *iocb) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + ssize_t ret = 0; + + ret = file->f_op->aio_write(iocb, iocb->ki_buf, + iocb->ki_left, iocb->ki_pos); + + /* + * Even if iocb->ki_left = 0, we may need to wait + * for a balance_dirty_pages to complete + */ + if (ret > 0) { + iocb->ki_buf += iocb->ki_buf ? ret : 0; + iocb->ki_left -= ret; + + ret = -EIOCBRETRY; + } + + /* This means we must have transferred all that we could */ + /* No need to retry anymore unless we need to osync data */ + if (ret == 0) { + ret = iocb->ki_nbytes - iocb->ki_left; + if (!iocb->ki_buf) + return ret; + + /* Set things up for potential O_SYNC */ + if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + iocb->ki_buf = NULL; + iocb->ki_pos -= ret; /* back up fpos */ + iocb->ki_left = ret; /* sync what we have written out */ + iocb->ki_nbytes = ret; + ret = -EIOCBRETRY; + } + } + + return ret; +} + +static ssize_t aio_fdsync(struct kiocb *iocb) +{ + struct file *file = iocb->ki_filp; + ssize_t ret = -EINVAL; + + if (file->f_op->aio_fsync) + ret = file->f_op->aio_fsync(iocb, 1); + return ret; +} + +static ssize_t aio_fsync(struct kiocb *iocb) +{ + struct file *file = iocb->ki_filp; + ssize_t ret = -EINVAL; + + if (file->f_op->aio_fsync) + ret = file->f_op->aio_fsync(iocb, 0); + return ret; +} + +/* + * Retry method for aio_poll (also used for first time submit) + * Responsible for updating iocb state as retries progress + */ +static 
ssize_t aio_poll(struct kiocb *iocb) +{ + unsigned events = (unsigned)(iocb->ki_buf); + return generic_aio_poll(iocb, events); +} + +/* + * aio_setup_iocb: + * Performs the initial checks and aio retry method + * setup for the kiocb at the time of io submission. + */ +ssize_t aio_setup_iocb(struct kiocb *kiocb) +{ + struct file *file = kiocb->ki_filp; + ssize_t ret = 0; + + switch (kiocb->ki_opcode) { + case IOCB_CMD_PREAD: + ret = -EBADF; + if (unlikely(!(file->f_mode & FMODE_READ))) + break; + ret = -EFAULT; + if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf, + kiocb->ki_left))) + break; + ret = -EINVAL; + if (file->f_op->aio_read) + kiocb->ki_retry = aio_pread; + break; + case IOCB_CMD_PWRITE: + ret = -EBADF; + if (unlikely(!(file->f_mode & FMODE_WRITE))) + break; + ret = -EFAULT; + if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf, + kiocb->ki_left))) + break; + ret = -EINVAL; + if (file->f_op->aio_write) + kiocb->ki_retry = aio_pwrite; + break; + case IOCB_CMD_FDSYNC: + ret = -EINVAL; + if (file->f_op->aio_fsync) + kiocb->ki_retry = aio_fdsync; + break; + case IOCB_CMD_FSYNC: + ret = -EINVAL; + if (file->f_op->aio_fsync) + kiocb->ki_retry = aio_fsync; + break; + case IOCB_CMD_POLL: + ret = -EINVAL; + if (file->f_op->poll) { + memset(kiocb->private, 0, sizeof(kiocb->private)); + kiocb->ki_retry = aio_poll; + } + break; + default: + dprintk("EINVAL: io_submit: no operation provided\n"); + ret = -EINVAL; + } + + if (!kiocb->ki_retry) + return ret; + + return 0; +} + +/* + * aio_wake_function: + * wait queue callback function for aio notification, + * Simply triggers a retry of the operation via kick_iocb. + * + * This callback is specified in the wait queue entry in + * a kiocb (current->io_wait points to this wait queue + * entry when an aio operation executes; it is used + * instead of a synchronous wait when an i/o blocking + * condition is encountered during aio). + * + * Note: + * This routine is executed with the wait queue lock held. 
+ * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests + * the ioctx lock inside the wait queue lock. This is safe + * because this callback isn't used for wait queues which + * are nested inside ioctx lock (i.e. ctx->wait) + */ +int aio_wake_function(wait_queue_t *wait, unsigned mode, int sync) +{ + struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait); + + list_del_init(&wait->task_list); + kick_iocb(iocb); + return 1; +} + int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, struct iocb *iocb) { struct kiocb *req; struct file *file; ssize_t ret; - char __user *buf; /* enforce forwards compatibility on users */ if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2 || @@ -1032,52 +1532,29 @@ int fastcall io_submit_one(struct kioctx req->ki_user_data = iocb->aio_data; req->ki_pos = iocb->aio_offset; - buf = (char __user *)(unsigned long)iocb->aio_buf; + req->ki_buf = (char *)(unsigned long)iocb->aio_buf; + req->ki_left = req->ki_nbytes = iocb->aio_nbytes; + req->ki_opcode = iocb->aio_lio_opcode; + init_waitqueue_func_entry(&req->ki_wait, aio_wake_function); + INIT_LIST_HEAD(&req->ki_wait.task_list); + req->ki_run_list.next = req->ki_run_list.prev = NULL; + req->ki_retry = NULL; + req->ki_retried = 0; + req->ki_kicked = 0; + req->ki_queued = 0; + aio_run = 0; + aio_wakeups = 0; - switch (iocb->aio_lio_opcode) { - case IOCB_CMD_PREAD: - ret = -EBADF; - if (unlikely(!(file->f_mode & FMODE_READ))) - goto out_put_req; - ret = -EFAULT; - if (unlikely(!access_ok(VERIFY_WRITE, buf, iocb->aio_nbytes))) - goto out_put_req; - ret = -EINVAL; - if (file->f_op->aio_read) - ret = file->f_op->aio_read(req, buf, - iocb->aio_nbytes, req->ki_pos); - break; - case IOCB_CMD_PWRITE: - ret = -EBADF; - if (unlikely(!(file->f_mode & FMODE_WRITE))) - goto out_put_req; - ret = -EFAULT; - if (unlikely(!access_ok(VERIFY_READ, buf, iocb->aio_nbytes))) - goto out_put_req; - ret = -EINVAL; - if (file->f_op->aio_write) - ret = file->f_op->aio_write(req, buf, 
- iocb->aio_nbytes, req->ki_pos); - break; - case IOCB_CMD_FDSYNC: - ret = -EINVAL; - if (file->f_op->aio_fsync) - ret = file->f_op->aio_fsync(req, 1); - break; - case IOCB_CMD_FSYNC: - ret = -EINVAL; - if (file->f_op->aio_fsync) - ret = file->f_op->aio_fsync(req, 0); - break; - default: - dprintk("EINVAL: io_submit: no operation provided\n"); - ret = -EINVAL; - } + ret = aio_setup_iocb(req); + if (ret) + goto out_put_req; + + spin_lock_irq(&ctx->ctx_lock); + list_add_tail(&req->ki_run_list, &ctx->run_list); + __aio_run_iocbs(ctx); + spin_unlock_irq(&ctx->ctx_lock); aio_put_req(req); /* drop extra ref to req */ - if (likely(-EIOCBQUEUED == ret)) - return 0; - aio_complete(req, ret, 0); /* will drop i/o ref to req */ return 0; out_put_req: @@ -1193,6 +1670,7 @@ asmlinkage long sys_io_cancel(aio_contex if (kiocb && kiocb->ki_cancel) { cancel = kiocb->ki_cancel; kiocb->ki_users ++; + kiocbSetCancelled(kiocb); } else cancel = NULL; spin_unlock_irq(&ctx->ctx_lock); diff -purN -X /home/mbligh/.diff.exclude reference/fs/binfmt_elf.c current/fs/binfmt_elf.c --- reference/fs/binfmt_elf.c 2004-04-07 14:54:28.000000000 -0700 +++ current/fs/binfmt_elf.c 2004-04-09 11:53:04.000000000 -0700 @@ -7,6 +7,7 @@ * Tools". * * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com). 
+ * Top-down vma allocation support, William Irwin, IBM, 2003 */ #include @@ -334,8 +335,13 @@ static unsigned long load_elf_interp(str if (retval < 0) goto out_close; +#ifndef CONFIG_MMAP_TOPDOWN eppnt = elf_phdata; for (i=0; ie_phnum; i++, eppnt++) { +#else + eppnt = &elf_phdata[interp_elf_ex->e_phnum - 1]; + for (i = interp_elf_ex->e_phnum - 1; i >= 0; --i, --eppnt) { +#endif if (eppnt->p_type == PT_LOAD) { int elf_type = MAP_PRIVATE | MAP_DENYWRITE; int elf_prot = 0; @@ -349,7 +355,8 @@ static unsigned long load_elf_interp(str if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) elf_type |= MAP_FIXED; - map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type); + map_addr = load_addr_set ? load_addr + vaddr : 0; + map_addr = elf_map(interpreter, map_addr, eppnt, elf_prot, elf_type); error = map_addr; if (BAD_ADDR(map_addr)) goto out_close; diff -purN -X /home/mbligh/.diff.exclude reference/fs/buffer.c current/fs/buffer.c --- reference/fs/buffer.c 2004-04-07 14:54:28.000000000 -0700 +++ current/fs/buffer.c 2004-04-08 15:10:25.000000000 -0700 @@ -837,19 +837,10 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode); * * FIXME: may need to call ->reservepage here as well. That's rather up to the * address_space though. - * - * For now, we treat swapper_space specially. It doesn't use the normal - * block a_ops. 
*/ int __set_page_dirty_buffers(struct page *page) { struct address_space * const mapping = page->mapping; - int ret = 0; - - if (mapping == NULL) { - SetPageDirty(page); - goto out; - } spin_lock(&mapping->private_lock); if (page_has_buffers(page)) { @@ -878,8 +869,7 @@ int __set_page_dirty_buffers(struct page __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); } -out: - return ret; + return 0; } EXPORT_SYMBOL(__set_page_dirty_buffers); @@ -1576,7 +1566,7 @@ static inline void discard_buffer(struct */ int try_to_release_page(struct page *page, int gfp_mask) { - struct address_space * const mapping = page->mapping; + struct address_space * const mapping = page_mapping(page); if (!PageLocked(page)) BUG(); @@ -2881,7 +2871,7 @@ failed: int try_to_free_buffers(struct page *page) { - struct address_space * const mapping = page->mapping; + struct address_space * const mapping = page_mapping(page); struct buffer_head *buffers_to_free = NULL; int ret = 0; @@ -2889,14 +2879,14 @@ int try_to_free_buffers(struct page *pag if (PageWriteback(page)) return 0; - if (mapping == NULL) { /* swapped-in anon page */ + if (mapping == NULL) { /* can this still happen? */ ret = drop_buffers(page, &buffers_to_free); goto out; } spin_lock(&mapping->private_lock); ret = drop_buffers(page, &buffers_to_free); - if (ret && !PageSwapCache(page)) { + if (ret) { /* * If the filesystem writes its buffers by hand (eg ext3) * then we can have clean buffers against a dirty page. We diff -purN -X /home/mbligh/.diff.exclude reference/fs/exec.c current/fs/exec.c --- reference/fs/exec.c 2004-03-11 14:35:06.000000000 -0800 +++ current/fs/exec.c 2004-04-08 15:10:25.000000000 -0700 @@ -45,7 +45,7 @@ #include #include #include -#include +#include #include #include @@ -293,53 +293,46 @@ EXPORT_SYMBOL(copy_strings_kernel); * This routine is used to map in a page into an address space: needed by * execve() for the initial stack and environment pages. * - * tsk->mmap_sem is held for writing. 
+ * tsk->mm->mmap_sem is held for writing. */ void put_dirty_page(struct task_struct *tsk, struct page *page, unsigned long address, pgprot_t prot) { + struct mm_struct *mm = tsk->mm; pgd_t * pgd; pmd_t * pmd; pte_t * pte; - struct pte_chain *pte_chain; if (page_count(page) != 1) printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address); - pgd = pgd_offset(tsk->mm, address); - pte_chain = pte_chain_alloc(GFP_KERNEL); - if (!pte_chain) - goto out_sig; - spin_lock(&tsk->mm->page_table_lock); - pmd = pmd_alloc(tsk->mm, pgd, address); + pgd = pgd_offset(mm, address); + spin_lock(&mm->page_table_lock); + pmd = pmd_alloc(mm, pgd, address); if (!pmd) goto out; - pte = pte_alloc_map(tsk->mm, pmd, address); + pte = pte_alloc_map(mm, pmd, address); if (!pte) goto out; if (!pte_none(*pte)) { pte_unmap(pte); goto out; } + mm->rss++; lru_cache_add_active(page); flush_dcache_page(page); set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot)))); - pte_chain = page_add_rmap(page, pte, pte_chain); + page_add_anon_rmap(page, mm, address); pte_unmap(pte); - tsk->mm->rss++; - spin_unlock(&tsk->mm->page_table_lock); + spin_unlock(&mm->page_table_lock); /* no need for flush_tlb */ - pte_chain_free(pte_chain); return; out: - spin_unlock(&tsk->mm->page_table_lock); -out_sig: + spin_unlock(&mm->page_table_lock); __free_page(page); force_sig(SIGKILL, tsk); - pte_chain_free(pte_chain); - return; } int setup_arg_pages(struct linux_binprm *bprm) diff -purN -X /home/mbligh/.diff.exclude reference/fs/fcntl.c current/fs/fcntl.c --- reference/fs/fcntl.c 2004-04-07 14:54:28.000000000 -0700 +++ current/fs/fcntl.c 2004-04-09 11:53:04.000000000 -0700 @@ -537,9 +537,19 @@ int send_sigurg(struct fown_struct *fown return ret; } -static rwlock_t fasync_lock = RW_LOCK_UNLOCKED; +static spinlock_t fasync_lock = SPIN_LOCK_UNLOCKED; static kmem_cache_t *fasync_cache; +struct fasync_rcu_struct { + struct fasync_struct data; + struct rcu_head rcu; +}; + +static void fasync_free(void *data) +{ + 
kmem_cache_free(fasync_cache, data); +} + /* * fasync_helper() is used by some character device drivers (mainly mice) * to set up the fasync queue. It returns negative on error, 0 if it did @@ -548,7 +558,7 @@ static kmem_cache_t *fasync_cache; int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) { struct fasync_struct *fa, **fp; - struct fasync_struct *new = NULL; + struct fasync_rcu_struct *new = NULL; int result = 0; if (on) { @@ -556,15 +566,23 @@ int fasync_helper(int fd, struct file * if (!new) return -ENOMEM; } - write_lock_irq(&fasync_lock); + spin_lock(&fasync_lock); for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { if (fa->fa_file == filp) { if(on) { + /* RCU violation: + * We are modifying a struct that's visible by + * readers. If there is a fasync notification + * right now, then it could go to either the + * old or the new fd. Shouldn't matter. + * Manfred + */ fa->fa_fd = fd; kmem_cache_free(fasync_cache, new); } else { *fp = fa->fa_next; - kmem_cache_free(fasync_cache, fa); + new = container_of(fa, struct fasync_rcu_struct, data); + call_rcu(&new->rcu, fasync_free, new); result = 1; } goto out; @@ -572,15 +590,16 @@ int fasync_helper(int fd, struct file * } if (on) { - new->magic = FASYNC_MAGIC; - new->fa_file = filp; - new->fa_fd = fd; - new->fa_next = *fapp; - *fapp = new; + new->data.magic = FASYNC_MAGIC; + new->data.fa_file = filp; + new->data.fa_fd = fd; + new->data.fa_next = *fapp; + smp_wmb(); + *fapp = &new->data; result = 1; } out: - write_unlock_irq(&fasync_lock); + spin_unlock(&fasync_lock); return result; } @@ -590,7 +609,8 @@ void __kill_fasync(struct fasync_struct { while (fa) { struct fown_struct * fown; - if (fa->magic != FASYNC_MAGIC) { + read_barrier_depends(); + if (unlikely(fa->magic != FASYNC_MAGIC)) { printk(KERN_ERR "kill_fasync: bad magic number in " "fasync_struct!\n"); return; @@ -613,10 +633,10 @@ void kill_fasync(struct fasync_struct ** * the list is empty. 
*/ if (*fp) { - read_lock(&fasync_lock); + rcu_read_lock(); /* reread *fp after obtaining the lock */ __kill_fasync(*fp, sig, band); - read_unlock(&fasync_lock); + rcu_read_unlock(); } } @@ -625,7 +645,7 @@ EXPORT_SYMBOL(kill_fasync); static int __init fasync_init(void) { fasync_cache = kmem_cache_create("fasync_cache", - sizeof(struct fasync_struct), 0, 0, NULL, NULL); + sizeof(struct fasync_rcu_struct), 0, 0, NULL, NULL); if (!fasync_cache) panic("cannot create fasync slab cache"); return 0; diff -purN -X /home/mbligh/.diff.exclude reference/fs/hugetlbfs/inode.c current/fs/hugetlbfs/inode.c --- reference/fs/hugetlbfs/inode.c 2004-04-07 14:54:29.000000000 -0700 +++ current/fs/hugetlbfs/inode.c 2004-04-09 21:41:39.000000000 -0700 @@ -26,12 +26,17 @@ #include #include #include +#include #include +#include /* some random number */ #define HUGETLBFS_MAGIC 0x958458f6 +extern int mmap_use_hugepages; +extern int mmap_hugepages_map_sz; + static struct super_operations hugetlbfs_ops; static struct address_space_operations hugetlbfs_aops; struct file_operations hugetlbfs_file_operations; @@ -82,7 +87,7 @@ static int hugetlbfs_file_mmap(struct fi unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); #else -static unsigned long +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { @@ -165,7 +170,7 @@ void truncate_hugepages(struct address_s pagevec_init(&pvec, 0); next = start; while (1) { - if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + if (!pagevec_lookup(&pvec, mapping, &next, PAGEVEC_SIZE)) { if (next == start) break; next = start; @@ -176,9 +181,6 @@ void truncate_hugepages(struct address_s struct page *page = pvec.pages[i]; lock_page(page); - if (page->index > next) - next = page->index; - ++next; truncate_huge_page(page); unlock_page(page); hugetlb_put_quota(mapping); diff -purN -X 
/home/mbligh/.diff.exclude reference/fs/jbd/transaction.c current/fs/jbd/transaction.c --- reference/fs/jbd/transaction.c 2004-02-04 16:24:23.000000000 -0800 +++ current/fs/jbd/transaction.c 2004-04-09 21:41:42.000000000 -0700 @@ -1112,26 +1112,21 @@ int journal_dirty_metadata(handle_t *han * I _think_ we're OK here with SMP barriers - a mistaken decision will * result in this test being false, so we go in and take the locks. */ - if (jh->b_transaction == handle->h_transaction && - jh->b_jlist == BJ_Metadata) { + if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) { JBUFFER_TRACE(jh, "fastpath"); J_ASSERT_JH(jh, jh->b_transaction == journal->j_running_transaction); goto out_unlock_bh; } - spin_lock(&journal->j_list_lock); set_buffer_jbddirty(bh); - J_ASSERT_JH(jh, jh->b_transaction != NULL); - /* * Metadata already on the current transaction list doesn't * need to be filed. Metadata on another transaction's list must * be committing, and will be refiled once the commit completes: * leave it alone for now. */ - if (jh->b_transaction != transaction) { JBUFFER_TRACE(jh, "already on other transaction"); J_ASSERT_JH(jh, jh->b_transaction == @@ -1139,17 +1134,15 @@ int journal_dirty_metadata(handle_t *han J_ASSERT_JH(jh, jh->b_next_transaction == transaction); /* And this case is illegal: we can't reuse another * transaction's data buffer, ever. 
*/ - /* FIXME: writepage() should be journalled */ - goto out_unlock_list; + goto out_unlock_bh; } /* That test should have eliminated the following case: */ J_ASSERT_JH(jh, jh->b_frozen_data == 0); JBUFFER_TRACE(jh, "file as BJ_Metadata"); + spin_lock(&journal->j_list_lock); __journal_file_buffer(jh, handle->h_transaction, BJ_Metadata); - -out_unlock_list: spin_unlock(&journal->j_list_lock); out_unlock_bh: jbd_unlock_bh_state(bh); diff -purN -X /home/mbligh/.diff.exclude reference/fs/pipe.c current/fs/pipe.c --- reference/fs/pipe.c 2004-04-07 14:54:31.000000000 -0700 +++ current/fs/pipe.c 2004-04-09 13:23:20.000000000 -0700 @@ -33,15 +33,21 @@ */ /* Drop the inode semaphore and wait for a pipe event, atomically */ -void pipe_wait(struct inode * inode) +int pipe_wait(struct inode * inode) { - DEFINE_WAIT(wait); + DEFINE_WAIT(local_wait); + wait_queue_t *wait = &local_wait; - prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE); + if (current->io_wait) + wait = current->io_wait; + prepare_to_wait(PIPE_WAIT(*inode), wait, TASK_INTERRUPTIBLE); + if (!is_sync_wait(wait)) + return -EIOCBRETRY; up(PIPE_SEM(*inode)); schedule(); - finish_wait(PIPE_WAIT(*inode), &wait); + finish_wait(PIPE_WAIT(*inode), wait); down(PIPE_SEM(*inode)); + return 0; } static inline int @@ -81,11 +87,11 @@ pipe_iov_copy_to_user(struct iovec *iov, iov->iov_base += copy; iov->iov_len -= copy; } - return 0; + return 0; } static ssize_t -pipe_readv(struct file *filp, const struct iovec *_iov, +pipe_aio_readv(struct file *filp, const struct iovec *_iov, unsigned long nr_segs, loff_t *ppos) { struct inode *inode = filp->f_dentry->d_inode; @@ -93,6 +99,7 @@ pipe_readv(struct file *filp, const stru ssize_t ret; struct iovec *iov = (struct iovec *)_iov; size_t total_len; + ssize_t retry; /* pread is not allowed on pipes. 
*/ if (unlikely(ppos != &filp->f_pos)) @@ -156,7 +163,12 @@ pipe_readv(struct file *filp, const stru wake_up_interruptible_sync(PIPE_WAIT(*inode)); kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT); } - pipe_wait(inode); + retry = pipe_wait(inode); + if (retry == -EIOCBRETRY) { + if (!ret) + ret = retry; + break; + } } up(PIPE_SEM(*inode)); /* Signal writers asynchronously that there is more room. */ @@ -173,11 +185,15 @@ static ssize_t pipe_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct iovec iov = { .iov_base = buf, .iov_len = count }; - return pipe_readv(filp, &iov, 1, ppos); + ssize_t ret; + ret = pipe_aio_readv(filp, &iov, 1, ppos); + if (ret == -EIOCBRETRY) + BUG(); + return ret; } static ssize_t -pipe_writev(struct file *filp, const struct iovec *_iov, +pipe_aio_writev(struct file *filp, const struct iovec *_iov, unsigned long nr_segs, loff_t *ppos) { struct inode *inode = filp->f_dentry->d_inode; @@ -186,6 +202,7 @@ pipe_writev(struct file *filp, const str int do_wakeup; struct iovec *iov = (struct iovec *)_iov; size_t total_len; + int retry; /* pwrite is not allowed on pipes. 
*/ if (unlikely(ppos != &filp->f_pos)) @@ -254,7 +271,12 @@ pipe_writev(struct file *filp, const str do_wakeup = 0; } PIPE_WAITING_WRITERS(*inode)++; - pipe_wait(inode); + retry = pipe_wait(inode); + if (retry == -EIOCBRETRY) { + if (!ret) + ret = retry; + break; + } PIPE_WAITING_WRITERS(*inode)--; } up(PIPE_SEM(*inode)); @@ -272,7 +294,41 @@ pipe_write(struct file *filp, const char size_t count, loff_t *ppos) { struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; - return pipe_writev(filp, &iov, 1, ppos); + return pipe_aio_writev(filp, &iov, 1, ppos); +} + +static int +pipe_aio_cancel(struct kiocb *iocb, struct io_event *evt) +{ + struct inode *inode = iocb->ki_filp->f_dentry->d_inode; + evt->obj = (u64)(unsigned long)iocb->ki_user_obj; + evt->data = iocb->ki_user_data; + evt->res = iocb->ki_nbytes - iocb->ki_left; + if (evt->res == 0) + evt->res = -EINTR; + evt->res2 = 0; + wake_up_interruptible(PIPE_WAIT(*inode)); + aio_put_req(iocb); + return 0; +} + +static ssize_t +pipe_aio_write(struct kiocb *iocb, const char __user *buf, + size_t count, loff_t pos) +{ + struct file *file = iocb->ki_filp; + struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; + iocb->ki_cancel = pipe_aio_cancel; + return pipe_aio_writev(file, &iov, 1, &file->f_pos); +} + +static ssize_t +pipe_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) +{ + struct file *file = iocb->ki_filp; + struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; + iocb->ki_cancel = pipe_aio_cancel; + return pipe_aio_readv(file, &iov, 1, &file->f_pos); } static ssize_t @@ -467,7 +523,8 @@ pipe_rdwr_open(struct inode *inode, stru struct file_operations read_fifo_fops = { .llseek = no_llseek, .read = pipe_read, - .readv = pipe_readv, + .readv = pipe_aio_readv, + .aio_read = pipe_aio_read, .write = bad_pipe_w, .poll = fifo_poll, .ioctl = pipe_ioctl, @@ -480,7 +537,8 @@ struct file_operations write_fifo_fops = .llseek = no_llseek, .read = 
bad_pipe_r, .write = pipe_write, - .writev = pipe_writev, + .writev = pipe_aio_writev, + .aio_write = pipe_aio_write, .poll = fifo_poll, .ioctl = pipe_ioctl, .open = pipe_write_open, @@ -491,9 +549,11 @@ struct file_operations write_fifo_fops = struct file_operations rdwr_fifo_fops = { .llseek = no_llseek, .read = pipe_read, - .readv = pipe_readv, + .readv = pipe_aio_readv, .write = pipe_write, - .writev = pipe_writev, + .writev = pipe_aio_writev, + .aio_write = pipe_aio_write, + .aio_read = pipe_aio_read, .poll = fifo_poll, .ioctl = pipe_ioctl, .open = pipe_rdwr_open, @@ -504,7 +564,8 @@ struct file_operations rdwr_fifo_fops = struct file_operations read_pipe_fops = { .llseek = no_llseek, .read = pipe_read, - .readv = pipe_readv, + .aio_read = pipe_aio_read, + .readv = pipe_aio_readv, .write = bad_pipe_w, .poll = pipe_poll, .ioctl = pipe_ioctl, @@ -517,7 +578,8 @@ struct file_operations write_pipe_fops = .llseek = no_llseek, .read = bad_pipe_r, .write = pipe_write, - .writev = pipe_writev, + .writev = pipe_aio_writev, + .aio_write = pipe_aio_write, .poll = pipe_poll, .ioctl = pipe_ioctl, .open = pipe_write_open, @@ -528,9 +590,11 @@ struct file_operations write_pipe_fops = struct file_operations rdwr_pipe_fops = { .llseek = no_llseek, .read = pipe_read, - .readv = pipe_readv, + .readv = pipe_aio_readv, + .aio_read = pipe_aio_read, + .aio_write = pipe_aio_write, .write = pipe_write, - .writev = pipe_writev, + .writev = pipe_aio_writev, .poll = pipe_poll, .ioctl = pipe_ioctl, .open = pipe_rdwr_open, diff -purN -X /home/mbligh/.diff.exclude reference/fs/proc/array.c current/fs/proc/array.c --- reference/fs/proc/array.c 2004-03-11 14:35:11.000000000 -0800 +++ current/fs/proc/array.c 2004-04-09 21:41:41.000000000 -0700 @@ -345,9 +345,15 @@ int proc_pid_stat(struct task_struct *ta read_lock(&tasklist_lock); ppid = task->pid ? 
task->real_parent->pid : 0; read_unlock(&tasklist_lock); +#ifdef CONFIG_SCHEDSTATS + res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ +%lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \ +%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu %lu %lu %lu\n", +#else res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ %lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \ %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", +#endif /* CONFIG_SCHEDSTATS */ task->pid, task->comm, state, @@ -393,7 +399,14 @@ int proc_pid_stat(struct task_struct *ta task->exit_signal, task_cpu(task), task->rt_priority, +#ifdef CONFIG_SCHEDSTATS + task->policy, + task->sched_info.cpu_time, + task->sched_info.run_delay, + task->sched_info.pcnt); +#else task->policy); +#endif /* CONFIG_SCHEDSTATS */ if(mm) mmput(mm); return res; diff -purN -X /home/mbligh/.diff.exclude reference/fs/proc/base.c current/fs/proc/base.c --- reference/fs/proc/base.c 2004-03-11 14:35:11.000000000 -0800 +++ current/fs/proc/base.c 2004-04-09 13:27:12.000000000 -0700 @@ -58,6 +58,7 @@ enum pid_directory_inos { PROC_TGID_STAT, PROC_TGID_STATM, PROC_TGID_MAPS, + PROC_TGID_MAPS_STATS, PROC_TGID_MOUNTS, PROC_TGID_WCHAN, #ifdef CONFIG_SECURITY @@ -81,6 +82,7 @@ enum pid_directory_inos { PROC_TID_STAT, PROC_TID_STATM, PROC_TID_MAPS, + PROC_TID_MAPS_STATS, PROC_TID_MOUNTS, PROC_TID_WCHAN, #ifdef CONFIG_SECURITY @@ -90,6 +92,7 @@ enum pid_directory_inos { PROC_TID_ATTR_EXEC, PROC_TID_ATTR_FSCREATE, #endif + PROC_PID_MAPBASE, PROC_TID_FD_DIR = 0x8000, /* 0x8000-0xffff */ }; @@ -112,6 +115,7 @@ static struct pid_entry tgid_base_stuff[ E(PROC_TGID_STAT, "stat", S_IFREG|S_IRUGO), E(PROC_TGID_STATM, "statm", S_IFREG|S_IRUGO), E(PROC_TGID_MAPS, "maps", S_IFREG|S_IRUGO), + E(PROC_TGID_MAPS_STATS,"maps_stats", S_IFREG|S_IRUGO), E(PROC_TGID_MEM, "mem", S_IFREG|S_IRUSR|S_IWUSR), E(PROC_TGID_CWD, "cwd", S_IFLNK|S_IRWXUGO), E(PROC_TGID_ROOT, "root", S_IFLNK|S_IRWXUGO), @@ -123,6 
+127,9 @@ static struct pid_entry tgid_base_stuff[ #ifdef CONFIG_KALLSYMS E(PROC_TGID_WCHAN, "wchan", S_IFREG|S_IRUGO), #endif +#ifdef __HAS_ARCH_PROC_MAPPED_BASE + E(PROC_PID_MAPBASE, "mapped_base",S_IFREG|S_IRUSR|S_IWUSR), +#endif {0,0,NULL,0} }; static struct pid_entry tid_base_stuff[] = { @@ -134,6 +141,7 @@ static struct pid_entry tid_base_stuff[] E(PROC_TID_STAT, "stat", S_IFREG|S_IRUGO), E(PROC_TID_STATM, "statm", S_IFREG|S_IRUGO), E(PROC_TID_MAPS, "maps", S_IFREG|S_IRUGO), + E(PROC_TID_MAPS_STATS, "maps_stats", S_IFREG|S_IRUGO), E(PROC_TID_MEM, "mem", S_IFREG|S_IRUSR|S_IWUSR), E(PROC_TID_CWD, "cwd", S_IFLNK|S_IRWXUGO), E(PROC_TID_ROOT, "root", S_IFLNK|S_IRWXUGO), @@ -473,6 +481,25 @@ static struct file_operations proc_maps_ .release = seq_release, }; +extern struct seq_operations proc_pid_maps_stats_op; +static int maps_stats_open(struct inode *inode, struct file *file) +{ + struct task_struct *task = proc_task(inode); + int ret = seq_open(file, &proc_pid_maps_stats_op); + if (!ret) { + struct seq_file *m = file->private_data; + m->private = task; + } + return ret; +} + +static struct file_operations proc_maps_stats_operations = { + .open = maps_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + extern struct seq_operations mounts_op; static int mounts_open(struct inode *inode, struct file *file) { @@ -689,6 +716,58 @@ static struct file_operations proc_mem_o .open = mem_open, }; +#ifdef __HAS_ARCH_PROC_MAPPED_BASE +static ssize_t mapbase_read(struct file * file, char * buf, + size_t count, loff_t *ppos) +{ + struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task; + char buffer[64]; + size_t len; + + sprintf(buffer, "%li", task->map_base); + len=strlen(buffer)+1; + *ppos += len; + if (copy_to_user(buf, buffer, len)) + len = -EFAULT; + + return (len<*ppos)?0:len; +} + +static ssize_t mapbase_write(struct file * file, const char * buf, + size_t count, loff_t *ppos) +{ + struct task_struct *task = 
PROC_I(file->f_dentry->d_inode)->task; + char buffer[64]; + int len; + unsigned long newbase; + if (!capable(CAP_SYS_ADMIN)) return -EPERM; + memset(buffer, 0, 64); + len = count; + if (len>63) + len = 63; + if (copy_from_user(buffer, buf, len)) + return -EFAULT; + + for (len = 0; len < 64; len++) + if (!buffer[len]) + break; + if (len>60) + return -EFAULT; + + newbase = simple_strtoul(buffer, NULL, 0); + + if (newbase > 0) + task->map_base = newbase; + + return len; +} + +static struct file_operations proc_mapbase_operations = { + read: mapbase_read, + write: mapbase_write, +}; +#endif /* __HAS_ARCH_PROC_MAPPED_BASE */ + static struct inode_operations proc_mem_inode_operations = { .permission = proc_permission, }; @@ -1353,6 +1432,10 @@ static struct dentry *proc_pident_lookup case PROC_TGID_MAPS: inode->i_fop = &proc_maps_operations; break; + case PROC_TID_MAPS_STATS: + case PROC_TGID_MAPS_STATS: + inode->i_fop = &proc_maps_stats_operations; + break; case PROC_TID_MEM: case PROC_TGID_MEM: inode->i_op = &proc_mem_inode_operations; @@ -1391,6 +1474,11 @@ static struct dentry *proc_pident_lookup ei->op.proc_read = proc_pid_wchan; break; #endif +#ifdef __HAS_ARCH_PROC_MAPPED_BASE + case PROC_PID_MAPBASE: + inode->i_fop = &proc_mapbase_operations; + break; +#endif default: printk("procfs: impossible type (%d)",p->type); iput(inode); diff -purN -X /home/mbligh/.diff.exclude reference/fs/proc/proc_misc.c current/fs/proc/proc_misc.c --- reference/fs/proc/proc_misc.c 2004-04-07 14:54:31.000000000 -0700 +++ current/fs/proc/proc_misc.c 2004-04-09 21:41:41.000000000 -0700 @@ -51,6 +51,10 @@ #include #include +#ifdef CONFIG_MCOUNT +#include +#endif + #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) /* @@ -134,6 +138,41 @@ static struct vmalloc_info get_vmalloc_i return vmi; } +static int real_loadavg_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int a, b, c, cpu; + int len; + + a = 
tasks_running[0] + (FIXED_1/200); + b = tasks_running[1] + (FIXED_1/200); + c = tasks_running[2] + (FIXED_1/200); + len = sprintf(page,"Domain load1 load2 load3 nr_run/nr_thrd\n"); + len += sprintf(page+len,"SYSTEM %5d.%02d %5d.%02d %5d.%02d %7ld/%7d\n", + LOAD_INT(a), LOAD_FRAC(a), + LOAD_INT(b), LOAD_FRAC(b), + LOAD_INT(c), LOAD_FRAC(c), + nr_running(), nr_threads); + for (cpu = 0; cpu < NR_CPUS; ++cpu) { + unsigned long nr_running; + if (!cpu_online(cpu)) + continue; + preempt_disable(); + a = per_cpu(cpu_tasks_running,cpu)[0] + (FIXED_1/200); + b = per_cpu(cpu_tasks_running,cpu)[1] + (FIXED_1/200); + c = per_cpu(cpu_tasks_running,cpu)[2] + (FIXED_1/200); + nr_running = nr_running_cpu(cpu); + preempt_enable(); + len += sprintf(page+len, "%5d %5d.%02d %5d.%02d %5d.%02d %7ld/%7d\n", + cpu, + LOAD_INT(a), LOAD_FRAC(a), + LOAD_INT(b), LOAD_FRAC(b), + LOAD_INT(c), LOAD_FRAC(c), + nr_running, nr_threads); + } + return proc_calc_metrics(page, start, off, count, eof, len); +} + static int uptime_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -286,6 +325,10 @@ static struct file_operations proc_vmsta .release = seq_release, }; +#ifdef CONFIG_SCHEDSTATS +extern struct file_operations proc_schedstat_operations; +#endif + #ifdef CONFIG_PROC_HARDWARE static int hardware_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) @@ -342,6 +385,71 @@ static struct file_operations proc_modul }; #endif +#ifdef CONFIG_NUMA +#define K(x) ((x) << (PAGE_SHIFT - 10)) +static int show_meminfo_numa (struct seq_file *m, void *v) +{ + int *d = v; + int nid = *d; + struct sysinfo i; + si_meminfo_node(&i, nid); + seq_printf(m, "\n" + "Node %d MemTotal: %8lu kB\n" + "Node %d MemFree: %8lu kB\n" + "Node %d MemUsed: %8lu kB\n" + "Node %d HighTotal: %8lu kB\n" + "Node %d HighFree: %8lu kB\n" + "Node %d LowTotal: %8lu kB\n" + "Node %d LowFree: %8lu kB\n", + nid, K(i.totalram), + nid, K(i.freeram), + nid, K(i.totalram-i.freeram), + nid, 
K(i.totalhigh), + nid, K(i.freehigh), + nid, K(i.totalram-i.totalhigh), + nid, K(i.freeram-i.freehigh)); + + return 0; +} +#undef K + +extern struct seq_operations meminfo_numa_op; +static int meminfo_numa_open(struct inode *inode, struct file *file) +{ + return seq_open(file,&meminfo_numa_op); +} + +static struct file_operations proc_meminfo_numa_operations = { + open: meminfo_numa_open, + read: seq_read, + llseek: seq_lseek, + release: seq_release, +}; + +static void *meminfo_numa_start(struct seq_file *m, loff_t *pos) +{ + return *pos < numnodes ? pos : NULL; +} + +static void *meminfo_numa_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return meminfo_numa_start(m, pos); +} + +static void meminfo_numa_stop(struct seq_file *m, void *v) +{ +} + +struct seq_operations meminfo_numa_op = { + .start = meminfo_numa_start, + .next = meminfo_numa_next, + .stop = meminfo_numa_stop, + .show = show_meminfo_numa, +}; + +#endif + extern struct seq_operations slabinfo_op; extern ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); static int slabinfo_open(struct inode *inode, struct file *file) @@ -651,6 +759,36 @@ static void create_seq_entry(char *name, entry->proc_fops = f; } +#ifdef CONFIG_LOCKMETER +extern ssize_t get_lockmeter_info(char *, size_t, loff_t *); +extern ssize_t put_lockmeter_info(const char *, size_t); +extern int get_lockmeter_info_size(void); + +/* + * This function accesses lock metering information. 
+ */ +static ssize_t read_lockmeter(struct file *file, char *buf, + size_t count, loff_t *ppos) +{ + return get_lockmeter_info(buf, count, ppos); +} + +/* + * Writing to /proc/lockmeter resets the counters + */ +static ssize_t write_lockmeter(struct file * file, const char * buf, + size_t count, loff_t *ppos) +{ + return put_lockmeter_info(buf, count); +} + +static struct file_operations proc_lockmeter_operations = { + NULL, /* lseek */ + read: read_lockmeter, + write: write_lockmeter, +}; +#endif /* CONFIG_LOCKMETER */ + void __init proc_misc_init(void) { struct proc_dir_entry *entry; @@ -659,6 +797,7 @@ void __init proc_misc_init(void) int (*read_proc)(char*,char**,off_t,int,int*,void*); } *p, simple_ones[] = { {"loadavg", loadavg_read_proc}, + {"real_loadavg",real_loadavg_read_proc}, {"uptime", uptime_read_proc}, {"meminfo", meminfo_read_proc}, {"version", version_read_proc}, @@ -698,6 +837,12 @@ void __init proc_misc_init(void) #ifdef CONFIG_MODULES create_seq_entry("modules", 0, &proc_modules_operations); #endif +#ifdef CONFIG_NUMA + create_seq_entry("meminfo.numa",0,&proc_meminfo_numa_operations); +#endif +#ifdef CONFIG_SCHEDSTATS + create_seq_entry("schedstat", 0, &proc_schedstat_operations); +#endif #ifdef CONFIG_PROC_KCORE proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL); if (proc_root_kcore) { @@ -718,6 +863,13 @@ void __init proc_misc_init(void) if (entry) entry->proc_fops = &proc_sysrq_trigger_operations; #endif +#ifdef CONFIG_LOCKMETER + entry = create_proc_entry("lockmeter", S_IWUSR | S_IRUGO, NULL); + if (entry) { + entry->proc_fops = &proc_lockmeter_operations; + entry->size = get_lockmeter_info_size(); + } +#endif #ifdef CONFIG_PPC32 { extern struct file_operations ppc_htab_operations; @@ -726,4 +878,13 @@ void __init proc_misc_init(void) entry->proc_fops = &ppc_htab_operations; } #endif +#ifdef CONFIG_MCOUNT + { + extern struct file_operations mcount_operations; + extern struct proc_dir_entry *mcount_pde; + mcount_pde = 
create_proc_entry("mcount", S_IRUGO|S_IWUSR, NULL); + if (mcount_pde) + mcount_pde->proc_fops = &mcount_operations; + } +#endif } diff -purN -X /home/mbligh/.diff.exclude reference/fs/proc/task_mmu.c current/fs/proc/task_mmu.c --- reference/fs/proc/task_mmu.c 2004-02-04 16:24:24.000000000 -0800 +++ current/fs/proc/task_mmu.c 2004-04-09 13:27:12.000000000 -0700 @@ -112,6 +112,115 @@ static int show_map(struct seq_file *m, return 0; } +static int show_map_stats(struct seq_file *m, void *v) +{ + unsigned long pfn, vaddr, vma_end, end, pgds = 0, pmds = 0, ptes = 0; + unsigned long total_pages = 0, zero_pages = 0, not_present = 0; + unsigned long mapping = 0, multi_count = 0, single_count = 0, none = 0; + unsigned long node_pages[MAX_NUMNODES]; + struct vm_area_struct *vma = v; + struct mm_struct *mm = vma->vm_mm; + pgd_t *pgd_p, *pgd_start, *pgd_end; + pmd_t *pmd_p, *pmd_start, *pmd_end; + pte_t *pte_p, *pte_start, *pte_end; + pte_t pte; + struct page *page; + int node; + + show_map(m, v); + + for (node = 0; node < numnodes; ++node) + node_pages[node] = 0; + + if (!mm) { + seq_printf(m, "mm is NULL!\n"); + return 0; + } + if (is_vm_hugetlb_page(vma)) + return 0; + + spin_lock(&mm->page_table_lock); + + vaddr = vma->vm_start & PAGE_MASK; + /* "vma->vm_end - 1" is the last valid address. 
*/ + vma_end = (vma->vm_end - 1) & PAGE_MASK; + + /* Loop over the pgd entries */ + pgd_start = pgd_offset(mm, vaddr); + pgd_end = pgd_offset(mm, vma_end); + for (pgd_p = pgd_start; pgd_p <= pgd_end; pgd_p++, + vaddr = (vaddr+PGDIR_SIZE) & PGDIR_MASK) { + ++pgds; + + if (pgd_none(*pgd_p) || pgd_bad(*pgd_p)) + continue; + + /* Loop over the pmd entries */ + pmd_start = pmd_offset(pgd_p, vaddr); + end = min(vma_end, ((vaddr+PGDIR_SIZE) & PGDIR_MASK) - 1); + pmd_end = pmd_offset(pgd_p, end); + for (pmd_p = pmd_start; pmd_p <= pmd_end; pmd_p++, + vaddr = (vaddr+PMD_SIZE) & PMD_MASK) { + ++pmds; + + if (pmd_none(*pmd_p) || pmd_bad (*pmd_p)) + continue; + + /* Loop over the pte entries */ + pte_start = pte_offset_map(pmd_p, vaddr); + end = min(vma_end, ((vaddr+PMD_SIZE) & PMD_MASK) - 1); + pte_end = pte_offset_kernel(pmd_p, end); + for (pte_p = pte_start; pte_p <= pte_end; pte_p++, + vaddr += PAGE_SIZE) { + ++ptes; + + pte = *pte_p; + ++total_pages; + if (pte_none(pte)) { + ++none; + continue; + } + if (pte_present(pte)) { + pfn = pte_pfn(pte); + if (!pfn) { + ++zero_pages; + continue; + } + if (!pfn_valid(pfn)) { + continue; + } + node_pages[pfn_to_nid(pfn)]++; + page = pfn_to_page(pfn); + /* if (PageDirect(page)) + * single_count++; + * else + * multi_count++; + */ + /* all mapping should be the same */ + mapping = (unsigned long) page->mapping; + } else { + ++not_present; + } + } + pte_unmap(pte_start); + } + } + spin_unlock(&mm->page_table_lock); + + seq_printf(m, " mapping: %08lx\n", mapping); + seq_printf(m, "%5lu zero:%lu none:%lu absent:%lu single:%lu multi:%lu\n", + total_pages, none, zero_pages, not_present, + single_count, multi_count); +#ifdef CONFIG_NUMA + seq_printf(m, " NUMA: "); + for (node = 0; node < numnodes; ++node) + seq_printf(m, "%d:%lu ", node, node_pages[node]); + seq_printf(m, "\n"); +#endif + + return 0; +} + static void *m_start(struct seq_file *m, loff_t *pos) { struct task_struct *task = m->private; @@ -165,3 +274,11 @@ struct seq_operations 
proc_pid_maps_op = .stop = m_stop, .show = show_map }; + +struct seq_operations proc_pid_maps_stats_op = { + .start = m_start, + .next = m_next, + .stop = m_stop, + .show = show_map_stats +}; + diff -purN -X /home/mbligh/.diff.exclude reference/fs/select.c current/fs/select.c --- reference/fs/select.c 2003-10-14 15:50:30.000000000 -0700 +++ current/fs/select.c 2004-04-09 13:23:20.000000000 -0700 @@ -21,6 +21,7 @@ #include /* for STICKY_TIMEOUTS */ #include #include +#include #include @@ -39,6 +40,12 @@ struct poll_table_page { struct poll_table_entry entries[0]; }; +struct aio_poll_table { + int init; + struct poll_wqueues wq; + struct poll_table_page table; +}; + #define POLL_TABLE_FULL(table) \ ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table)) @@ -109,12 +116,34 @@ void __pollwait(struct file *filp, wait_ /* Add a new entry */ { struct poll_table_entry * entry = table->entry; + wait_queue_t *wait; + wait_queue_t *aio_wait = current->io_wait; + + if (aio_wait) { + /* for aio, there can only be one wait_address. + * we might be adding it again via a retry call + * if so, just return. 
+ * if not, bad things are happening + */ + if (table->entry != table->entries) { + if (table->entries[0].wait_address != wait_address) + BUG(); + return; + } + } + table->entry = entry+1; get_file(filp); entry->filp = filp; entry->wait_address = wait_address; init_waitqueue_entry(&entry->wait, current); - add_wait_queue(wait_address,&entry->wait); + + /* if we're in aioland, use current->io_wait */ + if (aio_wait) + wait = aio_wait; + else + wait = &entry->wait; + add_wait_queue(wait_address,wait); } } @@ -533,3 +562,76 @@ out_fds: poll_freewait(&table); return err; } + +static void aio_poll_freewait(struct aio_poll_table *ap, struct kiocb *iocb) +{ + struct poll_table_page * p = ap->wq.table; + if (p) { + struct poll_table_entry * entry = p->entry; + if (entry > p->entries) { + /* + * there is only one entry for aio polls + */ + entry = p->entries; + if (iocb) + finish_wait(entry->wait_address,&iocb->ki_wait); + else + wake_up(entry->wait_address); + fput(entry->filp); + } + } + ap->init = 0; +} + +static int +aio_poll_cancel(struct kiocb *iocb, struct io_event *evt) +{ + struct aio_poll_table *aio_table; + aio_table = (struct aio_poll_table *)iocb->private; + + evt->obj = (u64)(unsigned long)iocb->ki_user_obj; + evt->data = iocb->ki_user_data; + evt->res = iocb->ki_nbytes - iocb->ki_left; + if (evt->res == 0) + evt->res = -EINTR; + evt->res2 = 0; + if (aio_table->init) + aio_poll_freewait(aio_table, NULL); + aio_put_req(iocb); + return 0; +} + +ssize_t generic_aio_poll(struct kiocb *iocb, unsigned events) +{ + struct aio_poll_table *aio_table; + unsigned mask; + struct file *file = iocb->ki_filp; + aio_table = (struct aio_poll_table *)iocb->private; + + /* fast path */ + mask = file->f_op->poll(file, NULL); + mask &= events | POLLERR | POLLHUP; + if (mask) + return mask; + + if ((sizeof(*aio_table) + sizeof(struct poll_table_entry)) > + sizeof(iocb->private)) + BUG(); + + if (!aio_table->init) { + aio_table->init = 1; + poll_initwait(&aio_table->wq); + 
aio_table->wq.table = &aio_table->table; + aio_table->table.next = NULL; + aio_table->table.entry = aio_table->table.entries; + } + iocb->ki_cancel = aio_poll_cancel; + + mask = file->f_op->poll(file, &aio_table->wq.pt); + mask &= events | POLLERR | POLLHUP; + if (mask) { + aio_poll_freewait(aio_table, iocb); + return mask; + } + return -EIOCBRETRY; +} diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-alpha/ioctls.h current/include/asm-alpha/ioctls.h --- reference/include/asm-alpha/ioctls.h 2003-04-09 11:48:05.000000000 -0700 +++ current/include/asm-alpha/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -91,6 +91,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-alpha/irq.h current/include/asm-alpha/irq.h --- reference/include/asm-alpha/irq.h 2003-10-01 11:35:30.000000000 -0700 +++ current/include/asm-alpha/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -93,5 +93,8 @@ extern void enable_irq(unsigned int); struct pt_regs; extern void (*perf_irq)(unsigned long, struct pt_regs *); +struct irqaction; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _ALPHA_IRQ_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-alpha/lockmeter.h current/include/asm-alpha/lockmeter.h --- reference/include/asm-alpha/lockmeter.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-alpha/lockmeter.h 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,90 @@ +/* + * Written by John Hawkes (hawkes@sgi.com) + * Based on klstat.h by Jack Steiner (steiner@sgi.com) + * + * Modified by Peter Rival (frival@zk3.dec.com) + */ + +#ifndef _ALPHA_LOCKMETER_H +#define 
_ALPHA_LOCKMETER_H + +#include +#define CPU_CYCLE_FREQUENCY hwrpb->cycle_freq + +#define get_cycles64() get_cycles() + +#define THIS_CPU_NUMBER smp_processor_id() + +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) +#define local_irq_save(x) \ + __save_and_cli(x) +#define local_irq_restore(x) \ + __restore_flags(x) +#endif /* Linux version 2.2.x */ + +#define SPINLOCK_MAGIC_INIT /**/ + +/* + * Macros to cache and retrieve an index value inside of a lock + * these macros assume that there are less than 65536 simultaneous + * (read mode) holders of a rwlock. + * We also assume that the hash table has less than 32767 entries. + * the high order bit is used for write locking a rw_lock + * Note: although these defines and macros are the same as what is being used + * in include/asm-i386/lockmeter.h, they are present here to easily + * allow an alternate Alpha implementation. + */ +/* + * instrumented spinlock structure -- never used to allocate storage + * only used in macros below to overlay a spinlock_t + */ +typedef struct inst_spinlock_s { + /* remember, Alpha is little endian */ + unsigned short lock; + unsigned short index; +} inst_spinlock_t; +#define PUT_INDEX(lock_ptr,indexv) ((inst_spinlock_t *)(lock_ptr))->index = indexv +#define GET_INDEX(lock_ptr) ((inst_spinlock_t *)(lock_ptr))->index + +/* + * macros to cache and retrieve an index value in a read/write lock + * as well as the cpu where a reader busy period started + * we use the 2nd word (the debug word) for this, so require the + * debug word to be present + */ +/* + * instrumented rwlock structure -- never used to allocate storage + * only used in macros below to overlay a rwlock_t + */ +typedef struct inst_rwlock_s { + volatile int lock; + unsigned short index; + unsigned short cpu; +} inst_rwlock_t; +#define PUT_RWINDEX(rwlock_ptr,indexv) ((inst_rwlock_t *)(rwlock_ptr))->index = indexv +#define GET_RWINDEX(rwlock_ptr) ((inst_rwlock_t *)(rwlock_ptr))->index +#define PUT_RW_CPU(rwlock_ptr,cpuv) 
((inst_rwlock_t *)(rwlock_ptr))->cpu = cpuv +#define GET_RW_CPU(rwlock_ptr) ((inst_rwlock_t *)(rwlock_ptr))->cpu + +/* + * return true if rwlock is write locked + * (note that other lock attempts can cause the lock value to be negative) + */ +#define RWLOCK_IS_WRITE_LOCKED(rwlock_ptr) (((inst_rwlock_t *)rwlock_ptr)->lock & 1) +#define IABS(x) ((x) > 0 ? (x) : -(x)) + +#define RWLOCK_READERS(rwlock_ptr) rwlock_readers(rwlock_ptr) +extern inline int rwlock_readers(rwlock_t *rwlock_ptr) +{ + int tmp = (int) ((inst_rwlock_t *)rwlock_ptr)->lock; + /* readers subtract 2, so we have to: */ + /* - andnot off a possible writer (bit 0) */ + /* - get the absolute value */ + /* - divide by 2 (right shift by one) */ + /* to find the number of readers */ + if (tmp == 0) return(0); + else return(IABS(tmp & ~1)>>1); +} + +#endif /* _ALPHA_LOCKMETER_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-alpha/pgtable.h current/include/asm-alpha/pgtable.h --- reference/include/asm-alpha/pgtable.h 2003-10-14 15:50:32.000000000 -0700 +++ current/include/asm-alpha/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -349,6 +349,4 @@ extern void paging_init(void); /* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. 
*/ #define HAVE_ARCH_UNMAPPED_AREA -typedef pte_t *pte_addr_t; - #endif /* _ALPHA_PGTABLE_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-alpha/rmap.h current/include/asm-alpha/rmap.h --- reference/include/asm-alpha/rmap.h 2002-12-09 18:46:10.000000000 -0800 +++ current/include/asm-alpha/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _ALPHA_RMAP_H -#define _ALPHA_RMAP_H - -/* nothing to see, move along */ -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-alpha/spinlock.h current/include/asm-alpha/spinlock.h --- reference/include/asm-alpha/spinlock.h 2003-06-05 14:55:52.000000000 -0700 +++ current/include/asm-alpha/spinlock.h 2004-04-08 15:10:21.000000000 -0700 @@ -6,6 +6,10 @@ #include #include +#ifdef CONFIG_LOCKMETER +#undef DEBUG_SPINLOCK +#undef DEBUG_RWLOCK +#endif /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -95,9 +99,18 @@ static inline int _raw_spin_trylock(spin typedef struct { volatile int write_lock:1, read_counter:31; +#ifdef CONFIG_LOCKMETER + /* required for LOCKMETER since all bits in lock are used */ + /* need this storage for CPU and lock INDEX ............. 
*/ + unsigned magic; +#endif } /*__attribute__((aligned(32)))*/ rwlock_t; +#ifdef CONFIG_LOCKMETER +#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0 } +#else #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } +#endif #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) #define rwlock_is_locked(x) (*(volatile int *)(x) != 0) @@ -169,4 +182,41 @@ static inline void _raw_read_unlock(rwlo : "m" (*lock) : "memory"); } +#ifdef CONFIG_LOCKMETER +static inline int _raw_write_trylock(rwlock_t *lock) +{ + long temp,result; + + __asm__ __volatile__( + " ldl_l %1,%0\n" + " mov $31,%2\n" + " bne %1,1f\n" + " or $31,1,%2\n" + " stl_c %2,%0\n" + "1: mb\n" + : "=m" (*(volatile int *)lock), "=&r" (temp), "=&r" (result) + : "m" (*(volatile int *)lock) + ); + + return (result); +} + +static inline int _raw_read_trylock(rwlock_t *lock) +{ + unsigned long temp,result; + + __asm__ __volatile__( + " ldl_l %1,%0\n" + " mov $31,%2\n" + " blbs %1,1f\n" + " subl %1,2,%2\n" + " stl_c %2,%0\n" + "1: mb\n" + : "=m" (*(volatile int *)lock), "=&r" (temp), "=&r" (result) + : "m" (*(volatile int *)lock) + ); + return (result); +} +#endif /* CONFIG_LOCKMETER */ + #endif /* _ALPHA_SPINLOCK_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-arm/cacheflush.h current/include/asm-arm/cacheflush.h --- reference/include/asm-arm/cacheflush.h 2004-03-11 14:35:14.000000000 -0800 +++ current/include/asm-arm/cacheflush.h 2004-04-08 15:10:25.000000000 -0700 @@ -283,7 +283,7 @@ flush_cache_page(struct vm_area_struct * * flush_dcache_page is used when the kernel has written to the page * cache page at virtual address page->virtual. * - * If this page isn't mapped (ie, page->mapping = NULL), or it has + * If this page isn't mapped (ie, page_mapping == NULL), or it has * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared) * then we _must_ always clean + invalidate the dcache entries associated * with the kernel mapping. 
@@ -299,7 +299,7 @@ extern void __flush_dcache_page(struct p static inline void flush_dcache_page(struct page *page) { - if (page->mapping && !mapping_mapped(page->mapping)) + if (page_mapping(page) && !mapping_mapped(page->mapping)) set_bit(PG_dcache_dirty, &page->flags); else __flush_dcache_page(page); diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-arm/ioctls.h current/include/asm-arm/ioctls.h --- reference/include/asm-arm/ioctls.h 2003-04-09 11:48:05.000000000 -0700 +++ current/include/asm-arm/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -48,6 +48,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ #define FIOCLEX 0x5451 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-arm/irq.h current/include/asm-arm/irq.h --- reference/include/asm-arm/irq.h 2003-06-05 14:39:14.000000000 -0700 +++ current/include/asm-arm/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -44,5 +44,9 @@ void disable_irq_wake(unsigned int irq); void enable_irq_wake(unsigned int irq); int setup_irq(unsigned int, struct irqaction *); +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-arm/kmap_types.h current/include/asm-arm/kmap_types.h --- reference/include/asm-arm/kmap_types.h 2004-01-15 10:41:16.000000000 -0800 +++ current/include/asm-arm/kmap_types.h 2004-04-08 15:10:26.000000000 -0700 @@ -14,7 +14,6 @@ enum km_type { KM_BIO_DST_IRQ, KM_PTE0, KM_PTE1, - KM_PTE2, KM_IRQ0, KM_IRQ1, KM_SOFTIRQ0, diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-arm/pgtable.h current/include/asm-arm/pgtable.h --- 
reference/include/asm-arm/pgtable.h 2004-01-15 10:41:16.000000000 -0800 +++ current/include/asm-arm/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -353,8 +353,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD #define io_remap_page_range(vma,from,phys,size,prot) \ remap_page_range(vma,from,phys,size,prot) -typedef pte_t *pte_addr_t; - #define pgtable_cache_init() do { } while (0) #endif /* !__ASSEMBLY__ */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-arm/rmap.h current/include/asm-arm/rmap.h --- reference/include/asm-arm/rmap.h 2002-12-09 18:45:42.000000000 -0800 +++ current/include/asm-arm/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,6 +0,0 @@ -#ifndef _ARM_RMAP_H -#define _ARM_RMAP_H - -#include - -#endif /* _ARM_RMAP_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-arm26/irq.h current/include/asm-arm26/irq.h --- reference/include/asm-arm26/irq.h 2003-06-19 14:41:50.000000000 -0700 +++ current/include/asm-arm26/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -45,6 +45,8 @@ extern void enable_irq(unsigned int); int set_irq_type(unsigned int irq, unsigned int type); int setup_irq(unsigned int, struct irqaction *); +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-arm26/pgtable.h current/include/asm-arm26/pgtable.h --- reference/include/asm-arm26/pgtable.h 2003-10-14 15:50:32.000000000 -0700 +++ current/include/asm-arm26/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -290,8 +290,6 @@ static inline pte_t mk_pte_phys(unsigned #define io_remap_page_range(vma,from,phys,size,prot) \ remap_page_range(vma,from,phys,size,prot) -typedef pte_t *pte_addr_t; - #endif /* !__ASSEMBLY__ */ #endif /* _ASMARM_PGTABLE_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-arm26/rmap.h current/include/asm-arm26/rmap.h --- reference/include/asm-arm26/rmap.h 2003-06-19 14:41:50.000000000 -0700 +++ 
current/include/asm-arm26/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,66 +0,0 @@ -#ifndef _ARM_RMAP_H -#define _ARM_RMAP_H - -/* - * linux/include/asm-arm26/proc-armv/rmap.h - * - * Architecture dependant parts of the reverse mapping code, - * - * ARM is different since hardware page tables are smaller than - * the page size and Linux uses a "duplicate" one with extra info. - * For rmap this means that the first 2 kB of a page are the hardware - * page tables and the last 2 kB are the software page tables. - */ - -static inline void pgtable_add_rmap(struct page *page, struct mm_struct * mm, unsigned long address) -{ - page->mapping = (void *)mm; - page->index = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1); - inc_page_state(nr_page_table_pages); -} - -static inline void pgtable_remove_rmap(struct page *page) -{ - page->mapping = NULL; - page->index = 0; - dec_page_state(nr_page_table_pages); -} - -static inline struct mm_struct * ptep_to_mm(pte_t * ptep) -{ - struct page * page = virt_to_page(ptep); - return (struct mm_struct *)page->mapping; -} - -/* The page table takes half of the page */ -#define PTE_MASK ((PAGE_SIZE / 2) - 1) - -static inline unsigned long ptep_to_address(pte_t * ptep) -{ - struct page * page = virt_to_page(ptep); - unsigned long low_bits; - - low_bits = ((unsigned long)ptep & PTE_MASK) * PTRS_PER_PTE; - return page->index + low_bits; -} - -//FIXME!!! IS these correct? 
-static inline pte_addr_t ptep_to_paddr(pte_t *ptep) -{ - return (pte_addr_t)ptep; -} - -static inline pte_t *rmap_ptep_map(pte_addr_t pte_paddr) -{ - return (pte_t *)pte_paddr; -} - -static inline void rmap_ptep_unmap(pte_t *pte) -{ - return; -} - - -//#include - -#endif /* _ARM_RMAP_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-cris/irq.h current/include/asm-cris/irq.h --- reference/include/asm-cris/irq.h 2003-07-28 15:31:10.000000000 -0700 +++ current/include/asm-cris/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -14,6 +14,10 @@ extern void enable_irq(unsigned int); #define disable_irq_nosync disable_irq #define enable_irq_nosync enable_irq +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _ASM_IRQ_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-cris/pgtable.h current/include/asm-cris/pgtable.h --- reference/include/asm-cris/pgtable.h 2003-07-28 15:31:11.000000000 -0700 +++ current/include/asm-cris/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -337,6 +337,4 @@ extern inline void update_mmu_cache(stru #define pte_to_pgoff(x) (pte_val(x) >> 6) #define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE) -typedef pte_t *pte_addr_t; - #endif /* _CRIS_PGTABLE_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-cris/rmap.h current/include/asm-cris/rmap.h --- reference/include/asm-cris/rmap.h 2002-12-09 18:46:10.000000000 -0800 +++ current/include/asm-cris/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _CRIS_RMAP_H -#define _CRIS_RMAP_H - -/* nothing to see, move along :) */ -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-generic/rmap.h current/include/asm-generic/rmap.h --- reference/include/asm-generic/rmap.h 2003-06-05 14:56:02.000000000 -0700 +++ current/include/asm-generic/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,90 +0,0 @@ -#ifndef _GENERIC_RMAP_H -#define _GENERIC_RMAP_H -/* 
- * linux/include/asm-generic/rmap.h - * - * Architecture dependent parts of the reverse mapping code, - * this version should work for most architectures with a - * 'normal' page table layout. - * - * We use the struct page of the page table page to find out - * the process and full address of a page table entry: - * - page->mapping points to the process' mm_struct - * - page->index has the high bits of the address - * - the lower bits of the address are calculated from the - * offset of the page table entry within the page table page - * - * For CONFIG_HIGHPTE, we need to represent the address of a pte in a - * scalar pte_addr_t. The pfn of the pte's page is shifted left by PAGE_SIZE - * bits and is then ORed with the byte offset of the pte within its page. - * - * For CONFIG_HIGHMEM4G, the pte_addr_t is 32 bits. 20 for the pfn, 12 for - * the offset. - * - * For CONFIG_HIGHMEM64G, the pte_addr_t is 64 bits. 52 for the pfn, 12 for - * the offset. - */ -#include - -static inline void pgtable_add_rmap(struct page * page, struct mm_struct * mm, unsigned long address) -{ -#ifdef BROKEN_PPC_PTE_ALLOC_ONE - /* OK, so PPC calls pte_alloc() before mem_map[] is setup ... 
;( */ - extern int mem_init_done; - - if (!mem_init_done) - return; -#endif - page->mapping = (void *)mm; - page->index = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1); - inc_page_state(nr_page_table_pages); -} - -static inline void pgtable_remove_rmap(struct page * page) -{ - page->mapping = NULL; - page->index = 0; - dec_page_state(nr_page_table_pages); -} - -static inline struct mm_struct * ptep_to_mm(pte_t * ptep) -{ - struct page * page = kmap_atomic_to_page(ptep); - return (struct mm_struct *) page->mapping; -} - -static inline unsigned long ptep_to_address(pte_t * ptep) -{ - struct page * page = kmap_atomic_to_page(ptep); - unsigned long low_bits; - low_bits = ((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE; - return page->index + low_bits; -} - -#ifdef CONFIG_HIGHPTE -static inline pte_addr_t ptep_to_paddr(pte_t *ptep) -{ - pte_addr_t paddr; - paddr = ((pte_addr_t)page_to_pfn(kmap_atomic_to_page(ptep))) << PAGE_SHIFT; - return paddr + (pte_addr_t)((unsigned long)ptep & ~PAGE_MASK); -} -#else -static inline pte_addr_t ptep_to_paddr(pte_t *ptep) -{ - return (pte_addr_t)ptep; -} -#endif - -#ifndef CONFIG_HIGHPTE -static inline pte_t *rmap_ptep_map(pte_addr_t pte_paddr) -{ - return (pte_t *)pte_paddr; -} - -static inline void rmap_ptep_unmap(pte_t *pte) -{ - return; -} -#endif - -#endif /* _GENERIC_RMAP_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-generic/tlb.h current/include/asm-generic/tlb.h --- reference/include/asm-generic/tlb.h 2004-03-11 14:35:14.000000000 -0800 +++ current/include/asm-generic/tlb.h 2004-04-08 15:10:22.000000000 -0700 @@ -146,4 +146,6 @@ static inline void tlb_remove_page(struc __pmd_free_tlb(tlb, pmdp); \ } while (0) +#define tlb_migrate_prepare(mm) do { } while(0) + #endif /* _ASM_GENERIC__TLB_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-h8300/irq.h current/include/asm-h8300/irq.h --- reference/include/asm-h8300/irq.h 2004-04-07 14:54:32.000000000 -0700 +++ current/include/asm-h8300/irq.h 
2004-04-08 15:10:24.000000000 -0700 @@ -68,4 +68,8 @@ extern void disable_irq(unsigned int); #define enable_irq_nosync(x) enable_irq(x) #define disable_irq_nosync(x) disable_irq(x) +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _H8300_IRQ_H_ */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-h8300/pgtable.h current/include/asm-h8300/pgtable.h --- reference/include/asm-h8300/pgtable.h 2003-10-01 11:35:30.000000000 -0700 +++ current/include/asm-h8300/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -7,8 +7,6 @@ #include #include -typedef pte_t *pte_addr_t; - #define pgd_present(pgd) (1) /* pages are always present on NO_MM */ #define pgd_none(pgd) (0) #define pgd_bad(pgd) (0) diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/a.out.h current/include/asm-i386/a.out.h --- reference/include/asm-i386/a.out.h 2002-12-09 18:45:54.000000000 -0800 +++ current/include/asm-i386/a.out.h 2004-04-09 11:53:04.000000000 -0700 @@ -19,7 +19,16 @@ struct exec #ifdef __KERNEL__ +/* + * Typical ELF load address is 0x8048000, which is 128MB + 288KB. + * Shoving the stack very close to it lets smaller programs fit in + * a single pagetable page's worth of virtualspace. 
+ */ +#ifdef CONFIG_MMAP_TOPDOWN +#define STACK_TOP ((128 << 20) + (256 << 10)) +#else #define STACK_TOP TASK_SIZE +#endif #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/apic.h current/include/asm-i386/apic.h --- reference/include/asm-i386/apic.h 2004-02-18 14:57:17.000000000 -0800 +++ current/include/asm-i386/apic.h 2004-04-09 13:23:20.000000000 -0700 @@ -99,6 +99,9 @@ extern unsigned int nmi_watchdog; #define NMI_LOCAL_APIC 2 #define NMI_INVALID 3 +extern void stop_apics(void); +#else +static inline void stop_apics(void) { } #endif /* CONFIG_X86_LOCAL_APIC */ #endif /* __ASM_APIC_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/apicdef.h current/include/asm-i386/apicdef.h --- reference/include/asm-i386/apicdef.h 2003-06-05 14:56:02.000000000 -0700 +++ current/include/asm-i386/apicdef.h 2004-04-09 13:23:20.000000000 -0700 @@ -86,6 +86,7 @@ #define APIC_LVT_REMOTE_IRR (1<<14) #define APIC_INPUT_POLARITY (1<<13) #define APIC_SEND_PENDING (1<<12) +#define APIC_MODE_MASK 0x700 #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) #define APIC_MODE_FIXED 0x0 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/atomic.h current/include/asm-i386/atomic.h --- reference/include/asm-i386/atomic.h 2004-02-18 14:57:17.000000000 -0800 +++ current/include/asm-i386/atomic.h 2004-04-09 11:53:02.000000000 -0700 @@ -55,6 +55,17 @@ static __inline__ void atomic_add(int i, :"ir" (i), "m" (v->counter)); } +/* Like the above but also returns the result */ +static __inline__ int atomic_add_return(int i, atomic_t *v) +{ + register int oldval; + __asm__ __volatile__( + LOCK "xaddl %2,%0" + :"=m" (v->counter), "=r" (oldval) + :"1" (i), "m" (v->counter) : "memory"); + return oldval + i; +} + /** * atomic_sub - subtract the atomic variable * @i: integer value to subtract diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/atomic_kmap.h 
current/include/asm-i386/atomic_kmap.h --- reference/include/asm-i386/atomic_kmap.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-i386/atomic_kmap.h 2004-04-09 11:53:00.000000000 -0700 @@ -0,0 +1,95 @@ +/* + * atomic_kmap.h: temporary virtual kernel memory mappings + * + * Copyright (C) 2003 Ingo Molnar + */ + +#ifndef _ASM_ATOMIC_KMAP_H +#define _ASM_ATOMIC_KMAP_H + +#ifdef __KERNEL__ + +#include +#include + +#ifdef CONFIG_DEBUG_HIGHMEM +#define HIGHMEM_DEBUG 1 +#else +#define HIGHMEM_DEBUG 0 +#endif + +extern pte_t *kmap_pte; +#define kmap_prot PAGE_KERNEL + +#define PKMAP_BASE (0xff000000UL) +#define NR_SHARED_PMDS ((0xffffffff-PKMAP_BASE+1)/PMD_SIZE) + +static inline unsigned long __kmap_atomic_vaddr(enum km_type type) +{ + enum fixed_addresses idx; + + idx = type + KM_TYPE_NR*smp_processor_id(); + return __fix_to_virt(FIX_KMAP_BEGIN + idx); +} + +static inline void *__kmap_atomic_noflush(struct page *page, enum km_type type) +{ + enum fixed_addresses idx; + unsigned long vaddr; + + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + /* + * NOTE: entries that rely on some secondary TLB-flush + * effect must not be global: + */ + set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL)); + + return (void*) vaddr; +} + +static inline void *__kmap_atomic(struct page *page, enum km_type type) +{ + enum fixed_addresses idx; + unsigned long vaddr; + + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +#if HIGHMEM_DEBUG + BUG_ON(!pte_none(*(kmap_pte-idx))); +#else + /* + * Performance optimization - do not flush if the new + * pte is the same as the old one: + */ + if (pte_val(*(kmap_pte-idx)) == pte_val(mk_pte(page, kmap_prot))) + return (void *) vaddr; +#endif + set_pte(kmap_pte-idx, mk_pte(page, kmap_prot)); + __flush_tlb_one(vaddr); + + return (void*) vaddr; +} + +static inline void __kunmap_atomic(void *kvaddr, enum km_type type) +{ +#if HIGHMEM_DEBUG + unsigned long vaddr = 
(unsigned long) kvaddr & PAGE_MASK; + enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); + + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx)); + /* + * force other mappings to Oops if they'll try to access + * this pte without first remap it + */ + pte_clear(kmap_pte-idx); + __flush_tlb_one(vaddr); +#endif +} + +#define __kunmap_atomic_type(type) \ + __kunmap_atomic((void *)__kmap_atomic_vaddr(type), (type)) + +#endif /* __KERNEL__ */ + +#endif /* _ASM_ATOMIC_KMAP_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/bugs.h current/include/asm-i386/bugs.h --- reference/include/asm-i386/bugs.h 2003-10-01 11:47:09.000000000 -0700 +++ current/include/asm-i386/bugs.h 2004-04-08 15:10:20.000000000 -0700 @@ -1,11 +1,11 @@ /* * include/asm-i386/bugs.h * - * Copyright (C) 1994 Linus Torvalds + * Copyright (C) 1994 Linus Torvalds * * Cyrix stuff, June 1998 by: * - Rafael R. Reilova (moved everything from head.S), - * + * * - Channing Corn (tests & fixes), * - Andrew D. Balsa (code cleanup). * @@ -25,7 +25,20 @@ #include #include #include - +#ifdef CONFIG_KGDB +/* + * Provied the command line "gdb" initial break + */ +int __init kgdb_initial_break(char * str) +{ + if (*str == '\0'){ + breakpoint(); + return 1; + } + return 0; +} +__setup("gdb",kgdb_initial_break); +#endif static int __init no_halt(char *s) { boot_cpu_data.hlt_works_ok = 0; @@ -140,7 +153,7 @@ static void __init check_popad(void) : "ecx", "edi" ); /* If this fails, it means that any user program may lock the CPU hard. Too bad. 
*/ if (res != 12345678) printk( "Buggy.\n" ); - else printk( "OK.\n" ); + else printk( "OK.\n" ); #endif } diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/checksum.h current/include/asm-i386/checksum.h --- reference/include/asm-i386/checksum.h 2003-11-24 16:12:32.000000000 -0800 +++ current/include/asm-i386/checksum.h 2004-04-09 11:53:00.000000000 -0700 @@ -25,7 +25,7 @@ asmlinkage unsigned int csum_partial(con * better 64-bit) boundary */ -asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum, +asmlinkage unsigned int direct_csum_partial_copy_generic( const char *src, char *dst, int len, int sum, int *src_err_ptr, int *dst_err_ptr); /* @@ -39,14 +39,19 @@ static __inline__ unsigned int csum_partial_copy_nocheck ( const char *src, char *dst, int len, int sum) { - return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); + /* + * The direct function is OK for kernel-space => kernel-space copies: + */ + return direct_csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); } static __inline__ unsigned int csum_partial_copy_from_user ( const char *src, char *dst, int len, int sum, int *err_ptr) { - return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL); + if (copy_from_user(dst, src, len)) + *err_ptr = -EFAULT; + return csum_partial(dst, len, sum); } /* @@ -172,11 +177,26 @@ static __inline__ unsigned short int csu * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -static __inline__ unsigned int csum_and_copy_to_user(const char *src, char *dst, +static __inline__ unsigned int direct_csum_and_copy_to_user(const char *src, char *dst, int len, int sum, int *err_ptr) { if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr); + return direct_csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr); + + if (len) + *err_ptr = -EFAULT; + + return -1; /* invalid checksum */ +} + +static __inline__ unsigned int 
csum_and_copy_to_user(const char *src, char *dst, + int len, int sum, int *err_ptr) +{ + if (access_ok(VERIFY_WRITE, dst, len)) { + if (copy_to_user(dst, src, len)) + *err_ptr = -EFAULT; + return csum_partial(src, len, sum); + } if (len) *err_ptr = -EFAULT; diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/desc.h current/include/asm-i386/desc.h --- reference/include/asm-i386/desc.h 2003-02-27 11:17:19.000000000 -0800 +++ current/include/asm-i386/desc.h 2004-04-09 11:53:00.000000000 -0700 @@ -21,6 +21,13 @@ struct Xgt_desc_struct { extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS]; +extern void trap_init_virtual_IDT(void); +extern void trap_init_virtual_GDT(void); + +asmlinkage int system_call(void); +asmlinkage void lcall7(void); +asmlinkage void lcall27(void); + #define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8)) @@ -30,6 +37,7 @@ extern struct Xgt_desc_struct idt_descr, */ extern struct desc_struct default_ldt[]; extern void set_intr_gate(unsigned int irq, void * addr); +extern void set_trap_gate(unsigned int n, void *addr); #define _set_tssldt_desc(n,addr,limit,type) \ __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \ @@ -90,31 +98,8 @@ static inline void load_TLS(struct threa #undef C } -static inline void clear_LDT(void) -{ - int cpu = get_cpu(); - - set_ldt_desc(cpu, &default_ldt[0], 5); - load_LDT_desc(); - put_cpu(); -} - -/* - * load one particular LDT into the current CPU - */ -static inline void load_LDT_nolock(mm_context_t *pc, int cpu) -{ - void *segments = pc->ldt; - int count = pc->size; - - if (likely(!count)) { - segments = &default_ldt[0]; - count = 5; - } - - set_ldt_desc(cpu, segments, count); - load_LDT_desc(); -} +extern struct page *default_ldt_page; +extern void load_LDT_nolock(mm_context_t *pc, int cpu); static inline void load_LDT(mm_context_t *pc) { @@ -123,6 +108,6 @@ static inline void 
load_LDT(mm_context_t put_cpu(); } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLY__ */ #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/fixmap.h current/include/asm-i386/fixmap.h --- reference/include/asm-i386/fixmap.h 2004-03-11 14:35:15.000000000 -0800 +++ current/include/asm-i386/fixmap.h 2004-04-09 21:41:40.000000000 -0700 @@ -18,17 +18,16 @@ #include #include #include -#ifdef CONFIG_HIGHMEM #include #include -#endif +#include /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only - * in the boot process. We allocate these special addresses - * from the end of virtual memory (0xfffff000) backwards. + * in the boot process. We allocate these special addresses + * from the end of virtual memory (0xffffe000) backwards. * Also this lets us do fail-safe vmalloc(), we * can guarantee that these special addresses and * vmalloc()-ed addresses never overlap. @@ -41,11 +40,31 @@ * TLB entries of such buffers will not be flushed across * task switches. */ + +/* + * on UP currently we will have no trace of the fixmap mechanizm, + * no page table allocations, etc. This might change in the + * future, say framebuffers for the console driver(s) could be + * fix-mapped? 
+ */ enum fixed_addresses { FIX_HOLE, FIX_VSYSCALL, +#ifdef CONFIG_VSYSCALL_GTOD +#ifndef CONFIG_X86_4G + FIX_VSYSCALL_GTOD_PAD, +#endif /* !CONFIG_X86_4G */ + FIX_VSYSCALL_GTOD_LAST_PAGE, + FIX_VSYSCALL_GTOD_FIRST_PAGE = FIX_VSYSCALL_GTOD_LAST_PAGE + + VSYSCALL_GTOD_NUMPAGES - 1, +#ifdef CONFIG_X86_4G + FIX_VSYSCALL_GTOD_4GALIGN, +#endif /* CONFIG_X86_4G */ +#endif /* CONFIG_VSYSCALL_GTOD */ #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ +#else + FIX_VSTACK_HOLE_1, #endif #ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, @@ -57,16 +76,21 @@ enum fixed_addresses { FIX_LI_PCIA, /* Lithium PCI Bridge A */ FIX_LI_PCIB, /* Lithium PCI Bridge B */ #endif -#ifdef CONFIG_X86_F00F_BUG - FIX_F00F_IDT, /* Virtual mapping for IDT */ -#endif + FIX_IDT, + FIX_GDT_1, + FIX_GDT_0, + FIX_TSS_3, + FIX_TSS_2, + FIX_TSS_1, + FIX_TSS_0, + FIX_ENTRY_TRAMPOLINE_1, + FIX_ENTRY_TRAMPOLINE_0, #ifdef CONFIG_X86_CYCLONE_TIMER FIX_CYCLONE_TIMER, /*cyclone timer register*/ + FIX_VSTACK_HOLE_2, #endif -#ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, -#endif #ifdef CONFIG_ACPI_BOOT FIX_ACPI_BEGIN, FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, @@ -98,12 +122,15 @@ extern void __set_fixmap (enum fixed_add __set_fixmap(idx, 0, __pgprot(0)) /* - * used by vmalloc.c. + * used by vmalloc.c and various other places. * * Leave one empty page between vmalloc'ed areas and * the start of the fixmap. + * + * IMPORTANT: we have to align FIXADDR_TOP so that the virtual stack + * is THREAD_SIZE aligned. 
*/ -#define FIXADDR_TOP (0xfffff000UL) +#define FIXADDR_TOP (0xffffe000UL & ~(THREAD_SIZE-1)) #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/highmem.h current/include/asm-i386/highmem.h --- reference/include/asm-i386/highmem.h 2003-10-14 15:50:32.000000000 -0700 +++ current/include/asm-i386/highmem.h 2004-04-09 11:53:00.000000000 -0700 @@ -25,26 +25,19 @@ #include #include #include +#include /* declarations for highmem.c */ extern unsigned long highstart_pfn, highend_pfn; -extern pte_t *kmap_pte; -extern pgprot_t kmap_prot; extern pte_t *pkmap_page_table; - -extern void kmap_init(void); +extern void kmap_init(void) __init; /* * Right now we initialize only a single pte table. It can be extended * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. */ -#if NR_CPUS <= 32 -#define PKMAP_BASE (0xff800000UL) -#else -#define PKMAP_BASE (0xff600000UL) -#endif #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/ioctls.h current/include/asm-i386/ioctls.h --- reference/include/asm-i386/ioctls.h 2003-04-09 11:48:05.000000000 -0700 +++ current/include/asm-i386/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -49,6 +49,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define FIONCLEX 0x5450 #define FIOCLEX 0x5451 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/irq.h current/include/asm-i386/irq.h --- reference/include/asm-i386/irq.h 2004-01-15 10:41:17.000000000 -0800 +++ current/include/asm-i386/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -14,6 +14,7 @@ #include /* 
include comes from machine specific directory */ #include "irq_vectors.h" +#include static __inline__ int irq_canonicalize(int irq) { @@ -30,4 +31,28 @@ extern int can_request_irq(unsigned int, #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ #endif +#ifdef CONFIG_4KSTACKS +/* + * per-CPU IRQ handling contexts (thread information and stack) + */ +union irq_ctx { + struct thread_info tinfo; + u32 stack[THREAD_SIZE/sizeof(u32)]; +}; + +extern union irq_ctx *hardirq_ctx[NR_CPUS]; +extern union irq_ctx *softirq_ctx[NR_CPUS]; + +extern void irq_ctx_init(int cpu); + +#define __ARCH_HAS_DO_SOFTIRQ +#else +#define irq_ctx_init(cpu) do { ; } while (0) +#endif + +struct irqaction; +struct pt_regs; +asmlinkage int handle_IRQ_event(unsigned int, struct pt_regs *, + struct irqaction *); + #endif /* _ASM_IRQ_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/kexec.h current/include/asm-i386/kexec.h --- reference/include/asm-i386/kexec.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-i386/kexec.h 2004-04-09 13:23:20.000000000 -0700 @@ -0,0 +1,23 @@ +#ifndef _I386_KEXEC_H +#define _I386_KEXEC_H + +#include + +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. + * + * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct + * calculation for the amount of memory directly mappable into the + * kernel memory space. 
+ */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +#define KEXEC_REBOOT_CODE_SIZE 4096 + +#endif /* _I386_KEXEC_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/kgdb.h current/include/asm-i386/kgdb.h --- reference/include/asm-i386/kgdb.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-i386/kgdb.h 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,69 @@ +#ifndef __KGDB +#define __KGDB + +/* + * This file should not include ANY others. This makes it usable + * most anywhere without the fear of include order or inclusion. + * Make it so! + * + * This file may be included all the time. It is only active if + * CONFIG_KGDB is defined, otherwise it stubs out all the macros + * and entry points. + */ +#if defined(CONFIG_KGDB) && !defined(__ASSEMBLY__) + +extern void breakpoint(void); +#define INIT_KGDB_INTS kgdb_enable_ints() + +#ifndef BREAKPOINT +#define BREAKPOINT asm(" int $3") +#endif + +extern void kgdb_schedule_breakpoint(void); +extern void kgdb_process_breakpoint(void); + +extern int kgdb_tty_hook(void); +extern int kgdb_eth_hook(void); +extern int kgdboe; + +/* + * GDB debug stub (or any debug stub) can point the 'linux_debug_hook' + * pointer to its routine and it will be entered as the first thing + * when a trap occurs. + * + * Return values are, at present, undefined. + * + * The debug hook routine does not necessarily return to its caller. + * It has the register image and thus may choose to resume execution + * anywhere it pleases. + */ +struct pt_regs; + +extern int kgdb_handle_exception(int trapno, + int signo, int err_code, struct pt_regs *regs); +extern int in_kgdb(struct pt_regs *regs); + +#ifdef CONFIG_KGDB_TS +void kgdb_tstamp(int line, char *source, int data0, int data1); +/* + * This is the time stamp function. 
The macro adds the source info and + * does a cast on the data to allow most any 32-bit value. + */ + +#define kgdb_ts(data0,data1) kgdb_tstamp(__LINE__,__FILE__,(int)data0,(int)data1) +#else +#define kgdb_ts(data0,data1) +#endif +#else /* CONFIG_KGDB && ! __ASSEMBLY__ ,stubs follow... */ +#ifndef BREAKPOINT +#define BREAKPOINT +#endif +#define kgdb_ts(data0,data1) +#define in_kgdb +#define kgdb_handle_exception +#define breakpoint +#define INIT_KGDB_INTS +#define kgdb_process_breakpoint() do {} while(0) + +#endif +#endif /* __KGDB */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/kgdb_local.h current/include/asm-i386/kgdb_local.h --- reference/include/asm-i386/kgdb_local.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-i386/kgdb_local.h 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,102 @@ +#ifndef __KGDB_LOCAL +#define ___KGDB_LOCAL +#include +#include +#include +#include +#include +#include +#include +#include + +#define PORT 0x3f8 +#ifdef CONFIG_KGDB_PORT +#undef PORT +#define PORT CONFIG_KGDB_PORT +#endif +#define IRQ 4 +#ifdef CONFIG_KGDB_IRQ +#undef IRQ +#define IRQ CONFIG_KGDB_IRQ +#endif +#define SB_CLOCK 1843200 +#define SB_BASE (SB_CLOCK/16) +#define SB_BAUD9600 SB_BASE/9600 +#define SB_BAUD192 SB_BASE/19200 +#define SB_BAUD384 SB_BASE/38400 +#define SB_BAUD576 SB_BASE/57600 +#define SB_BAUD1152 SB_BASE/115200 +#ifdef CONFIG_KGDB_9600BAUD +#define SB_BAUD SB_BAUD9600 +#endif +#ifdef CONFIG_KGDB_19200BAUD +#define SB_BAUD SB_BAUD192 +#endif +#ifdef CONFIG_KGDB_38400BAUD +#define SB_BAUD SB_BAUD384 +#endif +#ifdef CONFIG_KGDB_57600BAUD +#define SB_BAUD SB_BAUD576 +#endif +#ifdef CONFIG_KGDB_115200BAUD +#define SB_BAUD SB_BAUD1152 +#endif +#ifndef SB_BAUD +#define SB_BAUD SB_BAUD1152 /* Start with this if not given */ +#endif + +#ifndef CONFIG_X86_TSC +#undef rdtsc +#define rdtsc(a,b) if (a++ > 10000){a = 0; b++;} +#undef rdtscll +#define rdtscll(s) s++ +#endif + +#ifdef _raw_read_unlock /* must use a name that is 
"define"ed, not an inline */ +#undef spin_lock +#undef spin_trylock +#undef spin_unlock +#define spin_lock _raw_spin_lock +#define spin_trylock _raw_spin_trylock +#define spin_unlock _raw_spin_unlock +#else +#endif +#undef spin_unlock_wait +#define spin_unlock_wait(x) do { cpu_relax(); barrier();} \ + while(spin_is_locked(x)) + +#define SB_IER 1 +#define SB_MCR UART_MCR_OUT2 | UART_MCR_DTR | UART_MCR_RTS + +#define FLAGS 0 +#define SB_STATE { \ + magic: SSTATE_MAGIC, \ + baud_base: SB_BASE, \ + port: PORT, \ + irq: IRQ, \ + flags: FLAGS, \ + custom_divisor:SB_BAUD} +#define SB_INFO { \ + magic: SERIAL_MAGIC, \ + port: PORT,0,FLAGS, \ + state: &state, \ + tty: (struct tty_struct *)&state, \ + IER: SB_IER, \ + MCR: SB_MCR} +extern void putDebugChar(int); +/* RTAI support needs us to really stop/start interrupts */ + +#define kgdb_sti() __asm__ __volatile__("sti": : :"memory") +#define kgdb_cli() __asm__ __volatile__("cli": : :"memory") +#define kgdb_local_save_flags(x) __asm__ __volatile__(\ + "pushfl ; popl %0":"=g" (x): /* no input */) +#define kgdb_local_irq_restore(x) __asm__ __volatile__(\ + "pushl %0 ; popfl": \ + /* no output */ :"g" (x):"memory", "cc") +#define kgdb_local_irq_save(x) kgdb_local_save_flags(x); kgdb_cli() + +#ifdef CONFIG_SERIAL +extern void shutdown_for_kgdb(struct async_struct *info); +#endif +#define INIT_KDEBUG putDebugChar("+"); +#endif /* __KGDB_LOCAL */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/kmap_types.h current/include/asm-i386/kmap_types.h --- reference/include/asm-i386/kmap_types.h 2003-06-05 14:56:03.000000000 -0700 +++ current/include/asm-i386/kmap_types.h 2004-04-09 11:53:01.000000000 -0700 @@ -2,31 +2,38 @@ #define _ASM_KMAP_TYPES_H #include - -#ifdef CONFIG_DEBUG_HIGHMEM -# define D(n) __KM_FENCE_##n , -#else -# define D(n) -#endif +#include enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) 
KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_PTE2, -D(10) KM_IRQ0, -D(11) KM_IRQ1, -D(12) KM_SOFTIRQ0, -D(13) KM_SOFTIRQ1, -D(14) KM_TYPE_NR -}; - -#undef D + /* + * IMPORTANT: don't move these 3 entries, be wary when adding entries, + * the 4G/4G virtual stack must be THREAD_SIZE aligned on each cpu. + */ + KM_BOUNCE_READ, + KM_VSTACK_BASE, + KM_VSTACK_TOP = KM_VSTACK_BASE + STACK_PAGE_COUNT-1, + KM_LDT_PAGE15, + KM_LDT_PAGE0 = KM_LDT_PAGE15 + 16-1, + KM_USER_COPY, + KM_VSTACK_HOLE, + KM_SKB_SUNRPC_DATA, + KM_SKB_DATA_SOFTIRQ, + KM_USER0, + KM_USER1, + KM_BIO_SRC_IRQ, + KM_BIO_DST_IRQ, + KM_PTE0, + KM_PTE1, + KM_IRQ0, + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, + KM_FILLER, + /* + * Be wary when adding entries: + * the 4G/4G virtual stack must be THREAD_SIZE aligned on each cpu. + */ + KM_TYPE_NR +}; #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/lockmeter.h current/include/asm-i386/lockmeter.h --- reference/include/asm-i386/lockmeter.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-i386/lockmeter.h 2004-04-09 13:27:12.000000000 -0700 @@ -0,0 +1,123 @@ +/* + * Copyright (C) 1999,2000 Silicon Graphics, Inc. + * + * Written by John Hawkes (hawkes@sgi.com) + * Based on klstat.h by Jack Steiner (steiner@sgi.com) + * + * Modified by Ray Bryant (raybry@us.ibm.com) + * Changes Copyright (C) 2000 IBM, Inc. + * Added save of index in spinlock_t to improve efficiency + * of "hold" time reporting for spinlocks. + * Added support for hold time statistics for read and write + * locks. + * Moved machine dependent code here from include/lockmeter.h. 
+ * + */ + +#ifndef _I386_LOCKMETER_H +#define _I386_LOCKMETER_H + +#include +#include + +#include + +#ifdef __KERNEL__ +extern unsigned long cpu_khz; +#define CPU_CYCLE_FREQUENCY (cpu_khz * 1000) +#else +#define CPU_CYCLE_FREQUENCY 450000000 +#endif + +#define THIS_CPU_NUMBER smp_processor_id() + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) +#define local_irq_save(x) \ + __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory") + +#define local_irq_restore(x) \ + __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory") +#endif /* Linux version 2.2.x */ + +/* + * macros to cache and retrieve an index value inside of a spin lock + * these macros assume that there are less than 65536 simultaneous + * (read mode) holders of a rwlock. Not normally a problem!! + * we also assume that the hash table has less than 65535 entries. + */ +/* + * instrumented spinlock structure -- never used to allocate storage + * only used in macros below to overlay a spinlock_t + */ +typedef struct inst_spinlock_s { + /* remember, Intel is little endian */ + unsigned short lock; + unsigned short index; +} inst_spinlock_t; +#define PUT_INDEX(lock_ptr,indexv) ((inst_spinlock_t *)(lock_ptr))->index = indexv +#define GET_INDEX(lock_ptr) ((inst_spinlock_t *)(lock_ptr))->index + +/* + * macros to cache and retrieve an index value in a read/write lock + * as well as the cpu where a reader busy period started + * we use the 2nd word (the debug word) for this, so require the + * debug word to be present + */ +/* + * instrumented rwlock structure -- never used to allocate storage + * only used in macros below to overlay a rwlock_t + */ +typedef struct inst_rwlock_s { + volatile int lock; + unsigned short index; + unsigned short cpu; +} inst_rwlock_t; +#define PUT_RWINDEX(rwlock_ptr,indexv) ((inst_rwlock_t *)(rwlock_ptr))->index = indexv +#define GET_RWINDEX(rwlock_ptr) ((inst_rwlock_t *)(rwlock_ptr))->index +#define PUT_RW_CPU(rwlock_ptr,cpuv) 
((inst_rwlock_t *)(rwlock_ptr))->cpu = cpuv +#define GET_RW_CPU(rwlock_ptr) ((inst_rwlock_t *)(rwlock_ptr))->cpu + +/* + * return the number of readers for a rwlock_t + */ +#define RWLOCK_READERS(rwlock_ptr) rwlock_readers(rwlock_ptr) + +extern inline int rwlock_readers(rwlock_t *rwlock_ptr) +{ + int tmp = (int) rwlock_ptr->lock; + /* read and write lock attempts may cause the lock value to temporarily */ + /* be negative. Until it is >= 0 we know nothing (i. e. can't tell if */ + /* is -1 because it was write locked and somebody tried to read lock it */ + /* or if it is -1 because it was read locked and somebody tried to write*/ + /* lock it. ........................................................... */ + do { + tmp = (int) rwlock_ptr->lock; + } while (tmp < 0); + if (tmp == 0) return(0); + else return(RW_LOCK_BIAS-tmp); +} + +/* + * return true if rwlock is write locked + * (note that other lock attempts can cause the lock value to be negative) + */ +#define RWLOCK_IS_WRITE_LOCKED(rwlock_ptr) ((rwlock_ptr)->lock <= 0) +#define IABS(x) ((x) > 0 ? 
(x) : -(x)) +#define RWLOCK_IS_READ_LOCKED(rwlock_ptr) ((IABS((rwlock_ptr)->lock) % RW_LOCK_BIAS) != 0) + +/* this is a lot of typing just to get gcc to emit "rdtsc" */ +static inline long long get_cycles64 (void) +{ + union longlong_u { + long long intlong; + struct intint_s { + uint32_t eax; + uint32_t edx; + } intint; + } longlong; + + rdtsc(longlong.intint.eax,longlong.intint.edx); + return longlong.intlong; +} + +#endif /* _I386_LOCKMETER_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/mach-default/irq_vectors.h current/include/asm-i386/mach-default/irq_vectors.h --- reference/include/asm-i386/mach-default/irq_vectors.h 2004-01-15 10:41:17.000000000 -0800 +++ current/include/asm-i386/mach-default/irq_vectors.h 2004-04-09 21:41:39.000000000 -0700 @@ -84,22 +84,7 @@ */ #define NR_VECTORS 256 -#ifdef CONFIG_PCI_USE_VECTOR -#define NR_IRQS FIRST_SYSTEM_VECTOR -#define NR_IRQ_VECTORS NR_IRQS -#else -#ifdef CONFIG_X86_IO_APIC -#define NR_IRQS 224 -# if (224 >= 32 * NR_CPUS) -# define NR_IRQ_VECTORS NR_IRQS -# else -# define NR_IRQ_VECTORS (32 * NR_CPUS) -# endif -#else -#define NR_IRQS 16 -#define NR_IRQ_VECTORS NR_IRQS -#endif -#endif +#include "irq_vectors_limits.h" #define FPU_IRQ 13 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/mach-default/irq_vectors_limits.h current/include/asm-i386/mach-default/irq_vectors_limits.h --- reference/include/asm-i386/mach-default/irq_vectors_limits.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-i386/mach-default/irq_vectors_limits.h 2004-04-09 21:41:39.000000000 -0700 @@ -0,0 +1,21 @@ +#ifndef _ASM_IRQ_VECTORS_LIMITS_H +#define _ASM_IRQ_VECTORS_LIMITS_H + +#ifdef CONFIG_PCI_USE_VECTOR +#define NR_IRQS FIRST_SYSTEM_VECTOR +#define NR_IRQ_VECTORS NR_IRQS +#else +#ifdef CONFIG_X86_IO_APIC +#define NR_IRQS 224 +# if (224 >= 32 * NR_CPUS) +# define NR_IRQ_VECTORS NR_IRQS +# else +# define NR_IRQ_VECTORS (32 * NR_CPUS) +# endif +#else +#define NR_IRQS 16 +#define 
NR_IRQ_VECTORS NR_IRQS +#endif +#endif + +#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/mach-generic/irq_vectors_limits.h current/include/asm-i386/mach-generic/irq_vectors_limits.h --- reference/include/asm-i386/mach-generic/irq_vectors_limits.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-i386/mach-generic/irq_vectors_limits.h 2004-04-09 21:41:39.000000000 -0700 @@ -0,0 +1,14 @@ +#ifndef _ASM_IRQ_VECTORS_LIMITS_H +#define _ASM_IRQ_VECTORS_LIMITS_H + +/* + * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, + * even with uni-proc kernels, so use a big array. + * + * This value should be the same in both the generic and summit subarches. + * Change one, change 'em both. + */ +#define NR_IRQS 224 +#define NR_IRQ_VECTORS 1024 + +#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/mach-generic/mach_mpspec.h current/include/asm-i386/mach-generic/mach_mpspec.h --- reference/include/asm-i386/mach-generic/mach_mpspec.h 2003-06-24 16:41:44.000000000 -0700 +++ current/include/asm-i386/mach-generic/mach_mpspec.h 2004-04-09 21:41:41.000000000 -0700 @@ -8,6 +8,8 @@ #define MAX_IRQ_SOURCES 256 -#define MAX_MP_BUSSES 32 +/* Summit or generic (i.e. installer) kernels need lots of bus entries. */ +/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ +#define MAX_MP_BUSSES 260 #endif /* __ASM_MACH_MPSPEC_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/mach-summit/irq_vectors_limits.h current/include/asm-i386/mach-summit/irq_vectors_limits.h --- reference/include/asm-i386/mach-summit/irq_vectors_limits.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-i386/mach-summit/irq_vectors_limits.h 2004-04-09 21:41:39.000000000 -0700 @@ -0,0 +1,14 @@ +#ifndef _ASM_IRQ_VECTORS_LIMITS_H +#define _ASM_IRQ_VECTORS_LIMITS_H + +/* + * For Summit or generic (i.e. 
installer) kernels, we have lots of I/O APICs, + * even with uni-proc kernels, so use a big array. + * + * This value should be the same in both the generic and summit subarches. + * Change one, change 'em both. + */ +#define NR_IRQS 224 +#define NR_IRQ_VECTORS 1024 + +#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/mach-summit/mach_mpspec.h current/include/asm-i386/mach-summit/mach_mpspec.h --- reference/include/asm-i386/mach-summit/mach_mpspec.h 2003-06-24 16:41:44.000000000 -0700 +++ current/include/asm-i386/mach-summit/mach_mpspec.h 2004-04-09 21:41:41.000000000 -0700 @@ -8,6 +8,7 @@ #define MAX_IRQ_SOURCES 256 -#define MAX_MP_BUSSES 32 +/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ +#define MAX_MP_BUSSES 260 #endif /* __ASM_MACH_MPSPEC_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/mman.h current/include/asm-i386/mman.h --- reference/include/asm-i386/mman.h 2003-10-14 15:50:32.000000000 -0700 +++ current/include/asm-i386/mman.h 2004-04-09 21:41:39.000000000 -0700 @@ -16,6 +16,7 @@ #define MAP_ANONYMOUS 0x20 /* don't use a file */ #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ +#define MAP_HUGETLB 0x0400 /* Backed by hugetlb pages */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ #define MAP_LOCKED 0x2000 /* pages are locked */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/mmu.h current/include/asm-i386/mmu.h --- reference/include/asm-i386/mmu.h 2002-12-09 18:46:11.000000000 -0800 +++ current/include/asm-i386/mmu.h 2004-04-09 11:53:00.000000000 -0700 @@ -8,10 +8,13 @@ * * cpu_vm_mask is used to optimize ldt flushing. 
*/ + +#define MAX_LDT_PAGES 16 + typedef struct { int size; struct semaphore sem; - void *ldt; + struct page *ldt_pages[MAX_LDT_PAGES]; } mm_context_t; #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/mmu_context.h current/include/asm-i386/mmu_context.h --- reference/include/asm-i386/mmu_context.h 2003-10-01 11:41:15.000000000 -0700 +++ current/include/asm-i386/mmu_context.h 2004-04-09 11:53:00.000000000 -0700 @@ -29,6 +29,10 @@ static inline void switch_mm(struct mm_s { int cpu = smp_processor_id(); +#ifdef CONFIG_X86_SWITCH_PAGETABLES + if (tsk->mm) + tsk->thread_info->user_pgd = (void *)__pa(tsk->mm->pgd); +#endif if (likely(prev != next)) { /* stop flush ipis for the previous mm */ cpu_clear(cpu, prev->cpu_vm_mask); @@ -39,12 +43,14 @@ static inline void switch_mm(struct mm_s cpu_set(cpu, next->cpu_vm_mask); /* Re-load page tables */ +#if !defined(CONFIG_X86_SWITCH_PAGETABLES) load_cr3(next->pgd); +#endif /* * load the LDT, if the LDT is different: */ - if (unlikely(prev->context.ldt != next->context.ldt)) + if (unlikely(prev->context.size + next->context.size)) load_LDT_nolock(&next->context, cpu); } #ifdef CONFIG_SMP @@ -56,7 +62,9 @@ static inline void switch_mm(struct mm_s /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload %cr3. 
*/ +#if !defined(CONFIG_X86_SWITCH_PAGETABLES) load_cr3(next->pgd); +#endif load_LDT_nolock(&next->context, cpu); } } @@ -67,6 +75,6 @@ static inline void switch_mm(struct mm_s asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)) #define activate_mm(prev, next) \ - switch_mm((prev),(next),NULL) + switch_mm((prev),(next),current) #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/module.h current/include/asm-i386/module.h --- reference/include/asm-i386/module.h 2004-04-07 14:54:32.000000000 -0700 +++ current/include/asm-i386/module.h 2004-04-08 15:10:24.000000000 -0700 @@ -60,6 +60,12 @@ struct mod_arch_specific #define MODULE_REGPARM "" #endif -#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM +#ifdef CONFIG_4KSTACKS +#define MODULE_STACKSIZE "4KSTACKS " +#else +#define MODULE_STACKSIZE "" +#endif + +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE #endif /* _ASM_I386_MODULE_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/page.h current/include/asm-i386/page.h --- reference/include/asm-i386/page.h 2003-04-09 11:48:05.000000000 -0700 +++ current/include/asm-i386/page.h 2004-04-09 11:53:00.000000000 -0700 @@ -1,6 +1,8 @@ #ifndef _I386_PAGE_H #define _I386_PAGE_H +#include + /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) @@ -9,11 +11,10 @@ #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - #include +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ #ifdef CONFIG_X86_USE_3DNOW #include @@ -88,8 +89,19 @@ typedef struct { unsigned long pgprot; } * * If you want more physical memory than this then see the CONFIG_HIGHMEM4G * and CONFIG_HIGHMEM64G options in the kernel configuration. + * + * Note: on PAE the kernel must never go below 32 MB, we use the + * first 8 entries of the 2-level boot pgd for PAE magic. 
*/ +#ifdef CONFIG_X86_4G_VM_LAYOUT +#define __PAGE_OFFSET (0x02000000) +#define TASK_SIZE (0xff000000) +#else +#define __PAGE_OFFSET (0xc0000000) +#define TASK_SIZE (0xc0000000) +#endif + /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. @@ -114,16 +126,10 @@ static __inline__ int get_order(unsigned #endif /* __ASSEMBLY__ */ -#ifdef __ASSEMBLY__ -#define __PAGE_OFFSET (0xC0000000) -#else -#define __PAGE_OFFSET (0xC0000000UL) -#endif - - #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) -#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) +#define __MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) +#define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE)) #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/param.h current/include/asm-i386/param.h --- reference/include/asm-i386/param.h 2004-03-11 14:35:15.000000000 -0800 +++ current/include/asm-i386/param.h 2004-04-08 15:10:24.000000000 -0700 @@ -2,10 +2,19 @@ #define _ASMi386_PARAM_H #ifdef __KERNEL__ -# define HZ 1000 /* Internal kernel timer frequency */ -# define USER_HZ 100 /* .. some user interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ -#endif + #include + + #ifdef CONFIG_1000HZ + #define HZ 1000 /* Internal kernel timer frequency */ + #else + #define HZ 100 + #endif + + #define USER_HZ 100 /* .. 
some user interfaces are in "ticks" */ + #define CLOCKS_PER_SEC (USER_HZ) /* like times() */ + #define JIFFIES_TO_MSEC(x) (x) + #define MSEC_TO_JIFFIES(x) (x) +#endif /* __KERNEL__ */ #ifndef HZ #define HZ 100 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/pgtable.h current/include/asm-i386/pgtable.h --- reference/include/asm-i386/pgtable.h 2004-04-07 14:54:32.000000000 -0700 +++ current/include/asm-i386/pgtable.h 2004-04-09 21:41:41.000000000 -0700 @@ -25,6 +25,10 @@ #include #include +#ifdef CONFIG_MMAP_TOPDOWN +#define HAVE_ARCH_UNMAPPED_AREA +#endif + /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. @@ -32,16 +36,23 @@ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) extern unsigned long empty_zero_page[1024]; extern pgd_t swapper_pg_dir[1024]; -extern kmem_cache_t *pgd_cache; -extern kmem_cache_t *pmd_cache; +extern kmem_cache_t *pgd_cache, *pmd_cache, *kpmd_cache; extern spinlock_t pgd_lock; extern struct list_head pgd_list; void pmd_ctor(void *, kmem_cache_t *, unsigned long); +void kpmd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_dtor(void *, kmem_cache_t *, unsigned long); void pgtable_cache_init(void); void paging_init(void); +void setup_identity_mappings(pgd_t *pgd_base, unsigned long start, unsigned long end); + +/* + * The size of the low 1:1 mappings we use during bootup, + * SMP-boot and ACPI-sleep: + */ +#define LOW_MAPPINGS_SIZE (16*1024*1024) #endif /* !__ASSEMBLY__ */ @@ -51,6 +62,11 @@ void paging_init(void); * newer 3-level PAE-mode page tables. 
*/ #ifndef __ASSEMBLY__ + +extern void set_system_gate(unsigned int n, void *addr); +extern void init_entry_mappings(void); +extern void entry_trampoline_setup(void); + #ifdef CONFIG_X86_PAE # include #else @@ -63,7 +79,12 @@ void paging_init(void); #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) +#if defined(CONFIG_X86_PAE) && defined(CONFIG_X86_4G_VM_LAYOUT) +# define USER_PTRS_PER_PGD 4 +#else +# define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) + ((TASK_SIZE % PGDIR_SIZE) + PGDIR_SIZE-1)/PGDIR_SIZE) +#endif + #define FIRST_USER_PGD_NR 0 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) @@ -83,8 +104,8 @@ void paging_init(void); * area for the same reason. ;) */ #define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \ - ~(VMALLOC_OFFSET-1)) +#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \ + 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) #else @@ -137,11 +158,15 @@ extern unsigned long __PAGE_KERNEL; #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) +#define __PAGE_KERNEL_VSYSCALL \ + (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_KERNEL __pgprot(__PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) +#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL) +#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL|(__PAGE_KERNEL_RO | _PAGE_PCD)) /* * The i386 can't do page protection for execute, and considers that @@ -233,6 +258,7 @@ static inline void ptep_mkdirty(pte_t *p #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) #define mk_pte_huge(entry) 
((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE) +#define mk_pte_phys(physpage, pgprot) pfn_pte((physpage) >> PAGE_SHIFT, pgprot) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { @@ -308,18 +334,6 @@ static inline pte_t pte_modify(pte_t pte #define pte_unmap_nested(pte) do { } while (0) #endif -#if defined(CONFIG_HIGHPTE) && defined(CONFIG_HIGHMEM4G) -typedef u32 pte_addr_t; -#endif - -#if defined(CONFIG_HIGHPTE) && defined(CONFIG_HIGHMEM64G) -typedef u64 pte_addr_t; -#endif - -#if !defined(CONFIG_HIGHPTE) -typedef pte_t *pte_addr_t; -#endif - /* * The i386 doesn't have any external MMU info: the kernel page * tables contain all the necessary information. diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/processor.h current/include/asm-i386/processor.h --- reference/include/asm-i386/processor.h 2004-03-11 14:35:15.000000000 -0800 +++ current/include/asm-i386/processor.h 2004-04-09 13:27:12.000000000 -0700 @@ -291,15 +291,11 @@ extern unsigned int machine_submodel_id; extern unsigned int BIOS_revision; extern unsigned int mca_pentium_flag; -/* - * User space process size: 3GB (default). - */ -#define TASK_SIZE (PAGE_OFFSET) - /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) +#define __TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) +#define TASK_UNMAPPED_BASE (current->map_base) /* * Size of io_bitmap, covering ports 0 to 0x3ff. @@ -403,9 +399,16 @@ struct tss_struct { unsigned long stack[64]; } __attribute__((packed)); +#ifdef CONFIG_4KSTACKS +#define STACK_PAGE_COUNT (4096/PAGE_SIZE) +#else +#define STACK_PAGE_COUNT (8192/PAGE_SIZE) /* THREAD_SIZE/PAGE_SIZE */ +#endif + struct thread_struct { /* cached TLS descriptors. 
*/ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; + void *stack_page[STACK_PAGE_COUNT]; unsigned long esp0; unsigned long sysenter_cs; unsigned long eip; @@ -449,7 +452,8 @@ struct thread_struct { .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ } -static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) +static inline void +load_esp0(struct tss_struct *tss, struct thread_struct *thread) { tss->esp0 = thread->esp0; /* This can only happen when SEP is enabled, no need to test "SEP"arately */ @@ -485,6 +489,23 @@ extern void prepare_to_copy(struct task_ */ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); +#ifdef CONFIG_X86_HIGH_ENTRY +#define virtual_esp0(tsk) \ + ((unsigned long)(tsk)->thread_info->virtual_stack + ((tsk)->thread.esp0 - (unsigned long)(tsk)->thread_info->real_stack)) +#else +# define virtual_esp0(tsk) ((tsk)->thread.esp0) +#endif + +#define load_virtual_esp0(tss, task) \ + do { \ + tss->esp0 = virtual_esp0(task); \ + if (likely(cpu_has_sep) && unlikely(tss->ss1 != task->thread.sysenter_cs)) { \ + tss->ss1 = task->thread.sysenter_cs; \ + wrmsr(MSR_IA32_SYSENTER_CS, \ + task->thread.sysenter_cs, 0); \ + } \ + } while (0) + extern unsigned long thread_saved_pc(struct task_struct *tsk); void show_trace(struct task_struct *task, unsigned long *stack); @@ -646,4 +667,11 @@ extern inline void prefetchw(const void extern void select_idle_routine(const struct cpuinfo_x86 *c); +#ifdef CONFIG_SMP +# define ARCH_HAS_SCHED_DOMAIN +# ifdef CONFIG_SCHED_SMT +# define ARCH_HAS_SCHED_WAKE_IDLE +# endif +#endif + #endif /* __ASM_I386_PROCESSOR_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/rmap.h current/include/asm-i386/rmap.h --- reference/include/asm-i386/rmap.h 2002-12-09 18:46:11.000000000 -0800 +++ current/include/asm-i386/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,21 +0,0 @@ -#ifndef _I386_RMAP_H -#define _I386_RMAP_H - -/* nothing to see, move along */ -#include - 
-#ifdef CONFIG_HIGHPTE -static inline pte_t *rmap_ptep_map(pte_addr_t pte_paddr) -{ - unsigned long pfn = (unsigned long)(pte_paddr >> PAGE_SHIFT); - unsigned long off = ((unsigned long)pte_paddr) & ~PAGE_MASK; - return (pte_t *)((char *)kmap_atomic(pfn_to_page(pfn), KM_PTE2) + off); -} - -static inline void rmap_ptep_unmap(pte_t *pte) -{ - kunmap_atomic(pte, KM_PTE2); -} -#endif - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/rwlock.h current/include/asm-i386/rwlock.h --- reference/include/asm-i386/rwlock.h 2002-12-09 18:46:25.000000000 -0800 +++ current/include/asm-i386/rwlock.h 2004-04-08 15:10:21.000000000 -0700 @@ -20,28 +20,52 @@ #define RW_LOCK_BIAS 0x01000000 #define RW_LOCK_BIAS_STR "0x01000000" -#define __build_read_lock_ptr(rw, helper) \ - asm volatile(LOCK "subl $1,(%0)\n\t" \ - "js 2f\n" \ - "1:\n" \ - LOCK_SECTION_START("") \ - "2:\tcall " helper "\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END \ - ::"a" (rw) : "memory") - -#define __build_read_lock_const(rw, helper) \ - asm volatile(LOCK "subl $1,%0\n\t" \ - "js 2f\n" \ - "1:\n" \ - LOCK_SECTION_START("") \ - "2:\tpushl %%eax\n\t" \ - "leal %0,%%eax\n\t" \ - "call " helper "\n\t" \ - "popl %%eax\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END \ - :"=m" (*(volatile int *)rw) : : "memory") +#ifdef CONFIG_SPINLINE + + #define __build_read_lock_ptr(rw, helper) \ + asm volatile(LOCK "subl $1,(%0)\n\t" \ + "jns 1f\n\t" \ + "call " helper "\n\t" \ + "1:\t" \ + ::"a" (rw) : "memory") + + #define __build_read_lock_const(rw, helper) \ + asm volatile(LOCK "subl $1,%0\n\t" \ + "jns 1f\n\t" \ + "pushl %%eax\n\t" \ + "leal %0,%%eax\n\t" \ + "call " helper "\n\t" \ + "popl %%eax\n\t" \ + "1:\t" \ + :"=m" (*(volatile int *)rw) : : "memory") + +#else /* !CONFIG_SPINLINE */ + + #define __build_read_lock_ptr(rw, helper) \ + asm volatile(LOCK "subl $1,(%0)\n\t" \ + "js 2f\n" \ + "1:\n" \ + LOCK_SECTION_START("") \ + "2:\tcall " helper "\n\t" \ + "jmp 1b\n" \ + LOCK_SECTION_END \ + ::"a" (rw) : "memory") 
+ + #define __build_read_lock_const(rw, helper) \ + asm volatile(LOCK "subl $1,%0\n\t" \ + "js 2f\n" \ + "1:\n" \ + LOCK_SECTION_START("") \ + "2:\tpushl %%eax\n\t" \ + "leal %0,%%eax\n\t" \ + "call " helper "\n\t" \ + "popl %%eax\n\t" \ + "jmp 1b\n" \ + LOCK_SECTION_END \ + :"=m" (*(volatile int *)rw) : : "memory") + +#endif /* CONFIG_SPINLINE */ + #define __build_read_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ @@ -50,28 +74,51 @@ __build_read_lock_ptr(rw, helper); \ } while (0) -#define __build_write_lock_ptr(rw, helper) \ - asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ - "jnz 2f\n" \ - "1:\n" \ - LOCK_SECTION_START("") \ - "2:\tcall " helper "\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END \ - ::"a" (rw) : "memory") - -#define __build_write_lock_const(rw, helper) \ - asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ - "jnz 2f\n" \ - "1:\n" \ - LOCK_SECTION_START("") \ - "2:\tpushl %%eax\n\t" \ - "leal %0,%%eax\n\t" \ - "call " helper "\n\t" \ - "popl %%eax\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END \ - :"=m" (*(volatile int *)rw) : : "memory") +#ifdef CONFIG_SPINLINE + + #define __build_write_lock_ptr(rw, helper) \ + asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ + "jz 1f\n\t" \ + "call " helper "\n\t" \ + "1:\n" \ + ::"a" (rw) : "memory") + + #define __build_write_lock_const(rw, helper) \ + asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ + "jz 1f\n\t" \ + "pushl %%eax\n\t" \ + "leal %0,%%eax\n\t" \ + "call " helper "\n\t" \ + "popl %%eax\n\t" \ + "1:\n" \ + :"=m" (*(volatile int *)rw) : : "memory") + +#else /* !CONFIG_SPINLINE */ + + #define __build_write_lock_ptr(rw, helper) \ + asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ + "jnz 2f\n" \ + "1:\n" \ + LOCK_SECTION_START("") \ + "2:\tcall " helper "\n\t" \ + "jmp 1b\n" \ + LOCK_SECTION_END \ + ::"a" (rw) : "memory") + + #define __build_write_lock_const(rw, helper) \ + asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ + "jnz 2f\n" \ + "1:\n" \ + 
LOCK_SECTION_START("") \ + "2:\tpushl %%eax\n\t" \ + "leal %0,%%eax\n\t" \ + "call " helper "\n\t" \ + "popl %%eax\n\t" \ + "jmp 1b\n" \ + LOCK_SECTION_END \ + :"=m" (*(volatile int *)rw) : : "memory") + +#endif /* CONFIG_SPINLINE */ #define __build_write_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/smp.h current/include/asm-i386/smp.h --- reference/include/asm-i386/smp.h 2004-03-11 14:35:15.000000000 -0800 +++ current/include/asm-i386/smp.h 2004-04-08 15:10:22.000000000 -0700 @@ -34,7 +34,7 @@ extern void smp_alloc_memory(void); extern int pic_mode; extern int smp_num_siblings; -extern int cpu_sibling_map[]; +extern cpumask_t cpu_sibling_map[]; extern void smp_flush_tlb(void); extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/spinlock.h current/include/asm-i386/spinlock.h --- reference/include/asm-i386/spinlock.h 2004-03-11 14:35:15.000000000 -0800 +++ current/include/asm-i386/spinlock.h 2004-04-08 15:10:21.000000000 -0700 @@ -43,18 +43,35 @@ typedef struct { #define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0) #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) -#define spin_lock_string \ - "\n1:\t" \ - "lock ; decb %0\n\t" \ - "js 2f\n" \ - LOCK_SECTION_START("") \ - "2:\t" \ - "rep;nop\n\t" \ - "cmpb $0,%0\n\t" \ - "jle 2b\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END +#ifdef CONFIG_SPINLINE + #define spin_lock_string \ + "\n1:\t" \ + "lock ; decb %0\n\t" \ + "js 2f\n" \ + "jmp 3f\n" \ + "2:\t" \ + "rep;nop\n\t" \ + "cmpb $0,%0\n\t" \ + "jle 2b\n\t" \ + "jmp 1b\n" \ + "3:\t" + +#else /* !CONFIG_SPINLINE */ + + #define spin_lock_string \ + "\n1:\t" \ + "lock ; decb %0\n\t" \ + "js 2f\n" \ + LOCK_SECTION_START("") \ + "2:\t" \ + "rep;nop\n\t" \ + "cmpb $0,%0\n\t" \ + "jle 2b\n\t" \ + "jmp 1b\n" \ + LOCK_SECTION_END + +#endif /* CONFIG_SPINLINE */ /* * This works. 
Despite all the confusion. * (except on PPro SMP or if we are using OOSTORE) @@ -138,6 +155,11 @@ here: */ typedef struct { volatile unsigned int lock; +#ifdef CONFIG_LOCKMETER + /* required for LOCKMETER since all bits in lock are used */ + /* and we need this storage for CPU and lock INDEX */ + unsigned lockmeter_magic; +#endif #ifdef CONFIG_DEBUG_SPINLOCK unsigned magic; #endif @@ -145,11 +167,19 @@ typedef struct { #define RWLOCK_MAGIC 0xdeaf1eed +#ifdef CONFIG_LOCKMETER +#ifdef CONFIG_DEBUG_SPINLOCK +#define RWLOCK_MAGIC_INIT , 0, RWLOCK_MAGIC +#else +#define RWLOCK_MAGIC_INIT , 0 +#endif +#else /* !CONFIG_LOCKMETER */ #ifdef CONFIG_DEBUG_SPINLOCK #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC #else #define RWLOCK_MAGIC_INIT /* */ #endif +#endif /* !CONFIG_LOCKMETER */ #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } @@ -196,4 +226,60 @@ static inline int _raw_write_trylock(rwl return 0; } +#ifdef CONFIG_LOCKMETER +static inline int _raw_read_trylock(rwlock_t *lock) +{ +/* FIXME -- replace with assembler */ + atomic_t *count = (atomic_t *)lock; + atomic_dec(count); + if (count->counter > 0) + return 1; + atomic_inc(count); + return 0; +} +#endif + +#if defined(CONFIG_LOCKMETER) && defined(CONFIG_HAVE_DEC_LOCK) +extern void _metered_spin_lock (spinlock_t *lock); +extern void _metered_spin_unlock(spinlock_t *lock); + +/* + * Matches what is in arch/i386/lib/dec_and_lock.c, except this one is + * "static inline" so that the spin_lock(), if actually invoked, is charged + * against the real caller, not against the catch-all atomic_dec_and_lock + */ +static inline int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) +{ + int counter; + int newcount; + +repeat: + counter = atomic_read(atomic); + newcount = counter-1; + + if (!newcount) + goto slow_path; + + asm volatile("lock; cmpxchgl %1,%2" + :"=a" (newcount) + :"r" (newcount), "m" (atomic->counter), "0" (counter)); + + /* If the above failed, "eax" will have changed */ + if (newcount != 
counter) + goto repeat; + return 0; + +slow_path: + preempt_disable(); + _metered_spin_lock(lock); + if (atomic_dec_and_test(atomic)) + return 1; + _metered_spin_unlock(lock); + preempt_enable(); + return 0; +} + +#define ATOMIC_DEC_AND_LOCK +#endif + #endif /* __ASM_SPINLOCK_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/string.h current/include/asm-i386/string.h --- reference/include/asm-i386/string.h 2004-02-18 14:57:17.000000000 -0800 +++ current/include/asm-i386/string.h 2004-04-09 11:53:00.000000000 -0700 @@ -58,6 +58,29 @@ __asm__ __volatile__( return dest; } +/* + * This is a more generic variant of strncpy_count() suitable for + * implementing string-access routines with all sorts of return + * code semantics. It's used by mm/usercopy.c. + */ +static inline size_t strncpy_count(char * dest,const char *src,size_t count) +{ + __asm__ __volatile__( + + "1:\tdecl %0\n\t" + "js 2f\n\t" + "lodsb\n\t" + "stosb\n\t" + "testb %%al,%%al\n\t" + "jne 1b\n\t" + "2:" + "incl %0" + : "=c" (count) + :"S" (src),"D" (dest),"0" (count) : "memory"); + + return count; +} + static inline char * strcat(char * dest,const char * src) { int d0, d1, d2, d3; diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/thread_info.h current/include/asm-i386/thread_info.h --- reference/include/asm-i386/thread_info.h 2004-03-11 14:35:15.000000000 -0800 +++ current/include/asm-i386/thread_info.h 2004-04-09 11:53:01.000000000 -0700 @@ -9,6 +9,9 @@ #ifdef __KERNEL__ +#include +#include + #ifndef __ASSEMBLY__ #include #endif @@ -29,31 +32,32 @@ struct thread_info { __u32 cpu; /* current CPU */ __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + mm_segment_t addr_limit; /* thread address space: 0-0xBFFFFFFF for user-thead 0-0xFFFFFFFF for kernel-thread */ + void *real_stack, *virtual_stack, *user_pgd; struct restart_block restart_block; + unsigned long previous_esp; /* ESP of the previous stack in case + of nested (IRQ) stacks + */ __u8 
supervisor_stack[0]; }; -#else /* !__ASSEMBLY__ */ - -/* offsets into the thread_info struct for assembly code access */ -#define TI_TASK 0x00000000 -#define TI_EXEC_DOMAIN 0x00000004 -#define TI_FLAGS 0x00000008 -#define TI_STATUS 0x0000000C -#define TI_CPU 0x00000010 -#define TI_PRE_COUNT 0x00000014 -#define TI_ADDR_LIMIT 0x00000018 -#define TI_RESTART_BLOCK 0x000001C - #endif #define PREEMPT_ACTIVE 0x4000000 +/* if you change THREAD_SIZE here, don't forget to change STACK_PAGE_COUNT */ +#ifdef CONFIG_4KSTACKS +#define THREAD_SIZE (4096) +#else +#define THREAD_SIZE (8192) +#endif + +#define STACK_WARN (THREAD_SIZE/8) /* * macros/functions for gaining access to the thread information structure * @@ -61,7 +65,7 @@ struct thread_info { */ #ifndef __ASSEMBLY__ -#define INIT_THREAD_INFO(tsk) \ +#define INIT_THREAD_INFO(tsk, thread_info) \ { \ .task = &tsk, \ .exec_domain = &default_exec_domain, \ @@ -72,12 +76,12 @@ struct thread_info { .restart_block = { \ .fn = do_no_restart_syscall, \ }, \ + .real_stack = &thread_info, \ } #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) -#define THREAD_SIZE (2*PAGE_SIZE) /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) @@ -87,6 +91,14 @@ static inline struct thread_info *curren return ti; } +/* how to get the current stack pointer from C */ +static inline unsigned long current_stack_pointer(void) +{ + unsigned long ti; + __asm__("movl %%esp,%0; ":"=r" (ti) : ); + return ti; +} + /* thread information allocation */ #ifdef CONFIG_DEBUG_STACK_USAGE #define alloc_thread_info(tsk) \ @@ -108,8 +120,6 @@ static inline struct thread_info *curren #else /* !__ASSEMBLY__ */ -#define THREAD_SIZE 8192 - /* how to get the thread information struct from ASM */ #define GET_THREAD_INFO(reg) \ movl $-THREAD_SIZE, reg; \ @@ -133,6 +143,7 @@ static inline struct thread_info *curren #define TIF_NEED_RESCHED 3 /* rescheduling necessary 
*/ #define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ #define TIF_IRET 5 /* return with iret */ +#define TIF_DB7 6 /* has debug registers */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define _TIF_SYSCALL_TRACE (1<active_mm) __flush_tlb(); +#endif } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { +#ifndef CONFIG_X86_SWITCH_PAGETABLES if (vma->vm_mm == current->active_mm) __flush_tlb_one(addr); +#endif } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { +#ifndef CONFIG_X86_SWITCH_PAGETABLES if (vma->vm_mm == current->active_mm) __flush_tlb(); +#endif } #else @@ -111,11 +117,10 @@ static inline void flush_tlb_range(struc __flush_tlb() extern void flush_tlb_all(void); -extern void flush_tlb_current_task(void); extern void flush_tlb_mm(struct mm_struct *); extern void flush_tlb_page(struct vm_area_struct *, unsigned long); -#define flush_tlb() flush_tlb_current_task() +#define flush_tlb() flush_tlb_all() static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) { diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/uaccess.h current/include/asm-i386/uaccess.h --- reference/include/asm-i386/uaccess.h 2003-10-01 11:48:22.000000000 -0700 +++ current/include/asm-i386/uaccess.h 2004-04-09 11:53:00.000000000 -0700 @@ -26,7 +26,7 @@ #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL) -#define USER_DS MAKE_MM_SEG(PAGE_OFFSET) +#define USER_DS MAKE_MM_SEG(TASK_SIZE) #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) @@ -149,6 +149,45 @@ extern void __get_user_4(void); :"=a" (ret),"=d" (x) \ :"0" (ptr)) +extern int get_user_size(unsigned int size, void *val, const void *ptr); +extern int put_user_size(unsigned int size, const void *val, void *ptr); +extern int zero_user_size(unsigned int size, void *ptr); +extern int 
copy_str_fromuser_size(unsigned int size, void *val, const void *ptr); +extern int strlen_fromuser_size(unsigned int size, const void *ptr); + + +# define indirect_get_user(x,ptr) \ +({ int __ret_gu,__val_gu; \ + __typeof__(ptr) __ptr_gu = (ptr); \ + __ret_gu = get_user_size(sizeof(*__ptr_gu), &__val_gu,__ptr_gu) ? -EFAULT : 0;\ + (x) = (__typeof__(*__ptr_gu))__val_gu; \ + __ret_gu; \ +}) +#define indirect_put_user(x,ptr) \ +({ \ + __typeof__(*(ptr)) *__ptr_pu = (ptr), __x_pu = (x); \ + put_user_size(sizeof(*__ptr_pu), &__x_pu, __ptr_pu) ? -EFAULT : 0; \ +}) +#define __indirect_put_user indirect_put_user +#define __indirect_get_user indirect_get_user + +#define indirect_copy_from_user(to,from,n) get_user_size(n,to,from) +#define indirect_copy_to_user(to,from,n) put_user_size(n,from,to) + +#define __indirect_copy_from_user indirect_copy_from_user +#define __indirect_copy_to_user indirect_copy_to_user + +#define indirect_strncpy_from_user(dst, src, count) \ + copy_str_fromuser_size(count, dst, src) + +extern int strlen_fromuser_size(unsigned int size, const void *ptr); +#define indirect_strnlen_user(str, n) strlen_fromuser_size(n, str) +#define indirect_strlen_user(str) indirect_strnlen_user(str, ~0UL >> 1) + +extern int zero_user_size(unsigned int size, void *ptr); + +#define indirect_clear_user(mem, len) zero_user_size(len, mem) +#define __indirect_clear_user clear_user /* Careful: we have to cast the result to the type of the pointer for sign reasons */ /** @@ -168,7 +207,7 @@ extern void __get_user_4(void); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define get_user(x,ptr) \ +#define direct_get_user(x,ptr) \ ({ int __ret_gu,__val_gu; \ switch(sizeof (*(ptr))) { \ case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ @@ -198,7 +237,7 @@ extern void __put_user_bad(void); * * Returns zero on success, or -EFAULT on error. 
*/ -#define put_user(x,ptr) \ +#define direct_put_user(x,ptr) \ __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) @@ -222,7 +261,7 @@ extern void __put_user_bad(void); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define __get_user(x,ptr) \ +#define __direct_get_user(x,ptr) \ __get_user_nocheck((x),(ptr),sizeof(*(ptr))) @@ -245,7 +284,7 @@ extern void __put_user_bad(void); * * Returns zero on success, or -EFAULT on error. */ -#define __put_user(x,ptr) \ +#define __direct_put_user(x,ptr) \ __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) #define __put_user_nocheck(x,ptr,size) \ @@ -396,7 +435,7 @@ unsigned long __copy_from_user_ll(void * * On success, this will be zero. */ static inline unsigned long -__copy_to_user(void __user *to, const void *from, unsigned long n) +__direct_copy_to_user(void __user *to, const void *from, unsigned long n) { if (__builtin_constant_p(n)) { unsigned long ret; @@ -434,7 +473,7 @@ __copy_to_user(void __user *to, const vo * data to the requested size using zero bytes. */ static inline unsigned long -__copy_from_user(void *to, const void __user *from, unsigned long n) +__direct_copy_from_user(void *to, const void __user *from, unsigned long n) { if (__builtin_constant_p(n)) { unsigned long ret; @@ -468,11 +507,11 @@ __copy_from_user(void *to, const void __ * On success, this will be zero. */ static inline unsigned long -copy_to_user(void __user *to, const void *from, unsigned long n) +direct_copy_to_user(void __user *to, const void *from, unsigned long n) { might_sleep(); if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); + n = __direct_copy_to_user(to, from, n); return n; } @@ -493,11 +532,11 @@ copy_to_user(void __user *to, const void * data to the requested size using zero bytes. 
*/ static inline unsigned long -copy_from_user(void *to, const void __user *from, unsigned long n) +direct_copy_from_user(void *to, const void __user *from, unsigned long n) { might_sleep(); if (access_ok(VERIFY_READ, from, n)) - n = __copy_from_user(to, from, n); + n = __direct_copy_from_user(to, from, n); else memset(to, 0, n); return n; @@ -520,10 +559,68 @@ long __strncpy_from_user(char *dst, cons * If there is a limit on the length of a valid string, you may wish to * consider using strnlen_user() instead. */ -#define strlen_user(str) strnlen_user(str, ~0UL >> 1) -long strnlen_user(const char __user *str, long n); -unsigned long clear_user(void __user *mem, unsigned long len); -unsigned long __clear_user(void __user *mem, unsigned long len); +long direct_strncpy_from_user(char *dst, const char *src, long count); +long __direct_strncpy_from_user(char *dst, const char *src, long count); +#define direct_strlen_user(str) direct_strnlen_user(str, ~0UL >> 1) +long direct_strnlen_user(const char *str, long n); +unsigned long direct_clear_user(void *mem, unsigned long len); +unsigned long __direct_clear_user(void *mem, unsigned long len); + +extern int indirect_uaccess; + +#ifdef CONFIG_X86_UACCESS_INDIRECT + +/* + * Return code and zeroing semantics: + + __clear_user 0 <-> bytes not done + clear_user 0 <-> bytes not done + __copy_to_user 0 <-> bytes not done + copy_to_user 0 <-> bytes not done + __copy_from_user 0 <-> bytes not done, zero rest + copy_from_user 0 <-> bytes not done, zero rest + __get_user 0 <-> -EFAULT + get_user 0 <-> -EFAULT + __put_user 0 <-> -EFAULT + put_user 0 <-> -EFAULT + strlen_user strlen + 1 <-> 0 + strnlen_user strlen + 1 (or n+1) <-> 0 + strncpy_from_user strlen (or n) <-> -EFAULT + + */ + +#define __clear_user(mem,len) __indirect_clear_user(mem,len) +#define clear_user(mem,len) indirect_clear_user(mem,len) +#define __copy_to_user(to,from,n) __indirect_copy_to_user(to,from,n) +#define copy_to_user(to,from,n) 
indirect_copy_to_user(to,from,n) +#define __copy_from_user(to,from,n) __indirect_copy_from_user(to,from,n) +#define copy_from_user(to,from,n) indirect_copy_from_user(to,from,n) +#define __get_user(val,ptr) __indirect_get_user(val,ptr) +#define get_user(val,ptr) indirect_get_user(val,ptr) +#define __put_user(val,ptr) __indirect_put_user(val,ptr) +#define put_user(val,ptr) indirect_put_user(val,ptr) +#define strlen_user(str) indirect_strlen_user(str) +#define strnlen_user(src,count) indirect_strnlen_user(src,count) +#define strncpy_from_user(dst,src,count) \ + indirect_strncpy_from_user(dst,src,count) + +#else + +#define __clear_user __direct_clear_user +#define clear_user direct_clear_user +#define __copy_to_user __direct_copy_to_user +#define copy_to_user direct_copy_to_user +#define __copy_from_user __direct_copy_from_user +#define copy_from_user direct_copy_from_user +#define __get_user __direct_get_user +#define get_user direct_get_user +#define __put_user __direct_put_user +#define put_user direct_put_user +#define strlen_user direct_strlen_user +#define strnlen_user direct_strnlen_user +#define strncpy_from_user direct_strncpy_from_user + +#endif /* CONFIG_X86_UACCESS_INDIRECT */ #endif /* __i386_UACCESS_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/unistd.h current/include/asm-i386/unistd.h --- reference/include/asm-i386/unistd.h 2004-04-07 14:54:32.000000000 -0700 +++ current/include/asm-i386/unistd.h 2004-04-09 13:23:20.000000000 -0700 @@ -279,8 +279,9 @@ #define __NR_utimes 271 #define __NR_fadvise64_64 272 #define __NR_vserver 273 - -#define NR_syscalls 274 +#define __NR_sys_kexec_load 274 + +#define NR_syscalls 275 /* user-visible error numbers are in the range -1 - -124: see */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-i386/vsyscall-gtod.h current/include/asm-i386/vsyscall-gtod.h --- reference/include/asm-i386/vsyscall-gtod.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-i386/vsyscall-gtod.h 
2004-04-09 21:41:40.000000000 -0700 @@ -0,0 +1,68 @@ +#ifndef _ASM_i386_VSYSCALL_GTOD_H_ +#define _ASM_i386_VSYSCALL_GTOD_H_ + +#ifdef CONFIG_VSYSCALL_GTOD + +/* VSYSCALL_GTOD_START must be the same as + * __fix_to_virt(FIX_VSYSCALL_GTOD FIRST_PAGE) + * and must also be same as addr in vmlinux.lds.S */ +#define VSYSCALL_GTOD_START 0xffffc000 +#define VSYSCALL_GTOD_SIZE 1024 +#define VSYSCALL_GTOD_END (VSYSCALL_GTOD_START + PAGE_SIZE) +#define VSYSCALL_GTOD_NUMPAGES \ + ((VSYSCALL_GTOD_END-VSYSCALL_GTOD_START) >> PAGE_SHIFT) +#define VSYSCALL_ADDR(vsyscall_nr) \ + (VSYSCALL_GTOD_START+VSYSCALL_GTOD_SIZE*(vsyscall_nr)) + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#include + +#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) + +/* ReadOnly generic time value attributes*/ +#define __section_vsyscall_timesource __attribute__ ((unused, __section__ (".vsyscall_timesource"))) +#define __section_xtime_lock __attribute__ ((unused, __section__ (".xtime_lock"))) +#define __section_xtime __attribute__ ((unused, __section__ (".xtime"))) +#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"))) +#define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"))) +#define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"))) + +/* ReadOnly NTP variables */ +#define __section_tickadj __attribute__ ((unused, __section__ (".tickadj"))) +#define __section_time_adjust __attribute__ ((unused, __section__ (".time_adjust"))) + + +/* ReadOnly TSC time value attributes*/ +#define __section_last_tsc_low __attribute__ ((unused, __section__ (".last_tsc_low"))) +#define __section_tsc_delay_at_last_interrupt __attribute__ ((unused, __section__ (".tsc_delay_at_last_interrupt"))) +#define __section_fast_gettimeoffset_quotient __attribute__ ((unused, __section__ (".fast_gettimeoffset_quotient"))) + +/* ReadOnly Cyclone time value attributes*/ +#define __section_cyclone_timer __attribute__ ((unused, __section__ 
(".cyclone_timer"))) +#define __section_last_cyclone_low __attribute__ ((unused, __section__ (".last_cyclone_low"))) +#define __section_cyclone_delay_at_last_interrupt __attribute__ ((unused, __section__ (".cyclone_delay_at_last_interrupt"))) + +enum vsyscall_num { + __NR_vgettimeofday, + __NR_vtime, +}; + +enum vsyscall_timesource_e { + VSYSCALL_GTOD_NONE, + VSYSCALL_GTOD_TSC, + VSYSCALL_GTOD_CYCLONE, +}; + +int vsyscall_init(void); +void vsyscall_set_timesource(char* name); + +extern char __vsyscall_0; +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#else /* CONFIG_VSYSCALL_GTOD */ +#define vsyscall_init() +#define vsyscall_set_timesource(x) +#endif /* CONFIG_VSYSCALL_GTOD */ +#endif /* _ASM_i386_VSYSCALL_GTOD_H_ */ + diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ia64/ioctls.h current/include/asm-ia64/ioctls.h --- reference/include/asm-ia64/ioctls.h 2004-02-04 16:24:28.000000000 -0800 +++ current/include/asm-ia64/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -55,6 +55,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ #define FIOCLEX 0x5451 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ia64/irq.h current/include/asm-ia64/irq.h --- reference/include/asm-ia64/irq.h 2003-06-05 14:39:17.000000000 -0700 +++ current/include/asm-ia64/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -29,4 +29,8 @@ extern void disable_irq_nosync (unsigned extern void enable_irq (unsigned int); extern void set_irq_affinity_info (unsigned int irq, int dest, int redir); +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _ASM_IA64_IRQ_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ia64/lockmeter.h current/include/asm-ia64/lockmeter.h --- reference/include/asm-ia64/lockmeter.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-ia64/lockmeter.h 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,72 @@ +/* + * Copyright (C) 1999,2000 Silicon Graphics, Inc. + * + * Written by John Hawkes (hawkes@sgi.com) + * Based on klstat.h by Jack Steiner (steiner@sgi.com) + */ + +#ifndef _IA64_LOCKMETER_H +#define _IA64_LOCKMETER_H + +#ifdef local_cpu_data +#define CPU_CYCLE_FREQUENCY local_cpu_data->itc_freq +#else +#define CPU_CYCLE_FREQUENCY my_cpu_data.itc_freq +#endif +#define get_cycles64() get_cycles() + +#define THIS_CPU_NUMBER smp_processor_id() + +/* + * macros to cache and retrieve an index value inside of a lock + * these macros assume that there are less than 65536 simultaneous + * (read mode) holders of a rwlock. + * we also assume that the hash table has less than 32767 entries. 
+ */ +/* + * instrumented spinlock structure -- never used to allocate storage + * only used in macros below to overlay a spinlock_t + */ +typedef struct inst_spinlock_s { + /* remember, Intel is little endian */ + volatile unsigned short lock; + volatile unsigned short index; +} inst_spinlock_t; +#define PUT_INDEX(lock_ptr,indexv) ((inst_spinlock_t *)(lock_ptr))->index = indexv +#define GET_INDEX(lock_ptr) ((inst_spinlock_t *)(lock_ptr))->index + +/* + * macros to cache and retrieve an index value in a read/write lock + * as well as the cpu where a reader busy period started + * we use the 2nd word (the debug word) for this, so require the + * debug word to be present + */ +/* + * instrumented rwlock structure -- never used to allocate storage + * only used in macros below to overlay a rwlock_t + */ +typedef struct inst_rwlock_s { + volatile int read_counter:31; + volatile int write_lock:1; + volatile unsigned short index; + volatile unsigned short cpu; +} inst_rwlock_t; +#define PUT_RWINDEX(rwlock_ptr,indexv) ((inst_rwlock_t *)(rwlock_ptr))->index = indexv +#define GET_RWINDEX(rwlock_ptr) ((inst_rwlock_t *)(rwlock_ptr))->index +#define PUT_RW_CPU(rwlock_ptr,cpuv) ((inst_rwlock_t *)(rwlock_ptr))->cpu = cpuv +#define GET_RW_CPU(rwlock_ptr) ((inst_rwlock_t *)(rwlock_ptr))->cpu + +/* + * return the number of readers for a rwlock_t + */ +#define RWLOCK_READERS(rwlock_ptr) ((rwlock_ptr)->read_counter) + +/* + * return true if rwlock is write locked + * (note that other lock attempts can cause the lock value to be negative) + */ +#define RWLOCK_IS_WRITE_LOCKED(rwlock_ptr) ((rwlock_ptr)->write_lock) +#define RWLOCK_IS_READ_LOCKED(rwlock_ptr) ((rwlock_ptr)->read_counter) + +#endif /* _IA64_LOCKMETER_H */ + diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ia64/pgtable.h current/include/asm-ia64/pgtable.h --- reference/include/asm-ia64/pgtable.h 2004-02-04 16:24:28.000000000 -0800 +++ current/include/asm-ia64/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ 
-468,8 +468,6 @@ extern void hugetlb_free_pgtables(struct struct vm_area_struct * prev, unsigned long start, unsigned long end); #endif -typedef pte_t *pte_addr_t; - /* * IA-64 doesn't have any external MMU info: the page tables contain all the necessary * information. However, we use this routine to take care of any (delayed) i-cache diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ia64/rmap.h current/include/asm-ia64/rmap.h --- reference/include/asm-ia64/rmap.h 2002-12-09 18:45:55.000000000 -0800 +++ current/include/asm-ia64/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _ASM_IA64_RMAP_H -#define _ASM_IA64_RMAP_H - -/* nothing to see, move along */ -#include - -#endif /* _ASM_IA64_RMAP_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ia64/spinlock.h current/include/asm-ia64/spinlock.h --- reference/include/asm-ia64/spinlock.h 2004-04-07 14:54:33.000000000 -0700 +++ current/include/asm-ia64/spinlock.h 2004-04-08 15:10:21.000000000 -0700 @@ -110,8 +110,18 @@ do { \ typedef struct { volatile int read_counter : 31; volatile int write_lock : 1; +#ifdef CONFIG_LOCKMETER + /* required for LOCKMETER since all bits in lock are used */ + /* and we need this storage for CPU and lock INDEX */ + unsigned lockmeter_magic; +#endif } rwlock_t; + +#ifdef CONFIG_LOCKMETER +#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0 } +#else #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } +#endif #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) #define rwlock_is_locked(x) (*(volatile int *) (x) != 0) @@ -127,6 +137,48 @@ do { \ } \ } while (0) +#ifdef CONFIG_LOCKMETER +/* + * HACK: This works, but still have a timing window that affects performance: + * we see that no one owns the Write lock, then someone * else grabs for Write + * lock before we do a read_lock(). + * This means that on rare occasions our read_lock() will stall and spin-wait + * until we acquire for Read, instead of simply returning a trylock failure. 
+ */ +static inline int _raw_read_trylock(rwlock_t *rw) +{ + if (rw->write_lock) { + return 0; + } else { + _raw_read_lock(rw); + return 1; + } +} + +static inline int _raw_write_trylock(rwlock_t *rw) +{ + if (!(rw->write_lock)) { + /* isn't currently write-locked... that looks promising... */ + if (test_and_set_bit(31, rw) == 0) { + /* now it is write-locked by me... */ + if (rw->read_counter) { + /* really read-locked, so release write-lock and fail */ + clear_bit(31, rw); + } else { + /* we've the the write-lock, no read-lockers... success! */ + barrier(); + return 1; + } + + } + } + + /* falls through ... fails to write-lock */ + barrier(); + return 0; +} +#endif + #define _raw_read_unlock(rw) \ do { \ rwlock_t *__read_lock_ptr = (rw); \ @@ -190,4 +242,25 @@ do { \ clear_bit(31, (x)); \ }) +#ifdef CONFIG_LOCKMETER +extern void _metered_spin_lock (spinlock_t *lock); +extern void _metered_spin_unlock(spinlock_t *lock); + +/* + * Use a less efficient, and inline, atomic_dec_and_lock() if lockmetering + * so we can see the callerPC of who is actually doing the spin_lock(). + * Otherwise, all we see is the generic rollup of all locks done by + * atomic_dec_and_lock(). 
+ */ +static inline int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) +{ + _metered_spin_lock(lock); + if (atomic_dec_and_test(atomic)) + return 1; + _metered_spin_unlock(lock); + return 0; +} +#define ATOMIC_DEC_AND_LOCK +#endif + #endif /* _ASM_IA64_SPINLOCK_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ia64/tlb.h current/include/asm-ia64/tlb.h --- reference/include/asm-ia64/tlb.h 2004-03-11 14:35:16.000000000 -0800 +++ current/include/asm-ia64/tlb.h 2004-04-08 15:10:22.000000000 -0700 @@ -211,6 +211,8 @@ __tlb_remove_tlb_entry (struct mmu_gathe tlb->end_addr = address + PAGE_SIZE; } +#define tlb_migrate_prepare(mm) flush_tlb_mm(mm) + #define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0) diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-m68k/ioctls.h current/include/asm-m68k/ioctls.h --- reference/include/asm-m68k/ioctls.h 2003-04-09 11:48:05.000000000 -0700 +++ current/include/asm-m68k/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -48,6 +48,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ #define FIOCLEX 0x5451 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-m68k/irq.h current/include/asm-m68k/irq.h --- reference/include/asm-m68k/irq.h 2004-04-07 14:54:33.000000000 -0700 +++ current/include/asm-m68k/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -124,4 +124,8 @@ extern volatile unsigned int num_spuriou */ extern irq_node_t *new_irq_node(void); +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _M68K_IRQ_H_ */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-m68k/pgtable.h current/include/asm-m68k/pgtable.h --- reference/include/asm-m68k/pgtable.h 2004-02-04 16:24:29.000000000 -0800 +++ current/include/asm-m68k/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -168,8 +168,6 @@ static inline void update_mmu_cache(stru ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \ : (prot))) -typedef pte_t *pte_addr_t; - #endif /* !__ASSEMBLY__ */ /* diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-m68k/rmap.h current/include/asm-m68k/rmap.h --- reference/include/asm-m68k/rmap.h 2002-12-09 18:46:24.000000000 -0800 +++ current/include/asm-m68k/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _M68K_RMAP_H -#define _M68K_RMAP_H - -/* nothing to see, move along */ -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-m68knommu/irq.h current/include/asm-m68knommu/irq.h --- reference/include/asm-m68knommu/irq.h 2004-03-11 14:35:16.000000000 -0800 +++ current/include/asm-m68knommu/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -121,4 +121,8 @@ extern irq_node_t *new_irq_node(void); #define enable_irq_nosync(x) enable_irq(x) #define disable_irq_nosync(x) disable_irq(x) +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _M68K_IRQ_H_ */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-m68knommu/pgtable.h 
current/include/asm-m68knommu/pgtable.h --- reference/include/asm-m68knommu/pgtable.h 2003-06-05 14:56:22.000000000 -0700 +++ current/include/asm-m68knommu/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -11,8 +11,6 @@ #include #include -typedef pte_t *pte_addr_t; - /* * Trivial page table functions. */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-m68knommu/rmap.h current/include/asm-m68knommu/rmap.h --- reference/include/asm-m68knommu/rmap.h 2002-12-09 18:45:53.000000000 -0800 +++ current/include/asm-m68knommu/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,2 +0,0 @@ -/* Do not need anything here */ - diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-mips/ioctls.h current/include/asm-mips/ioctls.h --- reference/include/asm-mips/ioctls.h 2003-07-02 14:44:55.000000000 -0700 +++ current/include/asm-mips/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -79,6 +79,7 @@ #define TIOCGSID 0x7416 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ /* I hope the range from 0x5480 on is free ... 
*/ #define TIOCSCTTY 0x5480 /* become controlling tty */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-mips/irq.h current/include/asm-mips/irq.h --- reference/include/asm-mips/irq.h 2004-03-11 14:35:18.000000000 -0800 +++ current/include/asm-mips/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -31,4 +31,7 @@ extern asmlinkage unsigned int do_IRQ(in extern void init_generic_irq(void); +struct irqaction; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _ASM_IRQ_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-mips/kmap_types.h current/include/asm-mips/kmap_types.h --- reference/include/asm-mips/kmap_types.h 2003-07-02 14:44:55.000000000 -0700 +++ current/include/asm-mips/kmap_types.h 2004-04-08 15:10:26.000000000 -0700 @@ -19,7 +19,6 @@ D(5) KM_BIO_SRC_IRQ, D(6) KM_BIO_DST_IRQ, D(7) KM_PTE0, D(8) KM_PTE1, -D(9) KM_PTE2, D(10) KM_IRQ0, D(11) KM_IRQ1, D(12) KM_SOFTIRQ0, diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-mips/lockmeter.h current/include/asm-mips/lockmeter.h --- reference/include/asm-mips/lockmeter.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-mips/lockmeter.h 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,126 @@ +/* + * Copyright (C) 1999,2000 Silicon Graphics, Inc. + * + * Written by John Hawkes (hawkes@sgi.com) + * Based on klstat.h by Jack Steiner (steiner@sgi.com) + * Ported to mips32 for Asita Technologies + * by D.J. Barrow ( dj.barrow@asitatechnologies.com ) + */ +#ifndef _ASM_LOCKMETER_H +#define _ASM_LOCKMETER_H + +/* do_gettimeoffset is a function pointer on mips */ +/* & it is not included by */ +#include +#include +#include + +#define SPINLOCK_MAGIC_INIT /* */ + +#define CPU_CYCLE_FREQUENCY get_cpu_cycle_frequency() + +#define THIS_CPU_NUMBER smp_processor_id() + +static uint32_t cpu_cycle_frequency = 0; + +static uint32_t get_cpu_cycle_frequency(void) +{ + /* a total hack, slow and invasive, but ... 
it works */ + int sec; + uint32_t start_cycles; + struct timeval tv; + + if (cpu_cycle_frequency == 0) { /* uninitialized */ + do_gettimeofday(&tv); + sec = tv.tv_sec; /* set up to catch the tv_sec rollover */ + while (sec == tv.tv_sec) { do_gettimeofday(&tv); } + sec = tv.tv_sec; /* rolled over to a new sec value */ + start_cycles = get_cycles(); + while (sec == tv.tv_sec) { do_gettimeofday(&tv); } + cpu_cycle_frequency = get_cycles() - start_cycles; + } + + return cpu_cycle_frequency; +} + +extern struct timeval xtime; + +static uint64_t get_cycles64(void) +{ + static uint64_t last_get_cycles64 = 0; + uint64_t ret; + unsigned long sec; + unsigned long usec, usec_offset; + +again: + sec = xtime.tv_sec; + usec = xtime.tv_usec; + usec_offset = do_gettimeoffset(); + if ((xtime.tv_sec != sec) || + (xtime.tv_usec != usec)|| + (usec_offset >= 20000)) + goto again; + + ret = ((uint64_t)(usec + usec_offset) * cpu_cycle_frequency); + /* We can't do a normal 64 bit division on mips without libgcc.a */ + do_div(ret,1000000); + ret += ((uint64_t)sec * cpu_cycle_frequency); + + /* XXX why does time go backwards? do_gettimeoffset? general time adj? */ + if (ret <= last_get_cycles64) + ret = last_get_cycles64+1; + last_get_cycles64 = ret; + + return ret; +} + +/* + * macros to cache and retrieve an index value inside of a lock + * these macros assume that there are less than 65536 simultaneous + * (read mode) holders of a rwlock. + * we also assume that the hash table has less than 32767 entries. 
+ * the high order bit is used for write locking a rw_lock + */ +#define INDEX_MASK 0x7FFF0000 +#define READERS_MASK 0x0000FFFF +#define INDEX_SHIFT 16 +#define PUT_INDEX(lockp,index) \ + lockp->lock = (((lockp->lock) & ~INDEX_MASK) | (index) << INDEX_SHIFT) +#define GET_INDEX(lockp) \ + (((lockp->lock) & INDEX_MASK) >> INDEX_SHIFT) + +/* + * macros to cache and retrieve an index value in a read/write lock + * as well as the cpu where a reader busy period started + * we use the 2nd word (the debug word) for this, so require the + * debug word to be present + */ +/* + * instrumented rwlock structure -- never used to allocate storage + * only used in macros below to overlay a rwlock_t + */ +typedef struct inst_rwlock_s { + volatile int lock; + unsigned short index; + unsigned short cpu; +} inst_rwlock_t; +#define PUT_RWINDEX(rwlock_ptr,indexv) ((inst_rwlock_t *)(rwlock_ptr))->index = indexv +#define GET_RWINDEX(rwlock_ptr) ((inst_rwlock_t *)(rwlock_ptr))->index +#define PUT_RW_CPU(rwlock_ptr,cpuv) ((inst_rwlock_t *)(rwlock_ptr))->cpu = cpuv +#define GET_RW_CPU(rwlock_ptr) ((inst_rwlock_t *)(rwlock_ptr))->cpu + +/* + * return the number of readers for a rwlock_t + */ +#define RWLOCK_READERS(rwlock_ptr) rwlock_readers(rwlock_ptr) + +extern inline int rwlock_readers(rwlock_t *rwlock_ptr) +{ + int tmp = (int) rwlock_ptr->lock; + return (tmp >= 0) ? 
tmp : 0; +} + +#define RWLOCK_IS_WRITE_LOCKED(rwlock_ptr) ((rwlock_ptr)->lock < 0) +#define RWLOCK_IS_READ_LOCKED(rwlock_ptr) ((rwlock_ptr)->lock > 0) + +#endif /* _ASM_LOCKMETER_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-mips/pgtable-32.h current/include/asm-mips/pgtable-32.h --- reference/include/asm-mips/pgtable-32.h 2004-03-11 14:35:20.000000000 -0800 +++ current/include/asm-mips/pgtable-32.h 2004-04-08 15:10:26.000000000 -0700 @@ -216,10 +216,4 @@ static inline pmd_t *pmd_offset(pgd_t *d #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#ifdef CONFIG_64BIT_PHYS_ADDR -typedef u64 pte_addr_t; -#else -typedef pte_t *pte_addr_t; -#endif - #endif /* _ASM_PGTABLE_32_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-mips/pgtable-64.h current/include/asm-mips/pgtable-64.h --- reference/include/asm-mips/pgtable-64.h 2004-03-11 14:35:20.000000000 -0800 +++ current/include/asm-mips/pgtable-64.h 2004-04-08 15:10:26.000000000 -0700 @@ -214,6 +214,4 @@ static inline pte_t mk_swap_pte(unsigned #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -typedef pte_t *pte_addr_t; - #endif /* _ASM_PGTABLE_64_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-mips/rmap.h current/include/asm-mips/rmap.h --- reference/include/asm-mips/rmap.h 2003-07-02 14:44:56.000000000 -0700 +++ current/include/asm-mips/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef __ASM_RMAP_H -#define __ASM_RMAP_H - -/* nothing to see, move along */ -#include - -#endif /* __ASM_RMAP_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-mips/spinlock.h current/include/asm-mips/spinlock.h --- reference/include/asm-mips/spinlock.h 2004-03-11 14:35:22.000000000 -0800 +++ current/include/asm-mips/spinlock.h 2004-04-08 15:10:21.000000000 -0700 @@ -91,9 +91,18 @@ static inline unsigned int 
_raw_spin_try typedef struct { volatile unsigned int lock; +#ifdef CONFIG_LOCKMETER + /* required for LOCKMETER since all bits in lock are used */ + /* and we need this storage for CPU and lock INDEX */ + unsigned lockmeter_magic; +#endif } rwlock_t; +#ifdef CONFIG_LOCKMETER +#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } +#else #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } +#endif #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-parisc/cacheflush.h current/include/asm-parisc/cacheflush.h --- reference/include/asm-parisc/cacheflush.h 2003-10-14 15:50:33.000000000 -0700 +++ current/include/asm-parisc/cacheflush.h 2004-04-08 15:10:25.000000000 -0700 @@ -69,7 +69,7 @@ extern void __flush_dcache_page(struct p static inline void flush_dcache_page(struct page *page) { - if (page->mapping && list_empty(&page->mapping->i_mmap) && + if (page_mapping(page) && list_empty(&page->mapping->i_mmap) && list_empty(&page->mapping->i_mmap_shared)) { set_bit(PG_dcache_dirty, &page->flags); } else { diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-parisc/irq.h current/include/asm-parisc/irq.h --- reference/include/asm-parisc/irq.h 2003-10-14 15:50:33.000000000 -0700 +++ current/include/asm-parisc/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -96,4 +96,7 @@ extern unsigned long txn_alloc_addr(int) /* soft power switch support (power.c) */ extern struct tasklet_struct power_tasklet; +struct irqaction; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _ASM_PARISC_IRQ_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-parisc/pgtable.h current/include/asm-parisc/pgtable.h --- reference/include/asm-parisc/pgtable.h 2004-02-04 16:24:29.000000000 -0800 +++ current/include/asm-parisc/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -450,8 +450,6 @@ static inline void ptep_mkdirty(pte_t *p #define pte_same(A,B) (pte_val(A) == pte_val(B)) -typedef pte_t 
*pte_addr_t; - #endif /* !__ASSEMBLY__ */ #define io_remap_page_range remap_page_range diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-parisc/rmap.h current/include/asm-parisc/rmap.h --- reference/include/asm-parisc/rmap.h 2002-12-09 18:46:23.000000000 -0800 +++ current/include/asm-parisc/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _PARISC_RMAP_H -#define _PARISC_RMAP_H - -/* nothing to see, move along */ -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc/ioctls.h current/include/asm-ppc/ioctls.h --- reference/include/asm-ppc/ioctls.h 2003-04-09 11:48:05.000000000 -0700 +++ current/include/asm-ppc/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -88,6 +88,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc/irq.h current/include/asm-ppc/irq.h --- reference/include/asm-ppc/irq.h 2003-10-01 11:48:23.000000000 -0700 +++ current/include/asm-ppc/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -211,5 +211,9 @@ extern unsigned long ppc_cached_irq_mask extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; extern atomic_t ppc_n_lost_interrupts; +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _ASM_IRQ_H */ #endif /* __KERNEL__ */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc/pgtable.h current/include/asm-ppc/pgtable.h --- reference/include/asm-ppc/pgtable.h 2004-02-18 14:57:18.000000000 -0800 +++ current/include/asm-ppc/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -670,8 +670,6 @@ extern void kernel_set_cachemode (unsign */ #define 
pgtable_cache_init() do { } while (0) -typedef pte_t *pte_addr_t; - #endif /* !__ASSEMBLY__ */ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc/rmap.h current/include/asm-ppc/rmap.h --- reference/include/asm-ppc/rmap.h 2002-12-09 18:46:19.000000000 -0800 +++ current/include/asm-ppc/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,9 +0,0 @@ -#ifndef _PPC_RMAP_H -#define _PPC_RMAP_H - -/* PPC calls pte_alloc() before mem_map[] is setup ... */ -#define BROKEN_PPC_PTE_ALLOC_ONE - -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc64/ioctls.h current/include/asm-ppc64/ioctls.h --- reference/include/asm-ppc64/ioctls.h 2003-04-09 11:48:06.000000000 -0700 +++ current/include/asm-ppc64/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -95,6 +95,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc64/irq.h current/include/asm-ppc64/irq.h --- reference/include/asm-ppc64/irq.h 2004-03-11 14:35:23.000000000 -0800 +++ current/include/asm-ppc64/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -48,5 +48,9 @@ static __inline__ int irq_canonicalize(i #define NR_MASK_WORDS ((NR_IRQS + 63) / 64) +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _ASM_IRQ_H */ #endif /* __KERNEL__ */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc64/mman.h current/include/asm-ppc64/mman.h --- reference/include/asm-ppc64/mman.h 2003-10-01 11:48:24.000000000 -0700 +++ current/include/asm-ppc64/mman.h 2004-04-09 21:41:39.000000000 -0700 @@ -26,6 +26,7 @@ #define 
MAP_LOCKED 0x80 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ +#define MAP_HUGETLB 0x0400 /* Backed with hugetlb pages */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc64/pgalloc.h current/include/asm-ppc64/pgalloc.h --- reference/include/asm-ppc64/pgalloc.h 2004-02-04 16:24:30.000000000 -0800 +++ current/include/asm-ppc64/pgalloc.h 2004-04-08 15:10:26.000000000 -0700 @@ -48,28 +48,43 @@ pmd_free(pmd_t *pmd) pmd_populate_kernel(mm, pmd, page_address(pte_page)) static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) +pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); + pte_t *pte; + pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); + if (pte) { + struct page *ptepage = virt_to_page(pte); + ptepage->mapping = (void *) mm; + ptepage->index = address & PMD_MASK; + } + return pte; } static inline struct page * pte_alloc_one(struct mm_struct *mm, unsigned long address) { - pte_t *pte = pte_alloc_one_kernel(mm, address); - - if (pte) - return virt_to_page(pte); - + pte_t *pte; + pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); + if (pte) { + struct page *ptepage = virt_to_page(pte); + ptepage->mapping = (void *) mm; + ptepage->index = address & PMD_MASK; + return ptepage; + } return NULL; } static inline void pte_free_kernel(pte_t *pte) { + virt_to_page(pte)->mapping = NULL; kmem_cache_free(zero_cache, pte); } -#define pte_free(pte_page) pte_free_kernel(page_address(pte_page)) +static inline void pte_free(struct page *ptepage) +{ + ptepage->mapping = NULL; + kmem_cache_free(zero_cache, page_address(ptepage)); +} struct pte_freelist_batch { diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc64/pgtable.h current/include/asm-ppc64/pgtable.h --- reference/include/asm-ppc64/pgtable.h 2004-03-11 
14:35:23.000000000 -0800 +++ current/include/asm-ppc64/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -488,8 +488,6 @@ extern struct vm_struct * im_get_area(un int region_type); unsigned long im_free(void *addr); -typedef pte_t *pte_addr_t; - long pSeries_lpar_hpte_insert(unsigned long hpte_group, unsigned long va, unsigned long prpn, int secondary, unsigned long hpteflags, diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc64/processor.h current/include/asm-ppc64/processor.h --- reference/include/asm-ppc64/processor.h 2004-03-11 14:35:23.000000000 -0800 +++ current/include/asm-ppc64/processor.h 2004-04-09 21:38:50.000000000 -0700 @@ -518,12 +518,16 @@ extern struct task_struct *last_task_use /* This decides where the kernel will search for a free chunk of vm * space during mmap's. + * + * /proc/pid/unmap_base is only supported for 32bit processes for now. */ -#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(STACK_TOP_USER32 / 4)) +#define __TASK_UNMAPPED_BASE (PAGE_ALIGN(STACK_TOP_USER32 / 4)) +#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(current->map_base)) #define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(STACK_TOP_USER64 / 4)) +#define __TASK_UNMAPPED_BASE (PAGE_ALIGN(STACK_TOP_USER32 / 4)) #define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? 
\ - TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 ) + (current->map_base) : TASK_UNMAPPED_BASE_USER64 ) typedef struct { unsigned long seg; @@ -618,6 +622,11 @@ static inline void prefetchw(const void #define spin_lock_prefetch(x) prefetchw(x) +#ifdef CONFIG_SCHED_SMT +#define ARCH_HAS_SCHED_DOMAIN +#define ARCH_HAS_SCHED_WAKE_BALANCE +#endif + #endif /* ASSEMBLY */ #endif /* __ASM_PPC64_PROCESSOR_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-ppc64/rmap.h current/include/asm-ppc64/rmap.h --- reference/include/asm-ppc64/rmap.h 2002-12-09 18:46:27.000000000 -0800 +++ current/include/asm-ppc64/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,9 +0,0 @@ -#ifndef _PPC64_RMAP_H -#define _PPC64_RMAP_H - -/* PPC64 calls pte_alloc() before mem_map[] is setup ... */ -#define BROKEN_PPC_PTE_ALLOC_ONE - -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-s390/ioctls.h current/include/asm-s390/ioctls.h --- reference/include/asm-s390/ioctls.h 2003-04-09 11:48:06.000000000 -0700 +++ current/include/asm-s390/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -56,6 +56,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ #define FIOCLEX 0x5451 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-s390/irq.h current/include/asm-s390/irq.h --- reference/include/asm-s390/irq.h 2003-07-28 15:33:24.000000000 -0700 +++ current/include/asm-s390/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -21,6 +21,10 @@ enum interruption_class { #define touch_nmi_watchdog() do { } while(0) +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* __KERNEL__ */ #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-s390/pgtable.h current/include/asm-s390/pgtable.h --- reference/include/asm-s390/pgtable.h 2004-04-07 14:54:34.000000000 -0700 +++ current/include/asm-s390/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -760,8 +760,6 @@ extern inline pte_t mk_swap_pte(unsigned #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -typedef pte_t *pte_addr_t; - #ifndef __s390x__ # define PTE_FILE_MAX_BITS 26 #else /* __s390x__ */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-s390/processor.h current/include/asm-s390/processor.h --- reference/include/asm-s390/processor.h 2004-04-07 14:54:34.000000000 -0700 +++ current/include/asm-s390/processor.h 2004-04-09 13:27:12.000000000 -0700 @@ -62,14 +62,16 @@ extern struct task_struct *last_task_use #ifndef __s390x__ # define TASK_SIZE (0x80000000UL) -# define TASK_UNMAPPED_BASE (TASK_SIZE / 2) +# define __TASK_UNMAPPED_BASE (TASK_SIZE / 2) +# define TASK_UNMAPPED_BASE (current->mmap_base) #else /* __s390x__ */ # define TASK_SIZE (0x40000000000UL) # define TASK31_SIZE (0x80000000UL) +# define __TASK_UNMAPPED_BASE (TASK31_SIZE/2) # define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? 
\ - (TASK31_SIZE / 2) : (TASK_SIZE / 2)) + (current->map_base) : (TASK_SIZE / 2)) #endif /* __s390x__ */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-s390/rmap.h current/include/asm-s390/rmap.h --- reference/include/asm-s390/rmap.h 2002-12-09 18:46:10.000000000 -0800 +++ current/include/asm-s390/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _S390_RMAP_H -#define _S390_RMAP_H - -/* nothing to see, move along */ -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sh/ioctls.h current/include/asm-sh/ioctls.h --- reference/include/asm-sh/ioctls.h 2003-04-09 11:48:06.000000000 -0700 +++ current/include/asm-sh/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -80,6 +80,7 @@ #define TIOCGSID _IOR('T', 41, pid_t) /* 0x5429 */ /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define TIOCSERCONFIG _IO('T', 83) /* 0x5453 */ #define TIOCSERGWILD _IOR('T', 84, int) /* 0x5454 */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sh/irq.h current/include/asm-sh/irq.h --- reference/include/asm-sh/irq.h 2004-04-07 14:54:34.000000000 -0700 +++ current/include/asm-sh/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -329,4 +329,8 @@ static inline int generic_irq_demux(int #define irq_canonicalize(irq) (irq) #define irq_demux(irq) __irq_demux(sh_mv.mv_irq_demux(irq)) +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* __ASM_SH_IRQ_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sh/pgalloc.h current/include/asm-sh/pgalloc.h --- reference/include/asm-sh/pgalloc.h 2004-02-04 16:24:31.000000000 -0800 +++ current/include/asm-sh/pgalloc.h 2004-04-08 15:10:25.000000000 -0700 @@ -101,7 +101,7 @@ static inline 
pte_t ptep_get_and_clear(p unsigned long pfn = pte_pfn(pte); if (pfn_valid(pfn)) { page = pfn_to_page(pfn); - if (!page->mapping + if (!page_mapping(page) || list_empty(&page->mapping->i_mmap_shared)) __clear_bit(PG_mapped, &page->flags); } diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sh/pgtable.h current/include/asm-sh/pgtable.h --- reference/include/asm-sh/pgtable.h 2004-04-07 14:54:35.000000000 -0700 +++ current/include/asm-sh/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -274,8 +274,6 @@ extern void update_mmu_cache(struct vm_a #define pte_same(A,B) (pte_val(A) == pte_val(B)) -typedef pte_t *pte_addr_t; - #endif /* !__ASSEMBLY__ */ #define kern_addr_valid(addr) (1) diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sh/rmap.h current/include/asm-sh/rmap.h --- reference/include/asm-sh/rmap.h 2002-12-09 18:46:22.000000000 -0800 +++ current/include/asm-sh/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _SH_RMAP_H -#define _SH_RMAP_H - -/* nothing to see, move along */ -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc/ioctls.h current/include/asm-sparc/ioctls.h --- reference/include/asm-sparc/ioctls.h 2003-04-09 11:48:06.000000000 -0700 +++ current/include/asm-sparc/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -15,6 +15,7 @@ #define TCSETS _IOW('T', 9, struct termios) #define TCSETSW _IOW('T', 10, struct termios) #define TCSETSF _IOW('T', 11, struct termios) +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ /* Note that all the ioctls that are not available in Linux have a * double underscore on the front to: a) avoid some programs to diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc/irq.h current/include/asm-sparc/irq.h --- reference/include/asm-sparc/irq.h 2003-10-21 11:16:12.000000000 -0700 +++ current/include/asm-sparc/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -184,4 +184,8 @@ extern struct sun4m_intregs 
*sun4m_inter #define SUN4M_INT_SBUS(x) (1 << (x+7)) #define SUN4M_INT_VME(x) (1 << (x)) +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc/kmap_types.h current/include/asm-sparc/kmap_types.h --- reference/include/asm-sparc/kmap_types.h 2004-01-15 10:41:17.000000000 -0800 +++ current/include/asm-sparc/kmap_types.h 2004-04-08 15:10:26.000000000 -0700 @@ -11,7 +11,6 @@ enum km_type { KM_BIO_DST_IRQ, KM_PTE0, KM_PTE1, - KM_PTE2, KM_IRQ0, KM_IRQ1, KM_SOFTIRQ0, diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc/pgtable.h current/include/asm-sparc/pgtable.h --- reference/include/asm-sparc/pgtable.h 2004-04-07 14:54:35.000000000 -0700 +++ current/include/asm-sparc/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -491,8 +491,6 @@ extern int io_remap_page_range(struct vm #include -typedef pte_t *pte_addr_t; - #endif /* !(__ASSEMBLY__) */ /* We provide our own get_unmapped_area to cope with VA holes for userland */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc/rmap.h current/include/asm-sparc/rmap.h --- reference/include/asm-sparc/rmap.h 2002-12-09 18:46:23.000000000 -0800 +++ current/include/asm-sparc/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _SPARC_RMAP_H -#define _SPARC_RMAP_H - -/* nothing to see, move along */ -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc64/ioctls.h current/include/asm-sparc64/ioctls.h --- reference/include/asm-sparc64/ioctls.h 2003-04-09 11:48:06.000000000 -0700 +++ current/include/asm-sparc64/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -16,6 +16,7 @@ #define TCSETS _IOW('T', 9, struct termios) #define TCSETSW _IOW('T', 10, struct termios) #define TCSETSF _IOW('T', 11, struct termios) +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ /* Note that all the ioctls that 
are not available in Linux have a * double underscore on the front to: a) avoid some programs to diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc64/irq.h current/include/asm-sparc64/irq.h --- reference/include/asm-sparc64/irq.h 2003-10-01 11:41:16.000000000 -0700 +++ current/include/asm-sparc64/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -150,4 +150,8 @@ static __inline__ unsigned long get_soft return retval; } +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc64/lockmeter.h current/include/asm-sparc64/lockmeter.h --- reference/include/asm-sparc64/lockmeter.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/asm-sparc64/lockmeter.h 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com) + * Copyright (C) 2003 David S. Miller (davem@redhat.com) + */ + +#ifndef _SPARC64_LOCKMETER_H +#define _SPARC64_LOCKMETER_H + +#include +#include +#include +#include + +/* Actually, this is not the CPU frequency but the system tick + * frequency which is good enough for lock metering.
+ */ +#define CPU_CYCLE_FREQUENCY (timer_tick_offset * HZ) +#define THIS_CPU_NUMBER smp_processor_id() + +#define PUT_INDEX(lock_ptr,indexv) (lock_ptr)->index = (indexv) +#define GET_INDEX(lock_ptr) (lock_ptr)->index + +#define PUT_RWINDEX(rwlock_ptr,indexv) (rwlock_ptr)->index = (indexv) +#define GET_RWINDEX(rwlock_ptr) (rwlock_ptr)->index +#define PUT_RW_CPU(rwlock_ptr,cpuv) (rwlock_ptr)->cpu = (cpuv) +#define GET_RW_CPU(rwlock_ptr) (rwlock_ptr)->cpu + +#define RWLOCK_READERS(rwlock_ptr) rwlock_readers(rwlock_ptr) + +extern inline int rwlock_readers(rwlock_t *rwlock_ptr) +{ + signed int tmp = rwlock_ptr->lock; + + if (tmp > 0) + return tmp; + else + return 0; +} + +#define RWLOCK_IS_WRITE_LOCKED(rwlock_ptr) ((signed int)((rwlock_ptr)->lock) < 0) +#define RWLOCK_IS_READ_LOCKED(rwlock_ptr) ((signed int)((rwlock_ptr)->lock) > 0) + +#define get_cycles64() get_cycles() + +#endif /* _SPARC64_LOCKMETER_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc64/pgtable.h current/include/asm-sparc64/pgtable.h --- reference/include/asm-sparc64/pgtable.h 2004-01-15 10:41:17.000000000 -0800 +++ current/include/asm-sparc64/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -384,8 +384,6 @@ extern unsigned long get_fb_unmapped_are extern void check_pgt_cache(void); -typedef pte_t *pte_addr_t; - #endif /* !(__ASSEMBLY__) */ #endif /* !(_SPARC64_PGTABLE_H) */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc64/rmap.h current/include/asm-sparc64/rmap.h --- reference/include/asm-sparc64/rmap.h 2002-12-09 18:45:54.000000000 -0800 +++ current/include/asm-sparc64/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _SPARC64_RMAP_H -#define _SPARC64_RMAP_H - -/* nothing to see, move along */ -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-sparc64/spinlock.h current/include/asm-sparc64/spinlock.h --- reference/include/asm-sparc64/spinlock.h 2004-03-11 14:35:26.000000000 -0800 +++ 
current/include/asm-sparc64/spinlock.h 2004-04-08 15:10:21.000000000 -0700 @@ -31,15 +31,23 @@ #ifndef CONFIG_DEBUG_SPINLOCK -typedef unsigned char spinlock_t; -#define SPIN_LOCK_UNLOCKED 0 +typedef struct { + unsigned char lock; + unsigned int index; +} spinlock_t; -#define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0) -#define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) +#ifdef CONFIG_LOCKMETER +#define SPIN_LOCK_UNLOCKED (spinlock_t) {0, 0} +#else +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } +#endif -#define spin_unlock_wait(lock) \ +#define spin_lock_init(__lock) do { *(__lock) = SPIN_LOCK_UNLOCKED; } while(0) +#define spin_is_locked(__lock) (*((volatile unsigned char *)(&((__lock)->lock))) != 0) + +#define spin_unlock_wait(__lock) \ do { membar("#LoadLoad"); \ -} while(*((volatile unsigned char *)lock)) +} while(*((volatile unsigned char *)(&(((spinlock_t *)__lock)->lock)))) static __inline__ void _raw_spin_lock(spinlock_t *lock) { @@ -110,17 +118,31 @@ extern int _spin_trylock (spinlock_t *lo #ifndef CONFIG_DEBUG_SPINLOCK -typedef unsigned int rwlock_t; -#define RW_LOCK_UNLOCKED 0 -#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) -#define rwlock_is_locked(x) (*(x) != RW_LOCK_UNLOCKED) +#ifdef CONFIG_LOCKMETER +typedef struct { + unsigned int lock; + unsigned int index; + unsigned int cpu; +} rwlock_t; +#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff } +#else +typedef struct { + unsigned int lock; +} rwlock_t; +#define RW_LOCK_UNLOCKED (rwlock_t) { 0 } +#endif + +#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) +#define rwlock_is_locked(x) ((x)->lock != 0) +extern int __read_trylock(rwlock_t *); extern void __read_lock(rwlock_t *); extern void __read_unlock(rwlock_t *); extern void __write_lock(rwlock_t *); extern void __write_unlock(rwlock_t *); extern int __write_trylock(rwlock_t *); +#define _raw_read_trylock(p) __read_trylock(p) #define _raw_read_lock(p) __read_lock(p) #define 
_raw_read_unlock(p) __read_unlock(p) #define _raw_write_lock(p) __write_lock(p) diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-um/irq.h current/include/asm-um/irq.h --- reference/include/asm-um/irq.h 2002-12-09 18:46:25.000000000 -0800 +++ current/include/asm-um/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -32,4 +32,9 @@ extern int um_request_irq(unsigned int i void (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char * devname, void *dev_id); + +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-um/pgtable.h current/include/asm-um/pgtable.h --- reference/include/asm-um/pgtable.h 2003-10-14 15:50:34.000000000 -0700 +++ current/include/asm-um/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -384,18 +384,6 @@ static inline pmd_t * pmd_offset(pgd_t * #define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) -#if defined(CONFIG_HIGHPTE) && defined(CONFIG_HIGHMEM4G) -typedef u32 pte_addr_t; -#endif - -#if defined(CONFIG_HIGHPTE) && defined(CONFIG_HIGHMEM64G) -typedef u64 pte_addr_t; -#endif - -#if !defined(CONFIG_HIGHPTE) -typedef pte_t *pte_addr_t; -#endif - #define update_mmu_cache(vma,address,pte) do ; while (0) /* Encode and de-code a swap entry */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-um/processor-generic.h current/include/asm-um/processor-generic.h --- reference/include/asm-um/processor-generic.h 2004-04-07 14:54:35.000000000 -0700 +++ current/include/asm-um/processor-generic.h 2004-04-09 13:27:12.000000000 -0700 @@ -119,7 +119,8 @@ extern unsigned long task_size; /* This decides where the kernel will search for a free chunk of vm * space during mmap's. 
*/ -#define TASK_UNMAPPED_BASE (0x40000000) +#define __TASK_UNMAPPED_BASE (0x40000000) +#define TASK_UNMAPPED_BASE (current->map_base) extern void start_thread(struct pt_regs *regs, unsigned long entry, unsigned long stack); diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-um/rmap.h current/include/asm-um/rmap.h --- reference/include/asm-um/rmap.h 2002-12-09 18:46:11.000000000 -0800 +++ current/include/asm-um/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,6 +0,0 @@ -#ifndef __UM_RMAP_H -#define __UM_RMAP_H - -#include "asm/arch/rmap.h" - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-v850/irq.h current/include/asm-v850/irq.h --- reference/include/asm-v850/irq.h 2003-06-05 14:39:21.000000000 -0700 +++ current/include/asm-v850/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -65,4 +65,8 @@ extern void disable_irq_nosync (unsigned #endif /* !__ASSEMBLY__ */ +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* __V850_IRQ_H__ */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-v850/pgtable.h current/include/asm-v850/pgtable.h --- reference/include/asm-v850/pgtable.h 2002-12-09 18:46:13.000000000 -0800 +++ current/include/asm-v850/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -5,8 +5,6 @@ #include -typedef pte_t *pte_addr_t; - #define pgd_present(pgd) (1) /* pages are always present on NO_MM */ #define pgd_none(pgd) (0) #define pgd_bad(pgd) (0) diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-v850/rmap.h current/include/asm-v850/rmap.h --- reference/include/asm-v850/rmap.h 2002-12-09 18:46:17.000000000 -0800 +++ current/include/asm-v850/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1 +0,0 @@ -/* Do not need anything here */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-x86_64/ioctls.h current/include/asm-x86_64/ioctls.h --- reference/include/asm-x86_64/ioctls.h 2003-04-09 11:48:07.000000000 -0700 +++ 
current/include/asm-x86_64/ioctls.h 2004-04-09 13:27:12.000000000 -0700 @@ -48,6 +48,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ #define FIOCLEX 0x5451 diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-x86_64/irq.h current/include/asm-x86_64/irq.h --- reference/include/asm-x86_64/irq.h 2004-01-15 10:41:18.000000000 -0800 +++ current/include/asm-x86_64/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -53,4 +53,8 @@ extern int can_request_irq(unsigned int, #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ #endif +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + #endif /* _ASM_IRQ_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-x86_64/pgtable.h current/include/asm-x86_64/pgtable.h --- reference/include/asm-x86_64/pgtable.h 2004-03-11 14:35:28.000000000 -0800 +++ current/include/asm-x86_64/pgtable.h 2004-04-08 15:10:26.000000000 -0700 @@ -390,8 +390,6 @@ extern inline pte_t pte_modify(pte_t pte #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -typedef pte_t *pte_addr_t; - #endif /* !__ASSEMBLY__ */ extern int kern_addr_valid(unsigned long addr); diff -purN -X /home/mbligh/.diff.exclude reference/include/asm-x86_64/rmap.h current/include/asm-x86_64/rmap.h --- reference/include/asm-x86_64/rmap.h 2002-12-09 18:46:16.000000000 -0800 +++ current/include/asm-x86_64/rmap.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,7 +0,0 @@ -#ifndef _X8664_RMAP_H -#define _X8664_RMAP_H - -/* nothing to see, move along */ -#include - -#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/aio.h 
current/include/linux/aio.h --- reference/include/linux/aio.h 2004-04-07 14:54:36.000000000 -0700 +++ current/include/linux/aio.h 2004-04-09 13:23:20.000000000 -0700 @@ -29,21 +29,26 @@ struct kioctx; #define KIF_LOCKED 0 #define KIF_KICKED 1 #define KIF_CANCELLED 2 +#define KIF_SYNCED 3 #define kiocbTryLock(iocb) test_and_set_bit(KIF_LOCKED, &(iocb)->ki_flags) #define kiocbTryKick(iocb) test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags) +#define kiocbTrySync(iocb) test_and_set_bit(KIF_SYNCED, &(iocb)->ki_flags) #define kiocbSetLocked(iocb) set_bit(KIF_LOCKED, &(iocb)->ki_flags) #define kiocbSetKicked(iocb) set_bit(KIF_KICKED, &(iocb)->ki_flags) #define kiocbSetCancelled(iocb) set_bit(KIF_CANCELLED, &(iocb)->ki_flags) +#define kiocbSetSynced(iocb) set_bit(KIF_SYNCED, &(iocb)->ki_flags) #define kiocbClearLocked(iocb) clear_bit(KIF_LOCKED, &(iocb)->ki_flags) #define kiocbClearKicked(iocb) clear_bit(KIF_KICKED, &(iocb)->ki_flags) #define kiocbClearCancelled(iocb) clear_bit(KIF_CANCELLED, &(iocb)->ki_flags) +#define kiocbClearSynced(iocb) clear_bit(KIF_SYNCED, &(iocb)->ki_flags) #define kiocbIsLocked(iocb) test_bit(KIF_LOCKED, &(iocb)->ki_flags) #define kiocbIsKicked(iocb) test_bit(KIF_KICKED, &(iocb)->ki_flags) #define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags) +#define kiocbIsSynced(iocb) test_bit(KIF_SYNCED, &(iocb)->ki_flags) struct kiocb { struct list_head ki_run_list; @@ -54,7 +59,7 @@ struct kiocb { struct file *ki_filp; struct kioctx *ki_ctx; /* may be NULL for sync ops */ int (*ki_cancel)(struct kiocb *, struct io_event *); - long (*ki_retry)(struct kiocb *); + ssize_t (*ki_retry)(struct kiocb *); struct list_head ki_list; /* the aio core uses this * for cancellation */ @@ -63,6 +68,16 @@ struct kiocb { __u64 ki_user_data; /* user's data for completion */ loff_t ki_pos; + /* State that we remember to be able to restart/retry */ + unsigned short ki_opcode; + size_t ki_nbytes; /* copy of iocb->aio_nbytes */ + char *ki_buf; /* remaining 
iocb->aio_buf */ + size_t ki_left; /* remaining bytes */ + wait_queue_t ki_wait; + long ki_retried; /* just for testing */ + long ki_kicked; /* just for testing */ + long ki_queued; /* just for testing */ + char private[KIOCB_PRIVATE_SIZE]; }; @@ -77,6 +92,8 @@ struct kiocb { (x)->ki_ctx = &tsk->active_mm->default_kioctx; \ (x)->ki_cancel = NULL; \ (x)->ki_user_obj = tsk; \ + (x)->ki_user_data = 0; \ + init_wait((&(x)->ki_wait)); \ } while (0) #define AIO_RING_MAGIC 0xa10a10a1 @@ -159,6 +176,17 @@ int FASTCALL(io_submit_one(struct kioctx #define get_ioctx(kioctx) do { if (unlikely(atomic_read(&(kioctx)->users) <= 0)) BUG(); atomic_inc(&(kioctx)->users); } while (0) #define put_ioctx(kioctx) do { if (unlikely(atomic_dec_and_test(&(kioctx)->users))) __put_ioctx(kioctx); else if (unlikely(atomic_read(&(kioctx)->users) < 0)) BUG(); } while (0) +#define in_aio() !is_sync_wait(current->io_wait) +/* may be used for debugging */ +#define warn_if_async() if (in_aio()) {\ + printk(KERN_ERR "%s(%s:%d) called in async context!\n", \ + __FUNCTION__, __FILE__, __LINE__); \ + dump_stack(); \ + } + +#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait) +#define is_retried_kiocb(iocb) ((iocb)->ki_retried > 1) + #include static inline struct kiocb *list_kiocb(struct list_head *h) @@ -170,4 +198,5 @@ static inline struct kiocb *list_kiocb(s extern atomic_t aio_nr; extern unsigned aio_max_nr; +extern ssize_t generic_aio_poll(struct kiocb *, unsigned); #endif /* __LINUX__AIO_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/aio_abi.h current/include/linux/aio_abi.h --- reference/include/linux/aio_abi.h 2002-12-09 18:45:44.000000000 -0800 +++ current/include/linux/aio_abi.h 2004-04-09 13:23:20.000000000 -0700 @@ -38,8 +38,8 @@ enum { IOCB_CMD_FDSYNC = 3, /* These two are experimental. 
* IOCB_CMD_PREADX = 4, - * IOCB_CMD_POLL = 5, */ + IOCB_CMD_POLL = 5, IOCB_CMD_NOOP = 6, }; diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/buffer_head.h current/include/linux/buffer_head.h --- reference/include/linux/buffer_head.h 2004-02-04 16:24:32.000000000 -0800 +++ current/include/linux/buffer_head.h 2004-04-09 13:23:18.000000000 -0700 @@ -206,12 +206,6 @@ int nobh_prepare_write(struct page*, uns int nobh_commit_write(struct file *, struct page *, unsigned, unsigned); int nobh_truncate_page(struct address_space *, loff_t); -#define OSYNC_METADATA (1<<0) -#define OSYNC_DATA (1<<1) -#define OSYNC_INODE (1<<2) -int generic_osync_inode(struct inode *, struct address_space *, int); - - /* * inline definitions */ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/compiler-gcc.h current/include/linux/compiler-gcc.h --- reference/include/linux/compiler-gcc.h 2003-10-01 11:48:25.000000000 -0700 +++ current/include/linux/compiler-gcc.h 2004-04-08 15:10:21.000000000 -0700 @@ -13,5 +13,5 @@ shouldn't recognize the original var, and make assumptions about it */ #define RELOC_HIDE(ptr, off) \ ({ unsigned long __ptr; \ - __asm__ ("" : "=g"(__ptr) : "0"(ptr)); \ + __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ (typeof(ptr)) (__ptr + (off)); }) diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/compiler-gcc3.h current/include/linux/compiler-gcc3.h --- reference/include/linux/compiler-gcc3.h 2004-03-11 14:35:28.000000000 -0800 +++ current/include/linux/compiler-gcc3.h 2004-04-08 15:10:24.000000000 -0700 @@ -3,7 +3,7 @@ /* These definitions are for GCC v3.x. 
*/ #include -#if __GNUC_MINOR__ >= 1 +#if __GNUC_MINOR__ >= 1 && __GNUC_MINOR__ < 4 # define inline __inline__ __attribute__((always_inline)) # define __inline__ __inline__ __attribute__((always_inline)) # define __inline __inline__ __attribute__((always_inline)) diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/config.h current/include/linux/config.h --- reference/include/linux/config.h 2002-12-09 18:46:24.000000000 -0800 +++ current/include/linux/config.h 2004-04-08 15:10:20.000000000 -0700 @@ -2,5 +2,8 @@ #define _LINUX_CONFIG_H #include +#if defined(__i386__) && !defined(IN_BOOTLOADER) +#include +#endif #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/dwarf2-lang.h current/include/linux/dwarf2-lang.h --- reference/include/linux/dwarf2-lang.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/linux/dwarf2-lang.h 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,132 @@ +#ifndef DWARF2_LANG +#define DWARF2_LANG +#include + +/* + * This is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2, or (at your option) any later + * version. + */ +/* + * This file defines macros that allow generation of DWARF debug records + * for asm files. This file is platform independent. Register numbers + * (which are about the only thing that is platform dependent) are to be + * supplied by a platform defined file. + */ +#define DWARF_preamble() .section .debug_frame,"",@progbits +/* + * This macro starts a debug frame section. The debug_frame describes + * where to find the registers that the enclosing function saved on + * entry. + * + * ORD is use by the label generator and should be the same as what is + * passed to CFI_postamble. + * + * pc, pc register gdb ordinal. + * + * code_align this is the factor used to define locations or regions + * where the given definitions apply. 
If you use labels to define these + * this should be 1. + * + * data_align this is the factor used to define register offsets. If + * you use struct offset, this should be the size of the register in + * bytes or the negative of that. This is how it is used: you will + * define a register as the reference register, say the stack pointer, + * then you will say where a register is located relative to this + * reference registers value, say 40 for register 3 (the gdb register + * number). The <40> will be multiplied by to define the + * byte offset of the given register (3, in this example). So if your + * <40> is the byte offset and the reference register points at the + * begining, you would want 1 for the data_offset. If <40> was the 40th + * 4-byte element in that structure you would want 4. And if your + * reference register points at the end of the structure you would want + * a negative data_align value(and you would have to do other math as + * well). + */ + +#define CFI_preamble(ORD, pc, code_align, data_align) \ +.section .debug_frame,"",@progbits ; \ +frame/**/_/**/ORD: \ + .long end/**/_/**/ORD-start/**/_/**/ORD; \ +start/**/_/**/ORD: \ + .long DW_CIE_ID; \ + .byte DW_CIE_VERSION; \ + .byte 0 ; \ + .uleb128 code_align; \ + .sleb128 data_align; \ + .byte pc; + +/* + * After the above macro and prior to the CFI_postamble, you need to + * define the initial state. This starts with defining the reference + * register and, usually the pc. Here are some helper macros: + */ + +#define CFA_define_reference(reg, offset) \ + .byte DW_CFA_def_cfa; \ + .uleb128 reg; \ + .uleb128 (offset); + +#define CFA_define_offset(reg, offset) \ + .byte (DW_CFA_offset + reg); \ + .uleb128 (offset); + +#define CFI_postamble(ORD) \ + .align 4; \ +end/**/_/**/ORD: +/* + * So now your code pushs stuff on the stack, you need a new location + * and the rules for what to do. This starts a running description of + * the call frame. 
You need to describe what changes with respect to + * the call registers as the location of the pc moves through the code. + * The following builds an FDE (fram descriptor entry?). Like the + * above, it has a preamble and a postamble. It also is tied to the CFI + * above. + * The first entry after the preamble must be the location in the code + * that the call frame is being described for. + */ +#define FDE_preamble(ORD, fde_no, initial_address, length) \ + .long FDE_end/**/_/**/fde_no-FDE_start/**/_/**/fde_no; \ +FDE_start/**/_/**/fde_no: \ + .long frame/**/_/**/ORD; \ + .long initial_address; \ + .long length; + +#define FDE_postamble(fde_no) \ + .align 4; \ +FDE_end/**/_/**/fde_no: +/* + * That done, you can now add registers, subtract registers, move the + * reference and even change the reference. You can also define a new + * area of code the info applies to. For discontinuous bits you should + * start a new FDE. You may have as many as you like. + */ + +/* + * To advance the address by + */ + +#define FDE_advance(bytes) \ + .byte DW_CFA_advance_loc4 \ + .long bytes + + + +/* + * With the above you can define all the register locations. But + * suppose the reference register moves... Takes the new offset NOT an + * increment. This is how esp is tracked if it is not saved. + */ + +#define CFA_define_cfa_offset(offset) \ + .byte $DW_CFA_def_cfa_offset; \ + .uleb128 (offset); +/* + * Or suppose you want to use a different reference register... + */ +#define CFA_define_cfa_register(reg) \ + .byte DW_CFA_def_cfa_register; \ + .uleb128 reg; + +#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/dwarf2.h current/include/linux/dwarf2.h --- reference/include/linux/dwarf2.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/linux/dwarf2.h 2004-04-08 15:10:20.000000000 -0700 @@ -0,0 +1,738 @@ +/* Declarations and definitions of codes relating to the DWARF2 symbolic + debugging information format. 
+ Copyright (C) 1992, 1993, 1995, 1996, 1997, 1999, 2000, 2001, 2002 + Free Software Foundation, Inc. + + Written by Gary Funck (gary@intrepid.com) The Ada Joint Program + Office (AJPO), Florida State Unviversity and Silicon Graphics Inc. + provided support for this effort -- June 21, 1995. + + Derived from the DWARF 1 implementation written by Ron Guilmette + (rfg@netcom.com), November 1990. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 59 Temple Place - Suite 330, Boston, MA + 02111-1307, USA. */ + +/* This file is derived from the DWARF specification (a public document) + Revision 2.0.0 (July 27, 1993) developed by the UNIX International + Programming Languages Special Interest Group (UI/PLSIG) and distributed + by UNIX International. Copies of this specification are available from + UNIX International, 20 Waterview Boulevard, Parsippany, NJ, 07054. + + This file also now contains definitions from the DWARF 3 specification. */ + +/* This file is shared between GCC and GDB, and should not contain + prototypes. */ + +#ifndef _ELF_DWARF2_H +#define _ELF_DWARF2_H + +/* Structure found in the .debug_line section. 
*/ +#ifndef __ASSEMBLY__ +typedef struct +{ + unsigned char li_length [4]; + unsigned char li_version [2]; + unsigned char li_prologue_length [4]; + unsigned char li_min_insn_length [1]; + unsigned char li_default_is_stmt [1]; + unsigned char li_line_base [1]; + unsigned char li_line_range [1]; + unsigned char li_opcode_base [1]; +} +DWARF2_External_LineInfo; + +typedef struct +{ + unsigned long li_length; + unsigned short li_version; + unsigned int li_prologue_length; + unsigned char li_min_insn_length; + unsigned char li_default_is_stmt; + int li_line_base; + unsigned char li_line_range; + unsigned char li_opcode_base; +} +DWARF2_Internal_LineInfo; + +/* Structure found in .debug_pubnames section. */ +typedef struct +{ + unsigned char pn_length [4]; + unsigned char pn_version [2]; + unsigned char pn_offset [4]; + unsigned char pn_size [4]; +} +DWARF2_External_PubNames; + +typedef struct +{ + unsigned long pn_length; + unsigned short pn_version; + unsigned long pn_offset; + unsigned long pn_size; +} +DWARF2_Internal_PubNames; + +/* Structure found in .debug_info section. 
*/ +typedef struct +{ + unsigned char cu_length [4]; + unsigned char cu_version [2]; + unsigned char cu_abbrev_offset [4]; + unsigned char cu_pointer_size [1]; +} +DWARF2_External_CompUnit; + +typedef struct +{ + unsigned long cu_length; + unsigned short cu_version; + unsigned long cu_abbrev_offset; + unsigned char cu_pointer_size; +} +DWARF2_Internal_CompUnit; + +typedef struct +{ + unsigned char ar_length [4]; + unsigned char ar_version [2]; + unsigned char ar_info_offset [4]; + unsigned char ar_pointer_size [1]; + unsigned char ar_segment_size [1]; +} +DWARF2_External_ARange; + +typedef struct +{ + unsigned long ar_length; + unsigned short ar_version; + unsigned long ar_info_offset; + unsigned char ar_pointer_size; + unsigned char ar_segment_size; +} +DWARF2_Internal_ARange; + +#define ENUM(name) enum name { +#define IF_NOT_ASM(a) a +#define COMMA , +#else +#define ENUM(name) +#define IF_NOT_ASM(a) +#define COMMA + +#endif + +/* Tag names and codes. */ +ENUM(dwarf_tag) + + DW_TAG_padding = 0x00 COMMA + DW_TAG_array_type = 0x01 COMMA + DW_TAG_class_type = 0x02 COMMA + DW_TAG_entry_point = 0x03 COMMA + DW_TAG_enumeration_type = 0x04 COMMA + DW_TAG_formal_parameter = 0x05 COMMA + DW_TAG_imported_declaration = 0x08 COMMA + DW_TAG_label = 0x0a COMMA + DW_TAG_lexical_block = 0x0b COMMA + DW_TAG_member = 0x0d COMMA + DW_TAG_pointer_type = 0x0f COMMA + DW_TAG_reference_type = 0x10 COMMA + DW_TAG_compile_unit = 0x11 COMMA + DW_TAG_string_type = 0x12 COMMA + DW_TAG_structure_type = 0x13 COMMA + DW_TAG_subroutine_type = 0x15 COMMA + DW_TAG_typedef = 0x16 COMMA + DW_TAG_union_type = 0x17 COMMA + DW_TAG_unspecified_parameters = 0x18 COMMA + DW_TAG_variant = 0x19 COMMA + DW_TAG_common_block = 0x1a COMMA + DW_TAG_common_inclusion = 0x1b COMMA + DW_TAG_inheritance = 0x1c COMMA + DW_TAG_inlined_subroutine = 0x1d COMMA + DW_TAG_module = 0x1e COMMA + DW_TAG_ptr_to_member_type = 0x1f COMMA + DW_TAG_set_type = 0x20 COMMA + DW_TAG_subrange_type = 0x21 COMMA + DW_TAG_with_stmt = 0x22 
COMMA + DW_TAG_access_declaration = 0x23 COMMA + DW_TAG_base_type = 0x24 COMMA + DW_TAG_catch_block = 0x25 COMMA + DW_TAG_const_type = 0x26 COMMA + DW_TAG_constant = 0x27 COMMA + DW_TAG_enumerator = 0x28 COMMA + DW_TAG_file_type = 0x29 COMMA + DW_TAG_friend = 0x2a COMMA + DW_TAG_namelist = 0x2b COMMA + DW_TAG_namelist_item = 0x2c COMMA + DW_TAG_packed_type = 0x2d COMMA + DW_TAG_subprogram = 0x2e COMMA + DW_TAG_template_type_param = 0x2f COMMA + DW_TAG_template_value_param = 0x30 COMMA + DW_TAG_thrown_type = 0x31 COMMA + DW_TAG_try_block = 0x32 COMMA + DW_TAG_variant_part = 0x33 COMMA + DW_TAG_variable = 0x34 COMMA + DW_TAG_volatile_type = 0x35 COMMA + /* DWARF 3. */ + DW_TAG_dwarf_procedure = 0x36 COMMA + DW_TAG_restrict_type = 0x37 COMMA + DW_TAG_interface_type = 0x38 COMMA + DW_TAG_namespace = 0x39 COMMA + DW_TAG_imported_module = 0x3a COMMA + DW_TAG_unspecified_type = 0x3b COMMA + DW_TAG_partial_unit = 0x3c COMMA + DW_TAG_imported_unit = 0x3d COMMA + /* SGI/MIPS Extensions. */ + DW_TAG_MIPS_loop = 0x4081 COMMA + /* GNU extensions. */ + DW_TAG_format_label = 0x4101 COMMA /* For FORTRAN 77 and Fortran 90. */ + DW_TAG_function_template = 0x4102 COMMA /* For C++. */ + DW_TAG_class_template = 0x4103 COMMA /* For C++. */ + DW_TAG_GNU_BINCL = 0x4104 COMMA + DW_TAG_GNU_EINCL = 0x4105 COMMA + /* Extensions for UPC. See: http://upc.gwu.edu/~upc. */ + DW_TAG_upc_shared_type = 0x8765 COMMA + DW_TAG_upc_strict_type = 0x8766 COMMA + DW_TAG_upc_relaxed_type = 0x8767 +IF_NOT_ASM(};) + +#define DW_TAG_lo_user 0x4080 +#define DW_TAG_hi_user 0xffff + +/* Flag that tells whether entry has a child or not. */ +#define DW_children_no 0 +#define DW_children_yes 1 + +/* Form names and codes. 
*/ +ENUM(dwarf_form) + + DW_FORM_addr = 0x01 COMMA + DW_FORM_block2 = 0x03 COMMA + DW_FORM_block4 = 0x04 COMMA + DW_FORM_data2 = 0x05 COMMA + DW_FORM_data4 = 0x06 COMMA + DW_FORM_data8 = 0x07 COMMA + DW_FORM_string = 0x08 COMMA + DW_FORM_block = 0x09 COMMA + DW_FORM_block1 = 0x0a COMMA + DW_FORM_data1 = 0x0b COMMA + DW_FORM_flag = 0x0c COMMA + DW_FORM_sdata = 0x0d COMMA + DW_FORM_strp = 0x0e COMMA + DW_FORM_udata = 0x0f COMMA + DW_FORM_ref_addr = 0x10 COMMA + DW_FORM_ref1 = 0x11 COMMA + DW_FORM_ref2 = 0x12 COMMA + DW_FORM_ref4 = 0x13 COMMA + DW_FORM_ref8 = 0x14 COMMA + DW_FORM_ref_udata = 0x15 COMMA + DW_FORM_indirect = 0x16 +IF_NOT_ASM(};) + +/* Attribute names and codes. */ + +ENUM(dwarf_attribute) + + DW_AT_sibling = 0x01 COMMA + DW_AT_location = 0x02 COMMA + DW_AT_name = 0x03 COMMA + DW_AT_ordering = 0x09 COMMA + DW_AT_subscr_data = 0x0a COMMA + DW_AT_byte_size = 0x0b COMMA + DW_AT_bit_offset = 0x0c COMMA + DW_AT_bit_size = 0x0d COMMA + DW_AT_element_list = 0x0f COMMA + DW_AT_stmt_list = 0x10 COMMA + DW_AT_low_pc = 0x11 COMMA + DW_AT_high_pc = 0x12 COMMA + DW_AT_language = 0x13 COMMA + DW_AT_member = 0x14 COMMA + DW_AT_discr = 0x15 COMMA + DW_AT_discr_value = 0x16 COMMA + DW_AT_visibility = 0x17 COMMA + DW_AT_import = 0x18 COMMA + DW_AT_string_length = 0x19 COMMA + DW_AT_common_reference = 0x1a COMMA + DW_AT_comp_dir = 0x1b COMMA + DW_AT_const_value = 0x1c COMMA + DW_AT_containing_type = 0x1d COMMA + DW_AT_default_value = 0x1e COMMA + DW_AT_inline = 0x20 COMMA + DW_AT_is_optional = 0x21 COMMA + DW_AT_lower_bound = 0x22 COMMA + DW_AT_producer = 0x25 COMMA + DW_AT_prototyped = 0x27 COMMA + DW_AT_return_addr = 0x2a COMMA + DW_AT_start_scope = 0x2c COMMA + DW_AT_stride_size = 0x2e COMMA + DW_AT_upper_bound = 0x2f COMMA + DW_AT_abstract_origin = 0x31 COMMA + DW_AT_accessibility = 0x32 COMMA + DW_AT_address_class = 0x33 COMMA + DW_AT_artificial = 0x34 COMMA + DW_AT_base_types = 0x35 COMMA + DW_AT_calling_convention = 0x36 COMMA + DW_AT_count = 0x37 COMMA + 
DW_AT_data_member_location = 0x38 COMMA + DW_AT_decl_column = 0x39 COMMA + DW_AT_decl_file = 0x3a COMMA + DW_AT_decl_line = 0x3b COMMA + DW_AT_declaration = 0x3c COMMA + DW_AT_discr_list = 0x3d COMMA + DW_AT_encoding = 0x3e COMMA + DW_AT_external = 0x3f COMMA + DW_AT_frame_base = 0x40 COMMA + DW_AT_friend = 0x41 COMMA + DW_AT_identifier_case = 0x42 COMMA + DW_AT_macro_info = 0x43 COMMA + DW_AT_namelist_items = 0x44 COMMA + DW_AT_priority = 0x45 COMMA + DW_AT_segment = 0x46 COMMA + DW_AT_specification = 0x47 COMMA + DW_AT_static_link = 0x48 COMMA + DW_AT_type = 0x49 COMMA + DW_AT_use_location = 0x4a COMMA + DW_AT_variable_parameter = 0x4b COMMA + DW_AT_virtuality = 0x4c COMMA + DW_AT_vtable_elem_location = 0x4d COMMA + /* DWARF 3 values. */ + DW_AT_allocated = 0x4e COMMA + DW_AT_associated = 0x4f COMMA + DW_AT_data_location = 0x50 COMMA + DW_AT_stride = 0x51 COMMA + DW_AT_entry_pc = 0x52 COMMA + DW_AT_use_UTF8 = 0x53 COMMA + DW_AT_extension = 0x54 COMMA + DW_AT_ranges = 0x55 COMMA + DW_AT_trampoline = 0x56 COMMA + DW_AT_call_column = 0x57 COMMA + DW_AT_call_file = 0x58 COMMA + DW_AT_call_line = 0x59 COMMA + /* SGI/MIPS extensions. */ + DW_AT_MIPS_fde = 0x2001 COMMA + DW_AT_MIPS_loop_begin = 0x2002 COMMA + DW_AT_MIPS_tail_loop_begin = 0x2003 COMMA + DW_AT_MIPS_epilog_begin = 0x2004 COMMA + DW_AT_MIPS_loop_unroll_factor = 0x2005 COMMA + DW_AT_MIPS_software_pipeline_depth = 0x2006 COMMA + DW_AT_MIPS_linkage_name = 0x2007 COMMA + DW_AT_MIPS_stride = 0x2008 COMMA + DW_AT_MIPS_abstract_name = 0x2009 COMMA + DW_AT_MIPS_clone_origin = 0x200a COMMA + DW_AT_MIPS_has_inlines = 0x200b COMMA + /* GNU extensions. */ + DW_AT_sf_names = 0x2101 COMMA + DW_AT_src_info = 0x2102 COMMA + DW_AT_mac_info = 0x2103 COMMA + DW_AT_src_coords = 0x2104 COMMA + DW_AT_body_begin = 0x2105 COMMA + DW_AT_body_end = 0x2106 COMMA + DW_AT_GNU_vector = 0x2107 COMMA + /* VMS extensions. */ + DW_AT_VMS_rtnbeg_pd_address = 0x2201 COMMA + /* UPC extension. 
*/ + DW_AT_upc_threads_scaled = 0x3210 +IF_NOT_ASM(};) + +#define DW_AT_lo_user 0x2000 /* Implementation-defined range start. */ +#define DW_AT_hi_user 0x3ff0 /* Implementation-defined range end. */ + +/* Location atom names and codes. */ +ENUM(dwarf_location_atom) + + DW_OP_addr = 0x03 COMMA + DW_OP_deref = 0x06 COMMA + DW_OP_const1u = 0x08 COMMA + DW_OP_const1s = 0x09 COMMA + DW_OP_const2u = 0x0a COMMA + DW_OP_const2s = 0x0b COMMA + DW_OP_const4u = 0x0c COMMA + DW_OP_const4s = 0x0d COMMA + DW_OP_const8u = 0x0e COMMA + DW_OP_const8s = 0x0f COMMA + DW_OP_constu = 0x10 COMMA + DW_OP_consts = 0x11 COMMA + DW_OP_dup = 0x12 COMMA + DW_OP_drop = 0x13 COMMA + DW_OP_over = 0x14 COMMA + DW_OP_pick = 0x15 COMMA + DW_OP_swap = 0x16 COMMA + DW_OP_rot = 0x17 COMMA + DW_OP_xderef = 0x18 COMMA + DW_OP_abs = 0x19 COMMA + DW_OP_and = 0x1a COMMA + DW_OP_div = 0x1b COMMA + DW_OP_minus = 0x1c COMMA + DW_OP_mod = 0x1d COMMA + DW_OP_mul = 0x1e COMMA + DW_OP_neg = 0x1f COMMA + DW_OP_not = 0x20 COMMA + DW_OP_or = 0x21 COMMA + DW_OP_plus = 0x22 COMMA + DW_OP_plus_uconst = 0x23 COMMA + DW_OP_shl = 0x24 COMMA + DW_OP_shr = 0x25 COMMA + DW_OP_shra = 0x26 COMMA + DW_OP_xor = 0x27 COMMA + DW_OP_bra = 0x28 COMMA + DW_OP_eq = 0x29 COMMA + DW_OP_ge = 0x2a COMMA + DW_OP_gt = 0x2b COMMA + DW_OP_le = 0x2c COMMA + DW_OP_lt = 0x2d COMMA + DW_OP_ne = 0x2e COMMA + DW_OP_skip = 0x2f COMMA + DW_OP_lit0 = 0x30 COMMA + DW_OP_lit1 = 0x31 COMMA + DW_OP_lit2 = 0x32 COMMA + DW_OP_lit3 = 0x33 COMMA + DW_OP_lit4 = 0x34 COMMA + DW_OP_lit5 = 0x35 COMMA + DW_OP_lit6 = 0x36 COMMA + DW_OP_lit7 = 0x37 COMMA + DW_OP_lit8 = 0x38 COMMA + DW_OP_lit9 = 0x39 COMMA + DW_OP_lit10 = 0x3a COMMA + DW_OP_lit11 = 0x3b COMMA + DW_OP_lit12 = 0x3c COMMA + DW_OP_lit13 = 0x3d COMMA + DW_OP_lit14 = 0x3e COMMA + DW_OP_lit15 = 0x3f COMMA + DW_OP_lit16 = 0x40 COMMA + DW_OP_lit17 = 0x41 COMMA + DW_OP_lit18 = 0x42 COMMA + DW_OP_lit19 = 0x43 COMMA + DW_OP_lit20 = 0x44 COMMA + DW_OP_lit21 = 0x45 COMMA + DW_OP_lit22 = 0x46 COMMA + DW_OP_lit23 = 
0x47 COMMA + DW_OP_lit24 = 0x48 COMMA + DW_OP_lit25 = 0x49 COMMA + DW_OP_lit26 = 0x4a COMMA + DW_OP_lit27 = 0x4b COMMA + DW_OP_lit28 = 0x4c COMMA + DW_OP_lit29 = 0x4d COMMA + DW_OP_lit30 = 0x4e COMMA + DW_OP_lit31 = 0x4f COMMA + DW_OP_reg0 = 0x50 COMMA + DW_OP_reg1 = 0x51 COMMA + DW_OP_reg2 = 0x52 COMMA + DW_OP_reg3 = 0x53 COMMA + DW_OP_reg4 = 0x54 COMMA + DW_OP_reg5 = 0x55 COMMA + DW_OP_reg6 = 0x56 COMMA + DW_OP_reg7 = 0x57 COMMA + DW_OP_reg8 = 0x58 COMMA + DW_OP_reg9 = 0x59 COMMA + DW_OP_reg10 = 0x5a COMMA + DW_OP_reg11 = 0x5b COMMA + DW_OP_reg12 = 0x5c COMMA + DW_OP_reg13 = 0x5d COMMA + DW_OP_reg14 = 0x5e COMMA + DW_OP_reg15 = 0x5f COMMA + DW_OP_reg16 = 0x60 COMMA + DW_OP_reg17 = 0x61 COMMA + DW_OP_reg18 = 0x62 COMMA + DW_OP_reg19 = 0x63 COMMA + DW_OP_reg20 = 0x64 COMMA + DW_OP_reg21 = 0x65 COMMA + DW_OP_reg22 = 0x66 COMMA + DW_OP_reg23 = 0x67 COMMA + DW_OP_reg24 = 0x68 COMMA + DW_OP_reg25 = 0x69 COMMA + DW_OP_reg26 = 0x6a COMMA + DW_OP_reg27 = 0x6b COMMA + DW_OP_reg28 = 0x6c COMMA + DW_OP_reg29 = 0x6d COMMA + DW_OP_reg30 = 0x6e COMMA + DW_OP_reg31 = 0x6f COMMA + DW_OP_breg0 = 0x70 COMMA + DW_OP_breg1 = 0x71 COMMA + DW_OP_breg2 = 0x72 COMMA + DW_OP_breg3 = 0x73 COMMA + DW_OP_breg4 = 0x74 COMMA + DW_OP_breg5 = 0x75 COMMA + DW_OP_breg6 = 0x76 COMMA + DW_OP_breg7 = 0x77 COMMA + DW_OP_breg8 = 0x78 COMMA + DW_OP_breg9 = 0x79 COMMA + DW_OP_breg10 = 0x7a COMMA + DW_OP_breg11 = 0x7b COMMA + DW_OP_breg12 = 0x7c COMMA + DW_OP_breg13 = 0x7d COMMA + DW_OP_breg14 = 0x7e COMMA + DW_OP_breg15 = 0x7f COMMA + DW_OP_breg16 = 0x80 COMMA + DW_OP_breg17 = 0x81 COMMA + DW_OP_breg18 = 0x82 COMMA + DW_OP_breg19 = 0x83 COMMA + DW_OP_breg20 = 0x84 COMMA + DW_OP_breg21 = 0x85 COMMA + DW_OP_breg22 = 0x86 COMMA + DW_OP_breg23 = 0x87 COMMA + DW_OP_breg24 = 0x88 COMMA + DW_OP_breg25 = 0x89 COMMA + DW_OP_breg26 = 0x8a COMMA + DW_OP_breg27 = 0x8b COMMA + DW_OP_breg28 = 0x8c COMMA + DW_OP_breg29 = 0x8d COMMA + DW_OP_breg30 = 0x8e COMMA + DW_OP_breg31 = 0x8f COMMA + DW_OP_regx = 0x90 COMMA + 
DW_OP_fbreg = 0x91 COMMA + DW_OP_bregx = 0x92 COMMA + DW_OP_piece = 0x93 COMMA + DW_OP_deref_size = 0x94 COMMA + DW_OP_xderef_size = 0x95 COMMA + DW_OP_nop = 0x96 COMMA + /* DWARF 3 extensions. */ + DW_OP_push_object_address = 0x97 COMMA + DW_OP_call2 = 0x98 COMMA + DW_OP_call4 = 0x99 COMMA + DW_OP_call_ref = 0x9a COMMA + /* GNU extensions. */ + DW_OP_GNU_push_tls_address = 0xe0 +IF_NOT_ASM(};) + +#define DW_OP_lo_user 0xe0 /* Implementation-defined range start. */ +#define DW_OP_hi_user 0xff /* Implementation-defined range end. */ + +/* Type encodings. */ +ENUM(dwarf_type) + + DW_ATE_void = 0x0 COMMA + DW_ATE_address = 0x1 COMMA + DW_ATE_boolean = 0x2 COMMA + DW_ATE_complex_float = 0x3 COMMA + DW_ATE_float = 0x4 COMMA + DW_ATE_signed = 0x5 COMMA + DW_ATE_signed_char = 0x6 COMMA + DW_ATE_unsigned = 0x7 COMMA + DW_ATE_unsigned_char = 0x8 COMMA + /* DWARF 3. */ + DW_ATE_imaginary_float = 0x9 +IF_NOT_ASM(};) + +#define DW_ATE_lo_user 0x80 +#define DW_ATE_hi_user 0xff + +/* Array ordering names and codes. */ +ENUM(dwarf_array_dim_ordering) + + DW_ORD_row_major = 0 COMMA + DW_ORD_col_major = 1 +IF_NOT_ASM(};) + +/* Access attribute. */ +ENUM(dwarf_access_attribute) + + DW_ACCESS_public = 1 COMMA + DW_ACCESS_protected = 2 COMMA + DW_ACCESS_private = 3 +IF_NOT_ASM(};) + +/* Visibility. */ +ENUM(dwarf_visibility_attribute) + + DW_VIS_local = 1 COMMA + DW_VIS_exported = 2 COMMA + DW_VIS_qualified = 3 +IF_NOT_ASM(};) + +/* Virtuality. */ +ENUM(dwarf_virtuality_attribute) + + DW_VIRTUALITY_none = 0 COMMA + DW_VIRTUALITY_virtual = 1 COMMA + DW_VIRTUALITY_pure_virtual = 2 +IF_NOT_ASM(};) + +/* Case sensitivity. */ +ENUM(dwarf_id_case) + + DW_ID_case_sensitive = 0 COMMA + DW_ID_up_case = 1 COMMA + DW_ID_down_case = 2 COMMA + DW_ID_case_insensitive = 3 +IF_NOT_ASM(};) + +/* Calling convention. 
*/ +ENUM(dwarf_calling_convention) + + DW_CC_normal = 0x1 COMMA + DW_CC_program = 0x2 COMMA + DW_CC_nocall = 0x3 +IF_NOT_ASM(};) + +#define DW_CC_lo_user 0x40 +#define DW_CC_hi_user 0xff + +/* Inline attribute. */ +ENUM(dwarf_inline_attribute) + + DW_INL_not_inlined = 0 COMMA + DW_INL_inlined = 1 COMMA + DW_INL_declared_not_inlined = 2 COMMA + DW_INL_declared_inlined = 3 +IF_NOT_ASM(};) + +/* Discriminant lists. */ +ENUM(dwarf_discrim_list) + + DW_DSC_label = 0 COMMA + DW_DSC_range = 1 +IF_NOT_ASM(};) + +/* Line number opcodes. */ +ENUM(dwarf_line_number_ops) + + DW_LNS_extended_op = 0 COMMA + DW_LNS_copy = 1 COMMA + DW_LNS_advance_pc = 2 COMMA + DW_LNS_advance_line = 3 COMMA + DW_LNS_set_file = 4 COMMA + DW_LNS_set_column = 5 COMMA + DW_LNS_negate_stmt = 6 COMMA + DW_LNS_set_basic_block = 7 COMMA + DW_LNS_const_add_pc = 8 COMMA + DW_LNS_fixed_advance_pc = 9 COMMA + /* DWARF 3. */ + DW_LNS_set_prologue_end = 10 COMMA + DW_LNS_set_epilogue_begin = 11 COMMA + DW_LNS_set_isa = 12 +IF_NOT_ASM(};) + +/* Line number extended opcodes. */ +ENUM(dwarf_line_number_x_ops) + + DW_LNE_end_sequence = 1 COMMA + DW_LNE_set_address = 2 COMMA + DW_LNE_define_file = 3 +IF_NOT_ASM(};) + +/* Call frame information. */ +ENUM(dwarf_call_frame_info) + + DW_CFA_advance_loc = 0x40 COMMA + DW_CFA_offset = 0x80 COMMA + DW_CFA_restore = 0xc0 COMMA + DW_CFA_nop = 0x00 COMMA + DW_CFA_set_loc = 0x01 COMMA + DW_CFA_advance_loc1 = 0x02 COMMA + DW_CFA_advance_loc2 = 0x03 COMMA + DW_CFA_advance_loc4 = 0x04 COMMA + DW_CFA_offset_extended = 0x05 COMMA + DW_CFA_restore_extended = 0x06 COMMA + DW_CFA_undefined = 0x07 COMMA + DW_CFA_same_value = 0x08 COMMA + DW_CFA_register = 0x09 COMMA + DW_CFA_remember_state = 0x0a COMMA + DW_CFA_restore_state = 0x0b COMMA + DW_CFA_def_cfa = 0x0c COMMA + DW_CFA_def_cfa_register = 0x0d COMMA + DW_CFA_def_cfa_offset = 0x0e COMMA + + /* DWARF 3. 
*/ + DW_CFA_def_cfa_expression = 0x0f COMMA + DW_CFA_expression = 0x10 COMMA + DW_CFA_offset_extended_sf = 0x11 COMMA + DW_CFA_def_cfa_sf = 0x12 COMMA + DW_CFA_def_cfa_offset_sf = 0x13 COMMA + + /* SGI/MIPS specific. */ + DW_CFA_MIPS_advance_loc8 = 0x1d COMMA + + /* GNU extensions. */ + DW_CFA_GNU_window_save = 0x2d COMMA + DW_CFA_GNU_args_size = 0x2e COMMA + DW_CFA_GNU_negative_offset_extended = 0x2f +IF_NOT_ASM(};) + +#define DW_CIE_ID 0xffffffff +#define DW_CIE_VERSION 1 + +#define DW_CFA_extended 0 +#define DW_CFA_lo_user 0x1c +#define DW_CFA_hi_user 0x3f + +#define DW_CHILDREN_no 0x00 +#define DW_CHILDREN_yes 0x01 + +#define DW_ADDR_none 0 + +/* Source language names and codes. */ +ENUM(dwarf_source_language) + + DW_LANG_C89 = 0x0001 COMMA + DW_LANG_C = 0x0002 COMMA + DW_LANG_Ada83 = 0x0003 COMMA + DW_LANG_C_plus_plus = 0x0004 COMMA + DW_LANG_Cobol74 = 0x0005 COMMA + DW_LANG_Cobol85 = 0x0006 COMMA + DW_LANG_Fortran77 = 0x0007 COMMA + DW_LANG_Fortran90 = 0x0008 COMMA + DW_LANG_Pascal83 = 0x0009 COMMA + DW_LANG_Modula2 = 0x000a COMMA + DW_LANG_Java = 0x000b COMMA + /* DWARF 3. */ + DW_LANG_C99 = 0x000c COMMA + DW_LANG_Ada95 = 0x000d COMMA + DW_LANG_Fortran95 = 0x000e COMMA + /* MIPS. */ + DW_LANG_Mips_Assembler = 0x8001 COMMA + /* UPC. */ + DW_LANG_Upc = 0x8765 +IF_NOT_ASM(};) + +#define DW_LANG_lo_user 0x8000 /* Implementation-defined range start. */ +#define DW_LANG_hi_user 0xffff /* Implementation-defined range start. */ + +/* Names and codes for macro information. */ +ENUM(dwarf_macinfo_record_type) + + DW_MACINFO_define = 1 COMMA + DW_MACINFO_undef = 2 COMMA + DW_MACINFO_start_file = 3 COMMA + DW_MACINFO_end_file = 4 COMMA + DW_MACINFO_vendor_ext = 255 +IF_NOT_ASM(};) + +/* @@@ For use with GNU frame unwind information. 
*/ + +#define DW_EH_PE_absptr 0x00 +#define DW_EH_PE_omit 0xff + +#define DW_EH_PE_uleb128 0x01 +#define DW_EH_PE_udata2 0x02 +#define DW_EH_PE_udata4 0x03 +#define DW_EH_PE_udata8 0x04 +#define DW_EH_PE_sleb128 0x09 +#define DW_EH_PE_sdata2 0x0A +#define DW_EH_PE_sdata4 0x0B +#define DW_EH_PE_sdata8 0x0C +#define DW_EH_PE_signed 0x08 + +#define DW_EH_PE_pcrel 0x10 +#define DW_EH_PE_textrel 0x20 +#define DW_EH_PE_datarel 0x30 +#define DW_EH_PE_funcrel 0x40 +#define DW_EH_PE_aligned 0x50 + +#define DW_EH_PE_indirect 0x80 + +#endif /* _ELF_DWARF2_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/errno.h current/include/linux/errno.h --- reference/include/linux/errno.h 2002-12-09 18:46:15.000000000 -0800 +++ current/include/linux/errno.h 2004-04-09 13:23:18.000000000 -0700 @@ -22,6 +22,7 @@ #define EBADTYPE 527 /* Type not supported by server */ #define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ #define EIOCBQUEUED 529 /* iocb queued, will get completion event */ +#define EIOCBRETRY 530 /* iocb queued, will trigger a retry */ #endif diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/fs.h current/include/linux/fs.h --- reference/include/linux/fs.h 2004-04-07 14:54:36.000000000 -0700 +++ current/include/linux/fs.h 2004-04-09 13:23:18.000000000 -0700 @@ -767,6 +767,11 @@ extern int vfs_rename(struct inode *, st #define DT_SOCK 12 #define DT_WHT 14 +#define OSYNC_METADATA (1<<0) +#define OSYNC_DATA (1<<1) +#define OSYNC_INODE (1<<2) +int generic_osync_inode(struct inode *, struct address_space *, int); + /* * This is the "filldir" function type, used by readdir() to let * the kernel specify what kind of dirent layout it wants to have. 
diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/gfp.h current/include/linux/gfp.h --- reference/include/linux/gfp.h 2003-10-01 11:41:17.000000000 -0700 +++ current/include/linux/gfp.h 2004-04-09 11:53:01.000000000 -0700 @@ -32,6 +32,7 @@ #define __GFP_NOFAIL 0x800 /* Retry for ever. Cannot fail */ #define __GFP_NORETRY 0x1000 /* Do not retry. Might fail */ #define __GFP_NO_GROW 0x2000 /* Slab internal usage */ +#define __GFP_NODE_STRICT 0x4000 /* Do not fall back to other nodes */ #define __GFP_BITS_SHIFT 16 /* Room for 16 __GFP_FOO bits */ #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1) diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/hugetlb.h current/include/linux/hugetlb.h --- reference/include/linux/hugetlb.h 2004-04-07 14:54:36.000000000 -0700 +++ current/include/linux/hugetlb.h 2004-04-09 21:41:39.000000000 -0700 @@ -50,6 +50,9 @@ mark_mm_hugetlb(struct mm_struct *mm, st int prepare_hugepage_range(unsigned long addr, unsigned long len); #endif +unsigned long try_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long *flags); + #else /* !CONFIG_HUGETLB_PAGE */ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) @@ -123,12 +126,21 @@ static inline void set_file_hugepages(st { file->f_op = &hugetlbfs_file_operations; } + +unsigned long +hugetlb_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); #else /* !CONFIG_HUGETLBFS */ #define is_file_hugepages(file) 0 #define set_file_hugepages(file) BUG() #define hugetlb_zero_setup(size) ERR_PTR(-ENOSYS) +static inline unsigned long +hugetlb_get_unmapped_area(struct file * a, unsigned long b, unsigned long c, + unsigned long d, unsigned long e) { return -ENOSYS; } #endif /* !CONFIG_HUGETLBFS */ + + #endif /* _LINUX_HUGETLB_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/init_task.h current/include/linux/init_task.h --- 
reference/include/linux/init_task.h 2004-03-11 14:35:31.000000000 -0800 +++ current/include/linux/init_task.h 2004-04-09 13:27:58.000000000 -0700 @@ -112,6 +112,8 @@ extern struct group_info init_groups; .proc_lock = SPIN_LOCK_UNLOCKED, \ .switch_lock = SPIN_LOCK_UNLOCKED, \ .journal_info = NULL, \ + .io_wait = NULL, \ + .map_base = __TASK_UNMAPPED_BASE, \ } diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/irq.h current/include/linux/irq.h --- reference/include/linux/irq.h 2003-10-01 11:41:17.000000000 -0700 +++ current/include/linux/irq.h 2004-04-08 15:10:24.000000000 -0700 @@ -71,7 +71,6 @@ extern irq_desc_t irq_desc [NR_IRQS]; #include /* the arch dependent stuff */ -extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); extern int setup_irq(unsigned int , struct irqaction * ); extern hw_irq_controller no_irq_type; /* needed in every arch ? */ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/kexec.h current/include/linux/kexec.h --- reference/include/linux/kexec.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/linux/kexec.h 2004-04-09 13:23:20.000000000 -0700 @@ -0,0 +1,53 @@ +#ifndef LINUX_KEXEC_H +#define LINUX_KEXEC_H + +#if CONFIG_KEXEC +#include +#include +#include + +/* + * This structure is used to hold the arguments that are used when loading + * kernel binaries. 
+ */ + +typedef unsigned long kimage_entry_t; +#define IND_DESTINATION 0x1 +#define IND_INDIRECTION 0x2 +#define IND_DONE 0x4 +#define IND_SOURCE 0x8 + +#define KEXEC_SEGMENT_MAX 8 +struct kexec_segment { + void *buf; + size_t bufsz; + void *mem; + size_t memsz; +}; + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + + unsigned long destination; + unsigned long offset; + + unsigned long start; + struct page *reboot_code_pages; + + unsigned long nr_segments; + struct kexec_segment segment[KEXEC_SEGMENT_MAX+1]; + + struct list_head dest_pages; + struct list_head unuseable_pages; +}; + + +/* kexec interface functions */ +extern void machine_kexec(struct kimage *image); +extern asmlinkage long sys_kexec(unsigned long entry, long nr_segments, + struct kexec_segment *segments); +extern struct kimage *kexec_image; +#endif +#endif /* LINUX_KEXEC_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/lockmeter.h current/include/linux/lockmeter.h --- reference/include/linux/lockmeter.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/linux/lockmeter.h 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,320 @@ +/* + * Copyright (C) 1999-2002 Silicon Graphics, Inc. + * + * Written by John Hawkes (hawkes@sgi.com) + * Based on klstat.h by Jack Steiner (steiner@sgi.com) + * + * Modified by Ray Bryant (raybry@us.ibm.com) Feb-Apr 2000 + * Changes Copyright (C) 2000 IBM, Inc. + * Added save of index in spinlock_t to improve efficiency + * of "hold" time reporting for spinlocks + * Added support for hold time statistics for read and write + * locks. + * Moved machine dependent code to include/asm/lockmeter.h. 
+ * + */ + +#ifndef _LINUX_LOCKMETER_H +#define _LINUX_LOCKMETER_H + + +/*--------------------------------------------------- + * architecture-independent lockmeter.h + *-------------------------------------------------*/ + +/* + * raybry -- version 2: added efficient hold time statistics + * requires lstat recompile, so flagged as new version + * raybry -- version 3: added global reader lock data + * hawkes -- version 4: removed some unnecessary fields to simplify mips64 port + */ +#define LSTAT_VERSION 5 + +int lstat_update(void*, void*, int); +int lstat_update_time(void*, void*, int, uint32_t); + +/* + * Currently, the mips64 and sparc64 kernels talk to a 32-bit lockstat, so we + * need to force compatibility in the inter-communication data structure. + */ + +#if defined(CONFIG_MIPS32_COMPAT) +#define TIME_T uint32_t +#elif defined(CONFIG_SPARC) || defined(CONFIG_SPARC64) +#define TIME_T uint64_t +#else +#define TIME_T time_t +#endif + +#if defined(__KERNEL__) || (!defined(CONFIG_MIPS32_COMPAT) && !defined(CONFIG_SPARC) && !defined(CONFIG_SPARC64)) || (_MIPS_SZLONG==32) +#define POINTER void * +#else +#define POINTER int64_t +#endif + +/* + * Values for the "action" parameter passed to lstat_update. + * ZZZ - do we want a try-success status here??? + */ +#define LSTAT_ACT_NO_WAIT 0 +#define LSTAT_ACT_SPIN 1 +#define LSTAT_ACT_REJECT 2 +#define LSTAT_ACT_WW_SPIN 3 +#define LSTAT_ACT_SLEPT 4 /* UNUSED */ + +#define LSTAT_ACT_MAX_VALUES 4 /* NOTE: Increase to 5 if use ACT_SLEPT */ + +/* + * Special values for the low 2 bits of an RA passed to + * lstat_update. + */ +/* we use these values to figure out what kind of lock data */ +/* is stored in the statistics table entry at index ....... 
*/ +#define LSTAT_RA_SPIN 0 /* spin lock data */ +#define LSTAT_RA_READ 1 /* read lock statistics */ +#define LSTAT_RA_SEMA 2 /* RESERVED */ +#define LSTAT_RA_WRITE 3 /* write lock statistics*/ + +#define LSTAT_RA(n) \ + ((void*)( ((unsigned long)__builtin_return_address(0) & ~3) | n) ) + +/* + * Constants used for lock addresses in the lstat_directory + * to indicate special values of the lock address. + */ +#define LSTAT_MULTI_LOCK_ADDRESS NULL + +/* + * Maximum size of the lockstats tables. Increase this value + * if its not big enough. (Nothing bad happens if its not + * big enough although some locks will not be monitored.) + * We record overflows of this quantity in lstat_control.dir_overflows + * + * Note: The max value here must fit into the field set + * and obtained by the macro's PUT_INDEX() and GET_INDEX(). + * This value depends on how many bits are available in the + * lock word in the particular machine implementation we are on. + */ +#define LSTAT_MAX_STAT_INDEX 2000 + +/* + * Size and mask for the hash table into the directory. + */ +#define LSTAT_HASH_TABLE_SIZE 4096 /* must be 2**N */ +#define LSTAT_HASH_TABLE_MASK (LSTAT_HASH_TABLE_SIZE-1) + +#define DIRHASH(ra) ((unsigned long)(ra)>>2 & LSTAT_HASH_TABLE_MASK) + +/* + * This defines an entry in the lockstat directory. It contains + * information about a lock being monitored. + * A directory entry only contains the lock identification - + * counts on usage of the lock are kept elsewhere in a per-cpu + * data structure to minimize cache line pinging. + */ +typedef struct { + POINTER caller_ra; /* RA of code that set lock */ + POINTER lock_ptr; /* lock address */ + ushort next_stat_index; /* Used to link multiple locks that have the same hash table value */ +} lstat_directory_entry_t; + +/* + * A multi-dimensioned array used to contain counts for lock accesses. + * The array is 3-dimensional: + * - CPU number. Keep from thrashing cache lines between CPUs + * - Directory entry index. 
Identifies the lock + * - Action. Indicates what kind of contention occurred on an + * access to the lock. + * + * The index of an entry in the directory is the same as the 2nd index + * of the entry in the counts array. + */ +/* + * This table contains data for spin_locks, write locks, and read locks + * Not all data is used for all cases. In particular, the hold time + * information is not stored here for read locks since that is a global + * (e. g. cannot be separated out by return address) quantity. + * See the lstat_read_lock_counts_t structure for the global read lock + * hold time. + */ +typedef struct { + uint64_t cum_wait_ticks; /* sum of wait times */ + /* for write locks, sum of time a */ + /* writer is waiting for a reader */ + int64_t cum_hold_ticks; /* cumulative sum of holds */ + /* not used for read mode locks */ + /* must be signed. ............... */ + uint32_t max_wait_ticks; /* max waiting time */ + uint32_t max_hold_ticks; /* max holding time */ + uint64_t cum_wait_ww_ticks; /* sum times writer waits on writer*/ + uint32_t max_wait_ww_ticks; /* max wait time writer vs writer */ + /* prev 2 only used for write locks*/ + uint32_t acquire_time; /* time lock acquired this CPU */ + uint32_t count[LSTAT_ACT_MAX_VALUES]; +} lstat_lock_counts_t; + +typedef lstat_lock_counts_t lstat_cpu_counts_t[LSTAT_MAX_STAT_INDEX]; + +/* + * User request to: + * - turn statistic collection on/off, or to reset + */ +#define LSTAT_OFF 0 +#define LSTAT_ON 1 +#define LSTAT_RESET 2 +#define LSTAT_RELEASE 3 + +#define LSTAT_MAX_READ_LOCK_INDEX 1000 +typedef struct { + POINTER lock_ptr; /* address of lock for output stats */ + uint32_t read_lock_count; + int64_t cum_hold_ticks; /* sum of read lock hold times over */ + /* all callers. ....................*/ + uint32_t write_index; /* last write lock hash table index */ + uint32_t busy_periods; /* count of busy periods ended this */ + uint64_t start_busy; /* time this busy period started. 
..*/ + uint64_t busy_ticks; /* sum of busy periods this lock. ..*/ + uint64_t max_busy; /* longest busy period for this lock*/ + uint32_t max_readers; /* maximum number of readers ...... */ +#ifdef USER_MODE_TESTING + rwlock_t entry_lock; /* lock for this read lock entry... */ + /* avoid having more than one rdr at*/ + /* needed for user space testing... */ + /* not needed for kernel 'cause it */ + /* is non-preemptive. ............. */ +#endif +} lstat_read_lock_counts_t; +typedef lstat_read_lock_counts_t lstat_read_lock_cpu_counts_t[LSTAT_MAX_READ_LOCK_INDEX]; + +#if defined(__KERNEL__) || defined(USER_MODE_TESTING) + +#ifndef USER_MODE_TESTING +#include +#else +#include "asm_newlockmeter.h" +#endif + +/* + * Size and mask for the hash table into the directory. + */ +#define LSTAT_HASH_TABLE_SIZE 4096 /* must be 2**N */ +#define LSTAT_HASH_TABLE_MASK (LSTAT_HASH_TABLE_SIZE-1) + +#define DIRHASH(ra) ((unsigned long)(ra)>>2 & LSTAT_HASH_TABLE_MASK) + +/* + * This version eliminates the per processor lock stack. What we do is to + * store the index of the lock hash structure in unused bits in the lock + * itself. Then on unlock we can find the statistics record without doing + * any additional hash or lock stack lookup. This works for spin_locks. + * Hold time reporting is now basically as cheap as wait time reporting + * so we ignore the difference between LSTAT_ON_HOLD and LSTAT_ON_WAIT + * as in version 1.1.* of lockmeter. + * + * For rw_locks, we store the index of a global reader stats structure in + * the lock and the writer index is stored in the latter structure. + * For read mode locks we hash at the time of the lock to find an entry + * in the directory for reader wait time and the like. + * At unlock time for read mode locks, we update just the global structure + * so we don't need to know the reader directory index value at unlock time. 
+ * + */ + +/* + * Protocol to change lstat_control.state + * This is complicated because we don't want the cum_hold_time for + * a rw_lock to be decremented in _read_lock_ without making sure it + * is incremented in _read_lock_ and vice versa. So here is the + * way we change the state of lstat_control.state: + * I. To Turn Statistics On + * After allocating storage, set lstat_control.state non-zero. + * This works because we don't start updating statistics for in use + * locks until the reader lock count goes to zero. + * II. To Turn Statistics Off: + * (0) Disable interrupts on this CPU + * (1) Seize the lstat_control.directory_lock + * (2) Obtain the current value of lstat_control.next_free_read_lock_index + * (3) Store a zero in lstat_control.state. + * (4) Release the lstat_control.directory_lock + * (5) For each lock in the read lock list up to the saved value + * (well, -1) of the next_free_read_lock_index, do the following: + * (a) Check validity of the stored lock address + * by making sure that the word at the saved addr + * has an index that matches this entry. If not + * valid, then skip this entry. + * (b) If there is a write lock already set on this lock, + * skip to (d) below. + * (c) Set a non-metered write lock on the lock + * (d) set the cached INDEX in the lock to zero + * (e) Release the non-metered write lock. + * (6) Re-enable interrupts + * + * These rules ensure that a read lock will not have its statistics + * partially updated even though the global lock recording state has + * changed. See put_lockmeter_info() for implementation. + * + * The reason for (b) is that there may be write locks set on the + * syscall path to put_lockmeter_info() from user space. If we do + * not do this check, then we can deadlock. A similar problem would + * occur if the lock was read locked by the current CPU. At the + * moment this does not appear to happen. + */ + +/* + * Main control structure for lockstat. 
Used to turn statistics on/off + * and to maintain directory info. + */ +typedef struct { + int state; + spinlock_t control_lock; /* used to serialize turning statistics on/off */ + spinlock_t directory_lock; /* for serialize adding entries to directory */ + volatile int next_free_dir_index;/* next free entry in the directory */ + /* FIXME not all of these fields are used / needed .............. */ + /* the following fields represent data since */ + /* first "lstat on" or most recent "lstat reset" */ + TIME_T first_started_time; /* time when measurement first enabled */ + TIME_T started_time; /* time when measurement last started */ + TIME_T ending_time; /* time when measurement last disabled */ + uint64_t started_cycles64; /* cycles when measurement last started */ + uint64_t ending_cycles64; /* cycles when measurement last disabled */ + uint64_t enabled_cycles64; /* total cycles with measurement enabled */ + int intervals; /* number of measurement intervals recorded */ + /* i. e. number of times did lstat on;lstat off */ + lstat_directory_entry_t *dir; /* directory */ + int dir_overflow; /* count of times ran out of space in directory */ + int rwlock_overflow; /* count of times we couldn't allocate a rw block*/ + ushort *hashtab; /* hash table for quick dir scans */ + lstat_cpu_counts_t *counts[NR_CPUS]; /* Array of pointers to per-cpu stats */ + int next_free_read_lock_index; /* next rwlock reader (global) stats block */ + lstat_read_lock_cpu_counts_t *read_lock_counts[NR_CPUS]; /* per cpu read lock stats */ +} lstat_control_t; + +#endif /* defined(__KERNEL__) || defined(USER_MODE_TESTING) */ + +typedef struct { + short lstat_version; /* version of the data */ + short state; /* the current state is returned */ + int maxcpus; /* Number of cpus present */ + int next_free_dir_index; /* index of the next free directory entry */ + TIME_T first_started_time; /* when measurement enabled for first time */ + TIME_T started_time; /* time in secs since 1969 when stats last 
turned on */ + TIME_T ending_time; /* time in secs since 1969 when stats last turned off */ + uint32_t cycleval; /* cycles per second */ +#ifdef notyet + void *kernel_magic_addr; /* address of kernel_magic */ + void *kernel_end_addr; /* contents of kernel magic (points to "end") */ +#endif + int next_free_read_lock_index; /* index of next (global) read lock stats struct */ + uint64_t started_cycles64; /* cycles when measurement last started */ + uint64_t ending_cycles64; /* cycles when stats last turned off */ + uint64_t enabled_cycles64; /* total cycles with measurement enabled */ + int intervals; /* number of measurement intervals recorded */ + /* i.e. number of times we did lstat on;lstat off*/ + int dir_overflow; /* number of times we wanted more space in directory */ + int rwlock_overflow; /* # of times we wanted more space in read_locks_count */ + struct new_utsname uts; /* info about machine where stats are measured */ + /* -T option of lockstat allows data to be */ + /* moved to another machine. ................. */ +} lstat_user_request_t; + +#endif /* _LINUX_LOCKMETER_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/mcount.h current/include/linux/mcount.h --- reference/include/linux/mcount.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/linux/mcount.h 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,63 @@ +/* + * include/linux/mcount.h + * + * Implementation of kernel mcount handler and supporting functions. 
+ * + * Code based on kernprof http://oss.sgi.com/projects/kernprof/ + * Copyright (C) SGI 1999, 2000, 2001 + * Written by Dimitris Michailidis (dimitris@engr.sgi.com) + * Modified by John Hawkes (hawkes@engr.sgi.com) + * Contributions from Niels Christiansen (nchr@us.ibm.com) + * Adapted for stand-alone call graphing by Adam Litke (agl@us.ibm.com) + */ + +#ifndef __MCOUNT_H +#define __MCOUNT_H + +#include +#include +#include + +#define DFL_PC_RES 4 /* default PC resolution for this platform */ +#define CG_MAX_ARCS (1 << (8 * sizeof(short))) +#define FUNCTIONPC(func) (*(unsigned long *)&(func)) + +#define pc_out_of_range(pc) \ + ((pc) < (unsigned long) &_stext || (pc) >= (unsigned long) &_etext) + +struct prof_mem_map +{ + unsigned long kernel_buckets; /* number of kernel buckets */ + unsigned long nr_cpus; /* number of processors whether profiled or not */ + unsigned long cg_from_size; /* size of one cg_from array */ + unsigned long cg_to_size; /* size of one cg_to array */ + unsigned long cg_to_offset; /* offset of cg_to array */ + unsigned long kernel_start; /* lowest text address in kernel */ + unsigned long kernel_end; /* highest text address in kernel */ +}; + +struct cg_arc_dest { + unsigned long address; + atomic_t count; + unsigned short link; + unsigned short pad; +}; + +#ifdef CONFIG_X86 +void cg_record_arc(unsigned long frompc, unsigned long selfpc) __attribute__((regparm(2))); +#endif + +int mcount_init(void); + +ssize_t mcount_write(struct file * file, const char * buf, + size_t count, loff_t *ppos); + +ssize_t mcount_read(struct file * file, char * buf, + size_t count, loff_t *ppos); + +static struct file_operations mcount_operations = { + write: mcount_write, + read: mcount_read, +}; + +#endif diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/mm.h current/include/linux/mm.h --- reference/include/linux/mm.h 2004-04-07 14:54:36.000000000 -0700 +++ current/include/linux/mm.h 2004-04-09 21:41:41.000000000 -0700 @@ -19,6 +19,7 @@ extern 
unsigned long max_mapnr; extern unsigned long num_physpages; extern void * high_memory; +extern unsigned long vmalloc_earlyreserve; extern int page_cluster; #include @@ -150,8 +151,6 @@ struct vm_operations_struct { int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); }; -/* forward declaration; pte_chain is meant to be internal to rmap.c */ -struct pte_chain; struct mmu_gather; struct inode; @@ -180,16 +179,12 @@ struct page { page_flags_t flags; /* atomic flags, some possibly updated asynchronously */ atomic_t count; /* Usage count, see below. */ + int mapcount; /* rmap counts ptes mapped in mms */ struct list_head list; /* ->mapping has some page lists. */ struct address_space *mapping; /* The inode (or ...) we belong to. */ unsigned long index; /* Our offset within mapping. */ struct list_head lru; /* Pageout list, eg. active_list; protected by zone->lru_lock !! */ - union { - struct pte_chain *chain;/* Reverse pte mapping pointer. - * protected by PG_chainlock */ - pte_addr_t direct; - } pte; unsigned long private; /* mapping-private opaque data */ /* @@ -404,14 +399,15 @@ void page_address_init(void); #endif /* - * Return true if this page is mapped into pagetables. Subtle: test pte.direct - * rather than pte.chain. Because sometimes pte.direct is 64-bit, and .chain - * is only 32-bit. + * On an anonymous page mapped into a user virtual memory area, + * page->mapping points to its anonmm, not to a struct address_space. + * + * Please note that, confusingly, "page_mapping" refers to the inode + * address_space which maps the page from disk; whereas "page_mapped" + * refers to user virtual address space into which the page is mapped. */ -static inline int page_mapped(struct page *page) -{ - return page->pte.direct != 0; -} +#define page_mapping(page) (PageAnon(page)? 
NULL: (page)->mapping) +#define page_mapped(page) ((page)->mapcount != 0) /* * Error return values for the *_nopage functions @@ -472,6 +468,7 @@ int get_user_pages(struct task_struct *t int __set_page_dirty_buffers(struct page *page); int __set_page_dirty_nobuffers(struct page *page); +int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); /* @@ -498,23 +495,6 @@ extern struct shrinker *set_shrinker(int extern void remove_shrinker(struct shrinker *shrinker); /* - * If the mapping doesn't provide a set_page_dirty a_op, then - * just fall through and assume that it wants buffer_heads. - * FIXME: make the method unconditional. - */ -static inline int set_page_dirty(struct page *page) -{ - if (page->mapping) { - int (*spd)(struct page *); - - spd = page->mapping->a_ops->set_page_dirty; - if (spd) - return (*spd)(page); - } - return __set_page_dirty_buffers(page); -} - -/* * On a two-level page table, this ends up being trivial. Thus the * inlining and the symmetry break with pte_alloc_map() that does all * of this out-of-line. 
@@ -541,6 +521,9 @@ extern void si_meminfo_node(struct sysin extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, struct rb_node **, struct rb_node *); +extern struct vm_area_struct *copy_vma(struct vm_area_struct *, + unsigned long addr, unsigned long len, unsigned long pgoff); +extern void vma_relink_file(struct vm_area_struct *, struct vm_area_struct *); extern void exit_mmap(struct mm_struct *); extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/mman.h current/include/linux/mman.h --- reference/include/linux/mman.h 2003-10-14 15:50:34.000000000 -0700 +++ current/include/linux/mman.h 2004-04-09 21:41:39.000000000 -0700 @@ -58,6 +58,9 @@ calc_vm_flag_bits(unsigned long flags) return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) | +#ifdef CONFIG_HUGETLB_PAGE + _calc_vm_trans(flags, MAP_HUGETLB, VM_HUGETLB ) | +#endif _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ); } diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/page-flags.h current/include/linux/page-flags.h --- reference/include/linux/page-flags.h 2004-04-07 14:54:36.000000000 -0700 +++ current/include/linux/page-flags.h 2004-04-08 15:10:25.000000000 -0700 @@ -69,13 +69,14 @@ #define PG_private 12 /* Has something at ->private */ #define PG_writeback 13 /* Page is under writeback */ #define PG_nosave 14 /* Used for system suspend/resume */ -#define PG_chainlock 15 /* lock bit for ->pte_chain */ +#define PG_rmaplock 15 /* Lock bit for reversing to ptes */ -#define PG_direct 16 /* ->pte_chain points directly at pte */ +#define PG_swapcache 16 /* Swap page: swp_entry_t in private */ #define PG_mappedtodisk 17 /* Has blocks allocated on-disk */ #define PG_reclaim 18 /* 
To be reclaimed asap */ #define PG_compound 19 /* Part of a compound page */ +#define PG_anon 20 /* Anonymous page: anonmm in mapping */ /* * Global page accounting. One instance per CPU. Only unsigned longs are @@ -279,12 +280,6 @@ extern void get_full_page_state(struct p #define ClearPageNosave(page) clear_bit(PG_nosave, &(page)->flags) #define TestClearPageNosave(page) test_and_clear_bit(PG_nosave, &(page)->flags) -#define PageDirect(page) test_bit(PG_direct, &(page)->flags) -#define SetPageDirect(page) set_bit(PG_direct, &(page)->flags) -#define TestSetPageDirect(page) test_and_set_bit(PG_direct, &(page)->flags) -#define ClearPageDirect(page) clear_bit(PG_direct, &(page)->flags) -#define TestClearPageDirect(page) test_and_clear_bit(PG_direct, &(page)->flags) - #define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags) #define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags) #define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags) @@ -298,15 +293,16 @@ extern void get_full_page_state(struct p #define SetPageCompound(page) set_bit(PG_compound, &(page)->flags) #define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags) -/* - * The PageSwapCache predicate doesn't use a PG_flag at this time, - * but it may again do so one day. 
- */ +#define PageAnon(page) test_bit(PG_anon, &(page)->flags) +#define SetPageAnon(page) set_bit(PG_anon, &(page)->flags) +#define ClearPageAnon(page) clear_bit(PG_anon, &(page)->flags) + #ifdef CONFIG_SWAP -extern struct address_space swapper_space; -#define PageSwapCache(page) ((page)->mapping == &swapper_space) +#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags) +#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags) +#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags) #else -#define PageSwapCache(page) 0 +#define PageSwapCache(page) 0 #endif struct page; /* forward declaration */ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/pagemap.h current/include/linux/pagemap.h --- reference/include/linux/pagemap.h 2004-01-15 10:41:19.000000000 -0800 +++ current/include/linux/pagemap.h 2004-04-09 13:23:19.000000000 -0700 @@ -70,7 +70,7 @@ extern struct page * find_trylock_page(s extern struct page * find_or_create_page(struct address_space *mapping, unsigned long index, unsigned int gfp_mask); extern unsigned int find_get_pages(struct address_space *mapping, - pgoff_t start, unsigned int nr_pages, + pgoff_t *next, unsigned int nr_pages, struct page **pages); /* @@ -138,31 +138,30 @@ static inline unsigned long get_page_cac return atomic_read(&nr_pagecache); } -static inline void ___add_to_page_cache(struct page *page, - struct address_space *mapping, unsigned long index) -{ - list_add(&page->list, &mapping->clean_pages); - page->mapping = mapping; - page->index = index; - - mapping->nrpages++; - pagecache_acct(1); -} - extern void FASTCALL(__lock_page(struct page *page)); extern void FASTCALL(unlock_page(struct page *page)); -static inline void lock_page(struct page *page) + +extern int FASTCALL(__lock_page_wq(struct page *page, wait_queue_t *wait)); +static inline int lock_page_wq(struct page *page, wait_queue_t *wait) { if (TestSetPageLocked(page)) - __lock_page(page); + return __lock_page_wq(page, 
wait); + else + return 0; +} + +static inline void lock_page(struct page *page) +{ + lock_page_wq(page, NULL); } /* * This is exported only for wait_on_page_locked/wait_on_page_writeback. * Never use this directly! */ -extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr)); +extern int FASTCALL(wait_on_page_bit_wq(struct page *page, int bit_nr, + wait_queue_t *wait)); /* * Wait for a page to be unlocked. @@ -171,19 +170,33 @@ extern void FASTCALL(wait_on_page_bit(st * ie with increased "page->count" so that the page won't * go away during the wait.. */ -static inline void wait_on_page_locked(struct page *page) +static inline int wait_on_page_locked_wq(struct page *page, wait_queue_t *wait) { if (PageLocked(page)) - wait_on_page_bit(page, PG_locked); + return wait_on_page_bit_wq(page, PG_locked, wait); + return 0; +} + +static inline int wait_on_page_writeback_wq(struct page *page, + wait_queue_t *wait) +{ + if (PageWriteback(page)) + return wait_on_page_bit_wq(page, PG_writeback, wait); + return 0; +} + +static inline void wait_on_page_locked(struct page *page) +{ + wait_on_page_locked_wq(page, NULL); } /* * Wait for a page to complete writeback */ + static inline void wait_on_page_writeback(struct page *page) { - if (PageWriteback(page)) - wait_on_page_bit(page, PG_writeback); + wait_on_page_writeback_wq(page, NULL); } extern void end_page_writeback(struct page *page); diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/pagevec.h current/include/linux/pagevec.h --- reference/include/linux/pagevec.h 2002-12-09 18:46:25.000000000 -0800 +++ current/include/linux/pagevec.h 2004-04-09 13:23:19.000000000 -0700 @@ -23,7 +23,7 @@ void __pagevec_lru_add(struct pagevec *p void __pagevec_lru_add_active(struct pagevec *pvec); void pagevec_strip(struct pagevec *pvec); unsigned int pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, - pgoff_t start, unsigned int nr_pages); + pgoff_t *next, unsigned int nr_pages); static inline void 
pagevec_init(struct pagevec *pvec, int cold) { diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/pipe_fs_i.h current/include/linux/pipe_fs_i.h --- reference/include/linux/pipe_fs_i.h 2002-12-09 18:46:13.000000000 -0800 +++ current/include/linux/pipe_fs_i.h 2004-04-09 13:23:20.000000000 -0700 @@ -41,7 +41,7 @@ struct pipe_inode_info { #define PIPE_MAX_WCHUNK(inode) (PIPE_SIZE - PIPE_END(inode)) /* Drop the inode semaphore and wait for a pipe event, atomically */ -void pipe_wait(struct inode * inode); +int pipe_wait(struct inode * inode); struct inode* pipe_new(struct inode* inode); diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/reboot.h current/include/linux/reboot.h --- reference/include/linux/reboot.h 2003-10-01 11:41:17.000000000 -0700 +++ current/include/linux/reboot.h 2004-04-09 13:23:20.000000000 -0700 @@ -22,6 +22,7 @@ * POWER_OFF Stop OS and remove all power from system, if possible. * RESTART2 Restart system using given command string. * SW_SUSPEND Suspend system using software suspend if compiled in. + * KEXEC Restart the system using a different kernel. */ #define LINUX_REBOOT_CMD_RESTART 0x01234567 @@ -31,6 +32,7 @@ #define LINUX_REBOOT_CMD_POWER_OFF 0x4321FEDC #define LINUX_REBOOT_CMD_RESTART2 0xA1B2C3D4 #define LINUX_REBOOT_CMD_SW_SUSPEND 0xD000FCE2 +#define LINUX_REBOOT_CMD_KEXEC 0x45584543 #ifdef __KERNEL__ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/rmap-locking.h current/include/linux/rmap-locking.h --- reference/include/linux/rmap-locking.h 2004-04-07 14:54:36.000000000 -0700 +++ current/include/linux/rmap-locking.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,23 +0,0 @@ -/* - * include/linux/rmap-locking.h - * - * Locking primitives for exclusive access to a page's reverse-mapping - * pte chain. 
- */ - -#include - -struct pte_chain; -extern kmem_cache_t *pte_chain_cache; - -#define pte_chain_lock(page) bit_spin_lock(PG_chainlock, (unsigned long *)&page->flags) -#define pte_chain_unlock(page) bit_spin_unlock(PG_chainlock, (unsigned long *)&page->flags) - -struct pte_chain *pte_chain_alloc(int gfp_flags); -void __pte_chain_free(struct pte_chain *pte_chain); - -static inline void pte_chain_free(struct pte_chain *pte_chain) -{ - if (pte_chain) - __pte_chain_free(pte_chain); -} diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/rmap.h current/include/linux/rmap.h --- reference/include/linux/rmap.h 1969-12-31 16:00:00.000000000 -0800 +++ current/include/linux/rmap.h 2004-04-09 13:27:12.000000000 -0700 @@ -0,0 +1,70 @@ +#ifndef _LINUX_RMAP_H +#define _LINUX_RMAP_H +/* + * Declarations for Reverse Mapping functions in mm/rmap.c + * Its structures are declared within that file. + */ + +#include +#include + +#define rmap_lock(page) bit_spin_lock(PG_rmaplock, (unsigned long *)&(page)->flags) +#define rmap_unlock(page) bit_spin_unlock(PG_rmaplock, (unsigned long *)&(page)->flags) + +#ifdef CONFIG_MMU + +void fastcall page_add_anon_rmap(struct page *, + struct mm_struct *, unsigned long addr); +void fastcall page_update_anon_rmap(struct page *, + struct mm_struct *, unsigned long addr); +void fastcall page_add_obj_rmap(struct page *); +void fastcall page_remove_rmap(struct page *); + +/** + * page_dup_rmap - duplicate pte mapping to a page + * @page: the page to add the mapping to + * + * For copy_page_range only: minimal extract from page_add_rmap, + * avoiding unnecessary tests (already checked) so it's quicker. 
+ */ +static inline void page_dup_rmap(struct page *page) +{ + rmap_lock(page); + page->mapcount++; + rmap_unlock(page); +} + +/* + * Called from kernel/fork.c to manage anonymous memory + */ +void init_rmap(void); +int exec_rmap(struct mm_struct *); +int dup_rmap(struct mm_struct *, struct mm_struct *oldmm); +void exit_rmap(struct mm_struct *); + +/* + * Called from mm/vmscan.c to handle paging out + */ +int fastcall page_referenced(struct page *); +int fastcall try_to_unmap(struct page *); + +#else /* !CONFIG_MMU */ + +#define init_rmap() do {} while (0) +#define exec_rmap(mm) (0) +#define dup_rmap(mm, oldmm) (0) +#define exit_rmap(mm) do {} while (0) + +#define page_referenced(page) TestClearPageReferenced(page) +#define try_to_unmap(page) SWAP_FAIL + +#endif /* CONFIG_MMU */ + +/* + * Return values of try_to_unmap + */ +#define SWAP_SUCCESS 0 +#define SWAP_AGAIN 1 +#define SWAP_FAIL 2 + +#endif /* _LINUX_RMAP_H */ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/sched.h current/include/linux/sched.h --- reference/include/linux/sched.h 2004-04-07 14:54:36.000000000 -0700 +++ current/include/linux/sched.h 2004-04-09 21:51:31.000000000 -0700 @@ -71,7 +71,11 @@ struct exec_domain; * the EXP_n values would be 1981, 2034 and 2043 if still using only * 11 bit fractions. 
*/ -extern unsigned long avenrun[]; /* Load averages */ +extern unsigned long avenrun[]; /* Load averages */ +extern unsigned long tasks_running[3]; /* Real load averages */ +DECLARE_PER_CPU(unsigned long[3],cpu_tasks_running); /* Real load averages per cpu */ + +extern unsigned long tasks_running[]; /* Real load averages */ #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1< #include #include @@ -147,6 +162,7 @@ extern spinlock_t mmlist_lock; typedef struct task_struct task_t; extern void sched_init(void); +extern void sched_init_smp(void); extern void init_idle(task_t *idle, int cpu); extern void show_state(void); @@ -201,6 +217,7 @@ struct mm_struct { * together off init_mm.mmlist, and are protected * by mmlist_lock */ + struct anonmm *anonmm; /* For rmap to track anon mem */ unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; @@ -329,6 +346,18 @@ struct k_itimer { struct sigqueue *sigq; /* signal queue entry. */ }; +#ifdef CONFIG_SCHEDSTATS +struct sched_info { + /* cumulative counters */ + unsigned long cpu_time, /* time spent on the cpu */ + run_delay, /* time spent waiting on a runqueue */ + pcnt; /* # of timeslices run on this cpu */ + + /* timestamps */ + unsigned long last_arrival, /* when we last ran on a cpu */ + last_queued; /* when we were last queued to run */ +}; +#endif /* CONFIG_SCHEDSTATS */ struct io_context; /* See blkdev.h */ void exit_io_context(void); @@ -382,6 +411,10 @@ struct task_struct { cpumask_t cpus_allowed; unsigned int time_slice, first_time_slice; +#ifdef CONFIG_SCHEDSTATS + struct sched_info sched_info; +#endif /* CONFIG_SCHEDSTATS */ + struct list_head tasks; struct list_head ptrace_children; struct list_head ptrace_list; @@ -484,6 +517,8 @@ struct task_struct { void *journal_info; /* VM state */ + /* TASK_UNMAPPED_BASE value */ + unsigned long map_base; struct reclaim_state *reclaim_state; struct dentry *proc_dentry; @@ -493,6 +528,13 @@ struct task_struct { 
unsigned long ptrace_message; siginfo_t *last_siginfo; /* For ptrace use. */ +/* + * current io wait handle: wait queue entry to use for io waits + * If this thread is processing aio, this points at the waitqueue + * inside the currently handled kiocb. It may be NULL (i.e. default + * to a stack based synchronous wait) if its doing sync IO. + */ + wait_queue_t *io_wait; }; static inline pid_t process_group(struct task_struct *tsk) @@ -505,6 +547,12 @@ extern void __put_task_struct(struct tas #define put_task_struct(tsk) \ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0) +#ifndef __TASK_UNMAPPED_BASE +#define __TASK_UNMAPPED_BASE 0UL +#else +#define __HAS_ARCH_PROC_MAPPED_BASE +#endif + /* * Per process flags */ @@ -531,6 +579,109 @@ do { if (atomic_dec_and_test(&(tsk)->usa #define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */ #ifdef CONFIG_SMP +#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */ + +#define SD_BALANCE_NEWIDLE 1 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 2 /* Balance on exec */ +#define SD_WAKE_IDLE 4 /* Wake to idle CPU on task wakeup */ +#define SD_WAKE_AFFINE 8 /* Wake task to waking CPU */ +#define SD_SHARE_CPUPOWER 16 /* Domain members share cpu power */ + +struct sched_group { + struct sched_group *next; /* Must be a circular list */ + cpumask_t cpumask; + + /* + * CPU power of this group, SCHED_LOAD_SCALE being max power for a + * single CPU. This should be read only (except for setup). Although + * it will need to be written to at cpu hot(un)plug time, perhaps the + * cpucontrol semaphore will provide enough exclusion? 
+ */ + unsigned long cpu_power; +}; + +struct sched_domain { + /* These fields must be setup */ + struct sched_domain *parent; /* top domain must be null terminated */ + struct sched_group *groups; /* the balancing groups of the domain */ + cpumask_t span; /* span of all CPUs in this domain */ + unsigned long min_interval; /* Minimum balance interval ms */ + unsigned long max_interval; /* Maximum balance interval ms */ + unsigned int busy_factor; /* less balancing by factor if busy */ + unsigned int imbalance_pct; /* No balance until over watermark */ + unsigned long long cache_hot_time; /* Task considered cache hot (ns) */ + unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int per_cpu_gain; /* CPU % gained by adding domain cpus */ + int flags; /* See SD_* */ + + /* Runtime fields. */ + unsigned long last_balance; /* init to jiffies. units in jiffies */ + unsigned int balance_interval; /* initialise to 1. units in ms. */ + unsigned int nr_balance_failed; /* initialise to 0 */ +}; + +/* Common values for SMT siblings */ +#define SD_SIBLING_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .groups = NULL, \ + .min_interval = 1, \ + .max_interval = 2, \ + .busy_factor = 8, \ + .imbalance_pct = 110, \ + .cache_hot_time = 0, \ + .cache_nice_tries = 0, \ + .per_cpu_gain = 15, \ + .flags = SD_BALANCE_NEWIDLE \ + | SD_WAKE_AFFINE \ + | SD_WAKE_IDLE \ + | SD_SHARE_CPUPOWER, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .nr_balance_failed = 0, \ +} + +/* Common values for CPUs */ +#define SD_CPU_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .groups = NULL, \ + .min_interval = 1, \ + .max_interval = 4, \ + .busy_factor = 64, \ + .imbalance_pct = 125, \ + .cache_hot_time = (5*1000000/2), \ + .cache_nice_tries = 2, \ + .per_cpu_gain = 100, \ + .flags = SD_BALANCE_NEWIDLE \ + | SD_WAKE_AFFINE, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + 
.nr_balance_failed = 0, \ +} + +#ifdef CONFIG_SCHED_NUMA +/* Common values for NUMA nodes */ +#define SD_NODE_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .groups = NULL, \ + .min_interval = 8, \ + .max_interval = 256*fls(num_online_cpus()),\ + .busy_factor = 32, \ + .imbalance_pct = 125, \ + .cache_hot_time = (10*1000000), \ + .cache_nice_tries = 1, \ + .per_cpu_gain = 100, \ + .flags = SD_BALANCE_EXEC, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .nr_balance_failed = 0, \ +} +#endif + +extern void cpu_attach_domain(struct sched_domain *sd, int cpu); + extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); #else static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) @@ -541,12 +692,10 @@ static inline int set_cpus_allowed(task_ extern unsigned long long sched_clock(void); -#ifdef CONFIG_NUMA +#ifdef CONFIG_SMP extern void sched_balance_exec(void); -extern void node_nr_running_init(void); #else #define sched_balance_exec() {} -#define node_nr_running_init() {} #endif /* Move tasks off this (offline) CPU onto another. 
*/ diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/serial_core.h current/include/linux/serial_core.h --- reference/include/linux/serial_core.h 2004-04-07 14:54:36.000000000 -0700 +++ current/include/linux/serial_core.h 2004-04-08 15:10:20.000000000 -0700 @@ -165,7 +165,9 @@ struct uart_port { unsigned char x_char; /* xon/xoff char */ unsigned char regshift; /* reg offset shift */ unsigned char iotype; /* io access style */ - +#ifdef CONFIG_KGDB + int kgdb; /* in use by kgdb */ +#endif #define UPIO_PORT (0) #define UPIO_HUB6 (1) #define UPIO_MEM (2) diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/spinlock.h current/include/linux/spinlock.h --- reference/include/linux/spinlock.h 2003-07-02 14:45:00.000000000 -0700 +++ current/include/linux/spinlock.h 2004-04-08 15:10:21.000000000 -0700 @@ -15,6 +15,12 @@ #include /* for cpu relax */ #include +#ifdef CONFIG_KGDB +#include +#define SET_WHO(x, him) (x)->who = him; +#else +#define SET_WHO(x, him) +#endif /* * Must define these before including other files, inline functions need them @@ -55,6 +61,9 @@ typedef struct { const char *module; char *owner; int oline; +#ifdef CONFIG_KGDB + struct task_struct *who; +#endif } spinlock_t; #define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0} @@ -66,6 +75,7 @@ typedef struct { (x)->module = __FILE__; \ (x)->owner = NULL; \ (x)->oline = 0; \ + SET_WHO(x, NULL) \ } while (0) #define CHECK_LOCK(x) \ @@ -88,6 +98,7 @@ typedef struct { (x)->lock = 1; \ (x)->owner = __FILE__; \ (x)->oline = __LINE__; \ + SET_WHO(x, current) \ } while (0) /* without debugging, spin_is_locked on UP always says @@ -118,6 +129,7 @@ typedef struct { (x)->lock = 1; \ (x)->owner = __FILE__; \ (x)->oline = __LINE__; \ + SET_WHO(x, current) \ 1; \ }) @@ -184,6 +196,17 @@ typedef struct { #endif /* !SMP */ +#ifdef CONFIG_LOCKMETER +extern void _metered_spin_lock (spinlock_t *lock); +extern void _metered_spin_unlock (spinlock_t *lock); +extern int 
_metered_spin_trylock(spinlock_t *lock); +extern void _metered_read_lock (rwlock_t *lock); +extern void _metered_read_unlock (rwlock_t *lock); +extern void _metered_write_lock (rwlock_t *lock); +extern void _metered_write_unlock (rwlock_t *lock); +extern int _metered_write_trylock(rwlock_t *lock); +#endif + /* * Define the various spin_lock and rw_lock methods. Note we define these * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various @@ -389,6 +412,141 @@ do { \ _raw_spin_trylock(lock) ? 1 : \ ({preempt_enable(); local_bh_enable(); 0;});}) +#ifdef CONFIG_LOCKMETER +#undef spin_lock +#undef spin_trylock +#undef spin_unlock +#undef spin_lock_irqsave +#undef spin_lock_irq +#undef spin_lock_bh +#undef read_lock +#undef read_unlock +#undef write_lock +#undef write_unlock +#undef write_trylock +#undef spin_unlock_bh +#undef read_lock_irqsave +#undef read_lock_irq +#undef read_lock_bh +#undef read_unlock_bh +#undef write_lock_irqsave +#undef write_lock_irq +#undef write_lock_bh +#undef write_unlock_bh + +#define spin_lock(lock) \ +do { \ + preempt_disable(); \ + _metered_spin_lock(lock); \ +} while(0) + +#define spin_trylock(lock) ({preempt_disable(); _metered_spin_trylock(lock) ? 
\ + 1 : ({preempt_enable(); 0;});}) +#define spin_unlock(lock) \ +do { \ + _metered_spin_unlock(lock); \ + preempt_enable(); \ +} while (0) + +#define spin_lock_irqsave(lock, flags) \ +do { \ + local_irq_save(flags); \ + preempt_disable(); \ + _metered_spin_lock(lock); \ +} while (0) + +#define spin_lock_irq(lock) \ +do { \ + local_irq_disable(); \ + preempt_disable(); \ + _metered_spin_lock(lock); \ +} while (0) + +#define spin_lock_bh(lock) \ +do { \ + local_bh_disable(); \ + preempt_disable(); \ + _metered_spin_lock(lock); \ +} while (0) + +#define spin_unlock_bh(lock) \ +do { \ + _metered_spin_unlock(lock); \ + preempt_enable(); \ + local_bh_enable(); \ +} while (0) + + +#define read_lock(lock) ({preempt_disable(); _metered_read_lock(lock);}) +#define read_unlock(lock) ({_metered_read_unlock(lock); preempt_enable();}) +#define write_lock(lock) ({preempt_disable(); _metered_write_lock(lock);}) +#define write_unlock(lock) ({_metered_write_unlock(lock); preempt_enable();}) +#define write_trylock(lock) ({preempt_disable();_metered_write_trylock(lock) ? 
\ + 1 : ({preempt_enable(); 0;});}) +#define spin_unlock_no_resched(lock) \ +do { \ + _metered_spin_unlock(lock); \ + preempt_enable_no_resched(); \ +} while (0) + +#define read_lock_irqsave(lock, flags) \ +do { \ + local_irq_save(flags); \ + preempt_disable(); \ + _metered_read_lock(lock); \ +} while (0) + +#define read_lock_irq(lock) \ +do { \ + local_irq_disable(); \ + preempt_disable(); \ + _metered_read_lock(lock); \ +} while (0) + +#define read_lock_bh(lock) \ +do { \ + local_bh_disable(); \ + preempt_disable(); \ + _metered_read_lock(lock); \ +} while (0) + +#define read_unlock_bh(lock) \ +do { \ + _metered_read_unlock(lock); \ + preempt_enable(); \ + local_bh_enable(); \ +} while (0) + +#define write_lock_irqsave(lock, flags) \ +do { \ + local_irq_save(flags); \ + preempt_disable(); \ + _metered_write_lock(lock); \ +} while (0) + +#define write_lock_irq(lock) \ +do { \ + local_irq_disable(); \ + preempt_disable(); \ + _metered_write_lock(lock); \ +} while (0) + +#define write_lock_bh(lock) \ +do { \ + local_bh_disable(); \ + preempt_disable(); \ + _metered_write_lock(lock); \ +} while (0) + +#define write_unlock_bh(lock) \ +do { \ + _metered_write_unlock(lock); \ + preempt_enable(); \ + local_bh_enable(); \ +} while (0) + +#endif /* !CONFIG_LOCKMETER */ + /* "lock on reference count zero" */ #ifndef ATOMIC_DEC_AND_LOCK #include diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/swap.h current/include/linux/swap.h --- reference/include/linux/swap.h 2004-02-04 16:24:33.000000000 -0800 +++ current/include/linux/swap.h 2004-04-08 15:10:27.000000000 -0700 @@ -76,7 +76,6 @@ struct reclaim_state { #ifdef __KERNEL__ struct address_space; -struct pte_chain; struct sysinfo; struct writeback_control; struct zone; @@ -177,26 +176,11 @@ extern int try_to_free_pages(struct zone extern int shrink_all_memory(int); extern int vm_swappiness; -/* linux/mm/rmap.c */ #ifdef CONFIG_MMU -int FASTCALL(page_referenced(struct page *)); -struct pte_chain 
*FASTCALL(page_add_rmap(struct page *, pte_t *, - struct pte_chain *)); -void FASTCALL(page_remove_rmap(struct page *, pte_t *)); -int FASTCALL(try_to_unmap(struct page *)); - /* linux/mm/shmem.c */ extern int shmem_unuse(swp_entry_t entry, struct page *page); -#else -#define page_referenced(page) TestClearPageReferenced(page) -#define try_to_unmap(page) SWAP_FAIL #endif /* CONFIG_MMU */ -/* return values of try_to_unmap */ -#define SWAP_SUCCESS 0 -#define SWAP_AGAIN 1 -#define SWAP_FAIL 2 - #ifdef CONFIG_SWAP /* linux/mm/page_io.c */ extern int swap_readpage(struct file *, struct page *); @@ -230,6 +214,8 @@ extern void swap_free(swp_entry_t); extern void free_swap_and_cache(swp_entry_t); extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); extern struct swap_info_struct *get_swap_info_struct(unsigned); +extern struct swap_info_struct *swap_info_get(swp_entry_t); +extern void swap_info_put(struct swap_info_struct *); extern int can_share_swap_page(struct page *); extern int remove_exclusive_swap_page(struct page *); diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/sysctl.h current/include/linux/sysctl.h --- reference/include/linux/sysctl.h 2004-04-07 14:54:37.000000000 -0700 +++ current/include/linux/sysctl.h 2004-04-09 21:41:41.000000000 -0700 @@ -61,7 +61,8 @@ enum CTL_DEV=7, /* Devices */ CTL_BUS=8, /* Busses */ CTL_ABI=9, /* Binary emulation */ - CTL_CPU=10 /* CPU stuff (speed scaling, etc) */ + CTL_CPU=10, /* CPU stuff (speed scaling, etc) */ + CTL_SCHED=11, /* scheduler tunables */ }; /* CTL_BUS names: */ @@ -131,6 +132,10 @@ enum KERN_PRINTK_RATELIMIT_BURST=61, /* int: tune printk ratelimiting */ KERN_PTY=62, /* dir: pty driver */ KERN_NGROUPS_MAX=63, /* int: NGROUPS_MAX */ + KERN_SHMUSEHUGEPAGES=64, /* int: back shm with huge pages */ + KERN_MMAPUSEHUGEPAGES=65, /* int: back anon mmap with huge pages */ + KERN_HPAGES_PER_FILE=66, /* int: max bigpages per file */ + KERN_HPAGES_MAP_SZ=67, /* int: min size (MB) of mapping */ }; 
@@ -161,6 +166,18 @@ enum VM_MAX_MAP_COUNT=22, /* int: Maximum number of mmaps/address-space */ }; +/* Tunable scheduler parameters in /proc/sys/sched/ */ +enum { + SCHED_MIN_TIMESLICE=1, /* minimum process timeslice */ + SCHED_MAX_TIMESLICE=2, /* maximum process timeslice */ + SCHED_CHILD_PENALTY=3, /* penalty on fork to child */ + SCHED_PARENT_PENALTY=4, /* penalty on fork to parent */ + SCHED_EXIT_WEIGHT=5, /* penalty to parent of CPU hog child */ + SCHED_PRIO_BONUS_RATIO=6, /* percent of max prio given as bonus */ + SCHED_INTERACTIVE_DELTA=7, /* delta used to scale interactivity */ + SCHED_MAX_SLEEP_AVG=8, /* maximum sleep avg attainable */ + SCHED_STARVATION_LIMIT=9, /* no re-active if expired is starved */ +}; /* CTL_NET names: */ enum diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/timex.h current/include/linux/timex.h --- reference/include/linux/timex.h 2003-10-27 10:41:15.000000000 -0800 +++ current/include/linux/timex.h 2004-04-08 15:10:24.000000000 -0700 @@ -78,7 +78,7 @@ #elif HZ >= 768 && HZ < 1536 # define SHIFT_HZ 10 #else -# error You lose. +# error Please use a HZ value which is between 12 and 1536 #endif /* diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/wait.h current/include/linux/wait.h --- reference/include/linux/wait.h 2003-10-01 11:47:14.000000000 -0700 +++ current/include/linux/wait.h 2004-04-09 13:23:18.000000000 -0700 @@ -80,6 +80,15 @@ static inline int waitqueue_active(wait_ return !list_empty(&q->task_list); } +/* + * Used to distinguish between sync and async io wait context: + * sync i/o typically specifies a NULL wait queue entry or a wait + * queue entry bound to a task (current task) to wake up. + * aio specifies a wait queue entry with an async notification + * callback routine, not associated with any task. 
+ */ +#define is_sync_wait(wait) (!(wait) || ((wait)->task)) + extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); diff -purN -X /home/mbligh/.diff.exclude reference/include/linux/writeback.h current/include/linux/writeback.h --- reference/include/linux/writeback.h 2003-10-01 11:48:26.000000000 -0700 +++ current/include/linux/writeback.h 2004-04-09 13:23:19.000000000 -0700 @@ -87,6 +87,10 @@ void page_writeback_init(void); void balance_dirty_pages_ratelimited(struct address_space *mapping); int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0); int do_writepages(struct address_space *mapping, struct writeback_control *wbc); +ssize_t sync_page_range(struct inode *inode, struct address_space *mapping, + loff_t pos, size_t count); +ssize_t sync_page_range_nolock(struct inode *inode, struct address_space + *mapping, loff_t pos, size_t count); /* pdflush.c */ extern int nr_pdflush_threads; /* Global so it can be exported to sysctl diff -purN -X /home/mbligh/.diff.exclude reference/init/main.c current/init/main.c --- reference/init/main.c 2004-04-07 14:54:37.000000000 -0700 +++ current/init/main.c 2004-04-08 15:10:25.000000000 -0700 @@ -84,7 +84,6 @@ extern void signals_init(void); extern void buffer_init(void); extern void pidhash_init(void); extern void pidmap_init(void); -extern void pte_chain_init(void); extern void radix_tree_init(void); extern void free_initmem(void); extern void populate_rootfs(void); @@ -420,6 +419,13 @@ asmlinkage void __init start_kernel(void */ smp_prepare_boot_cpu(); + /* + * Set up the scheduler prior starting any interrupts (such as the + * timer interrupt). Full topology setup happens at smp_init() + * time - but meanwhile we still have a functioning scheduler. 
+ */ + sched_init(); + build_all_zonelists(); page_alloc_init(); printk("Kernel command line: %s\n", saved_command_line); @@ -431,7 +437,7 @@ asmlinkage void __init start_kernel(void rcu_init(); init_IRQ(); pidhash_init(); - sched_init(); + init_timers(); softirq_init(); time_init(); @@ -460,7 +466,6 @@ asmlinkage void __init start_kernel(void calibrate_delay(); pidmap_init(); pgtable_cache_init(); - pte_chain_init(); #ifdef CONFIG_X86 if (efi_enabled) efi_enter_virtual_mode(); @@ -571,7 +576,6 @@ static void do_pre_smp_initcalls(void) migration_init(); #endif - node_nr_running_init(); spawn_ksoftirqd(); } @@ -602,6 +606,7 @@ static int init(void * unused) do_pre_smp_initcalls(); smp_init(); + sched_init_smp(); do_basic_setup(); prepare_namespace(); diff -purN -X /home/mbligh/.diff.exclude reference/ipc/shm.c current/ipc/shm.c --- reference/ipc/shm.c 2004-04-07 14:54:37.000000000 -0700 +++ current/ipc/shm.c 2004-04-09 21:41:39.000000000 -0700 @@ -32,6 +32,9 @@ #define shm_flags shm_perm.mode +extern int shm_use_hugepages; +extern int shm_hugepages_per_file; + static struct file_operations shm_file_operations; static struct vm_operations_struct shm_vm_ops; @@ -165,6 +168,31 @@ static struct vm_operations_struct shm_v .nopage = shmem_nopage, }; +#ifdef CONFIG_HUGETLBFS +int shm_with_hugepages(int shmflag, size_t size) +{ + /* flag specified explicitly */ + if (shmflag & SHM_HUGETLB) + return 1; + /* Are we disabled? */ + if (!shm_use_hugepages) + return 0; + /* Must be HPAGE aligned */ + if (size & ~HPAGE_MASK) + return 0; + /* Are we under the max per file? */ + if ((size >> HPAGE_SHIFT) > shm_hugepages_per_file) + return 0; + /* Do we have enough free huge pages? 
*/ + if (!is_hugepage_mem_enough(size)) + return 0; + + return 1; +} +#else +int shm_with_hugepages(int shmflag, size_t size) { return 0; } +#endif + static int newseg (key_t key, int shmflg, size_t size) { int error; @@ -194,8 +222,10 @@ static int newseg (key_t key, int shmflg return error; } - if (shmflg & SHM_HUGETLB) + if (shm_with_hugepages(shmflg, size)) { + shmflg |= SHM_HUGETLB; file = hugetlb_zero_setup(size); + } else { sprintf (name, "SYSV%08x", key); file = shmem_file_setup(name, size, VM_ACCOUNT); diff -purN -X /home/mbligh/.diff.exclude reference/kernel/Makefile current/kernel/Makefile --- reference/kernel/Makefile 2004-03-11 14:35:38.000000000 -0800 +++ current/kernel/Makefile 2004-04-09 13:23:20.000000000 -0700 @@ -12,16 +12,30 @@ obj-y = sched.o fork.o exec_domain.o obj-$(CONFIG_FUTEX) += futex.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += cpu.o +obj-$(CONFIG_LOCKMETER) += lockmeter.o obj-$(CONFIG_UID16) += uid16.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_PM) += power/ obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o +obj-$(CONFIG_KEXEC) += kexec.o obj-$(CONFIG_COMPAT) += compat.o obj-$(CONFIG_IKCONFIG) += configs.o obj-$(CONFIG_IKCONFIG_PROC) += configs.o obj-$(CONFIG_STOP_MACHINE) += stop_machine.o +obj-$(CONFIG_MCOUNT) += mcount.o + +ifeq ($(CONFIG_MCOUNT),y) +quiet_cmd_nopg = CC $@ + cmd_nopg = $(CC) $(subst -pg,,$(CFLAGS)) -c $(src)/$(*F).c -o $@ + +$(obj)/mcount.o: alwayscc + $(call cmd,nopg) +alwayscc: + $(Q)rm -f $(obj)/mcount.o +endif + ifneq ($(CONFIG_IA64),y) # According to Alan Modra , the -fno-omit-frame-pointer is # needed for x86 only. 
Why this used to be enabled for all architectures is beyond diff -purN -X /home/mbligh/.diff.exclude reference/kernel/fork.c current/kernel/fork.c --- reference/kernel/fork.c 2004-03-11 14:35:38.000000000 -0800 +++ current/kernel/fork.c 2004-04-09 21:41:41.000000000 -0700 @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -147,7 +148,12 @@ void fastcall prepare_to_wait(wait_queue spin_lock_irqsave(&q->lock, flags); if (list_empty(&wait->task_list)) __add_wait_queue(q, wait); - set_current_state(state); + /* + * don't alter the task state if this is just going to + * queue an async wait queue callback + */ + if (is_sync_wait(wait)) + set_current_state(state); spin_unlock_irqrestore(&q->lock, flags); } @@ -162,7 +168,12 @@ prepare_to_wait_exclusive(wait_queue_hea spin_lock_irqsave(&q->lock, flags); if (list_empty(&wait->task_list)) __add_wait_queue_tail(q, wait); - set_current_state(state); + /* + * don't alter the task state if this is just going to + * queue an async wait queue callback + */ + if (is_sync_wait(wait)) + set_current_state(state); spin_unlock_irqrestore(&q->lock, flags); } @@ -322,7 +333,7 @@ static inline int dup_mmap(struct mm_str /* insert tmp into the share list, just after mpnt */ down(&file->f_mapping->i_shared_sem); - list_add_tail(&tmp->shared, &mpnt->shared); + list_add(&tmp->shared, &mpnt->shared); up(&file->f_mapping->i_shared_sem); } @@ -417,9 +428,14 @@ struct mm_struct * mm_alloc(void) mm = allocate_mm(); if (mm) { memset(mm, 0, sizeof(*mm)); - return mm_init(mm); + mm = mm_init(mm); + if (mm && exec_rmap(mm)) { + mm_free_pgd(mm); + free_mm(mm); + mm = NULL; + } } - return NULL; + return mm; } /* @@ -446,6 +462,7 @@ void mmput(struct mm_struct *mm) spin_unlock(&mmlist_lock); exit_aio(mm); exit_mmap(mm); + exit_rmap(mm); mmdrop(mm); } } @@ -550,6 +567,12 @@ static int copy_mm(unsigned long clone_f if (!mm_init(mm)) goto fail_nomem; + if (dup_rmap(mm, oldmm)) { + mm_free_pgd(mm); + free_mm(mm); + goto fail_nomem; + 
} + if (init_new_context(tsk,mm)) goto fail_nocontext; @@ -945,6 +968,10 @@ struct task_struct *copy_process(unsigne p->start_time = get_jiffies_64(); p->security = NULL; p->io_context = NULL; + p->io_wait = NULL; +#ifdef CONFIG_SCHEDSTATS + memset(&p->sched_info, 0, sizeof(p->sched_info)); +#endif /* CONFIG_SCHEDSTATS */ retval = -ENOMEM; if ((retval = security_task_alloc(p))) @@ -1246,4 +1273,6 @@ void __init proc_caches_init(void) SLAB_HWCACHE_ALIGN, NULL, NULL); if(!mm_cachep) panic("vma_init: Cannot alloc mm_struct SLAB cache"); + + init_rmap(); } diff -purN -X /home/mbligh/.diff.exclude reference/kernel/kexec.c current/kernel/kexec.c --- reference/kernel/kexec.c 1969-12-31 16:00:00.000000000 -0800 +++ current/kernel/kexec.c 2004-04-09 13:23:20.000000000 -0700 @@ -0,0 +1,639 @@ +/* + * kexec.c - kexec system call + * Copyright (C) 2002-2003 Eric Biederman + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* When kexec transitions to the new kernel there is a one to one + * mapping between physical and virtual addresses. On processors + * where you can disable the MMU this is trivial, and easy. For + * others it is still a simple predictable page table to setup. + * + * In that environment kexec copies the new kernel to it's final + * resting place. This means I can only support memory whose + * physical address can fit in an unsigned long. In particular + * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled. + * If the assembly stub has more restrictive requirements + * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be + * defined more restrictively in . 
+ * + * The code for the transition from the current kernel to + * the new kernel is placed in the reboot_code_buffer, whose size + * is given by KEXEC_REBOOT_CODE_SIZE. In the best case only a single + * page of memory is necessary, but some architectures require more. + * Because this memory must be identity mapped in the transition from + * virtual to physical addresses it must live in the range + * 0 - TASK_SIZE, as only the user space mappings are arbitrarily + * modifiable. + * + * The assembly stub in the reboot code buffer is passed a linked list + * of descriptor pages detailing the source pages of the new kernel, + * and the destination addresses of those source pages. As this data + * structure is not used in the context of the current OS, it must + * be self contained. + * + * The code has been made to work with highmem pages and will use a + * destination page in its final resting place (if it happens + * to allocate it). The end product of this is that most of the + * physical address space, and most of ram can be used. + * + * Future directions include: + * - allocating a page table with the reboot code buffer identity + * mapped, to simplify machine_kexec and make kexec_on_panic more + * reliable. + * - allocating the pages for a page table for machines that cannot + * disable their MMUs. (Hammer, Alpha...) + */ + +/* KIMAGE_NO_DEST is an impossible destination address..., for + * allocating pages whose destination address we do not care about. 
+ */ +#define KIMAGE_NO_DEST (-1UL) + +static int kimage_is_destination_range( + struct kimage *image, unsigned long start, unsigned long end); +static struct page *kimage_alloc_reboot_code_pages(struct kimage *image); +static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long dest); + + +static int kimage_alloc(struct kimage **rimage, + unsigned long nr_segments, struct kexec_segment *segments) +{ + int result; + struct kimage *image; + size_t segment_bytes; + struct page *reboot_pages; + unsigned long i; + + /* Allocate a controlling structure */ + result = -ENOMEM; + image = kmalloc(sizeof(*image), GFP_KERNEL); + if (!image) { + goto out; + } + memset(image, 0, sizeof(*image)); + image->head = 0; + image->entry = &image->head; + image->last_entry = &image->head; + + /* Initialize the list of destination pages */ + INIT_LIST_HEAD(&image->dest_pages); + + /* Initialize the list of unuseable pages */ + INIT_LIST_HEAD(&image->unuseable_pages); + + /* Read in the segments */ + image->nr_segments = nr_segments; + segment_bytes = nr_segments * sizeof*segments; + result = copy_from_user(image->segment, segments, segment_bytes); + if (result) + goto out; + + /* Verify we have good destination addresses. The caller is + * responsible for making certain we don't attempt to load + * the new image into invalid or reserved areas of RAM. This + * just verifies it is an address we can use. + */ + result = -EADDRNOTAVAIL; + for(i = 0; i < nr_segments; i++) { + unsigned long mend; + mend = ((unsigned long)(image->segment[i].mem)) + + image->segment[i].memsz; + if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT) + goto out; + } + + /* Find a location for the reboot code buffer, and add it + * the vector of segments so that it's pages will also be + * counted as destination pages. 
+ */ + result = -ENOMEM; + reboot_pages = kimage_alloc_reboot_code_pages(image); + if (!reboot_pages) { + printk(KERN_ERR "Could not allocate reboot_code_buffer\n"); + goto out; + } + image->reboot_code_pages = reboot_pages; + image->segment[nr_segments].buf = 0; + image->segment[nr_segments].bufsz = 0; + image->segment[nr_segments].mem = (void *)(page_to_pfn(reboot_pages) << PAGE_SHIFT); + image->segment[nr_segments].memsz = KEXEC_REBOOT_CODE_SIZE; + image->nr_segments++; + + result = 0; + out: + if (result == 0) { + *rimage = image; + } else { + kfree(image); + } + return result; +} + +static int kimage_is_destination_range( + struct kimage *image, unsigned long start, unsigned long end) +{ + unsigned long i; + for(i = 0; i < image->nr_segments; i++) { + unsigned long mstart, mend; + mstart = (unsigned long)image->segment[i].mem; + mend = mstart + image->segment[i].memsz; + if ((end > mstart) && (start < mend)) { + return 1; + } + } + return 0; +} + +#ifdef CONFIG_MMU +static int identity_map_pages(struct page *pages, int order) +{ + struct mm_struct *mm; + struct vm_area_struct *vma; + int error; + mm = &init_mm; + vma = 0; + + down_write(&mm->mmap_sem); + error = -ENOMEM; + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + if (!vma) { + goto out; + } + + memset(vma, 0, sizeof(*vma)); + vma->vm_mm = mm; + vma->vm_start = page_to_pfn(pages) << PAGE_SHIFT; + vma->vm_end = vma->vm_start + (1 << (order + PAGE_SHIFT)); + vma->vm_ops = 0; + vma->vm_flags = VM_SHARED \ + | VM_READ | VM_WRITE | VM_EXEC \ + | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC \ + | VM_DONTCOPY | VM_RESERVED; + vma->vm_page_prot = protection_map[vma->vm_flags & 0xf]; + vma->vm_file = NULL; + vma->vm_private_data = NULL; + INIT_LIST_HEAD(&vma->shared); + insert_vm_struct(mm, vma); + + error = remap_page_range(vma, vma->vm_start, vma->vm_start, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + if (error) { + goto out; + } + + error = 0; + out: + if (error && vma) { + 
kmem_cache_free(vm_area_cachep, vma); + vma = 0; + } + up_write(&mm->mmap_sem); + + return error; +} +#else +#define identity_map_pages(pages, order) 0 +#endif + +struct page *kimage_alloc_reboot_code_pages(struct kimage *image) +{ + /* The reboot code buffer is special. It is the only set of + * pages that must be allocated in their final resting place, + * and the only set of pages whose final resting place we can + * pick. + * + * At worst this runs in O(N) of the image size. + */ + struct list_head extra_pages, *pos, *next; + struct page *pages; + unsigned long addr; + int order, count; + order = get_order(KEXEC_REBOOT_CODE_SIZE); + count = 1 << order; + INIT_LIST_HEAD(&extra_pages); + do { + int i; + pages = alloc_pages(GFP_KERNEL, order); + if (!pages) + break; + for(i = 0; i < count; i++) { + SetPageReserved(pages +i); + } + addr = page_to_pfn(pages) << PAGE_SHIFT; + if ((page_to_pfn(pages) >= (TASK_SIZE >> PAGE_SHIFT)) || + kimage_is_destination_range(image, addr, addr + KEXEC_REBOOT_CODE_SIZE)) { + list_add(&pages->list, &extra_pages); + pages = 0; + } + } while(!pages); + if (pages) { + int result; + result = identity_map_pages(pages, order); + if (result < 0) { + list_add(&pages->list, &extra_pages); + pages = 0; + } + } + /* If I could convert a multi page allocation into a buch of + * single page allocations I could add these pages to + * image->dest_pages. For now it is simpler to just free the + * pages again. 
+ */ + list_for_each_safe(pos, next, &extra_pages) { + struct page *page; + int i; + page = list_entry(pos, struct page, list); + for(i = 0; i < count; i++) { + ClearPageReserved(pages +i); + } + list_del(&extra_pages); + __free_pages(page, order); + } + return pages; +} + +static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) +{ + if (image->offset != 0) { + image->entry++; + } + if (image->entry == image->last_entry) { + kimage_entry_t *ind_page; + struct page *page; + page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); + if (!page) { + return -ENOMEM; + } + ind_page = page_address(page); + *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; + image->entry = ind_page; + image->last_entry = + ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); + } + *image->entry = entry; + image->entry++; + image->offset = 0; + return 0; +} + +static int kimage_set_destination( + struct kimage *image, unsigned long destination) +{ + int result; + destination &= PAGE_MASK; + result = kimage_add_entry(image, destination | IND_DESTINATION); + if (result == 0) { + image->destination = destination; + } + return result; +} + + +static int kimage_add_page(struct kimage *image, unsigned long page) +{ + int result; + page &= PAGE_MASK; + result = kimage_add_entry(image, page | IND_SOURCE); + if (result == 0) { + image->destination += PAGE_SIZE; + } + return result; +} + + +static void kimage_free_extra_pages(struct kimage *image) +{ + /* Walk through and free any extra destination pages I may have */ + struct list_head *pos, *next; + list_for_each_safe(pos, next, &image->dest_pages) { + struct page *page; + page = list_entry(pos, struct page, list); + list_del(&page->list); + ClearPageReserved(page); + __free_page(page); + } + /* Walk through and free any unuseable pages I have cached */ + list_for_each_safe(pos, next, &image->unuseable_pages) { + struct page *page; + page = list_entry(pos, struct page, list); + list_del(&page->list); + 
ClearPageReserved(page); + __free_page(page); + } + +} +static int kimage_terminate(struct kimage *image) +{ + int result; + result = kimage_add_entry(image, IND_DONE); + if (result == 0) { + /* Point at the terminating element */ + image->entry--; + kimage_free_extra_pages(image); + } + return result; +} + +#define for_each_kimage_entry(image, ptr, entry) \ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ + ptr = (entry & IND_INDIRECTION)? \ + phys_to_virt((entry & PAGE_MASK)): ptr +1) + +static void kimage_free(struct kimage *image) +{ + kimage_entry_t *ptr, entry; + kimage_entry_t ind = 0; + int i, count, order; + if (!image) + return; + kimage_free_extra_pages(image); + for_each_kimage_entry(image, ptr, entry) { + if (entry & IND_INDIRECTION) { + /* Free the previous indirection page */ + if (ind & IND_INDIRECTION) { + free_page((unsigned long)phys_to_virt(ind & PAGE_MASK)); + } + /* Save this indirection page until we are + * done with it. + */ + ind = entry; + } + else if (entry & IND_SOURCE) { + free_page((unsigned long)phys_to_virt(entry & PAGE_MASK)); + } + } + order = get_order(KEXEC_REBOOT_CODE_SIZE); + count = 1 << order; + do_munmap(&init_mm, + page_to_pfn(image->reboot_code_pages) << PAGE_SHIFT, + count << PAGE_SHIFT); + for(i = 0; i < count; i++) { + ClearPageReserved(image->reboot_code_pages + i); + } + __free_pages(image->reboot_code_pages, order); + kfree(image); +} + +static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page) +{ + kimage_entry_t *ptr, entry; + unsigned long destination = 0; + for_each_kimage_entry(image, ptr, entry) { + if (entry & IND_DESTINATION) { + destination = entry & PAGE_MASK; + } + else if (entry & IND_SOURCE) { + if (page == destination) { + return ptr; + } + destination += PAGE_SIZE; + } + } + return 0; +} + +static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long destination) +{ + /* Here we implment safe guards to ensure that a source 
page + * is not copied to its destination page before the data on + * the destination page is no longer useful. + * + * To do this we maintain the invariant that a source page is + * either its own destination page, or it is not a + * destination page at all. + * + * That is slightly stronger than required, but the proof + * that no problems will occur is trivial, and the + * implementation is simple to verify. + * + * When allocating all pages normally this algorithm will run + * in O(N) time, but in the worst case it will run in O(N^2) + * time. If the runtime is a problem the data structures can + * be fixed. + */ + struct page *page; + unsigned long addr; + + /* Walk through the list of destination pages, and see if I + * have a match. + */ + list_for_each_entry(page, &image->dest_pages, list) { + addr = page_to_pfn(page) << PAGE_SHIFT; + if (addr == destination) { + list_del(&page->list); + return page; + } + } + page = 0; + while(1) { + kimage_entry_t *old; + /* Allocate a page, if we run out of memory give up */ + page = alloc_page(gfp_mask); + if (!page) { + return 0; + } + SetPageReserved(page); + /* If the page cannot be used, file it away */ + if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { + list_add(&page->list, &image->unuseable_pages); + continue; + } + addr = page_to_pfn(page) << PAGE_SHIFT; + + /* If it is the destination page we want, use it */ + if (addr == destination) + break; + + /* If the page is not a destination page use it */ + if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE)) + break; + + /* I know that the page is someone's destination page. + * See if there is already a source page for this + * destination page. And if so swap the source pages.
+ */ + old = kimage_dst_used(image, addr); + if (old) { + /* If so move it */ + unsigned long old_addr; + struct page *old_page; + + old_addr = *old & PAGE_MASK; + old_page = pfn_to_page(old_addr >> PAGE_SHIFT); + copy_highpage(page, old_page); + *old = addr | (*old & ~PAGE_MASK); + + /* The old page I have found cannot be a + * destination page, so return it. + */ + addr = old_addr; + page = old_page; + break; + } + else { + /* Place the page on the destination list I + * will use it later. + */ + list_add(&page->list, &image->dest_pages); + } + } + return page; +} + +static int kimage_load_segment(struct kimage *image, + struct kexec_segment *segment) +{ + unsigned long mstart; + int result; + unsigned long offset; + unsigned long offset_end; + unsigned char *buf; + + result = 0; + buf = segment->buf; + mstart = (unsigned long)segment->mem; + + offset_end = segment->memsz; + + result = kimage_set_destination(image, mstart); + if (result < 0) { + goto out; + } + for(offset = 0; offset < segment->memsz; offset += PAGE_SIZE) { + struct page *page; + char *ptr; + size_t size, leader; + page = kimage_alloc_page(image, GFP_HIGHUSER, mstart + offset); + if (page == 0) { + result = -ENOMEM; + goto out; + } + result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT); + if (result < 0) { + goto out; + } + ptr = kmap(page); + if (segment->bufsz < offset) { + /* We are past the end zero the whole page */ + memset(ptr, 0, PAGE_SIZE); + kunmap(page); + continue; + } + size = PAGE_SIZE; + leader = 0; + if ((offset == 0)) { + leader = mstart & ~PAGE_MASK; + } + if (leader) { + /* We are on the first page zero the unused portion */ + memset(ptr, 0, leader); + size -= leader; + ptr += leader; + } + if (size > (segment->bufsz - offset)) { + size = segment->bufsz - offset; + } + if (size < (PAGE_SIZE - leader)) { + /* zero the trailing part of the page */ + memset(ptr + size, 0, (PAGE_SIZE - leader) - size); + } + result = copy_from_user(ptr, buf + offset, size); + 
kunmap(page); + if (result) { + result = (result < 0)?result : -EIO; + goto out; + } + } + out: + return result; +} + +/* + * Exec Kernel system call: for obvious reasons only root may call it. + * + * This call breaks up into three pieces. + * - A generic part which loads the new kernel from the current + * address space, and very carefully places the data in the + * allocated pages. + * + * - A generic part that interacts with the kernel and tells all of + * the devices to shut down. Preventing on-going dmas, and placing + * the devices in a consistent state so a later kernel can + * reinitialize them. + * + * - A machine specific part that includes the syscall number + * and the copies the image to it's final destination. And + * jumps into the image at entry. + * + * kexec does not sync, or unmount filesystems so if you need + * that to happen you need to do that yourself. + */ +struct kimage *kexec_image = 0; + +asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, + struct kexec_segment *segments, unsigned long flags) +{ + struct kimage *image; + int result; + + /* We only trust the superuser with rebooting the system. 
*/ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + /* In case we need just a little bit of special behavior for + * reboot on panic + */ + if (flags != 0) + return -EINVAL; + + if (nr_segments > KEXEC_SEGMENT_MAX) + return -EINVAL; + + image = 0; + result = 0; + + if (nr_segments > 0) { + unsigned long i; + result = kimage_alloc(&image, nr_segments, segments); + if (result) { + goto out; + } + image->start = entry; + for (i = 0; i < nr_segments; i++) { + ///result = kimage_load_segment(image, &segments[i]); + result = kimage_load_segment(image, &image->segment[i]); + if (result) { + goto out; + } + } + result = kimage_terminate(image); + if (result) { + goto out; + } + } + + image = xchg(&kexec_image, image); + + out: + kimage_free(image); + return result; +} diff -purN -X /home/mbligh/.diff.exclude reference/kernel/lockmeter.c current/kernel/lockmeter.c --- reference/kernel/lockmeter.c 1969-12-31 16:00:00.000000000 -0800 +++ current/kernel/lockmeter.c 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,1178 @@ +/* + * Copyright (C) 1999,2000 Silicon Graphics, Inc. + * + * Written by John Hawkes (hawkes@sgi.com) + * Based on klstat.c by Jack Steiner (steiner@sgi.com) + * + * Modified by Ray Bryant (raybry@us.ibm.com) + * Changes Copyright (C) 2000 IBM, Inc. + * Added save of index in spinlock_t to improve efficiency + * of "hold" time reporting for spinlocks + * Added support for hold time statistics for read and write + * locks. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define ASSERT(cond) +#define bzero(loc,size) memset(loc,0,size) + +/*<---------------------------------------------------*/ +/* lockmeter.c */ +/*>---------------------------------------------------*/ + +static lstat_control_t lstat_control __cacheline_aligned = + { LSTAT_OFF, SPIN_LOCK_UNLOCKED, SPIN_LOCK_UNLOCKED, + 19 * 0, NR_CPUS * 0, 0, NR_CPUS * 0 }; + +static ushort lstat_make_dir_entry(void *, void *); + +/* + * lstat_lookup + * + * Given a RA, locate the directory entry for the lock. + */ +static ushort +lstat_lookup(void *lock_ptr, void *caller_ra) +{ + ushort index; + lstat_directory_entry_t *dirp; + + dirp = lstat_control.dir; + + index = lstat_control.hashtab[DIRHASH(caller_ra)]; + while (dirp[index].caller_ra != caller_ra) { + if (index == 0) { + return lstat_make_dir_entry(lock_ptr, caller_ra); + } + index = dirp[index].next_stat_index; + } + + if (dirp[index].lock_ptr != NULL && dirp[index].lock_ptr != lock_ptr) { + dirp[index].lock_ptr = NULL; + } + + return index; +} + +/* + * lstat_make_dir_entry + * Called to add a new lock to the lock directory. 
+ */ +static ushort +lstat_make_dir_entry(void *lock_ptr, void *caller_ra) +{ + lstat_directory_entry_t *dirp; + ushort index, hindex; + unsigned long flags; + + /* lock the table without recursively reentering this metering code */ + local_irq_save(flags); + _raw_spin_lock(&lstat_control.directory_lock); + + hindex = DIRHASH(caller_ra); + index = lstat_control.hashtab[hindex]; + dirp = lstat_control.dir; + while (index && dirp[index].caller_ra != caller_ra) + index = dirp[index].next_stat_index; + + if (index == 0) { + if (lstat_control.next_free_dir_index < LSTAT_MAX_STAT_INDEX) { + index = lstat_control.next_free_dir_index++; + lstat_control.dir[index].caller_ra = caller_ra; + lstat_control.dir[index].lock_ptr = lock_ptr; + lstat_control.dir[index].next_stat_index = + lstat_control.hashtab[hindex]; + lstat_control.hashtab[hindex] = index; + } else { + lstat_control.dir_overflow++; + } + } + _raw_spin_unlock(&lstat_control.directory_lock); + local_irq_restore(flags); + return index; +} + +int +lstat_update(void *lock_ptr, void *caller_ra, int action) +{ + int index; + int cpu; + + ASSERT(action < LSTAT_ACT_MAX_VALUES); + + if (lstat_control.state == LSTAT_OFF) + return 0; + + index = lstat_lookup(lock_ptr, caller_ra); + cpu = THIS_CPU_NUMBER; + (*lstat_control.counts[cpu])[index].count[action]++; + (*lstat_control.counts[cpu])[index].acquire_time = get_cycles(); + + return index; +} + +int +lstat_update_time(void *lock_ptr, void *caller_ra, int action, uint32_t ticks) +{ + ushort index; + int cpu; + + ASSERT(action < LSTAT_ACT_MAX_VALUES); + + if (lstat_control.state == LSTAT_OFF) + return 0; + + index = lstat_lookup(lock_ptr, caller_ra); + cpu = THIS_CPU_NUMBER; + (*lstat_control.counts[cpu])[index].count[action]++; + (*lstat_control.counts[cpu])[index].cum_wait_ticks += (uint64_t) ticks; + if ((*lstat_control.counts[cpu])[index].max_wait_ticks < ticks) + (*lstat_control.counts[cpu])[index].max_wait_ticks = ticks; + + 
(*lstat_control.counts[cpu])[index].acquire_time = get_cycles(); + + return index; +} + +void +_metered_spin_lock(spinlock_t * lock_ptr) +{ + if (lstat_control.state == LSTAT_OFF) { + _raw_spin_lock(lock_ptr); /* do the real lock */ + PUT_INDEX(lock_ptr, 0); /* clean index in case lockmetering */ + /* gets turned on before unlock */ + } else { + void *this_pc = LSTAT_RA(LSTAT_RA_SPIN); + int index; + + if (_raw_spin_trylock(lock_ptr)) { + index = lstat_update(lock_ptr, this_pc, + LSTAT_ACT_NO_WAIT); + } else { + uint32_t start_cycles = get_cycles(); + _raw_spin_lock(lock_ptr); /* do the real lock */ + index = lstat_update_time(lock_ptr, this_pc, + LSTAT_ACT_SPIN, get_cycles() - start_cycles); + } + /* save the index in the lock itself for use in spin unlock */ + PUT_INDEX(lock_ptr, index); + } +} + +int +_metered_spin_trylock(spinlock_t * lock_ptr) +{ + if (lstat_control.state == LSTAT_OFF) { + return _raw_spin_trylock(lock_ptr); + } else { + int retval; + void *this_pc = LSTAT_RA(LSTAT_RA_SPIN); + + if ((retval = _raw_spin_trylock(lock_ptr))) { + int index = lstat_update(lock_ptr, this_pc, + LSTAT_ACT_NO_WAIT); + /* + * save the index in the lock itself for use in spin + * unlock + */ + PUT_INDEX(lock_ptr, index); + } else { + lstat_update(lock_ptr, this_pc, LSTAT_ACT_REJECT); + } + + return retval; + } +} + +void +_metered_spin_unlock(spinlock_t * lock_ptr) +{ + int index = -1; + + if (lstat_control.state != LSTAT_OFF) { + index = GET_INDEX(lock_ptr); + /* + * If statistics were turned off when we set the lock, + * then the index can be zero. If that is the case, + * then collect no stats on this call. + */ + if (index > 0) { + uint32_t hold_time; + int cpu = THIS_CPU_NUMBER; + hold_time = get_cycles() - + (*lstat_control.counts[cpu])[index].acquire_time; + (*lstat_control.counts[cpu])[index].cum_hold_ticks += + (uint64_t) hold_time; + if ((*lstat_control.counts[cpu])[index].max_hold_ticks < + hold_time) + (*lstat_control.counts[cpu])[index]. 
+ max_hold_ticks = hold_time; + } + } + + /* make sure we don't have a stale index value saved */ + PUT_INDEX(lock_ptr, 0); + _raw_spin_unlock(lock_ptr); /* do the real unlock */ +} + +/* + * allocate the next global read lock structure and store its index + * in the rwlock at "lock_ptr". + */ +uint32_t +alloc_rwlock_struct(rwlock_t * rwlock_ptr) +{ + int index; + unsigned long flags; + int cpu = THIS_CPU_NUMBER; + + /* If we've already overflowed, then do a quick exit */ + if (lstat_control.next_free_read_lock_index > + LSTAT_MAX_READ_LOCK_INDEX) { + lstat_control.rwlock_overflow++; + return 0; + } + + local_irq_save(flags); + _raw_spin_lock(&lstat_control.directory_lock); + + /* It is possible this changed while we were waiting for the directory_lock */ + if (lstat_control.state == LSTAT_OFF) { + index = 0; + goto unlock; + } + + /* It is possible someone else got here first and set the index */ + if ((index = GET_RWINDEX(rwlock_ptr)) == 0) { + /* + * we can't turn on read stats for this lock while there are + * readers (this would mess up the running hold time sum at + * unlock time) + */ + if (RWLOCK_READERS(rwlock_ptr) != 0) { + index = 0; + goto unlock; + } + + /* + * if stats are turned on after being off, we may need to + * return an old index from when the statistics were on last + * time. + */ + for (index = 1; index < lstat_control.next_free_read_lock_index; + index++) + if ((*lstat_control.read_lock_counts[cpu])[index]. 
+ lock_ptr == rwlock_ptr) + goto put_index_and_unlock; + + /* allocate the next global read lock structure */ + if (lstat_control.next_free_read_lock_index >= + LSTAT_MAX_READ_LOCK_INDEX) { + lstat_control.rwlock_overflow++; + index = 0; + goto unlock; + } + index = lstat_control.next_free_read_lock_index++; + + /* + * initialize the global read stats data structure for each + * cpu + */ + for (cpu = 0; cpu < num_online_cpus(); cpu++) { + (*lstat_control.read_lock_counts[cpu])[index].lock_ptr = + rwlock_ptr; + } +put_index_and_unlock: + /* store the index for the read lock structure into the lock */ + PUT_RWINDEX(rwlock_ptr, index); + } + +unlock: + _raw_spin_unlock(&lstat_control.directory_lock); + local_irq_restore(flags); + return index; +} + +void +_metered_read_lock(rwlock_t * rwlock_ptr) +{ + void *this_pc; + uint32_t start_cycles; + int index; + int cpu; + unsigned long flags; + int readers_before, readers_after; + uint64_t cycles64; + + if (lstat_control.state == LSTAT_OFF) { + _raw_read_lock(rwlock_ptr); + /* clean index in case lockmetering turns on before an unlock */ + PUT_RWINDEX(rwlock_ptr, 0); + return; + } + + this_pc = LSTAT_RA(LSTAT_RA_READ); + cpu = THIS_CPU_NUMBER; + index = GET_RWINDEX(rwlock_ptr); + + /* allocate the global stats entry for this lock, if needed */ + if (index == 0) + index = alloc_rwlock_struct(rwlock_ptr); + + readers_before = RWLOCK_READERS(rwlock_ptr); + if (_raw_read_trylock(rwlock_ptr)) { + /* + * We have decremented the lock to count a new reader, + * and have confirmed that no writer has it locked. + */ + /* update statistics if enabled */ + if (index > 0) { + local_irq_save(flags); + lstat_update((void *) rwlock_ptr, this_pc, + LSTAT_ACT_NO_WAIT); + /* preserve value of TSC so cum_hold_ticks and start_busy use same value */ + cycles64 = get_cycles64(); + (*lstat_control.read_lock_counts[cpu])[index]. 
+ cum_hold_ticks -= cycles64; + + /* record time and cpu of start of busy period */ + /* this is not perfect (some race conditions are possible) */ + if (readers_before == 0) { + (*lstat_control.read_lock_counts[cpu])[index]. + start_busy = cycles64; + PUT_RW_CPU(rwlock_ptr, cpu); + } + readers_after = RWLOCK_READERS(rwlock_ptr); + if (readers_after > + (*lstat_control.read_lock_counts[cpu])[index]. + max_readers) + (*lstat_control.read_lock_counts[cpu])[index]. + max_readers = readers_after; + local_irq_restore(flags); + } + + return; + } + /* If we get here, then we could not quickly grab the read lock */ + + start_cycles = get_cycles(); /* start counting the wait time */ + + /* Now spin until read_lock is successful */ + _raw_read_lock(rwlock_ptr); + + lstat_update_time((void *) rwlock_ptr, this_pc, LSTAT_ACT_SPIN, + get_cycles() - start_cycles); + + /* update statistics if they are enabled for this lock */ + if (index > 0) { + local_irq_save(flags); + cycles64 = get_cycles64(); + (*lstat_control.read_lock_counts[cpu])[index].cum_hold_ticks -= + cycles64; + + /* this is not perfect (some race conditions are possible) */ + if (readers_before == 0) { + (*lstat_control.read_lock_counts[cpu])[index]. + start_busy = cycles64; + PUT_RW_CPU(rwlock_ptr, cpu); + } + readers_after = RWLOCK_READERS(rwlock_ptr); + if (readers_after > + (*lstat_control.read_lock_counts[cpu])[index].max_readers) + (*lstat_control.read_lock_counts[cpu])[index]. + max_readers = readers_after; + local_irq_restore(flags); + } +} + +void +_metered_read_unlock(rwlock_t * rwlock_ptr) +{ + int index; + int cpu; + unsigned long flags; + uint64_t busy_length; + uint64_t cycles64; + + if (lstat_control.state == LSTAT_OFF) { + _raw_read_unlock(rwlock_ptr); + return; + } + + index = GET_RWINDEX(rwlock_ptr); + cpu = THIS_CPU_NUMBER; + + if (index > 0) { + local_irq_save(flags); + /* + * preserve value of TSC so cum_hold_ticks and busy_ticks are + * consistent. 
+ */ + cycles64 = get_cycles64(); + (*lstat_control.read_lock_counts[cpu])[index].cum_hold_ticks += + cycles64; + (*lstat_control.read_lock_counts[cpu])[index].read_lock_count++; + + /* + * once again, this is not perfect (some race conditions are + * possible) + */ + if (RWLOCK_READERS(rwlock_ptr) == 1) { + int cpu1 = GET_RW_CPU(rwlock_ptr); + uint64_t last_start_busy = + (*lstat_control.read_lock_counts[cpu1])[index]. + start_busy; + (*lstat_control.read_lock_counts[cpu])[index]. + busy_periods++; + if (cycles64 > last_start_busy) { + busy_length = cycles64 - last_start_busy; + (*lstat_control.read_lock_counts[cpu])[index]. + busy_ticks += busy_length; + if (busy_length > + (*lstat_control. + read_lock_counts[cpu])[index]. + max_busy) + (*lstat_control. + read_lock_counts[cpu])[index]. + max_busy = busy_length; + } + } + local_irq_restore(flags); + } + _raw_read_unlock(rwlock_ptr); +} + +void +_metered_write_lock(rwlock_t * rwlock_ptr) +{ + uint32_t start_cycles; + void *this_pc; + uint32_t spin_ticks = 0; /* in anticipation of a potential wait */ + int index; + int write_index = 0; + int cpu; + enum { + writer_writer_conflict, + writer_reader_conflict + } why_wait = writer_writer_conflict; + + if (lstat_control.state == LSTAT_OFF) { + _raw_write_lock(rwlock_ptr); + /* clean index in case lockmetering turns on before an unlock */ + PUT_RWINDEX(rwlock_ptr, 0); + return; + } + + this_pc = LSTAT_RA(LSTAT_RA_WRITE); + cpu = THIS_CPU_NUMBER; + index = GET_RWINDEX(rwlock_ptr); + + /* allocate the global stats entry for this lock, if needed */ + if (index == 0) { + index = alloc_rwlock_struct(rwlock_ptr); + } + + if (_raw_write_trylock(rwlock_ptr)) { + /* We acquired the lock on the first try */ + write_index = lstat_update((void *) rwlock_ptr, this_pc, + LSTAT_ACT_NO_WAIT); + /* save the write_index for use in unlock if stats enabled */ + if (index > 0) + (*lstat_control.read_lock_counts[cpu])[index]. 
+ write_index = write_index; + return; + } + + /* If we get here, then we could not quickly grab the write lock */ + start_cycles = get_cycles(); /* start counting the wait time */ + + why_wait = RWLOCK_READERS(rwlock_ptr) ? + writer_reader_conflict : writer_writer_conflict; + + /* Now set the lock and wait for conflicts to disappear */ + _raw_write_lock(rwlock_ptr); + + spin_ticks = get_cycles() - start_cycles; + + /* update stats -- if enabled */ + if (index > 0 && spin_ticks) { + if (why_wait == writer_reader_conflict) { + /* waited due to a reader holding the lock */ + write_index = lstat_update_time((void *)rwlock_ptr, + this_pc, LSTAT_ACT_SPIN, spin_ticks); + } else { + /* + * waited due to another writer holding the lock + */ + write_index = lstat_update_time((void *)rwlock_ptr, + this_pc, LSTAT_ACT_WW_SPIN, spin_ticks); + (*lstat_control.counts[cpu])[write_index]. + cum_wait_ww_ticks += spin_ticks; + if (spin_ticks > + (*lstat_control.counts[cpu])[write_index]. + max_wait_ww_ticks) { + (*lstat_control.counts[cpu])[write_index]. + max_wait_ww_ticks = spin_ticks; + } + } + + /* save the directory index for use on write_unlock */ + (*lstat_control.read_lock_counts[cpu])[index]. + write_index = write_index; + } +} + +void +_metered_write_unlock(rwlock_t * rwlock_ptr) +{ + int index; + int cpu; + int write_index; + uint32_t hold_time; + + if (lstat_control.state == LSTAT_OFF) { + _raw_write_unlock(rwlock_ptr); + return; + } + + cpu = THIS_CPU_NUMBER; + index = GET_RWINDEX(rwlock_ptr); + + /* update statistics if stats enabled for this lock */ + if (index > 0) { + write_index = + (*lstat_control.read_lock_counts[cpu])[index].write_index; + + hold_time = get_cycles() - + (*lstat_control.counts[cpu])[write_index].acquire_time; + (*lstat_control.counts[cpu])[write_index].cum_hold_ticks += + (uint64_t) hold_time; + if ((*lstat_control.counts[cpu])[write_index].max_hold_ticks < + hold_time) + (*lstat_control.counts[cpu])[write_index]. 
+ max_hold_ticks = hold_time; + } + _raw_write_unlock(rwlock_ptr); +} + +int +_metered_write_trylock(rwlock_t * rwlock_ptr) +{ + int retval; + void *this_pc = LSTAT_RA(LSTAT_RA_WRITE); + + if ((retval = _raw_write_trylock(rwlock_ptr))) { + lstat_update(rwlock_ptr, this_pc, LSTAT_ACT_NO_WAIT); + } else { + lstat_update(rwlock_ptr, this_pc, LSTAT_ACT_REJECT); + } + + return retval; +} + +static void +init_control_space(void) +{ + /* Set all control space pointers to null and indices to "empty" */ + int cpu; + + /* + * Access CPU_CYCLE_FREQUENCY at the outset, which in some + * architectures may trigger a runtime calculation that uses a + * spinlock. Let's do this before lockmetering is turned on. + */ + if (CPU_CYCLE_FREQUENCY == 0) + BUG(); + + lstat_control.hashtab = NULL; + lstat_control.dir = NULL; + for (cpu = 0; cpu < NR_CPUS; cpu++) { + lstat_control.counts[cpu] = NULL; + lstat_control.read_lock_counts[cpu] = NULL; + } +} + +static int +reset_lstat_data(void) +{ + int cpu, flags; + + flags = 0; + lstat_control.next_free_dir_index = 1; /* 0 is for overflows */ + lstat_control.next_free_read_lock_index = 1; + lstat_control.dir_overflow = 0; + lstat_control.rwlock_overflow = 0; + + lstat_control.started_cycles64 = 0; + lstat_control.ending_cycles64 = 0; + lstat_control.enabled_cycles64 = 0; + lstat_control.first_started_time = 0; + lstat_control.started_time = 0; + lstat_control.ending_time = 0; + lstat_control.intervals = 0; + + /* + * paranoia -- in case someone does a "lockstat reset" before + * "lockstat on" + */ + if (lstat_control.hashtab) { + bzero(lstat_control.hashtab, + LSTAT_HASH_TABLE_SIZE * sizeof (short)); + bzero(lstat_control.dir, LSTAT_MAX_STAT_INDEX * + sizeof (lstat_directory_entry_t)); + + for (cpu = 0; cpu < num_online_cpus(); cpu++) { + bzero(lstat_control.counts[cpu], + sizeof (lstat_cpu_counts_t)); + bzero(lstat_control.read_lock_counts[cpu], + sizeof (lstat_read_lock_cpu_counts_t)); + } + } +#ifdef NOTDEF + 
_raw_spin_unlock(&lstat_control.directory_lock); + local_irq_restore(flags); +#endif + return 1; +} + +static void +release_control_space(void) +{ + /* + * Called when either (1) allocation of kmem + * or (2) when user writes LSTAT_RELEASE to /proc/lockmeter. + * Assume that all pointers have been initialized to zero, + * i.e., nonzero pointers are valid addresses. + */ + int cpu; + + if (lstat_control.hashtab) { + kfree(lstat_control.hashtab); + lstat_control.hashtab = NULL; + } + + if (lstat_control.dir) { + vfree(lstat_control.dir); + lstat_control.dir = NULL; + } + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (lstat_control.counts[cpu]) { + vfree(lstat_control.counts[cpu]); + lstat_control.counts[cpu] = NULL; + } + if (lstat_control.read_lock_counts[cpu]) { + kfree(lstat_control.read_lock_counts[cpu]); + lstat_control.read_lock_counts[cpu] = NULL; + } + } +} + +int +get_lockmeter_info_size(void) +{ + return sizeof (lstat_user_request_t) + + num_online_cpus() * sizeof (lstat_cpu_counts_t) + + num_online_cpus() * sizeof (lstat_read_lock_cpu_counts_t) + + (LSTAT_MAX_STAT_INDEX * sizeof (lstat_directory_entry_t)); +} + +ssize_t +get_lockmeter_info(char *buffer, size_t max_len, loff_t * last_index) +{ + lstat_user_request_t req; + struct timeval tv; + ssize_t next_ret_bcount; + ssize_t actual_ret_bcount = 0; + int cpu; + + *last_index = 0; /* a one-shot read */ + + req.lstat_version = LSTAT_VERSION; + req.state = lstat_control.state; + req.maxcpus = num_online_cpus(); + req.cycleval = CPU_CYCLE_FREQUENCY; +#ifdef notyet + req.kernel_magic_addr = (void *) &_etext; + req.kernel_end_addr = (void *) &_etext; +#endif + req.uts = system_utsname; + req.intervals = lstat_control.intervals; + + req.first_started_time = lstat_control.first_started_time; + req.started_time = lstat_control.started_time; + req.started_cycles64 = lstat_control.started_cycles64; + + req.next_free_dir_index = lstat_control.next_free_dir_index; + req.next_free_read_lock_index =
lstat_control.next_free_read_lock_index; + req.dir_overflow = lstat_control.dir_overflow; + req.rwlock_overflow = lstat_control.rwlock_overflow; + + if (lstat_control.state == LSTAT_OFF) { + if (req.intervals == 0) { + /* measurement is off and no valid data present */ + next_ret_bcount = sizeof (lstat_user_request_t); + req.enabled_cycles64 = 0; + + if ((actual_ret_bcount + next_ret_bcount) > max_len) + return actual_ret_bcount; + + copy_to_user(buffer, (void *) &req, next_ret_bcount); + actual_ret_bcount += next_ret_bcount; + return actual_ret_bcount; + } else { + /* + * measurement is off but valid data present + * fetch time info from lstat_control + */ + req.ending_time = lstat_control.ending_time; + req.ending_cycles64 = lstat_control.ending_cycles64; + req.enabled_cycles64 = lstat_control.enabled_cycles64; + } + } else { + /* + * this must be a read while data active--use current time, + * etc + */ + do_gettimeofday(&tv); + req.ending_time = tv.tv_sec; + req.ending_cycles64 = get_cycles64(); + req.enabled_cycles64 = req.ending_cycles64 - + req.started_cycles64 + lstat_control.enabled_cycles64; + } + + next_ret_bcount = sizeof (lstat_user_request_t); + if ((actual_ret_bcount + next_ret_bcount) > max_len) + return actual_ret_bcount; + + copy_to_user(buffer, (void *) &req, next_ret_bcount); + actual_ret_bcount += next_ret_bcount; + + if (!lstat_control.counts[0]) /* not initialized? 
*/ + return actual_ret_bcount; + + next_ret_bcount = sizeof (lstat_cpu_counts_t); + for (cpu = 0; cpu < num_online_cpus(); cpu++) { + if ((actual_ret_bcount + next_ret_bcount) > max_len) + return actual_ret_bcount; /* leave early */ + copy_to_user(buffer + actual_ret_bcount, + lstat_control.counts[cpu], next_ret_bcount); + actual_ret_bcount += next_ret_bcount; + } + + next_ret_bcount = LSTAT_MAX_STAT_INDEX * + sizeof (lstat_directory_entry_t); + if (((actual_ret_bcount + next_ret_bcount) > max_len) + || !lstat_control.dir) + return actual_ret_bcount; /* leave early */ + + copy_to_user(buffer + actual_ret_bcount, lstat_control.dir, + next_ret_bcount); + actual_ret_bcount += next_ret_bcount; + + next_ret_bcount = sizeof (lstat_read_lock_cpu_counts_t); + for (cpu = 0; cpu < num_online_cpus(); cpu++) { + if (actual_ret_bcount + next_ret_bcount > max_len) + return actual_ret_bcount; + copy_to_user(buffer + actual_ret_bcount, + lstat_control.read_lock_counts[cpu], + next_ret_bcount); + actual_ret_bcount += next_ret_bcount; + } + + return actual_ret_bcount; +} + +/* + * Writing to the /proc lockmeter node enables or disables metering, + * based upon the first byte of the "written" data. + * The following values are defined: + * LSTAT_ON: 1st call: allocates storage, initializes and turns on measurement + * subsequent calls just turn on measurement + * LSTAT_OFF: turns off measurement + * LSTAT_RESET: resets statistics + * LSTAT_RELEASE: releases statistics storage + * + * This allows one to accumulate statistics over several lockstat runs: + * + * lockstat on + * lockstat off + * ...repeat above as desired... + * lockstat get + * ...now start a new set of measurements... + * lockstat reset + * lockstat on + * ... 
+ + */ +ssize_t +put_lockmeter_info(const char *buffer, size_t len) +{ + int error = 0; + int dirsize, countsize, read_lock_countsize, hashsize; + int cpu; + char put_char; + int i, read_lock_blocks; + unsigned long flags; + rwlock_t *lock_ptr; + struct timeval tv; + + if (len <= 0) + return -EINVAL; + + _raw_spin_lock(&lstat_control.control_lock); + + get_user(put_char, buffer); + switch (put_char) { + + case LSTAT_OFF: + if (lstat_control.state != LSTAT_OFF) { + /* + * To avoid seeing read lock hold times in an + * inconsistent state, we have to follow this protocol + * to turn off statistics + */ + local_irq_save(flags); + /* + * getting this lock will stop any read lock block + * allocations + */ + _raw_spin_lock(&lstat_control.directory_lock); + /* + * keep any more read lock blocks from being + * allocated + */ + lstat_control.state = LSTAT_OFF; + /* record how many read lock blocks there are */ + read_lock_blocks = + lstat_control.next_free_read_lock_index; + _raw_spin_unlock(&lstat_control.directory_lock); + /* now go through the list of read locks */ + cpu = THIS_CPU_NUMBER; + for (i = 1; i < read_lock_blocks; i++) { + lock_ptr = + (*lstat_control.read_lock_counts[cpu])[i]. + lock_ptr; + /* is this saved lock address still valid? */ + if (GET_RWINDEX(lock_ptr) == i) { + /* + * lock address appears to still be + * valid because we only hold one lock + * at a time, this can't cause a + * deadlock unless this is a lock held + * as part of the current system call + * path. At the moment there + * are no READ mode locks held to get + * here from user space, so we solve + * this by skipping locks held in + * write mode. + */ + if (RWLOCK_IS_WRITE_LOCKED(lock_ptr)) { + PUT_RWINDEX(lock_ptr, 0); + continue; + } + /* + * now we know there are no read + * holders of this lock! 
stop + * statistics collection for this + * lock + */ + _raw_write_lock(lock_ptr); + PUT_RWINDEX(lock_ptr, 0); + _raw_write_unlock(lock_ptr); + } + /* + * it may still be possible for the hold time + * sum to be negative e.g. if a lock is + * reallocated while "busy" we will have to fix + * this up in the data reduction program. + */ + } + local_irq_restore(flags); + lstat_control.intervals++; + lstat_control.ending_cycles64 = get_cycles64(); + lstat_control.enabled_cycles64 += + lstat_control.ending_cycles64 - + lstat_control.started_cycles64; + do_gettimeofday(&tv); + lstat_control.ending_time = tv.tv_sec; + /* + * don't deallocate the structures -- we may do a + * lockstat on to add to the data that is already + * there. Use LSTAT_RELEASE to release storage + */ + } else { + error = -EBUSY; /* already OFF */ + } + break; + + case LSTAT_ON: + if (lstat_control.state == LSTAT_OFF) { +#ifdef DEBUG_LOCKMETER + printk("put_lockmeter_info(cpu=%d): LSTAT_ON\n", + THIS_CPU_NUMBER); +#endif + lstat_control.next_free_dir_index = 1; /* 0 is for overflows */ + + dirsize = LSTAT_MAX_STAT_INDEX * + sizeof (lstat_directory_entry_t); + hashsize = + (1 + LSTAT_HASH_TABLE_SIZE) * sizeof (ushort); + countsize = sizeof (lstat_cpu_counts_t); + read_lock_countsize = + sizeof (lstat_read_lock_cpu_counts_t); +#ifdef DEBUG_LOCKMETER + printk(" dirsize:%d", dirsize); + printk(" hashsize:%d", hashsize); + printk(" countsize:%d", countsize); + printk(" read_lock_countsize:%d\n", + read_lock_countsize); +#endif +#ifdef DEBUG_LOCKMETER + { + int secs; + unsigned long cycles; + uint64_t cycles64; + + do_gettimeofday(&tv); + secs = tv.tv_sec; + do { + do_gettimeofday(&tv); + } while (secs == tv.tv_sec); + cycles = get_cycles(); + cycles64 = get_cycles64(); + secs = tv.tv_sec; + do { + do_gettimeofday(&tv); + } while (secs == tv.tv_sec); + cycles = get_cycles() - cycles; + cycles64 = get_cycles64() - cycles; + printk("lockmeter: cycleFrequency:%d " + "cycles:%d cycles64:%d\n", + 
CPU_CYCLE_FREQUENCY, cycles, cycles64); + } +#endif + + /* + * if this is the first call, allocate storage and + * initialize + */ + if (!lstat_control.hashtab) { + + spin_lock_init(&lstat_control.directory_lock); + + /* guarantee all pointers at zero */ + init_control_space(); + + lstat_control.hashtab = + kmalloc(hashsize, GFP_KERNEL); + if (!lstat_control.hashtab) { + error = -ENOSPC; +#ifdef DEBUG_LOCKMETER + printk("!!error kmalloc of hashtab\n"); +#endif + } + lstat_control.dir = vmalloc(dirsize); + if (!lstat_control.dir) { + error = -ENOSPC; +#ifdef DEBUG_LOCKMETER + printk("!!error kmalloc of dir\n"); +#endif + } + + for (cpu = 0; cpu < num_online_cpus(); cpu++) { + lstat_control.counts[cpu] = + vmalloc(countsize); + if (!lstat_control.counts[cpu]) { + error = -ENOSPC; +#ifdef DEBUG_LOCKMETER + printk("!!error vmalloc of " + "counts[%d]\n", cpu); +#endif + } + lstat_control.read_lock_counts[cpu] = + (lstat_read_lock_cpu_counts_t *) + kmalloc(read_lock_countsize, + GFP_KERNEL); + if (!lstat_control. 
+ read_lock_counts[cpu]) { + error = -ENOSPC; +#ifdef DEBUG_LOCKMETER + printk("!!error kmalloc of " + "read_lock_counts[%d]\n", + cpu); +#endif + } + } + } + + if (error) { + /* + * One or more kmalloc failures -- free + * everything + */ + release_control_space(); + } else { + + if (!reset_lstat_data()) { + error = -EINVAL; + break; + }; + + /* + * record starting and ending times and the + * like + */ + if (lstat_control.intervals == 0) { + do_gettimeofday(&tv); + lstat_control.first_started_time = + tv.tv_sec; + } + lstat_control.started_cycles64 = get_cycles64(); + do_gettimeofday(&tv); + lstat_control.started_time = tv.tv_sec; + + lstat_control.state = LSTAT_ON; + } + } else { + error = -EBUSY; /* already ON */ + } + break; + + case LSTAT_RESET: + if (lstat_control.state == LSTAT_OFF) { + if (!reset_lstat_data()) + error = -EINVAL; + } else { + error = -EBUSY; /* still on; can't reset */ + } + break; + + case LSTAT_RELEASE: + if (lstat_control.state == LSTAT_OFF) { + release_control_space(); + lstat_control.intervals = 0; + lstat_control.enabled_cycles64 = 0; + } else { + error = -EBUSY; + } + break; + + default: + error = -EINVAL; + } /* switch */ + + _raw_spin_unlock(&lstat_control.control_lock); + return error ? 
error : len; +} + +#ifdef USER_MODE_TESTING +/* following used for user mode testing */ +void +lockmeter_init() +{ + int dirsize, hashsize, countsize, read_lock_countsize, cpu; + + printf("lstat_control is at %x size=%d\n", &lstat_control, + sizeof (lstat_control)); + printf("sizeof(spinlock_t)=%d\n", sizeof (spinlock_t)); + lstat_control.state = LSTAT_ON; + + lstat_control.directory_lock = SPIN_LOCK_UNLOCKED; + lstat_control.next_free_dir_index = 1; /* 0 is for overflows */ + lstat_control.next_free_read_lock_index = 1; + + dirsize = LSTAT_MAX_STAT_INDEX * sizeof (lstat_directory_entry_t); + hashsize = (1 + LSTAT_HASH_TABLE_SIZE) * sizeof (ushort); + countsize = sizeof (lstat_cpu_counts_t); + read_lock_countsize = sizeof (lstat_read_lock_cpu_counts_t); + + lstat_control.hashtab = (ushort *) malloc(hashsize); + + if (lstat_control.hashtab == 0) { + printf("malloc failure for at line %d in lockmeter.c\n", + __LINE__); + exit(0); + } + + lstat_control.dir = (lstat_directory_entry_t *) malloc(dirsize); + + if (lstat_control.dir == 0) { + printf("malloc failure for at line %d in lockmeter.c\n", cpu, + __LINE__); + exit(0); + } + + for (cpu = 0; cpu < num_online_cpus(); cpu++) { + int j, k; + j = (int) (lstat_control.counts[cpu] = + (lstat_cpu_counts_t *) malloc(countsize)); + k = (int) (lstat_control.read_lock_counts[cpu] = + (lstat_read_lock_cpu_counts_t *) + malloc(read_lock_countsize)); + if (j * k == 0) { + printf("malloc failure for cpu=%d at line %d in " + "lockmeter.c\n", cpu, __LINE__); + exit(0); + } + } + + memset(lstat_control.hashtab, 0, hashsize); + memset(lstat_control.dir, 0, dirsize); + + for (cpu = 0; cpu < num_online_cpus(); cpu++) { + memset(lstat_control.counts[cpu], 0, countsize); + memset(lstat_control.read_lock_counts[cpu], 0, + read_lock_countsize); + } +} + +asm(" \ +.align 4 \ +.globl __write_lock_failed \ +__write_lock_failed: \ + " LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax) \ +1: cmpl $" RW_LOCK_BIAS_STR ",(%eax) \ + jne 1b \ +\ + " LOCK "subl 
$" RW_LOCK_BIAS_STR ",(%eax) \ + jnz __write_lock_failed \ + ret \ +\ +\ +.align 4 \ +.globl __read_lock_failed \ +__read_lock_failed: \ + lock ; incl (%eax) \ +1: cmpl $1,(%eax) \ + js 1b \ +\ + lock ; decl (%eax) \ + js __read_lock_failed \ + ret \ +"); +#endif + +EXPORT_SYMBOL(_metered_spin_lock); +EXPORT_SYMBOL(_metered_spin_unlock); +EXPORT_SYMBOL(_metered_spin_trylock); +EXPORT_SYMBOL(_metered_read_lock); +EXPORT_SYMBOL(_metered_read_unlock); +EXPORT_SYMBOL(_metered_write_lock); +EXPORT_SYMBOL(_metered_write_unlock); diff -purN -X /home/mbligh/.diff.exclude reference/kernel/mcount.c current/kernel/mcount.c --- reference/kernel/mcount.c 1969-12-31 16:00:00.000000000 -0800 +++ current/kernel/mcount.c 2004-04-09 11:53:02.000000000 -0700 @@ -0,0 +1,203 @@ +/* + * kernel/mcount.c + * + * Implementation of kernel mcount handler and supporting functions. + * + * Code based on kernprof http://oss.sgi.com/projects/kernprof/ + * Copyright (C) SGI 1999, 2000, 2001 + * Written by Dimitris Michailidis (dimitris@engr.sgi.com) + * Modified by John Hawkes (hawkes@engr.sgi.com) + * Contributions from Niels Christiansen (nchr@us.ibm.com) + * Adapted for stand-alone call graphing by Adam Litke (agl@us.ibm.com) + */ + +#include +#include +#include +#include +#include +#include + +void UNKNOWN_KERNEL(void) {} /* Dummy functions to make profiles more */ +void UNKNOWN_MODULE(void) {} /* descriptive */ + +unsigned int mcount_shift, PC_resolution = DFL_PC_RES; + +char* memory_start = NULL; +unsigned short *cg_from_base = NULL; +struct cg_arc_dest *cg_to_base = NULL; +int cg_arc_overflow = 0; /* set when no new arcs can be added to the call graph */ +int n_buckets = 0; +size_t mem_needed; /* space needed for the call graph and the PC samples */ +extern char _stext, _etext, _sinittext, _einittext; + +void (*mcount_hook)(unsigned long, unsigned long) = NULL; +struct proc_dir_entry *mcount_pde; + +static int mcount_alloc_mem(void) +{ + unsigned long cg_from_size, cg_to_size; + size_t 
text_size = (unsigned long) &_etext - (unsigned long) &_stext; + struct prof_mem_map *memory_map; + + for (mcount_shift = 0; (1 << mcount_shift) < PC_resolution; mcount_shift++); + n_buckets = text_size >> mcount_shift; + cg_from_size = n_buckets * sizeof(short); + cg_to_size = CG_MAX_ARCS * sizeof(struct cg_arc_dest); + mem_needed = sizeof(struct prof_mem_map) + + ((cg_from_size + cg_to_size) * num_online_cpus()); + if ((memory_start = vmalloc(mem_needed)) == NULL) { + return -ENOMEM; + } + memset(memory_start, 0, mem_needed); + + cg_from_base = (unsigned short *) (memory_start + sizeof(struct prof_mem_map)); + cg_to_base = (struct cg_arc_dest *) (memory_start + sizeof(struct prof_mem_map) + + (cg_from_size * num_online_cpus())); + + memory_map = (struct prof_mem_map*) memory_start; + memory_map->kernel_buckets = n_buckets; + memory_map->nr_cpus = num_online_cpus(); + memory_map->cg_from_size = cg_from_size; + memory_map->cg_to_size = cg_to_size; + memory_map->cg_to_offset = cg_from_size * num_online_cpus(); + memory_map->kernel_start = (unsigned long)&_stext; + memory_map->kernel_end = (unsigned long)&_etext; + return 0; +} + +static void mcount_free_mem(void) +{ + vfree(memory_start); + memory_start = NULL; +} + +void mcount_entry(void) +{ + unsigned long frompc, selfpc; + + if(mcount_hook) { + frompc = (unsigned long)__builtin_return_address(2); + selfpc = (unsigned long)__builtin_return_address(1); + mcount_hook(frompc, selfpc); + } + return; +} + +/* Record an arc traversal in the call graph. Called by mcount(). 
SMP safe */ +void cg_record_arc(unsigned long frompc, unsigned long selfpc) +{ + static spinlock_t cg_record_lock = SPIN_LOCK_UNLOCKED; + unsigned long flags; + int toindex, fromindex, cpu; + unsigned short *q, *cg_from; + struct cg_arc_dest *p, *cg_to; + + cpu = smp_processor_id(); + + cg_from = &cg_from_base[n_buckets * cpu]; + cg_to = &cg_to_base[CG_MAX_ARCS * cpu]; + + if (pc_out_of_range(frompc)) + fromindex = (FUNCTIONPC(UNKNOWN_KERNEL) - (unsigned long) &_stext) + >> mcount_shift; + else + fromindex = (frompc - (unsigned long) &_stext) >> mcount_shift; + q = &cg_from[fromindex]; + + /* Easy case: the arc is already in the call graph */ + for (toindex = *q; toindex != 0; ) { + p = &cg_to[toindex]; + if (p->address == selfpc) { + atomic_inc(&p->count); + return; + } + toindex = p->link; + } + /* + * No luck. We need to add a new arc. Since cg_to[0] is unused, + * we use cg_to[0].count to keep track of the next available arc. + */ + if (cg_arc_overflow) { + return; + } + toindex = atomic_add_return(1, &cg_to->count); + if (toindex >= CG_MAX_ARCS) { + /* + * We have run out of space for arcs. We'll keep incrementing + * the existing ones but we won't try to add any more. + */ + cg_arc_overflow = 1; + atomic_set(&cg_to->count, CG_MAX_ARCS - 1); + return; + } + /* + * We have a secured slot for a new arc and all we need to do is + * initialize it and add it to a hash bucket. We use compare&swap, if + * possible, to avoid any spinlocks whatsoever. 
+ */ + p = &cg_to[toindex]; + p->address = selfpc; + atomic_set(&p->count, 1); + + spin_lock_irqsave(&cg_record_lock, flags); + p->link = *q; + *q = toindex; + spin_unlock_irqrestore(&cg_record_lock, flags); + return; +} + +int mcount_start(void) +{ + if (!memory_start) { + if(mcount_alloc_mem()) + return -ENOMEM; + mcount_pde->size = mem_needed; + } + mcount_hook = cg_record_arc; + return 0; +} + +int mcount_stop(void) +{ + mcount_hook = NULL; + return 0; +} + +int mcount_cleanup(void) +{ + mcount_stop(); + mcount_pde->size = 0; + mcount_free_mem(); + return 0; +} + +ssize_t mcount_read(struct file * file, char * buf, + size_t count, loff_t *ppos) +{ + count = (count + *ppos >= mcount_pde->size) ? + mcount_pde->size - *ppos : count; + copy_to_user(buf, memory_start + *ppos, count); + *ppos += count; + return count; +} + +ssize_t mcount_write(struct file * file, const char * buf, + size_t count, loff_t *ppos) +{ + int ret; + + switch (buf[0]) { + case '0': + ret = mcount_cleanup(); + break; + case '1': + ret = mcount_stop(); + break; + case '2': + ret = mcount_start(); + default: + ret = -EINVAL; + } + return (ret == 0) ? count : ret; +} diff -purN -X /home/mbligh/.diff.exclude reference/kernel/pid.c current/kernel/pid.c --- reference/kernel/pid.c 2004-03-11 14:35:38.000000000 -0800 +++ current/kernel/pid.c 2004-04-08 15:10:20.000000000 -0700 @@ -268,6 +268,9 @@ void switch_exec_pids(task_t *leader, ta * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or * more. */ +#ifdef CONFIG_KGDB +int kgdb_pid_init_done; /* so we don't call prior to... 
*/ +#endif void __init pidhash_init(void) { int i, j, pidhash_size; @@ -289,6 +292,9 @@ void __init pidhash_init(void) for (j = 0; j < pidhash_size; j++) INIT_LIST_HEAD(&pid_hash[i][j]); } +#ifdef CONFIG_KGDB + kgdb_pid_init_done++; +#endif } void __init pidmap_init(void) diff -purN -X /home/mbligh/.diff.exclude reference/kernel/sched.c current/kernel/sched.c --- reference/kernel/sched.c 2004-04-07 14:54:37.000000000 -0700 +++ current/kernel/sched.c 2004-04-09 21:52:53.000000000 -0700 @@ -24,7 +24,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -39,8 +41,10 @@ #include #include #include +#include +#include -#ifdef CONFIG_NUMA +#ifdef CONFIG_SCHED_NUMA #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu)) #else #define cpu_to_node_mask(cpu) (cpu_online_map) @@ -72,6 +76,13 @@ #define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ)) #define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ)) +#ifndef JIFFIES_TO_MSEC +# define JIFFIES_TO_MSEC(x) ((x) * 1000 / HZ) +#endif +#ifndef MSEC_TO_JIFFIES +# define MSEC_TO_JIFFIES(x) ((x) * HZ / 1000) +#endif + /* * These are the 'tuning knobs' of the scheduler: * @@ -79,19 +90,30 @@ * maximum timeslice is 200 msecs. Timeslices get refilled after * they expire. 
*/ -#define MIN_TIMESLICE ( 10 * HZ / 1000) -#define MAX_TIMESLICE (200 * HZ / 1000) +int min_timeslice = (10 * HZ) / 1000; +#define MIN_TIMESLICE (min_timeslice) +int max_timeslice = (200 * HZ) / 1000; +#define MAX_TIMESLICE (max_timeslice) #define ON_RUNQUEUE_WEIGHT 30 -#define CHILD_PENALTY 95 -#define PARENT_PENALTY 100 -#define EXIT_WEIGHT 3 -#define PRIO_BONUS_RATIO 25 +int child_penalty = 95; +#define CHILD_PENALTY (child_penalty) +int parent_penalty = 100; +#define PARENT_PENALTY (parent_penalty) +int exit_weight = 3; +#define EXIT_WEIGHT (exit_weight) +int prio_bonus_ratio = 25; +#define PRIO_BONUS_RATIO (prio_bonus_ratio) #define MAX_BONUS (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100) -#define INTERACTIVE_DELTA 2 +int interactive_delta = 2; +#define INTERACTIVE_DELTA (interactive_delta) #define MAX_SLEEP_AVG (AVG_TIMESLICE * MAX_BONUS) #define STARVATION_LIMIT (MAX_SLEEP_AVG) #define NS_MAX_SLEEP_AVG (JIFFIES_TO_NS(MAX_SLEEP_AVG)) -#define NODE_THRESHOLD 125 +/* wasn't here, does it have an equivalent --dvhart + * - #define NODE_THRESHOLD 125 + * + int node_threshold = 125; + * + #define NODE_THRESHOLD (node_threshold) + */ #define CREDIT_LIMIT 100 /* @@ -173,11 +195,14 @@ ((MAX_TIMESLICE - MIN_TIMESLICE) * \ (MAX_PRIO-1 - (p)->static_prio) / (MAX_USER_PRIO-1))) -static inline unsigned int task_timeslice(task_t *p) +static unsigned int task_timeslice(task_t *p) { return BASE_TIMESLICE(p); } +#define task_hot(p, now, sd) \ + (!TASK_INTERACTIVE(p) && ((now)-(p)->timestamp < (sd)->cache_hot_time)) + /* * These are the runqueue data structures: */ @@ -187,7 +212,7 @@ static inline unsigned int task_timeslic typedef struct runqueue runqueue_t; struct prio_array { - int nr_active; + unsigned int nr_active; unsigned long bitmap[BITMAP_SIZE]; struct list_head queue[MAX_PRIO]; }; @@ -201,25 +226,41 @@ struct prio_array { */ struct runqueue { spinlock_t lock; + + unsigned long nr_running; unsigned long long nr_switches; - unsigned long nr_running, expired_timestamp, 
nr_uninterruptible, - timestamp_last_tick; + unsigned long expired_timestamp, nr_uninterruptible; + unsigned long long timestamp_last_tick; task_t *curr, *idle; +#ifdef CONFIG_SCHEDSTATS + int cpu; /* to make easy reverse-lookups with per-cpu runqueues */ +#endif struct mm_struct *prev_mm; prio_array_t *active, *expired, arrays[2]; - int best_expired_prio, prev_cpu_load[NR_CPUS]; -#ifdef CONFIG_NUMA - atomic_t *node_nr_running; - int prev_node_load[MAX_NUMNODES]; -#endif + int best_expired_prio; + atomic_t nr_iowait; + +#ifdef CONFIG_SMP + struct sched_domain *sd; + + /* For active balancing */ + int active_balance; + int push_cpu; + task_t *migration_thread; struct list_head migration_queue; +#endif - atomic_t nr_iowait; +#ifdef CONFIG_SCHEDSTATS + struct sched_info info; +#endif }; static DEFINE_PER_CPU(struct runqueue, runqueues); +#define for_each_domain(cpu, domain) \ + for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent) + #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) #define this_rq() (&__get_cpu_var(runqueues)) #define task_rq(p) cpu_rq(task_cpu(p)) @@ -234,57 +275,12 @@ static DEFINE_PER_CPU(struct runqueue, r # define task_running(rq, p) ((rq)->curr == (p)) #endif -#ifdef CONFIG_NUMA - -/* - * Keep track of running tasks. 
- */ - -static atomic_t node_nr_running[MAX_NUMNODES] ____cacheline_maxaligned_in_smp = - {[0 ...MAX_NUMNODES-1] = ATOMIC_INIT(0)}; - -static inline void nr_running_init(struct runqueue *rq) -{ - rq->node_nr_running = &node_nr_running[0]; -} - -static inline void nr_running_inc(runqueue_t *rq) -{ - atomic_inc(rq->node_nr_running); - rq->nr_running++; -} - -static inline void nr_running_dec(runqueue_t *rq) -{ - atomic_dec(rq->node_nr_running); - rq->nr_running--; -} - -__init void node_nr_running_init(void) -{ - int i; - - for (i = 0; i < NR_CPUS; i++) { - if (cpu_possible(i)) - cpu_rq(i)->node_nr_running = - &node_nr_running[cpu_to_node(i)]; - } -} - -#else /* !CONFIG_NUMA */ - -# define nr_running_init(rq) do { } while (0) -# define nr_running_inc(rq) do { (rq)->nr_running++; } while (0) -# define nr_running_dec(rq) do { (rq)->nr_running--; } while (0) - -#endif /* CONFIG_NUMA */ - /* * task_rq_lock - lock the runqueue a given task resides on and disable * interrupts. Note the ordering: we can safely lookup the task_rq without * explicitly disabling preemption. 
*/ -static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) +static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) { struct runqueue *rq; @@ -304,10 +300,176 @@ static inline void task_rq_unlock(runque spin_unlock_irqrestore(&rq->lock, *flags); } + +#ifdef CONFIG_SCHEDSTATS +struct schedstat { + /* sys_sched_yield stats */ + unsigned long yld_exp_empty; + unsigned long yld_act_empty; + unsigned long yld_both_empty; + unsigned long yld_cnt; + + /* schedule stats */ + unsigned long sched_noswitch; + unsigned long sched_switch; + unsigned long sched_cnt; + + /* load_balance stats */ + unsigned long lb_imbalance; + unsigned long lb_idle; + unsigned long lb_busy; + unsigned long lb_cnt; + unsigned long lb_nobusyg; + unsigned long lb_nobusyq; + + /* pull_task stats */ + unsigned long pt_gained_newidle; + unsigned long pt_lost_newidle; + unsigned long pt_gained_idle; + unsigned long pt_lost_idle; + unsigned long pt_gained_notidle; + unsigned long pt_lost_notidle; + + /* active_load_balance stats */ + unsigned long alb_cnt; + unsigned long alb_gained; + unsigned long alb_lost; + + /* load_balance_newidle stats */ + unsigned long lbni_cnt; + unsigned long lbni_imbalance; + + /* migrate_to_cpu stats */ + unsigned long mtc_cnt; + + /* sched_balance_exec stats */ + unsigned long sbe_cnt; +} ____cacheline_aligned; + +/* + * bump this up when changing the output format or the meaning of an existing + * format, so that tools can adapt (or abort) + */ +#define SCHEDSTAT_VERSION 6 + +struct schedstat schedstats[NR_CPUS]; + +static int show_schedstat(struct seq_file *seq, void *v) +{ + struct schedstat sums; + int i; + struct sched_info info, infosums; + + + memset(&sums, 0, sizeof(sums)); + memset(&infosums, 0, sizeof(infosums)); + seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); + seq_printf(seq, "timestamp %lu\n", jiffies); + for (i = 0; i < NR_CPUS; i++) { + + if (!cpu_online(i)) continue; + + cpu_sched_info(&info, i); + + sums.yld_exp_empty += 
schedstats[i].yld_exp_empty; + sums.yld_act_empty += schedstats[i].yld_act_empty; + sums.yld_both_empty += schedstats[i].yld_both_empty; + sums.yld_cnt += schedstats[i].yld_cnt; + sums.sched_noswitch += schedstats[i].sched_noswitch; + sums.sched_switch += schedstats[i].sched_switch; + sums.sched_cnt += schedstats[i].sched_cnt; + sums.lb_idle += schedstats[i].lb_idle; + sums.lb_busy += schedstats[i].lb_busy; + sums.lb_cnt += schedstats[i].lb_cnt; + sums.lb_imbalance += schedstats[i].lb_imbalance; + sums.lb_nobusyg += schedstats[i].lb_nobusyg; + sums.lb_nobusyq += schedstats[i].lb_nobusyq; + sums.pt_gained_newidle += schedstats[i].pt_gained_newidle; + sums.pt_lost_newidle += schedstats[i].pt_lost_newidle; + sums.pt_gained_idle += schedstats[i].pt_gained_idle; + sums.pt_lost_idle += schedstats[i].pt_lost_idle; + sums.pt_gained_notidle += schedstats[i].pt_gained_notidle; + sums.pt_lost_notidle += schedstats[i].pt_lost_notidle; + sums.alb_cnt += schedstats[i].alb_cnt; + sums.alb_gained += schedstats[i].alb_gained; + sums.alb_lost += schedstats[i].alb_lost; + sums.sbe_cnt += schedstats[i].sbe_cnt; + sums.mtc_cnt += schedstats[i].mtc_cnt; + sums.lbni_cnt += schedstats[i].lbni_cnt; + sums.lbni_imbalance += schedstats[i].lbni_imbalance; + infosums.cpu_time += info.cpu_time; + infosums.run_delay += info.run_delay; + infosums.pcnt += info.pcnt; + seq_printf(seq, + "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu " + "%lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu " + "%lu %lu %lu %lu %lu\n", + i, schedstats[i].yld_both_empty, + schedstats[i].yld_act_empty, schedstats[i].yld_exp_empty, + schedstats[i].yld_cnt, schedstats[i].sched_noswitch, + schedstats[i].sched_switch, schedstats[i].sched_cnt, + schedstats[i].lb_idle, schedstats[i].lb_busy, + schedstats[i].lb_cnt, schedstats[i].lb_imbalance, + schedstats[i].lb_nobusyg, schedstats[i].lb_nobusyq, + schedstats[i].pt_gained_newidle, schedstats[i].pt_lost_newidle, + schedstats[i].pt_gained_idle, schedstats[i].pt_lost_idle, + 
schedstats[i].pt_gained_notidle, schedstats[i].pt_lost_notidle, + schedstats[i].alb_cnt, + schedstats[i].alb_gained, schedstats[i].alb_lost, + schedstats[i].sbe_cnt, schedstats[i].mtc_cnt, + schedstats[i].lbni_cnt, schedstats[i].lbni_imbalance, + info.cpu_time, info.run_delay, info.pcnt); + } + seq_printf(seq, + "totals %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu " + "%lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu " + "%lu %lu %lu %lu %lu\n", + sums.yld_both_empty, sums.yld_act_empty, sums.yld_exp_empty, + sums.yld_cnt, sums.sched_noswitch, sums.sched_switch, + sums.sched_cnt, sums.lb_idle, sums.lb_busy, + sums.lb_cnt, sums.lb_imbalance, sums.lb_nobusyg, sums.lb_nobusyq, + sums.pt_gained_newidle, sums.pt_lost_newidle, + sums.pt_gained_idle, sums.pt_lost_idle, + sums.pt_gained_notidle, sums.pt_lost_notidle, + sums.alb_cnt, sums.alb_gained, sums.alb_lost, + sums.sbe_cnt, sums.mtc_cnt, + sums.lbni_cnt, sums.lbni_imbalance, + infosums.cpu_time, infosums.run_delay, infosums.pcnt); + + return 0; +} + +static int schedstat_open(struct inode *inode, struct file *file) +{ + unsigned size = 4096 * (1 + num_online_cpus() / 32); + char *buf = kmalloc(size, GFP_KERNEL); + struct seq_file *m; + int res; + + if (!buf) + return -ENOMEM; + res = single_open(file, show_schedstat, NULL); + if (!res) { + m = file->private_data; + m->buf = buf; + m->size = size; + } else + kfree(buf); + return res; +} + +struct file_operations proc_schedstat_operations = { + .open = schedstat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + /* * rq_lock - lock a given runqueue and disable interrupts. */ -static inline runqueue_t *this_rq_lock(void) +static runqueue_t *this_rq_lock(void) { runqueue_t *rq; @@ -323,10 +485,117 @@ static inline void rq_unlock(runqueue_t spin_unlock_irq(&rq->lock); } +#ifdef CONFIG_SCHEDSTATS +/* + * Called when a process is dequeued from the active array and given + * the cpu. 
We should note that with the exception of interactive + * tasks, the expired queue will become the active queue after the active + * queue is empty, without explicitly dequeuing and requeuing tasks in the + * expired queue. (Interactive tasks may be requeued directly to the + * active queue, thus delaying tasks in the expired queue from running; + * see scheduler_tick()). + * + * This function is only called from sched_info_arrive(), rather than + * dequeue_task(). Even though a task may be queued and dequeued multiple + * times as it is shuffled about, we're really interested in knowing how + * long it was from the *first* time it was queued to the time that it + * finally hit a cpu. + */ +static inline void sched_info_dequeued(task_t *t) +{ + t->sched_info.last_queued = 0; +} + +/* + * Called when a task finally hits the cpu. We can now calculate how + * long it was waiting to run. We also note when it began so that we + * can keep stats on how long its timeslice is. + */ +static inline void sched_info_arrive(task_t *t) +{ + unsigned long now = jiffies; + unsigned long diff = 0; + struct runqueue *rq = task_rq(t); + + if (t->sched_info.last_queued) + diff = now - t->sched_info.last_queued; + sched_info_dequeued(t); + t->sched_info.run_delay += diff; + t->sched_info.last_arrival = now; + t->sched_info.pcnt++; + + if (!rq) + return; + + rq->info.run_delay += diff; + rq->info.pcnt++; +} + +/* + * Called when a process is queued into either the active or expired + * array. The time is noted and later used to determine how long we + * had to wait for us to reach the cpu. Since the expired queue will + * become the active queue after active queue is empty, without dequeuing + * and requeuing any tasks, we are interested in queuing to either. 
It + * is unusual but not impossible for tasks to be dequeued and immediately + * requeued in the same or another array: this can happen in sched_yield(), + * set_user_nice(), and even load_balance() as it moves tasks from runqueue + * to runqueue. + * + * This function is only called from enqueue_task(), but also only updates + * the timestamp if it is already not set. It's assumed that + * sched_info_dequeued() will clear that stamp when appropriate. + */ +static inline void sched_info_queued(task_t *t) +{ + if (!t->sched_info.last_queued) + t->sched_info.last_queued = jiffies; +} + +/* + * Called when a process ceases being the active-running process, either + * voluntarily or involuntarily. Now we can calculate how long we ran. + */ +static inline void sched_info_depart(task_t *t) +{ + struct runqueue *rq = task_rq(t); + unsigned long diff = jiffies - t->sched_info.last_arrival; + + t->sched_info.cpu_time += diff; + + if (rq) + rq->info.cpu_time += diff; +} + +/* + * Called when tasks are switched involuntarily due, typically, to expiring + * their time slice. (This may also be called when switching to or from + * the idle task.) We are only called when prev != next. + */ +static inline void sched_info_switch(task_t *prev, task_t *next) +{ + struct runqueue *rq = task_rq(prev); + + /* + * prev now departs the cpu. It's not interesting to record + * stats about how efficient we were at scheduling the idle + * process, however. 
+ */ + if (prev != rq->idle) + sched_info_depart(prev); + + if (next != rq->idle) + sched_info_arrive(next); +} +#else +#define sched_info_queued(t) {} +#define sched_info_switch(t, next) {} +#endif /* CONFIG_SCHEDSTATS */ + /* * Adding/removing a task to/from a priority array: */ -static inline void dequeue_task(struct task_struct *p, prio_array_t *array) +static void dequeue_task(struct task_struct *p, prio_array_t *array) { array->nr_active--; list_del(&p->run_list); @@ -334,14 +603,30 @@ static inline void dequeue_task(struct t __clear_bit(p->prio, array->bitmap); } -static inline void enqueue_task(struct task_struct *p, prio_array_t *array) +static void enqueue_task(struct task_struct *p, prio_array_t *array) { + sched_info_queued(p); list_add_tail(&p->run_list, array->queue + p->prio); __set_bit(p->prio, array->bitmap); array->nr_active++; p->array = array; } +#ifdef CONFIG_SMP +/* + * Used by the migration code - we pull tasks from the head of the + * remote queue so we want these tasks to show up at the head of the + * local queue: + */ +static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) +{ + list_add(&p->run_list, array->queue + p->prio); + __set_bit(p->prio, array->bitmap); + array->nr_active++; + p->array = array; +} +#endif + /* * effective_prio - return the priority that is based on the static * priority but is modified by bonuses/penalties. @@ -379,7 +664,7 @@ static int effective_prio(task_t *p) static inline void __activate_task(task_t *p, runqueue_t *rq) { enqueue_task(p, rq->active); - nr_running_inc(rq); + rq->nr_running++; } static void recalc_task_prio(task_t *p, unsigned long long now) @@ -462,7 +747,7 @@ static void recalc_task_prio(task_t *p, * Update all the scheduling statistics stuff. (sleep average * calculation, priority modifiers, etc.) 
*/ -static inline void activate_task(task_t *p, runqueue_t *rq) +static void activate_task(task_t *p, runqueue_t *rq) { unsigned long long now = sched_clock(); @@ -498,9 +783,9 @@ static inline void activate_task(task_t /* * deactivate_task - remove a task from the runqueue. */ -static inline void deactivate_task(struct task_struct *p, runqueue_t *rq) +static void deactivate_task(struct task_struct *p, runqueue_t *rq) { - nr_running_dec(rq); + rq->nr_running--; if (p->state == TASK_UNINTERRUPTIBLE) rq->nr_uninterruptible++; dequeue_task(p, p->array); @@ -514,9 +799,9 @@ static inline void deactivate_task(struc * might also involve a cross-CPU call to trigger the scheduler on * the target CPU. */ -static inline void resched_task(task_t *p) -{ #ifdef CONFIG_SMP +static void resched_task(task_t *p) +{ int need_resched, nrpolling; preempt_disable(); @@ -528,10 +813,13 @@ static inline void resched_task(task_t * if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id())) smp_send_reschedule(task_cpu(p)); preempt_enable(); +} #else +static inline void resched_task(task_t *p) +{ set_tsk_need_resched(p); -#endif } +#endif /** * task_curr - is this task currently executing on a CPU? @@ -543,40 +831,46 @@ inline int task_curr(task_t *p) } #ifdef CONFIG_SMP +enum request_type { + REQ_MOVE_TASK, + REQ_SET_DOMAIN, +}; + typedef struct { struct list_head list; + enum request_type type; + + /* For REQ_MOVE_TASK */ task_t *task; + int dest_cpu; + + /* For REQ_SET_DOMAIN */ + struct sched_domain *sd; + struct completion done; } migration_req_t; /* - * The task's runqueue lock must be held, and the new mask must be valid. + * The task's runqueue lock must be held. * Returns true if you have to wait for migration thread. 
*/ -static int __set_cpus_allowed(task_t *p, cpumask_t new_mask, - migration_req_t *req) +static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) { runqueue_t *rq = task_rq(p); - p->cpus_allowed = new_mask; - /* - * Can the task run on the task's current CPU? If not then - * migrate the thread off to a proper CPU. - */ - if (cpu_isset(task_cpu(p), new_mask)) - return 0; - /* * If the task is not on a runqueue (and not running), then * it is sufficient to simply update the task's cpu field. */ if (!p->array && !task_running(rq, p)) { - set_task_cpu(p, any_online_cpu(p->cpus_allowed)); + set_task_cpu(p, dest_cpu); return 0; } init_completion(&req->done); + req->type = REQ_MOVE_TASK; req->task = p; + req->dest_cpu = dest_cpu; list_add(&req->list, &rq->migration_queue); return 1; } @@ -631,6 +925,54 @@ void kick_process(task_t *p) EXPORT_SYMBOL_GPL(kick_process); +/* + * Return a low guess at the load of cpu. + */ +static inline unsigned long cpu_load(int cpu) +{ + return cpu_rq(cpu)->nr_running * SCHED_LOAD_SCALE; +} + +#endif + +/* + * wake_idle() is useful especially on SMT architectures to wake a + * task onto an idle sibling if we would otherwise wake it onto a + * busy sibling. + * + * Returns the CPU we should wake onto. 
+ */ +#if defined(ARCH_HAS_SCHED_WAKE_IDLE) +static int wake_idle(int cpu, task_t *p) +{ + cpumask_t tmp; + runqueue_t *rq = cpu_rq(cpu); + struct sched_domain *sd; + int i; + + if (idle_cpu(cpu)) + return cpu; + + sd = rq->sd; + if (!(sd->flags & SD_WAKE_IDLE)) + return cpu; + + cpus_and(tmp, sd->span, cpu_online_map); + for_each_cpu_mask(i, tmp) { + if (!cpu_isset(i, p->cpus_allowed)) + continue; + + if (idle_cpu(i)) + return i; + } + + return cpu; +} +#else +static inline int wake_idle(int cpu, task_t *p) +{ + return cpu; +} #endif /*** @@ -649,52 +991,138 @@ EXPORT_SYMBOL_GPL(kick_process); */ static int try_to_wake_up(task_t * p, unsigned int state, int sync) { + int cpu, this_cpu, success = 0; unsigned long flags; - int success = 0; long old_state; runqueue_t *rq; +#ifdef CONFIG_SMP + unsigned long load, this_load; + struct sched_domain *sd; + unsigned long long now; + int new_cpu; +#endif -repeat_lock_task: rq = task_rq_lock(p, &flags); old_state = p->state; - if (old_state & state) { - if (!p->array) { - /* - * Fast-migrate the task if it's not running or runnable - * currently. Do not violate hard affinity. - */ - if (unlikely(sync && !task_running(rq, p) && - (task_cpu(p) != smp_processor_id()) && - cpu_isset(smp_processor_id(), - p->cpus_allowed) && - !cpu_is_offline(smp_processor_id()))) { - set_task_cpu(p, smp_processor_id()); - task_rq_unlock(rq, &flags); - goto repeat_lock_task; - } - if (old_state == TASK_UNINTERRUPTIBLE) { - rq->nr_uninterruptible--; - /* - * Tasks on involuntary sleep don't earn - * sleep_avg beyond just interactive state. 
- */ - p->activated = -1; - } - if (sync && (task_cpu(p) == smp_processor_id())) - __activate_task(p, rq); - else { - activate_task(p, rq); - if (TASK_PREEMPTS_CURR(p, rq)) - resched_task(rq->curr); - } - success = 1; - } - p->state = TASK_RUNNING; + if (!(old_state & state)) + goto out; + + if (p->array) + goto out_running; + + cpu = task_cpu(p); + this_cpu = smp_processor_id(); + +#ifdef CONFIG_SMP + if (unlikely(task_running(rq, p) || cpu_is_offline(this_cpu))) + goto out_activate; + + new_cpu = this_cpu; + + if (cpu == this_cpu) + goto out_set_cpu; + + /* + * Passive balance, if the load on the remote CPU is over + * the limit: + */ + load = cpu_load(cpu) * 100; + /* + * add the new task's effect to its new CPU. If sync wakeup then + * subtract current's load effect: this means that they cancel out + * each other in the sync case, then we have +1 load in the !sync case: + */ + this_load = cpu_load(this_cpu); + if (!sync) + this_load += SCHED_LOAD_SCALE; + this_load *= rq->sd->imbalance_pct; + + if (load > this_load) + goto out_set_cpu; + + /* + * Migrate if the source CPU is not idle or the target + * CPU is idle; if the two CPUs share a domain; and if the task + * is not cache-hot. + * + * (Note that these kinds of migrations violate the equilibrium, + * and might trigger follow-on load-balancing - hence we pick + * cache-cold tasks only.) 
+ */ + if (!cpu_load(cpu) && cpu_load(this_cpu)) + goto out_activate; + + now = sched_clock(); + for_each_domain(this_cpu, sd) { + if (!(sd->flags & SD_WAKE_AFFINE)) + break; + if (task_hot(p, now, sd)) + break; + /* + * The two CPUs share a span of a domain that has affine + * wakeups enabled - the task can be migrated: + */ + if (cpu_isset(cpu, sd->span)) + goto out_set_cpu; + } + /* No luck - fall back to the original CPU: */ + new_cpu = cpu; + +out_set_cpu: + new_cpu = wake_idle(new_cpu, p); + if (new_cpu != cpu && cpu_isset(new_cpu, p->cpus_allowed)) { + set_task_cpu(p, new_cpu); + task_rq_unlock(rq, &flags); + + /* might preempt at this point */ + + rq = task_rq_lock(p, &flags); + old_state = p->state; + if (!(old_state & state)) + goto out; + if (p->array) + goto out_running; + + this_cpu = smp_processor_id(); + cpu = task_cpu(p); + } + +out_activate: +#endif /* CONFIG_SMP */ + if (old_state == TASK_UNINTERRUPTIBLE) { + rq->nr_uninterruptible--; + /* + * Tasks on involuntary sleep don't earn + * sleep_avg beyond just interactive state. + */ + p->activated = -1; } + + /* + * Sync wakeups (i.e. those types of wakeups where the waker + * has indicated that it will leave the CPU in short order) + * don't trigger a preemption, if the woken up task will run on + * this cpu. (in this case the 'I will reschedule' promise of + * the waker guarantees that the freshly woken up task is going + * to be considered on this CPU.) 
+ */ + if (sync && cpu == this_cpu) { + __activate_task(p, rq); + } else { + activate_task(p, rq); + if (TASK_PREEMPTS_CURR(p, rq)) + resched_task(rq->curr); + } + success = 1; + +out_running: + p->state = TASK_RUNNING; +out: task_rq_unlock(rq, &flags); return success; } + int fastcall wake_up_process(task_t * p) { return try_to_wake_up(p, TASK_STOPPED | @@ -749,8 +1177,8 @@ void fastcall sched_fork(task_t *p) p->timestamp = sched_clock(); if (!current->time_slice) { /* - * This case is rare, it happens when the parent has only - * a single jiffy left from its timeslice. Taking the + * This case is rare, it happens when the parent has only + * a single jiffy left from its timeslice. Taking the * runqueue lock is not a problem. */ current->time_slice = 1; @@ -798,7 +1226,7 @@ void fastcall wake_up_forked_process(tas list_add_tail(&p->run_list, ¤t->run_list); p->array = current->array; p->array->nr_active++; - nr_running_inc(rq); + rq->nr_running++; } task_rq_unlock(rq, &flags); } @@ -849,7 +1277,7 @@ void fastcall sched_exit(task_t * p) * with the lock held can cause deadlocks; see schedule() for * details.) */ -static inline void finish_task_switch(task_t *prev) +static void finish_task_switch(task_t *prev) { runqueue_t *rq = this_rq(); struct mm_struct *mm = rq->prev_mm; @@ -866,7 +1294,7 @@ static inline void finish_task_switch(ta * still held, otherwise prev could be scheduled on another cpu, die * there before we look at prev->state, and then the reference would * be dropped twice. 
- * Manfred Spraul + * Manfred Spraul */ prev_task_flags = prev->flags; finish_arch_switch(rq, prev); @@ -928,12 +1356,17 @@ unsigned long nr_running(void) { unsigned long i, sum = 0; - for (i = 0; i < NR_CPUS; i++) + for_each_cpu(i) sum += cpu_rq(i)->nr_running; return sum; } +unsigned long nr_running_cpu(int cpu) +{ + return cpu_rq(cpu)->nr_running; +} + unsigned long nr_uninterruptible(void) { unsigned long i, sum = 0; @@ -964,13 +1397,20 @@ unsigned long nr_iowait(void) return sum; } +#ifdef CONFIG_SCHEDSTATS +void cpu_sched_info(struct sched_info *info, int cpu) +{ + memcpy(info, &cpu_rq(cpu)->info, sizeof(struct sched_info)); +} +#endif /* CONFIG_SCHEDSTATS */ + /* * double_rq_lock - safely lock two runqueues * * Note this does not disable interrupts like task_rq_lock, * you need to do so manually before calling. */ -static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) +static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) { if (rq1 == rq2) spin_lock(&rq1->lock); @@ -991,14 +1431,21 @@ static inline void double_rq_lock(runque * Note this does not restore interrupts like task_rq_unlock, * you need to do so manually after calling. */ -static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) +static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) { spin_unlock(&rq1->lock); if (rq1 != rq2) spin_unlock(&rq2->lock); } -#ifdef CONFIG_NUMA +enum idle_type +{ + IDLE, + NOT_IDLE, + NEWLY_IDLE, +}; + +#ifdef CONFIG_SMP /* * If dest_cpu is allowed for this process, migrate the task to it. 
* This is accomplished by forcing the cpu_allowed mask to only @@ -1007,32 +1454,32 @@ static inline void double_rq_unlock(runq */ static void sched_migrate_task(task_t *p, int dest_cpu) { - runqueue_t *rq; migration_req_t req; + runqueue_t *rq; unsigned long flags; - cpumask_t old_mask, new_mask = cpumask_of_cpu(dest_cpu); lock_cpu_hotplug(); rq = task_rq_lock(p, &flags); - old_mask = p->cpus_allowed; - if (!cpu_isset(dest_cpu, old_mask) || !cpu_online(dest_cpu)) + if (!cpu_isset(dest_cpu, p->cpus_allowed)) goto out; + schedstat_inc(smp_processor_id(), mtc_cnt); /* force the process onto the specified CPU */ - if (__set_cpus_allowed(p, new_mask, &req)) { + if (migrate_task(p, dest_cpu, &req)) { /* Need to wait for migration thread. */ task_rq_unlock(rq, &flags); wake_up_process(rq->migration_thread); wait_for_completion(&req.done); - /* If we raced with sys_sched_setaffinity, don't - * restore mask. */ - rq = task_rq_lock(p, &flags); - if (likely(cpus_equal(p->cpus_allowed, new_mask))) { - /* Restore old mask: won't need migration - * thread, since current cpu is allowed. */ - BUG_ON(__set_cpus_allowed(p, old_mask, NULL)); - } + /* + * we want a new context here. This eliminates TLB + * flushes on the cpus where the process executed prior to + * the migration. + */ + tlb_migrate_prepare(current->mm); + unlock_cpu_hotplug(); + + return; } out: task_rq_unlock(rq, &flags); @@ -1041,412 +1488,622 @@ out: /* * Find the least loaded CPU. Slightly favor the current CPU by - * setting its runqueue length as the minimum to start. + * setting its load as the minimum to start. 
*/ -static int sched_best_cpu(struct task_struct *p) +static int sched_best_cpu(struct task_struct *p, struct sched_domain *sd) { - int i, minload, load, best_cpu, node = 0; - cpumask_t cpumask; + int i = 0, min_load, this_cpu, best_cpu; + cpumask_t tmp; - best_cpu = task_cpu(p); - if (cpu_rq(best_cpu)->nr_running <= 2) - return best_cpu; + best_cpu = this_cpu = task_cpu(p); - minload = 10000000; - for_each_node_with_cpus(i) { - /* - * Node load is always divided by nr_cpus_node to normalise - * load values in case cpu count differs from node to node. - * We first multiply node_nr_running by 10 to get a little - * better resolution. - */ - load = 10 * atomic_read(&node_nr_running[i]) / nr_cpus_node(i); - if (load < minload) { - minload = load; - node = i; - } - } + /* subtract the currently running task's load effect: */ + min_load = cpu_load(this_cpu) - SCHED_LOAD_SCALE; - minload = 10000000; - cpumask = node_to_cpumask(node); - for (i = 0; i < NR_CPUS; ++i) { - if (!cpu_isset(i, cpumask)) - continue; - if (cpu_rq(i)->nr_running < minload) { + cpus_and(tmp, sd->span, cpu_online_map); + cpu_clear(this_cpu, tmp); + + for_each_cpu_mask(i, tmp) { + unsigned long load = cpu_load(i); + + if (min_load > load) { best_cpu = i; - minload = cpu_rq(i)->nr_running; + min_load = load; } } return best_cpu; } +/* + * sched_balance_exec(): find the highest-level, exec-balance-capable + * domain and try to migrate the current task to the least loaded CPU. + * + * execve() is a valuable balancing opportunity, because at this point + * the task has the smallest effective cache footprint - a completely new + * process image is being created, so almost all of the currently existing + * cache footprint is irrelevant. So we attempt to balance this task as + * broadly as possible, without considering migration costs, which costs + * otherwise affect all other types of task migrations. 
+ */ void sched_balance_exec(void) { - int new_cpu; + struct sched_domain *sd, *best_sd = NULL; + int new_cpu, this_cpu = get_cpu(); - if (numnodes > 1) { - new_cpu = sched_best_cpu(current); - if (new_cpu != smp_processor_id()) - sched_migrate_task(current, new_cpu); - } -} + schedstat_inc(this_cpu, sbe_cnt); + /* Prefer the current CPU if there's only this task running: */ + if (this_rq()->nr_running <= 1) + goto out; -/* - * Find the busiest node. All previous node loads contribute with a - * geometrically deccaying weight to the load measure: - * load_{t} = load_{t-1}/2 + nr_node_running_{t} - * This way sudden load peaks are flattened out a bit. - * Node load is divided by nr_cpus_node() in order to compare nodes - * of different cpu count but also [first] multiplied by 10 to - * provide better resolution. - */ -static int find_busiest_node(int this_node) -{ - int i, node = -1, load, this_load, maxload; - - if (!nr_cpus_node(this_node)) - return node; - this_load = maxload = (this_rq()->prev_node_load[this_node] >> 1) - + (10 * atomic_read(&node_nr_running[this_node]) - / nr_cpus_node(this_node)); - this_rq()->prev_node_load[this_node] = this_load; - for_each_node_with_cpus(i) { - if (i == this_node) - continue; - load = (this_rq()->prev_node_load[i] >> 1) - + (10 * atomic_read(&node_nr_running[i]) - / nr_cpus_node(i)); - this_rq()->prev_node_load[i] = load; - if (load > maxload && (100*load > NODE_THRESHOLD*this_load)) { - maxload = load; - node = i; + for_each_domain(this_cpu, sd) + if (sd->flags & SD_BALANCE_EXEC) + best_sd = sd; + + if (best_sd) { + new_cpu = sched_best_cpu(current, best_sd); + if (new_cpu != this_cpu) { + put_cpu(); + sched_migrate_task(current, new_cpu); + return; } } - return node; +out: + put_cpu(); } -#endif /* CONFIG_NUMA */ - -#ifdef CONFIG_SMP - /* - * double_lock_balance - lock the busiest runqueue - * - * this_rq is locked already. Recalculate nr_running if we have to - * drop the runqueue lock. 
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already. */ -static inline -unsigned int double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest, - int this_cpu, int idle, - unsigned int nr_running) +static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) { if (unlikely(!spin_trylock(&busiest->lock))) { if (busiest < this_rq) { spin_unlock(&this_rq->lock); spin_lock(&busiest->lock); spin_lock(&this_rq->lock); - /* Need to recalculate nr_running */ - if (idle || (this_rq->nr_running > - this_rq->prev_cpu_load[this_cpu])) - nr_running = this_rq->nr_running; - else - nr_running = this_rq->prev_cpu_load[this_cpu]; } else spin_lock(&busiest->lock); } - return nr_running; } /* - * find_busiest_queue - find the busiest runqueue among the cpus in cpumask. + * pull_task - move a task from a remote runqueue to the local runqueue. + * Both runqueues must be locked. */ static inline -runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, - int *imbalance, cpumask_t cpumask) +void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, + runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) { - int nr_running, load, max_load, i; - runqueue_t *busiest, *rq_src; + dequeue_task(p, src_array); + src_rq->nr_running--; + set_task_cpu(p, this_cpu); + this_rq->nr_running++; + enqueue_task_head(p, this_array); + p->timestamp = sched_clock() - + (src_rq->timestamp_last_tick - p->timestamp); + /* + * Note that idle threads have a prio of MAX_PRIO, for this test + * to be always true for them. + */ + if (TASK_PREEMPTS_CURR(p, this_rq)) + resched_task(this_rq->curr); +} +/* + * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? + */ +static inline +int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, + struct sched_domain *sd, enum idle_type idle) +{ /* - * We search all runqueues to find the most busy one. 
- * We do this lockless to reduce cache-bouncing overhead, - * we re-check the 'best' source CPU later on again, with - * the lock held. - * - * We fend off statistical fluctuations in runqueue lengths by - * saving the runqueue length (as seen by the balancing CPU) during - * the previous load-balancing operation and using the smaller one - * of the current and saved lengths. If a runqueue is long enough - * for a longer amount of time then we recognize it and pull tasks - * from it. - * - * The 'current runqueue length' is a statistical maximum variable, - * for that one we take the longer one - to avoid fluctuations in - * the other direction. So for a load-balance to happen it needs - * stable long runqueue on the target CPU and stable short runqueue - * on the local runqueue. - * - * We make an exception if this CPU is about to become idle - in - * that case we are less picky about moving a task across CPUs and - * take what can be taken. + * We do not migrate tasks that are: + * 1) running (obviously), or + * 2) cannot be migrated to this CPU due to cpus_allowed, or + * 3) are cache-hot on their current CPU. 
*/ - if (idle || (this_rq->nr_running > this_rq->prev_cpu_load[this_cpu])) - nr_running = this_rq->nr_running; - else - nr_running = this_rq->prev_cpu_load[this_cpu]; + if (task_running(rq, p)) + return 0; + if (!cpu_isset(this_cpu, p->cpus_allowed)) + return 0; - busiest = NULL; - max_load = 1; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_isset(i, cpumask)) - continue; + /* Aggressive migration if we've failed balancing */ + if (idle == NEWLY_IDLE || + sd->nr_balance_failed < sd->cache_nice_tries) { + if (task_hot(p, rq->timestamp_last_tick, sd)) + return 0; + } - rq_src = cpu_rq(i); - if (idle || (rq_src->nr_running < this_rq->prev_cpu_load[i])) - load = rq_src->nr_running; - else - load = this_rq->prev_cpu_load[i]; - this_rq->prev_cpu_load[i] = rq_src->nr_running; + return 1; +} - if ((load > max_load) && (rq_src != this_rq)) { - busiest = rq_src; - max_load = load; - } - } +/* + * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq, + * as part of a balancing operation within "domain". Returns the number of + * tasks moved. + * + * Called with both runqueues locked. + */ +static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, + unsigned long max_nr_move, struct sched_domain *sd, + enum idle_type idle) +{ + prio_array_t *array, *dst_array; + struct list_head *head, *curr; + int ret, idx, pulled = 0; + task_t *tmp; - if (likely(!busiest)) + if (max_nr_move <= 0 || busiest->nr_running <= 1) goto out; - *imbalance = max_load - nr_running; + /* We first consider active tasks. */ + if (busiest->active->nr_active) { + array = busiest->active; + dst_array = this_rq->active; + } else { + array = busiest->expired; + dst_array = this_rq->expired; + } - /* It needs an at least ~25% imbalance to trigger balancing. 
*/ - if (!idle && ((*imbalance)*4 < max_load)) { - busiest = NULL; +new_array: + /* Start searching at priority 0: */ + idx = 0; +skip_bitmap: + if (!idx) + idx = sched_find_first_bit(array->bitmap); + else + idx = find_next_bit(array->bitmap, MAX_PRIO, idx); + if (idx >= MAX_PRIO) { + if (array == busiest->active && busiest->expired->nr_active) { + array = busiest->expired; + dst_array = this_rq->expired; + goto new_array; + } goto out; } - nr_running = double_lock_balance(this_rq, busiest, this_cpu, - idle, nr_running); + head = array->queue + idx; + curr = head->next; +skip_queue: + tmp = list_entry(curr, task_t, run_list); + + curr = curr->next; + + ret = can_migrate_task(tmp, busiest, this_cpu, sd, idle); + if (ret == -1) { + idx++; + goto skip_bitmap; + } + if (!ret) { + if (curr != head) + goto skip_queue; + idx++; + goto skip_bitmap; + } +#ifdef CONFIG_SCHEDSTATS /* - * Make sure nothing changed since we checked the - * runqueue length. + * Right now, this is the only place pull_task() is called, + * so we can safely collect pull_task() stats here. */ - if (busiest->nr_running <= nr_running) { - spin_unlock(&busiest->lock); - busiest = NULL; + switch (idle) { + case NEWLY_IDLE: + schedstat_inc(this_cpu, pt_gained_newidle); + schedstat_inc(busiest->cpu, pt_lost_newidle); + break; + case IDLE: + schedstat_inc(this_cpu, pt_gained_idle); + schedstat_inc(busiest->cpu, pt_lost_idle); + break; + case NOT_IDLE: + schedstat_inc(this_cpu, pt_gained_notidle); + schedstat_inc(busiest->cpu, pt_lost_notidle); + break; + } +#endif + pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); + pulled++; + + /* We only want to steal up to the prescribed number of tasks. */ + if (pulled < max_nr_move) { + if (curr != head) + goto skip_queue; + idx++; + goto skip_bitmap; } out: - return busiest; + return pulled; } /* - * pull_task - move a task from a remote runqueue to the local runqueue. - * Both runqueues must be locked. 
+ * find_busiest_group finds and returns the busiest CPU group within the + * domain. It calculates and returns the number of tasks which should be + * moved to restore balance via the imbalance parameter. */ -static inline -void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, - runqueue_t *this_rq, int this_cpu) +static struct sched_group * +find_busiest_group(struct sched_domain *sd, int this_cpu, + unsigned long *nr_move, enum idle_type idle) { - dequeue_task(p, src_array); - nr_running_dec(src_rq); - set_task_cpu(p, this_cpu); - nr_running_inc(this_rq); - enqueue_task(p, this_rq->active); - p->timestamp = sched_clock() - - (src_rq->timestamp_last_tick - p->timestamp); - /* - * Note that idle threads have a prio of MAX_PRIO, for this test - * to be always true for them. + struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; + unsigned long max_load, avg_load, total_load, + this_load, total_pwr, delta; + + max_load = this_load = total_load = total_pwr = 0; + + do { + cpumask_t tmp; + int i; + + /* Tally up the load of all CPUs in the group */ + cpus_and(tmp, group->cpumask, cpu_online_map); + WARN_ON(cpus_empty(tmp)); + + avg_load = 0; + for_each_cpu_mask(i, tmp) + avg_load += cpu_load(i); + + total_load += avg_load; + total_pwr += group->cpu_power; + + /* Adjust by relative CPU power of the group */ + avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; + + if (cpu_isset(this_cpu, group->cpumask)) { + this_load = avg_load; + this = group; + goto nextgroup; + } + if (avg_load > max_load) { + max_load = avg_load; + busiest = group; + } +nextgroup: + group = group->next; + } while (group != sd->groups); + + if (!busiest || this_load >= max_load) + goto out_balanced; + + avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; + + if ((this_load >= avg_load) + || (100*max_load <= sd->imbalance_pct*this_load)) + goto out_balanced; + + /* + * We're trying to get all the cpus to the average_load, so we don't + * want to push 
ourselves above the average load, nor do we wish to + * reduce the max loaded cpu below the average load, as either of these + * actions would just result in more rebalancing later, and ping-pong + * tasks around. Thus we look for the minimum possible imbalance. + * Negative imbalances (*we* are more loaded than anyone else) will + * be counted as no imbalance for these purposes -- we can't fix that + * by pulling tasks to us. Be careful of negative numbers as they'll + * appear as very large values with unsigned longs. */ - if (TASK_PREEMPTS_CURR(p, this_rq)) - set_need_resched(); + delta = max_load - this_load; + + if (delta > SCHED_LOAD_SCALE) { + delta = min(max_load - avg_load, avg_load - this_load); + /* + * How many tasks to actually move to equalise the + * imbalance: first round up (which will move us across + * the average unless we can precisely balance to the + * average) and get rid of the scaling factor: + */ + delta += SCHED_LOAD_SCALE-1; + *nr_move = delta / SCHED_LOAD_SCALE; + + if (*nr_move) + return busiest; + } + +out_balanced: + if (busiest && idle == NEWLY_IDLE && max_load > SCHED_LOAD_SCALE) { + *nr_move = 1; + return busiest; + } + + *nr_move = 0; + return NULL; +} +/* + * find_busiest_queue - find the busiest runqueue among the cpus in group. + */ +static runqueue_t *find_busiest_queue(struct sched_group *group) +{ + cpumask_t tmp; + unsigned long load, max_load = 0; + runqueue_t *busiest = NULL; + int i; + + cpus_and(tmp, group->cpumask, cpu_online_map); + for_each_cpu_mask(i, tmp) { + load = cpu_load(i); + + if (load > max_load) { + max_load = load; + busiest = cpu_rq(i); + } + } + + return busiest; } -/* - * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? - */ -static inline -int can_migrate_task(task_t *tsk, runqueue_t *rq, int this_cpu, int idle) -{ - unsigned long delta = rq->timestamp_last_tick - tsk->timestamp; +/* + * Check this_cpu to ensure it is balanced within domain. 
Attempt to move + * tasks if there is an imbalance. + * + * Called with this_rq unlocked. + */ +static int load_balance(int this_cpu, runqueue_t *this_rq, + struct sched_domain *sd, enum idle_type idle) +{ + struct sched_group *group; + unsigned long imbalance; + runqueue_t *busiest; + int nr_moved; + + spin_lock(&this_rq->lock); + schedstat_inc(this_cpu, lb_cnt); + + group = find_busiest_group(sd, this_cpu, &imbalance, idle); + if (!group) { + schedstat_inc(this_cpu, lb_nobusyg); + goto out_balanced; + } + + busiest = find_busiest_queue(group); + if (!busiest) { + schedstat_inc(this_cpu, lb_nobusyq); + goto out_balanced; + } + + if (unlikely(busiest == this_rq)) { + WARN_ON(1); + goto out_balanced; + } + schedstat_add(this_cpu, lb_imbalance, imbalance); + + /* Attempt to move tasks */ + double_lock_balance(this_rq, busiest); + + nr_moved = move_tasks(this_rq, this_cpu, busiest, imbalance, sd, idle); + spin_unlock(&this_rq->lock); + spin_unlock(&busiest->lock); + + if (!nr_moved) { + sd->nr_balance_failed++; + if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { + int wake = 0; + + spin_lock(&busiest->lock); + if (!busiest->active_balance) { + busiest->active_balance = 1; + busiest->push_cpu = this_cpu; + wake = 1; + } + spin_unlock(&busiest->lock); + if (wake) + wake_up_process(busiest->migration_thread); + /* + * We've kicked active balancing, reset the + * failure counter: + */ + sd->nr_balance_failed = 0; + } + } else + sd->nr_balance_failed = 0; + + /* We were unbalanced, so reset the balancing interval */ + sd->balance_interval = sd->min_interval; + + return nr_moved; + +out_balanced: + spin_unlock(&this_rq->lock); - /* - * We do not migrate tasks that are: - * 1) running (obviously), or - * 2) cannot be migrated to this CPU due to cpus_allowed, or - * 3) are cache-hot on their current CPU. 
- */ - if (task_running(rq, tsk)) - return 0; - if (!cpu_isset(this_cpu, tsk->cpus_allowed)) - return 0; - if (!idle && (delta <= JIFFIES_TO_NS(cache_decay_ticks))) - return 0; - return 1; + /* tune up the balancing interval */ + if (sd->balance_interval < sd->max_interval) + sd->balance_interval *= 2; + + return 0; } /* - * Current runqueue is empty, or rebalance tick: if there is an - * inbalance (current runqueue is too short) then pull from - * busiest runqueue(s). + * Check this_cpu to ensure it is balanced within domain. Attempt to move + * tasks if there is an imbalance. * - * We call this with the current runqueue locked, - * irqs disabled. + * Called from schedule when this_rq is about to become idle (NEWLY_IDLE). + * this_rq is locked. */ -static void load_balance(runqueue_t *this_rq, int idle, cpumask_t cpumask) +static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, + struct sched_domain *sd) { - int imbalance, idx, this_cpu = smp_processor_id(); - runqueue_t *busiest; - prio_array_t *array; - struct list_head *head, *curr; - task_t *tmp; - - if (cpu_is_offline(this_cpu)) + struct sched_group *group; + runqueue_t *busiest = NULL; + unsigned long imbalance; + int nr_moved = 0; + + schedstat_inc(this_cpu, lbni_cnt); + group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE); + if (!group) goto out; - busiest = find_busiest_queue(this_rq, this_cpu, idle, - &imbalance, cpumask); - if (!busiest) + busiest = find_busiest_queue(group); + if (!busiest || busiest == this_rq) goto out; - /* - * We only want to steal a number of tasks equal to 1/2 the imbalance, - * otherwise we'll just shift the imbalance to the new queue: - */ - imbalance /= 2; + /* Attempt to move tasks */ + double_lock_balance(this_rq, busiest); - /* - * We first consider expired tasks. Those will likely not be - * executed in the near future, and they are most likely to - * be cache-cold, thus switching CPUs has the least effect - * on them. 
- */ - if (busiest->expired->nr_active) - array = busiest->expired; - else - array = busiest->active; + schedstat_inc(this_cpu, lbni_imbalance); + nr_moved = move_tasks(this_rq, this_cpu, busiest, + imbalance, sd, NEWLY_IDLE); -new_array: - /* Start searching at priority 0: */ - idx = 0; -skip_bitmap: - if (!idx) - idx = sched_find_first_bit(array->bitmap); - else - idx = find_next_bit(array->bitmap, MAX_PRIO, idx); - if (idx >= MAX_PRIO) { - if (array == busiest->expired) { - array = busiest->active; - goto new_array; + spin_unlock(&busiest->lock); + +out: + return nr_moved; +} + +/* + * idle_balance is called by schedule() if this_cpu is about to become + * idle. Attempts to pull tasks from other CPUs. + */ +static inline void idle_balance(int this_cpu, runqueue_t *this_rq) +{ + struct sched_domain *sd; + + if (unlikely(cpu_is_offline(this_cpu))) + return; + + for_each_domain(this_cpu, sd) { + if (sd->flags & SD_BALANCE_NEWIDLE) { + if (load_balance_newidle(this_cpu, this_rq, sd)) { + /* We've pulled tasks over so stop searching */ + break; + } } - goto out_unlock; } +} - head = array->queue + idx; - curr = head->prev; -skip_queue: - tmp = list_entry(curr, task_t, run_list); +/* + * active_load_balance is run by migration threads. It pushes a running + * task off the cpu. It can be required to correctly have at least 1 task + * running on each physical CPU where possible, and not have a physical / + * logical imbalance. + * + * Called with busiest locked. 
+ */ +static void active_load_balance(runqueue_t *busiest, int busiest_cpu) +{ + struct sched_group *group, *busy_group; + struct sched_domain *sd; + int i; - curr = curr->prev; + schedstat_inc(busiest_cpu, alb_cnt); + if (busiest->nr_running <= 1) + return; - if (!can_migrate_task(tmp, busiest, this_cpu, idle)) { - if (curr != head) - goto skip_queue; - idx++; - goto skip_bitmap; - } - pull_task(busiest, array, tmp, this_rq, this_cpu); + for_each_domain(busiest_cpu, sd) + if (cpu_isset(busiest->push_cpu, sd->span)) + break; - /* Only migrate one task if we are idle */ - if (!idle && --imbalance) { - if (curr != head) - goto skip_queue; - idx++; - goto skip_bitmap; + if (!sd->parent && !cpu_isset(busiest->push_cpu, sd->span)) { + WARN_ON(1); + return; } -out_unlock: - spin_unlock(&busiest->lock); -out: - ; + + group = sd->groups; + while (!cpu_isset(busiest_cpu, group->cpumask)) + group = group->next; + busy_group = group; + + group = sd->groups; + do { + cpumask_t tmp; + runqueue_t *rq; + int push_cpu = 0; + + if (group == busy_group) + goto next_group; + + cpus_and(tmp, group->cpumask, cpu_online_map); + if (!cpus_weight(tmp)) + goto next_group; + + for_each_cpu_mask(i, tmp) { + if (!idle_cpu(i)) + goto next_group; + push_cpu = i; + } + + rq = cpu_rq(push_cpu); + double_lock_balance(busiest, rq); + move_tasks(rq, push_cpu, busiest, 1, sd, IDLE); + schedstat_inc(busiest_cpu, alb_lost); + schedstat_inc(push_cpu, alb_gained); + spin_unlock(&rq->lock); +next_group: + group = group->next; + } while (group != sd->groups); } /* - * One of the idle_cpu_tick() and busy_cpu_tick() functions will - * get called every timer tick, on every CPU. Our balancing action - * frequency and balancing agressivity depends on whether the CPU is - * idle or not. + * rebalance_tick will get called every timer tick, on every CPU. * - * busy-rebalance every 200 msecs. idle-rebalance every 1 msec. (or on - * systems with HZ=100, every 10 msecs.) 
+ * It checks each scheduling domain to see if it is due to be balanced, + * and initiates a balancing operation if so. * - * On NUMA, do a node-rebalance every 400 msecs. + * Balancing parameters are set up in arch_init_sched_domains. */ -#define IDLE_REBALANCE_TICK (HZ/1000 ?: 1) -#define BUSY_REBALANCE_TICK (HZ/5 ?: 1) -#define IDLE_NODE_REBALANCE_TICK (IDLE_REBALANCE_TICK * 5) -#define BUSY_NODE_REBALANCE_TICK (BUSY_REBALANCE_TICK * 2) -#ifdef CONFIG_NUMA -static void balance_node(runqueue_t *this_rq, int idle, int this_cpu) +/* Don't have all balancing operations going off at once */ +#define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS) + +static void rebalance_tick(int this_cpu, runqueue_t *this_rq, + enum idle_type idle) { - int node = find_busiest_node(cpu_to_node(this_cpu)); + unsigned long j = jiffies + CPU_OFFSET(this_cpu); + struct sched_domain *sd; - if (node >= 0) { - cpumask_t cpumask = node_to_cpumask(node); - cpu_set(this_cpu, cpumask); - spin_lock(&this_rq->lock); - load_balance(this_rq, idle, cpumask); - spin_unlock(&this_rq->lock); - } -} -#endif + if (unlikely(cpu_is_offline(this_cpu))) + return; -static void rebalance_tick(runqueue_t *this_rq, int idle) -{ -#ifdef CONFIG_NUMA - int this_cpu = smp_processor_id(); -#endif - unsigned long j = jiffies; + for_each_domain(this_cpu, sd) { + unsigned long interval = sd->balance_interval; - /* - * First do inter-node rebalancing, then intra-node rebalancing, - * if both events happen in the same tick. The inter-node - * rebalancing does not necessarily have to create a perfect - * balance within the node, since we load-balance the most loaded - * node with the current CPU. (ie. other CPUs in the local node - * are not balanced.) 
- */ - if (idle) { -#ifdef CONFIG_NUMA - if (!(j % IDLE_NODE_REBALANCE_TICK)) - balance_node(this_rq, idle, this_cpu); -#endif - if (!(j % IDLE_REBALANCE_TICK)) { - spin_lock(&this_rq->lock); - load_balance(this_rq, idle, cpu_to_node_mask(this_cpu)); - spin_unlock(&this_rq->lock); + if (idle != IDLE) + interval *= sd->busy_factor; + + /* scale ms to jiffies */ + interval = MSEC_TO_JIFFIES(interval); + if (unlikely(!interval)) + interval = 1; + + if (j - sd->last_balance >= interval) { +#ifdef CONFIG_SCHEDSTATS + if (idle == IDLE) { + schedstat_inc(this_cpu, lb_idle); + } else { + schedstat_inc(this_cpu, lb_busy); + } +#endif /* CONFIG_SCHEDSTATS */ + if (load_balance(this_cpu, this_rq, sd, idle)) { + /* We've pulled tasks over so no longer idle */ + idle = NOT_IDLE; + } + sd->last_balance += interval; } - return; - } -#ifdef CONFIG_NUMA - if (!(j % BUSY_NODE_REBALANCE_TICK)) - balance_node(this_rq, idle, this_cpu); -#endif - if (!(j % BUSY_REBALANCE_TICK)) { - spin_lock(&this_rq->lock); - load_balance(this_rq, idle, cpu_to_node_mask(this_cpu)); - spin_unlock(&this_rq->lock); } } #else /* * on UP we do not need to balance between CPUs: */ -static inline void rebalance_tick(runqueue_t *this_rq, int idle) +static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle) +{ +} +static inline void idle_balance(int cpu, runqueue_t *rq) { } #endif +static inline int wake_priority_sleeper(runqueue_t *rq) +{ +#ifdef CONFIG_SCHED_SMT + /* + * If an SMT sibling task has been put to sleep for priority + * reasons reschedule the idle task to see if it can now run. 
+ */ + if (rq->nr_running) { + resched_task(rq->idle); + return 1; + } +#endif + return 0; +} + DEFINE_PER_CPU(struct kernel_stat, kstat); EXPORT_PER_CPU_SYMBOL(kstat); @@ -1500,7 +2157,9 @@ void scheduler_tick(int user_ticks, int cpustat->iowait += sys_ticks; else cpustat->idle += sys_ticks; - rebalance_tick(rq, 1); + if (wake_priority_sleeper(rq)) + goto out; + rebalance_tick(cpu, rq, IDLE); return; } if (TASK_NICE(p) > 0) @@ -1584,8 +2243,93 @@ void scheduler_tick(int user_ticks, int out_unlock: spin_unlock(&rq->lock); out: - rebalance_tick(rq, 0); + rebalance_tick(cpu, rq, NOT_IDLE); +} + +#ifdef CONFIG_SCHED_SMT +static inline void wake_sleeping_dependent(int cpu, runqueue_t *rq) +{ + int i; + struct sched_domain *sd = rq->sd; + cpumask_t sibling_map; + + if (!(sd->flags & SD_SHARE_CPUPOWER)) + return; + + cpus_and(sibling_map, sd->span, cpu_online_map); + for_each_cpu_mask(i, sibling_map) { + runqueue_t *smt_rq; + + if (i == cpu) + continue; + + smt_rq = cpu_rq(i); + + /* + * If an SMT sibling task is sleeping due to priority + * reasons wake it up now. + */ + if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running) + resched_task(smt_rq->idle); + } +} + +static inline int dependent_sleeper(int cpu, runqueue_t *rq, task_t *p) +{ + struct sched_domain *sd = rq->sd; + cpumask_t sibling_map; + int ret = 0, i; + + if (!(sd->flags & SD_SHARE_CPUPOWER)) + return 0; + + cpus_and(sibling_map, sd->span, cpu_online_map); + for_each_cpu_mask(i, sibling_map) { + runqueue_t *smt_rq; + task_t *smt_curr; + + if (i == cpu) + continue; + + smt_rq = cpu_rq(i); + smt_curr = smt_rq->curr; + + /* + * If a user task with lower static priority than the + * running task on the SMT sibling is trying to schedule, + * delay it till there is proportionately less timeslice + * left of the sibling task to prevent a lower priority + * task from using an unfair proportion of the + * physical cpu's resources. 
-ck + */ + if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) > + task_timeslice(p) || rt_task(smt_curr)) && + p->mm && smt_curr->mm && !rt_task(p)) + ret = 1; + + /* + * Reschedule a lower priority task on the SMT sibling, + * or wake it up if it has been put to sleep for priority + * reasons. + */ + if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) > + task_timeslice(smt_curr) || rt_task(p)) && + smt_curr->mm && p->mm && !rt_task(smt_curr)) || + (smt_curr == smt_rq->idle && smt_rq->nr_running)) + resched_task(smt_curr); + } + return ret; +} +#else +static inline void wake_sleeping_dependent(int cpu, runqueue_t *rq) +{ +} + +static inline int dependent_sleeper(int cpu, runqueue_t *rq, task_t *p) +{ + return 0; } +#endif void scheduling_functions_start_here(void) { } @@ -1601,13 +2345,14 @@ asmlinkage void schedule(void) struct list_head *queue; unsigned long long now; unsigned long run_time; - int idx; + int cpu, idx; /* * Test if we are atomic. Since do_exit() needs to call into * schedule() atomically, we ignore that path for now. * Otherwise, whine if we are scheduling when we should not be. */ + schedstat_inc(smp_processor_id(), sched_cnt); if (likely(!(current->state & (TASK_DEAD | TASK_ZOMBIE)))) { if (unlikely(in_atomic())) { printk(KERN_ERR "bad: scheduling while atomic!\n"); @@ -1651,13 +2396,13 @@ need_resched: deactivate_task(prev, rq); } + cpu = smp_processor_id(); if (unlikely(!rq->nr_running)) { -#ifdef CONFIG_SMP - load_balance(rq, 1, cpu_to_node_mask(smp_processor_id())); -#endif + idle_balance(cpu, rq); if (!rq->nr_running) { next = rq->idle; rq->expired_timestamp = 0; + wake_sleeping_dependent(cpu, rq); goto switch_tasks; } } @@ -1667,17 +2412,24 @@ need_resched: /* * Switch the active and expired arrays. 
*/ + schedstat_inc(cpu, sched_switch); rq->active = rq->expired; rq->expired = array; array = rq->active; rq->expired_timestamp = 0; rq->best_expired_prio = MAX_PRIO; } + schedstat_inc(cpu, sched_noswitch); idx = sched_find_first_bit(array->bitmap); queue = array->queue + idx; next = list_entry(queue->next, task_t, run_list); + if (dependent_sleeper(cpu, rq, next)) { + next = rq->idle; + goto switch_tasks; + } + if (!rt_task(next) && next->activated > 0) { unsigned long long delta = now - next->timestamp; @@ -1703,6 +2455,7 @@ switch_tasks: } prev->timestamp = now; + sched_info_switch(prev, next); if (likely(prev != next)) { next->timestamp = now; rq->nr_switches++; @@ -2014,6 +2767,13 @@ out_unlock: EXPORT_SYMBOL(set_user_nice); +#if defined( CONFIG_KGDB) +struct task_struct * kgdb_get_idle(int this_cpu) +{ + return cpu_rq(this_cpu)->idle; +} +#endif + #ifndef __alpha__ /* @@ -2198,7 +2958,7 @@ static int setscheduler(pid_t pid, int p if (task_running(rq, p)) { if (p->prio > oldprio) resched_task(rq->curr); - } else if (p->prio < rq->curr->prio) + } else if (TASK_PREEMPTS_CURR(p, rq)) resched_task(rq->curr); } @@ -2397,7 +3157,12 @@ asmlinkage long sys_sched_yield(void) { runqueue_t *rq = this_rq_lock(); prio_array_t *array = current->array; + prio_array_t *target = rq->expired; +#ifdef CONFIG_SCHEDSTATS + int this_cpu = smp_processor_id(); +#endif /* CONFIG_SCHEDSTATS */ + schedstat_inc(this_cpu, yld_cnt); /* * We implement yielding by moving the task into the expired * queue. @@ -2405,13 +3170,12 @@ asmlinkage long sys_sched_yield(void) * (special rule: RT tasks will just roundrobin in the active * array.) 
*/ - if (likely(!rt_task(current))) { - dequeue_task(current, array); - enqueue_task(current, rq->expired); - } else { - list_del(&current->run_list); - list_add_tail(&current->run_list, array->queue + current->prio); - } + if (unlikely(rt_task(current))) + target = rq->active; + + dequeue_task(current, array); + enqueue_task(current, target); + /* * Since we are going to call schedule() anyway, there's * no need to preempt: @@ -2722,7 +3486,12 @@ int set_cpus_allowed(task_t *p, cpumask_ goto out; } - if (__set_cpus_allowed(p, new_mask, &req)) { + p->cpus_allowed = new_mask; + /* Can the task run on the task's current CPU? If so, we're done */ + if (cpu_isset(task_cpu(p), new_mask)) + goto out; + + if (migrate_task(p, any_online_cpu(new_mask), &req)) { /* Need help from migration thread: drop lock and wait. */ task_rq_unlock(rq, &flags); wake_up_process(rq->migration_thread); @@ -2736,22 +3505,34 @@ out: EXPORT_SYMBOL_GPL(set_cpus_allowed); -/* Move (not current) task off this cpu, onto dest cpu. */ -static void move_task_away(struct task_struct *p, int dest_cpu) +/* + * Move (not current) task off this cpu, onto dest cpu. We're doing + * this because either it can't run here any more (set_cpus_allowed() + * away from this CPU, or CPU going down), or because we're + * attempting to rebalance this task on exec (sched_balance_exec). + * + * So we race with normal scheduler movements, but that's OK, as long + * as the task is no longer on this CPU. + */ +static void __migrate_task(struct task_struct *p, int dest_cpu) { runqueue_t *rq_dest; rq_dest = cpu_rq(dest_cpu); double_rq_lock(this_rq(), rq_dest); + /* Already moved. */ if (task_cpu(p) != smp_processor_id()) - goto out; /* Already moved */ + goto out; + /* Affinity changed (again). 
*/ + if (!cpu_isset(dest_cpu, p->cpus_allowed)) + goto out; set_task_cpu(p, dest_cpu); if (p->array) { deactivate_task(p, this_rq()); activate_task(p, rq_dest); - if (p->prio < rq_dest->curr->prio) + if (TASK_PREEMPTS_CURR(p, rq_dest)) resched_task(rq_dest->curr); } p->timestamp = rq_dest->timestamp_last_tick; @@ -2781,7 +3562,13 @@ static int migration_thread(void * data) refrigerator(PF_IOTHREAD); spin_lock_irq(&rq->lock); + if (rq->active_balance) { + active_load_balance(rq, cpu); + rq->active_balance = 0; + } + head = &rq->migration_queue; + current->state = TASK_INTERRUPTIBLE; if (list_empty(head)) { spin_unlock_irq(&rq->lock); @@ -2790,11 +3577,19 @@ static int migration_thread(void * data) } req = list_entry(head->next, migration_req_t, list); list_del_init(head->next); + spin_unlock(&rq->lock); - move_task_away(req->task, - any_online_cpu(req->task->cpus_allowed)); + if (req->type == REQ_MOVE_TASK) { + __migrate_task(req->task, req->dest_cpu); + } else if (req->type == REQ_SET_DOMAIN) { + rq->sd = req->sd; + } else { + WARN_ON(1); + } + local_irq_enable(); + complete(&req->done); } return 0; @@ -2850,7 +3645,7 @@ void migrate_all_tasks(void) tsk->pid, tsk->comm, src_cpu); } - move_task_away(tsk, dest_cpu); + __migrate_task(tsk, dest_cpu); } while_each_thread(t, tsk); write_unlock(&tasklist_lock); @@ -2929,23 +3724,288 @@ int __init migration_init(void) spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; EXPORT_SYMBOL(kernel_flag); +#ifdef CONFIG_SMP +/* Attach the domain 'sd' to 'cpu' as its base domain */ +void cpu_attach_domain(struct sched_domain *sd, int cpu) +{ + migration_req_t req; + unsigned long flags; + runqueue_t *rq = cpu_rq(cpu); + int local = 1; + + spin_lock_irqsave(&rq->lock, flags); + + if (cpu == smp_processor_id() || cpu_is_offline(cpu)) { + rq->sd = sd; + } else { + init_completion(&req.done); + req.type = REQ_SET_DOMAIN; + req.sd = sd; + list_add(&req.list, &rq->migration_queue); + local = 0; + } + + 
spin_unlock_irqrestore(&rq->lock, flags); + + if (!local) { + wake_up_process(rq->migration_thread); + wait_for_completion(&req.done); + } +} + +#ifdef ARCH_HAS_SCHED_DOMAIN +extern void __init arch_init_sched_domains(void); +#else +static struct sched_group sched_group_cpus[NR_CPUS]; +static DEFINE_PER_CPU(struct sched_domain, cpu_domains); +#ifdef CONFIG_SCHED_NUMA +static struct sched_group sched_group_nodes[MAX_NUMNODES]; +static DEFINE_PER_CPU(struct sched_domain, node_domains); +static void __init arch_init_sched_domains(void) +{ + int i; + struct sched_group *first_node = NULL, *last_node = NULL; + + /* Set up domains */ + for_each_cpu(i) { + int node = cpu_to_node(i); + cpumask_t nodemask = node_to_cpumask(node); + struct sched_domain *node_sd = &per_cpu(node_domains, i); + struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + + *node_sd = SD_NODE_INIT; + node_sd->span = cpu_possible_map; + node_sd->groups = &sched_group_nodes[cpu_to_node(i)]; + + *cpu_sd = SD_CPU_INIT; + cpus_and(cpu_sd->span, nodemask, cpu_possible_map); + cpu_sd->groups = &sched_group_cpus[i]; + cpu_sd->parent = node_sd; + } + + /* Set up groups */ + for (i = 0; i < MAX_NUMNODES; i++) { + struct sched_group *first_cpu = NULL, *last_cpu = NULL; + struct sched_group *node = &sched_group_nodes[i]; + cpumask_t tmp = node_to_cpumask(i); + cpumask_t nodemask; + int j; + + cpus_and(nodemask, tmp, cpu_possible_map); + + if (cpus_empty(nodemask)) + continue; + + node->cpumask = nodemask; + node->cpu_power = SCHED_LOAD_SCALE * cpus_weight(node->cpumask); + + for_each_cpu_mask(j, node->cpumask) { + struct sched_group *cpu = &sched_group_cpus[j]; + + cpus_clear(cpu->cpumask); + cpu_set(j, cpu->cpumask); + cpu->cpu_power = SCHED_LOAD_SCALE; + + if (!first_cpu) + first_cpu = cpu; + if (last_cpu) + last_cpu->next = cpu; + last_cpu = cpu; + } + last_cpu->next = first_cpu; + + if (!first_node) + first_node = node; + if (last_node) + last_node->next = node; + last_node = node; + } + last_node->next = 
first_node; + + mb(); + for_each_cpu(i) { + struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + cpu_attach_domain(cpu_sd, i); + } +} + +#else /* !CONFIG_SCHED_NUMA */ +static void __init arch_init_sched_domains(void) +{ + int i; + struct sched_group *first_cpu = NULL, *last_cpu = NULL; + + /* Set up domains */ + for_each_cpu(i) { + struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + + *cpu_sd = SD_CPU_INIT; + cpu_sd->span = cpu_possible_map; + cpu_sd->groups = &sched_group_cpus[i]; + } + + /* Set up CPU groups */ + for_each_cpu_mask(i, cpu_possible_map) { + struct sched_group *cpu = &sched_group_cpus[i]; + + cpus_clear(cpu->cpumask); + cpu_set(i, cpu->cpumask); + cpu->cpu_power = SCHED_LOAD_SCALE; + + if (!first_cpu) + first_cpu = cpu; + if (last_cpu) + last_cpu->next = cpu; + last_cpu = cpu; + } + last_cpu->next = first_cpu; + + mb(); /* domains were modified outside the lock */ + for_each_cpu(i) { + struct sched_domain *cpu_sd = &per_cpu(cpu_domains, i); + cpu_attach_domain(cpu_sd, i); + } +} + +#endif /* CONFIG_SCHED_NUMA */ +#endif /* ARCH_HAS_SCHED_DOMAIN */ + +#define SCHED_DOMAIN_DEBUG +#ifdef SCHED_DOMAIN_DEBUG +void sched_domain_debug(void) +{ + int i; + + for_each_cpu(i) { + runqueue_t *rq = cpu_rq(i); + struct sched_domain *sd; + int level = 0; + + sd = rq->sd; + + printk(KERN_DEBUG "CPU%d: %s\n", + i, (cpu_online(i) ? 
" online" : "offline")); + + do { + int j; + char str[NR_CPUS]; + struct sched_group *group = sd->groups; + cpumask_t groupmask, tmp; + + cpumask_scnprintf(str, NR_CPUS, sd->span); + cpus_clear(groupmask); + + printk(KERN_DEBUG); + for (j = 0; j < level + 1; j++) + printk(" "); + printk("domain %d: span %s\n", level, str); + + if (!cpu_isset(i, sd->span)) + printk(KERN_DEBUG "ERROR domain->span does not contain CPU%d\n", i); + if (!cpu_isset(i, group->cpumask)) + printk(KERN_DEBUG "ERROR domain->groups does not contain CPU%d\n", i); + if (!group->cpu_power) + printk(KERN_DEBUG "ERROR domain->cpu_power not set\n"); + + printk(KERN_DEBUG); + for (j = 0; j < level + 2; j++) + printk(" "); + printk("groups:"); + do { + if (!group) { + printk(" ERROR: NULL"); + break; + } + + if (!cpus_weight(group->cpumask)) + printk(" ERROR empty group:"); + + cpus_and(tmp, groupmask, group->cpumask); + if (cpus_weight(tmp) > 0) + printk(" ERROR repeated CPUs:"); + + cpus_or(groupmask, groupmask, group->cpumask); + + cpumask_scnprintf(str, NR_CPUS, group->cpumask); + printk(" %s", str); + + group = group->next; + } while (group != sd->groups); + printk("\n"); + + if (!cpus_equal(sd->span, groupmask)) + printk(KERN_DEBUG "ERROR groups don't span domain->span\n"); + + level++; + sd = sd->parent; + + if (sd) { + cpus_and(tmp, groupmask, sd->span); + if (!cpus_equal(tmp, groupmask)) + printk(KERN_DEBUG "ERROR parent span is not a superset of domain->span\n"); + } + + } while (sd); + } +} +#else +#define sched_domain_debug() {} +#endif + +void __init sched_init_smp(void) +{ + arch_init_sched_domains(); + sched_domain_debug(); +} +#else +void __init sched_init_smp(void) +{ +} +#endif /* CONFIG_SMP */ + void __init sched_init(void) { runqueue_t *rq; int i, j, k; +#ifdef CONFIG_SMP + /* Set up an initial dummy domain for early boot */ + static struct sched_domain sched_domain_init; + static struct sched_group sched_group_init; + cpumask_t cpu_mask_all = CPU_MASK_ALL; + + 
memset(&sched_domain_init, 0, sizeof(struct sched_domain)); + sched_domain_init.span = cpu_mask_all; + sched_domain_init.groups = &sched_group_init; + sched_domain_init.last_balance = jiffies; + sched_domain_init.balance_interval = INT_MAX; /* Don't balance */ + + memset(&sched_group_init, 0, sizeof(struct sched_group)); + sched_group_init.cpumask = cpu_mask_all; + sched_group_init.next = &sched_group_init; + sched_group_init.cpu_power = SCHED_LOAD_SCALE; +#endif + for (i = 0; i < NR_CPUS; i++) { prio_array_t *array; rq = cpu_rq(i); + spin_lock_init(&rq->lock); rq->active = rq->arrays; rq->expired = rq->arrays + 1; +#ifdef CONFIG_SCHEDSTATS + rq->cpu = i; +#endif /* CONFIG_SCHEDSTATS */ rq->best_expired_prio = MAX_PRIO; - spin_lock_init(&rq->lock); +#ifdef CONFIG_SMP + rq->sd = &sched_domain_init; + rq->active_balance = 0; + rq->push_cpu = 0; + rq->migration_thread = NULL; INIT_LIST_HEAD(&rq->migration_queue); +#endif atomic_set(&rq->nr_iowait, 0); - nr_running_init(rq); for (j = 0; j < 2; j++) { array = rq->arrays + j; @@ -2967,8 +4027,6 @@ void __init sched_init(void) set_task_cpu(current, smp_processor_id()); wake_up_forked_process(current); - init_timers(); - /* * The boot idle thread does lazy MMU switching as well: */ diff -purN -X /home/mbligh/.diff.exclude reference/kernel/softirq.c current/kernel/softirq.c --- reference/kernel/softirq.c 2004-04-07 14:54:37.000000000 -0700 +++ current/kernel/softirq.c 2004-04-08 15:10:24.000000000 -0700 @@ -16,6 +16,7 @@ #include #include +#include /* - No shared variables, all the data are CPU local. 
- If a softirq needs serialization, let it serialize itself @@ -69,53 +70,66 @@ static inline void wakeup_softirqd(void) */ #define MAX_SOFTIRQ_RESTART 10 -asmlinkage void do_softirq(void) +asmlinkage void __do_softirq(void) { - int max_restart = MAX_SOFTIRQ_RESTART; + struct softirq_action *h; __u32 pending; - unsigned long flags; + int max_restart = MAX_SOFTIRQ_RESTART; - if (in_interrupt()) - return; + pending = local_softirq_pending(); - local_irq_save(flags); + local_bh_disable(); +restart: + /* Reset the pending bitmask before enabling irqs */ + local_softirq_pending() = 0; + + local_irq_enable(); + + h = softirq_vec; + + do { + if (pending & 1) + h->action(h); + h++; + pending >>= 1; + } while (pending); + + local_irq_disable(); pending = local_softirq_pending(); + if (pending && --max_restart) + goto restart; - if (pending) { - struct softirq_action *h; + if (pending) + wakeup_softirqd(); - local_bh_disable(); -restart: - /* Reset the pending bitmask before enabling irqs */ - local_softirq_pending() = 0; + __local_bh_enable(); +} - local_irq_enable(); +#ifndef __ARCH_HAS_DO_SOFTIRQ + +asmlinkage void do_softirq(void) +{ + __u32 pending; + unsigned long flags; - h = softirq_vec; + if (in_interrupt()) + return; - do { - if (pending & 1) - h->action(h); - h++; - pending >>= 1; - } while (pending); + local_irq_save(flags); - local_irq_disable(); + pending = local_softirq_pending(); - pending = local_softirq_pending(); - if (pending && --max_restart) - goto restart; - if (pending) - wakeup_softirqd(); - __local_bh_enable(); - } + if (pending) + __do_softirq(); local_irq_restore(flags); } EXPORT_SYMBOL(do_softirq); +#endif + void local_bh_enable(void) { __local_bh_enable(); diff -purN -X /home/mbligh/.diff.exclude reference/kernel/sys.c current/kernel/sys.c --- reference/kernel/sys.c 2004-03-11 14:35:39.000000000 -0800 +++ current/kernel/sys.c 2004-04-09 13:23:20.000000000 -0700 @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ 
-224,6 +225,7 @@ cond_syscall(sys_acct) cond_syscall(sys_lookup_dcookie) cond_syscall(sys_swapon) cond_syscall(sys_swapoff) +cond_syscall(sys_kexec_load) cond_syscall(sys_init_module) cond_syscall(sys_delete_module) cond_syscall(sys_socketpair) @@ -484,6 +486,27 @@ asmlinkage long sys_reboot(int magic1, i machine_restart(buffer); break; +#ifdef CONFIG_KEXEC + case LINUX_REBOOT_CMD_KEXEC: + { + struct kimage *image; + if (arg) { + unlock_kernel(); + return -EINVAL; + } + image = xchg(&kexec_image, 0); + if (!image) { + unlock_kernel(); + return -EINVAL; + } + notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); + system_running = 0; + device_shutdown(); + printk(KERN_EMERG "Starting new kernel\n"); + machine_kexec(image); + break; + } +#endif #ifdef CONFIG_SOFTWARE_SUSPEND case LINUX_REBOOT_CMD_SW_SUSPEND: { diff -purN -X /home/mbligh/.diff.exclude reference/kernel/sysctl.c current/kernel/sysctl.c --- reference/kernel/sysctl.c 2004-04-07 14:54:37.000000000 -0700 +++ current/kernel/sysctl.c 2004-04-09 21:41:41.000000000 -0700 @@ -62,8 +62,20 @@ extern int cad_pid; extern int pid_max; extern int sysctl_lower_zone_protection; extern int min_free_kbytes; + +/* sched_tunables variables */ +extern int min_timeslice; +extern int max_timeslice; +extern int child_penalty; +extern int parent_penalty; +extern int exit_weight; +extern int prio_bonus_ratio; +extern int interactive_delta; + extern int printk_ratelimit_jiffies; extern int printk_ratelimit_burst; +extern int shm_use_hugepages, shm_hugepages_per_file; +extern int mmap_use_hugepages, mmap_hugepages_map_sz; /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ static int maxolduid = 65535; @@ -128,6 +140,7 @@ static struct ctl_table_header root_tabl static ctl_table kern_table[]; static ctl_table vm_table[]; +static ctl_table sched_table[]; #ifdef CONFIG_NET extern ctl_table net_table[]; #endif @@ -207,6 +220,12 @@ static ctl_table root_table[] = { .mode = 0555, .child = dev_table, 
}, + { + .ctl_name = CTL_SCHED, + .procname = "sched", + .mode = 0555, + .child = sched_table, + }, { .ctl_name = 0 } }; @@ -615,12 +634,47 @@ static ctl_table kern_table[] = { .mode = 0444, .proc_handler = &proc_dointvec, }, +#ifdef CONFIG_HUGETLBFS + { + .ctl_name = KERN_SHMUSEHUGEPAGES, + .procname = "shm-use-hugepages", + .data = &shm_use_hugepages, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = KERN_MMAPUSEHUGEPAGES, + .procname = "mmap-use-hugepages", + .data = &mmap_use_hugepages, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = KERN_HPAGES_PER_FILE, + .procname = "shm-hugepages-per-file", + .data = &shm_hugepages_per_file, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = KERN_HPAGES_MAP_SZ, + .procname = "mmap-hugepages-min-mapping", + .data = &mmap_hugepages_map_sz, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif { .ctl_name = 0 } }; /* Constants for minimum and maximum testing in vm_table. We use these as one-element integer vectors. 
*/ static int zero; +static int one = 1; static int one_hundred = 100; @@ -700,11 +754,8 @@ static ctl_table vm_table[] = { .procname = "swappiness", .data = &vm_swappiness, .maxlen = sizeof(vm_swappiness), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero, - .extra2 = &one_hundred, + .mode = 0444 /* read-only*/, + .proc_handler = &proc_dointvec, }, #ifdef CONFIG_HUGETLB_PAGE { @@ -865,6 +916,31 @@ static ctl_table dev_table[] = { { .ctl_name = 0 } }; +static ctl_table sched_table[] = { + {SCHED_MAX_TIMESLICE, "max_timeslice", &max_timeslice, + sizeof(int), 0644, NULL, &proc_dointvec_minmax, + &sysctl_intvec, NULL, &one, NULL}, + {SCHED_MIN_TIMESLICE, "min_timeslice", &min_timeslice, + sizeof(int), 0644, NULL, &proc_dointvec_minmax, + &sysctl_intvec, NULL, &one, NULL}, + {SCHED_CHILD_PENALTY, "child_penalty", &child_penalty, + sizeof(int), 0644, NULL, &proc_dointvec_minmax, + &sysctl_intvec, NULL, &zero, NULL}, + {SCHED_PARENT_PENALTY, "parent_penalty", &parent_penalty, + sizeof(int), 0644, NULL, &proc_dointvec_minmax, + &sysctl_intvec, NULL, &zero, NULL}, + {SCHED_EXIT_WEIGHT, "exit_weight", &exit_weight, + sizeof(int), 0644, NULL, &proc_dointvec_minmax, + &sysctl_intvec, NULL, &zero, NULL}, + {SCHED_PRIO_BONUS_RATIO, "prio_bonus_ratio", &prio_bonus_ratio, + sizeof(int), 0644, NULL, &proc_dointvec_minmax, + &sysctl_intvec, NULL, &zero, NULL}, + {SCHED_INTERACTIVE_DELTA, "interactive_delta", &interactive_delta, + sizeof(int), 0644, NULL, &proc_dointvec_minmax, + &sysctl_intvec, NULL, &zero, NULL}, + {0} +}; + extern void init_irq_proc (void); void __init sysctl_init(void) diff -purN -X /home/mbligh/.diff.exclude reference/kernel/timer.c current/kernel/timer.c --- reference/kernel/timer.c 2004-04-07 14:54:38.000000000 -0700 +++ current/kernel/timer.c 2004-04-09 11:53:01.000000000 -0700 @@ -764,6 +764,8 @@ static unsigned long count_active_tasks( * Requires xtime_lock to access. 
*/ unsigned long avenrun[3]; +unsigned long tasks_running[3]; +DEFINE_PER_CPU(unsigned long[3],cpu_tasks_running); /* * calc_load - given tick count, update the avenrun load estimates. @@ -771,7 +773,7 @@ unsigned long avenrun[3]; */ static inline void calc_load(unsigned long ticks) { - unsigned long active_tasks; /* fixed-point */ + unsigned long active_tasks, running_tasks; /* fixed-point */ static int count = LOAD_FREQ; count -= ticks; @@ -781,9 +783,39 @@ static inline void calc_load(unsigned lo CALC_LOAD(avenrun[0], EXP_1, active_tasks); CALC_LOAD(avenrun[1], EXP_5, active_tasks); CALC_LOAD(avenrun[2], EXP_15, active_tasks); + running_tasks = nr_running() * FIXED_1; + CALC_LOAD(tasks_running[0], EXP_1, running_tasks); + CALC_LOAD(tasks_running[1], EXP_5, running_tasks); + CALC_LOAD(tasks_running[2], EXP_15, running_tasks); } } +/* + * This does the frequency calculation a little bit different from the + * global version above. It doesn't ever look at the kernel's concept + * of time, it just updates that stats every LOAD_FREQ times into the + * function. + * + * Using jiffies is more accurate, but there _are_ just statistics, so + * they're not worth messing with xtime_lock and company. If we miss + * an interrupt or two, big deal. 
+ */ +void calc_load_cpu(int cpu) +{ + unsigned long running_tasks; + static DEFINE_PER_CPU(int, count) = { LOAD_FREQ }; + + per_cpu(count, cpu)--; + if (per_cpu(count, cpu) != 0) + return; + + per_cpu(count, cpu) += LOAD_FREQ; + running_tasks = nr_running_cpu(cpu) * FIXED_1; + CALC_LOAD(per_cpu(cpu_tasks_running, cpu)[0], EXP_1, running_tasks); + CALC_LOAD(per_cpu(cpu_tasks_running, cpu)[1], EXP_5, running_tasks); + CALC_LOAD(per_cpu(cpu_tasks_running, cpu)[2], EXP_15, running_tasks); +} + /* jiffies at the most recent update of wall time */ unsigned long wall_jiffies = INITIAL_JIFFIES; diff -purN -X /home/mbligh/.diff.exclude reference/mm/Makefile current/mm/Makefile --- reference/mm/Makefile 2003-10-01 11:47:15.000000000 -0700 +++ current/mm/Makefile 2004-04-09 11:53:00.000000000 -0700 @@ -12,3 +12,4 @@ obj-y := bootmem.o filemap.o mempool.o slab.o swap.o truncate.o vmscan.o $(mmu-y) obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o +obj-$(CONFIG_X86_4G) += usercopy.o diff -purN -X /home/mbligh/.diff.exclude reference/mm/filemap.c current/mm/filemap.c --- reference/mm/filemap.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/filemap.c 2004-04-09 13:23:19.000000000 -0700 @@ -118,10 +118,12 @@ void remove_from_page_cache(struct page static inline int sync_page(struct page *page) { - struct address_space *mapping = page->mapping; + struct address_space *mapping = page_mapping(page); if (mapping && mapping->a_ops && mapping->a_ops->sync_page) return mapping->a_ops->sync_page(page); + if (PageSwapCache(page)) + blk_run_queues(); return 0; } @@ -235,13 +237,9 @@ EXPORT_SYMBOL(filemap_fdatawait); * This function is used for two things: adding newly allocated pagecache * pages and for moving existing anon pages into swapcache. * - * In the case of pagecache pages, the page is new, so we can just run - * SetPageLocked() against it. 
The other page state flags were set by - * rmqueue() - * - * In the case of swapcache, try_to_swap_out() has already locked the page, so - * SetPageLocked() is ugly-but-OK there too. The required page state has been - * set up by swap_out_add_to_swap_cache(). + * This function is used to add newly allocated pagecache pages: + * the page is new, so we can just run SetPageLocked() against it. + * The other page state flags were set by rmqueue(). * * This function does not add the page to the LRU. The caller must do that. */ @@ -256,7 +254,11 @@ int add_to_page_cache(struct page *page, error = radix_tree_insert(&mapping->page_tree, offset, page); if (!error) { SetPageLocked(page); - ___add_to_page_cache(page, mapping, offset); + list_add(&page->list, &mapping->clean_pages); + page->mapping = mapping; + page->index = offset; + mapping->nrpages++; + pagecache_acct(1); } else { page_cache_release(page); } @@ -294,22 +296,42 @@ static wait_queue_head_t *page_waitqueue return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; } -void fastcall wait_on_page_bit(struct page *page, int bit_nr) +/* + * wait for the specified page bit to be cleared + * this could be a synchronous wait or could just queue an async + * notification callback depending on the wait queue entry parameter + * + * A NULL wait queue parameter defaults to sync behaviour + */ +int fastcall wait_on_page_bit_wq(struct page *page, int bit_nr, wait_queue_t *wait) { wait_queue_head_t *waitqueue = page_waitqueue(page); - DEFINE_WAIT(wait); + DEFINE_WAIT(local_wait); + + if (!wait) + wait = &local_wait; /* default to a sync wait entry */ do { - prepare_to_wait(waitqueue, &wait, TASK_UNINTERRUPTIBLE); + prepare_to_wait(waitqueue, wait, TASK_UNINTERRUPTIBLE); if (test_bit(bit_nr, &page->flags)) { sync_page(page); + if (!is_sync_wait(wait)) { + /* + * if we've queued an async wait queue + * callback do not block; just tell the + * caller to return and retry later when + * the callback is notified + */ + 
return -EIOCBRETRY; + } io_schedule(); } } while (test_bit(bit_nr, &page->flags)); - finish_wait(waitqueue, &wait); -} + finish_wait(waitqueue, wait); -EXPORT_SYMBOL(wait_on_page_bit); + return 0; +} +EXPORT_SYMBOL(wait_on_page_bit_wq); /** * unlock_page() - unlock a locked page @@ -319,7 +341,9 @@ EXPORT_SYMBOL(wait_on_page_bit); * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). * Also wakes sleepers in wait_on_page_writeback() because the wakeup * mechananism between PageLocked pages and PageWriteback pages is shared. - * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. + * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep, + * or in case the wakeup notifies async wait queue entries, as in the case + * of aio, retries would be triggered and may re-queue their callbacks. * * The first mb is necessary to safely close the critical section opened by the * TestSetPageLocked(), the second mb is necessary to enforce ordering between @@ -360,26 +384,51 @@ void end_page_writeback(struct page *pag EXPORT_SYMBOL(end_page_writeback); /* - * Get a lock on the page, assuming we need to sleep to get it. + * Get a lock on the page, assuming we need to either sleep to get it + * or to queue an async notification callback to try again when its + * available. + * + * A NULL wait queue parameter defaults to sync behaviour. Otherwise + * it specifies the wait queue entry to be used for async notification + * or waiting. * * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some * random driver's requestfn sets TASK_RUNNING, we could busywait. However * chances are that on the second loop, the block layer's plug list is empty, * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. 
*/ -void fastcall __lock_page(struct page *page) +int fastcall __lock_page_wq(struct page *page, wait_queue_t *wait) { wait_queue_head_t *wqh = page_waitqueue(page); - DEFINE_WAIT(wait); + DEFINE_WAIT(local_wait); + + if (!wait) + wait = &local_wait; while (TestSetPageLocked(page)) { - prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); + prepare_to_wait(wqh, wait, TASK_UNINTERRUPTIBLE); if (PageLocked(page)) { sync_page(page); + if (!is_sync_wait(wait)) { + /* + * if we've queued an async wait queue + * callback do not block; just tell the + * caller to return and retry later when + * the callback is notified + */ + return -EIOCBRETRY; + } io_schedule(); } } - finish_wait(wqh, &wait); + finish_wait(wqh, wait); + return 0; +} +EXPORT_SYMBOL(__lock_page_wq); + +void fastcall __lock_page(struct page *page) +{ + __lock_page_wq(page, NULL); } EXPORT_SYMBOL(__lock_page); @@ -523,9 +572,12 @@ EXPORT_SYMBOL(find_or_create_page); * The search returns a group of mapping-contiguous pages with ascending * indexes. There may be holes in the indices due to not-present pages. * - * find_get_pages() returns the number of pages which were found. + * find_get_pages() returns the number of pages which were found + * and also atomically sets the next offset to continue looking up + * mapping contiguous pages from (useful when doing a range of + * pagevec lookups in chunks of PAGEVEC_SIZE). 
*/ -unsigned int find_get_pages(struct address_space *mapping, pgoff_t start, +unsigned int find_get_pages(struct address_space *mapping, pgoff_t *next, unsigned int nr_pages, struct page **pages) { unsigned int i; @@ -533,9 +585,12 @@ unsigned int find_get_pages(struct addre spin_lock(&mapping->page_lock); ret = radix_tree_gang_lookup(&mapping->page_tree, - (void **)pages, start, nr_pages); + (void **)pages, *next, nr_pages); for (i = 0; i < ret; i++) page_cache_get(pages[i]); + if (ret) + *next = pages[ret - 1]->index + 1; + spin_unlock(&mapping->page_lock); return ret; } @@ -597,6 +652,34 @@ void do_generic_mapping_read(struct addr index = *ppos >> PAGE_CACHE_SHIFT; offset = *ppos & ~PAGE_CACHE_MASK; + if (unlikely(in_aio())) { + unsigned long i, last, nr; + /* + * Let the readahead logic know upfront about all + * the pages we'll need to satisfy this request while + * taking care to avoid repeat readaheads during retries. + * Required for reasonable IO ordering with multipage + * streaming AIO requests. 
+ */ + if ((!is_retried_kiocb(io_wait_to_kiocb(current->io_wait))) + || (ra->prev_page + 1 == index)) { + + last = (*ppos + desc->count - 1) >> PAGE_CACHE_SHIFT; + nr = max_sane_readahead(last - index + 1); + + for (i = 0; (i < nr) && ((i == 0)||(i < ra->ra_pages)); + i++) { + page_cache_readahead(mapping, ra, filp, + index + i); + if (bdi_read_congested( + mapping->backing_dev_info)) { + //printk("AIO readahead congestion\n"); + break; + } + } + } + } + for (;;) { struct page *page; unsigned long end_index, nr, ret; @@ -614,7 +697,14 @@ void do_generic_mapping_read(struct addr } cond_resched(); - page_cache_readahead(mapping, ra, filp, index); + /* + * Take care to avoid disturbing the existing readahead + * window (concurrent reads may be active for the same fd, + * in the AIO case) + */ + if (!in_aio() || (ra->prev_page + 1 == index)) + page_cache_readahead(mapping, ra, filp, index); + nr = nr - offset; find_page: @@ -664,7 +754,12 @@ page_not_up_to_date: goto page_ok; /* Get exclusive access to the page ... */ - lock_page(page); + + if (lock_page_wq(page, current->io_wait)) { + pr_debug("queued lock page \n"); + error = -EIOCBRETRY; + goto sync_error; + } /* Did it get unhashed before we got the lock? */ if (!page->mapping) { @@ -686,13 +781,23 @@ readpage: if (!error) { if (PageUptodate(page)) goto page_ok; - wait_on_page_locked(page); + if (wait_on_page_locked_wq(page, current->io_wait)) { + pr_debug("queued wait_on_page \n"); + error = -EIOCBRETRY; + goto sync_error; + } + if (PageUptodate(page)) goto page_ok; error = -EIO; } - /* UHHUH! A synchronous read error occurred. Report it */ +sync_error: + /* We don't have uptodate data in the page yet */ + /* Could be due to an error or because we need to + * retry when we get an async i/o notification. + * Report the reason. 
+ */ desc->error = error; page_cache_release(page); break; @@ -846,22 +951,19 @@ __generic_file_aio_read(struct kiocb *io out: return retval; } - EXPORT_SYMBOL(__generic_file_aio_read); -ssize_t -generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) +ssize_t generic_file_aio_read(struct kiocb *iocb, char __user *buf, + size_t count, loff_t pos) { struct iovec local_iov = { .iov_base = buf, .iov_len = count }; - BUG_ON(iocb->ki_pos != pos); return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos); } - EXPORT_SYMBOL(generic_file_aio_read); -ssize_t -generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +ssize_t generic_file_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) { struct iovec local_iov = { .iov_base = buf, .iov_len = count }; struct kiocb kiocb; @@ -873,10 +975,10 @@ generic_file_read(struct file *filp, cha ret = wait_on_sync_kiocb(&kiocb); return ret; } - EXPORT_SYMBOL(generic_file_read); -int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size) +int file_send_actor(read_descriptor_t * desc, struct page *page, + unsigned long offset, unsigned long size) { ssize_t written; unsigned long count = desc->count; @@ -1724,7 +1826,7 @@ EXPORT_SYMBOL(generic_write_checks); * okir@monad.swb.de */ ssize_t -generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, +__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { struct file *file = iocb->ki_filp; @@ -1894,11 +1996,13 @@ generic_file_aio_write_nolock(struct kio /* * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC */ - if (status >= 0) { - if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) - status = generic_osync_inode(inode, mapping, - OSYNC_METADATA|OSYNC_DATA); - } + if (likely(status >= 0)) { + if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + if 
(!a_ops->writepage) + status = generic_osync_inode(inode, mapping, + OSYNC_METADATA|OSYNC_DATA); + } + } out_status: err = written ? written : status; @@ -1911,6 +2015,55 @@ out: EXPORT_SYMBOL(generic_file_aio_write_nolock); ssize_t +generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t *ppos) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + ssize_t ret; + loff_t pos = *ppos; + + if (!iov->iov_base && !is_sync_kiocb(iocb)) { + /* nothing to transfer, may just need to sync data */ + ret = iov->iov_len; /* vector AIO not supported yet */ + goto osync; + } + + ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos); + + /* + * Avoid doing a sync in parts for aio - it's more efficient to + * call in again after all the data has been copied + */ + if (!is_sync_kiocb(iocb)) + return ret; + +osync: + if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + ret = sync_page_range_nolock(inode, mapping, pos, ret); + if (ret >= 0) + *ppos = pos + ret; + } + return ret; +} + + +ssize_t +__generic_file_write_nolock(struct file *file, const struct iovec *iov, + unsigned long nr_segs, loff_t *ppos) +{ + struct kiocb kiocb; + ssize_t ret; + + init_sync_kiocb(&kiocb, file); + ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos); + if (-EIOCBQUEUED == ret) + ret = wait_on_sync_kiocb(&kiocb); + return ret; +} + +ssize_t generic_file_write_nolock(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { @@ -1930,36 +2083,62 @@ ssize_t generic_file_aio_write(struct ki size_t count, loff_t pos) { struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - ssize_t err; - struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count }; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + ssize_t ret; + struct iovec local_iov = 
{ .iov_base = (void __user *)buf, + .iov_len = count }; - BUG_ON(iocb->ki_pos != pos); + if (!buf && !is_sync_kiocb(iocb)) { + /* nothing to transfer, may just need to sync data */ + ret = count; + goto osync; + } down(&inode->i_sem); - err = generic_file_aio_write_nolock(iocb, &local_iov, 1, + ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos); up(&inode->i_sem); - return err; -} + /* + * Avoid doing a sync in parts for aio - it's more efficient to + * call in again after all the data has been copied + */ + if (!is_sync_kiocb(iocb)) + return ret; +osync: + if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + ret = sync_page_range(inode, mapping, pos, ret); + if (ret >= 0) + iocb->ki_pos = pos + ret; + } + return ret; +} EXPORT_SYMBOL(generic_file_aio_write); ssize_t generic_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct inode *inode = file->f_mapping->host; - ssize_t err; - struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count }; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + ssize_t ret; + struct iovec local_iov = { .iov_base = (void __user *)buf, + .iov_len = count }; down(&inode->i_sem); - err = generic_file_write_nolock(file, &local_iov, 1, ppos); + ret = __generic_file_write_nolock(file, &local_iov, 1, ppos); up(&inode->i_sem); - return err; -} + if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + ssize_t err; + err = sync_page_range(inode, mapping, *ppos - ret, ret); + if (err < 0) + ret = err; + } + return ret; +} EXPORT_SYMBOL(generic_file_write); ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, @@ -1978,14 +2157,23 @@ ssize_t generic_file_readv(struct file * EXPORT_SYMBOL(generic_file_readv); ssize_t generic_file_writev(struct file *file, const struct iovec *iov, - unsigned long nr_segs, loff_t * ppos) + unsigned long nr_segs, loff_t *ppos) { - struct inode *inode = 
file->f_mapping->host; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; ssize_t ret; down(&inode->i_sem); - ret = generic_file_write_nolock(file, iov, nr_segs, ppos); + ret = __generic_file_write_nolock(file, iov, nr_segs, ppos); up(&inode->i_sem); + + if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + ssize_t err; + + err = sync_page_range(inode, mapping, *ppos - ret, ret); + if (err < 0) + ret = err; + } return ret; } diff -purN -X /home/mbligh/.diff.exclude reference/mm/fremap.c current/mm/fremap.c --- reference/mm/fremap.c 2004-03-11 14:35:39.000000000 -0800 +++ current/mm/fremap.c 2004-04-09 11:52:59.000000000 -0700 @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include @@ -36,7 +36,7 @@ static inline void zap_pte(struct mm_str if (!PageReserved(page)) { if (pte_dirty(pte)) set_page_dirty(page); - page_remove_rmap(page, ptep); + page_remove_rmap(page); page_cache_release(page); mm->rss--; } @@ -49,7 +49,7 @@ static inline void zap_pte(struct mm_str } /* - * Install a page to a given virtual memory address, release any + * Install a file page to a given virtual memory address, release any * previously existing mapping. */ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, @@ -60,11 +60,13 @@ int install_page(struct mm_struct *mm, s pgd_t *pgd; pmd_t *pmd; pte_t pte_val; - struct pte_chain *pte_chain; - pte_chain = pte_chain_alloc(GFP_KERNEL); - if (!pte_chain) - goto err; + /* + * We use page_add_obj_rmap below: if install_page is + * ever extended to anonymous pages, this will warn us. 
+ */ + BUG_ON(!page_mapping(page)); + pgd = pgd_offset(mm, addr); spin_lock(&mm->page_table_lock); @@ -81,18 +83,14 @@ int install_page(struct mm_struct *mm, s mm->rss++; flush_icache_page(vma, page); set_pte(pte, mk_pte(page, prot)); - pte_chain = page_add_rmap(page, pte, pte_chain); + page_add_obj_rmap(page); pte_val = *pte; pte_unmap(pte); update_mmu_cache(vma, addr, pte_val); - spin_unlock(&mm->page_table_lock); - pte_chain_free(pte_chain); - return 0; + err = 0; err_unlock: spin_unlock(&mm->page_table_lock); - pte_chain_free(pte_chain); -err: return err; } EXPORT_SYMBOL(install_page); @@ -188,9 +186,12 @@ asmlinkage long sys_remap_file_pages(uns /* * Make sure the vma is shared, that it supports prefaulting, * and that the remapped range is valid and fully within - * the single existing vma: + * the single existing vma. vm_private_data is used as a + * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED + * or VM_LOCKED, but VM_LOCKED could be revoked later on). */ if (vma && (vma->vm_flags & VM_SHARED) && + (!vma->vm_private_data || (vma->vm_flags & VM_RESERVED)) && vma->vm_ops && vma->vm_ops->populate && end > start && start >= vma->vm_start && end <= vma->vm_end) { diff -purN -X /home/mbligh/.diff.exclude reference/mm/memory.c current/mm/memory.c --- reference/mm/memory.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/memory.c 2004-04-09 21:41:41.000000000 -0700 @@ -43,12 +43,11 @@ #include #include #include -#include +#include #include #include #include -#include #include #include #include @@ -67,12 +66,21 @@ EXPORT_SYMBOL(mem_map); #endif unsigned long num_physpages; +/* + * A number of key systems in x86 including ioremap() rely on the assumption + * that high_memory defines the upper bound on direct map memory, then end + * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and + * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL + * and ZONE_HIGHMEM. 
+ */ void * high_memory; struct page *highmem_start_page; +unsigned long vmalloc_earlyreserve; EXPORT_SYMBOL(num_physpages); EXPORT_SYMBOL(highmem_start_page); EXPORT_SYMBOL(high_memory); +EXPORT_SYMBOL(vmalloc_earlyreserve); /* * We special-case the C-O-W ZERO_PAGE, because it's such @@ -105,11 +113,12 @@ static inline void free_one_pmd(struct m } page = pmd_page(*dir); pmd_clear(dir); - pgtable_remove_rmap(page); + dec_page_state(nr_page_table_pages); pte_free_tlb(tlb, page); } -static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir) +static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir, + int pgd_idx) { int j; pmd_t * pmd; @@ -123,8 +132,11 @@ static inline void free_one_pgd(struct m } pmd = pmd_offset(dir, 0); pgd_clear(dir); - for (j = 0; j < PTRS_PER_PMD ; j++) + for (j = 0; j < PTRS_PER_PMD ; j++) { + if (pgd_idx * PGDIR_SIZE + j * PMD_SIZE >= TASK_SIZE) + break; free_one_pmd(tlb, pmd+j); + } pmd_free_tlb(tlb, pmd); } @@ -137,11 +149,13 @@ static inline void free_one_pgd(struct m void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr) { pgd_t * page_dir = tlb->mm->pgd; + int pgd_idx = first; page_dir += first; do { - free_one_pgd(tlb, page_dir); + free_one_pgd(tlb, page_dir, pgd_idx); page_dir++; + pgd_idx++; } while (--nr); } @@ -164,7 +178,7 @@ pte_t fastcall * pte_alloc_map(struct mm pte_free(new); goto out; } - pgtable_add_rmap(new, mm, address); + inc_page_state(nr_page_table_pages); pmd_populate(mm, pmd, new); } out: @@ -190,7 +204,6 @@ pte_t fastcall * pte_alloc_kernel(struct pte_free_kernel(new); goto out; } - pgtable_add_rmap(virt_to_page(new), mm, address); pmd_populate_kernel(mm, pmd, new); } out: @@ -217,20 +230,10 @@ int copy_page_range(struct mm_struct *ds unsigned long address = vma->vm_start; unsigned long end = vma->vm_end; unsigned long cow; - struct pte_chain *pte_chain = NULL; if (is_vm_hugetlb_page(vma)) return copy_hugetlb_page_range(dst, src, vma); - pte_chain = pte_chain_alloc(GFP_ATOMIC 
| __GFP_NOWARN); - if (!pte_chain) { - spin_unlock(&dst->page_table_lock); - pte_chain = pte_chain_alloc(GFP_KERNEL); - spin_lock(&dst->page_table_lock); - if (!pte_chain) - goto nomem; - } - cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; src_pgd = pgd_offset(src, address)-1; dst_pgd = pgd_offset(dst, address)-1; @@ -329,32 +332,8 @@ skip_copy_pte_range: pte = pte_mkold(pte); get_page(page); dst->rss++; - set_pte(dst_pte, pte); - pte_chain = page_add_rmap(page, dst_pte, - pte_chain); - if (pte_chain) - goto cont_copy_pte_range_noset; - pte_chain = pte_chain_alloc(GFP_ATOMIC | __GFP_NOWARN); - if (pte_chain) - goto cont_copy_pte_range_noset; - - /* - * pte_chain allocation failed, and we need to - * run page reclaim. - */ - pte_unmap_nested(src_pte); - pte_unmap(dst_pte); - spin_unlock(&src->page_table_lock); - spin_unlock(&dst->page_table_lock); - pte_chain = pte_chain_alloc(GFP_KERNEL); - spin_lock(&dst->page_table_lock); - if (!pte_chain) - goto nomem; - spin_lock(&src->page_table_lock); - dst_pte = pte_offset_map(dst_pmd, address); - src_pte = pte_offset_map_nested(src_pmd, - address); + page_dup_rmap(page); cont_copy_pte_range_noset: address += PAGE_SIZE; if (address >= end) { @@ -377,10 +356,8 @@ cont_copy_pmd_range: out_unlock: spin_unlock(&src->page_table_lock); out: - pte_chain_free(pte_chain); return 0; nomem: - pte_chain_free(pte_chain); return -ENOMEM; } @@ -417,11 +394,11 @@ zap_pte_range(struct mmu_gather *tlb, pm if (!PageReserved(page)) { if (pte_dirty(pte)) set_page_dirty(page); - if (page->mapping && pte_young(pte) && - !PageSwapCache(page)) + if (pte_young(pte) && + page_mapping(page)) mark_page_accessed(page); tlb->freed++; - page_remove_rmap(page, ptep); + page_remove_rmap(page); tlb_remove_page(tlb, page); } } @@ -439,7 +416,7 @@ zap_pmd_range(struct mmu_gather *tlb, pg unsigned long address, unsigned long size) { pmd_t * pmd; - unsigned long end; + unsigned long end, pgd_boundary; if (pgd_none(*dir)) return; @@ -450,8 +427,9 
@@ zap_pmd_range(struct mmu_gather *tlb, pg } pmd = pmd_offset(dir, address); end = address + size; - if (end > ((address + PGDIR_SIZE) & PGDIR_MASK)) - end = ((address + PGDIR_SIZE) & PGDIR_MASK); + pgd_boundary = ((address + PGDIR_SIZE) & PGDIR_MASK); + if (pgd_boundary && (end > pgd_boundary)) + end = pgd_boundary; do { zap_pte_range(tlb, pmd, address, end - address); address = (address + PMD_SIZE) & PMD_MASK; @@ -1014,7 +992,6 @@ static int do_wp_page(struct mm_struct * { struct page *old_page, *new_page; unsigned long pfn = pte_pfn(pte); - struct pte_chain *pte_chain; pte_t entry; if (unlikely(!pfn_valid(pfn))) { @@ -1039,6 +1016,14 @@ static int do_wp_page(struct mm_struct * entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)), vma); ptep_establish(vma, address, page_table, entry); + if (PageAnon(old_page)) { + /* + * Optimization: the page may have been + * registered under a long defunct mm: + * now we know it belongs only to this. + */ + page_update_anon_rmap(old_page, mm, address); + } update_mmu_cache(vma, address, entry); pte_unmap(page_table); spin_unlock(&mm->page_table_lock); @@ -1053,9 +1038,6 @@ static int do_wp_page(struct mm_struct * page_cache_get(old_page); spin_unlock(&mm->page_table_lock); - pte_chain = pte_chain_alloc(GFP_KERNEL); - if (!pte_chain) - goto no_pte_chain; new_page = alloc_page(GFP_HIGHUSER); if (!new_page) goto no_new_page; @@ -1069,10 +1051,11 @@ static int do_wp_page(struct mm_struct * if (pte_same(*page_table, pte)) { if (PageReserved(old_page)) ++mm->rss; - page_remove_rmap(old_page, page_table); + else + page_remove_rmap(old_page); break_cow(vma, new_page, address, page_table); - pte_chain = page_add_rmap(new_page, page_table, pte_chain); lru_cache_add_active(new_page); + page_add_anon_rmap(new_page, mm, address); /* Free the old page.. 
*/ new_page = old_page; @@ -1081,12 +1064,9 @@ static int do_wp_page(struct mm_struct * page_cache_release(new_page); page_cache_release(old_page); spin_unlock(&mm->page_table_lock); - pte_chain_free(pte_chain); return VM_FAULT_MINOR; no_new_page: - pte_chain_free(pte_chain); -no_pte_chain: page_cache_release(old_page); return VM_FAULT_OOM; } @@ -1244,7 +1224,6 @@ static int do_swap_page(struct mm_struct swp_entry_t entry = pte_to_swp_entry(orig_pte); pte_t pte; int ret = VM_FAULT_MINOR; - struct pte_chain *pte_chain = NULL; pte_unmap(page_table); spin_unlock(&mm->page_table_lock); @@ -1274,11 +1253,6 @@ static int do_swap_page(struct mm_struct } mark_page_accessed(page); - pte_chain = pte_chain_alloc(GFP_KERNEL); - if (!pte_chain) { - ret = VM_FAULT_OOM; - goto out; - } lock_page(page); /* @@ -1298,11 +1272,13 @@ static int do_swap_page(struct mm_struct /* The page isn't present yet, go ahead with the fault. */ + mm->rss++; + page_add_anon_rmap(page, mm, address); + swap_free(entry); if (vm_swap_full()) remove_exclusive_swap_page(page); - mm->rss++; pte = mk_pte(page, vma->vm_page_prot); if (write_access && can_share_swap_page(page)) pte = maybe_mkwrite(pte_mkdirty(pte), vma); @@ -1310,14 +1286,12 @@ static int do_swap_page(struct mm_struct flush_icache_page(vma, page); set_pte(page_table, pte); - pte_chain = page_add_rmap(page, page_table, pte_chain); /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, pte); pte_unmap(page_table); spin_unlock(&mm->page_table_lock); out: - pte_chain_free(pte_chain); return ret; } @@ -1333,20 +1307,7 @@ do_anonymous_page(struct mm_struct *mm, { pte_t entry; struct page * page = ZERO_PAGE(addr); - struct pte_chain *pte_chain; - int ret; - pte_chain = pte_chain_alloc(GFP_ATOMIC | __GFP_NOWARN); - if (!pte_chain) { - pte_unmap(page_table); - spin_unlock(&mm->page_table_lock); - pte_chain = pte_chain_alloc(GFP_KERNEL); - if (!pte_chain) - goto no_mem; - spin_lock(&mm->page_table_lock); - page_table = 
pte_offset_map(pmd, addr); - } - /* Read-only mapping of ZERO_PAGE. */ entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot)); @@ -1368,7 +1329,6 @@ do_anonymous_page(struct mm_struct *mm, pte_unmap(page_table); page_cache_release(page); spin_unlock(&mm->page_table_lock); - ret = VM_FAULT_MINOR; goto out; } mm->rss++; @@ -1377,24 +1337,19 @@ do_anonymous_page(struct mm_struct *mm, vma); lru_cache_add_active(page); mark_page_accessed(page); + page_add_anon_rmap(page, mm, addr); } set_pte(page_table, entry); - /* ignores ZERO_PAGE */ - pte_chain = page_add_rmap(page, page_table, pte_chain); pte_unmap(page_table); /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, addr, entry); spin_unlock(&mm->page_table_lock); - ret = VM_FAULT_MINOR; - goto out; - -no_mem: - ret = VM_FAULT_OOM; out: - pte_chain_free(pte_chain); - return ret; + return VM_FAULT_MINOR; +no_mem: + return VM_FAULT_OOM; } /* @@ -1416,9 +1371,9 @@ do_no_page(struct mm_struct *mm, struct struct page * new_page; struct address_space *mapping = NULL; pte_t entry; - struct pte_chain *pte_chain; int sequence = 0; int ret = VM_FAULT_MINOR; + int anon = 0; if (!vma->vm_ops || !vma->vm_ops->nopage) return do_anonymous_page(mm, vma, page_table, @@ -1440,10 +1395,6 @@ retry: if (new_page == NOPAGE_OOM) return VM_FAULT_OOM; - pte_chain = pte_chain_alloc(GFP_KERNEL); - if (!pte_chain) - goto oom; - /* * Should we do an early C-O-W break? 
*/ @@ -1453,8 +1404,8 @@ retry: goto oom; copy_user_highpage(page, new_page, address); page_cache_release(new_page); - lru_cache_add_active(page); new_page = page; + anon = 1; } spin_lock(&mm->page_table_lock); @@ -1468,7 +1419,6 @@ retry: sequence = atomic_read(&mapping->truncate_count); spin_unlock(&mm->page_table_lock); page_cache_release(new_page); - pte_chain_free(pte_chain); goto retry; } page_table = pte_offset_map(pmd, address); @@ -1492,7 +1442,11 @@ retry: if (write_access) entry = maybe_mkwrite(pte_mkdirty(entry), vma); set_pte(page_table, entry); - pte_chain = page_add_rmap(new_page, page_table, pte_chain); + if (anon) { + lru_cache_add_active(new_page); + page_add_anon_rmap(new_page, mm, address); + } else + page_add_obj_rmap(new_page); pte_unmap(page_table); } else { /* One of our sibling threads was faster, back out. */ @@ -1510,7 +1464,6 @@ oom: page_cache_release(new_page); ret = VM_FAULT_OOM; out: - pte_chain_free(pte_chain); return ret; } diff -purN -X /home/mbligh/.diff.exclude reference/mm/mmap.c current/mm/mmap.c --- reference/mm/mmap.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/mmap.c 2004-04-09 21:41:39.000000000 -0700 @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -62,6 +63,9 @@ EXPORT_SYMBOL(sysctl_overcommit_ratio); EXPORT_SYMBOL(sysctl_max_map_count); EXPORT_SYMBOL(vm_committed_space); +int mmap_use_hugepages = 0; +int mmap_hugepages_map_sz = 256; + /* * Requires inode->i_mapping->i_shared_sem */ @@ -333,8 +337,6 @@ static inline int is_mergeable_vma(struc return 0; if (vma->vm_flags != vm_flags) return 0; - if (vma->vm_private_data) - return 0; return 1; } @@ -385,7 +387,8 @@ can_vma_merge_after(struct vm_area_struc * whether that can be merged with its predecessor or its successor. Or * both (it neatly fills a hole). 
*/ -static int vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, +static struct vm_area_struct *vma_merge(struct mm_struct *mm, + struct vm_area_struct *prev, struct rb_node *rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags, struct file *file, unsigned long pgoff) @@ -399,7 +402,7 @@ static int vma_merge(struct mm_struct *m * vma->vm_flags & VM_SPECIAL, too. */ if (vm_flags & VM_SPECIAL) - return 0; + return NULL; i_shared_sem = file ? &file->f_mapping->i_shared_sem : NULL; @@ -412,7 +415,6 @@ static int vma_merge(struct mm_struct *m * Can it merge with the predecessor? */ if (prev->vm_end == addr && - is_mergeable_vma(prev, file, vm_flags) && can_vma_merge_after(prev, vm_flags, file, pgoff)) { struct vm_area_struct *next; int need_up = 0; @@ -443,12 +445,12 @@ static int vma_merge(struct mm_struct *m mm->map_count--; kmem_cache_free(vm_area_cachep, next); - return 1; + return prev; } spin_unlock(lock); if (need_up) up(i_shared_sem); - return 1; + return prev; } /* @@ -459,7 +461,7 @@ static int vma_merge(struct mm_struct *m merge_next: if (!can_vma_merge_before(prev, vm_flags, file, pgoff, (end - addr) >> PAGE_SHIFT)) - return 0; + return NULL; if (end == prev->vm_start) { if (file) down(i_shared_sem); @@ -469,11 +471,51 @@ static int vma_merge(struct mm_struct *m spin_unlock(lock); if (file) up(i_shared_sem); - return 1; + return prev; } } - return 0; + return NULL; +} + +#ifdef CONFIG_HUGETLBFS +int mmap_hugetlb_implicit(unsigned long len) +{ + /* Are we enabled? */ + if (!mmap_use_hugepages) + return 0; + /* Must be HPAGE aligned */ + if (len & ~HPAGE_MASK) + return 0; + /* Are we under the minimum size? 
*/ + if (mmap_hugepages_map_sz + && len < (mmap_hugepages_map_sz << 20)) + return 0; + + return 1; +} +#else +int mmap_hugetlb_implicit(unsigned long len) { return 0; } +#endif + +unsigned long +try_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long *flags) +{ + if (!capable(CAP_IPC_LOCK)) + return -EPERM; + + if (*flags & MAP_HUGETLB) { + return hugetlb_get_unmapped_area(NULL, addr, len, pgoff, *flags); + } + + if (mmap_hugetlb_implicit(len)) { + addr = hugetlb_get_unmapped_area(NULL, addr, len, pgoff, *flags); + if (!(addr & ~HPAGE_MASK)) + *flags |= MAP_HUGETLB; + return addr; + } + return -ENOMEM; } /* @@ -492,7 +534,8 @@ unsigned long do_mmap_pgoff(struct file int error; struct rb_node ** rb_link, * rb_parent; int accountable = 1; - unsigned long charged = 0; + unsigned long charged = 0, addr_save = addr; + int hugetlb_explicit = (flags & MAP_HUGETLB) != 0; if (file) { if (is_file_hugepages(file)) @@ -523,8 +566,14 @@ unsigned long do_mmap_pgoff(struct file /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. + * VM_HUGETLB will never appear in vm_flags when CONFIG_HUGETLB is + * unset. 
*/ - addr = get_unmapped_area(file, addr, len, pgoff, flags); + addr = try_hugetlb_get_unmapped_area(NULL, addr, len, pgoff, &flags); + if (!(flags & MAP_HUGETLB)) +hugetlb_fallback: + addr = get_unmapped_area(file, addr_save, len, pgoff, flags); + if (addr & ~PAGE_MASK) return addr; @@ -673,10 +722,44 @@ munmap_back: error = file->f_op->mmap(file, vma); if (error) goto unmap_and_free_vma; - } else if (vm_flags & VM_SHARED) { - error = shmem_zero_setup(vma); - if (error) - goto free_vma; + } else if ((vm_flags & VM_SHARED) || (vm_flags & VM_HUGETLB)) { + if (!is_vm_hugetlb_page(vma)) { + error = shmem_zero_setup(vma); + if (error) + goto free_vma; + } else { + /* + * Presumably hugetlb_zero_setup() acquires a + * reference count for us. The difference + * between this and the shmem_zero_setup() + * case is that we can encounter an error + * _after_ allocating the file. The error + * path was adjusted slightly to fput() for us. + */ + struct file *new_file = hugetlb_zero_setup(len); + if (IS_ERR(new_file)) { + if (hugetlb_explicit) { + error = PTR_ERR(new_file); + goto free_vma; + } else { + /* + * We tried an implicit hugetlb mmap + * but we failed to get the pages. + * We basically have to start over. + */ + flags &= ~MAP_HUGETLB; + kmem_cache_free(vm_area_cachep, vma); + if (charged) + vm_unacct_memory(charged); + goto hugetlb_fallback; + } + } else { + vma->vm_file = new_file; + error = new_file->f_op->mmap(new_file, vma); + if (error) + goto unmap_and_free_vma; + } + } } /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform @@ -724,11 +807,21 @@ out: unmap_and_free_vma: if (correct_wcount) atomic_inc(&inode->i_writecount); - vma->vm_file = NULL; - fput(file); - /* Undo any partial mapping done by a device driver. */ + /* + * Undo any partial mapping done by a device driver. + * hugetlb wants to know the vma's file etc. so nuke + * the file afterward. 
+ */ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start); + + /* + * vma->vm_file may be different from file in the hugetlb case. + */ + if (vma->vm_file) + fput(vma->vm_file); + vma->vm_file = NULL; + free_vma: kmem_cache_free(vm_area_cachep, vma); unacct_error: @@ -1492,5 +1585,57 @@ void insert_vm_struct(struct mm_struct * if (__vma && __vma->vm_start < vma->vm_end) BUG(); vma_link(mm, vma, prev, rb_link, rb_parent); - validate_mm(mm); +} + +/* + * Copy the vma structure to a new location in the same mm, + * prior to moving page table entries, to effect an mremap move. + */ +struct vm_area_struct *copy_vma(struct vm_area_struct *vma, + unsigned long addr, unsigned long len, unsigned long pgoff) +{ + struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *new_vma, *prev; + struct rb_node **rb_link, *rb_parent; + + find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + new_vma = vma_merge(mm, prev, rb_parent, addr, addr + len, + vma->vm_flags, vma->vm_file, pgoff); + if (!new_vma) { + new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + if (new_vma) { + *new_vma = *vma; + INIT_LIST_HEAD(&new_vma->shared); + new_vma->vm_start = addr; + new_vma->vm_end = addr + len; + new_vma->vm_pgoff = pgoff; + if (new_vma->vm_file) + get_file(new_vma->vm_file); + if (new_vma->vm_ops && new_vma->vm_ops->open) + new_vma->vm_ops->open(new_vma); + vma_link(mm, new_vma, prev, rb_link, rb_parent); + } + } + return new_vma; +} + +/* + * Position vma after prev in shared file list: + * for mremap move error recovery racing against vmtruncate. 
+ */ +void vma_relink_file(struct vm_area_struct *vma, struct vm_area_struct *prev) +{ + struct mm_struct *mm = vma->vm_mm; + struct address_space *mapping; + + if (vma->vm_file) { + mapping = vma->vm_file->f_mapping; + if (mapping) { + down(&mapping->i_shared_sem); + spin_lock(&mm->page_table_lock); + list_move(&vma->shared, &prev->shared); + spin_unlock(&mm->page_table_lock); + up(&mapping->i_shared_sem); + } + } } diff -purN -X /home/mbligh/.diff.exclude reference/mm/mremap.c current/mm/mremap.c --- reference/mm/mremap.c 2004-02-18 14:57:24.000000000 -0800 +++ current/mm/mremap.c 2004-04-09 11:52:11.000000000 -0700 @@ -15,7 +15,9 @@ #include #include #include -#include +#include +#include +#include #include #include @@ -79,32 +81,102 @@ static inline pte_t *alloc_one_pte_map(s return pte; } -static int -copy_one_pte(struct vm_area_struct *vma, unsigned long old_addr, - pte_t *src, pte_t *dst, struct pte_chain **pte_chainp) +#ifdef CONFIG_SWAP +/* + * rmap_needs_broken_cow is for mremap MAYMOVE's move_one_page. + * The anonmm objrmap can only track anon page movements if the + * page (or swap entry) is exclusive to the mm, but we don't + * want the waste of early COW break unless it's necessary. + * This tells us, with side-effect to update anon rmap if okay. + * page_table_lock (and mmap_sem) are held throughout. 
+ */ +static int rmap_needs_broken_cow(pte_t *ptep, unsigned long new_addr) { - int error = 0; - pte_t pte; - struct page *page = NULL; - - if (pte_present(*src)) - page = pte_page(*src); + pte_t pte = *ptep; + unsigned long pfn; + struct page *page; + swp_entry_t entry; + struct swap_info_struct *si; + unsigned int mapcount = 0; + + if (pte_present(pte)) { + pfn = pte_pfn(pte); + if (!pfn_valid(pfn)) + return 0; + page = pfn_to_page(pfn); + if (!PageAnon(page)) + return 0; + if (pte_write(pte)) + goto update; +again: + /* + * page->private on a PageAnon page is always the + * swap entry (if PageSwapCache) or 0 (if not): + * so we can peep at page->private without taking + * a lock, no need to check PageSwapCache too. + */ + entry.val = page->private; + smp_rmb(); + mapcount = page->mapcount; + if (mapcount > 1) + return 1; + if (!entry.val) + goto update; + /* + * This is tricky: entry can get freed right here, + * since we don't hold the page lock (and cannot wait + * for it). Use swap_duplicate which already allows + * for that, before the less forgiving swap_info_get. + */ + if (!swap_duplicate(entry)) + goto again; + si = swap_info_get(entry); + if (si) { + mapcount = si->swap_map[swp_offset(entry)] + + page->mapcount - 2; + swap_info_put(si); + } else + mapcount = 0; + swap_free(entry); + if (entry.val != page->private) + goto again; + if (mapcount > 1) + return 1; +update: + /* Before we forget the struct page, update its rmap */ + page_update_anon_rmap(page, current->mm, new_addr); + return 0; + } - if (!pte_none(*src)) { - if (page) - page_remove_rmap(page, src); - pte = ptep_clear_flush(vma, old_addr, src); - if (!dst) { - /* No dest? We must put it back. 
*/ - dst = src; - error++; + if (!pte_file(pte) && !pte_none(pte)) { + entry = pte_to_swp_entry(pte); + si = swap_info_get(entry); + if (si) { + page = NULL; + mapcount = si->swap_map[swp_offset(entry)]; + if (mapcount == 2) { + page = lookup_swap_cache(entry); + if (page) + mapcount = page->mapcount + 1; + } + swap_info_put(si); + if (page) + page_cache_release(page); } - set_pte(dst, pte); - if (page) - *pte_chainp = page_add_rmap(page, dst, *pte_chainp); } - return error; + + return mapcount > 1; } +#else /* !CONFIG_SWAP */ + +/* + * The swap interfaces used above are not available. Actually, + * all of the anonymous rmap is just a waste of space-time in this case. + * But no enthusiasm for peppering the code with #ifdefs right now. + */ +#define rmap_needs_broken_cow(ptep, new_addr) 0 + +#endif /* CONFIG_SWAP */ static int move_one_page(struct vm_area_struct *vma, unsigned long old_addr, @@ -113,13 +185,7 @@ move_one_page(struct vm_area_struct *vma struct mm_struct *mm = vma->vm_mm; int error = 0; pte_t *src, *dst; - struct pte_chain *pte_chain; - pte_chain = pte_chain_alloc(GFP_KERNEL); - if (!pte_chain) { - error = -ENOMEM; - goto out; - } spin_lock(&mm->page_table_lock); src = get_one_pte_map_nested(mm, old_addr); if (src) { @@ -140,22 +206,28 @@ move_one_page(struct vm_area_struct *vma * page_table_lock, we should re-check the src entry... 
*/ if (src) { - error = copy_one_pte(vma, old_addr, src, - dst, &pte_chain); + if (!dst) + error = -ENOMEM; + else if (rmap_needs_broken_cow(src, new_addr)) + error = -EAGAIN; + else { + pte_t pte; + pte = ptep_clear_flush(vma, old_addr, src); + set_pte(dst, pte); + } pte_unmap_nested(src); } pte_unmap(dst); } spin_unlock(&mm->page_table_lock); - pte_chain_free(pte_chain); -out: return error; } static int move_page_tables(struct vm_area_struct *vma, unsigned long new_addr, unsigned long old_addr, unsigned long len) { - unsigned long offset = len; + unsigned long offset = 0; + int ret; flush_cache_range(vma, old_addr, old_addr + len); @@ -164,137 +236,107 @@ static int move_page_tables(struct vm_ar * easy way out on the assumption that most remappings will be * only a few pages.. This also makes error recovery easier. */ - while (offset) { - offset -= PAGE_SIZE; - if (move_one_page(vma, old_addr + offset, new_addr + offset)) - goto oops_we_failed; + while (offset < len) { + ret = move_one_page(vma, old_addr+offset, new_addr+offset); + if (!ret) { + offset += PAGE_SIZE; + continue; + } + if (ret != -EAGAIN) + break; + /* + * The anonmm objrmap can only track anon page movements + * if the page (or swap entry) is exclusive to this mm. + * In the very unusual case when it's shared, break COW + * (take a copy of the page) to make it exclusive. If + * the page is shared and on swap, move_one_page will + * normally succeed on the third attempt (do_swap_page + * does not break COW); but under very great pressure it + * could get swapped out again and need more attempts. + */ + ret = handle_mm_fault(vma->vm_mm, vma, old_addr+offset, 1); + if (ret != VM_FAULT_MINOR && ret != VM_FAULT_MAJOR) + break; } - return 0; - - /* - * Ok, the move failed because we didn't have enough pages for - * the new page table tree. This is unlikely, but we have to - * take the possibility into account. 
In that case we just move - * all the pages back (this will work, because we still have - * the old page tables) - */ -oops_we_failed: - flush_cache_range(vma, new_addr, new_addr + len); - while ((offset += PAGE_SIZE) < len) - move_one_page(vma, new_addr + offset, old_addr + offset); - zap_page_range(vma, new_addr, len); - return -1; + return offset; } static unsigned long move_vma(struct vm_area_struct *vma, - unsigned long addr, unsigned long old_len, unsigned long new_len, - unsigned long new_addr) + unsigned long old_addr, unsigned long old_len, + unsigned long new_len, unsigned long new_addr) { struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *new_vma, *next, *prev; - int allocated_vma; + struct vm_area_struct *new_vma; + unsigned long vm_flags = vma->vm_flags; + unsigned long new_pgoff; + unsigned long moved_len; + unsigned long excess = 0; int split = 0; - new_vma = NULL; - next = find_vma_prev(mm, new_addr, &prev); - if (next) { - if (prev && prev->vm_end == new_addr && - can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && - !(vma->vm_flags & VM_SHARED)) { - spin_lock(&mm->page_table_lock); - prev->vm_end = new_addr + new_len; - spin_unlock(&mm->page_table_lock); - new_vma = prev; - if (next != prev->vm_next) - BUG(); - if (prev->vm_end == next->vm_start && - can_vma_merge(next, prev->vm_flags)) { - spin_lock(&mm->page_table_lock); - prev->vm_end = next->vm_end; - __vma_unlink(mm, next, prev); - spin_unlock(&mm->page_table_lock); - if (vma == next) - vma = prev; - mm->map_count--; - kmem_cache_free(vm_area_cachep, next); - } - } else if (next->vm_start == new_addr + new_len && - can_vma_merge(next, vma->vm_flags) && - !vma->vm_file && !(vma->vm_flags & VM_SHARED)) { - spin_lock(&mm->page_table_lock); - next->vm_start = new_addr; - spin_unlock(&mm->page_table_lock); - new_vma = next; - } - } else { - prev = find_vma(mm, new_addr-1); - if (prev && prev->vm_end == new_addr && - can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && - 
!(vma->vm_flags & VM_SHARED)) { - spin_lock(&mm->page_table_lock); - prev->vm_end = new_addr + new_len; - spin_unlock(&mm->page_table_lock); - new_vma = prev; - } - } + /* + * We'd prefer to avoid failure later on in do_munmap: + * which may split one vma into three before unmapping. + */ + if (mm->map_count >= sysctl_max_map_count - 3) + return -ENOMEM; - allocated_vma = 0; - if (!new_vma) { - new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); - if (!new_vma) - goto out; - allocated_vma = 1; - } - - if (!move_page_tables(vma, new_addr, addr, old_len)) { - unsigned long vm_locked = vma->vm_flags & VM_LOCKED; - - if (allocated_vma) { - *new_vma = *vma; - INIT_LIST_HEAD(&new_vma->shared); - new_vma->vm_start = new_addr; - new_vma->vm_end = new_addr+new_len; - new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT; - if (new_vma->vm_file) - get_file(new_vma->vm_file); - if (new_vma->vm_ops && new_vma->vm_ops->open) - new_vma->vm_ops->open(new_vma); - insert_vm_struct(current->mm, new_vma); - } + new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); + new_vma = copy_vma(vma, new_addr, new_len, new_pgoff); + if (!new_vma) + return -ENOMEM; - /* Conceal VM_ACCOUNT so old reservation is not undone */ - if (vma->vm_flags & VM_ACCOUNT) { - vma->vm_flags &= ~VM_ACCOUNT; - if (addr > vma->vm_start) { - if (addr + old_len < vma->vm_end) - split = 1; - } else if (addr + old_len == vma->vm_end) - vma = NULL; /* it will be removed */ - } else - vma = NULL; /* nothing more to do */ + moved_len = move_page_tables(vma, new_addr, old_addr, old_len); + if (moved_len < old_len) { + /* + * On error, move entries back from new area to old, + * which will succeed since page tables still there, + * and then proceed to unmap new area instead of old. 
+ * + * Subtle point from Rajesh Venkatasubramanian: before + * moving file-based ptes, move new_vma before old vma + * in the i_mmap or i_mmap_shared list, so when racing + * against vmtruncate we cannot propagate pages to be + * truncated back from new_vma into just cleaned old. + */ + vma_relink_file(vma, new_vma); + move_page_tables(new_vma, old_addr, new_addr, moved_len); + vma = new_vma; + old_len = new_len; + old_addr = new_addr; + new_addr = -ENOMEM; + } - do_munmap(current->mm, addr, old_len); + /* Conceal VM_ACCOUNT so old reservation is not undone */ + if (vm_flags & VM_ACCOUNT) { + vma->vm_flags &= ~VM_ACCOUNT; + excess = vma->vm_end - vma->vm_start - old_len; + if (old_addr > vma->vm_start && + old_addr + old_len < vma->vm_end) + split = 1; + } - /* Restore VM_ACCOUNT if one or two pieces of vma left */ - if (vma) { - vma->vm_flags |= VM_ACCOUNT; - if (split) - vma->vm_next->vm_flags |= VM_ACCOUNT; - } + if (do_munmap(mm, old_addr, old_len) < 0) { + /* OOM: unable to split vma, just get accounts right */ + vm_unacct_memory(excess >> PAGE_SHIFT); + excess = 0; + } - current->mm->total_vm += new_len >> PAGE_SHIFT; - if (vm_locked) { - current->mm->locked_vm += new_len >> PAGE_SHIFT; - if (new_len > old_len) - make_pages_present(new_addr + old_len, - new_addr + new_len); - } - return new_addr; + /* Restore VM_ACCOUNT if one or two pieces of vma left */ + if (excess) { + vma->vm_flags |= VM_ACCOUNT; + if (split) + vma->vm_next->vm_flags |= VM_ACCOUNT; + } + + mm->total_vm += new_len >> PAGE_SHIFT; + if (vm_flags & VM_LOCKED) { + mm->locked_vm += new_len >> PAGE_SHIFT; + if (new_len > old_len) + make_pages_present(new_addr + old_len, + new_addr + new_len); } - if (allocated_vma) - kmem_cache_free(vm_area_cachep, new_vma); - out: - return -ENOMEM; + + return new_addr; } /* @@ -438,6 +480,7 @@ unsigned long do_mremap(unsigned long ad if (flags & MREMAP_MAYMOVE) { if (!(flags & MREMAP_FIXED)) { unsigned long map_flags = 0; + if (vma->vm_flags & VM_MAYSHARE) 
map_flags |= MAP_SHARED; diff -purN -X /home/mbligh/.diff.exclude reference/mm/nommu.c current/mm/nommu.c --- reference/mm/nommu.c 2004-02-04 16:24:35.000000000 -0800 +++ current/mm/nommu.c 2004-04-08 15:10:25.000000000 -0700 @@ -567,7 +567,3 @@ unsigned long get_unmapped_area(struct f { return -ENOMEM; } - -void pte_chain_init(void) -{ -} diff -purN -X /home/mbligh/.diff.exclude reference/mm/page-writeback.c current/mm/page-writeback.c --- reference/mm/page-writeback.c 2004-02-04 16:24:35.000000000 -0800 +++ current/mm/page-writeback.c 2004-04-09 13:23:20.000000000 -0700 @@ -28,6 +28,7 @@ #include #include #include +#include /* * The maximum number of pages to writeout in a single bdflush/kupdate @@ -532,6 +533,24 @@ int __set_page_dirty_nobuffers(struct pa EXPORT_SYMBOL(__set_page_dirty_nobuffers); /* + * If the mapping doesn't provide a set_page_dirty a_op, then + * just fall through and assume that it wants buffer_heads. + */ +int set_page_dirty(struct page *page) +{ + struct address_space *mapping = page_mapping(page); + int (*spd)(struct page *); + + if (!mapping) { + SetPageDirty(page); + return 0; + } + spd = mapping->a_ops->set_page_dirty; + return spd? (*spd)(page): __set_page_dirty_buffers(page); +} +EXPORT_SYMBOL(set_page_dirty); + +/* * set_page_dirty() is racy if the caller has no reference against * page->mapping->host, and if the page is unlocked. This is because another * CPU could truncate the page off the mapping and then free the mapping. 
@@ -559,7 +578,7 @@ EXPORT_SYMBOL(set_page_dirty_lock); int test_clear_page_dirty(struct page *page) { if (TestClearPageDirty(page)) { - struct address_space *mapping = page->mapping; + struct address_space *mapping = page_mapping(page); if (mapping && !mapping->backing_dev_info->memory_backed) dec_page_state(nr_dirty); @@ -568,3 +587,152 @@ int test_clear_page_dirty(struct page *p return 0; } EXPORT_SYMBOL(test_clear_page_dirty); + + +static ssize_t operate_on_page_range(struct address_space *mapping, + loff_t pos, size_t count, int (*operator)(struct page *)) +{ + pgoff_t first = pos >> PAGE_CACHE_SHIFT; + pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT; /* inclusive */ + pgoff_t next = first, curr = first; + struct pagevec pvec; + ssize_t ret = 0, bytes = 0; + int i, nr; + + if (count == 0) + return 0; + + pagevec_init(&pvec, 0); + while ((nr = pagevec_lookup(&pvec, mapping, &next, + min((pgoff_t)PAGEVEC_SIZE, last - next + 1)))) { + for (i = 0; i < pagevec_count(&pvec); i++) { + struct page *page = pvec.pages[i]; + + curr = page->index; + if (page->mapping != mapping) /* truncated ?*/ { + curr = next; + break; + } else { + ret = (*operator)(page); + if (ret == -EIOCBRETRY) + break; + if (PageError(page)) { + if (!ret) + ret = -EIO; + } else + curr++; + } + } + pagevec_release(&pvec); + if ((ret == -EIOCBRETRY) || (next > last)) + break; + } + if (!nr) + curr = last + 1; + + bytes = (curr << PAGE_CACHE_SHIFT) - pos; + if (bytes > count) + bytes = count; + return (bytes && (!ret || (ret == -EIOCBRETRY))) ? 
bytes : ret; +} + +static int page_waiter(struct page *page) +{ + return wait_on_page_writeback_wq(page, current->io_wait); +} + +static size_t +wait_on_page_range(struct address_space *mapping, loff_t pos, size_t count) +{ + return operate_on_page_range(mapping, pos, count, page_waiter); +} + +static int page_writer(struct page *page) +{ + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = 1, + }; + + lock_page(page); + if (!page->mapping) { /* truncated */ + unlock_page(page); + return 0; + } + if (!test_clear_page_dirty(page)) { + unlock_page(page); + return 0; + } + wait_on_page_writeback(page); + return page->mapping->a_ops->writepage(page, &wbc); +} + +static ssize_t +write_out_page_range(struct address_space *mapping, loff_t pos, size_t count) +{ + return operate_on_page_range(mapping, pos, count, page_writer); +} + +/* + * Write and wait upon all the pages in the passed range. This is a "data + * integrity" operation. It waits upon in-flight writeout before starting and + * waiting upon new writeout. If there was an IO error, return it. + * + * We need to re-take i_sem during the generic_osync_inode list walk because + * it is otherwise livelockable. + */ +ssize_t sync_page_range(struct inode *inode, struct address_space *mapping, + loff_t pos, size_t count) +{ + int ret = 0; + + if (in_aio()) { + /* Already issued writeouts for this iocb ? 
*/ + if (kiocbTrySync(io_wait_to_kiocb(current->io_wait))) + goto do_wait; /* just need to check if done */ + } + if (!mapping->a_ops->writepage) + return 0; + if (mapping->backing_dev_info->memory_backed) + return 0; + ret = write_out_page_range(mapping, pos, count); + if (ret >= 0) { + down(&inode->i_sem); + ret = generic_osync_inode(inode, mapping, OSYNC_METADATA); + up(&inode->i_sem); + } +do_wait: + if (ret >= 0) + ret = wait_on_page_range(mapping, pos, count); + return ret; +} + +/* + * It is really better to use sync_page_range, rather than call + * sync_page_range_nolock while holding i_sem, if you don't + * want to block parallel O_SYNC writes until the pages in this + * range are written out. + */ +ssize_t sync_page_range_nolock(struct inode *inode, struct address_space + *mapping, loff_t pos, size_t count) +{ + ssize_t ret = 0; + + if (in_aio()) { + /* Already issued writeouts for this iocb ? */ + if (kiocbTrySync(io_wait_to_kiocb(current->io_wait))) + goto do_wait; /* just need to check if done */ + } + if (!mapping->a_ops->writepage) + return 0; + if (mapping->backing_dev_info->memory_backed) + return 0; + ret = write_out_page_range(mapping, pos, count); + if (ret >= 0) { + ret = generic_osync_inode(inode, mapping, OSYNC_METADATA); + } +do_wait: + if (ret >= 0) + ret = wait_on_page_range(mapping, pos, count); + return ret; +} diff -purN -X /home/mbligh/.diff.exclude reference/mm/page_alloc.c current/mm/page_alloc.c --- reference/mm/page_alloc.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/page_alloc.c 2004-04-09 11:53:01.000000000 -0700 @@ -83,6 +83,9 @@ static void bad_page(const char *functio 1 << PG_lru | 1 << PG_active | 1 << PG_dirty | + 1 << PG_rmaplock | + 1 << PG_anon | + 1 << PG_swapcache | 1 << PG_writeback); set_page_count(page, 0); page->mapping = NULL; @@ -220,6 +223,9 @@ static inline void free_pages_check(cons 1 << PG_active | 1 << PG_reclaim | 1 << PG_slab | + 1 << PG_rmaplock | + 1 << PG_anon | + 1 << PG_swapcache | 1 << 
PG_writeback ))) bad_page(function, page); if (PageDirty(page)) @@ -327,6 +333,9 @@ static void prep_new_page(struct page *p 1 << PG_active | 1 << PG_dirty | 1 << PG_reclaim | + 1 << PG_rmaplock | + 1 << PG_anon | + 1 << PG_swapcache | 1 << PG_writeback ))) bad_page(__FUNCTION__, page); @@ -570,6 +579,10 @@ __alloc_pages(unsigned int gfp_mask, uns struct zone *z = zones[i]; unsigned long local_low; + if ((__GFP_NODE_STRICT & gfp_mask) && + (pfn_to_nid(z->zone_start_pfn) != numa_node_id())) + continue; + /* * This is the fabled 'incremental min'. We let real-time tasks * dip their real-time paws a little deeper into reserves. diff -purN -X /home/mbligh/.diff.exclude reference/mm/page_io.c current/mm/page_io.c --- reference/mm/page_io.c 2002-12-17 11:36:36.000000000 -0800 +++ current/mm/page_io.c 2004-04-08 15:10:25.000000000 -0700 @@ -16,8 +16,6 @@ #include #include #include -#include /* for block_sync_page() */ -#include #include #include @@ -32,7 +30,7 @@ get_swap_bio(int gfp_flags, struct page swp_entry_t entry; BUG_ON(!PageSwapCache(page)); - entry.val = page->index; + entry.val = page->private; sis = get_swap_info_struct(swp_type(entry)); bio->bi_sector = map_swap_page(sis, swp_offset(entry)) * @@ -130,13 +128,6 @@ out: return ret; } -struct address_space_operations swap_aops = { - .writepage = swap_writepage, - .readpage = swap_readpage, - .sync_page = block_sync_page, - .set_page_dirty = __set_page_dirty_nobuffers, -}; - /* * A scruffy utility function to read or write an arbitrary swap page * and wait on the I/O. 
@@ -149,10 +140,8 @@ int rw_swap_page_sync(int rw, swp_entry_ }; lock_page(page); - - BUG_ON(page->mapping); - page->mapping = &swapper_space; - page->index = entry.val; + SetPageSwapCache(page); + page->private = entry.val; if (rw == READ) { ret = swap_readpage(NULL, page); @@ -161,7 +150,7 @@ int rw_swap_page_sync(int rw, swp_entry_ ret = swap_writepage(page, &swap_wbc); wait_on_page_writeback(page); } - page->mapping = NULL; + ClearPageSwapCache(page); if (ret == 0 && (!PageUptodate(page) || PageError(page))) ret = -EIO; return ret; diff -purN -X /home/mbligh/.diff.exclude reference/mm/rmap.c current/mm/rmap.c --- reference/mm/rmap.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/rmap.c 2004-04-09 11:52:59.000000000 -0700 @@ -4,17 +4,14 @@ * Copyright 2001, Rik van Riel * Released under the General Public License (GPL). * - * - * Simple, low overhead pte-based reverse mapping scheme. - * This is kept modular because we may want to experiment - * with object-based reverse mapping schemes. Please try - * to keep this thing as modular as possible. + * Simple, low overhead reverse mapping scheme. + * Please try to keep this thing as modular as possible. */ /* * Locking: - * - the page->pte.chain is protected by the PG_chainlock bit, - * which nests within the the mm->page_table_lock, + * - the page->mapcount field is protected by the PG_rmaplock bit, + * which nests within the mm->page_table_lock, * which nests within the page lock. * - because swapout locking is opposite to the locking order * in the page fault path, the swapout path uses trylocks @@ -26,96 +23,306 @@ #include #include #include -#include -#include -#include - -#include -#include -#include -#include +#include -/* #define DEBUG_RMAP */ +#include /* - * Shared pages have a chain of pte_chain structures, used to locate - * all the mappings to this page. 
We only need a pointer to the pte - * here, the page struct for the page table page contains the process - * it belongs to and the offset within that process. + * struct anonmm: to track a bundle of anonymous memory mappings. * - * We use an array of pte pointers in this structure to minimise cache misses - * while traversing reverse maps. - */ -#define NRPTE ((L1_CACHE_BYTES - sizeof(unsigned long))/sizeof(pte_addr_t)) + * Could be embedded in mm_struct, but mm_struct is rather heavyweight, + * and we may need the anonmm to stay around long after the mm_struct + * and its pgd have been freed: because pages originally faulted into + * that mm have been duped into forked mms, and still need tracking. + */ +struct anonmm { + atomic_t count; /* ref count, incl. 1 per page */ + spinlock_t lock; /* head's locks list; others unused */ + struct mm_struct *mm; /* assoc mm_struct, NULL when gone */ + struct anonmm *head; /* exec starts new chain from head */ + struct list_head list; /* chain of associated anonmms */ +}; +static kmem_cache_t *anonmm_cachep; -/* - * next_and_idx encodes both the address of the next pte_chain and the - * offset of the lowest-index used pte in ptes[] (which is equal also - * to the offset of the highest-index unused pte in ptes[], plus one). - */ -struct pte_chain { - unsigned long next_and_idx; - pte_addr_t ptes[NRPTE]; -} ____cacheline_aligned; - -kmem_cache_t *pte_chain_cache; +/** + ** Functions for creating and destroying struct anonmm. 
+ **/ -static inline struct pte_chain *pte_chain_next(struct pte_chain *pte_chain) +void __init init_rmap(void) { - return (struct pte_chain *)(pte_chain->next_and_idx & ~NRPTE); + anonmm_cachep = kmem_cache_create("anonmm", + sizeof(struct anonmm), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!anonmm_cachep) + panic("init_rmap: Cannot alloc anonmm SLAB cache"); } -static inline struct pte_chain *pte_chain_ptr(unsigned long pte_chain_addr) +int exec_rmap(struct mm_struct *mm) { - return (struct pte_chain *)(pte_chain_addr & ~NRPTE); + struct anonmm *anonmm; + + anonmm = kmem_cache_alloc(anonmm_cachep, SLAB_KERNEL); + if (unlikely(!anonmm)) + return -ENOMEM; + + atomic_set(&anonmm->count, 2); /* ref by mm and head */ + anonmm->lock = SPIN_LOCK_UNLOCKED; /* this lock is used */ + anonmm->mm = mm; + anonmm->head = anonmm; + INIT_LIST_HEAD(&anonmm->list); + mm->anonmm = anonmm; + return 0; } -static inline int pte_chain_idx(struct pte_chain *pte_chain) +int dup_rmap(struct mm_struct *mm, struct mm_struct *oldmm) { - return pte_chain->next_and_idx & NRPTE; + struct anonmm *anonmm; + struct anonmm *anonhd = oldmm->anonmm->head; + + anonmm = kmem_cache_alloc(anonmm_cachep, SLAB_KERNEL); + if (unlikely(!anonmm)) + return -ENOMEM; + + /* + * copy_mm calls us before dup_mmap has reset the mm fields, + * so reset rss ourselves before adding to anonhd's list, + * to keep away from this mm until it's worth examining. 
+ */ + mm->rss = 0; + + atomic_set(&anonmm->count, 1); /* ref by mm */ + anonmm->lock = SPIN_LOCK_UNLOCKED; /* this lock is not used */ + anonmm->mm = mm; + anonmm->head = anonhd; + spin_lock(&anonhd->lock); + atomic_inc(&anonhd->count); /* ref by anonmm's head */ + list_add_tail(&anonmm->list, &anonhd->list); + spin_unlock(&anonhd->lock); + mm->anonmm = anonmm; + return 0; +} + +void exit_rmap(struct mm_struct *mm) +{ + struct anonmm *anonmm = mm->anonmm; + struct anonmm *anonhd = anonmm->head; + + mm->anonmm = NULL; + spin_lock(&anonhd->lock); + anonmm->mm = NULL; + if (atomic_dec_and_test(&anonmm->count)) { + BUG_ON(anonmm == anonhd); + list_del(&anonmm->list); + kmem_cache_free(anonmm_cachep, anonmm); + if (atomic_dec_and_test(&anonhd->count)) + BUG(); + } + spin_unlock(&anonhd->lock); + if (atomic_read(&anonhd->count) == 1) { + BUG_ON(anonhd->mm); + BUG_ON(!list_empty(&anonhd->list)); + kmem_cache_free(anonmm_cachep, anonhd); + } } -static inline unsigned long -pte_chain_encode(struct pte_chain *pte_chain, int idx) +static void free_anonmm(struct anonmm *anonmm) +{ + struct anonmm *anonhd = anonmm->head; + + BUG_ON(anonmm->mm); + BUG_ON(anonmm == anonhd); + spin_lock(&anonhd->lock); + list_del(&anonmm->list); + if (atomic_dec_and_test(&anonhd->count)) + BUG(); + spin_unlock(&anonhd->lock); + kmem_cache_free(anonmm_cachep, anonmm); +} + +static inline void clear_page_anon(struct page *page) { - return (unsigned long)pte_chain | idx; + struct anonmm *anonmm = (struct anonmm *) page->mapping; + + page->mapping = NULL; + ClearPageAnon(page); + if (atomic_dec_and_test(&anonmm->count)) + free_anonmm(anonmm); } +/** + ** VM stuff below this comment + **/ + /* - * pte_chain list management policy: - * - * - If a page has a pte_chain list then it is shared by at least two processes, - * because a single sharing uses PageDirect. (Well, this isn't true yet, - * coz this code doesn't collapse singletons back to PageDirect on the remove - * path). 
- * - A pte_chain list has free space only in the head member - all succeeding - * members are 100% full. - * - If the head element has free space, it occurs in its leading slots. - * - All free space in the pte_chain is at the start of the head member. - * - Insertion into the pte_chain puts a pte pointer in the last free slot of - * the head member. - * - Removal from a pte chain moves the head pte of the head member onto the - * victim pte and frees the head member if it became empty. + * At what user virtual address is page expected in file-backed vma? */ +#define NOADDR (~0UL) /* impossible user virtual address */ +static inline unsigned long +vma_address(struct page *page, struct vm_area_struct *vma) +{ + unsigned long pgoff; + unsigned long address; + + pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + return (address >= vma->vm_start && address < vma->vm_end)? + address: NOADDR; +} /** - ** VM stuff below this comment + ** Subfunctions of page_referenced: page_referenced_one called + ** repeatedly from either page_referenced_anon or page_referenced_obj. 
**/ +static int page_referenced_one(struct page *page, + struct mm_struct *mm, unsigned long address, int *mapcount) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int referenced = 0; + + if (!spin_trylock(&mm->page_table_lock)) + return 0; + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + goto out_unlock; + + pmd = pmd_offset(pgd, address); + if (!pmd_present(*pmd)) + goto out_unlock; + + pte = pte_offset_map(pmd, address); + if (!pte_present(*pte)) + goto out_unmap; + + if (page_to_pfn(page) != pte_pfn(*pte)) + goto out_unmap; + + if (ptep_test_and_clear_young(pte)) + referenced++; + + (*mapcount)--; + +out_unmap: + pte_unmap(pte); + +out_unlock: + spin_unlock(&mm->page_table_lock); + return referenced; +} + +static inline int page_referenced_anon(struct page *page, int *mapcount) +{ + struct anonmm *anonmm = (struct anonmm *) page->mapping; + struct anonmm *anonhd = anonmm->head; + struct list_head *seek_head; + int referenced = 0; + + spin_lock(&anonhd->lock); + /* + * First try the indicated mm, it's the most likely. + */ + if (anonmm->mm && anonmm->mm->rss) { + referenced += page_referenced_one( + page, anonmm->mm, page->index, mapcount); + if (!*mapcount) + goto out; + } + + /* + * Then down the rest of the list, from that as the head. Stop + * when we reach anonhd? No: although a page cannot get dup'ed + * into an older mm, once swapped, its indicated mm may not be + * the oldest, just the first into which it was faulted back. + */ + seek_head = &anonmm->list; + list_for_each_entry(anonmm, seek_head, list) { + if (!anonmm->mm || !anonmm->mm->rss) + continue; + referenced += page_referenced_one( + page, anonmm->mm, page->index, mapcount); + if (!*mapcount) + goto out; + } +out: + spin_unlock(&anonhd->lock); + return referenced; +} + +/** + * page_referenced_obj - referenced check for object-based rmap + * @page: the page we're checking references on. 
+ * + * For an object-based mapped page, find all the places it is mapped and + * check/clear the referenced flag. This is done by following the page->mapping + * pointer, then walking the chain of vmas it holds. It returns the number + * of references it found. + * + * This function is only called from page_referenced for object-based pages. + * + * The semaphore address_space->i_shared_sem is tried. If it can't be gotten, + * assume a reference count of 0, so try_to_unmap will then have a go. + */ +static inline int page_referenced_obj(struct page *page, int *mapcount) +{ + struct address_space *mapping = page->mapping; + struct vm_area_struct *vma; + unsigned long address; + int referenced = 0; + + if (down_trylock(&mapping->i_shared_sem)) + return 0; + + list_for_each_entry(vma, &mapping->i_mmap, shared) { + if (!vma->vm_mm->rss) + continue; + address = vma_address(page, vma); + if (address == NOADDR) + continue; + if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE)) == + (VM_LOCKED|VM_MAYSHARE)) { + referenced++; + goto out; + } + referenced += page_referenced_one( + page, vma->vm_mm, address, mapcount); + if (!*mapcount) + goto out; + } + + list_for_each_entry(vma, &mapping->i_mmap_shared, shared) { + if (!vma->vm_mm->rss || (vma->vm_flags & VM_NONLINEAR)) + continue; + address = vma_address(page, vma); + if (address == NOADDR) + continue; + if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) { + referenced++; + goto out; + } + referenced += page_referenced_one( + page, vma->vm_mm, address, mapcount); + if (!*mapcount) + goto out; + } +out: + up(&mapping->i_shared_sem); + return referenced; +} + /** * page_referenced - test if the page was referenced * @page: the page to test * * Quick test_and_clear_referenced for all mappings to a page, - * returns the number of processes which referenced the page. - * Caller needs to hold the pte_chain_lock. - * - * If the page has a single-entry pte_chain, collapse that back to a PageDirect - * representation. 
This way, it's only done under memory pressure. + * returns the number of ptes which referenced the page. + * Caller needs to hold the rmap_lock. */ int fastcall page_referenced(struct page * page) { - struct pte_chain *pc; + int mapcount = page->mapcount; int referenced = 0; if (page_test_and_clear_young(page)) @@ -124,410 +331,505 @@ int fastcall page_referenced(struct page if (TestClearPageReferenced(page)) referenced++; - if (PageDirect(page)) { - pte_t *pte = rmap_ptep_map(page->pte.direct); - if (ptep_test_and_clear_young(pte)) - referenced++; - rmap_ptep_unmap(pte); - } else { - int nr_chains = 0; - - /* Check all the page tables mapping this page. */ - for (pc = page->pte.chain; pc; pc = pte_chain_next(pc)) { - int i; - - for (i = pte_chain_idx(pc); i < NRPTE; i++) { - pte_addr_t pte_paddr = pc->ptes[i]; - pte_t *p; - - p = rmap_ptep_map(pte_paddr); - if (ptep_test_and_clear_young(p)) - referenced++; - rmap_ptep_unmap(p); - nr_chains++; - } - } - if (nr_chains == 1) { - pc = page->pte.chain; - page->pte.direct = pc->ptes[NRPTE-1]; - SetPageDirect(page); - pc->ptes[NRPTE-1] = 0; - __pte_chain_free(pc); - } + if (page->mapcount && page->mapping) { + if (PageAnon(page)) + referenced += page_referenced_anon(page, &mapcount); + else + referenced += page_referenced_obj(page, &mapcount); } return referenced; } /** - * page_add_rmap - add reverse mapping entry to a page - * @page: the page to add the mapping to - * @ptep: the page table entry mapping this page + * page_add_anon_rmap - add pte mapping to an anonymous page + * @page: the page to add the mapping to + * @mm: the mm in which the mapping is added + * @address: the user virtual address mapped * - * Add a new pte reverse mapping to a page. * The caller needs to hold the mm->page_table_lock. 
*/ -struct pte_chain * fastcall -page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain) +void fastcall page_add_anon_rmap(struct page *page, + struct mm_struct *mm, unsigned long address) { - pte_addr_t pte_paddr = ptep_to_paddr(ptep); - struct pte_chain *cur_pte_chain; + struct anonmm *anonmm; - if (PageReserved(page)) - return pte_chain; + BUG_ON(PageReserved(page)); + BUG_ON(page_mapping(page)); - pte_chain_lock(page); - - if (page->pte.direct == 0) { - page->pte.direct = pte_paddr; - SetPageDirect(page); + rmap_lock(page); + if (!page->mapcount) { + anonmm = mm->anonmm; + SetPageAnon(page); + page->index = address & PAGE_MASK; + page->mapping = (void *) anonmm; + atomic_inc(&anonmm->count); inc_page_state(nr_mapped); - goto out; } + page->mapcount++; + rmap_unlock(page); +} - if (PageDirect(page)) { - /* Convert a direct pointer into a pte_chain */ - ClearPageDirect(page); - pte_chain->ptes[NRPTE-1] = page->pte.direct; - pte_chain->ptes[NRPTE-2] = pte_paddr; - pte_chain->next_and_idx = pte_chain_encode(NULL, NRPTE-2); - page->pte.direct = 0; - page->pte.chain = pte_chain; - pte_chain = NULL; /* We consumed it */ - goto out; - } +/** + * page_update_anon_rmap - move pte mapping of an anonymous page + * @page: the page to update the mapping of + * @mm: the new mm in which the mapping is found + * @address: the new user virtual address mapped + * + * The caller needs to hold the mm->page_table_lock. + * + * For do_wp_page: to update mapping to the one remaining mm. + * For copy_one_pte: to update address when vma is mremapped. 
+ */ +void fastcall page_update_anon_rmap(struct page *page, + struct mm_struct *mm, unsigned long address) +{ + struct anonmm *anonmm; - cur_pte_chain = page->pte.chain; - if (cur_pte_chain->ptes[0]) { /* It's full */ - pte_chain->next_and_idx = pte_chain_encode(cur_pte_chain, - NRPTE - 1); - page->pte.chain = pte_chain; - pte_chain->ptes[NRPTE-1] = pte_paddr; - pte_chain = NULL; /* We consumed it */ - goto out; + BUG_ON(!PageAnon(page)); + if (page->mapcount != 1) + return; + + anonmm = mm->anonmm; + address &= PAGE_MASK; + if (anonmm == (struct anonmm *) page->mapping && + address == page->index) + return; + + rmap_lock(page); + if (page->mapcount == 1) { + page->index = address; + if (anonmm != (struct anonmm *) page->mapping) { + clear_page_anon(page); + SetPageAnon(page); + page->mapping = (void *) anonmm; + atomic_inc(&anonmm->count); + } } - cur_pte_chain->ptes[pte_chain_idx(cur_pte_chain) - 1] = pte_paddr; - cur_pte_chain->next_and_idx--; -out: - pte_chain_unlock(page); - return pte_chain; + rmap_unlock(page); } /** - * page_remove_rmap - take down reverse mapping to a page - * @page: page to remove mapping from - * @ptep: page table entry to remove + * page_add_obj_rmap - add pte mapping to a file page + * @page: the page to add the mapping to * - * Removes the reverse mapping from the pte_chain of the page, - * after that the caller can clear the page table entry and free - * the page. - * Caller needs to hold the mm->page_table_lock. + * The caller needs to hold the mm->page_table_lock. 
*/ -void fastcall page_remove_rmap(struct page *page, pte_t *ptep) +void fastcall page_add_obj_rmap(struct page *page) { - pte_addr_t pte_paddr = ptep_to_paddr(ptep); - struct pte_chain *pc; - + BUG_ON(PageAnon(page)); if (!pfn_valid(page_to_pfn(page)) || PageReserved(page)) return; - pte_chain_lock(page); + rmap_lock(page); + if (!page->mapcount) + inc_page_state(nr_mapped); + page->mapcount++; + rmap_unlock(page); +} - if (!page_mapped(page)) - goto out_unlock; /* remap_page_range() from a driver? */ +/** + * page_remove_rmap - take down pte mapping from a page + * @page: page to remove mapping from + * + * Caller needs to hold the mm->page_table_lock. + */ +void fastcall page_remove_rmap(struct page *page) +{ + BUG_ON(PageReserved(page)); + BUG_ON(!page->mapcount); - if (PageDirect(page)) { - if (page->pte.direct == pte_paddr) { - page->pte.direct = 0; - ClearPageDirect(page); - goto out; - } - } else { - struct pte_chain *start = page->pte.chain; - struct pte_chain *next; - int victim_i = pte_chain_idx(start); - - for (pc = start; pc; pc = next) { - int i; - - next = pte_chain_next(pc); - if (next) - prefetch(next); - for (i = pte_chain_idx(pc); i < NRPTE; i++) { - pte_addr_t pa = pc->ptes[i]; - - if (pa != pte_paddr) - continue; - pc->ptes[i] = start->ptes[victim_i]; - start->ptes[victim_i] = 0; - if (victim_i == NRPTE-1) { - /* Emptied a pte_chain */ - page->pte.chain = pte_chain_next(start); - __pte_chain_free(start); - } else { - start->next_and_idx++; - } - goto out; - } - } - } -out: - if (page->pte.direct == 0 && page_test_and_clear_dirty(page)) - set_page_dirty(page); - if (!page_mapped(page)) + rmap_lock(page); + page->mapcount--; + if (!page->mapcount) { + if (page_test_and_clear_dirty(page)) + set_page_dirty(page); + if (PageAnon(page)) + clear_page_anon(page); dec_page_state(nr_mapped); -out_unlock: - pte_chain_unlock(page); - return; + } + rmap_unlock(page); } /** - * try_to_unmap_one - worker function for try_to_unmap - * @page: page to unmap - * 
@ptep: page table entry to unmap from page - * - * Internal helper function for try_to_unmap, called for each page - * table entry mapping a page. Because locking order here is opposite - * to the locking order used by the page fault path, we use trylocks. - * Locking: - * page lock shrink_list(), trylock - * pte_chain_lock shrink_list() - * mm->page_table_lock try_to_unmap_one(), trylock - */ -static int FASTCALL(try_to_unmap_one(struct page *, pte_addr_t)); -static int fastcall try_to_unmap_one(struct page * page, pte_addr_t paddr) -{ - pte_t *ptep = rmap_ptep_map(paddr); - unsigned long address = ptep_to_address(ptep); - struct mm_struct * mm = ptep_to_mm(ptep); - struct vm_area_struct * vma; - pte_t pte; - int ret; + ** Subfunctions of try_to_unmap: try_to_unmap_one called + ** repeatedly from either try_to_unmap_anon or try_to_unmap_obj. + **/ - if (!mm) - BUG(); +static int try_to_unmap_one(struct page *page, struct mm_struct *mm, + unsigned long address, int *mapcount, struct vm_area_struct *vma) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + pte_t pteval; + int ret = SWAP_AGAIN; /* * We need the page_table_lock to protect us from page faults, * munmap, fork, etc... */ - if (!spin_trylock(&mm->page_table_lock)) { - rmap_ptep_unmap(ptep); - return SWAP_AGAIN; - } + if (!spin_trylock(&mm->page_table_lock)) + goto out; + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + goto out_unlock; - /* During mremap, it's possible pages are not in a VMA. */ - vma = find_vma(mm, address); - if (!vma) { - ret = SWAP_FAIL; + pmd = pmd_offset(pgd, address); + if (!pmd_present(*pmd)) goto out_unlock; + + pte = pte_offset_map(pmd, address); + if (!pte_present(*pte)) + goto out_unmap; + + if (page_to_pfn(page) != pte_pfn(*pte)) + goto out_unmap; + + (*mapcount)--; + + if (!vma) { + vma = find_vma(mm, address); + /* unmap_vmas drops page_table_lock with vma unlinked */ + if (!vma) + goto out_unmap; } - /* The page is mlock()d, we cannot swap it out. 
*/ - if (vma->vm_flags & VM_LOCKED) { + /* + * If the page is mlock()d, we cannot swap it out. + * If it's recently referenced (perhaps page_referenced + * skipped over this mm) then we should reactivate it. + */ + if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) || + ptep_test_and_clear_young(pte)) { ret = SWAP_FAIL; - goto out_unlock; + goto out_unmap; } /* Nuke the page table entry. */ flush_cache_page(vma, address); - pte = ptep_clear_flush(vma, address, ptep); + pteval = ptep_clear_flush(vma, address, pte); - if (PageSwapCache(page)) { + if (PageAnon(page)) { + swp_entry_t entry = { .val = page->private }; /* * Store the swap location in the pte. * See handle_pte_fault() ... */ - swp_entry_t entry = { .val = page->index }; + BUG_ON(!PageSwapCache(page)); swap_duplicate(entry); - set_pte(ptep, swp_entry_to_pte(entry)); - BUG_ON(pte_file(*ptep)); - } else { - unsigned long pgidx; - /* - * If a nonlinear mapping then store the file page offset - * in the pte. - */ - pgidx = (address - vma->vm_start) >> PAGE_SHIFT; - pgidx += vma->vm_pgoff; - pgidx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT; - if (page->index != pgidx) { - set_pte(ptep, pgoff_to_pte(page->index)); - BUG_ON(!pte_file(*ptep)); - } + set_pte(pte, swp_entry_to_pte(entry)); + BUG_ON(pte_file(*pte)); } /* Move the dirty bit to the physical page now the pte is gone. */ - if (pte_dirty(pte)) + if (pte_dirty(pteval)) set_page_dirty(page); mm->rss--; + BUG_ON(!page->mapcount); + page->mapcount--; page_cache_release(page); - ret = SWAP_SUCCESS; + +out_unmap: + pte_unmap(pte); out_unlock: - rmap_ptep_unmap(ptep); spin_unlock(&mm->page_table_lock); + +out: return ret; } -/** - * try_to_unmap - try to remove all page table mappings to a page - * @page: the page to get unmapped - * - * Tries to remove all the page table entries which are mapping this - * page, used in the pageout path. Caller must hold the page lock - * and its pte chain lock. 
Return values are: - * - * SWAP_SUCCESS - we succeeded in removing all mappings - * SWAP_AGAIN - we missed a trylock, try again later - * SWAP_FAIL - the page is unswappable +/* + * try_to_unmap_cluster is only used on VM_NONLINEAR shared object vmas, + * in which objrmap is unable to predict where a page will be found. */ -int fastcall try_to_unmap(struct page * page) -{ - struct pte_chain *pc, *next_pc, *start; - int ret = SWAP_SUCCESS; - int victim_i; +#define CLUSTER_SIZE (32 * PAGE_SIZE) +#if CLUSTER_SIZE > PMD_SIZE +#undef CLUSTER_SIZE +#define CLUSTER_SIZE PMD_SIZE +#endif +#define CLUSTER_MASK (~(CLUSTER_SIZE - 1)) + +static int try_to_unmap_cluster(struct mm_struct *mm, + unsigned long cursor, int *mapcount, struct vm_area_struct *vma) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + pte_t pteval; + struct page *page; + unsigned long address; + unsigned long end; + unsigned long pfn; + unsigned long pgidx; - /* This page should not be on the pageout lists. */ - if (PageReserved(page)) - BUG(); - if (!PageLocked(page)) - BUG(); - /* We need backing store to swap out a page. */ - if (!page->mapping) - BUG(); + /* + * We need the page_table_lock to protect us from page faults, + * munmap, fork, etc... 
+ */ + if (!spin_trylock(&mm->page_table_lock)) + return SWAP_FAIL; - if (PageDirect(page)) { - ret = try_to_unmap_one(page, page->pte.direct); - if (ret == SWAP_SUCCESS) { - if (page_test_and_clear_dirty(page)) - set_page_dirty(page); - page->pte.direct = 0; - ClearPageDirect(page); - } - goto out; - } + address = (vma->vm_start + cursor) & CLUSTER_MASK; + end = address + CLUSTER_SIZE; + if (address < vma->vm_start) + address = vma->vm_start; + if (end > vma->vm_end) + end = vma->vm_end; - start = page->pte.chain; - victim_i = pte_chain_idx(start); - for (pc = start; pc; pc = next_pc) { - int i; - - next_pc = pte_chain_next(pc); - if (next_pc) - prefetch(next_pc); - for (i = pte_chain_idx(pc); i < NRPTE; i++) { - pte_addr_t pte_paddr = pc->ptes[i]; - - switch (try_to_unmap_one(page, pte_paddr)) { - case SWAP_SUCCESS: - /* - * Release a slot. If we're releasing the - * first pte in the first pte_chain then - * pc->ptes[i] and start->ptes[victim_i] both - * refer to the same thing. It works out. - */ - pc->ptes[i] = start->ptes[victim_i]; - start->ptes[victim_i] = 0; - victim_i++; - if (victim_i == NRPTE) { - page->pte.chain = pte_chain_next(start); - __pte_chain_free(start); - start = page->pte.chain; - victim_i = 0; - } else { - start->next_and_idx++; - } - if (page->pte.direct == 0 && - page_test_and_clear_dirty(page)) - set_page_dirty(page); - break; - case SWAP_AGAIN: - /* Skip this pte, remembering status. 
*/ - ret = SWAP_AGAIN; - continue; - case SWAP_FAIL: - ret = SWAP_FAIL; - goto out; - } + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + goto out_unlock; + + pmd = pmd_offset(pgd, address); + if (!pmd_present(*pmd)) + goto out_unlock; + + for (pte = pte_offset_map(pmd, address); + address < end; pte++, address += PAGE_SIZE) { + + if (!pte_present(*pte)) + continue; + + pfn = pte_pfn(*pte); + if (!pfn_valid(pfn)) + continue; + + page = pfn_to_page(pfn); + BUG_ON(PageAnon(page)); + if (PageReserved(page)) + continue; + + if (ptep_test_and_clear_young(pte)) + continue; + + /* Nuke the page table entry. */ + flush_cache_page(vma, address); + pteval = ptep_clear_flush(vma, address, pte); + + /* If nonlinear, store the file page offset in the pte. */ + pgidx = (address - vma->vm_start) >> PAGE_SHIFT; + pgidx += vma->vm_pgoff; + pgidx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT; + if (page->index != pgidx) { + set_pte(pte, pgoff_to_pte(page->index)); + BUG_ON(!pte_file(*pte)); } + + /* Move the dirty bit to the physical page now the pte is gone. */ + if (pte_dirty(pteval)) + set_page_dirty(page); + + page_remove_rmap(page); + page_cache_release(page); + mm->rss--; + (*mapcount)--; + } + + pte_unmap(pte); + +out_unlock: + spin_unlock(&mm->page_table_lock); + return SWAP_AGAIN; +} + +static inline int try_to_unmap_anon(struct page *page, int *mapcount) +{ + struct anonmm *anonmm = (struct anonmm *) page->mapping; + struct anonmm *anonhd = anonmm->head; + struct list_head *seek_head; + int ret = SWAP_AGAIN; + + spin_lock(&anonhd->lock); + /* + * First try the indicated mm, it's the most likely. + */ + if (anonmm->mm && anonmm->mm->rss) { + ret = try_to_unmap_one( + page, anonmm->mm, page->index, mapcount, NULL); + if (ret == SWAP_FAIL || !*mapcount) + goto out; + } + + /* + * Then down the rest of the list, from that as the head. Stop + * when we reach anonhd? 
No: although a page cannot get dup'ed + * into an older mm, once swapped, its indicated mm may not be + * the oldest, just the first into which it was faulted back. + */ + seek_head = &anonmm->list; + list_for_each_entry(anonmm, seek_head, list) { + if (!anonmm->mm || !anonmm->mm->rss) + continue; + ret = try_to_unmap_one( + page, anonmm->mm, page->index, mapcount, NULL); + if (ret == SWAP_FAIL || !*mapcount) + goto out; } out: - if (!page_mapped(page)) - dec_page_state(nr_mapped); + spin_unlock(&anonhd->lock); return ret; } /** - ** No more VM stuff below this comment, only pte_chain helper - ** functions. - **/ - -static void pte_chain_ctor(void *p, kmem_cache_t *cachep, unsigned long flags) -{ - struct pte_chain *pc = p; + * try_to_unmap_obj - unmap a page using the object-based rmap method + * @page: the page to unmap + * + * Find all the mappings of a page using the mapping pointer and the vma chains + * contained in the address_space struct it points to. + * + * This function is only called from try_to_unmap for object-based pages. + * + * The semaphore address_space->i_shared_sem is tried. If it can't be gotten, + * return a temporary error. 
+ */ +static inline int try_to_unmap_obj(struct page *page, int *mapcount) +{ + struct address_space *mapping = page->mapping; + struct vm_area_struct *vma; + unsigned long address; + int ret = SWAP_AGAIN; + unsigned long cursor; + unsigned long max_nl_cursor = 0; + unsigned long max_nl_size = 0; + + if (down_trylock(&mapping->i_shared_sem)) + return ret; + + list_for_each_entry(vma, &mapping->i_mmap, shared) { + if (!vma->vm_mm->rss) + continue; + address = vma_address(page, vma); + if (address == NOADDR) + continue; + ret = try_to_unmap_one( + page, vma->vm_mm, address, mapcount, vma); + if (ret == SWAP_FAIL || !*mapcount) + goto out; + } - memset(pc, 0, sizeof(*pc)); -} + list_for_each_entry(vma, &mapping->i_mmap_shared, shared) { + if (unlikely(vma->vm_flags & VM_NONLINEAR)) { + /* + * Defer unmapping nonlinear to the next loop, + * but take notes while we're here e.g. don't + * want to loop again when no nonlinear vmas. + */ + if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) + continue; + cursor = (unsigned long) vma->vm_private_data; + if (cursor > max_nl_cursor) + max_nl_cursor = cursor; + cursor = vma->vm_end - vma->vm_start; + if (cursor > max_nl_size) + max_nl_size = cursor; + continue; + } + if (!vma->vm_mm->rss) + continue; + address = vma_address(page, vma); + if (address == NOADDR) + continue; + ret = try_to_unmap_one( + page, vma->vm_mm, address, mapcount, vma); + if (ret == SWAP_FAIL || !*mapcount) + goto out; + } -DEFINE_PER_CPU(struct pte_chain *, local_pte_chain) = 0; + if (max_nl_size == 0) /* no nonlinear vmas of this file */ + goto out; -/** - * __pte_chain_free - free pte_chain structure - * @pte_chain: pte_chain struct to free - */ -void __pte_chain_free(struct pte_chain *pte_chain) -{ - struct pte_chain **pte_chainp; + /* + * We don't try to search for this page in the nonlinear vmas, + * and page_referenced wouldn't have found it anyway. Instead + * just walk the nonlinear vmas trying to age and unmap some. 
+ * The mapcount of the page we came in with is irrelevant, + * but even so use it as a guide to how hard we should try? + */ + rmap_unlock(page); - pte_chainp = &get_cpu_var(local_pte_chain); - if (pte_chain->next_and_idx) - pte_chain->next_and_idx = 0; - if (*pte_chainp) - kmem_cache_free(pte_chain_cache, *pte_chainp); - *pte_chainp = pte_chain; - put_cpu_var(local_pte_chain); -} + max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; + if (max_nl_cursor == 0) + max_nl_cursor = CLUSTER_SIZE; + + do { + list_for_each_entry(vma, &mapping->i_mmap_shared, shared) { + if (VM_NONLINEAR != (vma->vm_flags & + (VM_NONLINEAR|VM_LOCKED|VM_RESERVED))) + continue; + cursor = (unsigned long) vma->vm_private_data; + while (vma->vm_mm->rss && + cursor < max_nl_cursor && + cursor < vma->vm_end - vma->vm_start) { + ret = try_to_unmap_cluster(vma->vm_mm, + cursor, mapcount, vma); + if (ret == SWAP_FAIL) + break; + cursor += CLUSTER_SIZE; + vma->vm_private_data = (void *) cursor; + if (*mapcount <= 0) + goto relock; + } + if (ret != SWAP_FAIL) + vma->vm_private_data = + (void *) max_nl_cursor; + ret = SWAP_AGAIN; + } + max_nl_cursor += CLUSTER_SIZE; + } while (max_nl_cursor <= max_nl_size); -/* - * pte_chain_alloc(): allocate a pte_chain structure for use by page_add_rmap(). - * - * The caller of page_add_rmap() must perform the allocation because - * page_add_rmap() is invariably called under spinlock. Often, page_add_rmap() - * will not actually use the pte_chain, because there is space available in one - * of the existing pte_chains which are attached to the page. So the case of - * allocating and then freeing a single pte_chain is specially optimised here, - * with a one-deep per-cpu cache. 
- */ -struct pte_chain *pte_chain_alloc(int gfp_flags) -{ - struct pte_chain *ret; - struct pte_chain **pte_chainp; - - might_sleep_if(gfp_flags & __GFP_WAIT); - - pte_chainp = &get_cpu_var(local_pte_chain); - if (*pte_chainp) { - ret = *pte_chainp; - *pte_chainp = NULL; - put_cpu_var(local_pte_chain); - } else { - put_cpu_var(local_pte_chain); - ret = kmem_cache_alloc(pte_chain_cache, gfp_flags); + /* + * Don't loop forever (perhaps all the remaining pages are + * in locked vmas). Reset cursor on all unreserved nonlinear + * vmas, now forgetting on which ones it had fallen behind. + */ + list_for_each_entry(vma, &mapping->i_mmap_shared, shared) { + if ((vma->vm_flags & (VM_NONLINEAR|VM_RESERVED)) == + VM_NONLINEAR) + vma->vm_private_data = 0; } +relock: + rmap_lock(page); +out: + up(&mapping->i_shared_sem); return ret; } -void __init pte_chain_init(void) +/** + * try_to_unmap - try to remove all page table mappings to a page + * @page: the page to get unmapped + * + * Tries to remove all the page table entries which are mapping this + * page, used in the pageout path. Caller must hold the page lock + * and its rmap_lock. 
Return values are: + * + * SWAP_SUCCESS - we succeeded in removing all mappings + * SWAP_AGAIN - we missed a trylock, try again later + * SWAP_FAIL - the page is unswappable + */ +int fastcall try_to_unmap(struct page * page) { - pte_chain_cache = kmem_cache_create( "pte_chain", - sizeof(struct pte_chain), - 0, - SLAB_MUST_HWCACHE_ALIGN, - pte_chain_ctor, - NULL); + int mapcount = page->mapcount; + int ret; - if (!pte_chain_cache) - panic("failed to create pte_chain cache!\n"); + BUG_ON(PageReserved(page)); + BUG_ON(!PageLocked(page)); + BUG_ON(!page->mapcount); + + if (PageAnon(page)) + ret = try_to_unmap_anon(page, &mapcount); + else + ret = try_to_unmap_obj(page, &mapcount); + + if (!page->mapcount) { + if (page_test_and_clear_dirty(page)) + set_page_dirty(page); + if (PageAnon(page)) + clear_page_anon(page); + dec_page_state(nr_mapped); + ret = SWAP_SUCCESS; + } + return ret; } diff -purN -X /home/mbligh/.diff.exclude reference/mm/shmem.c current/mm/shmem.c --- reference/mm/shmem.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/shmem.c 2004-04-09 21:41:39.000000000 -0700 @@ -40,6 +40,29 @@ #include #include +int shm_use_hugepages; + +/* + * On 64bit archs the vmalloc area is very large, + * so we allocate the array in vmalloc on 64bit archs. + * + * Assuming 2M pages (x86 and x86-64) those default setting + * will allow up to 128G of bigpages in a single file on + * 64bit archs and 64G on 32bit archs using the max + * kmalloc size of 128k. So tweaking in practice is needed + * only to go past 128G of bigpages per file on 64bit archs. + * + * This sysctl is in page units (each page large BIGPAGE_SIZE). 
+ */ +#ifdef CONFIG_HUGETLBFS +#if BITS_PER_LONG == 64 +int shm_hugepages_per_file = 128UL << (30 - HPAGE_SHIFT); +#else +int shm_hugepages_per_file = 131072 / sizeof(struct page *); +#endif +#endif + + /* This magic number is used in glibc for posix shared memory */ #define TMPFS_MAGIC 0x01021994 diff -purN -X /home/mbligh/.diff.exclude reference/mm/slab.c current/mm/slab.c --- reference/mm/slab.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/slab.c 2004-04-09 11:53:04.000000000 -0700 @@ -479,6 +479,19 @@ static struct cache_names { #undef CACHE }; +/* Adjustments to cache size limit based on memory size */ +static int cache_limit_multiplier_norm; +static int cache_limit_multiplier_dma; + +struct cache_multipliers { + int memsize; + int mult; +} cache_multipliers[] = { + {0x40000, 4}, + {0x10000, 2}, + {0x0, 1} +}; + struct arraycache_init initarray_cache __initdata = { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; struct arraycache_init initarray_generic __initdata = { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; @@ -676,6 +689,9 @@ void __init kmem_cache_init(void) size_t left_over; struct cache_sizes *sizes; struct cache_names *names; + unsigned long dmasize, normsize; + pg_data_t *pgdat; + int i; /* * Fragmentation resistance on low memory - only use bigger @@ -684,7 +700,21 @@ void __init kmem_cache_init(void) if (num_physpages > (32 << 20) >> PAGE_SHIFT) slab_break_gfp_order = BREAK_GFP_ORDER_HI; - + /* + * Increase cache limits based on the amount of memory in various + * zones. 
+ */ + dmasize = normsize = 0; + for_each_pgdat(pgdat) { + dmasize += pgdat->node_zones[ZONE_DMA].present_pages; + normsize += pgdat->node_zones[ZONE_NORMAL].present_pages; + } + for (i = 0; dmasize < cache_multipliers[i].memsize; i++); + cache_limit_multiplier_dma = cache_multipliers[i].mult; + normsize += dmasize; + for (i = 0; normsize < cache_multipliers[i].memsize; i++); + cache_limit_multiplier_norm = cache_multipliers[i].mult; + /* Bootstrap is tricky, because several objects are allocated * from caches that do not exist yet: * 1) initialize the cache_cache cache: it contains the kmem_cache_t @@ -1290,10 +1320,13 @@ next: * the cache that's used by kmalloc(24), otherwise * the creation of further caches will BUG(). */ - cachep->array[smp_processor_id()] = &initarray_generic.cache; + cachep->array[smp_processor_id()] = + &initarray_generic.cache; g_cpucache_up = PARTIAL; } else { - cachep->array[smp_processor_id()] = kmalloc(sizeof(struct arraycache_init),GFP_KERNEL); + cachep->array[smp_processor_id()] = + kmalloc(sizeof(struct arraycache_init), + GFP_KERNEL); } BUG_ON(!ac_data(cachep)); ac_data(cachep)->avail = 0; @@ -1307,7 +1340,7 @@ next: } cachep->lists.next_reap = jiffies + REAPTIMEOUT_LIST3 + - ((unsigned long)cachep)%REAPTIMEOUT_LIST3; + ((unsigned long)cachep)%REAPTIMEOUT_LIST3; /* Need the semaphore to access the chain. */ down(&cache_chain_sem); @@ -1320,16 +1353,24 @@ next: list_for_each(p, &cache_chain) { kmem_cache_t *pc = list_entry(p, kmem_cache_t, next); char tmp; - /* This happens when the module gets unloaded and doesn't - destroy its slab cache and noone else reuses the vmalloc - area of the module. Print a warning. */ - if (__get_user(tmp,pc->name)) { - printk("SLAB: cache with size %d has lost its name\n", - pc->objsize); + + /* + * This happens when the module gets unloaded and + * doesn't destroy its slab cache and noone else reuses + * the vmalloc area of the module. Print a warning. 
+ */ +#ifdef CONFIG_X86_UACCESS_INDIRECT + if (__direct_get_user(tmp,pc->name)) { +#else + if (__get_user(tmp,pc->name)) { +#endif + printk("SLAB: cache with size %d has lost its " + "name\n", pc->objsize); continue; } if (!strcmp(pc->name,name)) { - printk("kmem_cache_create: duplicate cache %s\n",name); + printk("kmem_cache_create: duplicate " + "cache %s\n",name); up(&cache_chain_sem); unlock_cpu_hotplug(); BUG(); @@ -2473,6 +2514,11 @@ static void enable_cpucache (kmem_cache_ else limit = 120; + if (cachep->gfpflags & GFP_DMA) + limit *= cache_limit_multiplier_dma; + else + limit *= cache_limit_multiplier_norm; + /* Cpu bound tasks (e.g. network routing) can exhibit cpu bound * allocation behaviour: Most allocs on one cpu, most free operations * on another cpu. For these cases, an efficient object passing between @@ -2663,7 +2709,7 @@ static void *s_start(struct seq_file *m, seq_puts(m, "slabinfo - version: 2.0\n"); #endif seq_puts(m, "# name "); - seq_puts(m, " : tunables "); + seq_puts(m, " : tunables "); seq_puts(m, " : slabdata "); #if STATS seq_puts(m, " : globalstat "); diff -purN -X /home/mbligh/.diff.exclude reference/mm/swap.c current/mm/swap.c --- reference/mm/swap.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/swap.c 2004-04-09 13:23:19.000000000 -0700 @@ -351,12 +351,15 @@ void pagevec_strip(struct pagevec *pvec) * The search returns a group of mapping-contiguous pages with ascending * indexes. There may be holes in the indices due to not-present pages. * - * pagevec_lookup() returns the number of pages which were found. + * pagevec_lookup() returns the number of pages which were found + * and also atomically sets the next offset to continue looking up + * mapping contiguous pages from (useful when doing a range of + * pagevec lookups in chunks of PAGEVEC_SIZE). 
*/ unsigned int pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, - pgoff_t start, unsigned int nr_pages) + pgoff_t *next, unsigned int nr_pages) { - pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages); + pvec->nr = find_get_pages(mapping, next, nr_pages, pvec->pages); return pagevec_count(pvec); } diff -purN -X /home/mbligh/.diff.exclude reference/mm/swap_state.c current/mm/swap_state.c --- reference/mm/swap_state.c 2003-10-01 11:35:37.000000000 -0700 +++ current/mm/swap_state.c 2004-04-08 15:10:27.000000000 -0700 @@ -21,23 +21,20 @@ static struct backing_dev_info swap_back .memory_backed = 1, /* Does not contribute to dirty memory */ }; -extern struct address_space_operations swap_aops; +static struct address_space_operations swap_aops = { + .writepage = swap_writepage, + .readpage = swap_readpage, + /* + * sync_page and set_page_dirty are special-cased. + */ +}; struct address_space swapper_space = { .page_tree = RADIX_TREE_INIT(GFP_ATOMIC), .page_lock = SPIN_LOCK_UNLOCKED, - .clean_pages = LIST_HEAD_INIT(swapper_space.clean_pages), - .dirty_pages = LIST_HEAD_INIT(swapper_space.dirty_pages), - .io_pages = LIST_HEAD_INIT(swapper_space.io_pages), - .locked_pages = LIST_HEAD_INIT(swapper_space.locked_pages), + .nrpages = 0, .a_ops = &swap_aops, .backing_dev_info = &swap_backing_dev_info, - .i_mmap = LIST_HEAD_INIT(swapper_space.i_mmap), - .i_mmap_shared = LIST_HEAD_INIT(swapper_space.i_mmap_shared), - .i_shared_sem = __MUTEX_INITIALIZER(swapper_space.i_shared_sem), - .truncate_count = ATOMIC_INIT(0), - .private_lock = SPIN_LOCK_UNLOCKED, - .private_list = LIST_HEAD_INIT(swapper_space.private_list), }; #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) @@ -59,30 +56,55 @@ void show_swap_cache_info(void) swap_cache_info.noent_race, swap_cache_info.exist_race); } +/* + * __add_to_swap_cache resembles add_to_page_cache on swapper_space, + * but sets SwapCache flag and private instead of mapping and index. 
+ */ +static int __add_to_swap_cache(struct page *page, + swp_entry_t entry, int gfp_mask) +{ + int error; + + BUG_ON(PageSwapCache(page)); + BUG_ON(PagePrivate(page)); + error = radix_tree_preload(gfp_mask); + if (!error) { + page_cache_get(page); + spin_lock(&swapper_space.page_lock); + error = radix_tree_insert(&swapper_space.page_tree, + entry.val, page); + if (!error) { + SetPageLocked(page); + SetPageSwapCache(page); + page->private = entry.val; + total_swapcache_pages++; + pagecache_acct(1); + } else + page_cache_release(page); + spin_unlock(&swapper_space.page_lock); + radix_tree_preload_end(); + } + return error; +} + static int add_to_swap_cache(struct page *page, swp_entry_t entry) { int error; - if (page->mapping) - BUG(); if (!swap_duplicate(entry)) { INC_CACHE_INFO(noent_race); return -ENOENT; } - error = add_to_page_cache(page, &swapper_space, entry.val, GFP_KERNEL); + error = __add_to_swap_cache(page, entry, GFP_KERNEL); /* * Anon pages are already on the LRU, we don't run lru_cache_add here. 
*/ - if (error != 0) { + if (error) { swap_free(entry); if (error == -EEXIST) INC_CACHE_INFO(exist_race); return error; } - if (!PageLocked(page)) - BUG(); - if (!PageSwapCache(page)) - BUG(); INC_CACHE_INFO(add_total); return 0; } @@ -96,7 +118,12 @@ void __delete_from_swap_cache(struct pag BUG_ON(!PageLocked(page)); BUG_ON(!PageSwapCache(page)); BUG_ON(PageWriteback(page)); - __remove_from_page_cache(page); + + radix_tree_delete(&swapper_space.page_tree, page->private); + page->private = 0; + ClearPageSwapCache(page); + total_swapcache_pages--; + pagecache_acct(-1); INC_CACHE_INFO(del_total); } @@ -140,8 +167,7 @@ int add_to_swap(struct page * page) /* * Add it to the swap cache and mark it dirty */ - err = add_to_page_cache(page, &swapper_space, - entry.val, GFP_ATOMIC); + err = __add_to_swap_cache(page, entry, GFP_ATOMIC); if (pf_flags & PF_MEMALLOC) current->flags |= PF_MEMALLOC; @@ -149,8 +175,7 @@ int add_to_swap(struct page * page) switch (err) { case 0: /* Success */ SetPageUptodate(page); - ClearPageDirty(page); - set_page_dirty(page); + SetPageDirty(page); INC_CACHE_INFO(add_total); return 1; case -EEXIST: @@ -176,11 +201,12 @@ void delete_from_swap_cache(struct page { swp_entry_t entry; + BUG_ON(!PageSwapCache(page)); BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); BUG_ON(PagePrivate(page)); - entry.val = page->index; + entry.val = page->private; spin_lock(&swapper_space.page_lock); __delete_from_swap_cache(page); @@ -192,27 +218,13 @@ void delete_from_swap_cache(struct page int move_to_swap_cache(struct page *page, swp_entry_t entry) { - struct address_space *mapping = page->mapping; - int err; - - spin_lock(&swapper_space.page_lock); - spin_lock(&mapping->page_lock); - - err = radix_tree_insert(&swapper_space.page_tree, entry.val, page); - if (!err) { - __remove_from_page_cache(page); - ___add_to_page_cache(page, &swapper_space, entry.val); - } - - spin_unlock(&mapping->page_lock); - spin_unlock(&swapper_space.page_lock); - + int err = 
__add_to_swap_cache(page, entry, GFP_ATOMIC); if (!err) { + remove_from_page_cache(page); + page_cache_release(page); /* pagecache ref */ if (!swap_duplicate(entry)) BUG(); - /* shift page from clean_pages to dirty_pages list */ - BUG_ON(PageDirty(page)); - set_page_dirty(page); + SetPageDirty(page); INC_CACHE_INFO(add_total); } else if (err == -EEXIST) INC_CACHE_INFO(exist_race); @@ -222,29 +234,9 @@ int move_to_swap_cache(struct page *page int move_from_swap_cache(struct page *page, unsigned long index, struct address_space *mapping) { - swp_entry_t entry; - int err; - - BUG_ON(!PageLocked(page)); - BUG_ON(PageWriteback(page)); - BUG_ON(PagePrivate(page)); - - entry.val = page->index; - - spin_lock(&swapper_space.page_lock); - spin_lock(&mapping->page_lock); - - err = radix_tree_insert(&mapping->page_tree, index, page); + int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC); if (!err) { - __delete_from_swap_cache(page); - ___add_to_page_cache(page, mapping, index); - } - - spin_unlock(&mapping->page_lock); - spin_unlock(&swapper_space.page_lock); - - if (!err) { - swap_free(entry); + delete_from_swap_cache(page); /* shift page from clean_pages to dirty_pages list */ ClearPageDirty(page); set_page_dirty(page); @@ -252,7 +244,6 @@ int move_from_swap_cache(struct page *pa return err; } - /* * If we are the only user, then try to free up the swap cache. * @@ -310,19 +301,17 @@ void free_pages_and_swap_cache(struct pa */ struct page * lookup_swap_cache(swp_entry_t entry) { - struct page *found; + struct page *page; - found = find_get_page(&swapper_space, entry.val); - /* - * Unsafe to assert PageSwapCache and mapping on page found: - * if SMP nothing prevents swapoff from deleting this page from - * the swap cache at this moment. find_lock_page would prevent - * that, but no need to change: we _have_ got the right page. 
- */ - INC_CACHE_INFO(find_total); - if (found) + spin_lock(&swapper_space.page_lock); + page = radix_tree_lookup(&swapper_space.page_tree, entry.val); + if (page) { + page_cache_get(page); INC_CACHE_INFO(find_success); - return found; + } + spin_unlock(&swapper_space.page_lock); + INC_CACHE_INFO(find_total); + return page; } /* @@ -340,10 +329,14 @@ struct page * read_swap_cache_async(swp_ /* * First check the swap cache. Since this is normally * called after lookup_swap_cache() failed, re-calling - * that would confuse statistics: use find_get_page() - * directly. + * that would confuse statistics. */ - found_page = find_get_page(&swapper_space, entry.val); + spin_lock(&swapper_space.page_lock); + found_page = radix_tree_lookup(&swapper_space.page_tree, + entry.val); + if (found_page) + page_cache_get(found_page); + spin_unlock(&swapper_space.page_lock); if (found_page) break; diff -purN -X /home/mbligh/.diff.exclude reference/mm/swapfile.c current/mm/swapfile.c --- reference/mm/swapfile.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/swapfile.c 2004-04-08 15:10:27.000000000 -0700 @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -158,7 +158,7 @@ out: return entry; } -static struct swap_info_struct * swap_info_get(swp_entry_t entry) +struct swap_info_struct * swap_info_get(swp_entry_t entry) { struct swap_info_struct * p; unsigned long offset, type; @@ -197,7 +197,7 @@ out: return NULL; } -static void swap_info_put(struct swap_info_struct * p) +void swap_info_put(struct swap_info_struct * p) { swap_device_unlock(p); swap_list_unlock(); @@ -247,14 +247,14 @@ static int exclusive_swap_page(struct pa struct swap_info_struct * p; swp_entry_t entry; - entry.val = page->index; + entry.val = page->private; p = swap_info_get(entry); if (p) { /* Is the only swap cache user the cache itself? */ if (p->swap_map[swp_offset(entry)] == 1) { /* Recheck the page count with the pagecache lock held.. 
*/ spin_lock(&swapper_space.page_lock); - if (page_count(page) - !!PagePrivate(page) == 2) + if (page_count(page) == 2) retval = 1; spin_unlock(&swapper_space.page_lock); } @@ -315,7 +315,7 @@ int remove_exclusive_swap_page(struct pa if (page_count(page) != 2) /* 2: us + cache */ return 0; - entry.val = page->index; + entry.val = page->private; p = swap_info_get(entry); if (!p) return 0; @@ -353,8 +353,14 @@ void free_swap_and_cache(swp_entry_t ent p = swap_info_get(entry); if (p) { - if (swap_entry_free(p, swp_offset(entry)) == 1) - page = find_trylock_page(&swapper_space, entry.val); + if (swap_entry_free(p, swp_offset(entry)) == 1) { + spin_lock(&swapper_space.page_lock); + page = radix_tree_lookup(&swapper_space.page_tree, + entry.val); + if (page && TestSetPageLocked(page)) + page = NULL; + spin_unlock(&swapper_space.page_lock); + } swap_info_put(p); } if (page) { @@ -385,19 +391,19 @@ void free_swap_and_cache(swp_entry_t ent /* vma->vm_mm->page_table_lock is held */ static void unuse_pte(struct vm_area_struct *vma, unsigned long address, pte_t *dir, - swp_entry_t entry, struct page *page, struct pte_chain **pte_chainp) + swp_entry_t entry, struct page *page) { vma->vm_mm->rss++; get_page(page); set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot))); - *pte_chainp = page_add_rmap(page, dir, *pte_chainp); + page_add_anon_rmap(page, vma->vm_mm, address); swap_free(entry); } /* vma->vm_mm->page_table_lock is held */ static int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long size, unsigned long offset, - swp_entry_t entry, struct page *page, struct pte_chain **pte_chainp) + swp_entry_t entry, struct page *page) { pte_t * pte; unsigned long end; @@ -422,8 +428,7 @@ static int unuse_pmd(struct vm_area_stru * Test inline before going to call unuse_pte. 
*/ if (unlikely(pte_same(*pte, swp_pte))) { - unuse_pte(vma, offset + address, pte, - entry, page, pte_chainp); + unuse_pte(vma, offset + address, pte, entry, page); pte_unmap(pte); return 1; } @@ -437,7 +442,7 @@ static int unuse_pmd(struct vm_area_stru /* vma->vm_mm->page_table_lock is held */ static int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long size, - swp_entry_t entry, struct page *page, struct pte_chain **pte_chainp) + swp_entry_t entry, struct page *page) { pmd_t * pmd; unsigned long offset, end; @@ -459,7 +464,7 @@ static int unuse_pgd(struct vm_area_stru BUG(); do { if (unuse_pmd(vma, pmd, address, end - address, - offset, entry, page, pte_chainp)) + offset, entry, page)) return 1; address = (address + PMD_SIZE) & PMD_MASK; pmd++; @@ -469,15 +474,14 @@ static int unuse_pgd(struct vm_area_stru /* vma->vm_mm->page_table_lock is held */ static int unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir, - swp_entry_t entry, struct page *page, struct pte_chain **pte_chainp) + swp_entry_t entry, struct page *page) { unsigned long start = vma->vm_start, end = vma->vm_end; if (start >= end) BUG(); do { - if (unuse_pgd(vma, pgdir, start, end - start, - entry, page, pte_chainp)) + if (unuse_pgd(vma, pgdir, start, end - start, entry, page)) return 1; start = (start + PGDIR_SIZE) & PGDIR_MASK; pgdir++; @@ -485,15 +489,10 @@ static int unuse_vma(struct vm_area_stru return 0; } -static int unuse_process(struct mm_struct * mm, +static void unuse_process(struct mm_struct * mm, swp_entry_t entry, struct page* page) { struct vm_area_struct* vma; - struct pte_chain *pte_chain; - - pte_chain = pte_chain_alloc(GFP_KERNEL); - if (!pte_chain) - return -ENOMEM; /* * Go through process' page directory. 
@@ -501,12 +500,10 @@ static int unuse_process(struct mm_struc spin_lock(&mm->page_table_lock); for (vma = mm->mmap; vma; vma = vma->vm_next) { pgd_t * pgd = pgd_offset(mm, vma->vm_start); - if (unuse_vma(vma, pgd, entry, page, &pte_chain)) + if (unuse_vma(vma, pgd, entry, page)) break; } spin_unlock(&mm->page_table_lock); - pte_chain_free(pte_chain); - return 0; } /* @@ -654,7 +651,7 @@ static int try_to_unuse(unsigned int typ if (start_mm == &init_mm) shmem = shmem_unuse(entry, page); else - retval = unuse_process(start_mm, entry, page); + unuse_process(start_mm, entry, page); } if (*swap_map > 1) { int set_start_mm = (*swap_map >= swcount); @@ -666,7 +663,7 @@ static int try_to_unuse(unsigned int typ atomic_inc(&new_start_mm->mm_users); atomic_inc(&prev_mm->mm_users); spin_lock(&mmlist_lock); - while (*swap_map > 1 && !retval && + while (*swap_map > 1 && (p = p->next) != &start_mm->mmlist) { mm = list_entry(p, struct mm_struct, mmlist); atomic_inc(&mm->mm_users); @@ -683,7 +680,7 @@ static int try_to_unuse(unsigned int typ set_start_mm = 1; shmem = shmem_unuse(entry, page); } else - retval = unuse_process(mm, entry, page); + unuse_process(mm, entry, page); if (set_start_mm && *swap_map < swcount) { mmput(new_start_mm); atomic_inc(&mm->mm_users); @@ -697,11 +694,6 @@ static int try_to_unuse(unsigned int typ mmput(start_mm); start_mm = new_start_mm; } - if (retval) { - unlock_page(page); - page_cache_release(page); - break; - } /* * How could swap count reach 0x7fff when the maximum @@ -996,14 +988,14 @@ int page_queue_congested(struct page *pa BUG_ON(!PageLocked(page)); /* It pins the swap_info_struct */ - bdi = page->mapping->backing_dev_info; if (PageSwapCache(page)) { - swp_entry_t entry = { .val = page->index }; + swp_entry_t entry = { .val = page->private }; struct swap_info_struct *sis; sis = get_swap_info_struct(swp_type(entry)); bdi = sis->bdev->bd_inode->i_mapping->backing_dev_info; - } + } else + bdi = page->mapping->backing_dev_info; return 
bdi_write_congested(bdi); } #endif diff -purN -X /home/mbligh/.diff.exclude reference/mm/truncate.c current/mm/truncate.c --- reference/mm/truncate.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/truncate.c 2004-04-09 13:23:19.000000000 -0700 @@ -122,14 +122,10 @@ void truncate_inode_pages(struct address pagevec_init(&pvec, 0); next = start; - while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + while (pagevec_lookup(&pvec, mapping, &next, PAGEVEC_SIZE)) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; - pgoff_t page_index = page->index; - if (page_index > next) - next = page_index; - next++; if (TestSetPageLocked(page)) continue; if (PageWriteback(page)) { @@ -155,7 +151,7 @@ void truncate_inode_pages(struct address next = start; for ( ; ; ) { - if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + if (!pagevec_lookup(&pvec, mapping, &next, PAGEVEC_SIZE)) { if (next == start) break; next = start; @@ -166,9 +162,6 @@ void truncate_inode_pages(struct address lock_page(page); wait_on_page_writeback(page); - if (page->index > next) - next = page->index; - next++; truncate_complete_page(mapping, page); unlock_page(page); } @@ -201,17 +194,13 @@ unsigned long invalidate_mapping_pages(s pagevec_init(&pvec, 0); while (next <= end && - pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + pagevec_lookup(&pvec, mapping, &next, PAGEVEC_SIZE)) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; if (TestSetPageLocked(page)) { - next++; continue; } - if (page->index > next) - next = page->index; - next++; if (PageDirty(page) || PageWriteback(page)) goto unlock; if (page_mapped(page)) @@ -252,14 +241,13 @@ void invalidate_inode_pages2(struct addr int i; pagevec_init(&pvec, 0); - while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + while (pagevec_lookup(&pvec, mapping, &next, PAGEVEC_SIZE)) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; 
lock_page(page); if (page->mapping == mapping) { /* truncate race? */ wait_on_page_writeback(page); - next = page->index + 1; if (page_mapped(page)) clear_page_dirty(page); else diff -purN -X /home/mbligh/.diff.exclude reference/mm/usercopy.c current/mm/usercopy.c --- reference/mm/usercopy.c 1969-12-31 16:00:00.000000000 -0800 +++ current/mm/usercopy.c 2004-04-09 11:53:01.000000000 -0700 @@ -0,0 +1,290 @@ +/* + * linux/mm/usercopy.c + * + * (C) Copyright 2003 Ingo Molnar + * + * Generic implementation of all the user-VM access functions, without + * relying on being able to access the VM directly. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * Get kernel address of the user page and pin it. + */ +static inline struct page *pin_page(unsigned long addr, int write) +{ + struct mm_struct *mm = current->mm ? : &init_mm; + struct page *page = NULL; + int ret; + + /* + * Do a quick atomic lookup first - this is the fastpath. + */ +retry: + page = follow_page(mm, addr, write); + if (likely(page != NULL)) { + if (!PageReserved(page)) + get_page(page); + return page; + } + + /* + * No luck - bad address or need to fault in the page: + */ + + /* Release the lock so get_user_pages can sleep */ + spin_unlock(&mm->page_table_lock); + + /* + * In the context of filemap_copy_from_user(), we are not allowed + * to sleep. We must fail this usercopy attempt and allow + * filemap_copy_from_user() to recover: drop its atomic kmap and use + * a sleeping kmap instead. + */ + if (in_atomic()) { + spin_lock(&mm->page_table_lock); + return NULL; + } + + down_read(&mm->mmap_sem); + ret = get_user_pages(current, mm, addr, 1, write, 0, NULL, NULL); + up_read(&mm->mmap_sem); + spin_lock(&mm->page_table_lock); + + if (ret <= 0) + return NULL; + + /* + * Go try the follow_page again. 
+ */ + goto retry; +} + +static inline void unpin_page(struct page *page) +{ + put_page(page); +} + +/* + * Access another process' address space. + * Source/target buffer must be kernel space, + * Do not walk the page table directly, use get_user_pages + */ +static int rw_vm(unsigned long addr, void *buf, int len, int write) +{ + struct mm_struct *mm = current->mm ? : &init_mm; + + if (!len) + return 0; + + spin_lock(&mm->page_table_lock); + + /* ignore errors, just check how much was successfully transferred */ + while (len) { + struct page *page = NULL; + int bytes, offset; + void *maddr; + + page = pin_page(addr, write); + if (!page) + break; + + bytes = len; + offset = addr & (PAGE_SIZE-1); + if (bytes > PAGE_SIZE-offset) + bytes = PAGE_SIZE-offset; + + maddr = kmap_atomic(page, KM_USER_COPY); + +#define HANDLE_TYPE(type) \ + case sizeof(type): *(type *)(maddr+offset) = *(type *)(buf); break; + + if (write) { + switch (bytes) { + HANDLE_TYPE(char); + HANDLE_TYPE(int); + HANDLE_TYPE(long long); + default: + memcpy(maddr + offset, buf, bytes); + } + } else { +#undef HANDLE_TYPE +#define HANDLE_TYPE(type) \ + case sizeof(type): *(type *)(buf) = *(type *)(maddr+offset); break; + switch (bytes) { + HANDLE_TYPE(char); + HANDLE_TYPE(int); + HANDLE_TYPE(long long); + default: + memcpy(buf, maddr + offset, bytes); + } +#undef HANDLE_TYPE + } + kunmap_atomic(maddr, KM_USER_COPY); + unpin_page(page); + len -= bytes; + buf += bytes; + addr += bytes; + } + spin_unlock(&mm->page_table_lock); + + return len; +} + +static int str_vm(unsigned long addr, void *buf0, int len, int copy) +{ + struct mm_struct *mm = current->mm ? : &init_mm; + struct page *page; + void *buf = buf0; + + if (!len) + return len; + + spin_lock(&mm->page_table_lock); + + /* ignore errors, just check how much was successfully transferred */ + while (len) { + int bytes, offset, left, copied; + char *maddr; + + page = pin_page(addr, copy == 2); + if (!page) { + spin_unlock(&mm->page_table_lock); + return -EFAULT; + } + bytes = len; + offset = addr & (PAGE_SIZE-1); + if (bytes > PAGE_SIZE-offset) + bytes = PAGE_SIZE-offset; + + maddr = kmap_atomic(page, KM_USER_COPY); + if (copy == 2) { + memset(maddr + offset, 0, bytes); + copied = bytes; + left = 0; + } else if (copy == 1) { + left = strncpy_count(buf, maddr + offset, bytes); + copied = bytes - left; + } else { + copied = strnlen(maddr + offset, bytes); + left = bytes - copied; + } + BUG_ON(bytes < 0 || copied < 0); + kunmap_atomic(maddr, KM_USER_COPY); + unpin_page(page); + len -= copied; + buf += copied; + addr += copied; + if (left) + break; + } + spin_unlock(&mm->page_table_lock); + + return len; +} + +/* + * Copies memory from userspace (ptr) into kernelspace (val). + * + * returns # of bytes not copied. + */ +int get_user_size(unsigned int size, void *val, const void *ptr) +{ + int ret; + + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) + ret = __direct_copy_from_user(val, ptr, size); + else + ret = rw_vm((unsigned long)ptr, val, size, 0); + if (ret) + /* + * Zero the rest: + */ + memset(val + size - ret, 0, ret); + return ret; +} + +/* + * Copies memory from kernelspace (val) into userspace (ptr). + * + * returns # of bytes not copied. 
+ */ +int put_user_size(unsigned int size, const void *val, void *ptr) +{ + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) + return __direct_copy_to_user(ptr, val, size); + else + return rw_vm((unsigned long)ptr, (void *)val, size, 1); +} + +int copy_str_fromuser_size(unsigned int size, void *val, const void *ptr) +{ + int copied, left; + + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { + left = strncpy_count(val, ptr, size); + copied = size - left; + BUG_ON(copied < 0); + + return copied; + } + left = str_vm((unsigned long)ptr, val, size, 1); + if (left < 0) + return left; + copied = size - left; + BUG_ON(copied < 0); + + return copied; +} + +int strlen_fromuser_size(unsigned int size, const void *ptr) +{ + int copied, left; + + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { + copied = strnlen(ptr, size) + 1; + BUG_ON(copied < 0); + + return copied; + } + left = str_vm((unsigned long)ptr, NULL, size, 0); + if (left < 0) + return 0; + copied = size - left + 1; + BUG_ON(copied < 0); + + return copied; +} + +int zero_user_size(unsigned int size, void *ptr) +{ + int left; + + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { + memset(ptr, 0, size); + return 0; + } + left = str_vm((unsigned long)ptr, NULL, size, 2); + if (left < 0) + return size; + return left; +} + +EXPORT_SYMBOL(get_user_size); +EXPORT_SYMBOL(put_user_size); +EXPORT_SYMBOL(zero_user_size); +EXPORT_SYMBOL(copy_str_fromuser_size); +EXPORT_SYMBOL(strlen_fromuser_size); diff -purN -X /home/mbligh/.diff.exclude reference/mm/vmscan.c current/mm/vmscan.c --- reference/mm/vmscan.c 2004-04-07 14:54:38.000000000 -0700 +++ current/mm/vmscan.c 2004-04-09 11:53:02.000000000 -0700 @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -42,7 +42,7 @@ /* * From 0 .. 100. Higher means more swappy. 
*/ -int vm_swappiness = 60; +int vm_swappiness = 0; static long total_memory; #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) @@ -173,23 +173,23 @@ static int shrink_slab(unsigned long sca return 0; } -/* Must be called with page's pte_chain_lock held. */ +/* Must be called with page's rmap_lock held. */ static inline int page_mapping_inuse(struct page *page) { - struct address_space *mapping = page->mapping; + struct address_space *mapping; /* Page is in somebody's page tables. */ if (page_mapped(page)) return 1; - /* XXX: does this happen ? */ - if (!mapping) - return 0; - /* Be more reluctant to reclaim swapcache than pagecache */ if (PageSwapCache(page)) return 1; + mapping = page_mapping(page); + if (!mapping) + return 0; + /* File is mmap'd by somebody. */ if (!list_empty(&mapping->i_mmap)) return 1; @@ -233,7 +233,7 @@ static void handle_write_error(struct ad struct page *page, int error) { lock_page(page); - if (page->mapping == mapping) { + if (page_mapping(page) == mapping) { if (error == -ENOSPC) set_bit(AS_ENOSPC, &mapping->flags); else @@ -277,29 +277,31 @@ shrink_list(struct list_head *page_list, if (PageWriteback(page)) goto keep_locked; - pte_chain_lock(page); + rmap_lock(page); referenced = page_referenced(page); if (referenced && page_mapping_inuse(page)) { /* In active use or really unfreeable. Activate it. */ - pte_chain_unlock(page); + rmap_unlock(page); goto activate_locked; } - mapping = page->mapping; + mapping = page_mapping(page); #ifdef CONFIG_SWAP /* - * Anonymous process memory without backing store. Try to - * allocate it some swap space here. + * Anonymous process memory has backing store? + * Try to allocate it some swap space here. * * XXX: implement swap clustering ? 
*/ - if (page_mapped(page) && !mapping && !PagePrivate(page)) { - pte_chain_unlock(page); + if (PageSwapCache(page)) + mapping = &swapper_space; + else if (PageAnon(page)) { + rmap_unlock(page); if (!add_to_swap(page)) goto activate_locked; - pte_chain_lock(page); - mapping = page->mapping; + rmap_lock(page); + mapping = &swapper_space; } #endif /* CONFIG_SWAP */ @@ -313,16 +315,16 @@ shrink_list(struct list_head *page_list, if (page_mapped(page) && mapping) { switch (try_to_unmap(page)) { case SWAP_FAIL: - pte_chain_unlock(page); + rmap_unlock(page); goto activate_locked; case SWAP_AGAIN: - pte_chain_unlock(page); + rmap_unlock(page); goto keep_locked; case SWAP_SUCCESS: ; /* try to free the page below */ } } - pte_chain_unlock(page); + rmap_unlock(page); /* * If the page is dirty, only perform writeback if that write @@ -364,7 +366,9 @@ shrink_list(struct list_head *page_list, .for_reclaim = 1, }; - list_move(&page->list, &mapping->locked_pages); + if (!PageSwapCache(page)) + list_move(&page->list, + &mapping->locked_pages); spin_unlock(&mapping->page_lock); SetPageReclaim(page); @@ -429,7 +433,7 @@ shrink_list(struct list_head *page_list, #ifdef CONFIG_SWAP if (PageSwapCache(page)) { - swp_entry_t swap = { .val = page->index }; + swp_entry_t swap = { .val = page->private }; __delete_from_swap_cache(page); spin_unlock(&mapping->page_lock); swap_free(swap); @@ -591,6 +595,7 @@ refill_inactive_zone(struct zone *zone, LIST_HEAD(l_active); /* Pages to go onto the active_list */ struct page *page; struct pagevec pvec; + struct sysinfo i; int reclaim_mapped = 0; long mapped_ratio; long distress; @@ -632,14 +637,39 @@ refill_inactive_zone(struct zone *zone, */ mapped_ratio = (ps->nr_mapped * 100) / total_memory; + si_swapinfo(&i); + if (likely(i.totalswap >= 100)) { + int app_centile, swap_centile; + + /* + * app_centile is the percentage of physical ram used + * by application pages. 
+ */ + si_meminfo(&i); + app_centile = 100 - ((i.freeram + get_page_cache_size() - + swapper_space.nrpages) / (i.totalram / 100)); + + /* + * swap_centile is the percentage of the last (sizeof physical + * ram) of swap free. + */ + swap_centile = i.freeswap / + (min(i.totalswap, i.totalram) / 100); + + /* + * Autoregulate vm_swappiness to be equal to the lowest of + * app_centile and swap_centile. -ck + */ + vm_swappiness = min(app_centile, swap_centile); + } else + vm_swappiness = 0; + /* * Now decide how much we really want to unmap some pages. The mapped * ratio is downgraded - just because there's a lot of mapped memory * doesn't necessarily mean that page reclaim isn't succeeding. * * The distress ratio is important - we don't want to start going oom. - * - * A 100% value of vm_swappiness overrides this algorithm altogether. */ swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; @@ -658,20 +688,19 @@ refill_inactive_zone(struct zone *zone, list_add(&page->lru, &l_active); continue; } - pte_chain_lock(page); + rmap_lock(page); if (page_referenced(page)) { - pte_chain_unlock(page); + rmap_unlock(page); list_add(&page->lru, &l_active); continue; } - pte_chain_unlock(page); + rmap_unlock(page); } /* * FIXME: need to consider page_count(page) here if/when we * reap orphaned pages via the LRU (Daniel's locking stuff) */ - if (total_swap_pages == 0 && !page->mapping && - !PagePrivate(page)) { + if (total_swap_pages == 0 && PageAnon(page)) { list_add(&page->lru, &l_active); continue; } diff -purN -X /home/mbligh/.diff.exclude reference/net/Kconfig current/net/Kconfig --- reference/net/Kconfig 2004-04-07 14:54:38.000000000 -0700 +++ current/net/Kconfig 2004-04-08 15:10:21.000000000 -0700 @@ -658,18 +658,17 @@ source "net/irda/Kconfig" source "net/bluetooth/Kconfig" +config KGDBOE + def_bool X86 && KGDB + config NETPOLL - def_bool NETCONSOLE + def_bool NETCONSOLE || KGDBOE config NETPOLL_RX - bool "Netpoll support for trapping incoming packets" - default n - 
depends on NETPOLL + def_bool KGDBOE config NETPOLL_TRAP - bool "Netpoll traffic trapping" - default n - depends on NETPOLL + def_bool KGDBOE config NET_POLL_CONTROLLER def_bool NETPOLL diff -purN -X /home/mbligh/.diff.exclude reference/net/core/dev.c current/net/core/dev.c --- reference/net/core/dev.c 2004-04-07 14:54:38.000000000 -0700 +++ current/net/core/dev.c 2004-04-08 15:10:21.000000000 -0700 @@ -1515,7 +1515,6 @@ static void sample_queue(unsigned long d } #endif - /** * netif_rx - post buffer to the network code * @skb: buffer to post @@ -1835,7 +1834,6 @@ static void net_rx_action(struct softirq unsigned long start_time = jiffies; int budget = netdev_max_backlog; - local_irq_disable(); while (!list_empty(&queue->poll_list)) { @@ -1861,6 +1859,10 @@ static void net_rx_action(struct softirq dev_put(dev); local_irq_disable(); } + +#ifdef CONFIG_KGDBOE + kgdb_process_breakpoint(); +#endif } out: local_irq_enable(); diff -purN -X /home/mbligh/.diff.exclude reference/net/ipv4/esp4.c current/net/ipv4/esp4.c --- reference/net/ipv4/esp4.c 2004-03-11 14:35:45.000000000 -0800 +++ current/net/ipv4/esp4.c 2004-04-09 11:53:04.000000000 -0700 @@ -10,6 +10,7 @@ #include #include #include +#include #define MAX_SG_ONSTACK 4 @@ -325,7 +326,15 @@ int esp_input(struct xfrm_state *x, stru skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen); skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen; memcpy(skb->nh.raw, workbuf, iph->ihl*4); - skb->nh.iph->tot_len = htons(skb->len); + iph = skb->nh.iph; + iph->tot_len = htons(skb->len + (skb->data - skb->nh.raw)); + iph->check = 0; + iph->check = ip_fast_csum(skb->nh.raw, iph->ihl); + { + unsigned char *oldmac = skb->mac.raw; + skb->mac.raw += encap_len + sizeof(struct ip_esp_hdr) + esp -> conf.ivlen; + memmove(skb->mac.raw, oldmac, skb->nh.raw - skb->mac.raw); + } } return 0; diff -purN -X /home/mbligh/.diff.exclude reference/net/ipv4/xfrm4_input.c current/net/ipv4/xfrm4_input.c --- 
reference/net/ipv4/xfrm4_input.c 2004-03-11 14:35:49.000000000 -0800 +++ current/net/ipv4/xfrm4_input.c 2004-04-09 11:53:04.000000000 -0700 @@ -95,6 +95,19 @@ int xfrm4_rcv_encap(struct sk_buff *skb, iph = skb->nh.iph; if (x->props.mode) { + if (iph->protocol == 0xfe) { + skb_push(skb, skb->data - skb->nh.raw); + if (!(skb->dev->flags&IFF_LOOPBACK)){ + dst_release(skb->dst); + skb->dst = NULL; + } + if (skb->sp) { + secpath_put(skb->sp); + skb->sp = NULL; + } + netif_rx(skb); + return 0; + } if (iph->protocol != IPPROTO_IPIP) goto drop; if (!pskb_may_pull(skb, sizeof(struct iphdr))) diff -purN -X /home/mbligh/.diff.exclude reference/scripts/gdb/README current/scripts/gdb/README --- reference/scripts/gdb/README 1969-12-31 16:00:00.000000000 -0800 +++ current/scripts/gdb/README 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,3 @@ +A couple of patches against gdb-6.0 from Jim Houston + +They are required for using kgdb on x86_64 targets. diff -purN -X /home/mbligh/.diff.exclude reference/scripts/gdb/gdb-switch-stacks.patch current/scripts/gdb/gdb-switch-stacks.patch --- reference/scripts/gdb/gdb-switch-stacks.patch 1969-12-31 16:00:00.000000000 -0800 +++ current/scripts/gdb/gdb-switch-stacks.patch 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,83 @@ +Return-Path: +Received: from mnm [127.0.0.1] + by localhost with POP3 (fetchmail-5.9.0) + for akpm@localhost (single-drop); Thu, 08 Jan 2004 17:54:39 -0800 (PST) +Received: from fire-1.osdl.org (air1.pdx.osdl.net [172.20.0.5]) + by mail.osdl.org (8.11.6/8.11.6) with ESMTP id i091r1o18423 + for ; Thu, 8 Jan 2004 17:53:01 -0800 +Received: from rwcrmhc13.comcast.net (rwcrmhc13.comcast.net [204.127.198.39]) + by fire-1.osdl.org (8.12.8/8.12.8) with ESMTP id i091r0Av030132 + for ; Thu, 8 Jan 2004 17:53:01 -0800 +Received: from h00e098094f32.ne.client2.attbi.com ([24.60.234.83]) + by comcast.net (rwcrmhc13) with ESMTP + id <2004010901525401500b89aoe>; Fri, 9 Jan 2004 01:52:54 +0000 +Received: by 
h00e098094f32.ne.client2.attbi.com (Postfix, from userid 500) + id 6B592C60FC; Thu, 8 Jan 2004 20:52:33 -0500 (EST) +To: Andrew Morton +From: jim.houston@comcast.net +Subject: gdb-switch-stacks.patch +Message-Id: <20040109015233.6B592C60FC@h00e098094f32.ne.client2.attbi.com> +Date: Thu, 8 Jan 2004 20:52:33 -0500 (EST) +X-MIMEDefang-Filter: osdl$Revision: 1.45 $ +X-Scanned-By: MIMEDefang 2.36 +X-Spam-Checker-Version: SpamAssassin 2.60 (1.212-2003-09-23-exp) on mnm +X-Spam-Level: +X-Spam-Status: No, hits=-4.6 required=2.0 tests=BAYES_00,NO_REAL_NAME + autolearn=no version=2.60 + + +Hi Andrew, + +This patch to gdb-6.0 adds an option to disable an error check +which reports a non-contiguous stack as corrupted. This is +needed to get a reliable stack trace using kgdb on Opteron +because the kernel uses a separate per-processor interrupt stack. + +This option is enabled with: + + set backtrace switch-stacks on + +Jim Houston - Concurrent Computer Corp. + +- + +diff -urN old/gdb-6.0/gdb/frame.c new/gdb-6.0/gdb/frame.c +--- old/gdb-6.0/gdb/frame.c 2003-12-31 15:27:45.866840920 -0500 ++++ new/gdb-6.0/gdb/frame.c 2003-12-31 15:27:58.310949128 -0500 +@@ -138,6 +138,7 @@ + /* Flag to indicate whether backtraces should stop at main et.al. */ + + static int backtrace_past_main; ++static int backtrace_switch_stacks; + static unsigned int backtrace_limit = UINT_MAX; + + +@@ -1971,7 +1972,7 @@ + the next frame. This happens when a frame unwind goes backwards. + Since the sentinel frame doesn't really exist, don't compare the + inner-most against that sentinel. 
*/ +- if (this_frame->level > 0 ++ if (!backtrace_switch_stacks && this_frame->level > 0 + && frame_id_inner (get_frame_id (this_frame), + get_frame_id (this_frame->next))) + error ("Previous frame inner to this frame (corrupt stack?)"); +@@ -2461,6 +2462,19 @@ + NULL, NULL, &set_backtrace_cmdlist, + &show_backtrace_cmdlist); + ++ add_setshow_boolean_cmd ("switch-stacks", class_obscure, ++ &backtrace_switch_stacks, "\ ++Set if thread may use multiple stacks. This flag disables checks in\n\ ++the stack trace which expect that the stack grew in a consistent direction.\n\ ++This option is needed for kernel debug when the kernel has separate\n\ ++process and interrupt stacks.", "\ ++Show if thread may use multiple stacks. This flag disables checks in\n\ ++the stack trace which expect that the stack grew in a consistent direction.\n\ ++This option is needed for kernel debug when the kernel has separate\n\ ++process and interrupt stacks.", ++ NULL, NULL, &set_backtrace_cmdlist, ++ &show_backtrace_cmdlist); ++ + add_setshow_uinteger_cmd ("limit", class_obscure, + &backtrace_limit, "\ + Set an upper bound on the number of backtrace levels.\n\ diff -purN -X /home/mbligh/.diff.exclude reference/scripts/gdb/gdb-thread-skip-frame.patch current/scripts/gdb/gdb-thread-skip-frame.patch --- reference/scripts/gdb/gdb-thread-skip-frame.patch 1969-12-31 16:00:00.000000000 -0800 +++ current/scripts/gdb/gdb-thread-skip-frame.patch 2004-04-08 15:10:21.000000000 -0700 @@ -0,0 +1,133 @@ +Received: from mnm [127.0.0.1] + by localhost with POP3 (fetchmail-5.9.0) + for akpm@localhost (single-drop); Thu, 08 Jan 2004 18:10:40 -0800 (PST) +Received: from fire-1.osdl.org (air1.pdx.osdl.net [172.20.0.5]) + by mail.osdl.org (8.11.6/8.11.6) with ESMTP id i0929qo20750 + for ; Thu, 8 Jan 2004 18:09:52 -0800 +Received: from sccrmhc13.comcast.net (sccrmhc13.comcast.net [204.127.202.64]) + by fire-1.osdl.org (8.12.8/8.12.8) with ESMTP id i0929qAv031470 + for ; Thu, 8 Jan 2004 18:09:52 -0800 +Received: 
from h00e098094f32.ne.client2.attbi.com ([24.60.234.83]) + by comcast.net (sccrmhc13) with ESMTP + id <2004010902094601600ni0oje>; Fri, 9 Jan 2004 02:09:46 +0000 +Received: by h00e098094f32.ne.client2.attbi.com (Postfix, from userid 500) + id 6DA02C60FC; Thu, 8 Jan 2004 21:09:20 -0500 (EST) +From: Jim Houston +To: Andrew Morton +Subject: gdb-thread-skip-frame.patch +Message-Id: <20040109020920.6DA02C60FC@h00e098094f32.ne.client2.attbi.com> +Date: Thu, 8 Jan 2004 21:09:20 -0500 (EST) +X-MIMEDefang-Filter: osdl$Revision: 1.45 $ +X-Scanned-By: MIMEDefang 2.36 +X-Spam-Checker-Version: SpamAssassin 2.60 (1.212-2003-09-23-exp) on mnm +X-Spam-Level: +X-Spam-Status: No, hits=-4.9 required=2.0 tests=BAYES_00 autolearn=ham + version=2.60 + + +Hi Andrew, + +In the i386 kgdb_stub, George Anzinger has code to find the first +stack frame which is not in the scheduler. He returns the register +information for this frame so the "info thread" command will +display an interesting frame. + +Doing this on the x86_64 was not as easy because frame pointers +are not used consistently. The attached patch adds an option to +gdb to skip over frames which are executing function listed in the +option. + +Use: + set skip-frame thread_return,schedule_timeout + +Jim Houston - Concurrent Computer Corp. + +-- + +--- old/gdb-6.0/gdb/thread.c 2004-01-06 12:34:14.786496352 -0500 ++++ new/gdb-6.0/gdb/thread.c 2004-01-06 12:34:28.804365312 -0500 +@@ -404,6 +404,43 @@ + } + } + ++/* ++ * When using gdb as a kernel debugger, its really boring to ++ * see every thread is blocked in schedule. By setting a ++ * list of functions with "set skip-frame schedule,thread_return" ++ * we can display the frame that called into the scheduler. 
++ */ ++static char *skip_frame_string; ++ ++int ++skip_frame(struct frame_info *fi) ++{ ++ struct minimal_symbol *msym; ++ CORE_ADDR pc; ++ char *name; ++ char *s, *r; ++ int n; ++ ++ pc = get_frame_pc (fi); ++ msym = lookup_minimal_symbol_by_pc_section(pc, NULL); ++ if (!msym) ++ return 0; ++ name = SYMBOL_LINKAGE_NAME(msym); ++ ++ for (s = skip_frame_string; s && *s ; ) { ++ if ((r = strchr(s, ','))) ++ n = r - s - 1; ++ else ++ n = strlen(s); ++ if (n && strncmp(s, name, n) == 0) ++ return 1; ++ if (!r) ++ break; ++ s = r + 1; ++ } ++ return 0; ++} ++ + /* Print information about currently known threads + + * Note: this has the drawback that it _really_ switches +@@ -416,7 +453,7 @@ + { + struct thread_info *tp; + ptid_t current_ptid; +- struct frame_info *cur_frame; ++ struct frame_info *cur_frame, *fi; + int saved_frame_level = frame_relative_level (get_selected_frame ()); + int counter; + char *extra_info; +@@ -448,6 +485,18 @@ + puts_filtered (" "); + + switch_to_thread (tp->ptid); ++ ++ if (skip_frame_string) { ++ /* skip up the stack to an interesting frame. */ ++ fi = get_selected_frame (); ++ while (fi) { ++ if (!skip_frame(fi)) ++ break; ++ fi = get_prev_frame(fi); ++ if (fi) ++ select_frame(fi); ++ } ++ } + print_stack_frame (get_selected_frame (), -1, 0); + } + +@@ -740,4 +789,12 @@ + + if (!xdb_commands) + add_com_alias ("t", "thread", class_run, 1); ++ ++ add_show_from_set (add_set_cmd ("skip-frame", class_obscure, ++ var_string_noescape, (char *)&skip_frame_string, "\ ++Set list of functions to skip when choosing the frame to display\n\ ++for a info-thread command. 
When gdb is used for kernel debug this option\n\ ++allows the frame which calls the scheduler to be displayed rather than\n\ ++having all blocked threads showing the same function in the scheduler.", ++ &setlist), &showlist); + } diff -purN -X /home/mbligh/.diff.exclude reference/scripts/schedstats/latency.c current/scripts/schedstats/latency.c --- reference/scripts/schedstats/latency.c 1969-12-31 16:00:00.000000000 -0800 +++ current/scripts/schedstats/latency.c 2004-04-09 21:41:41.000000000 -0700 @@ -0,0 +1,131 @@ +/* + * latency -- measure the scheduling latency of a particular process from + * the extra information provided in /proc/stat by version 4 of + * the schedstat patch. PLEASE NOTE: This program does NOT check to + * make sure that extra information is there; it assumes the last + * three fields in that line are the ones it's interested in. Using + * it on a kernel that does not have the schedstat patch compiled in + * will cause it to happily produce bizarre results. + * + * Note too that this is known to work only with versions 4 and 5 + * of the schedstat patch, for similar reasons. + * + * This currently monitors only one pid at a time but could easily + * be modified to do more. + */ +#include +#include + +extern char *index(), *rindex(); +char procbuf[512]; +char statname[64]; +char *Progname; +FILE *fp; + +void usage() +{ + fprintf(stderr,"Usage: %s [-s sleeptime ] \n", Progname); + exit(-1); +} + +/* + * get_stats() -- we presume that we are interested in the last three + * fields of the line we are handed, and further, that they contain + * only numbers and single spaces. 
+ */ +void get_stats(char *buf, char *id, unsigned int *run_ticks, + unsigned int *wait_ticks, unsigned int *nran) +{ + char *ptr; + + ptr = index(buf, ')') + 1; + *ptr = 0; + strcpy(id, buf); + *ptr = ' '; + + ptr = rindex(buf,' '); + if (!ptr) return; + + *nran = atoi(ptr--); + + while (isdigit(*ptr) && --ptr != buf); + if (ptr == buf) return; + + *wait_ticks = atoi(ptr--); + + while (isdigit(*ptr) && --ptr != buf); + if (ptr == buf) return; + + *run_ticks = atoi(ptr); +} + +main(int argc, char *argv[]) +{ + int c; + unsigned int sleeptime = 5, pid = 0, verbose = 0; + char id[32]; + unsigned int run_ticks, wait_ticks, nran; + unsigned int orun_ticks=0, owait_ticks=0, oran=0; + + Progname = argv[0]; + id[0] = 0; + while ((c = getopt(argc,argv,"s:v")) != -1) { + switch (c) { + case 's': + sleeptime = atoi(optarg); + break; + case 'v': + verbose++; + break; + default: + usage(); + } + } + + if (optind < argc) { + pid = atoi(argv[optind]); + } + + if (!pid) + usage(); + + /* + * now just spin collecting the stats + */ + sprintf(statname,"/proc/%d/stat", pid); + while (fp = fopen(statname, "r")) { + if (!fgets(procbuf, sizeof(procbuf), fp)) + break; + + get_stats(procbuf, id, &run_ticks, &wait_ticks, &nran); + + if (verbose) + printf("%s %d(%d) %d(%d) %d(%d) %4.2f %4.2f\n", + id, run_ticks, run_ticks - orun_ticks, + wait_ticks, wait_ticks - owait_ticks, + nran, nran - oran, + nran - oran ? + (double)(run_ticks-orun_ticks)/(nran - oran) : 0, + nran - oran ? + (double)(wait_ticks-owait_ticks)/(nran - oran) : 0); + else + printf("%s avgrun=%4.2fms avgwait=%4.2fms\n", + id, nran - oran ? + (double)(run_ticks-orun_ticks)/(nran - oran) : 0, + nran - oran ? 
+ (double)(wait_ticks-owait_ticks)/(nran - oran) : 0); + fclose(fp); + oran = nran; + orun_ticks = run_ticks; + owait_ticks = wait_ticks; + sleep(sleeptime); + fp = fopen(statname,"r"); + if (!fp) + break; + } + if (id[0]) + printf("Process %s has exited.\n", id); + else + printf("Process %d does not exist.\n", pid); + exit(0); +} diff -purN -X /home/mbligh/.diff.exclude reference/scripts/schedstats/stats-6.pl current/scripts/schedstats/stats-6.pl --- reference/scripts/schedstats/stats-6.pl 1969-12-31 16:00:00.000000000 -0800 +++ current/scripts/schedstats/stats-6.pl 2004-04-09 21:41:41.000000000 -0700 @@ -0,0 +1,306 @@ +#!/usr/bin/perl + +use Getopt::Std; + +$curr_version = 6; + +$YLD_BOTH_EMPTY = 1; $PT_GAINED_IDLE = 16; +$YLD_ACT_EMPTY = 2; $PT_LOST_IDLE = 17; +$YLD_EXP_EMPTY = 3; $PT_GAINED_NOTIDLE = 18; +$YLD_CNT = 4; $PT_LOST_NOTIDLE = 19; +$SCHED_NOSWITCH = 5; $ALB_CNT = 20; +$SCHED_SWITCH = 6; $ALB_GAINED = 21; +$SCHED_CNT = 7; $ALB_LOST = 22; +$LB_IDLE = 8; $SBE_CNT = 23; +$LB_BUSY = 9; $MTC_CNT = 24; +$LB_CNT = 10; $LBNI_CNT = 25; +$LB_IMBALANCE = 11; $LBNI_IMBALANCE = 26; +$LB_NOBUSYG = 12; $CPU_CPUTIME = 27; +$LB_NOBUSYQ = 13; $CPU_RUNDELAY = 28; +$PT_GAINED_NEWIDLE = 14; $CPU_TRIPCNT = 29; +$PT_LOST_NEWIDLE = 15; + +die "Usage: $0 [-t] [file]\n" unless &getopts("tc"); + +while (<>) { + @curr = split; + if ($curr[0] =~ /cpu(\d+)/) { + $per_cpu_curr[$1] = [ @curr ]; + $max_cpu = $1 if ($1 > $max_cpu); + next; + } + next if (/^$/); + if ($curr[0] eq "version") { + if ($curr[1] != $curr_version) { + die "$0: Version mismatch: input is version $curr[0] but this tool\nis for version $curr_version.\n"; + } + next; + } + if ($curr[0] eq "timestamp") { + $delta = $curr[1] - $otimestamp; + $otimestamp = $curr[1]; + next; + } + + # + # format of line in /proc/schedstat + # + # tag 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 \ + # 25 26 27 28 29 + # + # tag is "cpuN" or "cpu". 
Right now, we ignore "cpuN" lines (this tool + # doesn't collate per-cpu statistics, although it would be trivial to + # do so.) + # + # version == 6 + # NOTE: the active queue is considered empty if it has only one process + # in it, since obviously the process calling sched_yield is that process. + # + # First four are sched_yield statistics: + # 1) # of times both the active and the expired queue were empty + # 2) # of times just the active queue was empty + # 3) # of times just the expired queue was empty + # 4) # of times sched_yield() was called + # + # Next three are schedule() statistics: + # 5) # of times the active queue had at least one other process on it. + # 6) # of times we switched to the expired queue and reused it + # 7) # of times schedule() was called + # + # Next six are statistics dealing with load balancing: + # 8) # of times load_balance was called at an idle tick + # 9) # of times load_balance was called at an busy tick + # 10) # of times load_balance was called + # 11) sum of imbalances discovered (if any) with each call to + # load_balance + # 12) # of times load_balance was called when we did not find a + # "busiest" cpu group + # 13) # of times load_balance was called when we did not find a + # "busiest" run queue + # + # Next six are statistics dealing with pull_task(): + # 14) # of times pull_task gave a task to this cpu when newly idle + # 15) # of times pull_task took a task from this cpu when newly idle + # 16) # of times pull_task gave a task to this cpu when idle + # 17) # of times pull_task took a task from this cpu when idle + # 18) # of times pull_task gave a task to this cpu when busy + # 19) # of times pull_task took a task from this cpu when busy + # + # Next three are statistics dealing with active_load_balance(): + # 20) # of times active_load_balance() was called + # 21) # of times active_load_balance() caused this cpu to gain a task + # 22) # of times active_load_balance() caused this cpu to lose a task + # + # Next two 
are function call counters: + # 23) # of times sched_balance_exec() was called + # 24) # of times migrate_to_cpu() was called + # + # Next two are dealing with load_balance_newidle(): + # 25) # of times load_balance_newidle was called + # 26) sum of imbalances discovered (if any) with each call to + # load_balance_newidle() + # + # Next three are statistics dealing with scheduling latency: + # 27) sum of all time spent running by tasks on this processor (in ms) + # 28) sum of all time spent waiting to run by tasks on this processor + # (in ms) + # 29) # of tasks (not necessarily unique) given to the processor + # + + foreach $i (1..29) { + $diff[$i] = $curr[$i] - $prev[$i]; + } + + for ($cpu = 0; $cpu <= $max_cpu; $cpu++) { + @arr_curr = @{$per_cpu_curr[$cpu]}; + @arr_prev = @{$per_cpu_prev[$cpu]}; + foreach $i (1..29) { + $arr_diff[$i] = $arr_curr[$i] - $arr_prev[$i]; + } + $per_cpu_diff[$cpu] = [ @arr_diff ]; + } + + $timestart = $delta if (!$timestart); + $timestamp += $delta; + + if (!$opt_t) { + print_diffs(); + @prev = @curr; + @per_cpu_prev = @per_cpu_curr; + } else { + @prev = @curr if (!defined(@prev)); + } +} + +print_diffs() if ($opt_t); + +sub print_diffs { + printf "%02d:%02d:%02d--------------------------------------------------------------\n", + ($timestamp-$timestart)/3600000, + (($timestamp-$timestart)/60000)%60, + (($timestamp-$timestart)/1000)%60; + + # + # sched_yield() stats + # + printf " %7d sys_sched_yield()\n", $diff[$YLD_CNT]; + printf " %7d(%6.2f%%) found (only) active queue empty on current cpu\n", + $diff[$YLD_ACT_EMPTY]-$diff[$YLD_BOTH_EMPTY], + $diff[$YLD_CNT] ? + (100*($diff[$YLD_ACT_EMPTY]-$diff[$YLD_BOTH_EMPTY])/ + $diff[$YLD_CNT]) : 0; + printf " %7d(%6.2f%%) found (only) expired queue empty on current cpu\n", + $diff[$EXP_EMPTY], + $diff[$YLD_CNT] ? (100*$diff[$EXP_EMPTY]/$diff[$YLD_CNT]) : 0; + printf " %7d(%6.2f%%) found both queues empty on current cpu\n", + $diff[$YLD_BOTH_EMPTY], + $diff[$YLD_CNT] ? 
(100*$diff[$YLD_BOTH_EMPTY]/$diff[$YLD_CNT]) : 0; + printf " %7d(%6.2f%%) found neither queue empty on current cpu\n\n", + $diff[$YLD_CNT]-($diff[$EXP_EMPTY]+$diff[$YLD_ACT_EMPTY]), + $diff[$YLD_CNT] ? + 100*($diff[$YLD_CNT]-($diff[$EXP_EMPTY]+$diff[$YLD_ACT_EMPTY]))/ + $diff[$YLD_CNT] : 0; + + # + # schedule() stats + # + printf " %7d schedule()\n", $diff[$SCHED_CNT]; + printf " %7d(%6.2f%%) switched active and expired queues\n", + $diff[$SCHED_SWITCH], $diff[$SCHED_CNT] ? (100*$diff[$SCHED_SWITCH]/$diff[$SCHED_CNT]) : 0; + printf " %7d(%6.2f%%) used existing active queue\n", + $diff[$SCHED_SWITCH]-$diff[$SCHED_SWITCH], $diff[$SCHED_CNT] ? (100*($diff[$SCHED_SWITCH]-$diff[$SCHED_SWITCH])/$diff[$SCHED_CNT]) : 0; + printf " %7d(%6.2f%%) processor went idle\n\n", + $diff[$SCHED_CNT] - $diff[$SCHED_SWITCH], $diff[$SCHED_CNT] ? (100*($diff[$SCHED_CNT] - $diff[$SCHED_SWITCH])/$diff[$SCHED_CNT]) : 0; + + # + # load_balance() stats + # + printf " %7d load_balance()\n", $diff[$LB_CNT]; + printf " %7d(%6.2f%%) called while idle\n", $diff[$LB_IDLE], + $diff[$LB_CNT] ? 100*$diff[$LB_IDLE]/$diff[$LB_CNT] : 0; + printf " %7d(%6.2f%%) called while busy\n", $diff[$LB_BUSY], + $diff[$LB_CNT] ? 100*($diff[$LB_BUSY])/$diff[$LB_CNT] : 0; + printf " %7d(%6.2f%%) no \"busiest group\" found\n", $diff[$LB_NOBUSYG], + $diff[$LB_CNT] ? 100*($diff[$LB_NOBUSYG])/$diff[$LB_CNT] : 0; + printf " %7d(%6.2f%%) no \"busiest queue\" found\n", $diff[$LB_NOBUSYQ], + $diff[$LB_CNT] ? 
100*($diff[$LB_NOBUSYQ])/$diff[$LB_CNT] : 0; + if ($diff[$LB_CNT]-$diff[$LB_NOBUSYG]-$diff[$LB_NOBUSYQ]) { + printf " imbalance=%d, count=%d\n", $diff[$LB_IMBALANCE], + ($diff[$LB_CNT]-$diff[$LB_NOBUSYG]-$diff[$LB_NOBUSYQ]); + $imbalance = $diff[$LB_IMBALANCE] / + ($diff[$LB_CNT]-$diff[$LB_NOBUSYG]-$diff[$LB_NOBUSYQ]); + if ($imbalance < 10) { + $fmt = "%7.3f"; + } elsif ($imbalance < 100) { + $fmt = "%8.2f"; + } else { + $fmt = "%9.1f"; + } + printf " $fmt average imbalance (over %d)\n", + $imbalance, $diff[$LB_CNT]-$diff[$LB_NOBUSYG]-$diff[$LB_NOBUSYQ]; + } + else { + printf " no imbalances\n"; + } + + # + # pull_task() stats + # + print "\n"; + $active_balance_total = $total = 0; + for ($cpu = 0; $cpu <= $max_cpu; $cpu++) { + @arr = @{$per_cpu_diff[$cpu]}; + $total += $arr[$PT_GAINED_NEWIDLE] + $arr[$PT_GAINED_IDLE] + + $arr[$PT_GAINED_NOTIDLE] + $arr[$ALB_GAINED]; + $active_balance_total += $arr[$ALB_GAINED]; + } + printf " %7d pull_task()\n", $total; + printf " %7d total tasks moved between cpus\n", $total; + if ($opt_c) { + for ($cpu = 0; $cpu <= $max_cpu; $cpu++) { + @arr = @{$per_cpu_diff[$cpu]}; + if ($arr[$PT_GAINED_NEWIDLE] || $arr[$PT_LOST_NEWIDLE]) { + printf " %7d/%-7d cpu %2d lost/gained task to/from another cpu when newly idle\n", + $arr[$PT_GAINED_NEWIDLE], $arr[$PT_LOST_NEWIDLE], $cpu; + } + if ($arr[$PT_GAINED_IDLE] || $arr[$PT_LOST_IDLE]) { + printf " %7d/%-7d cpu %2d lost/gained task to/from another cpu while idle\n", + $arr[$PT_GAINED_IDLE], $arr[$PT_LOST_IDLE], $cpu; + } + if ($arr[$PT_GAINED_NOTIDLE] || $arr[$PT_LOST_NOTIDLE]) { + printf " %7d/%-7d cpu %2d lost/gained task to/from another cpu when busy\n", + $arr[$PT_GAINED_NOTIDLE], $arr[$PT_LOST_NOTIDLE], $cpu; + } + if ($arr[$ALB_GAINED] || $arr[$ALB_LOST]) { + printf " %7d/%-7d cpu %2d lost/gained task to/from another cpu from active_load_balance()\n", + $arr[$ALB_GAINED], $arr[$ALB_LOST], $cpu; + } + } + } else { + $idle = $notidle = $alb = $newidle = 0; + for ($cpu = 0; $cpu <= 
$max_cpu; $cpu++) { + @arr = @{$per_cpu_diff[$cpu]}; + $newidle += $arr[$PT_GAINED_NEWIDLE]; + $idle += $arr[$PT_GAINED_IDLE]; + $notidle += $arr[$PT_GAINED_NOTIDLE]; + $alb += $arr[$ALB_GAINED]; + } + printf " %7d(%6.2f%%) moved when newly idle\n", + $newidle, $total ? 100*($newidle/$total) : 0; + printf " %7d(%6.2f%%) moved while idle\n", + $idle, $total ? 100*($idle/$total) : 0; + printf " %7d(%6.2f%%) moved while busy\n", + $busy, $total ? 100*($busy/$total) : 0; + printf " %7d(%6.2f%%) moved from active_load_balance()\n", + $alb, $total ? 100*($alb/$total) : 0; + } + print "\n"; + + # + # active_load_balance() stats + # + printf " %7d active_load_balance()\n", $diff[$ALB_CNT]; + + # + # function call counts + # + printf " %7d sched_balance_exec()\n", $diff[$SBE_CNT]; + printf " %7d migrate_to_cpu()\n", $diff[$MTC_CNT]; + printf("\n"); + + # + # load_balance_newidle() stats + # + printf " %7d load_balance_newidle()\n", $diff[$LBNI_CNT]; + if ($diff[$LBNI_CNT]) { + $imbalance = $diff[$LBNI_IMBALANCE] / $diff[$LB_CNT]; + if ($imbalance < 10) { + $fmt = "%7.3f"; + } elsif ($imbalance < 100) { + $fmt = "%8.2f"; + } else { + $fmt = "%9.1f"; + } + printf " $fmt average imbalance\n", $imbalance; + } + else { + printf " no imbalances\n"; + } + + printf("\n"); + + # + # latency stats + # + printf " Latency\n"; + for ($cpu = 0; $cpu <= $max_cpu; $cpu++) { + @arr = @{$per_cpu_diff[$cpu]}; + if ($arr[$CPU_TRIPCNT] && ($arr[$CPU_CPUTIME] || $arr[$CPU_RUNDELAY])) { + printf " %6.2f/%-6.2f avg runtime/latency on cpu %d (ms)\n", + $arr[$CPU_CPUTIME]/$arr[$CPU_TRIPCNT], + $arr[$CPU_RUNDELAY]/$arr[$CPU_TRIPCNT], $cpu; + } + } + + printf("\n"); +}